author     Thomas Gleixner <tglx@linutronix.de>   2018-08-05 16:39:29 +0200
committer  Thomas Gleixner <tglx@linutronix.de>   2018-08-05 16:39:29 +0200
commit     f2701b77bbd992f3df4631de8493f21db0830452 (patch)
tree       b05b2bf8b47002ae81c79aaa9de2a311b8ee075e
parent     x86/KVM/VMX: Don't set l1tf_flush_l1d from vmx_handle_external_intr() (diff)
parent     Linux 4.18-rc7 (diff)
Merge 4.18-rc7 into master to pick up the KVM dependency
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r--Documentation/admin-guide/kernel-parameters.txt5
-rw-r--r--Documentation/admin-guide/pm/intel_pstate.rst16
-rw-r--r--Documentation/core-api/kernel-api.rst2
-rw-r--r--Documentation/device-mapper/writecache.txt2
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt2
-rw-r--r--Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt2
-rw-r--r--Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt2
-rw-r--r--Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt23
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/hideep.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt2
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt2
-rw-r--r--Documentation/devicetree/bindings/mips/brcm/soc.txt2
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fman.txt2
-rw-r--r--Documentation/devicetree/bindings/power/power_domain.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/tps65090.txt2
-rw-r--r--Documentation/devicetree/bindings/reset/st,sti-softreset.txt2
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/qcom,apq8096.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/rockchip,dwc3.txt3
-rw-r--r--Documentation/devicetree/bindings/w1/w1-gpio.txt2
-rw-r--r--Documentation/driver-api/infrastructure.rst4
-rw-r--r--Documentation/filesystems/Locking7
-rw-r--r--Documentation/filesystems/cifs/AUTHORS7
-rw-r--r--Documentation/filesystems/cifs/CHANGES3
-rw-r--r--Documentation/filesystems/cifs/TODO17
-rw-r--r--Documentation/filesystems/vfs.txt13
-rw-r--r--Documentation/kbuild/kbuild.txt17
-rw-r--r--Documentation/kbuild/kconfig-language.txt6
-rw-r--r--Documentation/kbuild/kconfig.txt51
-rw-r--r--Documentation/networking/bonding.txt2
-rw-r--r--Documentation/networking/e100.rst131
-rw-r--r--Documentation/networking/e1000.rst149
-rw-r--r--Documentation/networking/strparser.txt2
-rw-r--r--Documentation/trace/histogram.txt23
-rw-r--r--Documentation/usb/gadget_configfs.txt2
-rw-r--r--Documentation/virtual/kvm/api.txt2
-rw-r--r--MAINTAINERS71
-rw-r--r--Makefile15
-rw-r--r--arch/alpha/Kconfig5
-rw-r--r--arch/alpha/kernel/osf_sys.c5
-rw-r--r--arch/alpha/lib/Makefile2
-rw-r--r--arch/alpha/lib/dec_and_lock.c44
-rw-r--r--arch/arc/Kconfig2
-rw-r--r--arch/arc/Makefile15
-rw-r--r--arch/arc/configs/axs101_defconfig1
-rw-r--r--arch/arc/configs/axs103_defconfig1
-rw-r--r--arch/arc/configs/axs103_smp_defconfig1
-rw-r--r--arch/arc/configs/haps_hs_defconfig1
-rw-r--r--arch/arc/configs/haps_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/hsdk_defconfig1
-rw-r--r--arch/arc/configs/nsim_700_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_defconfig1
-rw-r--r--arch/arc/configs/nsim_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_defconfig1
-rw-r--r--arch/arc/configs/nsimosci_hs_smp_defconfig1
-rw-r--r--arch/arc/configs/tb10x_defconfig1
-rw-r--r--arch/arc/include/asm/entry-compact.h6
-rw-r--r--arch/arc/include/asm/entry.h3
-rw-r--r--arch/arc/include/asm/mach_desc.h2
-rw-r--r--arch/arc/include/asm/page.h2
-rw-r--r--arch/arc/include/asm/pgtable.h2
-rw-r--r--arch/arc/kernel/irq.c2
-rw-r--r--arch/arc/kernel/process.c47
-rw-r--r--arch/arc/plat-hsdk/Kconfig3
-rw-r--r--arch/arc/plat-hsdk/platform.c62
-rw-r--r--arch/arm/Kconfig8
-rw-r--r--arch/arm/boot/dts/am335x-bone-common.dtsi1
-rw-r--r--arch/arm/boot/dts/am3517.dtsi9
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts2
-rw-r--r--arch/arm/boot/dts/armada-385-synology-ds116.dts2
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi2
-rw-r--r--arch/arm/boot/dts/bcm-cygnus.dtsi24
-rw-r--r--arch/arm/boot/dts/bcm-hr2.dtsi24
-rw-r--r--arch/arm/boot/dts/bcm-nsp.dtsi32
-rw-r--r--arch/arm/boot/dts/bcm5301x.dtsi2
-rw-r--r--arch/arm/boot/dts/da850.dtsi6
-rw-r--r--arch/arm/boot/dts/dra7.dtsi2
-rw-r--r--arch/arm/boot/dts/imx51-zii-rdu1.dts2
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6sx.dtsi2
-rw-r--r--arch/arm/boot/dts/omap4-droid4-xt894.dts9
-rw-r--r--arch/arm/boot/dts/socfpga.dtsi4
-rw-r--r--arch/arm/boot/dts/socfpga_arria10.dtsi5
-rw-r--r--arch/arm/common/Makefile2
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig2
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig2
-rw-r--r--arch/arm/configs/multi_v7_defconfig378
-rw-r--r--arch/arm/crypto/speck-neon-core.S6
-rw-r--r--arch/arm/firmware/Makefile3
-rw-r--r--arch/arm/kernel/head-nommu.S2
-rw-r--r--arch/arm/kernel/process.c1
-rw-r--r--arch/arm/kernel/signal.c4
-rw-r--r--arch/arm/mach-bcm/Kconfig1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c2
-rw-r--r--arch/arm/mach-omap2/omap-smp.c41
-rw-r--r--arch/arm/mach-pxa/irq.c4
-rw-r--r--arch/arm/mach-rpc/ecard.c2
-rw-r--r--arch/arm/mach-socfpga/Kconfig1
-rw-r--r--arch/arm/mm/init.c9
-rw-r--r--arch/arm/net/bpf_jit_32.c2
-rw-r--r--arch/arm/xen/enlighten.c7
-rw-r--r--arch/arm64/Makefile10
-rw-r--r--arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi6
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg-s400.dts15
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-axg.dtsi4
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gx.dtsi12
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi2
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts3
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi7
-rw-r--r--arch/arm64/boot/dts/amlogic/meson-gxl.dtsi8
-rw-r--r--arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi8
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts4
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts4
-rw-r--r--arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi4
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts2
-rw-r--r--arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts2
-rw-r--r--arch/arm64/boot/dts/marvell/armada-cp110.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi2
-rw-r--r--arch/arm64/boot/dts/qcom/msm8916.dtsi4
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts2
-rw-r--r--arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts2
-rw-r--r--arch/arm64/configs/defconfig102
-rw-r--r--arch/arm64/crypto/aes-glue.c2
-rw-r--r--arch/arm64/include/asm/alternative.h7
-rw-r--r--arch/arm64/include/asm/kvm_host.h1
-rw-r--r--arch/arm64/include/asm/pgtable.h6
-rw-r--r--arch/arm64/include/asm/simd.h19
-rw-r--r--arch/arm64/include/asm/sysreg.h11
-rw-r--r--arch/arm64/include/asm/tlb.h4
-rw-r--r--arch/arm64/kernel/alternative.c51
-rw-r--r--arch/arm64/kernel/cpufeature.c6
-rw-r--r--arch/arm64/kernel/module.c5
-rw-r--r--arch/arm64/kernel/smp.c2
-rw-r--r--arch/arm64/kvm/fpsimd.c36
-rw-r--r--arch/arm64/mm/dma-mapping.c9
-rw-r--r--arch/arm64/mm/hugetlbpage.c7
-rw-r--r--arch/arm64/mm/init.c4
-rw-r--r--arch/arm64/mm/proc.S5
-rw-r--r--arch/ia64/include/asm/tlb.h2
-rw-r--r--arch/ia64/kernel/perfmon.c6
-rw-r--r--arch/ia64/mm/init.c14
-rw-r--r--arch/m68k/include/asm/mcf_pgalloc.h4
-rw-r--r--arch/microblaze/Kconfig.debug7
-rw-r--r--arch/microblaze/include/asm/setup.h5
-rw-r--r--arch/microblaze/include/asm/unistd.h2
-rw-r--r--arch/microblaze/include/uapi/asm/unistd.h2
-rw-r--r--arch/microblaze/kernel/Makefile4
-rw-r--r--arch/microblaze/kernel/heartbeat.c72
-rw-r--r--arch/microblaze/kernel/platform.c29
-rw-r--r--arch/microblaze/kernel/reset.c11
-rw-r--r--arch/microblaze/kernel/syscall_table.S2
-rw-r--r--arch/microblaze/kernel/timer.c7
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/ath79/common.c2
-rw-r--r--arch/mips/ath79/mach-pb44.c2
-rw-r--r--arch/mips/include/asm/io.h2
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/entry.S8
-rw-r--r--arch/mips/kernel/mcount.S27
-rw-r--r--arch/mips/kernel/process.c43
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/kernel/signal.c3
-rw-r--r--arch/mips/kernel/traps.c1
-rw-r--r--arch/mips/mm/ioremap.c37
-rw-r--r--arch/mips/pci/pci.c2
-rw-r--r--arch/nds32/Kconfig12
-rw-r--r--arch/nds32/Makefile2
-rw-r--r--arch/nds32/include/asm/cacheflush.h9
-rw-r--r--arch/nds32/include/asm/futex.h2
-rw-r--r--arch/nds32/kernel/setup.c3
-rw-r--r--arch/nds32/mm/cacheflush.c100
-rw-r--r--arch/openrisc/include/asm/pgalloc.h6
-rw-r--r--arch/openrisc/kernel/entry.S8
-rw-r--r--arch/openrisc/kernel/head.S9
-rw-r--r--arch/openrisc/kernel/traps.c2
-rw-r--r--arch/parisc/Kconfig6
-rw-r--r--arch/parisc/Makefile4
-rw-r--r--arch/parisc/include/asm/signal.h8
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h3
-rw-r--r--arch/parisc/kernel/drivers.c25
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/parisc/kernel/unwind.c4
-rw-r--r--arch/powerpc/Makefile2
-rw-r--r--arch/powerpc/include/asm/book3s/32/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-4k.h21
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable-64k.h9
-rw-r--r--arch/powerpc/include/asm/book3s/64/pgtable.h5
-rw-r--r--arch/powerpc/include/asm/mmu_context.h4
-rw-r--r--arch/powerpc/include/asm/nmi.h2
-rw-r--r--arch/powerpc/include/asm/nohash/32/pgalloc.h2
-rw-r--r--arch/powerpc/include/asm/nohash/64/pgalloc.h1
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/dt_cpu_ftrs.c3
-rw-r--r--arch/powerpc/kernel/idle_book3s.S2
-rw-r--r--arch/powerpc/kernel/pci_32.c4
-rw-r--r--arch/powerpc/kernel/pci_64.c4
-rw-r--r--arch/powerpc/kernel/rtas.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c12
-rw-r--r--arch/powerpc/kernel/setup_64.c8
-rw-r--r--arch/powerpc/kernel/signal.c4
-rw-r--r--arch/powerpc/kernel/signal_32.c8
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/smp.c6
-rw-r--r--arch/powerpc/kernel/stacktrace.c4
-rw-r--r--arch/powerpc/kernel/syscalls.c4
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c2
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c6
-rw-r--r--arch/powerpc/mm/hugetlbpage.c3
-rw-r--r--arch/powerpc/mm/mmu_context_iommu.c37
-rw-r--r--arch/powerpc/mm/pgtable-book3s64.c12
-rw-r--r--arch/powerpc/mm/subpage-prot.c4
-rw-r--r--arch/powerpc/mm/tlb-radix.c98
-rw-r--r--arch/powerpc/net/bpf_jit_comp64.c29
-rw-r--r--arch/powerpc/platforms/powermac/time.c29
-rw-r--r--arch/powerpc/xmon/xmon.c4
-rw-r--r--arch/riscv/Kconfig1
-rw-r--r--arch/riscv/include/uapi/asm/elf.h9
-rw-r--r--arch/riscv/kernel/irq.c4
-rw-r--r--arch/riscv/kernel/module.c26
-rw-r--r--arch/riscv/kernel/ptrace.c2
-rw-r--r--arch/riscv/kernel/setup.c5
-rw-r--r--arch/riscv/mm/init.c2
-rw-r--r--arch/s390/Kconfig3
-rw-r--r--arch/s390/include/asm/css_chars.h62
-rw-r--r--arch/s390/kernel/compat_wrapper.c1
-rw-r--r--arch/s390/kernel/entry.S8
-rw-r--r--arch/s390/kernel/signal.c3
-rw-r--r--arch/s390/kernel/syscalls/syscall.tbl2
-rw-r--r--arch/s390/mm/pgalloc.c4
-rw-r--r--arch/s390/net/bpf_jit_comp.c1
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/Makefile6
-rw-r--r--arch/x86/boot/compressed/eboot.c12
-rw-r--r--arch/x86/crypto/aegis128-aesni-asm.S1
-rw-r--r--arch/x86/crypto/aegis128l-aesni-asm.S1
-rw-r--r--arch/x86/crypto/aegis256-aesni-asm.S1
-rw-r--r--arch/x86/crypto/morus1280-avx2-asm.S1
-rw-r--r--arch/x86/crypto/morus1280-sse2-asm.S1
-rw-r--r--arch/x86/crypto/morus640-sse2-asm.S1
-rw-r--r--arch/x86/entry/common.c2
-rw-r--r--arch/x86/entry/entry_32.S2
-rw-r--r--arch/x86/entry/entry_64_compat.S16
-rw-r--r--arch/x86/events/intel/ds.c8
-rw-r--r--arch/x86/hyperv/hv_apic.c5
-rw-r--r--arch/x86/hyperv/hv_init.c5
-rw-r--r--arch/x86/include/asm/apm.h6
-rw-r--r--arch/x86/include/asm/asm.h59
-rw-r--r--arch/x86/include/asm/barrier.h2
-rw-r--r--arch/x86/include/asm/irqflags.h2
-rw-r--r--arch/x86/include/asm/mshyperv.h5
-rw-r--r--arch/x86/include/asm/pgalloc.h3
-rw-r--r--arch/x86/include/asm/pgtable.h2
-rw-r--r--arch/x86/include/asm/pgtable_64.h4
-rw-r--r--arch/x86/include/asm/uaccess_64.h7
-rw-r--r--arch/x86/include/asm/vmx.h3
-rw-r--r--arch/x86/kernel/Makefile1
-rw-r--r--arch/x86/kernel/apic/x2apic_uv_x.c60
-rw-r--r--arch/x86/kernel/apm_32.c5
-rw-r--r--arch/x86/kernel/cpu/amd.c4
-rw-r--r--arch/x86/kernel/cpu/bugs.c12
-rw-r--r--arch/x86/kernel/cpu/cacheinfo.c2
-rw-r--r--arch/x86/kernel/cpu/common.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c5
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c47
-rw-r--r--arch/x86/kernel/cpu/microcode/intel.c5
-rw-r--r--arch/x86/kernel/cpu/mtrr/if.c3
-rw-r--r--arch/x86/kernel/e820.c15
-rw-r--r--arch/x86/kernel/head64.c2
-rw-r--r--arch/x86/kernel/irqflags.S26
-rw-r--r--arch/x86/kernel/kvmclock.c12
-rw-r--r--arch/x86/kernel/quirks.c11
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/smpboot.c5
-rw-r--r--arch/x86/kernel/traps.c14
-rw-r--r--arch/x86/kernel/uprobes.c2
-rw-r--r--arch/x86/kvm/Kconfig2
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/vmx.c128
-rw-r--r--arch/x86/kvm/x86.c4
-rw-r--r--arch/x86/kvm/x86.h9
-rw-r--r--arch/x86/mm/fault.c21
-rw-r--r--arch/x86/mm/init_64.c20
-rw-r--r--arch/x86/platform/efi/efi_64.c4
-rw-r--r--arch/x86/purgatory/Makefile2
-rw-r--r--arch/x86/um/mem_32.c2
-rw-r--r--arch/x86/xen/enlighten.c7
-rw-r--r--arch/x86/xen/enlighten_pv.c26
-rw-r--r--arch/x86/xen/enlighten_pvh.c1
-rw-r--r--arch/x86/xen/irq.c4
-rw-r--r--arch/x86/xen/smp_pv.c5
-rw-r--r--block/bio.c57
-rw-r--r--block/blk-core.c12
-rw-r--r--block/blk-mq-debugfs.c2
-rw-r--r--block/blk-mq.c17
-rw-r--r--block/blk-softirq.c1
-rw-r--r--block/blk-timeout.c1
-rw-r--r--block/bsg.c2
-rw-r--r--block/sed-opal.c4
-rw-r--r--certs/blacklist.h2
-rw-r--r--crypto/af_alg.c17
-rw-r--r--crypto/algif_aead.c4
-rw-r--r--crypto/algif_skcipher.c4
-rw-r--r--crypto/asymmetric_keys/x509_cert_parser.c9
-rw-r--r--crypto/morus640.c3
-rw-r--r--crypto/sha3_generic.c2
-rw-r--r--drivers/acpi/acpi_lpss.c18
-rw-r--r--drivers/acpi/acpica/hwsleep.c15
-rw-r--r--drivers/acpi/acpica/psloop.c26
-rw-r--r--drivers/acpi/acpica/uterror.c6
-rw-r--r--drivers/acpi/battery.c9
-rw-r--r--drivers/acpi/ec.c20
-rw-r--r--drivers/acpi/nfit/core.c48
-rw-r--r--drivers/acpi/nfit/nfit.h1
-rw-r--r--drivers/acpi/osl.c72
-rw-r--r--drivers/acpi/pptt.c10
-rw-r--r--drivers/ata/Kconfig2
-rw-r--r--drivers/ata/ahci.c60
-rw-r--r--drivers/ata/ahci_mvebu.c2
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-eh.c41
-rw-r--r--drivers/ata/libata-scsi.c18
-rw-r--r--drivers/ata/sata_fsl.c9
-rw-r--r--drivers/ata/sata_nv.c3
-rw-r--r--drivers/atm/iphase.c2
-rw-r--r--drivers/atm/zatm.c2
-rw-r--r--drivers/base/Makefile3
-rw-r--r--drivers/base/core.c15
-rw-r--r--drivers/base/dd.c8
-rw-r--r--drivers/base/power/domain.c23
-rw-r--r--drivers/block/drbd/drbd_req.c4
-rw-r--r--drivers/block/drbd/drbd_worker.c2
-rw-r--r--drivers/block/loop.c1
-rw-r--r--drivers/block/nbd.c138
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/bluetooth/hci_nokia.c2
-rw-r--r--drivers/bus/ti-sysc.c8
-rw-r--r--drivers/char/agp/alpha-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c4
-rw-r--r--drivers/char/hw_random/core.c11
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c6
-rw-r--r--drivers/char/ipmi/kcs_bmc.c31
-rw-r--r--drivers/char/mem.c1
-rw-r--r--drivers/char/random.c39
-rw-r--r--drivers/clk/Makefile2
-rw-r--r--drivers/clk/clk-aspeed.c59
-rw-r--r--drivers/clk/clk.c3
-rw-r--r--drivers/clk/davinci/da8xx-cfgchip.c2
-rw-r--r--drivers/clk/davinci/psc.h2
-rw-r--r--drivers/clk/meson/clk-audio-divider.c2
-rw-r--r--drivers/clk/meson/gxbb.c1
-rw-r--r--drivers/clk/mvebu/armada-37xx-periph.c38
-rw-r--r--drivers/clk/qcom/gcc-msm8996.c1
-rw-r--r--drivers/clk/qcom/mmcc-msm8996.c1
-rw-r--r--drivers/clk/sunxi-ng/Makefile39
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/timer-stm32.c4
-rw-r--r--drivers/cpufreq/intel_pstate.c44
-rw-r--r--drivers/cpufreq/pcc-cpufreq.c4
-rw-r--r--drivers/cpufreq/qcom-cpufreq-kryo.c34
-rw-r--r--drivers/crypto/chelsio/chtls/chtls_io.c5
-rw-r--r--drivers/dax/device.c12
-rw-r--r--drivers/dax/super.c8
-rw-r--r--drivers/dma/k3dma.c2
-rw-r--r--drivers/dma/pl330.c2
-rw-r--r--drivers/dma/ti/omap-dma.c6
-rw-r--r--drivers/firmware/dmi-id.c2
-rw-r--r--drivers/firmware/dmi_scan.c1
-rw-r--r--drivers/firmware/efi/libstub/tpm.c2
-rw-r--r--drivers/fpga/altera-cvp.c6
-rw-r--r--drivers/gpio/gpio-uniphier.c6
-rw-r--r--drivers/gpio/gpiolib-of.c3
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h46
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c47
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c131
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c7
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c13
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_object.c24
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c14
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h2
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c33
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c12
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c39
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c4
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c65
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c20
-rw-r--r--drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c5
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c10
-rw-r--r--drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c8
-rw-r--r--drivers/gpu/drm/amd/display/dc/dc.h1
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c27
-rw-r--r--drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c19
-rw-r--r--drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c2
-rw-r--r--drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h5
-rw-r--r--drivers/gpu/drm/amd/include/atomfirmware.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c96
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c4
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h3
-rw-r--r--drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c2
-rw-r--r--drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h5
-rw-r--r--drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c23
-rw-r--r--drivers/gpu/drm/arm/malidp_drv.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_hw.c3
-rw-r--r--drivers/gpu/drm/arm/malidp_planes.c9
-rw-r--r--drivers/gpu/drm/armada/armada_crtc.c12
-rw-r--r--drivers/gpu/drm/armada/armada_hw.h1
-rw-r--r--drivers/gpu/drm/armada/armada_overlay.c30
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c2
-rw-r--r--drivers/gpu/drm/bridge/sil-sii8620.c365
-rw-r--r--drivers/gpu/drm/drm_drv.c14
-rw-r--r--drivers/gpu/drm/drm_lease.c16
-rw-r--r--drivers/gpu/drm/drm_property.c6
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_drv.c24
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gpu.h3
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_sched.c24
-rw-r--r--drivers/gpu/drm/exynos/exynos5433_drm_decon.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c17
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c10
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c51
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c110
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_plane.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_rotator.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_scaler.c44
-rw-r--r--drivers/gpu/drm/exynos/regs-gsc.h1
-rw-r--r--drivers/gpu/drm/i915/gvt/cmd_parser.c23
-rw-r--r--drivers/gpu/drm/i915/gvt/display.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c58
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h29
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c24
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h2
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h25
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c85
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c2
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c49
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c44
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h5
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c2
-rw-r--r--drivers/gpu/drm/i915/intel_crt.c20
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c13
-rw-r--r--drivers/gpu/drm/i915/intel_display.c37
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c34
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c6
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h3
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c6
-rw-r--r--drivers/gpu/drm/i915/intel_dvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lrc.c18
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c5
-rw-r--r--drivers/gpu/drm/i915/intel_sdvo.c6
-rw-r--r--drivers/gpu/drm/i915/intel_tv.c12
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c9
-rw-r--r--drivers/gpu/drm/meson/meson_drv.c12
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/disp.c3
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/curs507a.c2
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c53
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_backlight.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.h36
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c18
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c9
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c7
-rw-r--r--drivers/gpu/drm/sun4i/Makefile5
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c25
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/udl/udl_fb.c5
-rw-r--r--drivers/gpu/drm/udl/udl_transfer.c11
-rw-r--r--drivers/gpu/host1x/dev.c3
-rw-r--r--drivers/gpu/host1x/job.c3
-rw-r--r--drivers/gpu/ipu-v3/ipu-csi.c3
-rw-r--r--drivers/hid/hid-core.c5
-rw-r--r--drivers/hid/hid-debug.c8
-rw-r--r--drivers/hid/hid-google-hammer.c2
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-steam.c10
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c2
-rw-r--r--drivers/hid/intel-ish-hid/ipc/pci-ish.c22
-rw-r--r--drivers/hid/usbhid/hiddev.c11
-rw-r--r--drivers/hid/wacom_sys.c8
-rw-r--r--drivers/hid/wacom_wac.c10
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c7
-rw-r--r--drivers/hwmon/nct6775.c2
-rw-r--r--drivers/i2c/algos/i2c-algo-bit.c8
-rw-r--r--drivers/i2c/busses/i2c-cht-wc.c3
-rw-r--r--drivers/i2c/busses/i2c-davinci.c8
-rw-r--r--drivers/i2c/busses/i2c-gpio.c4
-rw-r--r--drivers/i2c/busses/i2c-imx.c5
-rw-r--r--drivers/i2c/busses/i2c-rcar.c54
-rw-r--r--drivers/i2c/busses/i2c-stu300.c2
-rw-r--r--drivers/i2c/busses/i2c-tegra.c17
-rw-r--r--drivers/i2c/i2c-core-base.c11
-rw-r--r--drivers/i2c/i2c-core-smbus.c14
-rw-r--r--drivers/iio/accel/mma8452.c2
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c2
-rw-r--r--drivers/iio/light/tsl2772.c2
-rw-r--r--drivers/iio/pressure/bmp280-core.c5
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c28
-rw-r--r--drivers/infiniband/core/uverbs_main.c14
-rw-r--r--drivers/infiniband/core/verbs.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/hw/hfi1/rc.c2
-rw-r--r--drivers/infiniband/hw/hfi1/uc.c4
-rw-r--r--drivers/infiniband/hw/hfi1/ud.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.c4
-rw-r--r--drivers/infiniband/hw/hfi1/verbs_txreq.h4
-rw-r--r--drivers/infiniband/hw/mlx4/mr.c7
-rw-r--r--drivers/infiniband/hw/mlx5/main.c38
-rw-r--r--drivers/infiniband/hw/mlx5/srq.c18
-rw-r--r--drivers/infiniband/hw/qedr/verbs.c3
-rw-r--r--drivers/infiniband/sw/rxe/rxe_req.c5
-rw-r--r--drivers/input/input-mt.c12
-rw-r--r--drivers/input/joystick/xpad.c2
-rw-r--r--drivers/input/keyboard/goldfish_events.c9
-rw-r--r--drivers/input/misc/Kconfig10
-rw-r--r--drivers/input/misc/Makefile1
-rw-r--r--drivers/input/misc/sc27xx-vibra.c154
-rw-r--r--drivers/input/mouse/elan_i2c.h2
-rw-r--r--drivers/input/mouse/elan_i2c_core.c5
-rw-r--r--drivers/input/mouse/elan_i2c_smbus.c10
-rw-r--r--drivers/input/mouse/elantech.c11
-rw-r--r--drivers/input/mouse/psmouse-base.c12
-rw-r--r--drivers/input/rmi4/Kconfig1
-rw-r--r--drivers/input/rmi4/rmi_2d_sensor.c34
-rw-r--r--drivers/input/rmi4/rmi_bus.c50
-rw-r--r--drivers/input/rmi4/rmi_bus.h10
-rw-r--r--drivers/input/rmi4/rmi_driver.c52
-rw-r--r--drivers/input/rmi4/rmi_f01.c10
-rw-r--r--drivers/input/rmi4/rmi_f03.c9
-rw-r--r--drivers/input/rmi4/rmi_f11.c42
-rw-r--r--drivers/input/rmi4/rmi_f12.c8
-rw-r--r--drivers/input/rmi4/rmi_f30.c9
-rw-r--r--drivers/input/rmi4/rmi_f34.c5
-rw-r--r--drivers/input/rmi4/rmi_f54.c6
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h7
-rw-r--r--drivers/input/touchscreen/silead.c1
-rw-r--r--drivers/iommu/Kconfig1
-rw-r--r--drivers/iommu/intel-iommu.c94
-rw-r--r--drivers/irqchip/irq-gic-v2m.c2
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c62
-rw-r--r--drivers/irqchip/irq-ls-scfg-msi.c10
-rw-r--r--drivers/isdn/mISDN/socket.c2
-rw-r--r--drivers/lightnvm/Kconfig2
-rw-r--r--drivers/md/dm-raid.c2
-rw-r--r--drivers/md/dm-table.c7
-rw-r--r--drivers/md/dm-thin-metadata.c9
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm-writecache.c53
-rw-r--r--drivers/md/dm-zoned-target.c2
-rw-r--r--drivers/md/dm.c8
-rw-r--r--drivers/md/md.c8
-rw-r--r--drivers/md/raid10.c7
-rw-r--r--drivers/media/rc/bpf-lirc.c14
-rw-r--r--drivers/misc/cxl/api.c8
-rw-r--r--drivers/misc/ibmasm/ibmasmfs.c27
-rw-r--r--drivers/misc/mei/interrupt.c5
-rw-r--r--drivers/misc/vmw_balloon.c4
-rw-r--r--drivers/mmc/core/slot-gpio.c2
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/renesas_sdhi_internal_dmac.c15
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c21
-rw-r--r--drivers/mmc/host/sunxi-mmc.c7
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c19
-rw-r--r--drivers/mtd/devices/mtd_dataflash.c4
-rw-r--r--drivers/mtd/nand/raw/denali_dt.c6
-rw-r--r--drivers/mtd/nand/raw/mxc_nand.c5
-rw-r--r--drivers/mtd/nand/raw/nand_base.c2
-rw-r--r--drivers/mtd/nand/raw/nand_macronix.c48
-rw-r--r--drivers/mtd/nand/raw/nand_micron.c2
-rw-r--r--drivers/mtd/spi-nor/cadence-quadspi.c6
-rw-r--r--drivers/net/bonding/bond_options.c23
-rw-r--r--drivers/net/can/m_can/m_can.c18
-rw-r--r--drivers/net/can/mscan/mpc5xxx_can.c5
-rw-r--r--drivers/net/can/peak_canfd/peak_pciefd_main.c19
-rw-r--r--drivers/net/can/xilinx_can.c392
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.c21
-rw-r--r--drivers/net/ethernet/3com/Kconfig2
-rw-r--r--drivers/net/ethernet/amd/Kconfig6
-rw-r--r--drivers/net/ethernet/apm/xgene-v2/Kconfig1
-rw-r--r--drivers/net/ethernet/apm/xgene/Kconfig1
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_cfg.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h4
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_main.c11
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c47
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c4
-rw-r--r--drivers/net/ethernet/arc/Kconfig6
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c8
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c1
-rw-r--r--drivers/net/ethernet/broadcom/Kconfig2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c4
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c24
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c30
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c2
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c13
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h2
-rw-r--r--drivers/net/ethernet/cadence/macb.h11
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c38
-rw-r--r--drivers/net/ethernet/cadence/macb_ptp.c5
-rw-r--r--drivers/net/ethernet/calxeda/Kconfig2
-rw-r--r--drivers/net/ethernet/cavium/Kconfig12
-rw-r--r--drivers/net/ethernet/cavium/liquidio/lio_main.c5
-rw-r--r--drivers/net/ethernet/cavium/octeon/octeon_mgmt.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c35
-rw-r--r--drivers/net/ethernet/cirrus/Kconfig1
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_clsf.c3
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c12
-rw-r--r--drivers/net/ethernet/faraday/ftgmac100.c4
-rw-r--r--drivers/net/ethernet/freescale/dpaa/dpaa_eth.c15
-rw-r--r--drivers/net/ethernet/freescale/fman/fman_port.c8
-rw-r--r--drivers/net/ethernet/hisilicon/Kconfig2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_tx.c1
-rw-r--r--drivers/net/ethernet/ibm/ibmvnic.c43
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c31
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_common.c12
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c24
-rw-r--r--drivers/net/ethernet/marvell/Kconfig8
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/resource_tracker.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/alloc.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/cmd.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_tc.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fs_core.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/fw.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/sriov.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/vport.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c48
-rw-r--r--drivers/net/ethernet/mscc/ocelot.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/bpf/main.c9
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/match.c14
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/offload.c11
-rw-r--r--drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c2
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfp_main.c6
-rw-r--r--drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed.h1
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dcbx.c19
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_debug.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_dev.c2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.c15
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_l2.h2
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_ll2.c11
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_main.c22
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_mcp.c52
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_sriov.c21
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.c4
-rw-r--r--drivers/net/ethernet/qlogic/qed/qed_vf.h7
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_ptp.c10
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c2
-rw-r--r--drivers/net/ethernet/qualcomm/qca_spi.c21
-rw-r--r--drivers/net/ethernet/realtek/r8169.c6
-rw-r--r--drivers/net/ethernet/renesas/Kconfig2
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c93
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c94
-rw-r--r--drivers/net/ethernet/sfc/ef10.c30
-rw-r--r--drivers/net/ethernet/sfc/efx.c18
-rw-r--r--drivers/net/ethernet/sfc/farch.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c18
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/hwif.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c12
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/sun/sungem.c22
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c19
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c1
-rw-r--r--drivers/net/geneve.c2
-rw-r--r--drivers/net/hamradio/bpqether.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h2
-rw-r--r--drivers/net/hyperv/netvsc.c54
-rw-r--r--drivers/net/hyperv/netvsc_drv.c17
-rw-r--r--drivers/net/hyperv/rndis_filter.c62
-rw-r--r--drivers/net/ieee802154/adf7242.c34
-rw-r--r--drivers/net/ieee802154/at86rf230.c15
-rw-r--r--drivers/net/ieee802154/fakelb.c2
-rw-r--r--drivers/net/ieee802154/mcr20a.c3
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c40
-rw-r--r--drivers/net/net_failover.c2
-rw-r--r--drivers/net/phy/dp83tc811.c2
-rw-r--r--drivers/net/phy/marvell.c54
-rw-r--r--drivers/net/phy/phy.c2
-rw-r--r--drivers/net/phy/phy_device.c7
-rw-r--r--drivers/net/phy/sfp-bus.c35
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/usb/asix_devices.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c3
-rw-r--r--drivers/net/usb/lan78xx.c42
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/r8152.c3
-rw-r--r--drivers/net/usb/rtl8150.c2
-rw-r--r--drivers/net/usb/smsc75xx.c62
-rw-r--r--drivers/net/virtio_net.c30
-rw-r--r--drivers/net/vxlan.c130
-rw-r--r--drivers/net/wireless/ath/ath10k/mac.c16
-rw-r--r--drivers/net/wireless/ath/ath10k/wmi.h1
-rw-r--r--drivers/net/wireless/ath/wcn36xx/testmode.c2
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/Kconfig1
-rw-r--r--drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c7
-rw-r--r--drivers/net/wireless/marvell/mwifiex/usb.c7
-rw-r--r--drivers/net/wireless/mediatek/mt7601u/phy.c6
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/Kconfig2
-rw-r--r--drivers/net/wireless/quantenna/qtnfmac/cfg80211.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.c17
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/base.h2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/core.c3
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/pci.c2
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/ps.c4
-rw-r--r--drivers/net/wireless/realtek/rtlwifi/usb.c2
-rw-r--r--drivers/net/xen-netfront.c11
-rw-r--r--drivers/nfc/pn533/usb.c4
-rw-r--r--drivers/nvdimm/claim.c1
-rw-r--r--drivers/nvdimm/pmem.c3
-rw-r--r--drivers/nvme/host/core.c64
-rw-r--r--drivers/nvme/host/fabrics.c10
-rw-r--r--drivers/nvme/host/fabrics.h3
-rw-r--r--drivers/nvme/host/fc.c8
-rw-r--r--drivers/nvme/host/nvme.h1
-rw-r--r--drivers/nvme/host/pci.c48
-rw-r--r--drivers/nvme/host/rdma.c78
-rw-r--r--drivers/nvme/target/configfs.c9
-rw-r--r--drivers/nvme/target/core.c10
-rw-r--r--drivers/nvme/target/fc.c44
-rw-r--r--drivers/nvme/target/loop.c2
-rw-r--r--drivers/nvmem/core.c4
-rw-r--r--drivers/of/base.c6
-rw-r--r--drivers/of/of_private.h2
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/opp/core.c2
-rw-r--r--drivers/pci/Makefile6
-rw-r--r--drivers/pci/controller/Kconfig3
-rw-r--r--drivers/pci/controller/dwc/Kconfig1
-rw-r--r--drivers/pci/controller/dwc/pcie-designware-host.c3
-rw-r--r--drivers/pci/controller/pci-aardvark.c2
-rw-r--r--drivers/pci/controller/pci-ftpci100.c4
-rw-r--r--drivers/pci/controller/pci-hyperv.c8
-rw-r--r--drivers/pci/controller/pci-v3-semi.c2
-rw-r--r--drivers/pci/controller/pci-versatile.c2
-rw-r--r--drivers/pci/controller/pci-xgene.c2
-rw-r--r--drivers/pci/controller/pcie-mediatek.c2
-rw-r--r--drivers/pci/controller/pcie-rcar.c16
-rw-r--r--drivers/pci/controller/pcie-xilinx-nwl.c2
-rw-r--r--drivers/pci/controller/pcie-xilinx.c1
-rw-r--r--drivers/pci/endpoint/pci-epf-core.c62
-rw-r--r--drivers/pci/hotplug/acpi_pcihp.c10
-rw-r--r--drivers/pci/iov.c16
-rw-r--r--drivers/pci/of.c2
-rw-r--r--drivers/pci/pci-acpi.c12
-rw-r--r--drivers/pci/pci-driver.c1
-rw-r--r--drivers/pci/pci.c38
-rw-r--r--drivers/pci/pci.h4
-rw-r--r--drivers/pci/pcie/err.c2
-rw-r--r--drivers/perf/xgene_pmu.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-usb-init.c4
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c4
-rw-r--r--drivers/pinctrl/actions/pinctrl-owl.c2
-rw-r--r--drivers/pinctrl/bcm/pinctrl-nsp-mux.c6
-rw-r--r--drivers/pinctrl/devicetree.c7
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mt7622.c54
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c5
-rw-r--r--drivers/pinctrl/pinctrl-ingenic.c2
-rw-r--r--drivers/pinctrl/pinctrl-single.c14
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a77970.c14
-rw-r--r--drivers/platform/x86/dell-laptop.c2
-rw-r--r--drivers/ptp/ptp_chardev.c5
-rw-r--r--drivers/ptp/ptp_qoriq.c2
-rw-r--r--drivers/rtc/interface.c8
-rw-r--r--drivers/rtc/rtc-mrst.c4
-rw-r--r--drivers/s390/block/dasd.c184
-rw-r--r--drivers/s390/block/dasd_alias.c6
-rw-r--r--drivers/s390/block/dasd_diag.c3
-rw-r--r--drivers/s390/block/dasd_eckd.c46
-rw-r--r--drivers/s390/block/dasd_eer.c10
-rw-r--r--drivers/s390/block/dasd_fba.c6
-rw-r--r--drivers/s390/block/dasd_int.h34
-rw-r--r--drivers/s390/cio/Makefile1
-rw-r--r--drivers/s390/cio/vfio_ccw_cp.c140
-rw-r--r--drivers/s390/cio/vfio_ccw_drv.c5
-rw-r--r--drivers/s390/cio/vfio_ccw_fsm.c17
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h54
-rw-r--r--drivers/s390/net/qeth_core.h13
-rw-r--r--drivers/s390/net/qeth_core_main.c47
-rw-r--r--drivers/s390/net/qeth_l2_main.c24
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/scsi/aacraid/aachba.c15
-rw-r--r--drivers/scsi/cxlflash/main.h4
-rw-r--r--drivers/scsi/cxlflash/ocxl_hw.c5
-rw-r--r--drivers/scsi/hpsa.c25
-rw-r--r--drivers/scsi/hpsa.h1
-rw-r--r--drivers/scsi/ipr.c2
-rw-r--r--drivers/scsi/qedf/qedf_main.c12
-rw-r--r--drivers/scsi/qedi/qedi_main.c11
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h2
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c40
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c7
-rw-r--r--drivers/scsi/scsi_debug.c2
-rw-r--r--drivers/scsi/scsi_error.c14
-rw-r--r--drivers/scsi/scsi_transport_fc.c2
-rw-r--r--drivers/scsi/sd_zbc.c5
-rw-r--r--drivers/scsi/sg.c42
-rw-r--r--drivers/scsi/xen-scsifront.c33
-rw-r--r--drivers/soc/imx/gpc.c21
-rw-r--r--drivers/soc/imx/gpcv2.c13
-rw-r--r--drivers/soc/qcom/Kconfig3
-rw-r--r--drivers/soc/renesas/rcar-sysc.c35
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/comedi/drivers/quatech_daqp_cs.c2
-rw-r--r--drivers/staging/ks7010/ks_hostif.c12
-rw-r--r--drivers/staging/media/omap4iss/iss_video.c3
-rw-r--r--drivers/staging/rtl8188eu/Kconfig1
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c161
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c92
-rw-r--r--drivers/staging/rtl8723bs/core/rtw_ap.c2
-rw-r--r--drivers/staging/rtlwifi/rtl8822be/hw.c2
-rw-r--r--drivers/staging/rtlwifi/wifi.h1
-rw-r--r--drivers/staging/speakup/speakup_soft.c6
-rw-r--r--drivers/staging/typec/Kconfig1
-rw-r--r--drivers/target/target_core_pr.c15
-rw-r--r--drivers/target/target_core_user.c44
-rw-r--r--drivers/thunderbolt/domain.c4
-rw-r--r--drivers/tty/n_tty.c55
-rw-r--r--drivers/tty/serdev/core.c1
-rw-r--r--drivers/tty/serial/8250/8250_pci.c2
-rw-r--r--drivers/tty/vt/vt.c4
-rw-r--r--drivers/uio/uio.c139
-rw-r--r--drivers/usb/chipidea/Kconfig9
-rw-r--r--drivers/usb/chipidea/Makefile3
-rw-r--r--drivers/usb/chipidea/ci.h8
-rw-r--r--drivers/usb/chipidea/host.c5
-rw-r--r--drivers/usb/chipidea/ulpi.c3
-rw-r--r--drivers/usb/class/cdc-acm.c6
-rw-r--r--drivers/usb/core/hub.c8
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc2/core.h3
-rw-r--r--drivers/usb/dwc2/gadget.c26
-rw-r--r--drivers/usb/dwc2/hcd.c147
-rw-r--r--drivers/usb/dwc2/hcd.h8
-rw-r--r--drivers/usb/dwc2/hcd_intr.c20
-rw-r--r--drivers/usb/dwc2/hcd_queue.c5
-rw-r--r--drivers/usb/dwc3/core.c23
-rw-r--r--drivers/usb/dwc3/dwc3-of-simple.c3
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c2
-rw-r--r--drivers/usb/dwc3/dwc3-qcom.c13
-rw-r--r--drivers/usb/dwc3/ep0.c3
-rw-r--r--drivers/usb/gadget/composite.c4
-rw-r--r--drivers/usb/gadget/function/f_fs.c28
-rw-r--r--drivers/usb/gadget/function/f_uac2.c24
-rw-r--r--drivers/usb/gadget/function/u_audio.c88
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/Kconfig1
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/ep0.c11
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/epn.c14
-rw-r--r--drivers/usb/gadget/udc/aspeed-vhub/vhub.h33
-rw-r--r--drivers/usb/gadget/udc/r8a66597-udc.c6
-rw-r--r--drivers/usb/host/xhci-dbgcap.c12
-rw-r--r--drivers/usb/host/xhci-mem.c6
-rw-r--r--drivers/usb/host/xhci-tegra.c6
-rw-r--r--drivers/usb/host/xhci-trace.h36
-rw-r--r--drivers/usb/host/xhci.c48
-rw-r--r--drivers/usb/host/xhci.h4
-rw-r--r--drivers/usb/misc/yurex.c23
-rw-r--r--drivers/usb/phy/phy-fsl-usb.c4
-rw-r--r--drivers/usb/serial/ch341.c2
-rw-r--r--drivers/usb/serial/cp210x.c15
-rw-r--r--drivers/usb/serial/keyspan_pda.c4
-rw-r--r--drivers/usb/serial/mos7840.c3
-rw-r--r--drivers/usb/typec/tcpm.c17
-rw-r--r--drivers/usb/typec/ucsi/ucsi.c13
-rw-r--r--drivers/usb/typec/ucsi/ucsi_acpi.c5
-rw-r--r--drivers/vfio/pci/Kconfig12
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vfio/vfio_iommu_spapr_tce.c10
-rw-r--r--drivers/vfio/vfio_iommu_type1.c16
-rw-r--r--drivers/vhost/net.c3
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/grant-table.c4
-rw-r--r--drivers/xen/manage.c18
-rw-r--r--drivers/xen/privcmd-buf.c210
-rw-r--r--drivers/xen/privcmd.c9
-rw-r--r--drivers/xen/privcmd.h3
-rw-r--r--drivers/xen/xen-scsiback.c16
-rw-r--r--fs/aio.c153
-rw-r--r--fs/autofs/Makefile4
-rw-r--r--fs/autofs/dev-ioctl.c22
-rw-r--r--fs/autofs/init.c2
-rw-r--r--fs/binfmt_elf.c5
-rw-r--r--fs/block_dev.c9
-rw-r--r--fs/btrfs/extent_io.c12
-rw-r--r--fs/btrfs/inode.c7
-rw-r--r--fs/btrfs/ioctl.c12
-rw-r--r--fs/btrfs/qgroup.c17
-rw-r--r--fs/btrfs/scrub.c17
-rw-r--r--fs/btrfs/volumes.c2
-rw-r--r--fs/cachefiles/bind.c3
-rw-r--r--fs/cachefiles/namei.c3
-rw-r--r--fs/cachefiles/rdwr.c17
-rw-r--r--fs/ceph/inode.c1
-rw-r--r--fs/cifs/cifs_debug.c29
-rw-r--r--fs/cifs/cifsencrypt.c20
-rw-r--r--fs/cifs/cifsglob.h43
-rw-r--r--fs/cifs/cifsproto.h8
-rw-r--r--fs/cifs/cifssmb.c18
-rw-r--r--fs/cifs/connect.c16
-rw-r--r--fs/cifs/inode.c13
-rw-r--r--fs/cifs/misc.c9
-rw-r--r--fs/cifs/smb1ops.c1
-rw-r--r--fs/cifs/smb2file.c11
-rw-r--r--fs/cifs/smb2misc.c19
-rw-r--r--fs/cifs/smb2ops.c289
-rw-r--r--fs/cifs/smb2pdu.c365
-rw-r--r--fs/cifs/smb2pdu.h29
-rw-r--r--fs/cifs/smb2proto.h6
-rw-r--r--fs/cifs/smb2transport.c74
-rw-r--r--fs/cifs/smbdirect.c22
-rw-r--r--fs/cifs/smbdirect.h4
-rw-r--r--fs/cifs/trace.h3
-rw-r--r--fs/cifs/transport.c194
-rw-r--r--fs/eventfd.c19
-rw-r--r--fs/eventpoll.c15
-rw-r--r--fs/exec.c7
-rw-r--r--fs/ext2/ext2.h2
-rw-r--r--fs/ext2/super.c6
-rw-r--r--fs/ext4/balloc.c24
-rw-r--r--fs/ext4/ext4.h9
-rw-r--r--fs/ext4/ext4_extents.h1
-rw-r--r--fs/ext4/extents.c6
-rw-r--r--fs/ext4/ialloc.c22
-rw-r--r--fs/ext4/inline.c58
-rw-r--r--fs/ext4/inode.c23
-rw-r--r--fs/ext4/mballoc.c6
-rw-r--r--fs/ext4/mmp.c7
-rw-r--r--fs/ext4/super.c96
-rw-r--r--fs/ext4/xattr.c40
-rw-r--r--fs/fat/inode.c20
-rw-r--r--fs/fscache/cache.c2
-rw-r--r--fs/fscache/cookie.c7
-rw-r--r--fs/fscache/object.c1
-rw-r--r--fs/fscache/operation.c6
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c6
-rw-r--r--fs/internal.h1
-rw-r--r--fs/jbd2/transaction.c9
-rw-r--r--fs/jfs/xattr.c10
-rw-r--r--fs/nfs/delegation.c4
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c21
-rw-r--r--fs/nfs/nfs4proc.c33
-rw-r--r--fs/nfs/pnfs.h5
-rw-r--r--fs/pipe.c22
-rw-r--r--fs/proc/base.c28
-rw-r--r--fs/proc/generic.c11
-rw-r--r--fs/proc/task_mmu.c3
-rw-r--r--fs/quota/dquot.c7
-rw-r--r--fs/reiserfs/prints.c141
-rw-r--r--fs/select.c23
-rw-r--r--fs/squashfs/cache.c3
-rw-r--r--fs/squashfs/file.c8
-rw-r--r--fs/squashfs/fragment.c4
-rw-r--r--fs/squashfs/squashfs_fs.h6
-rw-r--r--fs/timerfd.c22
-rw-r--r--fs/udf/balloc.c5
-rw-r--r--fs/udf/directory.c8
-rw-r--r--fs/udf/inode.c8
-rw-r--r--fs/udf/namei.c14
-rw-r--r--fs/udf/udfdecl.h9
-rw-r--r--fs/userfaultfd.c12
-rw-r--r--fs/xfs/libxfs/xfs_ag_resv.c31
-rw-r--r--fs/xfs/libxfs/xfs_alloc.c5
-rw-r--r--fs/xfs/libxfs/xfs_bmap.c26
-rw-r--r--fs/xfs/libxfs/xfs_bmap.h2
-rw-r--r--fs/xfs/libxfs/xfs_format.h5
-rw-r--r--fs/xfs/libxfs/xfs_inode_buf.c82
-rw-r--r--fs/xfs/libxfs/xfs_rtbitmap.c4
-rw-r--r--fs/xfs/xfs_bmap_util.c106
-rw-r--r--fs/xfs/xfs_fsmap.c4
-rw-r--r--fs/xfs/xfs_fsops.c2
-rw-r--r--fs/xfs/xfs_inode.c57
-rw-r--r--fs/xfs/xfs_iomap.c15
-rw-r--r--fs/xfs/xfs_trans.c7
-rw-r--r--include/acpi/processor.h3
-rw-r--r--include/asm-generic/qspinlock_types.h2
-rw-r--r--include/asm-generic/tlb.h8
-rw-r--r--include/crypto/if_alg.h3
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h40
-rw-r--r--include/linux/acpi.h3
-rw-r--r--include/linux/atmdev.h15
-rw-r--r--include/linux/backing-dev-defs.h2
-rw-r--r--include/linux/blk-mq.h14
-rw-r--r--include/linux/blkdev.h4
-rw-r--r--include/linux/bpf-cgroup.h27
-rw-r--r--include/linux/bpf.h20
-rw-r--r--include/linux/bpf_lirc.h5
-rw-r--r--include/linux/bpfilter.h6
-rw-r--r--include/linux/compat.h8
-rw-r--r--include/linux/compiler-gcc.h54
-rw-r--r--include/linux/compiler_types.h18
-rw-r--r--include/linux/dax.h2
-rw-r--r--include/linux/delayacct.h2
-rw-r--r--include/linux/dma-contiguous.h2
-rw-r--r--include/linux/eventfd.h1
-rw-r--r--include/linux/filter.h61
-rw-r--r--include/linux/fs.h3
-rw-r--r--include/linux/fsl/guts.h1
-rw-r--r--include/linux/ftrace.h2
-rw-r--r--include/linux/hid.h3
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/iio/buffer-dma.h2
-rw-r--r--include/linux/input/mt.h2
-rw-r--r--include/linux/intel-iommu.h1
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqdesc.h5
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/libata.h24
-rw-r--r--include/linux/marvell_phy.h2
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/mlx5/driver.h18
-rw-r--r--include/linux/mlx5/eswitch.h2
-rw-r--r--include/linux/mlx5/mlx5_ifc.h2
-rw-r--r--include/linux/mm.h20
-rw-r--r--include/linux/mod_devicetable.h1
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/netdevice.h20
-rw-r--r--include/linux/nfs_xdr.h2
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/pm_domain.h6
-rw-r--r--include/linux/poll.h12
-rw-r--r--include/linux/refcount.h4
-rw-r--r--include/linux/ring_buffer.h1
-rw-r--r--include/linux/rmi.h2
-rw-r--r--include/linux/scatterlist.h18
-rw-r--r--include/linux/sched.h25
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/skbuff.h13
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/spinlock.h5
-rw-r--r--include/linux/syscalls.h5
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/cfg80211.h12
-rw-r--r--include/net/ip6_fib.h10
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ipv6.h13
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/net_namespace.h1
-rw-r--r--include/net/netfilter/nf_tables.h5
-rw-r--r--include/net/netfilter/nf_tables_core.h6
-rw-r--r--include/net/netfilter/nf_tproxy.h4
-rw-r--r--include/net/netns/ipv6.h1
-rw-r--r--include/net/pkt_cls.h5
-rw-r--r--include/net/sctp/sctp.h3
-rw-r--r--include/net/tc_act/tc_csum.h1
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tcp.h16
-rw-r--r--include/net/tls.h6
-rw-r--r--include/net/udp.h2
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/rdma/ib_verbs.h13
-rw-r--r--include/uapi/linux/aio_abi.h12
-rw-r--r--include/uapi/linux/bpf.h28
-rw-r--r--include/uapi/linux/btf.h2
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/nbd.h3
-rw-r--r--include/uapi/linux/rseq.h102
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--include/uapi/linux/tcp.h4
-rw-r--r--include/uapi/linux/types_32_64.h50
-rw-r--r--include/xen/xen.h6
-rw-r--r--init/Kconfig11
-rw-r--r--ipc/sem.c2
-rw-r--r--kernel/Makefile1
-rw-r--r--kernel/bpf/btf.c46
-rw-r--r--kernel/bpf/cgroup.c54
-rw-r--r--kernel/bpf/core.c39
-rw-r--r--kernel/bpf/devmap.c19
-rw-r--r--kernel/bpf/hashtab.c16
-rw-r--r--kernel/bpf/sockmap.c297
-rw-r--r--kernel/bpf/syscall.c115
-rw-r--r--kernel/bpf/verifier.c11
-rw-r--r--kernel/dma/Kconfig50
-rw-r--r--kernel/dma/Makefile11
-rw-r--r--kernel/dma/coherent.c (renamed from drivers/base/dma-coherent.c)0
-rw-r--r--kernel/dma/contiguous.c (renamed from drivers/base/dma-contiguous.c)0
-rw-r--r--kernel/dma/debug.c (renamed from lib/dma-debug.c)0
-rw-r--r--kernel/dma/direct.c (renamed from lib/dma-direct.c)0
-rw-r--r--kernel/dma/mapping.c (renamed from drivers/base/dma-mapping.c)2
-rw-r--r--kernel/dma/noncoherent.c (renamed from lib/dma-noncoherent.c)0
-rw-r--r--kernel/dma/swiotlb.c (renamed from lib/swiotlb.c)1
-rw-r--r--kernel/dma/virt.c (renamed from lib/dma-virt.c)2
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/events/ring_buffer.c6
-rw-r--r--kernel/fork.c33
-rw-r--r--kernel/irq/debugfs.c1
-rw-r--r--kernel/kthread.c38
-rw-r--r--kernel/locking/lockdep.c12
-rw-r--r--kernel/locking/rwsem.c1
-rw-r--r--kernel/memremap.c22
-rw-r--r--kernel/rseq.c48
-rw-r--r--kernel/sched/core.c67
-rw-r--r--kernel/sched/cpufreq_schedutil.c2
-rw-r--r--kernel/sched/deadline.c11
-rw-r--r--kernel/sched/fair.c45
-rw-r--r--kernel/sched/rt.c16
-rw-r--r--kernel/sched/sched.h11
-rw-r--r--kernel/softirq.c18
-rw-r--r--kernel/stop_machine.c6
-rw-r--r--kernel/time/hrtimer.c2
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/time/tick-common.c3
-rw-r--r--kernel/time/time.c6
-rw-r--r--kernel/trace/ftrace.c13
-rw-r--r--kernel/trace/ring_buffer.c16
-rw-r--r--kernel/trace/trace.c25
-rw-r--r--kernel/trace/trace.h4
-rw-r--r--kernel/trace/trace_events_filter.c15
-rw-r--r--kernel/trace/trace_events_hist.c2
-rw-r--r--kernel/trace/trace_events_trigger.c18
-rw-r--r--kernel/trace/trace_functions_graph.c5
-rw-r--r--kernel/trace/trace_kprobe.c21
-rw-r--r--kernel/trace/trace_output.c5
-rw-r--r--lib/Kconfig47
-rw-r--r--lib/Kconfig.kasan3
-rw-r--r--lib/Makefile12
-rw-r--r--lib/dec_and_lock.c16
-rw-r--r--lib/iov_iter.c77
-rw-r--r--lib/percpu_ida.c2
-rw-r--r--lib/refcount.c28
-rw-r--r--lib/rhashtable.c27
-rw-r--r--lib/scatterlist.c6
-rw-r--r--lib/test_bpf.c20
-rw-r--r--lib/test_printf.c7
-rw-r--r--mm/backing-dev.c20
-rw-r--r--mm/debug.c18
-rw-r--r--mm/gup.c2
-rw-r--r--mm/huge_memory.c2
-rw-r--r--mm/hugetlb.c1
-rw-r--r--mm/kasan/kasan.c5
-rw-r--r--mm/memblock.c5
-rw-r--r--mm/memcontrol.c2
-rw-r--r--mm/mempolicy.c1
-rw-r--r--mm/mmap.c67
-rw-r--r--mm/nommu.c12
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/rmap.c8
-rw-r--r--mm/shmem.c1
-rw-r--r--mm/slab_common.c4
-rw-r--r--mm/slub.c7
-rw-r--r--mm/vmstat.c2
-rw-r--r--mm/zswap.c9
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/9p/client.c3
-rw-r--r--net/Makefile4
-rw-r--r--net/appletalk/ddp.c2
-rw-r--r--net/atm/br2684.c3
-rw-r--r--net/atm/clip.c3
-rw-r--r--net/atm/common.c14
-rw-r--r--net/atm/common.h2
-rw-r--r--net/atm/lec.c3
-rw-r--r--net/atm/mpc.c3
-rw-r--r--net/atm/pppoatm.c3
-rw-r--r--net/atm/pvc.c2
-rw-r--r--net/atm/raw.c4
-rw-r--r--net/atm/svc.c2
-rw-r--r--net/ax25/af_ax25.c2
-rw-r--r--net/batman-adv/bat_iv_ogm.c4
-rw-r--r--net/batman-adv/bat_v.c4
-rw-r--r--net/batman-adv/debugfs.c40
-rw-r--r--net/batman-adv/debugfs.h11
-rw-r--r--net/batman-adv/hard-interface.c37
-rw-r--r--net/batman-adv/translation-table.c7
-rw-r--r--net/bluetooth/af_bluetooth.c7
-rw-r--r--net/bluetooth/hci_sock.c2
-rw-r--r--net/bluetooth/l2cap_sock.c2
-rw-r--r--net/bluetooth/rfcomm/sock.c2
-rw-r--r--net/bluetooth/sco.c2
-rw-r--r--net/bpf/test_run.c17
-rw-r--r--net/bpfilter/.gitignore1
-rw-r--r--net/bpfilter/Kconfig2
-rw-r--r--net/bpfilter/Makefile15
-rw-r--r--net/bpfilter/bpfilter_kern.c11
-rw-r--r--net/bpfilter/bpfilter_umh_blob.S7
-rw-r--r--net/caif/caif_dev.c4
-rw-r--r--net/caif/caif_socket.c12
-rw-r--r--net/can/bcm.c2
-rw-r--r--net/can/raw.c2
-rw-r--r--net/core/datagram.c13
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/dev_ioctl.c11
-rw-r--r--net/core/fib_rules.c80
-rw-r--r--net/core/filter.c256
-rw-r--r--net/core/gen_stats.c16
-rw-r--r--net/core/page_pool.c2
-rw-r--r--net/core/rtnetlink.c9
-rw-r--r--net/core/skbuff.c14
-rw-r--r--net/core/sock.c13
-rw-r--r--net/dccp/ccids/ccid3.c16
-rw-r--r--net/dccp/dccp.h3
-rw-r--r--net/dccp/ipv4.c2
-rw-r--r--net/dccp/ipv6.c2
-rw-r--r--net/dccp/proto.c13
-rw-r--r--net/decnet/af_decnet.c6
-rw-r--r--net/dns_resolver/dns_key.c28
-rw-r--r--net/ieee802154/6lowpan/core.c6
-rw-r--r--net/ieee802154/socket.c4
-rw-r--r--net/ipv4/af_inet.c8
-rw-r--r--net/ipv4/fib_frontend.c1
-rw-r--r--net/ipv4/fou.c4
-rw-r--r--net/ipv4/gre_offload.c2
-rw-r--r--net/ipv4/igmp.c61
-rw-r--r--net/ipv4/inet_fragment.c2
-rw-r--r--net/ipv4/inet_hashtables.c4
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/ip_sockglue.c11
-rw-r--r--net/ipv4/netfilter/ip_tables.c1
-rw-r--r--net/ipv4/netfilter/nf_tproxy_ipv4.c18
-rw-r--r--net/ipv4/sysctl_net_ipv4.c23
-rw-r--r--net/ipv4/tcp.c39
-rw-r--r--net/ipv4/tcp_dctcp.c75
-rw-r--r--net/ipv4/tcp_input.c78
-rw-r--r--net/ipv4/tcp_ipv4.c23
-rw-r--r--net/ipv4/tcp_output.c36
-rw-r--r--net/ipv4/udp.c10
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/addrconf.c12
-rw-r--r--net/ipv6/af_inet6.c4
-rw-r--r--net/ipv6/calipso.c9
-rw-r--r--net/ipv6/datagram.c7
-rw-r--r--net/ipv6/exthdrs.c111
-rw-r--r--net/ipv6/icmp.c5
-rw-r--r--net/ipv6/inet6_hashtables.c4
-rw-r--r--net/ipv6/ip6_fib.c161
-rw-r--r--net/ipv6/ip6_gre.c3
-rw-r--r--net/ipv6/ip6_output.c5
-rw-r--r--net/ipv6/ipv6_sockglue.c32
-rw-r--r--net/ipv6/mcast.c76
-rw-r--r--net/ipv6/ndisc.c2
-rw-r--r--net/ipv6/netfilter/ip6_tables.c1
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c8
-rw-r--r--net/ipv6/netfilter/nf_tproxy_ipv6.c18
-rw-r--r--net/ipv6/raw.c4
-rw-r--r--net/ipv6/route.c51
-rw-r--r--net/ipv6/seg6_hmac.c2
-rw-r--r--net/ipv6/seg6_iptunnel.c2
-rw-r--r--net/ipv6/tcp_ipv6.c6
-rw-r--r--net/iucv/af_iucv.c7
-rw-r--r--net/kcm/kcmsock.c10
-rw-r--r--net/key/af_key.c2
-rw-r--r--net/l2tp/l2tp_ip.c2
-rw-r--r--net/l2tp/l2tp_ip6.c2
-rw-r--r--net/l2tp/l2tp_ppp.c2
-rw-r--r--net/llc/af_llc.c2
-rw-r--r--net/mac80211/rx.c5
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c3
-rw-r--r--net/ncsi/ncsi-aen.c10
-rw-r--r--net/ncsi/ncsi-manage.c49
-rw-r--r--net/netfilter/Kconfig25
-rw-r--r--net/netfilter/Makefile7
-rw-r--r--net/netfilter/nf_conncount.c52
-rw-r--r--net/netfilter/nf_conntrack_core.c2
-rw-r--r--net/netfilter/nf_conntrack_helper.c5
-rw-r--r--net/netfilter/nf_conntrack_proto_dccp.c8
-rw-r--r--net/netfilter/nf_log.c13
-rw-r--r--net/netfilter/nf_tables_api.c304
-rw-r--r--net/netfilter/nf_tables_set_core.c28
-rw-r--r--net/netfilter/nfnetlink_queue.c3
-rw-r--r--net/netfilter/nft_compat.c13
-rw-r--r--net/netfilter/nft_immediate.c3
-rw-r--r--net/netfilter/nft_lookup.c13
-rw-r--r--net/netfilter/nft_set_bitmap.c19
-rw-r--r--net/netfilter/nft_set_hash.c30
-rw-r--r--net/netfilter/nft_set_rbtree.c26
-rw-r--r--net/netfilter/xt_TPROXY.c8
-rw-r--r--net/netlink/af_netlink.c2
-rw-r--r--net/netrom/af_netrom.c2
-rw-r--r--net/nfc/llcp_commands.c9
-rw-r--r--net/nfc/llcp_sock.c9
-rw-r--r--net/nfc/rawsock.c4
-rw-r--r--net/nsh/nsh.c2
-rw-r--r--net/packet/af_packet.c27
-rw-r--r--net/phonet/socket.c9
-rw-r--r--net/qrtr/qrtr.c15
-rw-r--r--net/rds/connection.c11
-rw-r--r--net/rds/loop.c56
-rw-r--r--net/rds/loop.h2
-rw-r--r--net/rose/af_rose.c2
-rw-r--r--net/rxrpc/af_rxrpc.c10
-rw-r--r--net/sched/act_csum.c6
-rw-r--r--net/sched/act_ife.c12
-rw-r--r--net/sched/act_tunnel_key.c6
-rw-r--r--net/sched/cls_api.c4
-rw-r--r--net/sched/cls_flower.c21
-rw-r--r--net/sched/sch_blackhole.c2
-rw-r--r--net/sched/sch_fq_codel.c25
-rw-r--r--net/sched/sch_hfsc.c4
-rw-r--r--net/sctp/chunk.c4
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/protocol.c2
-rw-r--r--net/sctp/socket.c4
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/smc/af_smc.c138
-rw-r--r--net/smc/smc.h8
-rw-r--r--net/smc/smc_clc.c3
-rw-r--r--net/smc/smc_close.c2
-rw-r--r--net/smc/smc_tx.c12
-rw-r--r--net/socket.c50
-rw-r--r--net/strparser/strparser.c22
-rw-r--r--net/sunrpc/xprt.c10
-rw-r--r--net/tipc/discover.c18
-rw-r--r--net/tipc/net.c17
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c14
-rw-r--r--net/tls/tls_main.c2
-rw-r--r--net/tls/tls_sw.c29
-rw-r--r--net/unix/af_unix.c30
-rw-r--r--net/vmw_vsock/af_vsock.c19
-rw-r--r--net/vmw_vsock/virtio_transport.c2
-rw-r--r--net/wireless/nl80211.c60
-rw-r--r--net/wireless/reg.c28
-rw-r--r--net/wireless/trace.h18
-rw-r--r--net/x25/af_x25.c2
-rw-r--r--net/xdp/xsk.c40
-rw-r--r--net/xdp/xsk_queue.h9
-rw-r--r--samples/bpf/.gitignore49
-rw-r--r--samples/bpf/parse_varlen.c6
-rw-r--r--samples/bpf/test_overhead_user.c19
-rw-r--r--samples/bpf/trace_event_user.c27
-rwxr-xr-xsamples/bpf/xdp2skb_meta.sh6
-rw-r--r--samples/bpf/xdp_fwd_kern.c8
-rw-r--r--samples/bpf/xdpsock_user.c2
-rw-r--r--samples/vfio-mdev/mbochs.c23
-rw-r--r--scripts/Kbuild.include2
-rw-r--r--scripts/Makefile.build5
-rw-r--r--scripts/Makefile.clean3
-rw-r--r--scripts/Makefile.modbuiltin4
-rw-r--r--scripts/Makefile.modinst4
-rw-r--r--scripts/Makefile.modpost4
-rw-r--r--scripts/Makefile.modsign3
-rwxr-xr-xscripts/cc-can-link.sh2
-rwxr-xr-xscripts/checkpatch.pl12
-rwxr-xr-xscripts/extract-vmlinux2
-rwxr-xr-xscripts/gcc-x86_64-has-stack-protector.sh2
-rw-r--r--scripts/kconfig/expr.h3
-rw-r--r--scripts/kconfig/preprocess.c2
-rw-r--r--scripts/kconfig/zconf.y8
-rwxr-xr-xscripts/tags.sh3
-rw-r--r--security/keys/dh.c6
-rw-r--r--security/selinux/selinuxfs.c78
-rw-r--r--security/smack/smack_lsm.c1
-rw-r--r--sound/core/rawmidi.c20
-rw-r--r--sound/core/seq/seq_clientmgr.c3
-rw-r--r--sound/core/timer.c2
-rw-r--r--sound/pci/hda/hda_codec.c5
-rw-r--r--sound/pci/hda/hda_codec.h1
-rw-r--r--sound/pci/hda/patch_ca0132.c67
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c24
-rw-r--r--sound/pci/hda/patch_realtek.c26
-rw-r--r--sound/pci/lx6464es/lx6464es.c1
-rw-r--r--tools/arch/arm/include/uapi/asm/kvm.h1
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/kvm.h1
-rw-r--r--tools/arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/bpf/bpftool/common.c11
-rw-r--r--tools/bpf/bpftool/perf.c5
-rw-r--r--tools/bpf/bpftool/prog.c16
-rw-r--r--tools/build/Build.include6
-rw-r--r--tools/build/Makefile2
-rw-r--r--tools/include/uapi/drm/drm.h7
-rw-r--r--tools/include/uapi/linux/bpf.h2
-rw-r--r--tools/include/uapi/linux/if_link.h2
-rw-r--r--tools/include/uapi/linux/kvm.h1
-rw-r--r--tools/objtool/check.c1
-rw-r--r--tools/objtool/elf.c47
-rw-r--r--tools/perf/Documentation/perf-stat.txt3
-rw-r--r--tools/perf/Makefile.config3
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/arch/x86/util/perf_regs.c2
-rw-r--r--tools/perf/bench/numa.c5
-rw-r--r--tools/perf/builtin-annotate.c11
-rw-r--r--tools/perf/builtin-c2c.c10
-rw-r--r--tools/perf/builtin-report.c3
-rw-r--r--tools/perf/builtin-script.c42
-rw-r--r--tools/perf/builtin-stat.c48
-rw-r--r--tools/perf/jvmti/jvmti_agent.c3
-rw-r--r--tools/perf/pmu-events/Build2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py40
-rwxr-xr-xtools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py4
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py2
-rw-r--r--tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py11
-rw-r--r--tools/perf/scripts/python/sched-migration.py14
-rw-r--r--tools/perf/tests/builtin-test.c2
-rw-r--r--tools/perf/tests/parse-events.c25
-rwxr-xr-xtools/perf/tests/shell/record+probe_libc_inet_pton.sh37
-rwxr-xr-xtools/perf/tests/shell/trace+probe_vfs_getname.sh2
-rw-r--r--tools/perf/tests/topology.c1
-rw-r--r--tools/perf/ui/gtk/hists.c2
-rw-r--r--tools/perf/util/c++/clang.cpp11
-rw-r--r--tools/perf/util/header.c12
-rw-r--r--tools/perf/util/hist.c12
-rw-r--r--tools/perf/util/hist.h4
-rw-r--r--tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c2
-rw-r--r--tools/perf/util/llvm-utils.c6
-rw-r--r--tools/perf/util/parse-events.y5
-rw-r--r--tools/perf/util/pmu.c99
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c37
-rw-r--r--tools/perf/util/sort.h4
-rw-r--r--tools/power/x86/turbostat/turbostat.82
-rw-r--r--tools/power/x86/turbostat/turbostat.c240
-rw-r--r--tools/testing/nvdimm/test/nfit.c3
-rw-r--r--tools/testing/selftests/bpf/Makefile2
-rw-r--r--tools/testing/selftests/bpf/config11
-rwxr-xr-xtools/testing/selftests/bpf/test_kmod.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_lirc_mode2.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_lwt_seg6local.sh9
-rwxr-xr-xtools/testing/selftests/bpf/test_offload.py12
-rw-r--r--tools/testing/selftests/bpf/test_sockmap.c6
-rwxr-xr-xtools/testing/selftests/bpf/test_tunnel.sh26
-rw-r--r--tools/testing/selftests/bpf/test_verifier.c63
-rw-r--r--tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc28
-rw-r--r--tools/testing/selftests/net/.gitignore1
-rw-r--r--tools/testing/selftests/net/config2
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/net/fib_tests.sh41
-rwxr-xr-xtools/testing/selftests/net/udpgso_bench.sh3
-rwxr-xr-xtools/testing/selftests/pstore/pstore_post_reboot_tests5
-rw-r--r--tools/testing/selftests/rseq/param_test.c24
-rw-r--r--tools/testing/selftests/rseq/rseq-arm.h1
-rw-r--r--tools/testing/selftests/rseq/rseq-mips.h725
-rw-r--r--tools/testing/selftests/rseq/rseq.h26
-rwxr-xr-x[-rw-r--r--]tools/testing/selftests/rseq/run_param_test.sh0
-rw-r--r--tools/testing/selftests/sparc64/Makefile20
-rw-r--r--tools/testing/selftests/sparc64/drivers/Makefile2
-rwxr-xr-xtools/testing/selftests/static_keys/test_static_keys.sh13
-rw-r--r--tools/testing/selftests/sync/config4
-rwxr-xr-xtools/testing/selftests/sysctl/sysctl.sh20
-rwxr-xr-xtools/testing/selftests/user/test_user_copy.sh7
-rw-r--r--tools/testing/selftests/vm/compaction_test.c4
-rw-r--r--tools/testing/selftests/vm/mlock2-tests.c12
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests5
-rw-r--r--tools/testing/selftests/vm/userfaultfd.c4
-rw-r--r--tools/testing/selftests/x86/sigreturn.c59
-rwxr-xr-xtools/testing/selftests/zram/zram.sh5
-rwxr-xr-xtools/testing/selftests/zram/zram_lib.sh5
-rw-r--r--tools/usb/ffs-test.c19
-rw-r--r--tools/virtio/linux/scatterlist.h18
-rw-r--r--virt/kvm/Kconfig2
-rw-r--r--virt/kvm/arm/mmu.c2
-rw-r--r--virt/kvm/arm/vgic/vgic-v3.c5
-rw-r--r--virt/kvm/eventfd.c17
-rw-r--r--virt/kvm/kvm_main.c19
1504 files changed, 16001 insertions, 8362 deletions
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index 9a08a7aa2bc3..1370b424a453 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -4924,3 +4924,8 @@
xirc2ps_cs= [NET,PCMCIA]
Format:
<irq>,<irq_mask>,<io>,<full_duplex>,<do_sound>,<lockup_hack>[,<irq2>[,<irq3>[,<irq4>]]]
+
+ xhci-hcd.quirks [USB,KNL]
+ A hex value specifying a bitmask of supplemental xhci
+ host controller quirks. The meaning of each bit can be
+ found in the header drivers/usb/host/xhci.h.
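A usage sketch for the new parameter: the value is a hex bitmask, and 0x40 below is a placeholder chosen only for illustration; the real bit assignments must be looked up in drivers/usb/host/xhci.h:

    # appended to the kernel command line in the bootloader configuration
    xhci-hcd.quirks=0x40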
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ab2fe0eda1d7..8f1d3de449b5 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -324,8 +324,7 @@ Global Attributes
``intel_pstate`` exposes several global attributes (files) in ``sysfs`` to
control its functionality at the system level. They are located in the
-``/sys/devices/system/cpu/cpufreq/intel_pstate/`` directory and affect all
-CPUs.
+``/sys/devices/system/cpu/intel_pstate/`` directory and affect all CPUs.
Some of them are not present if the ``intel_pstate=per_cpu_perf_limits``
argument is passed to the kernel in the command line.
@@ -379,6 +378,17 @@ argument is passed to the kernel in the command line.
but it affects the maximum possible value of per-policy P-state limits
(see `Interpretation of Policy Attributes`_ below for details).
+``hwp_dynamic_boost``
+ This attribute is only present if ``intel_pstate`` works in the
+ `active mode with the HWP feature enabled <Active Mode With HWP_>`_ in
+ the processor. If set (equal to 1), it causes the minimum P-state limit
+ to be increased dynamically for a short time whenever a task previously
+ waiting on I/O is selected to run on a given logical CPU (the purpose
+ of this mechanism is to improve performance).
+
+ This setting has no effect on logical CPUs whose minimum P-state limit
+ is directly set to the highest non-turbo P-state or above it.
+
.. _status_attr:
``status``
@@ -410,7 +420,7 @@ argument is passed to the kernel in the command line.
That only is supported in some configurations, though (for example, if
the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
the operation mode of the driver cannot be changed), and if it is not
- supported in the current configuration, writes to this attribute with
+ supported in the current configuration, writes to this attribute will
fail with an appropriate error.
Interpretation of Policy Attributes
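Since the hwp_dynamic_boost attribute documented above is an ordinary sysfs file, it can be inspected and toggled from a shell. A minimal sketch, assuming intel_pstate is running in active mode with HWP so that the attribute is present:

    cat /sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost    # prints 0 or 1
    echo 1 > /sys/devices/system/cpu/intel_pstate/hwp_dynamic_boost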
diff --git a/Documentation/core-api/kernel-api.rst b/Documentation/core-api/kernel-api.rst
index 8e44aea366c2..76fe2d0f5e7d 100644
--- a/Documentation/core-api/kernel-api.rst
+++ b/Documentation/core-api/kernel-api.rst
@@ -284,7 +284,7 @@ Resources Management
MTRR Handling
-------------
-.. kernel-doc:: arch/x86/kernel/cpu/mtrr/main.c
+.. kernel-doc:: arch/x86/kernel/cpu/mtrr/mtrr.c
:export:
Security Framework
diff --git a/Documentation/device-mapper/writecache.txt b/Documentation/device-mapper/writecache.txt
index 4424fa2c67d7..01532b3008ae 100644
--- a/Documentation/device-mapper/writecache.txt
+++ b/Documentation/device-mapper/writecache.txt
@@ -15,6 +15,8 @@ Constructor parameters:
size)
5. the number of optional parameters (the parameters with an argument
count as two)
+ start_sector n (default: 0)
+ offset from the start of cache device in 512-byte sectors
high_watermark n (default: 50)
start writeback when the number of used blocks reach this
watermark
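The start_sector option added above is passed in the optional-parameter list of the dm table line. A hypothetical sketch with placeholder volume names; note that start_sector takes an argument, so it counts as two optional parameters:

    # made-up origin and cache devices; 4096-byte blocks, cache data starting
    # 1024 sectors into the cache device
    dmsetup create wc --table "0 $(blockdev --getsz /dev/vg/origin) writecache s /dev/vg/origin /dev/vg/cache 4096 2 start_sector 1024"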
diff --git a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
index bdadc3da9556..6970f30a3770 100644
--- a/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/samsung-boards.txt
@@ -66,7 +66,7 @@ Required root node properties:
- "insignal,arndale-octa" - for Exynos5420-based Insignal Arndale
Octa board.
- "insignal,origen" - for Exynos4210-based Insignal Origen board.
- - "insignal,origen4412 - for Exynos4412-based Insignal Origen board.
+ - "insignal,origen4412" - for Exynos4412-based Insignal Origen board.
Optional nodes:
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 6fddb4f4f71a..3055d5c2c04e 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -36,7 +36,7 @@ Optional nodes:
- port/ports: to describe a connection to an external encoder. The
binding follows Documentation/devicetree/bindings/graph.txt and
- suppors a single port with a single endpoint.
+ supports a single port with a single endpoint.
- See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
diff --git a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
index 20fc72d9e61e..45a61b462287 100644
--- a/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
+++ b/Documentation/devicetree/bindings/gpio/nintendo,hollywood-gpio.txt
@@ -1,7 +1,7 @@
Nintendo Wii (Hollywood) GPIO controller
Required properties:
-- compatible: "nintendo,hollywood-gpio
+- compatible: "nintendo,hollywood-gpio"
- reg: Physical base address and length of the controller's registers.
- gpio-controller: Marks the device node as a GPIO controller.
- #gpio-cells: Should be <2>. The first cell is the pin number and the
diff --git a/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
new file mode 100644
index 000000000000..f2ec0d4f2dff
--- /dev/null
+++ b/Documentation/devicetree/bindings/input/sprd,sc27xx-vibra.txt
@@ -0,0 +1,23 @@
+Spreadtrum SC27xx PMIC Vibrator
+
+Required properties:
+- compatible: should be "sprd,sc2731-vibrator".
+- reg: address of vibrator control register.
+
+Example :
+
+ sc2731_pmic: pmic@0 {
+ compatible = "sprd,sc2731";
+ reg = <0>;
+ spi-max-frequency = <26000000>;
+ interrupts = <GIC_SPI 31 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ vibrator@eb4 {
+ compatible = "sprd,sc2731-vibrator";
+ reg = <0xeb4>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
index 121d9b7c79a2..1063c30d53f7 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
+++ b/Documentation/devicetree/bindings/input/touchscreen/hideep.txt
@@ -32,7 +32,7 @@ i2c@00000000 {
reg = <0x6c>;
interrupt-parent = <&gpx1>;
interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
- vdd-supply = <&ldo15_reg>";
+ vdd-supply = <&ldo15_reg>;
vid-supply = <&ldo18_reg>;
reset-gpios = <&gpx1 5 0>;
touchscreen-size-x = <1080>;
diff --git a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
index 1099fe0788fa..f246ccbf8838 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/nvidia,tegra20-ictlr.txt
@@ -15,7 +15,7 @@ Required properties:
include "nvidia,tegra30-ictlr".
- reg : Specifies base physical address and size of the registers.
Each controller must be described separately (Tegra20 has 4 of them,
- whereas Tegra30 and later have 5"
+ whereas Tegra30 and later have 5).
- interrupt-controller : Identifies the node as an interrupt controller.
- #interrupt-cells : Specifies the number of cells needed to encode an
interrupt source. The value must be 3.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
index 136bd612bd83..6a36bf66d932 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/st,stm32-exti.txt
@@ -12,7 +12,7 @@ Required properties:
specifier, shall be 2
- interrupts: interrupts references to primary interrupt controller
(only needed for exti controller with multiple exti under
- same parent interrupt: st,stm32-exti and st,stm32h7-exti")
+ same parent interrupt: st,stm32-exti and st,stm32h7-exti)
Example:
diff --git a/Documentation/devicetree/bindings/mips/brcm/soc.txt b/Documentation/devicetree/bindings/mips/brcm/soc.txt
index 356c29789cf5..3a66d3c483e1 100644
--- a/Documentation/devicetree/bindings/mips/brcm/soc.txt
+++ b/Documentation/devicetree/bindings/mips/brcm/soc.txt
@@ -152,7 +152,7 @@ Required properties:
- compatible : should contain one of:
"brcm,bcm7425-timers"
"brcm,bcm7429-timers"
- "brcm,bcm7435-timers and
+ "brcm,bcm7435-timers" and
"brcm,brcmstb-timers"
- reg : the timers register range
- interrupts : the interrupt line for this timer block
diff --git a/Documentation/devicetree/bindings/net/fsl-fman.txt b/Documentation/devicetree/bindings/net/fsl-fman.txt
index df873d1f3b7c..f8c33890bc29 100644
--- a/Documentation/devicetree/bindings/net/fsl-fman.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fman.txt
@@ -238,7 +238,7 @@ PROPERTIES
Must include one of the following:
- "fsl,fman-dtsec" for dTSEC MAC
- "fsl,fman-xgec" for XGEC MAC
- - "fsl,fman-memac for mEMAC MAC
+ - "fsl,fman-memac" for mEMAC MAC
- cell-index
Usage: required
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 9b387f861aed..7dec508987c7 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -133,7 +133,7 @@ located inside a PM domain with index 0 of a power controller represented by a
node with the label "power".
In the second example the consumer device are partitioned across two PM domains,
the first with index 0 and the second with index 1, of a power controller that
-is represented by a node with the label "power.
+is represented by a node with the label "power".
Optional properties:
- required-opps: This contains phandle to an OPP node in another device's OPP
diff --git a/Documentation/devicetree/bindings/regulator/tps65090.txt b/Documentation/devicetree/bindings/regulator/tps65090.txt
index ca69f5e3040c..ae326f263597 100644
--- a/Documentation/devicetree/bindings/regulator/tps65090.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65090.txt
@@ -16,7 +16,7 @@ Required properties:
Optional properties:
- ti,enable-ext-control: This is applicable for DCDC1, DCDC2 and DCDC3.
If DCDCs are externally controlled then this property should be there.
-- "dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
+- dcdc-ext-control-gpios: This is applicable for DCDC1, DCDC2 and DCDC3.
If DCDCs are externally controlled and if it is from GPIO then GPIO
number should be provided. If it is externally controlled and no GPIO
entry then driver will just configure this rails as external control
diff --git a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
index a21658f18fe6..3661e6153a92 100644
--- a/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
+++ b/Documentation/devicetree/bindings/reset/st,sti-softreset.txt
@@ -15,7 +15,7 @@ Please refer to reset.txt in this directory for common reset
controller binding usage.
Required properties:
-- compatible: Should be st,stih407-softreset";
+- compatible: Should be "st,stih407-softreset";
- #reset-cells: 1, see below
example:
diff --git a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
index d330c73de9a2..68b7d6207e3d 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qcom,geni-se.txt
@@ -39,7 +39,7 @@ Required properties:
Optional property:
- clock-frequency: Desired I2C bus clock frequency in Hz.
- When missing default to 400000Hz.
+ When missing default to 100000Hz.
Child nodes should conform to I2C bus binding as described in i2c.txt.
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
index 6a4aadc4ce06..84b28dbe9f15 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8016-sbc.txt
@@ -30,7 +30,7 @@ Required properties:
Board connectors:
* Headset Mic
- * Secondary Mic",
+ * Secondary Mic
* DMIC
* Ext Spk
diff --git a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
index aa54e49fc8a2..c7600a93ab39 100644
--- a/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
+++ b/Documentation/devicetree/bindings/sound/qcom,apq8096.txt
@@ -35,7 +35,7 @@ This binding describes the APQ8096 sound card, which uses qdsp for audio.
"Digital Mic3"
Audio pins and MicBias on WCD9335 Codec:
- "MIC_BIAS1
+ "MIC_BIAS1"
"MIC_BIAS2"
"MIC_BIAS3"
"MIC_BIAS4"
diff --git a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
index 252a05c5d976..c8c4b00ecb94 100644
--- a/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
+++ b/Documentation/devicetree/bindings/usb/rockchip,dwc3.txt
@@ -16,7 +16,8 @@ A child node must exist to represent the core DWC3 IP block. The name of
the node is not important. The content of the node is defined in dwc3.txt.
Phy documentation is provided in the following places:
-Documentation/devicetree/bindings/phy/qcom-dwc3-usb-phy.txt
+Documentation/devicetree/bindings/phy/phy-rockchip-inno-usb2.txt - USB2.0 PHY
+Documentation/devicetree/bindings/phy/phy-rockchip-typec.txt - Type-C PHY
Example device nodes:
diff --git a/Documentation/devicetree/bindings/w1/w1-gpio.txt b/Documentation/devicetree/bindings/w1/w1-gpio.txt
index 6e09c35d9f1a..37091902a021 100644
--- a/Documentation/devicetree/bindings/w1/w1-gpio.txt
+++ b/Documentation/devicetree/bindings/w1/w1-gpio.txt
@@ -15,7 +15,7 @@ Optional properties:
Examples:
- onewire@0 {
+ onewire {
compatible = "w1-gpio";
gpios = <&gpio 126 0>, <&gpio 105 0>;
};
diff --git a/Documentation/driver-api/infrastructure.rst b/Documentation/driver-api/infrastructure.rst
index bee1b9a1702f..6172f3cc3d0b 100644
--- a/Documentation/driver-api/infrastructure.rst
+++ b/Documentation/driver-api/infrastructure.rst
@@ -49,10 +49,10 @@ Device Drivers Base
Device Drivers DMA Management
-----------------------------
-.. kernel-doc:: drivers/base/dma-coherent.c
+.. kernel-doc:: kernel/dma/coherent.c
:export:
-.. kernel-doc:: drivers/base/dma-mapping.c
+.. kernel-doc:: kernel/dma/mapping.c
:export:
Device drivers PnP support
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 2c391338c675..37bf0a9de75c 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -441,8 +441,6 @@ prototypes:
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
- struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
- __poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -473,7 +471,7 @@ prototypes:
};
locking rules:
- All except for ->poll_mask may block.
+ All may block.
->llseek() locking has moved from llseek to the individual llseek
implementations. If your fs is not using generic_file_llseek, you
@@ -505,9 +503,6 @@ in sys_read() and friends.
the lease within the individual filesystem to record the result of the
operation
-->poll_mask can be called with or without the waitqueue lock for the waitqueue
-returned from ->get_poll_head.
-
--------------------------- dquot_operations -------------------------------
prototypes:
int (*write_dquot) (struct dquot *);
diff --git a/Documentation/filesystems/cifs/AUTHORS b/Documentation/filesystems/cifs/AUTHORS
index 9f4f87e16240..75865da2ce14 100644
--- a/Documentation/filesystems/cifs/AUTHORS
+++ b/Documentation/filesystems/cifs/AUTHORS
@@ -42,9 +42,11 @@ Jeff Layton (many, many fixes, as well as great work on the cifs Kerberos code)
Scott Lovenberg
Pavel Shilovsky (for great work adding SMB2 support, and various SMB3 features)
Aurelien Aptel (for DFS SMB3 work and some key bug fixes)
-Ronnie Sahlberg (for SMB3 xattr work and bug fixes)
+Ronnie Sahlberg (for SMB3 xattr work, bug fixes, and lots of great work on compounding)
Shirish Pargaonkar (for many ACL patches over the years)
Sachin Prabhu (many bug fixes, including for reconnect, copy offload and security)
+Paulo Alcantara
+Long Li (some great work on RDMA, SMB Direct)
Test case and Bug Report contributors
@@ -58,5 +60,4 @@ mention to the Stanford Checker (SWAT) which pointed out many minor
bugs in error paths. Valuable suggestions also have come from Al Viro
and Dave Miller.
-And thanks to the IBM LTC and Power test teams and SuSE testers for
-finding multiple bugs during excellent stress test runs.
+And thanks to the IBM LTC and Power test teams and SuSE and Citrix and RedHat testers for finding multiple bugs during excellent stress test runs.
diff --git a/Documentation/filesystems/cifs/CHANGES b/Documentation/filesystems/cifs/CHANGES
index bc0025cdd1c9..455e1cc494a9 100644
--- a/Documentation/filesystems/cifs/CHANGES
+++ b/Documentation/filesystems/cifs/CHANGES
@@ -1,3 +1,6 @@
+See https://wiki.samba.org/index.php/LinuxCIFSKernel for
+more current information.
+
Version 1.62
------------
Add sockopt=TCP_NODELAY mount option. EA (xattr) routines hardened
diff --git a/Documentation/filesystems/cifs/TODO b/Documentation/filesystems/cifs/TODO
index c5adf149b57f..852499aed64b 100644
--- a/Documentation/filesystems/cifs/TODO
+++ b/Documentation/filesystems/cifs/TODO
@@ -9,14 +9,14 @@ is a partial list of the known problems and missing features:
a) SMB3 (and SMB3.02) missing optional features:
- multichannel (started), integration with RDMA
- - directory leases (improved metadata caching)
- - T10 copy offload (copy chunk, and "Duplicate Extents" ioctl
+ - directory leases (improved metadata caching), started (root dir only)
+ - T10 copy offload ie "ODX" (copy chunk, and "Duplicate Extents" ioctl
currently the only two server side copy mechanisms supported)
b) improved sparse file support
c) Directory entry caching relies on a 1 second timer, rather than
-using Directory Leases
+using Directory Leases; currently only the root file handle is cached for longer

d) quota support (needs minor kernel change since quota calls
to make it to network filesystems or deviceless filesystems)
@@ -42,6 +42,8 @@ mount or a per server basis to client UIDs or nobody if no mapping
exists. Also better integration with winbind for resolving SID owners
k) Add tools to take advantage of more smb3 specific ioctls and features
+(passthrough ioctl/fsctl for sending various SMB3 fsctls to the server
+is in progress)
l) encrypted file support
@@ -71,9 +73,8 @@ t) split cifs and smb3 support into separate modules so legacy (and less
secure) CIFS dialect can be disabled in environments that don't need it
and simplify the code.
-u) Finish up SMB3.1.1 dialect support
-
-v) POSIX Extensions for SMB3.1.1
+v) POSIX Extensions for SMB3.1.1 (started, create and mkdir support added
+so far).
KNOWN BUGS
====================================
@@ -92,8 +93,8 @@ Misc testing to do
1) check out max path names and max path name components against various server
types. Try nested symlinks (8 deep). Return max path name in stat -f information
-2) Improve xfstest's cifs enablement and adapt xfstests where needed to test
-cifs better
+2) Improve xfstest's cifs/smb3 enablement and adapt xfstests where needed to test
+cifs/smb3 better
3) Additional performance testing and optimization using iozone and similar -
there are some easy changes that can be done to parallelize sequential writes,
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 829a7b7857a4..f608180ad59d 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -857,8 +857,6 @@ struct file_operations {
ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
int (*iterate) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
- struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
- __poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -903,17 +901,6 @@ otherwise noted.
activity on this file and (optionally) go to sleep until there
is activity. Called by the select(2) and poll(2) system calls
- get_poll_head: Returns the struct wait_queue_head that callers can
- wait on. Callers need to check the returned events using ->poll_mask
- once woken. Can return NULL to indicate polling is not supported,
- or any error code using the ERR_PTR convention to indicate that a
- grave error occured and ->poll_mask shall not be called.
-
- poll_mask: return the mask of EPOLL* values describing the file descriptor
- state. Called either before going to sleep on the waitqueue returned by
- get_poll_head, or after it has been woken. If ->get_poll_head and
- ->poll_mask are implemented ->poll does not need to be implement.
-
unlocked_ioctl: called by the ioctl(2) system call.
compat_ioctl: called by the ioctl(2) system call when 32 bit system calls
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 6c9c69ec3986..114c7ce7b58d 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,11 @@ LDFLAGS_MODULE
--------------------------------------------------
Additional options used for $(LD) when linking modules.
+KBUILD_KCONFIG
+--------------------------------------------------
+Set the top-level Kconfig file to the value of this environment
+variable. The default name is "Kconfig".
+
KBUILD_VERBOSE
--------------------------------------------------
Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -88,7 +93,8 @@ In most cases the name of the architecture is the same as the
directory name found in the arch/ directory.
But some architectures such as x86 and sparc have aliases.
x86: i386 for 32 bit, x86_64 for 64 bit
-sparc: sparc for 32 bit, sparc64 for 64 bit
+sh: sh for 32 bit, sh64 for 64 bit
+sparc: sparc32 for 32 bit, sparc64 for 64 bit
CROSS_COMPILE
--------------------------------------------------
@@ -148,15 +154,6 @@ stripped after they are installed. If INSTALL_MOD_STRIP is '1', then
the default option --strip-debug will be used. Otherwise,
INSTALL_MOD_STRIP value will be used as the options to the strip command.
-INSTALL_FW_PATH
---------------------------------------------------
-INSTALL_FW_PATH specifies where to install the firmware blobs.
-The default value is:
-
- $(INSTALL_MOD_PATH)/lib/firmware
-
-The value can be overridden in which case the default value is ignored.
-
INSTALL_HDR_PATH
--------------------------------------------------
INSTALL_HDR_PATH specifies where to install user space headers when
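The KBUILD_KCONFIG variable added earlier in this file is read from the environment, so an alternative top-level Kconfig can be selected per invocation. A small sketch in which Kconfig.custom is a made-up file name:

    # use a custom top-level Kconfig instead of the default "Kconfig"
    KBUILD_KCONFIG=Kconfig.custom make defconfig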
diff --git a/Documentation/kbuild/kconfig-language.txt b/Documentation/kbuild/kconfig-language.txt
index 3534a84d206c..64e0775a62d4 100644
--- a/Documentation/kbuild/kconfig-language.txt
+++ b/Documentation/kbuild/kconfig-language.txt
@@ -430,6 +430,12 @@ This sets the config program's title bar if the config program chooses
to use it. It should be placed at the top of the configuration, before any
other statement.
+'#' Kconfig source file comment:
+
+An unquoted '#' character anywhere in a source file line indicates
+the beginning of a source file comment. The remainder of that line
+is a comment.
+
Kconfig hints
-------------
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 7233118f3a05..68c82914c0f3 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -2,9 +2,9 @@ This file contains some assistance for using "make *config".
Use "make help" to list all of the possible configuration targets.
-The xconfig ('qconf') and menuconfig ('mconf') programs also
-have embedded help text. Be sure to check it for navigation,
-search, and other general help text.
+The xconfig ('qconf'), menuconfig ('mconf'), and nconfig ('nconf')
+programs also have embedded help text. Be sure to check that for
+navigation, search, and other general help text.
======================================================================
General
@@ -17,13 +17,16 @@ this happens, using a previously working .config file and running
for you, so you may find that you need to see what NEW kernel
symbols have been introduced.
-To see a list of new config symbols when using "make oldconfig", use
+To see a list of new config symbols, use
cp user/some/old.config .config
make listnewconfig
and the config program will list any new symbols, one per line.
+Alternatively, you can use the brute force method:
+
+ make oldconfig
scripts/diffconfig .config.old .config | less
______________________________________________________________________
@@ -160,7 +163,7 @@ Searching in menuconfig:
This lists all config symbols that contain "hotplug",
e.g., HOTPLUG_CPU, MEMORY_HOTPLUG.
- For search help, enter / followed TAB-TAB-TAB (to highlight
+ For search help, enter / followed by TAB-TAB (to highlight
<Help>) and Enter. This will tell you that you can also use
regular expressions (regexes) in the search string, so if you
are not interested in MEMORY_HOTPLUG, you could try
@@ -203,6 +206,39 @@ Example:
======================================================================
+nconfig
+--------------------------------------------------
+
+nconfig is an alternate text-based configurator. It lists function
+keys across the bottom of the terminal (window) that execute commands.
+You can also just use the corresponding numeric key to execute the
+commands unless you are in a data entry window. E.g., instead of F6
+for Save, you can just press 6.
+
+Use F1 for Global help or F3 for the Short help menu.
+
+Searching in nconfig:
+
+ You can search either in the menu entry "prompt" strings
+ or in the configuration symbols.
+
+ Use / to begin a search through the menu entries. This does
+ not support regular expressions. Use <Down> or <Up> for
+ Next hit and Previous hit, respectively. Use <Esc> to
+ terminate the search mode.
+
+ F8 (SymSearch) searches the configuration symbols for the
+ given string or regular expression (regex).
+
+NCONFIG_MODE
+--------------------------------------------------
+This mode shows all sub-menus in one large tree.
+
+Example:
+ make NCONFIG_MODE=single_menu nconfig
+
+
+======================================================================
xconfig
--------------------------------------------------
@@ -230,8 +266,7 @@ gconfig
Searching in gconfig:
- None (gconfig isn't maintained as well as xconfig or menuconfig);
- however, gconfig does have a few more viewing choices than
- xconfig does.
+ There is no search command in gconfig. However, gconfig does
+ have several different viewing choices, modes, and options.
###
diff --git a/Documentation/networking/bonding.txt b/Documentation/networking/bonding.txt
index c13214d073a4..d3e5dd26db12 100644
--- a/Documentation/networking/bonding.txt
+++ b/Documentation/networking/bonding.txt
@@ -1490,7 +1490,7 @@ To remove an ARP target:
To configure the interval between learning packet transmits:
# echo 12 > /sys/class/net/bond0/bonding/lp_interval
- NOTE: the lp_inteval is the number of seconds between instances where
+ NOTE: the lp_interval is the number of seconds between instances where
the bonding driver sends learning packets to each slaves peer switch. The
default interval is 1 second.
diff --git a/Documentation/networking/e100.rst b/Documentation/networking/e100.rst
index d4d837027925..f81111eba9c5 100644
--- a/Documentation/networking/e100.rst
+++ b/Documentation/networking/e100.rst
@@ -1,3 +1,4 @@
+==============================================================
Linux* Base Driver for the Intel(R) PRO/100 Family of Adapters
==============================================================
@@ -46,123 +47,131 @@ Driver Configuration Parameters
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
-Rx Descriptors: Number of receive descriptors. A receive descriptor is a data
+Rx Descriptors:
+ Number of receive descriptors. A receive descriptor is a data
structure that describes a receive buffer and its attributes to the network
controller. The data in the descriptor is used by the controller to write
data from the controller to host memory. In the 3.x.x driver the valid range
for this parameter is 64-256. The default value is 256. This parameter can be
changed using the command::
- ethtool -G eth? rx n
+ ethtool -G eth? rx n
Where n is the number of desired Rx descriptors.
-Tx Descriptors: Number of transmit descriptors. A transmit descriptor is a data
+Tx Descriptors:
+ Number of transmit descriptors. A transmit descriptor is a data
structure that describes a transmit buffer and its attributes to the network
controller. The data in the descriptor is used by the controller to read
data from the host memory to the controller. In the 3.x.x driver the valid
range for this parameter is 64-256. The default value is 128. This parameter
can be changed using the command::
- ethtool -G eth? tx n
+ ethtool -G eth? tx n
Where n is the number of desired Tx descriptors.
-Speed/Duplex: The driver auto-negotiates the link speed and duplex settings by
+Speed/Duplex:
+ The driver auto-negotiates the link speed and duplex settings by
default. The ethtool utility can be used as follows to force speed/duplex.::
- ethtool -s eth? autoneg off speed {10|100} duplex {full|half}
+ ethtool -s eth? autoneg off speed {10|100} duplex {full|half}
NOTE: setting the speed/duplex to incorrect values will cause the link to
fail.
-Event Log Message Level: The driver uses the message level flag to log events
+Event Log Message Level:
+ The driver uses the message level flag to log events
to syslog. The message level can be set at driver load time. It can also be
set using the command::
- ethtool -s eth? msglvl n
+ ethtool -s eth? msglvl n
Additional Configurations
=========================
- Configuring the Driver on Different Distributions
- -------------------------------------------------
+Configuring the Driver on Different Distributions
+-------------------------------------------------
- Configuring a network driver to load properly when the system is started is
- distribution dependent. Typically, the configuration process involves adding
- an alias line to /etc/modprobe.d/*.conf as well as editing other system
- startup scripts and/or configuration files. Many popular Linux
- distributions ship with tools to make these changes for you. To learn the
- proper way to configure a network device for your system, refer to your
- distribution documentation. If during this process you are asked for the
- driver or module name, the name for the Linux Base Driver for the Intel
- PRO/100 Family of Adapters is e100.
+Configuring a network driver to load properly when the system is started
+is distribution dependent. Typically, the configuration process involves
+adding an alias line to `/etc/modprobe.d/*.conf` as well as editing other
+system startup scripts and/or configuration files. Many popular Linux
+distributions ship with tools to make these changes for you. To learn
+the proper way to configure a network device for your system, refer to
+your distribution documentation. If during this process you are asked
+for the driver or module name, the name for the Linux Base Driver for
+the Intel PRO/100 Family of Adapters is e100.
- As an example, if you install the e100 driver for two PRO/100 adapters
- (eth0 and eth1), add the following to a configuration file in /etc/modprobe.d/
+As an example, if you install the e100 driver for two PRO/100 adapters
+(eth0 and eth1), add the following to a configuration file in
+/etc/modprobe.d/::
alias eth0 e100
alias eth1 e100
- Viewing Link Messages
- ---------------------
- In order to see link messages and other Intel driver information on your
- console, you must set the dmesg level up to six. This can be done by
- entering the following on the command line before loading the e100 driver::
+Viewing Link Messages
+---------------------
+
+In order to see link messages and other Intel driver information on your
+console, you must set the dmesg level up to six. This can be done by
+entering the following on the command line before loading the e100
+driver::
dmesg -n 6
- If you wish to see all messages issued by the driver, including debug
- messages, set the dmesg level to eight.
+If you wish to see all messages issued by the driver, including debug
+messages, set the dmesg level to eight.
- NOTE: This setting is not saved across reboots.
+NOTE: This setting is not saved across reboots.
+ethtool
+-------
- ethtool
- -------
+The driver utilizes the ethtool interface for driver configuration and
+diagnostics, as well as displaying statistical information. The ethtool
+version 1.6 or later is required for this functionality.
- The driver utilizes the ethtool interface for driver configuration and
- diagnostics, as well as displaying statistical information. The ethtool
- version 1.6 or later is required for this functionality.
+The latest release of ethtool can be found from
+https://www.kernel.org/pub/software/network/ethtool/
- The latest release of ethtool can be found from
- https://www.kernel.org/pub/software/network/ethtool/
+Enabling Wake on LAN* (WoL)
+---------------------------
+WoL is provided through the ethtool* utility. For instructions on
+enabling WoL with ethtool, refer to the ethtool man page. WoL will be
+enabled on the system during the next shut down or reboot. For this
+driver version, in order to enable WoL, the e100 driver must be loaded
+when shutting down or rebooting the system.
- Enabling Wake on LAN* (WoL)
- ---------------------------
- WoL is provided through the ethtool* utility. For instructions on enabling
- WoL with ethtool, refer to the ethtool man page.
+NAPI
+----
- WoL will be enabled on the system during the next shut down or reboot. For
- this driver version, in order to enable WoL, the e100 driver must be
- loaded when shutting down or rebooting the system.
+NAPI (Rx polling mode) is supported in the e100 driver.
- NAPI
- ----
+See https://wiki.linuxfoundation.org/networking/napi for more
+information on NAPI.
- NAPI (Rx polling mode) is supported in the e100 driver.
+Multiple Interfaces on Same Ethernet Broadcast Network
+------------------------------------------------------
- See https://wiki.linuxfoundation.org/networking/napi for more information
- on NAPI.
+Due to the default ARP behavior on Linux, it is not possible to have one
+system on two IP networks in the same Ethernet broadcast domain
+(non-partitioned switch) behave as expected. All Ethernet interfaces
+will respond to IP traffic for any IP address assigned to the system.
+This results in unbalanced receive traffic.
- Multiple Interfaces on Same Ethernet Broadcast Network
- ------------------------------------------------------
+If you have multiple interfaces in a server, either turn on ARP
+filtering by
- Due to the default ARP behavior on Linux, it is not possible to have
- one system on two IP networks in the same Ethernet broadcast domain
- (non-partitioned switch) behave as expected. All Ethernet interfaces
- will respond to IP traffic for any IP address assigned to the system.
- This results in unbalanced receive traffic.
+(1) entering::
- If you have multiple interfaces in a server, either turn on ARP
- filtering by
+ echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
- (1) entering:: echo 1 > /proc/sys/net/ipv4/conf/all/arp_filter
- (this only works if your kernel's version is higher than 2.4.5), or
+ (this only works if your kernel's version is higher than 2.4.5), or
- (2) installing the interfaces in separate broadcast domains (either
- in different switches or in a switch partitioned to VLANs).
+(2) installing the interfaces in separate broadcast domains (either
+ in different switches or in a switch partitioned to VLANs).
Support
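The arp_filter toggle shown above can equivalently be set through sysctl; adding the same key to /etc/sysctl.conf makes it persistent across reboots:

    sysctl -w net.ipv4.conf.all.arp_filter=1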
diff --git a/Documentation/networking/e1000.rst b/Documentation/networking/e1000.rst
index 616848940e63..f10dd4086921 100644
--- a/Documentation/networking/e1000.rst
+++ b/Documentation/networking/e1000.rst
@@ -1,3 +1,4 @@
+===========================================================
Linux* Base Driver for Intel(R) Ethernet Network Connection
===========================================================
@@ -33,7 +34,8 @@ Command Line Parameters
The default value for each parameter is generally the recommended setting,
unless otherwise noted.
-NOTES: For more information about the AutoNeg, Duplex, and Speed
+NOTES:
+ For more information about the AutoNeg, Duplex, and Speed
parameters, see the "Speed and Duplex Configuration" section in
this document.
@@ -44,22 +46,27 @@ NOTES: For more information about the AutoNeg, Duplex, and Speed
AutoNeg
-------
+
(Supported only on adapters with copper connections)
-Valid Range: 0x01-0x0F, 0x20-0x2F
-Default Value: 0x2F
+
+:Valid Range: 0x01-0x0F, 0x20-0x2F
+:Default Value: 0x2F
This parameter is a bit-mask that specifies the speed and duplex settings
advertised by the adapter. When this parameter is used, the Speed and
Duplex parameters must not be specified.
-NOTE: Refer to the Speed and Duplex section of this readme for more
+NOTE:
+ Refer to the Speed and Duplex section of this readme for more
information on the AutoNeg parameter.
Duplex
------
+
(Supported only on adapters with copper connections)
-Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full)
-Default Value: 0
+
+:Valid Range: 0-2 (0=auto-negotiate, 1=half, 2=full)
+:Default Value: 0
This defines the direction in which data is allowed to flow. Can be
either one or two-directional. If both Duplex and the link partner are
@@ -69,18 +76,22 @@ duplex.
FlowControl
-----------
-Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
-Default Value: Reads flow control settings from the EEPROM
+
+:Valid Range: 0-3 (0=none, 1=Rx only, 2=Tx only, 3=Rx&Tx)
+:Default Value: Reads flow control settings from the EEPROM
This parameter controls the automatic generation(Tx) and response(Rx)
to Ethernet PAUSE frames.
InterruptThrottleRate
---------------------
+
(not supported on Intel(R) 82542, 82543 or 82544-based adapters)
-Valid Range: 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
- 4=simplified balancing)
-Default Value: 3
+
+:Valid Range:
+ 0,1,3,4,100-100000 (0=off, 1=dynamic, 3=dynamic conservative,
+ 4=simplified balancing)
+:Default Value: 3
The driver can limit the amount of interrupts per second that the adapter
will generate for incoming packets. It does this by writing a value to the
@@ -134,13 +145,15 @@ Setting InterruptThrottleRate to 0 turns off any interrupt moderation
and may improve small packet latency, but is generally not suitable
for bulk throughput traffic.
-NOTE: InterruptThrottleRate takes precedence over the TxAbsIntDelay and
+NOTE:
+ InterruptThrottleRate takes precedence over the TxAbsIntDelay and
RxAbsIntDelay parameters. In other words, minimizing the receive
and/or transmit absolute delays does not force the controller to
generate more interrupts than what the Interrupt Throttle Rate
allows.
-CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection
+CAUTION:
+ If you are using the Intel(R) PRO/1000 CT Network Connection
(controller 82547), setting InterruptThrottleRate to a value
greater than 75,000, may hang (stop transmitting) adapters
under certain network conditions. If this occurs a NETDEV
@@ -150,7 +163,8 @@ CAUTION: If you are using the Intel(R) PRO/1000 CT Network Connection
hang, ensure that InterruptThrottleRate is set no greater
than 75,000 and is not set to 0.
-NOTE: When e1000 is loaded with default settings and multiple adapters
+NOTE:
+ When e1000 is loaded with default settings and multiple adapters
are in use simultaneously, the CPU utilization may increase non-
linearly. In order to limit the CPU utilization without impacting
the overall throughput, we recommend that you load the driver as
@@ -167,9 +181,11 @@ NOTE: When e1000 is loaded with default settings and multiple adapters
RxDescriptors
-------------
-Valid Range: 48-256 for 82542 and 82543-based adapters
- 48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
This value specifies the number of receive buffer descriptors allocated
by the driver. Increasing this value allows the driver to buffer more
@@ -179,15 +195,17 @@ Each descriptor is 16 bytes. A receive buffer is also allocated for each
descriptor and can be either 2048, 4096, 8192, or 16384 bytes, depending
on the MTU setting. The maximum MTU size is 16110.
-NOTE: MTU designates the frame size. It only needs to be set for Jumbo
+NOTE:
+ MTU designates the frame size. It only needs to be set for Jumbo
Frames. Depending on the available system resources, the request
for a higher number of receive descriptors may be denied. In this
case, use a lower number.
RxIntDelay
----------
-Valid Range: 0-65535 (0=off)
-Default Value: 0
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 0
This value delays the generation of receive interrupts in units of 1.024
microseconds. Receive interrupt reduction can improve CPU efficiency if
@@ -197,7 +215,8 @@ of TCP traffic. If the system is reporting dropped receives, this value
may be set too high, causing the driver to run out of available receive
descriptors.
-CAUTION: When setting RxIntDelay to a value other than 0, adapters may
+CAUTION:
+ When setting RxIntDelay to a value other than 0, adapters may
hang (stop transmitting) under certain network conditions. If
this occurs a NETDEV WATCHDOG message is logged in the system
event log. In addition, the controller is automatically reset,
@@ -206,9 +225,11 @@ CAUTION: When setting RxIntDelay to a value other than 0, adapters may
RxAbsIntDelay
-------------
+
(This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range: 0-65535 (0=off)
-Default Value: 128
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 128
This value, in units of 1.024 microseconds, limits the delay in which a
receive interrupt is generated. Useful only if RxIntDelay is non-zero,
@@ -219,9 +240,11 @@ conditions.
Speed
-----
+
(This parameter is supported only on adapters with copper connections.)
-Valid Settings: 0, 10, 100, 1000
-Default Value: 0 (auto-negotiate at all supported speeds)
+
+:Valid Settings: 0, 10, 100, 1000
+:Default Value: 0 (auto-negotiate at all supported speeds)
Speed forces the line speed to the specified value in megabits per second
(Mbps). If this parameter is not specified or is set to 0 and the link
@@ -230,22 +253,26 @@ speed. Duplex should also be set when Speed is set to either 10 or 100.
TxDescriptors
-------------
-Valid Range: 48-256 for 82542 and 82543-based adapters
- 48-4096 for all other supported adapters
-Default Value: 256
+
+:Valid Range:
+ - 48-256 for 82542 and 82543-based adapters
+ - 48-4096 for all other supported adapters
+:Default Value: 256
This value is the number of transmit descriptors allocated by the driver.
Increasing this value allows the driver to queue more transmits. Each
descriptor is 16 bytes.
-NOTE: Depending on the available system resources, the request for a
+NOTE:
+ Depending on the available system resources, the request for a
higher number of transmit descriptors may be denied. In this case,
use a lower number.
TxIntDelay
----------
-Valid Range: 0-65535 (0=off)
-Default Value: 8
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 8
This value delays the generation of transmit interrupts in units of
1.024 microseconds. Transmit interrupt reduction can improve CPU
@@ -255,9 +282,11 @@ causing the driver to run out of available transmit descriptors.
TxAbsIntDelay
-------------
+
(This parameter is supported only on 82540, 82545 and later adapters.)
-Valid Range: 0-65535 (0=off)
-Default Value: 32
+
+:Valid Range: 0-65535 (0=off)
+:Default Value: 32
This value, in units of 1.024 microseconds, limits the delay in which a
transmit interrupt is generated. Useful only if TxIntDelay is non-zero,
@@ -268,18 +297,21 @@ network conditions.
XsumRX
------
+
(This parameter is NOT supported on the 82542-based adapter.)
-Valid Range: 0-1
-Default Value: 1
+
+:Valid Range: 0-1
+:Default Value: 1
A value of '1' indicates that the driver should enable IP checksum
offload for received packets (both UDP and TCP) to the adapter hardware.
Copybreak
---------
-Valid Range: 0-xxxxxxx (0=off)
-Default Value: 256
-Usage: modprobe e1000.ko copybreak=128
+
+:Valid Range: 0-xxxxxxx (0=off)
+:Default Value: 256
+:Usage: modprobe e1000.ko copybreak=128
Driver copies all packets below or equaling this size to a fresh RX
buffer before handing it up the stack.
@@ -291,8 +323,9 @@ it is also available during runtime at
SmartPowerDownEnable
--------------------
-Valid Range: 0-1
-Default Value: 0 (disabled)
+
+:Valid Range: 0-1
+:Default Value: 0 (disabled)
Allows PHY to turn off in lower power states. The user can turn off
this parameter in supported chipsets.
@@ -308,14 +341,14 @@ fiber interface board only links at 1000 Mbps full-duplex.
For copper-based boards, the keywords interact as follows:
- The default operation is auto-negotiate. The board advertises all
+- The default operation is auto-negotiate. The board advertises all
supported speed and duplex combinations, and it links at the highest
common speed and duplex mode IF the link partner is set to auto-negotiate.
- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
+- If Speed = 1000, limited auto-negotiation is enabled and only 1000 Mbps
is advertised (The 1000BaseT spec requires auto-negotiation.)
- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
+- If Speed = 10 or 100, then both Speed and Duplex should be set. Auto-
negotiation is disabled, and the AutoNeg parameter is ignored. Partner
SHOULD also be forced.
@@ -327,13 +360,15 @@ process.
The parameter may be specified as either a decimal or hexadecimal value as
determined by the bitmap below.
+============== ====== ====== ======= ======= ====== ====== ======= ======
Bit position   7      6      5       4       3      2      1       0
Decimal Value  128    64     32      16      8      4      2       1
Hex value      80     40     20      10      8      4      2       1
Speed (Mbps)   N/A    N/A    1000    N/A     100    100    10      10
Duplex                       Full            Full   Half   Full    Half
+============== ====== ====== ======= ======= ====== ====== ======= ======
-Some examples of using AutoNeg:
+Some examples of using AutoNeg::
modprobe e1000 AutoNeg=0x01 (Restricts autonegotiation to 10 Half)
modprobe e1000 AutoNeg=1 (Same as above)
@@ -354,8 +389,9 @@ previously mentioned to force the adapter to the same speed and duplex.
Additional Configurations
=========================
- Jumbo Frames
- ------------
+Jumbo Frames
+------------
+
Jumbo Frames support is enabled by changing the MTU to a value larger than
the default of 1500. Use the ifconfig command to increase the MTU size.
For example::
@@ -367,11 +403,11 @@ Additional Configurations
MTU=9000
- to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example
- applies to the Red Hat distributions; other distributions may store this
- setting in a different location.
+ to the file /etc/sysconfig/network-scripts/ifcfg-eth<x>. This example
+ applies to the Red Hat distributions; other distributions may store this
+ setting in a different location.
- Notes:
+Notes:
Degradation in throughput performance may be observed in some Jumbo frames
environments. If this is observed, increasing the application's socket buffer
size and/or increasing the /proc/sys/net/ipv4/tcp_*mem entry values may help.
@@ -385,12 +421,14 @@ Additional Configurations
poor performance or loss of link.
- Adapters based on the Intel(R) 82542 and 82573V/E controller do not
- support Jumbo Frames. These correspond to the following product names:
+ support Jumbo Frames. These correspond to the following product names::
+
Intel(R) PRO/1000 Gigabit Server Adapter
Intel(R) PRO/1000 PM Network Connection
- ethtool
- -------
+ethtool
+-------
+
The driver utilizes the ethtool interface for driver configuration and
diagnostics, as well as displaying statistical information. The ethtool
version 1.6 or later is required for this functionality.
@@ -398,8 +436,9 @@ Additional Configurations
The latest release of ethtool can be found from
https://www.kernel.org/pub/software/network/ethtool/
- Enabling Wake on LAN* (WoL)
- ---------------------------
+Enabling Wake on LAN* (WoL)
+---------------------------
+
WoL is configured through the ethtool* utility.
WoL will be enabled on the system during the next shut down or reboot.
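For the module parameters documented above, the e1000 driver follows the usual Intel convention of accepting one comma-separated value per installed adapter, so per-port settings can be given at load time. A hedged sketch with illustrative values (3000 falls inside the documented 100-100000 range for InterruptThrottleRate):

    # two adapters, both limited to roughly 3000 interrupts per second
    modprobe e1000 InterruptThrottleRate=3000,3000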
diff --git a/Documentation/networking/strparser.txt b/Documentation/networking/strparser.txt
index 13081b3decef..a7d354ddda7b 100644
--- a/Documentation/networking/strparser.txt
+++ b/Documentation/networking/strparser.txt
@@ -48,7 +48,7 @@ void strp_pause(struct strparser *strp)
Temporarily pause a stream parser. Message parsing is suspended
and no new messages are delivered to the upper layer.
-void strp_pause(struct strparser *strp)
+void strp_unpause(struct strparser *strp)
Unpause a paused stream parser.
diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt
index e73bcf9cb5f3..7ffea6aa22e3 100644
--- a/Documentation/trace/histogram.txt
+++ b/Documentation/trace/histogram.txt
@@ -1729,35 +1729,35 @@ If a variable isn't a key variable or prefixed with 'vals=', the
associated event field will be saved in a variable but won't be summed
as a value:
- # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+ # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
Multiple variables can be assigned at the same time. The below would
result in both ts0 and b being created as variables, with both
common_timestamp and field1 additionally being summed as values:
- # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+ # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
event/trigger
Note that variable assignments can appear either preceding or
following their use. The command below behaves identically to the
command above:
- # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+ # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
event/trigger
Any number of variables not bound to a 'vals=' prefix can also be
assigned by simply separating them with colons. Below is the same
thing but without the values being summed in the histogram:
- # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+ # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
Variables set as above can be referenced and used in expressions on
another event.
For example, here's how a latency can be calculated:
- # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
- # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+ # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
In the first line above, the event's timestamp is saved into the
variable ts0. In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@ yet another variable, 'wakeup_lat'. The hist trigger below in turn
makes use of the wakeup_lat variable to compute a combined latency
using the same key and variable from yet another event:
- # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+ # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
2.2.2 Synthetic Events
----------------------
@@ -1807,10 +1807,11 @@ the command that defined it with a '!':
At this point, there isn't yet an actual 'wakeup_latency' event
instantiated in the event subsystem - for this to happen, a 'hist
trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.
-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:
# echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
/sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@ hist trigger specification.
back to that pid, the timestamp difference is calculated. If the
resulting latency, stored in wakeup_lat, exceeds the current
maximum latency, the values specified in the save() fields are
- recoreded:
+ recorded:
# echo 'hist:keys=pid:ts0=common_timestamp.usecs \
if comm=="cyclictest"' >> \
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
index 635e57493709..b8cb38a98c19 100644
--- a/Documentation/usb/gadget_configfs.txt
+++ b/Documentation/usb/gadget_configfs.txt
@@ -226,7 +226,7 @@ $ rm configs/<config name>.<number>/<function>
where <config name>.<number> specifies the configuration and <function> is
a symlink to a function being removed from the configuration, e.g.:
-$ rm configfs/c.1/ncm.usb0
+$ rm configs/c.1/ncm.usb0
...
...
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 495b7742ab58..d10944e619d3 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4610,7 +4610,7 @@ This capability indicates that kvm will implement the interfaces to handle
reset, migration and nested KVM for branch prediction blocking. The stfle
facility 82 should not be provided to the guest without this capability.
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
Architectures: x86
diff --git a/MAINTAINERS b/MAINTAINERS
index 9d5eeff51b5f..32fbc6f732d4 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -581,7 +581,7 @@ W: https://www.infradead.org/~dhowells/kafs/
AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
-T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
+T: git git://anongit.freedesktop.org/drm/drm
S: Maintained
F: drivers/char/agp/
F: include/linux/agp*
@@ -2523,7 +2523,7 @@ S: Supported
F: drivers/scsi/esas2r
ATUSB IEEE 802.15.4 RADIO DRIVER
-M: Stefan Schmidt <stefan@osg.samsung.com>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
S: Maintained
F: drivers/net/ieee802154/atusb.c
@@ -2971,9 +2971,13 @@ N: bcm585*
N: bcm586*
N: bcm88312
N: hr2
-F: arch/arm64/boot/dts/broadcom/ns2*
+N: stingray
+F: arch/arm64/boot/dts/broadcom/northstar2/*
+F: arch/arm64/boot/dts/broadcom/stingray/*
F: drivers/clk/bcm/clk-ns*
+F: drivers/clk/bcm/clk-sr*
F: drivers/pinctrl/bcm/pinctrl-ns*
+F: include/dt-bindings/clock/bcm-sr*
BROADCOM KONA GPIO DRIVER
M: Ray Jui <rjui@broadcom.com>
@@ -4360,12 +4364,7 @@ L: iommu@lists.linux-foundation.org
T: git git://git.infradead.org/users/hch/dma-mapping.git
W: http://git.infradead.org/users/hch/dma-mapping.git
S: Supported
-F: lib/dma-debug.c
-F: lib/dma-direct.c
-F: lib/dma-noncoherent.c
-F: lib/dma-virt.c
-F: drivers/base/dma-mapping.c
-F: drivers/base/dma-coherent.c
+F: kernel/dma/
F: include/asm-generic/dma-mapping.h
F: include/linux/dma-direct.h
F: include/linux/dma-mapping.h
@@ -4461,6 +4460,7 @@ F: Documentation/blockdev/drbd/
DRIVER CORE, KOBJECTS, DEBUGFS AND SYSFS
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+R: "Rafael J. Wysocki" <rafael@kernel.org>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/driver-core.git
S: Supported
F: Documentation/kobject.txt
@@ -4631,7 +4631,7 @@ F: include/uapi/drm/vmwgfx_drm.h
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
-T: git git://people.freedesktop.org/~airlied/linux
+T: git git://anongit.freedesktop.org/drm/drm
B: https://bugs.freedesktop.org/
C: irc://chat.freenode.net/dri-devel
S: Maintained
@@ -5674,7 +5674,7 @@ F: drivers/crypto/caam/
F: Documentation/devicetree/bindings/crypto/fsl-sec4.txt
FREESCALE DIU FRAMEBUFFER DRIVER
-M: Timur Tabi <timur@tabi.org>
+M: Timur Tabi <timur@kernel.org>
L: linux-fbdev@vger.kernel.org
S: Maintained
F: drivers/video/fbdev/fsl-diu-fb.*
@@ -5774,7 +5774,7 @@ S: Maintained
F: drivers/net/wan/fsl_ucc_hdlc*
FREESCALE QUICC ENGINE UCC UART DRIVER
-M: Timur Tabi <timur@tabi.org>
+M: Timur Tabi <timur@kernel.org>
L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/tty/serial/ucc_uart.c
@@ -5790,7 +5790,6 @@ F: include/linux/fsl/
FREESCALE SOC FS_ENET DRIVER
M: Pantelis Antoniou <pantelis.antoniou@gmail.com>
-M: Vitaly Bordug <vbordug@ru.mvista.com>
L: linuxppc-dev@lists.ozlabs.org
L: netdev@vger.kernel.org
S: Maintained
@@ -5798,7 +5797,7 @@ F: drivers/net/ethernet/freescale/fs_enet/
F: include/linux/fs_enet_pd.h
FREESCALE SOC SOUND DRIVERS
-M: Timur Tabi <timur@tabi.org>
+M: Timur Tabi <timur@kernel.org>
M: Nicolin Chen <nicoleotsuka@gmail.com>
M: Xiubo Li <Xiubo.Lee@gmail.com>
R: Fabio Estevam <fabio.estevam@nxp.com>
@@ -6909,7 +6908,7 @@ F: drivers/clk/clk-versaclock5.c
IEEE 802.15.4 SUBSYSTEM
M: Alexander Aring <alex.aring@gmail.com>
-M: Stefan Schmidt <stefan@osg.samsung.com>
+M: Stefan Schmidt <stefan@datenfreihafen.org>
L: linux-wpan@vger.kernel.org
W: http://wpan.cakelab.org/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sschmidt/wpan.git
@@ -7096,6 +7095,7 @@ F: include/uapi/linux/input.h
F: include/uapi/linux/input-event-codes.h
F: include/linux/input/
F: Documentation/devicetree/bindings/input/
+F: Documentation/devicetree/bindings/serio/
F: Documentation/input/
INPUT MULTITOUCH (MT) PROTOCOL
@@ -8629,7 +8629,7 @@ MARVELL MWIFIEX WIRELESS DRIVER
M: Amitkumar Karwar <amitkarwar@gmail.com>
M: Nishant Sarmukadam <nishants@marvell.com>
M: Ganapathi Bhat <gbhat@marvell.com>
-M: Xinming Hu <huxm@marvell.com>
+M: Xinming Hu <huxinming820@gmail.com>
L: linux-wireless@vger.kernel.org
S: Maintained
F: drivers/net/wireless/marvell/mwifiex/
@@ -9075,7 +9075,7 @@ S: Maintained
F: drivers/usb/mtu3/
MEGACHIPS STDPXXXX-GE-B850V3-FW LVDS/DP++ BRIDGES
-M: Peter Senna Tschudin <peter.senna@collabora.com>
+M: Peter Senna Tschudin <peter.senna@gmail.com>
M: Martin Donnelly <martin.donnelly@ge.com>
M: Martyn Welch <martyn.welch@collabora.co.uk>
S: Maintained
@@ -9756,6 +9756,11 @@ L: linux-scsi@vger.kernel.org
S: Maintained
F: drivers/scsi/NCR_D700.*
+NCSI LIBRARY:
+M: Samuel Mendoza-Jonas <sam@mendozajonas.com>
+S: Maintained
+F: net/ncsi/
+
NCT6775 HARDWARE MONITOR DRIVER
M: Guenter Roeck <linux@roeck-us.net>
L: linux-hwmon@vger.kernel.org
@@ -9882,6 +9887,7 @@ M: Andrew Lunn <andrew@lunn.ch>
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
M: Florian Fainelli <f.fainelli@gmail.com>
S: Maintained
+F: Documentation/devicetree/bindings/net/dsa/
F: net/dsa/
F: include/net/dsa.h
F: include/linux/dsa/
@@ -10208,11 +10214,13 @@ F: sound/soc/codecs/sgtl5000*
NXP TDA998X DRM DRIVER
M: Russell King <linux@armlinux.org.uk>
-S: Supported
+S: Maintained
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
F: drivers/gpu/drm/i2c/tda998x_drv.c
F: include/drm/i2c/tda998x.h
+F: include/dt-bindings/display/tda998x.h
+K: "nxp,tda998x"
NXP TFA9879 DRIVER
M: Peter Rosin <peda@axentia.se>
@@ -11476,6 +11484,15 @@ W: http://wireless.kernel.org/en/users/Drivers/p54
S: Obsolete
F: drivers/net/wireless/intersil/prism54/
+PROC FILESYSTEM
+R: Alexey Dobriyan <adobriyan@gmail.com>
+L: linux-kernel@vger.kernel.org
+L: linux-fsdevel@vger.kernel.org
+S: Maintained
+F: fs/proc/
+F: include/linux/proc_fs.h
+F: tools/testing/selftests/proc/
+
PROC SYSCTL
M: "Luis R. Rodriguez" <mcgrof@kernel.org>
M: Kees Cook <keescook@chromium.org>
@@ -11808,9 +11825,9 @@ F: Documentation/devicetree/bindings/opp/kryo-cpufreq.txt
F: drivers/cpufreq/qcom-cpufreq-kryo.c
QUALCOMM EMAC GIGABIT ETHERNET DRIVER
-M: Timur Tabi <timur@codeaurora.org>
+M: Timur Tabi <timur@kernel.org>
L: netdev@vger.kernel.org
-S: Supported
+S: Maintained
F: drivers/net/ethernet/qualcomm/emac/
QUALCOMM HEXAGON ARCHITECTURE
@@ -11821,7 +11838,7 @@ S: Supported
F: arch/hexagon/
QUALCOMM HIDMA DRIVER
-M: Sinan Kaya <okaya@codeaurora.org>
+M: Sinan Kaya <okaya@kernel.org>
L: linux-arm-kernel@lists.infradead.org
L: linux-arm-msm@vger.kernel.org
L: dmaengine@vger.kernel.org
@@ -13648,7 +13665,7 @@ M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
L: iommu@lists.linux-foundation.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb.git
S: Supported
-F: lib/swiotlb.c
+F: kernel/dma/swiotlb.c
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
@@ -15572,9 +15589,17 @@ M: x86@kernel.org
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
+F: Documentation/devicetree/bindings/x86/
F: Documentation/x86/
F: arch/x86/
+X86 ENTRY CODE
+M: Andy Lutomirski <luto@kernel.org>
+L: linux-kernel@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/asm
+S: Maintained
+F: arch/x86/entry/
+
X86 MCE INFRASTRUCTURE
M: Tony Luck <tony.luck@intel.com>
M: Borislav Petkov <bp@alien8.de>
@@ -15597,7 +15622,7 @@ F: drivers/platform/x86/
F: drivers/platform/olpc/
X86 VDSO
-M: Andy Lutomirski <luto@amacapital.net>
+M: Andy Lutomirski <luto@kernel.org>
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
S: Maintained
diff --git a/Makefile b/Makefile
index ca2af1ab91eb..85f3481a56d6 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
NAME = Merciless Moray
# *DOCUMENTATION*
@@ -353,9 +353,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
else if [ -x /bin/bash ]; then echo /bin/bash; \
else echo sh; fi ; fi)
-HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS)
-HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS)
-HOST_LFS_LIBS := $(shell getconf LFS_LIBS)
+HOST_LFS_CFLAGS := $(shell getconf LFS_CFLAGS 2>/dev/null)
+HOST_LFS_LDFLAGS := $(shell getconf LFS_LDFLAGS 2>/dev/null)
+HOST_LFS_LIBS := $(shell getconf LFS_LIBS 2>/dev/null)
HOSTCC = gcc
HOSTCXX = g++
@@ -507,11 +507,6 @@ ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-goto.sh $(CC) $(KBUILD_CFLA
KBUILD_AFLAGS += -DCC_HAVE_ASM_GOTO
endif
-ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/cc-can-link.sh $(CC)), y)
- CC_CAN_LINK := y
- export CC_CAN_LINK
-endif
-
# The expansion should be delayed until arch/$(SRCARCH)/Makefile is included.
# Some architectures define CROSS_COMPILE in arch/$(SRCARCH)/Makefile.
# CC_VERSION_TEXT is referenced from Kconfig (so it needs export),
@@ -1717,6 +1712,6 @@ endif # skip-makefile
PHONY += FORCE
FORCE:
-# Declare the contents of the .PHONY variable as phony. We keep that
+# Declare the contents of the PHONY variable as phony. We keep that
# information in a variable so we can use it in if_changed and friends.
.PHONY: $(PHONY)
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 0c4805a572c8..04a4a138ed13 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -555,11 +555,6 @@ config SMP
If you don't know what to do here, say N.
-config HAVE_DEC_LOCK
- bool
- depends on SMP
- default y
-
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index 6e921754c8fc..c210a25dd6da 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1180,13 +1180,10 @@ SYSCALL_DEFINE2(osf_getrusage, int, who, struct rusage32 __user *, ru)
SYSCALL_DEFINE4(osf_wait4, pid_t, pid, int __user *, ustatus, int, options,
struct rusage32 __user *, ur)
{
- unsigned int status = 0;
struct rusage r;
- long err = kernel_wait4(pid, &status, options, &r);
+ long err = kernel_wait4(pid, ustatus, options, &r);
if (err <= 0)
return err;
- if (put_user(status, ustatus))
- return -EFAULT;
if (!ur)
return err;
if (put_tv_to_tv32(&ur->ru_utime, &r.ru_utime))
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 04f9729de57c..854d5e79979e 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -35,8 +35,6 @@ lib-y = __divqu.o __remqu.o __divlu.o __remlu.o \
callback_srm.o srm_puts.o srm_printk.o \
fls.o
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
# The division routines are built from single source, with different defines.
AFLAGS___divqu.o = -DDIV
AFLAGS___remqu.o = -DREM
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644
index a117707f57fe..000000000000
--- a/arch/alpha/lib/dec_and_lock.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- *
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
- asm (".text \n\
- .global _atomic_dec_and_lock \n\
- .ent _atomic_dec_and_lock \n\
- .align 4 \n\
-_atomic_dec_and_lock: \n\
- .prologue 0 \n\
-1: ldl_l $1, 0($16) \n\
- subl $1, 1, $1 \n\
- beq $1, 2f \n\
- stl_c $1, 0($16) \n\
- beq $1, 4f \n\
- mb \n\
- clr $0 \n\
- ret \n\
-2: br $29, 3f \n\
-3: ldgp $29, 0($29) \n\
- br $atomic_dec_and_lock_1..ng \n\
- .subsection 2 \n\
-4: br 1b \n\
- .previous \n\
- .end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
- /* Slow path */
- spin_lock(lock);
- if (atomic_dec_and_test(atomic))
- return 1;
- spin_unlock(lock);
- return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index e81bcd271be7..9cf59fc60eab 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -413,7 +413,7 @@ config ARC_HAS_DIV_REM
config ARC_HAS_ACCL_REGS
bool "Reg Pair ACCL:ACCH (FPU and/or MPY > 6)"
- default n
+ default y
help
Depending on the configuration, CPU can contain accumulator reg-pair
(also referred to as r58:r59). These can also be used by gcc as GPR so
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index d37f49d6a27f..6c1b20dd76ad 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -16,7 +16,7 @@ endif
KBUILD_DEFCONFIG := nsim_700_defconfig
-cflags-y += -fno-common -pipe -fno-builtin -D__linux__
+cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
@@ -140,16 +140,3 @@ dtbs: scripts
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
-
-# Hacks to enable final link due to absence of link-time branch relexation
-# and gcc choosing optimal(shorter) branches at -O3
-#
-# vineetg Feb 2010: -mlong-calls switched off for overall kernel build
-# However lib/decompress_inflate.o (.init.text) calls
-# zlib_inflate_workspacesize (.text) causing relocation errors.
-# Thus forcing all exten calls in this file to be long calls
-export CFLAGS_decompress_inflate.o = -mmedium-calls
-export CFLAGS_initramfs.o = -mmedium-calls
-ifdef CONFIG_SMP
-export CFLAGS_core.o = -mmedium-calls
-endif
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index 09f85154c5a4..a635ea972304 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 09fed3ef22b6..aa507e423075 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index ea2f6d817d1a..eba07f468654 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/haps_hs_defconfig b/arch/arc/configs/haps_hs_defconfig
index ab231c040efe..098b19fbaa51 100644
--- a/arch/arc/configs/haps_hs_defconfig
+++ b/arch/arc/configs/haps_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
diff --git a/arch/arc/configs/haps_hs_smp_defconfig b/arch/arc/configs/haps_hs_smp_defconfig
index cf449cbf440d..0104c404d897 100644
--- a/arch/arc/configs/haps_hs_smp_defconfig
+++ b/arch/arc/configs/haps_hs_smp_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/hsdk_defconfig b/arch/arc/configs/hsdk_defconfig
index 1b54c72f4296..6491be0ddbc9 100644
--- a/arch/arc/configs/hsdk_defconfig
+++ b/arch/arc/configs/hsdk_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
# CONFIG_VM_EVENT_COUNTERS is not set
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 31c2c70b34a1..99e05cf63fca 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index a578c721d50f..0dc4f9b737e7 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 37d7395f3272..be3c30a15e54 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -9,7 +9,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 1e1470e2a7f0..3a74b9b21772 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index 084a6e42685b..ea2834b4dc1d 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -11,7 +11,6 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_KALLSYMS_ALL=y
CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index f36d47990415..80a5a1b4924b 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE="../arc_initramfs_hs/"
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
CONFIG_KPROBES=y
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 1aca2e8fd1ba..2cc87f909747 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -56,7 +56,6 @@ CONFIG_STMMAC_ETH=y
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
-CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
diff --git a/arch/arc/include/asm/entry-compact.h b/arch/arc/include/asm/entry-compact.h
index ec36d5b6d435..29f3988c9424 100644
--- a/arch/arc/include/asm/entry-compact.h
+++ b/arch/arc/include/asm/entry-compact.h
@@ -234,6 +234,9 @@
POP gp
RESTORE_R12_TO_R0
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld r25, [sp, 12]
+#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
.endm
@@ -315,6 +318,9 @@
POP gp
RESTORE_R12_TO_R0
+#ifdef CONFIG_ARC_CURR_IN_REG
+ ld r25, [sp, 12]
+#endif
ld sp, [sp] /* restore original sp */
/* orig_r0, ECR, user_r25 skipped automatically */
.endm
diff --git a/arch/arc/include/asm/entry.h b/arch/arc/include/asm/entry.h
index 51597f344a62..302b0db8ea2b 100644
--- a/arch/arc/include/asm/entry.h
+++ b/arch/arc/include/asm/entry.h
@@ -86,9 +86,6 @@
POP r1
POP r0
-#ifdef CONFIG_ARC_CURR_IN_REG
- ld r25, [sp, 12]
-#endif
.endm
/*--------------------------------------------------------------
diff --git a/arch/arc/include/asm/mach_desc.h b/arch/arc/include/asm/mach_desc.h
index c28e6c347b49..871f3cb16af9 100644
--- a/arch/arc/include/asm/mach_desc.h
+++ b/arch/arc/include/asm/mach_desc.h
@@ -34,9 +34,7 @@ struct machine_desc {
const char *name;
const char **dt_compat;
void (*init_early)(void);
-#ifdef CONFIG_SMP
void (*init_per_cpu)(unsigned int);
-#endif
void (*init_machine)(void);
void (*init_late)(void);
diff --git a/arch/arc/include/asm/page.h b/arch/arc/include/asm/page.h
index 109baa06831c..09ddddf71cc5 100644
--- a/arch/arc/include/asm/page.h
+++ b/arch/arc/include/asm/page.h
@@ -105,7 +105,7 @@ typedef pte_t * pgtable_t;
#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
/* Default Permissions for stack/heaps pages (Non Executable) */
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE)
+#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define WANT_PAGE_VIRTUAL 1
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 8ec5599a0957..cf4be70d5892 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -377,7 +377,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
/* Decode a PTE containing swap "identifier "into constituents */
#define __swp_type(pte_lookalike) (((pte_lookalike).val) & 0x1f)
-#define __swp_offset(pte_lookalike) ((pte_lookalike).val << 13)
+#define __swp_offset(pte_lookalike) ((pte_lookalike).val >> 13)
/* NOPs, to keep generic kernel happy */
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
diff --git a/arch/arc/kernel/irq.c b/arch/arc/kernel/irq.c
index 538b36afe89e..62b185057c04 100644
--- a/arch/arc/kernel/irq.c
+++ b/arch/arc/kernel/irq.c
@@ -31,10 +31,10 @@ void __init init_IRQ(void)
/* a SMP H/w block could do IPI IRQ request here */
if (plat_smp_ops.init_per_cpu)
plat_smp_ops.init_per_cpu(smp_processor_id());
+#endif
if (machine_desc->init_per_cpu)
machine_desc->init_per_cpu(smp_processor_id());
-#endif
}
/*
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 5ac3b547453f..4674541eba3f 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -47,7 +47,8 @@ SYSCALL_DEFINE0(arc_gettls)
SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
{
struct pt_regs *regs = current_pt_regs();
- int uval = -EFAULT;
+ u32 uval;
+ int ret;
/*
* This is only for old cores lacking LLOCK/SCOND, which by definition
@@ -60,23 +61,47 @@ SYSCALL_DEFINE3(arc_usr_cmpxchg, int *, uaddr, int, expected, int, new)
/* Z indicates to userspace if operation succeeded */
regs->status32 &= ~STATUS_Z_MASK;
- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
- return -EFAULT;
+ ret = access_ok(VERIFY_WRITE, uaddr, sizeof(*uaddr));
+ if (!ret)
+ goto fail;
+again:
preempt_disable();
- if (__get_user(uval, uaddr))
- goto done;
+ ret = __get_user(uval, uaddr);
+ if (ret)
+ goto fault;
- if (uval == expected) {
- if (!__put_user(new, uaddr))
- regs->status32 |= STATUS_Z_MASK;
- }
+ if (uval != expected)
+ goto out;
-done:
- preempt_enable();
+ ret = __put_user(new, uaddr);
+ if (ret)
+ goto fault;
+
+ regs->status32 |= STATUS_Z_MASK;
+out:
+ preempt_enable();
return uval;
+
+fault:
+ preempt_enable();
+
+ if (unlikely(ret != -EFAULT))
+ goto fail;
+
+ down_read(&current->mm->mmap_sem);
+ ret = fixup_user_fault(current, current->mm, (unsigned long) uaddr,
+ FAULT_FLAG_WRITE, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (likely(!ret))
+ goto again;
+
+fail:
+ force_sig(SIGSEGV, current);
+ return ret;
}
#ifdef CONFIG_ISA_ARCV2
diff --git a/arch/arc/plat-hsdk/Kconfig b/arch/arc/plat-hsdk/Kconfig
index 19ab3cf98f0f..9356753c2ed8 100644
--- a/arch/arc/plat-hsdk/Kconfig
+++ b/arch/arc/plat-hsdk/Kconfig
@@ -7,5 +7,8 @@
menuconfig ARC_SOC_HSDK
bool "ARC HS Development Kit SOC"
+ depends on ISA_ARCV2
+ select ARC_HAS_ACCL_REGS
select CLK_HSDK
select RESET_HSDK
+ select MIGHT_HAVE_PCI
diff --git a/arch/arc/plat-hsdk/platform.c b/arch/arc/plat-hsdk/platform.c
index 2958aedb649a..2588b842407c 100644
--- a/arch/arc/plat-hsdk/platform.c
+++ b/arch/arc/plat-hsdk/platform.c
@@ -42,6 +42,66 @@ static void __init hsdk_init_per_cpu(unsigned int cpu)
#define SDIO_UHS_REG_EXT (SDIO_BASE + 0x108)
#define SDIO_UHS_REG_EXT_DIV_2 (2 << 30)
+#define HSDK_GPIO_INTC (ARC_PERIPHERAL_BASE + 0x3000)
+
+static void __init hsdk_enable_gpio_intc_wire(void)
+{
+ /*
+ * Peripherals on CPU Card are wired to cpu intc via intermediate
+ * DW APB GPIO blocks (mainly for debouncing)
+ *
+ * ---------------------
+ * | snps,archs-intc |
+ * ---------------------
+ * |
+ * ----------------------
+ * | snps,archs-idu-intc |
+ * ----------------------
+ * | | | | |
+ * | [eth] [USB] [... other peripherals]
+ * |
+ * -------------------
+ * | snps,dw-apb-intc |
+ * -------------------
+ * | | | |
+ * [Bt] [HAPS] [... other peripherals]
+ *
+ * Current implementation of "irq-dw-apb-ictl" driver doesn't work well
+ * with stacked INTCs. In particular, problems happen if its master INTC
+ * is not yet instantiated. See the discussion here -
+ * https://lkml.org/lkml/2015/3/4/755
+ *
+ * So set up the first GPIO block as a passive pass-through and hide it from
+ * the DT hardware topology - connect the intc directly to the cpu intc.
+ * The GPIO "wire" nevertheless needs to be initialized (here).
+ *
+ * One side advantage is that peripheral interrupt handling avoids one
+ * nested intc ISR hop.
+ *
+ * According to HSDK User's Manual [1], "Table 2 Interrupt Mapping"
+ * we have the following GPIO input lines used as sources of interrupt:
+ * - GPIO[0] - Bluetooth interrupt of RS9113 module
+ * - GPIO[2] - HAPS interrupt (on HapsTrak 3 connector)
+ * - GPIO[3] - Audio codec (MAX9880A) interrupt
+ * - GPIO[8-23] - Available on Arduino and PMOD_x headers
+ * For now there's no use of Arduino and PMOD_x headers in Linux
+ * use-case so we only enable lines 0, 2 and 3.
+ *
+ * [1] https://github.com/foss-for-synopsys-dwc-arc-processors/ARC-Development-Systems-Forum/wiki/docs/ARC_HSDK_User_Guide.pdf
+ */
+#define GPIO_INTEN (HSDK_GPIO_INTC + 0x30)
+#define GPIO_INTMASK (HSDK_GPIO_INTC + 0x34)
+#define GPIO_INTTYPE_LEVEL (HSDK_GPIO_INTC + 0x38)
+#define GPIO_INT_POLARITY (HSDK_GPIO_INTC + 0x3c)
+#define GPIO_INT_CONNECTED_MASK 0x0d
+
+ iowrite32(0xffffffff, (void __iomem *) GPIO_INTMASK);
+ iowrite32(~GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTMASK);
+ iowrite32(0x00000000, (void __iomem *) GPIO_INTTYPE_LEVEL);
+ iowrite32(0xffffffff, (void __iomem *) GPIO_INT_POLARITY);
+ iowrite32(GPIO_INT_CONNECTED_MASK, (void __iomem *) GPIO_INTEN);
+}
+
static void __init hsdk_init_early(void)
{
/*
@@ -62,6 +122,8 @@ static void __init hsdk_init_early(void)
* minimum possible div-by-2.
*/
iowrite32(SDIO_UHS_REG_EXT_DIV_2, (void __iomem *) SDIO_UHS_REG_EXT);
+
+ hsdk_enable_gpio_intc_wire();
}
static const char *hsdk_compat[] __initconst = {
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 54eeb8d00bc6..843edfd000be 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1245,8 +1245,14 @@ config PCI
VESA. If you have PCI, say Y, otherwise N.
config PCI_DOMAINS
- bool
+ bool "Support for multiple PCI domains"
depends on PCI
+ help
+ Enable PCI domains kernel management. Say Y if your machine
+ has a PCI bus hierarchy that requires more than one PCI
+ domain (aka segment) to be correctly managed. Say N otherwise.
+
+ If you don't know what to do here, say N.
config PCI_DOMAINS_GENERIC
def_bool PCI_DOMAINS
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f9e8667f5886..73b514dddf65 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -168,7 +168,6 @@
AM33XX_IOPAD(0x8f0, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_dat3.mmc0_dat3 */
AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_cmd.mmc0_cmd */
AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* mmc0_clk.mmc0_clk */
- AM33XX_IOPAD(0x9a0, PIN_INPUT | MUX_MODE4) /* mcasp0_aclkr.mmc0_sdwp */
>;
};
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
index ca294914bbb1..23ea381d363f 100644
--- a/arch/arm/boot/dts/am3517.dtsi
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -39,6 +39,8 @@
ti,davinci-ctrl-ram-size = <0x2000>;
ti,davinci-rmii-en = /bits/ 8 <1>;
local-mac-address = [ 00 00 00 00 00 00 ];
+ clocks = <&emac_ick>;
+ clock-names = "ick";
};
davinci_mdio: ethernet@5c030000 {
@@ -49,6 +51,8 @@
bus_freq = <1000000>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&emac_fck>;
+ clock-names = "fck";
};
uart4: serial@4809e000 {
@@ -87,6 +91,11 @@
};
};
+/* Table 5-79 of the TRM shows 480ab000 is reserved */
+&usb_otg_hs {
+ status = "disabled";
+};
+
&iva {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 440351ad0b80..d4be3fd0b6f4 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -610,6 +610,8 @@
touchscreen-size-x = <480>;
touchscreen-size-y = <272>;
+
+ wakeup-source;
};
tlv320aic3106: tlv320aic3106@1b {
diff --git a/arch/arm/boot/dts/armada-385-synology-ds116.dts b/arch/arm/boot/dts/armada-385-synology-ds116.dts
index 6782ce481ac9..d8769956cbfc 100644
--- a/arch/arm/boot/dts/armada-385-synology-ds116.dts
+++ b/arch/arm/boot/dts/armada-385-synology-ds116.dts
@@ -139,7 +139,7 @@
3700 5
3900 6
4000 7>;
- cooling-cells = <2>;
+ #cooling-cells = <2>;
};
gpio-leds {
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 18edc9bc7927..929459c42760 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -547,7 +547,7 @@
thermal: thermal@e8078 {
compatible = "marvell,armada380-thermal";
- reg = <0xe4078 0x4>, <0xe4074 0x4>;
+ reg = <0xe4078 0x4>, <0xe4070 0x8>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/bcm-cygnus.dtsi b/arch/arm/boot/dts/bcm-cygnus.dtsi
index 9fe4f5a6379e..2c4df2d2d4a6 100644
--- a/arch/arm/boot/dts/bcm-cygnus.dtsi
+++ b/arch/arm/boot/dts/bcm-cygnus.dtsi
@@ -216,7 +216,7 @@
reg = <0x18008000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 85 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 85 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@@ -245,7 +245,7 @@
reg = <0x1800b000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 86 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 86 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@@ -256,7 +256,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@@ -278,10 +278,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>,
- <GIC_SPI 97 IRQ_TYPE_NONE>,
- <GIC_SPI 98 IRQ_TYPE_NONE>,
- <GIC_SPI 99 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>;
};
};
@@ -291,7 +291,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <1>;
@@ -313,10 +313,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 102 IRQ_TYPE_NONE>,
- <GIC_SPI 103 IRQ_TYPE_NONE>,
- <GIC_SPI 104 IRQ_TYPE_NONE>,
- <GIC_SPI 105 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>;
};
};
diff --git a/arch/arm/boot/dts/bcm-hr2.dtsi b/arch/arm/boot/dts/bcm-hr2.dtsi
index 3f9cedd8011f..3084a7c95733 100644
--- a/arch/arm/boot/dts/bcm-hr2.dtsi
+++ b/arch/arm/boot/dts/bcm-hr2.dtsi
@@ -264,7 +264,7 @@
reg = <0x38000 0x50>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 95 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
};
@@ -279,7 +279,7 @@
reg = <0x3b000 0x50>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 96 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
};
};
@@ -300,7 +300,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 186 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@@ -322,10 +322,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 182 IRQ_TYPE_NONE>,
- <GIC_SPI 183 IRQ_TYPE_NONE>,
- <GIC_SPI 184 IRQ_TYPE_NONE>,
- <GIC_SPI 185 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 182 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 183 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 184 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 185 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
@@ -336,7 +336,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 192 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <1>;
@@ -358,10 +358,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 188 IRQ_TYPE_NONE>,
- <GIC_SPI 189 IRQ_TYPE_NONE>,
- <GIC_SPI 190 IRQ_TYPE_NONE>,
- <GIC_SPI 191 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 188 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 189 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 190 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 191 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index dcc55aa84583..09ba85046322 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -391,7 +391,7 @@
reg = <0x38000 0x50>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 89 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 89 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
dma-coherent;
status = "disabled";
@@ -496,7 +496,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 131 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@@ -519,10 +519,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 127 IRQ_TYPE_NONE>,
- <GIC_SPI 128 IRQ_TYPE_NONE>,
- <GIC_SPI 129 IRQ_TYPE_NONE>,
- <GIC_SPI 130 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 127 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 128 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 129 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 130 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
@@ -533,7 +533,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 137 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <1>;
@@ -556,10 +556,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 133 IRQ_TYPE_NONE>,
- <GIC_SPI 134 IRQ_TYPE_NONE>,
- <GIC_SPI 135 IRQ_TYPE_NONE>,
- <GIC_SPI 136 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 133 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 134 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 135 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 136 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
@@ -570,7 +570,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic GIC_SPI 143 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <2>;
@@ -593,10 +593,10 @@
compatible = "brcm,iproc-msi";
msi-controller;
interrupt-parent = <&gic>;
- interrupts = <GIC_SPI 139 IRQ_TYPE_NONE>,
- <GIC_SPI 140 IRQ_TYPE_NONE>,
- <GIC_SPI 141 IRQ_TYPE_NONE>,
- <GIC_SPI 142 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 139 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 140 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 141 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 142 IRQ_TYPE_LEVEL_HIGH>;
brcm,pcie-msi-inten;
};
};
diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi
index 9a076c409f4e..ef995e50ee12 100644
--- a/arch/arm/boot/dts/bcm5301x.dtsi
+++ b/arch/arm/boot/dts/bcm5301x.dtsi
@@ -365,7 +365,7 @@
i2c0: i2c@18009000 {
compatible = "brcm,iproc-i2c";
reg = <0x18009000 0x50>;
- interrupts = <GIC_SPI 121 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>;
#address-cells = <1>;
#size-cells = <0>;
clock-frequency = <100000>;
diff --git a/arch/arm/boot/dts/da850.dtsi b/arch/arm/boot/dts/da850.dtsi
index f6f1597b03df..0f4f817a9e22 100644
--- a/arch/arm/boot/dts/da850.dtsi
+++ b/arch/arm/boot/dts/da850.dtsi
@@ -549,11 +549,7 @@
gpio-controller;
#gpio-cells = <2>;
reg = <0x226000 0x1000>;
- interrupts = <42 IRQ_TYPE_EDGE_BOTH
- 43 IRQ_TYPE_EDGE_BOTH 44 IRQ_TYPE_EDGE_BOTH
- 45 IRQ_TYPE_EDGE_BOTH 46 IRQ_TYPE_EDGE_BOTH
- 47 IRQ_TYPE_EDGE_BOTH 48 IRQ_TYPE_EDGE_BOTH
- 49 IRQ_TYPE_EDGE_BOTH 50 IRQ_TYPE_EDGE_BOTH>;
+ interrupts = <42 43 44 45 46 47 48 49 50>;
ti,ngpio = <144>;
ti,davinci-gpio-unbanked = <0>;
status = "disabled";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 9dcd14edc202..e03495a799ce 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1580,7 +1580,6 @@
dr_mode = "otg";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
- snps,dis_metastability_quirk;
};
};
@@ -1608,6 +1607,7 @@
dr_mode = "otg";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
+ snps,dis_metastability_quirk;
};
};
diff --git a/arch/arm/boot/dts/imx51-zii-rdu1.dts b/arch/arm/boot/dts/imx51-zii-rdu1.dts
index df9eca94d812..8a878687197b 100644
--- a/arch/arm/boot/dts/imx51-zii-rdu1.dts
+++ b/arch/arm/boot/dts/imx51-zii-rdu1.dts
@@ -770,7 +770,7 @@
pinctrl_ts: tsgrp {
fsl,pins = <
- MX51_PAD_CSI1_D8__GPIO3_12 0x85
+ MX51_PAD_CSI1_D8__GPIO3_12 0x04
MX51_PAD_CSI1_D9__GPIO3_13 0x85
>;
};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 70483ce72ba6..77f8f030dd07 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -90,7 +90,7 @@
clocks = <&clks IMX6Q_CLK_ECSPI5>,
<&clks IMX6Q_CLK_ECSPI5>;
clock-names = "ipg", "per";
- dmas = <&sdma 11 7 1>, <&sdma 12 7 2>;
+ dmas = <&sdma 11 8 1>, <&sdma 12 8 2>;
dma-names = "rx", "tx";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
index 19a075aee19e..f14df0baf2ab 100644
--- a/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-zii-rdu2.dtsi
@@ -692,7 +692,7 @@
dsa,member = <0 0>;
eeprom-length = <512>;
interrupt-parent = <&gpio6>;
- interrupts = <3 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
interrupt-controller;
#interrupt-cells = <2>;
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index d8b94f47498b..4e4a55aad5c9 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -1344,7 +1344,7 @@
ranges = <0x81000000 0 0 0x08f80000 0 0x00010000 /* downstream I/O */
0x82000000 0 0x08000000 0x08000000 0 0x00f00000>; /* non-prefetchable memory */
num-lanes = <1>;
- interrupts = <GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "msi";
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0x7>;
diff --git a/arch/arm/boot/dts/omap4-droid4-xt894.dts b/arch/arm/boot/dts/omap4-droid4-xt894.dts
index bdf73cbcec3a..e7c3c563ff8f 100644
--- a/arch/arm/boot/dts/omap4-droid4-xt894.dts
+++ b/arch/arm/boot/dts/omap4-droid4-xt894.dts
@@ -159,13 +159,7 @@
dais = <&mcbsp2_port>, <&mcbsp3_port>;
};
-};
-
-&dss {
- status = "okay";
-};
-&gpio6 {
pwm8: dmtimer-pwm-8 {
pinctrl-names = "default";
pinctrl-0 = <&vibrator_direction_pin>;
@@ -192,7 +186,10 @@
pwm-names = "enable", "direction";
direction-duty-cycle-ns = <10000000>;
};
+};
+&dss {
+ status = "okay";
};
&dsi1 {
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 486d4e7433ed..b38f8c240558 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -748,13 +748,13 @@
nand0: nand@ff900000 {
#address-cells = <0x1>;
#size-cells = <0x1>;
- compatible = "denali,denali-nand-dt";
+ compatible = "altr,socfpga-denali-nand";
reg = <0xff900000 0x100000>,
<0xffb80000 0x10000>;
reg-names = "nand_data", "denali_reg";
interrupts = <0x0 0x90 0x4>;
dma-mask = <0xffffffff>;
- clocks = <&nand_clk>;
+ clocks = <&nand_x_clk>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/socfpga_arria10.dtsi b/arch/arm/boot/dts/socfpga_arria10.dtsi
index bead79e4b2aa..791ca15c799e 100644
--- a/arch/arm/boot/dts/socfpga_arria10.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria10.dtsi
@@ -593,8 +593,7 @@
#size-cells = <0>;
reg = <0xffda5000 0x100>;
interrupts = <0 102 4>;
- num-chipselect = <4>;
- bus-num = <0>;
+ num-cs = <4>;
/*32bit_access;*/
tx-dma-channel = <&pdma 16>;
rx-dma-channel = <&pdma 17>;
@@ -633,7 +632,7 @@
nand: nand@ffb90000 {
#address-cells = <1>;
#size-cells = <1>;
- compatible = "denali,denali-nand-dt", "altr,socfpga-denali-nand";
+ compatible = "altr,socfpga-denali-nand";
reg = <0xffb90000 0x72000>,
<0xffb80000 0x10000>;
reg-names = "nand_data", "denali_reg";
diff --git a/arch/arm/common/Makefile b/arch/arm/common/Makefile
index 1e9f7af8f70f..3157be413297 100644
--- a/arch/arm/common/Makefile
+++ b/arch/arm/common/Makefile
@@ -10,7 +10,7 @@ obj-$(CONFIG_DMABOUNCE) += dmabounce.o
obj-$(CONFIG_SHARP_LOCOMO) += locomo.o
obj-$(CONFIG_SHARP_PARAM) += sharpsl_param.o
obj-$(CONFIG_SHARP_SCOOP) += scoop.o
-obj-$(CONFIG_SMP) += secure_cntvoff.o
+obj-$(CONFIG_CPU_V7) += secure_cntvoff.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_MCPM) += mcpm_head.o mcpm_entry.o mcpm_platsmp.o vlock.o
CFLAGS_REMOVE_mcpm_entry.o = -pg
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 054591dc9a00..4cd2f4a2bff4 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -141,9 +141,11 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_NOP_USB_XCEIV=y
CONFIG_USB_GADGET=y
CONFIG_USB_ETH=m
+CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index f70507ab91ee..200ebda47e0c 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -302,6 +302,7 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_CHIPIDEA_ULPI=y
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_GENERIC=y
CONFIG_USB_SERIAL_FTDI_SIO=m
@@ -338,6 +339,7 @@ CONFIG_USB_GADGETFS=m
CONFIG_USB_FUNCTIONFS=m
CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
+CONFIG_USB_ULPI_BUS=y
CONFIG_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 7e1c543162c3..8f6be1982545 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -1,5 +1,4 @@
CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_CGROUPS=y
@@ -10,20 +9,10 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_CMDLINE_PARTITION=y
-CONFIG_ARCH_MULTI_V7=y
-# CONFIG_ARCH_MULTI_V5 is not set
-# CONFIG_ARCH_MULTI_V4 is not set
CONFIG_ARCH_VIRT=y
CONFIG_ARCH_ALPINE=y
CONFIG_ARCH_ARTPEC=y
CONFIG_MACH_ARTPEC6=y
-CONFIG_ARCH_MVEBU=y
-CONFIG_MACH_ARMADA_370=y
-CONFIG_MACH_ARMADA_375=y
-CONFIG_MACH_ARMADA_38X=y
-CONFIG_MACH_ARMADA_39X=y
-CONFIG_MACH_ARMADA_XP=y
-CONFIG_MACH_DOVE=y
CONFIG_ARCH_AT91=y
CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
@@ -32,9 +21,9 @@ CONFIG_ARCH_BCM=y
CONFIG_ARCH_BCM_CYGNUS=y
CONFIG_ARCH_BCM_HR2=y
CONFIG_ARCH_BCM_NSP=y
-CONFIG_ARCH_BCM_21664=y
-CONFIG_ARCH_BCM_281XX=y
CONFIG_ARCH_BCM_5301X=y
+CONFIG_ARCH_BCM_281XX=y
+CONFIG_ARCH_BCM_21664=y
CONFIG_ARCH_BCM2835=y
CONFIG_ARCH_BCM_63XX=y
CONFIG_ARCH_BRCMSTB=y
@@ -43,14 +32,14 @@ CONFIG_MACH_BERLIN_BG2=y
CONFIG_MACH_BERLIN_BG2CD=y
CONFIG_MACH_BERLIN_BG2Q=y
CONFIG_ARCH_DIGICOLOR=y
+CONFIG_ARCH_EXYNOS=y
+CONFIG_EXYNOS5420_MCPM=y
CONFIG_ARCH_HIGHBANK=y
CONFIG_ARCH_HISI=y
CONFIG_ARCH_HI3xxx=y
-CONFIG_ARCH_HIX5HD2=y
CONFIG_ARCH_HIP01=y
CONFIG_ARCH_HIP04=y
-CONFIG_ARCH_KEYSTONE=y
-CONFIG_ARCH_MESON=y
+CONFIG_ARCH_HIX5HD2=y
CONFIG_ARCH_MXC=y
CONFIG_SOC_IMX50=y
CONFIG_SOC_IMX51=y
@@ -60,29 +49,30 @@ CONFIG_SOC_IMX6SL=y
CONFIG_SOC_IMX6SX=y
CONFIG_SOC_IMX6UL=y
CONFIG_SOC_IMX7D=y
-CONFIG_SOC_VF610=y
CONFIG_SOC_LS1021A=y
+CONFIG_SOC_VF610=y
+CONFIG_ARCH_KEYSTONE=y
+CONFIG_ARCH_MEDIATEK=y
+CONFIG_ARCH_MESON=y
+CONFIG_ARCH_MVEBU=y
+CONFIG_MACH_ARMADA_370=y
+CONFIG_MACH_ARMADA_375=y
+CONFIG_MACH_ARMADA_38X=y
+CONFIG_MACH_ARMADA_39X=y
+CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
CONFIG_ARCH_OMAP3=y
CONFIG_ARCH_OMAP4=y
CONFIG_SOC_OMAP5=y
CONFIG_SOC_AM33XX=y
CONFIG_SOC_AM43XX=y
CONFIG_SOC_DRA7XX=y
+CONFIG_ARCH_SIRF=y
CONFIG_ARCH_QCOM=y
-CONFIG_ARCH_MEDIATEK=y
CONFIG_ARCH_MSM8X60=y
CONFIG_ARCH_MSM8960=y
CONFIG_ARCH_MSM8974=y
CONFIG_ARCH_ROCKCHIP=y
-CONFIG_ARCH_SOCFPGA=y
-CONFIG_PLAT_SPEAR=y
-CONFIG_ARCH_SPEAR13XX=y
-CONFIG_MACH_SPEAR1310=y
-CONFIG_MACH_SPEAR1340=y
-CONFIG_ARCH_STI=y
-CONFIG_ARCH_STM32=y
-CONFIG_ARCH_EXYNOS=y
-CONFIG_EXYNOS5420_MCPM=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_EMEV2=y
CONFIG_ARCH_R7S72100=y
@@ -99,40 +89,33 @@ CONFIG_ARCH_R8A7792=y
CONFIG_ARCH_R8A7793=y
CONFIG_ARCH_R8A7794=y
CONFIG_ARCH_SH73A0=y
+CONFIG_ARCH_SOCFPGA=y
+CONFIG_PLAT_SPEAR=y
+CONFIG_ARCH_SPEAR13XX=y
+CONFIG_MACH_SPEAR1310=y
+CONFIG_MACH_SPEAR1340=y
+CONFIG_ARCH_STI=y
+CONFIG_ARCH_STM32=y
CONFIG_ARCH_SUNXI=y
-CONFIG_ARCH_SIRF=y
CONFIG_ARCH_TEGRA=y
-CONFIG_ARCH_TEGRA_2x_SOC=y
-CONFIG_ARCH_TEGRA_3x_SOC=y
-CONFIG_ARCH_TEGRA_114_SOC=y
-CONFIG_ARCH_TEGRA_124_SOC=y
CONFIG_ARCH_UNIPHIER=y
CONFIG_ARCH_U8500=y
-CONFIG_MACH_HREFV60=y
-CONFIG_MACH_SNOWBALL=y
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y
CONFIG_ARCH_WM8850=y
CONFIG_ARCH_ZYNQ=y
-CONFIG_TRUSTED_FOUNDATIONS=y
-CONFIG_PCI=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCI_DRA7XX=y
-CONFIG_PCI_DRA7XX_EP=y
-CONFIG_PCI_KEYSTONE=y
-CONFIG_PCI_MSI=y
+CONFIG_PCIEPORTBUS=y
CONFIG_PCI_MVEBU=y
CONFIG_PCI_TEGRA=y
CONFIG_PCI_RCAR_GEN2=y
CONFIG_PCIE_RCAR=y
-CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_DRA7XX_EP=y
+CONFIG_PCI_KEYSTONE=y
CONFIG_PCI_ENDPOINT=y
CONFIG_PCI_ENDPOINT_CONFIGFS=y
CONFIG_PCI_EPF_TEST=m
CONFIG_SMP=y
CONFIG_NR_CPUS=16
-CONFIG_HIGHPTE=y
-CONFIG_CMA=y
CONFIG_SECCOMP=y
CONFIG_ARM_APPENDED_DTB=y
CONFIG_ARM_ATAG_DTB_COMPAT=y
@@ -145,14 +128,14 @@ CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=m
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+CONFIG_CPUFREQ_DT=y
CONFIG_ARM_IMX6Q_CPUFREQ=y
CONFIG_QORIQ_CPUFREQ=y
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
-CONFIG_NEON=y
-CONFIG_KERNEL_MODE_NEON=y
CONFIG_ARM_ZYNQ_CPUIDLE=y
CONFIG_ARM_EXYNOS_CPUIDLE=y
+CONFIG_KERNEL_MODE_NEON=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -170,23 +153,13 @@ CONFIG_IPV6_MIP6=m
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_NET_DSA=m
-CONFIG_NET_SWITCHDEV=y
CONFIG_CAN=y
-CONFIG_CAN_RAW=y
-CONFIG_CAN_BCM=y
-CONFIG_CAN_DEV=y
CONFIG_CAN_AT91=m
CONFIG_CAN_FLEXCAN=m
-CONFIG_CAN_RCAR=m
+CONFIG_CAN_SUN4I=y
CONFIG_CAN_XILINXCAN=y
+CONFIG_CAN_RCAR=m
CONFIG_CAN_MCP251X=y
-CONFIG_NET_DSA_BCM_SF2=m
-CONFIG_B53=m
-CONFIG_B53_SPI_DRIVER=m
-CONFIG_B53_MDIO_DRIVER=m
-CONFIG_B53_MMAP_DRIVER=m
-CONFIG_B53_SRAB_DRIVER=m
-CONFIG_CAN_SUN4I=y
CONFIG_BT=m
CONFIG_BT_HCIUART=m
CONFIG_BT_HCIUART_BCM=y
@@ -199,11 +172,9 @@ CONFIG_RFKILL_INPUT=y
CONFIG_RFKILL_GPIO=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=64
CONFIG_OMAP_OCP2SCP=y
CONFIG_SIMPLE_PM_BUS=y
-CONFIG_SUNXI_RSB=y
CONFIG_MTD=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_BLOCK=y
@@ -236,7 +207,6 @@ CONFIG_PCI_ENDPOINT_TEST=m
CONFIG_EEPROM_AT24=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_MULTI_LUN=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
@@ -251,14 +221,20 @@ CONFIG_SATA_MV=y
CONFIG_SATA_RCAR=y
CONFIG_NETDEVICES=y
CONFIG_VIRTIO_NET=y
-CONFIG_HIX5HD2_GMAC=y
+CONFIG_B53_SPI_DRIVER=m
+CONFIG_B53_MDIO_DRIVER=m
+CONFIG_B53_MMAP_DRIVER=m
+CONFIG_B53_SRAB_DRIVER=m
+CONFIG_NET_DSA_BCM_SF2=m
CONFIG_SUN4I_EMAC=y
-CONFIG_MACB=y
CONFIG_BCMGENET=m
CONFIG_BGMAC_BCMA=y
CONFIG_SYSTEMPORT=m
+CONFIG_MACB=y
CONFIG_NET_CALXEDA_XGMAC=y
CONFIG_GIANFAR=y
+CONFIG_HIX5HD2_GMAC=y
+CONFIG_E1000E=y
CONFIG_IGB=y
CONFIG_MV643XX_ETH=y
CONFIG_MVNETA=y
@@ -268,19 +244,17 @@ CONFIG_R8169=y
CONFIG_SH_ETH=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_PLATFORM=y
CONFIG_DWMAC_DWC_QOS_ETH=y
CONFIG_TI_CPSW=y
CONFIG_XILINX_EMACLITE=y
CONFIG_AT803X_PHY=y
-CONFIG_MARVELL_PHY=y
-CONFIG_SMSC_PHY=y
CONFIG_BROADCOM_PHY=y
CONFIG_ICPLUS_PHY=y
-CONFIG_REALTEK_PHY=y
+CONFIG_MARVELL_PHY=y
CONFIG_MICREL_PHY=y
-CONFIG_FIXED_PHY=y
+CONFIG_REALTEK_PHY=y
CONFIG_ROCKCHIP_PHY=y
+CONFIG_SMSC_PHY=y
CONFIG_USB_PEGASUS=y
CONFIG_USB_RTL8152=m
CONFIG_USB_LAN78XX=m
@@ -288,29 +262,29 @@ CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
CONFIG_USB_NET_SMSC95XX=y
CONFIG_BRCMFMAC=m
-CONFIG_RT2X00=m
-CONFIG_RT2800USB=m
CONFIG_MWIFIEX=m
CONFIG_MWIFIEX_SDIO=m
+CONFIG_RT2X00=m
+CONFIG_RT2800USB=m
CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_QT1070=m
CONFIG_KEYBOARD_GPIO=y
CONFIG_KEYBOARD_TEGRA=y
-CONFIG_KEYBOARD_SPEAR=y
+CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_KEYBOARD_ST_KEYSCAN=y
+CONFIG_KEYBOARD_SPEAR=y
CONFIG_KEYBOARD_CROS_EC=m
-CONFIG_KEYBOARD_SAMSUNG=m
CONFIG_MOUSE_PS2_ELANTECH=y
CONFIG_MOUSE_CYAPA=m
CONFIG_MOUSE_ELAN_I2C=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=m
CONFIG_TOUCHSCREEN_MMS114=m
+CONFIG_TOUCHSCREEN_WM97XX=m
CONFIG_TOUCHSCREEN_ST1232=m
CONFIG_TOUCHSCREEN_STMPE=y
CONFIG_TOUCHSCREEN_SUN4I=y
-CONFIG_TOUCHSCREEN_WM97XX=m
CONFIG_INPUT_MISC=y
CONFIG_INPUT_MAX77693_HAPTIC=m
CONFIG_INPUT_MAX8997_HAPTIC=m
@@ -327,13 +301,12 @@ CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_8250_EM=y
CONFIG_SERIAL_8250_MT6577=y
CONFIG_SERIAL_8250_UNIPHIER=y
+CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_ATMEL=y
CONFIG_SERIAL_ATMEL_CONSOLE=y
CONFIG_SERIAL_ATMEL_TTYAT=y
-CONFIG_SERIAL_BCM63XX=y
-CONFIG_SERIAL_BCM63XX_CONSOLE=y
CONFIG_SERIAL_MESON=y
CONFIG_SERIAL_MESON_CONSOLE=y
CONFIG_SERIAL_SAMSUNG=y
@@ -345,15 +318,14 @@ CONFIG_SERIAL_IMX=y
CONFIG_SERIAL_IMX_CONSOLE=y
CONFIG_SERIAL_SH_SCI=y
CONFIG_SERIAL_SH_SCI_NR_UARTS=20
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
-CONFIG_SERIAL_SH_SCI_DMA=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_VT8500=y
CONFIG_SERIAL_VT8500_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_OMAP=y
CONFIG_SERIAL_OMAP_CONSOLE=y
+CONFIG_SERIAL_BCM63XX=y
+CONFIG_SERIAL_BCM63XX_CONSOLE=y
CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_SERIAL_FSL_LPUART=y
@@ -365,12 +337,10 @@ CONFIG_SERIAL_ST_ASC_CONSOLE=y
CONFIG_SERIAL_STM32=y
CONFIG_SERIAL_STM32_CONSOLE=y
CONFIG_SERIAL_DEV_BUS=y
-CONFIG_HVC_DRIVER=y
CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_ST=y
CONFIG_I2C_CHARDEV=y
-CONFIG_I2C_DAVINCI=y
-CONFIG_I2C_MESON=y
-CONFIG_I2C_MUX=y
CONFIG_I2C_ARB_GPIO_CHALLENGE=m
CONFIG_I2C_MUX_PCA954x=y
CONFIG_I2C_MUX_PINCTRL=y
@@ -378,12 +348,13 @@ CONFIG_I2C_DEMUX_PINCTRL=y
CONFIG_I2C_AT91=m
CONFIG_I2C_BCM2835=y
CONFIG_I2C_CADENCE=y
+CONFIG_I2C_DAVINCI=y
CONFIG_I2C_DESIGNWARE_PLATFORM=y
CONFIG_I2C_DIGICOLOR=m
CONFIG_I2C_EMEV2=m
CONFIG_I2C_GPIO=m
-CONFIG_I2C_EXYNOS5=y
CONFIG_I2C_IMX=y
+CONFIG_I2C_MESON=y
CONFIG_I2C_MV64XXX=y
CONFIG_I2C_RIIC=y
CONFIG_I2C_RK3X=y
@@ -427,7 +398,6 @@ CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
CONFIG_PINCTRL_AS3722=y
CONFIG_PINCTRL_PALMAS=y
-CONFIG_PINCTRL_BCM2835=y
CONFIG_PINCTRL_APQ8064=y
CONFIG_PINCTRL_APQ8084=y
CONFIG_PINCTRL_IPQ8064=y
@@ -437,25 +407,33 @@ CONFIG_PINCTRL_MSM8X74=y
CONFIG_PINCTRL_MSM8916=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
CONFIG_PINCTRL_QCOM_SSBI_PMIC=y
-CONFIG_GPIO_GENERIC_PLATFORM=y
CONFIG_GPIO_DAVINCI=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_EM=y
CONFIG_GPIO_RCAR=y
+CONFIG_GPIO_SYSCON=y
CONFIG_GPIO_UNIPHIER=y
CONFIG_GPIO_XILINX=y
CONFIG_GPIO_ZYNQ=y
CONFIG_GPIO_PCA953X=y
CONFIG_GPIO_PCA953X_IRQ=y
CONFIG_GPIO_PCF857X=y
-CONFIG_GPIO_TWL4030=y
CONFIG_GPIO_PALMAS=y
-CONFIG_GPIO_SYSCON=y
CONFIG_GPIO_TPS6586X=y
CONFIG_GPIO_TPS65910=y
+CONFIG_GPIO_TWL4030=y
+CONFIG_POWER_AVS=y
+CONFIG_ROCKCHIP_IODOMAIN=y
+CONFIG_POWER_RESET_AS3722=y
+CONFIG_POWER_RESET_GPIO=y
+CONFIG_POWER_RESET_GPIO_RESTART=y
+CONFIG_POWER_RESET_ST=y
+CONFIG_POWER_RESET_KEYSTONE=y
+CONFIG_POWER_RESET_RMOBILE=y
CONFIG_BATTERY_ACT8945A=y
CONFIG_BATTERY_CPCAP=m
CONFIG_BATTERY_SBS=y
+CONFIG_AXP20X_POWER=m
CONFIG_BATTERY_MAX17040=m
CONFIG_BATTERY_MAX17042=m
CONFIG_CHARGER_CPCAP=m
@@ -464,15 +442,6 @@ CONFIG_CHARGER_MAX77693=m
CONFIG_CHARGER_MAX8997=m
CONFIG_CHARGER_MAX8998=m
CONFIG_CHARGER_TPS65090=y
-CONFIG_AXP20X_POWER=m
-CONFIG_POWER_RESET_AS3722=y
-CONFIG_POWER_RESET_GPIO=y
-CONFIG_POWER_RESET_GPIO_RESTART=y
-CONFIG_POWER_RESET_KEYSTONE=y
-CONFIG_POWER_RESET_RMOBILE=y
-CONFIG_POWER_RESET_ST=y
-CONFIG_POWER_AVS=y
-CONFIG_ROCKCHIP_IODOMAIN=y
CONFIG_SENSORS_IIO_HWMON=y
CONFIG_SENSORS_LM90=y
CONFIG_SENSORS_LM95245=y
@@ -480,14 +449,12 @@ CONFIG_SENSORS_NTC_THERMISTOR=m
CONFIG_SENSORS_PWM_FAN=m
CONFIG_SENSORS_INA2XX=m
CONFIG_CPU_THERMAL=y
-CONFIG_BCM2835_THERMAL=m
-CONFIG_BRCMSTB_THERMAL=m
CONFIG_IMX_THERMAL=y
CONFIG_ROCKCHIP_THERMAL=y
CONFIG_RCAR_THERMAL=y
CONFIG_ARMADA_THERMAL=y
-CONFIG_DAVINCI_WATCHDOG=m
-CONFIG_EXYNOS_THERMAL=m
+CONFIG_BCM2835_THERMAL=m
+CONFIG_BRCMSTB_THERMAL=m
CONFIG_ST_THERMAL_MEMMAP=y
CONFIG_WATCHDOG=y
CONFIG_DA9063_WATCHDOG=m
@@ -495,20 +462,24 @@ CONFIG_XILINX_WATCHDOG=y
CONFIG_ARM_SP805_WATCHDOG=y
CONFIG_AT91SAM9X_WATCHDOG=y
CONFIG_SAMA5D4_WATCHDOG=y
+CONFIG_DW_WATCHDOG=y
+CONFIG_DAVINCI_WATCHDOG=m
CONFIG_ORION_WATCHDOG=y
CONFIG_RN5T618_WATCHDOG=y
-CONFIG_ST_LPC_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_IMX2_WDT=y
+CONFIG_ST_LPC_WATCHDOG=y
CONFIG_TEGRA_WATCHDOG=m
CONFIG_MESON_WATCHDOG=y
-CONFIG_DW_WATCHDOG=y
CONFIG_DIGICOLOR_WATCHDOG=y
CONFIG_RENESAS_WDT=m
-CONFIG_BCM2835_WDT=y
CONFIG_BCM47XX_WDT=y
-CONFIG_BCM7038_WDT=m
+CONFIG_BCM2835_WDT=y
CONFIG_BCM_KONA_WDT=y
+CONFIG_BCM7038_WDT=m
+CONFIG_BCMA_HOST_SOC=y
+CONFIG_BCMA_DRIVER_GMAC_CMN=y
+CONFIG_BCMA_DRIVER_GPIO=y
CONFIG_MFD_ACT8945A=y
CONFIG_MFD_AS3711=y
CONFIG_MFD_AS3722=y
@@ -516,7 +487,6 @@ CONFIG_MFD_ATMEL_FLEXCOM=y
CONFIG_MFD_ATMEL_HLCDC=m
CONFIG_MFD_BCM590XX=y
CONFIG_MFD_AC100=y
-CONFIG_MFD_AXP20X=y
CONFIG_MFD_AXP20X_I2C=y
CONFIG_MFD_AXP20X_RSB=y
CONFIG_MFD_CROS_EC=m
@@ -529,11 +499,11 @@ CONFIG_MFD_MAX77693=m
CONFIG_MFD_MAX8907=y
CONFIG_MFD_MAX8997=y
CONFIG_MFD_MAX8998=y
-CONFIG_MFD_RK808=y
CONFIG_MFD_CPCAP=y
CONFIG_MFD_PM8XXX=y
CONFIG_MFD_QCOM_RPM=y
CONFIG_MFD_SPMI_PMIC=y
+CONFIG_MFD_RK808=y
CONFIG_MFD_RN5T618=y
CONFIG_MFD_SEC_CORE=y
CONFIG_MFD_STMPE=y
@@ -543,10 +513,10 @@ CONFIG_MFD_TPS65217=y
CONFIG_MFD_TPS65218=y
CONFIG_MFD_TPS6586X=y
CONFIG_MFD_TPS65910=y
-CONFIG_REGULATOR_ACT8945A=y
-CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_ACT8865=y
+CONFIG_REGULATOR_ACT8945A=y
CONFIG_REGULATOR_ANATOP=y
+CONFIG_REGULATOR_AB8500=y
CONFIG_REGULATOR_AS3711=y
CONFIG_REGULATOR_AS3722=y
CONFIG_REGULATOR_AXP20X=y
@@ -554,10 +524,7 @@ CONFIG_REGULATOR_BCM590XX=y
CONFIG_REGULATOR_CPCAP=y
CONFIG_REGULATOR_DA9210=y
CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_RK808=y
CONFIG_REGULATOR_GPIO=y
-CONFIG_MFD_SYSCON=y
-CONFIG_POWER_RESET_SYSCON=y
CONFIG_REGULATOR_LP872X=y
CONFIG_REGULATOR_MAX14577=m
CONFIG_REGULATOR_MAX8907=y
@@ -571,7 +538,8 @@ CONFIG_REGULATOR_PALMAS=y
CONFIG_REGULATOR_PBIAS=y
CONFIG_REGULATOR_PWM=y
CONFIG_REGULATOR_QCOM_RPM=y
-CONFIG_REGULATOR_QCOM_SMD_RPM=y
+CONFIG_REGULATOR_QCOM_SMD_RPM=m
+CONFIG_REGULATOR_RK808=y
CONFIG_REGULATOR_RN5T618=y
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
@@ -592,18 +560,17 @@ CONFIG_MEDIA_CEC_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
CONFIG_MEDIA_USB_SUPPORT=y
-CONFIG_USB_VIDEO_CLASS=y
-CONFIG_USB_GSPCA=y
+CONFIG_USB_VIDEO_CLASS=m
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=m
CONFIG_SOC_CAMERA_PLATFORM=m
-CONFIG_VIDEO_RCAR_VIN=m
-CONFIG_VIDEO_ATMEL_ISI=m
CONFIG_VIDEO_SAMSUNG_EXYNOS4_IS=m
CONFIG_VIDEO_S5P_FIMC=m
CONFIG_VIDEO_S5P_MIPI_CSIS=m
CONFIG_VIDEO_EXYNOS_FIMC_LITE=m
CONFIG_VIDEO_EXYNOS4_FIMC_IS=m
+CONFIG_VIDEO_RCAR_VIN=m
+CONFIG_VIDEO_ATMEL_ISI=m
CONFIG_V4L_MEM2MEM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_JPEG=m
CONFIG_VIDEO_SAMSUNG_S5P_MFC=m
@@ -614,19 +581,15 @@ CONFIG_VIDEO_STI_DELTA=m
CONFIG_VIDEO_RENESAS_JPU=m
CONFIG_VIDEO_RENESAS_VSP1=m
CONFIG_V4L_TEST_DRIVERS=y
+CONFIG_VIDEO_VIVID=m
CONFIG_CEC_PLATFORM_DRIVERS=y
CONFIG_VIDEO_SAMSUNG_S5P_CEC=m
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7180=m
CONFIG_VIDEO_ML86V7667=m
CONFIG_DRM=y
-CONFIG_DRM_I2C_ADV7511=m
-CONFIG_DRM_I2C_ADV7511_AUDIO=y
# CONFIG_DRM_I2C_CH7006 is not set
# CONFIG_DRM_I2C_SIL164 is not set
-CONFIG_DRM_DUMB_VGA_DAC=m
-CONFIG_DRM_NXP_PTN3460=m
-CONFIG_DRM_PARADE_PS8622=m
CONFIG_DRM_NOUVEAU=m
CONFIG_DRM_EXYNOS=m
CONFIG_DRM_EXYNOS_FIMD=y
@@ -645,13 +608,18 @@ CONFIG_DRM_RCAR_LVDS=y
CONFIG_DRM_SUN4I=m
CONFIG_DRM_FSL_DCU=m
CONFIG_DRM_TEGRA=y
+CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_DRM_PANEL_SAMSUNG_LD9040=m
CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_PANEL_SIMPLE=y
+CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_NXP_PTN3460=m
+CONFIG_DRM_PARADE_PS8622=m
CONFIG_DRM_SII9234=m
+CONFIG_DRM_I2C_ADV7511=m
+CONFIG_DRM_I2C_ADV7511_AUDIO=y
CONFIG_DRM_STI=m
-CONFIG_DRM_VC4=y
+CONFIG_DRM_VC4=m
CONFIG_DRM_ETNAVIV=m
CONFIG_DRM_MXSFB=m
CONFIG_FB_ARMCLCD=y
@@ -659,8 +627,6 @@ CONFIG_FB_EFI=y
CONFIG_FB_WM8505=y
CONFIG_FB_SH_MOBILE_LCDC=y
CONFIG_FB_SIMPLE=y
-CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=m
CONFIG_BACKLIGHT_PWM=y
CONFIG_BACKLIGHT_AS3711=y
@@ -668,7 +634,6 @@ CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_SOUND=m
CONFIG_SND=m
-CONFIG_SND_DYNAMIC_MINORS=y
CONFIG_SND_HDA_TEGRA=m
CONFIG_SND_HDA_INPUT_BEEP=y
CONFIG_SND_HDA_PATCH_LOADER=y
@@ -692,7 +657,7 @@ CONFIG_SND_SOC_SNOW=m
CONFIG_SND_SOC_ODROID=m
CONFIG_SND_SOC_SH4_FSI=m
CONFIG_SND_SOC_RCAR=m
-CONFIG_SND_SIMPLE_SCU_CARD=m
+CONFIG_SND_SOC_STI=m
CONFIG_SND_SUN4I_CODEC=m
CONFIG_SND_SOC_TEGRA=m
CONFIG_SND_SOC_TEGRA20_I2S=m
@@ -703,31 +668,25 @@ CONFIG_SND_SOC_TEGRA_WM8903=m
CONFIG_SND_SOC_TEGRA_WM9712=m
CONFIG_SND_SOC_TEGRA_TRIMSLICE=m
CONFIG_SND_SOC_TEGRA_ALC5632=m
-CONFIG_SND_SOC_CPCAP=m
CONFIG_SND_SOC_TEGRA_MAX98090=m
CONFIG_SND_SOC_AK4642=m
+CONFIG_SND_SOC_CPCAP=m
CONFIG_SND_SOC_SGTL5000=m
CONFIG_SND_SOC_SPDIF=m
-CONFIG_SND_SOC_WM8978=m
-CONFIG_SND_SOC_STI=m
CONFIG_SND_SOC_STI_SAS=m
-CONFIG_SND_SIMPLE_CARD=m
+CONFIG_SND_SOC_WM8978=m
+CONFIG_SND_SIMPLE_SCU_CARD=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
-CONFIG_USB_XHCI_RCAR=m
CONFIG_USB_XHCI_TEGRA=m
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_MSM=m
-CONFIG_USB_EHCI_EXYNOS=y
-CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760=y
+CONFIG_USB_EHCI_TEGRA=y
+CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_STI=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_EXYNOS=m
CONFIG_USB_R8A66597_HCD=m
CONFIG_USB_RENESAS_USBHS=m
@@ -746,18 +705,18 @@ CONFIG_USB_TI_CPPI41_DMA=y
CONFIG_USB_TUSB_OMAP_DMA=y
CONFIG_USB_DWC3=y
CONFIG_USB_DWC2=y
-CONFIG_USB_HSIC_USB3503=y
CONFIG_USB_CHIPIDEA=y
CONFIG_USB_CHIPIDEA_UDC=y
CONFIG_USB_CHIPIDEA_HOST=y
+CONFIG_USB_ISP1760=y
+CONFIG_USB_HSIC_USB3503=y
CONFIG_AB8500_USB=y
-CONFIG_KEYSTONE_USB_PHY=y
+CONFIG_KEYSTONE_USB_PHY=m
CONFIG_NOP_USB_XCEIV=m
CONFIG_AM335X_PHY_USB=m
CONFIG_TWL6030_USB=m
CONFIG_USB_GPIO_VBUS=y
CONFIG_USB_ISP1301=y
-CONFIG_USB_MSM_OTG=m
CONFIG_USB_MXS_PHY=y
CONFIG_USB_GADGET=y
CONFIG_USB_FSL_USB2=y
@@ -793,21 +752,20 @@ CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_MMC_SDHCI_ESDHC_IMX=y
CONFIG_MMC_SDHCI_DOVE=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_S3C=y
CONFIG_MMC_SDHCI_PXAV3=y
CONFIG_MMC_SDHCI_SPEAR=y
-CONFIG_MMC_SDHCI_S3C=y
CONFIG_MMC_SDHCI_S3C_DMA=y
CONFIG_MMC_SDHCI_BCM_KONA=y
+CONFIG_MMC_MESON_MX_SDIO=y
CONFIG_MMC_SDHCI_ST=y
CONFIG_MMC_OMAP=y
CONFIG_MMC_OMAP_HS=y
CONFIG_MMC_ATMELMCI=y
CONFIG_MMC_SDHCI_MSM=y
-CONFIG_MMC_MESON_MX_SDIO=y
CONFIG_MMC_MVSDIO=y
CONFIG_MMC_SDHI=y
CONFIG_MMC_DW=y
-CONFIG_MMC_DW_PLTFM=y
CONFIG_MMC_DW_EXYNOS=y
CONFIG_MMC_DW_ROCKCHIP=y
CONFIG_MMC_SH_MMCIF=y
@@ -847,94 +805,85 @@ CONFIG_RTC_DRV_MAX77686=y
CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_RS5C372=m
CONFIG_RTC_DRV_BQ32K=m
-CONFIG_RTC_DRV_PALMAS=y
-CONFIG_RTC_DRV_ST_LPC=y
CONFIG_RTC_DRV_TWL4030=y
+CONFIG_RTC_DRV_PALMAS=y
CONFIG_RTC_DRV_TPS6586X=y
CONFIG_RTC_DRV_TPS65910=y
CONFIG_RTC_DRV_S35390A=m
CONFIG_RTC_DRV_RX8581=m
CONFIG_RTC_DRV_EM3027=y
+CONFIG_RTC_DRV_S5M=m
CONFIG_RTC_DRV_DA9063=m
CONFIG_RTC_DRV_EFI=m
CONFIG_RTC_DRV_DIGICOLOR=m
-CONFIG_RTC_DRV_S5M=m
CONFIG_RTC_DRV_S3C=m
CONFIG_RTC_DRV_PL031=y
CONFIG_RTC_DRV_AT91RM9200=m
CONFIG_RTC_DRV_AT91SAM9=m
CONFIG_RTC_DRV_VT8500=y
-CONFIG_RTC_DRV_SUN6I=y
CONFIG_RTC_DRV_SUNXI=y
CONFIG_RTC_DRV_MV=y
CONFIG_RTC_DRV_TEGRA=y
+CONFIG_RTC_DRV_ST_LPC=y
CONFIG_RTC_DRV_CPCAP=m
CONFIG_DMADEVICES=y
-CONFIG_DW_DMAC=y
CONFIG_AT_HDMAC=y
CONFIG_AT_XDMAC=y
+CONFIG_DMA_BCM2835=y
+CONFIG_DMA_SUN6I=y
CONFIG_FSL_EDMA=y
+CONFIG_IMX_DMA=y
+CONFIG_IMX_SDMA=y
CONFIG_MV_XOR=y
+CONFIG_MXS_DMA=y
+CONFIG_PL330_DMA=y
+CONFIG_SIRF_DMA=y
+CONFIG_STE_DMA40=y
+CONFIG_ST_FDMA=m
CONFIG_TEGRA20_APB_DMA=y
+CONFIG_XILINX_DMA=y
+CONFIG_QCOM_BAM_DMA=y
+CONFIG_DW_DMAC=y
CONFIG_SH_DMAE=y
CONFIG_RCAR_DMAC=y
CONFIG_RENESAS_USB_DMAC=m
-CONFIG_STE_DMA40=y
-CONFIG_SIRF_DMA=y
-CONFIG_TI_EDMA=y
-CONFIG_PL330_DMA=y
-CONFIG_IMX_SDMA=y
-CONFIG_IMX_DMA=y
-CONFIG_MXS_DMA=y
-CONFIG_DMA_BCM2835=y
-CONFIG_DMA_OMAP=y
-CONFIG_QCOM_BAM_DMA=y
-CONFIG_XILINX_DMA=y
-CONFIG_DMA_SUN6I=y
-CONFIG_ST_FDMA=m
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_MMIO=y
CONFIG_STAGING=y
-CONFIG_SENSORS_ISL29018=y
-CONFIG_SENSORS_ISL29028=y
CONFIG_MFD_NVEC=y
CONFIG_KEYBOARD_NVEC=y
CONFIG_SERIO_NVEC_PS2=y
CONFIG_NVEC_POWER=y
CONFIG_NVEC_PAZ00=y
-CONFIG_BCMA=y
-CONFIG_BCMA_HOST_SOC=y
-CONFIG_BCMA_DRIVER_GMAC_CMN=y
-CONFIG_BCMA_DRIVER_GPIO=y
-CONFIG_QCOM_GSBI=y
-CONFIG_QCOM_PM=y
-CONFIG_QCOM_SMEM=y
-CONFIG_QCOM_SMD_RPM=y
-CONFIG_QCOM_SMP2P=y
-CONFIG_QCOM_SMSM=y
-CONFIG_QCOM_WCNSS_CTRL=m
-CONFIG_ROCKCHIP_PM_DOMAINS=y
-CONFIG_COMMON_CLK_QCOM=y
-CONFIG_QCOM_CLK_RPM=y
-CONFIG_CHROME_PLATFORMS=y
CONFIG_STAGING_BOARD=y
-CONFIG_CROS_EC_CHARDEV=m
CONFIG_COMMON_CLK_MAX77686=y
CONFIG_COMMON_CLK_RK808=m
CONFIG_COMMON_CLK_S2MPS11=m
+CONFIG_COMMON_CLK_QCOM=y
+CONFIG_QCOM_CLK_RPM=y
CONFIG_APQ_MMCC_8084=y
CONFIG_MSM_GCC_8660=y
CONFIG_MSM_MMCC_8960=y
CONFIG_MSM_MMCC_8974=y
-CONFIG_HWSPINLOCK_QCOM=y
+CONFIG_BCM2835_MBOX=y
CONFIG_ROCKCHIP_IOMMU=y
CONFIG_TEGRA_IOMMU_GART=y
CONFIG_TEGRA_IOMMU_SMMU=y
CONFIG_REMOTEPROC=m
CONFIG_ST_REMOTEPROC=m
CONFIG_RPMSG_VIRTIO=m
+CONFIG_RASPBERRYPI_POWER=y
+CONFIG_QCOM_GSBI=y
+CONFIG_QCOM_PM=y
+CONFIG_QCOM_SMD_RPM=m
+CONFIG_QCOM_WCNSS_CTRL=m
+CONFIG_ROCKCHIP_PM_DOMAINS=y
+CONFIG_ARCH_TEGRA_2x_SOC=y
+CONFIG_ARCH_TEGRA_3x_SOC=y
+CONFIG_ARCH_TEGRA_114_SOC=y
+CONFIG_ARCH_TEGRA_124_SOC=y
CONFIG_PM_DEVFREQ=y
CONFIG_ARM_TEGRA_DEVFREQ=m
-CONFIG_MEMORY=y
-CONFIG_EXTCON=y
CONFIG_TI_AEMIF=y
CONFIG_IIO=y
CONFIG_IIO_SW_TRIGGER=y
@@ -947,56 +896,54 @@ CONFIG_VF610_ADC=m
CONFIG_XILINX_XADC=y
CONFIG_MPU3050_I2C=y
CONFIG_CM36651=m
+CONFIG_SENSORS_ISL29018=y
+CONFIG_SENSORS_ISL29028=y
CONFIG_AK8975=y
-CONFIG_RASPBERRYPI_POWER=y
CONFIG_IIO_HRTIMER_TRIGGER=y
CONFIG_PWM=y
CONFIG_PWM_ATMEL=m
CONFIG_PWM_ATMEL_HLCDC_PWM=m
CONFIG_PWM_ATMEL_TCB=m
+CONFIG_PWM_BCM2835=y
+CONFIG_PWM_BRCMSTB=m
CONFIG_PWM_FSL_FTM=m
CONFIG_PWM_MESON=m
CONFIG_PWM_RCAR=m
CONFIG_PWM_RENESAS_TPU=y
CONFIG_PWM_ROCKCHIP=m
CONFIG_PWM_SAMSUNG=m
+CONFIG_PWM_STI=y
CONFIG_PWM_SUN4I=y
CONFIG_PWM_TEGRA=y
CONFIG_PWM_VT8500=y
+CONFIG_KEYSTONE_IRQ=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_SUN9I_USB=y
CONFIG_PHY_HIX5HD2_SATA=y
-CONFIG_E1000E=y
-CONFIG_PWM_STI=y
-CONFIG_PWM_BCM2835=y
-CONFIG_PWM_BRCMSTB=m
-CONFIG_PHY_DM816X_USB=m
-CONFIG_OMAP_USB2=y
-CONFIG_TI_PIPE3=y
-CONFIG_TWL4030_USB=m
+CONFIG_PHY_BERLIN_SATA=y
CONFIG_PHY_BERLIN_USB=y
CONFIG_PHY_CPCAP_USB=m
-CONFIG_PHY_BERLIN_SATA=y
+CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_RCAR_GEN2=m
CONFIG_PHY_ROCKCHIP_DP=m
CONFIG_PHY_ROCKCHIP_USB=y
-CONFIG_PHY_QCOM_APQ8064_SATA=m
+CONFIG_PHY_SAMSUNG_USB2=m
CONFIG_PHY_MIPHY28LP=y
-CONFIG_PHY_RCAR_GEN2=m
CONFIG_PHY_STIH407_USB=y
CONFIG_PHY_STM32_USBPHYC=y
-CONFIG_PHY_SUN4I_USB=y
-CONFIG_PHY_SUN9I_USB=y
-CONFIG_PHY_SAMSUNG_USB2=m
CONFIG_PHY_TEGRA_XUSB=y
-CONFIG_PHY_BRCM_SATA=y
-CONFIG_NVMEM=y
+CONFIG_PHY_DM816X_USB=m
+CONFIG_OMAP_USB2=y
+CONFIG_TI_PIPE3=y
+CONFIG_TWL4030_USB=m
CONFIG_NVMEM_IMX_OCOTP=y
CONFIG_NVMEM_SUNXI_SID=y
CONFIG_NVMEM_VF610_OCOTP=y
-CONFIG_BCM2835_MBOX=y
CONFIG_RASPBERRYPI_FIRMWARE=y
-CONFIG_EFI_VARS=m
-CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_BCM47XX_NVRAM=y
CONFIG_BCM47XX_SPROM=y
+CONFIG_EFI_VARS=m
+CONFIG_EFI_CAPSULE_LOADER=m
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
CONFIG_MSDOS_FS=y
@@ -1004,7 +951,6 @@ CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_UBIFS_FS=y
-CONFIG_TMPFS=y
CONFIG_SQUASHFS=y
CONFIG_SQUASHFS_LZO=y
CONFIG_SQUASHFS_XZ=y
@@ -1020,13 +966,7 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_NLS_UTF8=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
-CONFIG_CPUFREQ_DT=y
-CONFIG_KEYSTONE_IRQ=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_ST=y
CONFIG_CRYPTO_USER=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
@@ -1035,27 +975,19 @@ CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_CRYPTO_DEV_MARVELL_CESA=m
CONFIG_CRYPTO_DEV_EXYNOS_RNG=m
CONFIG_CRYPTO_DEV_S5P=m
+CONFIG_CRYPTO_DEV_ATMEL_AES=m
+CONFIG_CRYPTO_DEV_ATMEL_TDES=m
+CONFIG_CRYPTO_DEV_ATMEL_SHA=m
CONFIG_CRYPTO_DEV_SUN4I_SS=m
CONFIG_CRYPTO_DEV_ROCKCHIP=m
CONFIG_ARM_CRYPTO=y
-CONFIG_CRYPTO_SHA1_ARM=m
CONFIG_CRYPTO_SHA1_ARM_NEON=m
CONFIG_CRYPTO_SHA1_ARM_CE=m
CONFIG_CRYPTO_SHA2_ARM_CE=m
-CONFIG_CRYPTO_SHA256_ARM=m
CONFIG_CRYPTO_SHA512_ARM=m
CONFIG_CRYPTO_AES_ARM=m
CONFIG_CRYPTO_AES_ARM_BS=m
CONFIG_CRYPTO_AES_ARM_CE=m
-CONFIG_CRYPTO_CHACHA20_NEON=m
-CONFIG_CRYPTO_CRC32_ARM_CE=m
-CONFIG_CRYPTO_CRCT10DIF_ARM_CE=m
CONFIG_CRYPTO_GHASH_ARM_CE=m
-CONFIG_CRYPTO_DEV_ATMEL_AES=m
-CONFIG_CRYPTO_DEV_ATMEL_TDES=m
-CONFIG_CRYPTO_DEV_ATMEL_SHA=m
-CONFIG_VIDEO_VIVID=m
-CONFIG_VIRTIO=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_PCI_LEGACY=y
-CONFIG_VIRTIO_MMIO=y
+CONFIG_CRYPTO_CRC32_ARM_CE=m
+CONFIG_CRYPTO_CHACHA20_NEON=m
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
index 3c1e203e53b9..57caa742016e 100644
--- a/arch/arm/crypto/speck-neon-core.S
+++ b/arch/arm/crypto/speck-neon-core.S
@@ -272,9 +272,11 @@
* Allocate stack space to store 128 bytes worth of tweaks. For
* performance, this space is aligned to a 16-byte boundary so that we
* can use the load/store instructions that declare 16-byte alignment.
+ * For Thumb2 compatibility, don't do the 'bic' directly on 'sp'.
*/
- sub sp, #128
- bic sp, #0xf
+ sub r12, sp, #128
+ bic r12, #0xf
+ mov sp, r12
.if \n == 64
// Load first tweak
diff --git a/arch/arm/firmware/Makefile b/arch/arm/firmware/Makefile
index a71f16536b6c..6e41336b0bc4 100644
--- a/arch/arm/firmware/Makefile
+++ b/arch/arm/firmware/Makefile
@@ -1 +1,4 @@
obj-$(CONFIG_TRUSTED_FOUNDATIONS) += trusted_foundations.o
+
+# tf_generic_smc() fails to build with -fsanitize-coverage=trace-pc
+KCOV_INSTRUMENT := n
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index dd546d65a383..7a9b86978ee1 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -177,7 +177,7 @@ M_CLASS(streq r3, [r12, #PMSAv8_MAIR1])
bic r0, r0, #CR_I
#endif
mcr p15, 0, r0, c1, c0, 0 @ write control reg
- isb
+ instr_sync
#elif defined (CONFIG_CPU_V7M)
#ifdef CONFIG_ARM_MPU
ldreq r3, [r12, MPU_CTRL]
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 225d1c58d2de..d9c299133111 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -338,6 +338,7 @@ static struct vm_area_struct gate_vma = {
static int __init gate_vma_init(void)
{
+ vma_init(&gate_vma, NULL);
gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
return 0;
}
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f09e9d66d605..dec130e7078c 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -544,7 +544,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
* Increment event counter and perform fixup for the pre-signal
* frame.
*/
- rseq_signal_deliver(regs);
+ rseq_signal_deliver(ksig, regs);
/*
* Set up the stack frame
@@ -666,7 +666,7 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
}
local_irq_disable();
diff --git a/arch/arm/mach-bcm/Kconfig b/arch/arm/mach-bcm/Kconfig
index c46a728df44e..25aac6ee2ab1 100644
--- a/arch/arm/mach-bcm/Kconfig
+++ b/arch/arm/mach-bcm/Kconfig
@@ -20,6 +20,7 @@ config ARCH_BCM_IPROC
select GPIOLIB
select ARM_AMBA
select PINCTRL
+ select PCI_DOMAINS if PCI
help
This enables support for systems based on Broadcom IPROC architected SoCs.
The IPROC complex contains one or more ARM CPUs along with common
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index e22fb40e34bc..6d5beb11bd96 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -774,7 +774,7 @@ static struct gpiod_lookup_table mmc_gpios_table = {
GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_CD_PIN, "cd",
GPIO_ACTIVE_LOW),
GPIO_LOOKUP("davinci_gpio.0", DA850_MMCSD_WP_PIN, "wp",
- GPIO_ACTIVE_LOW),
+ GPIO_ACTIVE_HIGH),
},
};
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 69df3620eca5..1c73694c871a 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -109,6 +109,45 @@ void omap5_erratum_workaround_801819(void)
static inline void omap5_erratum_workaround_801819(void) { }
#endif
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+/*
+ * Configure ACR and enable ACTLR[0] (Enable invalidates of BTB with
+ * ICIALLU) to activate the workaround for secondary Core.
+ * NOTE: it is assumed that the primary core's configuration is done
+ * by the boot loader (kernel will detect a misconfiguration and complain
+ * if this is not done).
+ *
+ * In General Purpose(GP) devices, ACR bit settings can only be done
+ * by ROM code in "secure world" using the smc call and there is no
+ * option to update the "firmware" on such devices. This also works for
+ * High security(HS) devices, as a backup option in case the
+ * "update" is not done in the "security firmware".
+ */
+static void omap5_secondary_harden_predictor(void)
+{
+ u32 acr, acr_mask;
+
+ asm volatile ("mrc p15, 0, %0, c1, c0, 1" : "=r" (acr));
+
+ /*
+ * ACTLR[0] (Enable invalidates of BTB with ICIALLU)
+ */
+ acr_mask = BIT(0);
+
+ /* Do we already have it done.. if yes, skip expensive smc */
+ if ((acr & acr_mask) == acr_mask)
+ return;
+
+ acr |= acr_mask;
+ omap_smc1(OMAP5_DRA7_MON_SET_ACR_INDEX, acr);
+
+ pr_debug("%s: ARM ACR setup for CVE_2017_5715 applied on CPU%d\n",
+ __func__, smp_processor_id());
+}
+#else
+static inline void omap5_secondary_harden_predictor(void) { }
+#endif
+
static void omap4_secondary_init(unsigned int cpu)
{
/*
@@ -131,6 +170,8 @@ static void omap4_secondary_init(unsigned int cpu)
set_cntfreq();
/* Configure ACR to disable streaming WA for 801819 */
omap5_erratum_workaround_801819();
+ /* Enable ACR to allow for ICUALLU workaround */
+ omap5_secondary_harden_predictor();
}
/*
diff --git a/arch/arm/mach-pxa/irq.c b/arch/arm/mach-pxa/irq.c
index 9c10248fadcc..4e8c2116808e 100644
--- a/arch/arm/mach-pxa/irq.c
+++ b/arch/arm/mach-pxa/irq.c
@@ -185,7 +185,7 @@ static int pxa_irq_suspend(void)
{
int i;
- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
saved_icmr[i] = __raw_readl(base + ICMR);
@@ -204,7 +204,7 @@ static void pxa_irq_resume(void)
{
int i;
- for (i = 0; i < pxa_internal_irq_nr / 32; i++) {
+ for (i = 0; i < DIV_ROUND_UP(pxa_internal_irq_nr, 32); i++) {
void __iomem *base = irq_base(i);
__raw_writel(saved_icmr[i], base + ICMR);
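The DIV_ROUND_UP() change above only matters when the IRQ count is not a multiple of 32: the old integer division silently dropped the final, partially populated register bank on suspend/resume. A minimal userspace sketch of the arithmetic (the IRQ count of 112 is hypothetical; the DIV_ROUND_UP definition matches the kernel macro):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))  /* same rounding as the kernel macro */

int main(void)
{
    int nr = 112;  /* hypothetical pxa_internal_irq_nr, not a multiple of 32 */

    printf("old bound: %d banks saved\n", nr / 32);              /* 3: last 16 IRQs skipped */
    printf("new bound: %d banks saved\n", DIV_ROUND_UP(nr, 32)); /* 4: all banks covered */
    return 0;
}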
diff --git a/arch/arm/mach-rpc/ecard.c b/arch/arm/mach-rpc/ecard.c
index 39aef4876ed4..8db62cc54a6a 100644
--- a/arch/arm/mach-rpc/ecard.c
+++ b/arch/arm/mach-rpc/ecard.c
@@ -237,8 +237,8 @@ static void ecard_init_pgtables(struct mm_struct *mm)
memcpy(dst_pgd, src_pgd, sizeof(pgd_t) * (EASI_SIZE / PGDIR_SIZE));
+ vma_init(&vma, mm);
vma.vm_flags = VM_EXEC;
- vma.vm_mm = mm;
flush_tlb_range(&vma, IO_START, IO_START + IO_SIZE);
flush_tlb_range(&vma, EASI_START, EASI_START + EASI_SIZE);
diff --git a/arch/arm/mach-socfpga/Kconfig b/arch/arm/mach-socfpga/Kconfig
index d0f62eacf59d..4adb901dd5eb 100644
--- a/arch/arm/mach-socfpga/Kconfig
+++ b/arch/arm/mach-socfpga/Kconfig
@@ -10,6 +10,7 @@ menuconfig ARCH_SOCFPGA
select HAVE_ARM_SCU
select HAVE_ARM_TWD if SMP
select MFD_SYSCON
+ select PCI_DOMAINS if PCI
if ARCH_SOCFPGA
config SOCFPGA_SUSPEND
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c186474422f3..0cc8e04295a4 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -736,20 +736,29 @@ static int __mark_rodata_ro(void *unused)
return 0;
}
+static int kernel_set_to_readonly __read_mostly;
+
void mark_rodata_ro(void)
{
+ kernel_set_to_readonly = 1;
stop_machine(__mark_rodata_ro, NULL, NULL);
debug_checkwx();
}
void set_kernel_text_rw(void)
{
+ if (!kernel_set_to_readonly)
+ return;
+
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), false,
current->active_mm);
}
void set_kernel_text_ro(void)
{
+ if (!kernel_set_to_readonly)
+ return;
+
set_section_perms(ro_perms, ARRAY_SIZE(ro_perms), true,
current->active_mm);
}
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 6e8b71613039..f6a62ae44a65 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1844,7 +1844,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
/* there are 2 passes here */
bpf_jit_dump(prog->len, image_size, 2, ctx.target);
- set_memory_ro((unsigned long)header, header->pages);
+ bpf_jit_binary_lock_ro(header);
prog->bpf_func = (void *)ctx.target;
prog->jited = 1;
prog->jited_len = image_size;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8073625371f5..07060e5b5864 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -59,6 +59,9 @@ struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata;
static __read_mostly unsigned int xen_events_irq;
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@ void __init xen_early_init(void)
xen_setup_features();
if (xen_feature(XENFEAT_dom0))
- xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
- else
- xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+ xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
if (!console_set_on_cmdline && !xen_initial_domain())
add_preferred_console("hvc", 0, NULL);
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 45272266dafb..e7101b19d590 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -10,7 +10,7 @@
#
# Copyright (C) 1995-2001 by Russell King
-LDFLAGS_vmlinux :=-p --no-undefined -X
+LDFLAGS_vmlinux :=--no-undefined -X
CPPFLAGS_vmlinux.lds = -DTEXT_OFFSET=$(TEXT_OFFSET)
GZFLAGS :=-9
@@ -60,15 +60,15 @@ ifeq ($(CONFIG_CPU_BIG_ENDIAN), y)
KBUILD_CPPFLAGS += -mbig-endian
CHECKFLAGS += -D__AARCH64EB__
AS += -EB
-LD += -EB
-LDFLAGS += -maarch64linuxb
+# We must use the linux target here, since distributions don't tend to package
+# the ELF linker scripts with binutils, and this results in a build failure.
+LDFLAGS += -EB -maarch64linuxb
UTS_MACHINE := aarch64_be
else
KBUILD_CPPFLAGS += -mlittle-endian
CHECKFLAGS += -D__AARCH64EL__
AS += -EL
-LD += -EL
-LDFLAGS += -maarch64linux
+LDFLAGS += -EL -maarch64linux # See comment above
UTS_MACHINE := aarch64
endif
diff --git a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
index e6b059378dc0..67dac595dc72 100644
--- a/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
+++ b/arch/arm64/boot/dts/altera/socfpga_stratix10.dtsi
@@ -309,8 +309,7 @@
interrupts = <0 99 4>;
resets = <&rst SPIM0_RESET>;
reg-io-width = <4>;
- num-chipselect = <4>;
- bus-num = <0>;
+ num-cs = <4>;
status = "disabled";
};
@@ -322,8 +321,7 @@
interrupts = <0 100 4>;
resets = <&rst SPIM1_RESET>;
reg-io-width = <4>;
- num-chipselect = <4>;
- bus-num = <0>;
+ num-cs = <4>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
index 4b3331fbfe39..dff9b15eb3c0 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-axg-s400.dts
@@ -66,9 +66,22 @@
&ethmac {
status = "okay";
- phy-mode = "rgmii";
pinctrl-0 = <&eth_rgmii_y_pins>;
pinctrl-names = "default";
+ phy-handle = <&eth_phy0>;
+ phy-mode = "rgmii";
+
+ mdio {
+ compatible = "snps,dwmac-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ eth_phy0: ethernet-phy@0 {
+ /* Realtek RTL8211F (0x001cc916) */
+ reg = <0>;
+ eee-broken-1000t;
+ };
+ };
};
&uart_A {
diff --git a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
index fee87737a201..67d7115e4eff 100644
--- a/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-axg.dtsi
@@ -132,7 +132,7 @@
sd_emmc_b: sd@5000 {
compatible = "amlogic,meson-axg-mmc";
- reg = <0x0 0x5000 0x0 0x2000>;
+ reg = <0x0 0x5000 0x0 0x800>;
interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_B>,
@@ -144,7 +144,7 @@
sd_emmc_c: mmc@7000 {
compatible = "amlogic,meson-axg-mmc";
- reg = <0x0 0x7000 0x0 0x2000>;
+ reg = <0x0 0x7000 0x0 0x800>;
interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
clocks = <&clkc CLKID_SD_EMMC_C>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
index 3c31e21cbed7..b8dc4dbb391b 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gx.dtsi
@@ -35,6 +35,12 @@
no-map;
};
+ /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
+ secmon_reserved_alt: secmon@5000000 {
+ reg = <0x0 0x05000000 0x0 0x300000>;
+ no-map;
+ };
+
linux,cma {
compatible = "shared-dma-pool";
reusable;
@@ -457,21 +463,21 @@
sd_emmc_a: mmc@70000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
- reg = <0x0 0x70000 0x0 0x2000>;
+ reg = <0x0 0x70000 0x0 0x800>;
interrupts = <GIC_SPI 216 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
sd_emmc_b: mmc@72000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
- reg = <0x0 0x72000 0x0 0x2000>;
+ reg = <0x0 0x72000 0x0 0x800>;
interrupts = <GIC_SPI 217 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
sd_emmc_c: mmc@74000 {
compatible = "amlogic,meson-gx-mmc", "amlogic,meson-gxbb-mmc";
- reg = <0x0 0x74000 0x0 0x2000>;
+ reg = <0x0 0x74000 0x0 0x800>;
interrupts = <GIC_SPI 218 IRQ_TYPE_EDGE_RISING>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
index eb327664a4d8..6aaafff674f9 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-mali.dtsi
@@ -6,7 +6,7 @@
&apb {
mali: gpu@c0000 {
- compatible = "amlogic,meson-gxbb-mali", "arm,mali-450";
+ compatible = "amlogic,meson-gxl-mali", "arm,mali-450";
reg = <0x0 0xc0000 0x0 0x40000>;
interrupts = <GIC_SPI 160 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 161 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
index 3e3eb31748a3..f63bceb88caa 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-libretech-cc.dts
@@ -234,9 +234,6 @@
bus-width = <4>;
cap-sd-highspeed;
- sd-uhs-sdr12;
- sd-uhs-sdr25;
- sd-uhs-sdr50;
max-frequency = <100000000>;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
index 0cfd701809de..a1b31013ab6e 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl-s905x-p212.dtsi
@@ -189,3 +189,10 @@
&usb0 {
status = "okay";
};
+
+&usb2_phy0 {
+ /*
+ * HDMI_5V is also used as supply for the USB VBUS.
+ */
+ phy-supply = <&hdmi_5v>;
+};
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
index 27538eea547b..c87a80e9bcc6 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxl.dtsi
@@ -13,14 +13,6 @@
/ {
compatible = "amlogic,meson-gxl";
- reserved-memory {
- /* Alternate 3 MiB reserved for ARM Trusted Firmware (BL31) */
- secmon_reserved_alt: secmon@5000000 {
- reg = <0x0 0x05000000 0x0 0x300000>;
- no-map;
- };
- };
-
soc {
usb0: usb@c9000000 {
status = "disabled";
diff --git a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
index 4a2a6af8e752..4057197048dc 100644
--- a/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
+++ b/arch/arm64/boot/dts/broadcom/northstar2/ns2.dtsi
@@ -118,7 +118,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 281 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <0>;
@@ -149,7 +149,7 @@
#interrupt-cells = <1>;
interrupt-map-mask = <0 0 0 0>;
- interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_NONE>;
+ interrupt-map = <0 0 0 0 &gic 0 GIC_SPI 305 IRQ_TYPE_LEVEL_HIGH>;
linux,pci-domain = <4>;
@@ -566,7 +566,7 @@
reg = <0x66080000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 394 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 394 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@@ -594,7 +594,7 @@
reg = <0x660b0000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 395 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 395 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
index eb6f08cdbd79..77efa28c4dd5 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
+++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742k.dts
@@ -43,6 +43,10 @@
enet-phy-lane-swap;
};
+&sdio0 {
+ mmc-ddr-1_8v;
+};
+
&uart2 {
status = "okay";
};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
index 5084b037320f..55ba495ef56e 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
+++ b/arch/arm64/boot/dts/broadcom/stingray/bcm958742t.dts
@@ -42,3 +42,7 @@
&gphy0 {
enet-phy-lane-swap;
};
+
+&sdio0 {
+ mmc-ddr-1_8v;
+};
diff --git a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
index 99aaff0b6d72..b203152ad67c 100644
--- a/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
+++ b/arch/arm64/boot/dts/broadcom/stingray/stingray.dtsi
@@ -409,7 +409,7 @@
reg = <0x000b0000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 177 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 177 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
@@ -453,7 +453,7 @@
reg = <0x000e0000 0x100>;
#address-cells = <1>;
#size-cells = <0>;
- interrupts = <GIC_SPI 178 IRQ_TYPE_NONE>;
+ interrupts = <GIC_SPI 178 IRQ_TYPE_LEVEL_HIGH>;
clock-frequency = <100000>;
status = "disabled";
};
diff --git a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
index c6999624ed8a..68c5a6c819ae 100644
--- a/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi3660-hikey960.dts
@@ -585,6 +585,8 @@
vmmc-supply = <&wlan_en>;
ti,non-removable;
non-removable;
+ cap-power-off-card;
+ keep-power-in-suspend;
#address-cells = <0x1>;
#size-cells = <0x0>;
status = "ok";
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index edb4ee0b8896..7f12624f6c8e 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -322,6 +322,8 @@
dwmmc_2: dwmmc2@f723f000 {
bus-width = <0x4>;
non-removable;
+ cap-power-off-card;
+ keep-power-in-suspend;
vmmc-supply = <&reg_vdd_3v3>;
mmc-pwrseq = <&wl1835_pwrseq>;
diff --git a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
index 7dabe25f6774..1c6ff8197a88 100644
--- a/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
+++ b/arch/arm64/boot/dts/marvell/armada-cp110.dtsi
@@ -149,7 +149,7 @@
CP110_LABEL(icu): interrupt-controller@1e0000 {
compatible = "marvell,cp110-icu";
- reg = <0x1e0000 0x10>;
+ reg = <0x1e0000 0x440>;
#interrupt-cells = <3>;
interrupt-controller;
msi-parent = <&gicp>;
diff --git a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
index 0f829db33efe..4d5ef01f43a3 100644
--- a/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
+++ b/arch/arm64/boot/dts/qcom/apq8096-db820c.dtsi
@@ -75,7 +75,7 @@
serial@75b1000 {
label = "LS-UART0";
- status = "okay";
+ status = "disabled";
pinctrl-names = "default", "sleep";
pinctrl-0 = <&blsp2_uart2_4pins_default>;
pinctrl-1 = <&blsp2_uart2_4pins_sleep>;
diff --git a/arch/arm64/boot/dts/qcom/msm8916.dtsi b/arch/arm64/boot/dts/qcom/msm8916.dtsi
index 650f356f69ca..c2625d15a8c0 100644
--- a/arch/arm64/boot/dts/qcom/msm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8916.dtsi
@@ -1191,14 +1191,14 @@
port@0 {
reg = <0>;
- etf_out: endpoint {
+ etf_in: endpoint {
slave-mode;
remote-endpoint = <&funnel0_out>;
};
};
port@1 {
reg = <0>;
- etf_in: endpoint {
+ etf_out: endpoint {
remote-endpoint = <&replicator_in>;
};
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
index 9b4dc41703e3..ae3b5adf32df 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld11-global.dts
@@ -54,7 +54,7 @@
sound {
compatible = "audio-graph-card";
label = "UniPhier LD11";
- widgets = "Headphone", "Headphone Jack";
+ widgets = "Headphone", "Headphones";
dais = <&i2s_port2
&i2s_port3
&i2s_port4
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
index fe6608ea3277..7919233c9ce2 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ld20-global.dts
@@ -54,7 +54,7 @@
sound {
compatible = "audio-graph-card";
label = "UniPhier LD20";
- widgets = "Headphone", "Headphone Jack";
+ widgets = "Headphone", "Headphones";
dais = <&i2s_port2
&i2s_port3
&i2s_port4
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 3cfa8ca26738..f9a186f6af8a 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -47,6 +47,7 @@ CONFIG_ARCH_MVEBU=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SEATTLE=y
+CONFIG_ARCH_SYNQUACER=y
CONFIG_ARCH_RENESAS=y
CONFIG_ARCH_R8A7795=y
CONFIG_ARCH_R8A7796=y
@@ -58,7 +59,6 @@ CONFIG_ARCH_R8A77995=y
CONFIG_ARCH_STRATIX10=y
CONFIG_ARCH_TEGRA=y
CONFIG_ARCH_SPRD=y
-CONFIG_ARCH_SYNQUACER=y
CONFIG_ARCH_THUNDER=y
CONFIG_ARCH_THUNDER2=y
CONFIG_ARCH_UNIPHIER=y
@@ -67,25 +67,23 @@ CONFIG_ARCH_XGENE=y
CONFIG_ARCH_ZX=y
CONFIG_ARCH_ZYNQMP=y
CONFIG_PCI=y
-CONFIG_HOTPLUG_PCI_PCIE=y
CONFIG_PCI_IOV=y
CONFIG_HOTPLUG_PCI=y
CONFIG_HOTPLUG_PCI_ACPI=y
-CONFIG_PCI_LAYERSCAPE=y
-CONFIG_PCI_HISI=y
-CONFIG_PCIE_QCOM=y
-CONFIG_PCIE_KIRIN=y
-CONFIG_PCIE_ARMADA_8K=y
-CONFIG_PCIE_HISI_STB=y
CONFIG_PCI_AARDVARK=y
CONFIG_PCI_TEGRA=y
CONFIG_PCIE_RCAR=y
-CONFIG_PCIE_ROCKCHIP=y
-CONFIG_PCIE_ROCKCHIP_HOST=m
CONFIG_PCI_HOST_GENERIC=y
CONFIG_PCI_XGENE=y
CONFIG_PCI_HOST_THUNDER_PEM=y
CONFIG_PCI_HOST_THUNDER_ECAM=y
+CONFIG_PCIE_ROCKCHIP_HOST=m
+CONFIG_PCI_LAYERSCAPE=y
+CONFIG_PCI_HISI=y
+CONFIG_PCIE_QCOM=y
+CONFIG_PCIE_ARMADA_8K=y
+CONFIG_PCIE_KIRIN=y
+CONFIG_PCIE_HISI_STB=y
CONFIG_ARM64_VA_BITS_48=y
CONFIG_SCHED_MC=y
CONFIG_NUMA=y
@@ -104,8 +102,6 @@ CONFIG_HIBERNATION=y
CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=m
CONFIG_CPU_FREQ_GOV_USERSPACE=y
@@ -113,11 +109,11 @@ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=m
CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
CONFIG_CPUFREQ_DT=y
+CONFIG_ACPI_CPPC_CPUFREQ=m
CONFIG_ARM_ARMADA_37XX_CPUFREQ=y
CONFIG_ARM_BIG_LITTLE_CPUFREQ=y
CONFIG_ARM_SCPI_CPUFREQ=y
CONFIG_ARM_TEGRA186_CPUFREQ=y
-CONFIG_ACPI_CPPC_CPUFREQ=m
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -236,11 +232,6 @@ CONFIG_SMSC911X=y
CONFIG_SNI_AVE=y
CONFIG_SNI_NETSEC=y
CONFIG_STMMAC_ETH=m
-CONFIG_DWMAC_IPQ806X=m
-CONFIG_DWMAC_MESON=m
-CONFIG_DWMAC_ROCKCHIP=m
-CONFIG_DWMAC_SUNXI=m
-CONFIG_DWMAC_SUN8I=m
CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_AT803X_PHY=m
CONFIG_MARVELL_PHY=m
@@ -269,8 +260,8 @@ CONFIG_WL18XX=m
CONFIG_WLCORE_SDIO=m
CONFIG_INPUT_EVDEV=y
CONFIG_KEYBOARD_ADC=m
-CONFIG_KEYBOARD_CROS_EC=y
CONFIG_KEYBOARD_GPIO=y
+CONFIG_KEYBOARD_CROS_EC=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_ATMEL_MXT=m
CONFIG_INPUT_MISC=y
@@ -296,17 +287,13 @@ CONFIG_SERIAL_SAMSUNG=y
CONFIG_SERIAL_SAMSUNG_CONSOLE=y
CONFIG_SERIAL_TEGRA=y
CONFIG_SERIAL_SH_SCI=y
-CONFIG_SERIAL_SH_SCI_NR_UARTS=11
-CONFIG_SERIAL_SH_SCI_CONSOLE=y
CONFIG_SERIAL_MSM=y
CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_SERIAL_MVEBU_UART=y
CONFIG_SERIAL_DEV_BUS=y
-CONFIG_SERIAL_DEV_CTRL_TTYPORT=y
CONFIG_VIRTIO_CONSOLE=y
-CONFIG_I2C_HID=m
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_MUX_PCA954x=y
@@ -325,26 +312,26 @@ CONFIG_I2C_RCAR=y
CONFIG_I2C_CROS_EC_TUNNEL=y
CONFIG_SPI=y
CONFIG_SPI_ARMADA_3700=y
-CONFIG_SPI_MESON_SPICC=m
-CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_BCM2835=m
CONFIG_SPI_BCM2835AUX=m
+CONFIG_SPI_MESON_SPICC=m
+CONFIG_SPI_MESON_SPIFC=m
CONFIG_SPI_ORION=y
CONFIG_SPI_PL022=y
-CONFIG_SPI_QUP=y
CONFIG_SPI_ROCKCHIP=y
+CONFIG_SPI_QUP=y
CONFIG_SPI_S3C64XX=y
CONFIG_SPI_SPIDEV=m
CONFIG_SPMI=y
-CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_SINGLE=y
CONFIG_PINCTRL_MAX77620=y
+CONFIG_PINCTRL_IPQ8074=y
CONFIG_PINCTRL_MSM8916=y
CONFIG_PINCTRL_MSM8994=y
CONFIG_PINCTRL_MSM8996=y
-CONFIG_PINCTRL_MT7622=y
CONFIG_PINCTRL_QDF2XXX=y
CONFIG_PINCTRL_QCOM_SPMI_PMIC=y
+CONFIG_PINCTRL_MT7622=y
CONFIG_GPIO_DWAPB=y
CONFIG_GPIO_MB86S7X=y
CONFIG_GPIO_PL061=y
@@ -368,13 +355,13 @@ CONFIG_SENSORS_INA2XX=m
CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
+CONFIG_ROCKCHIP_THERMAL=m
+CONFIG_RCAR_GEN3_THERMAL=y
CONFIG_ARMADA_THERMAL=y
CONFIG_BRCMSTB_THERMAL=m
CONFIG_EXYNOS_THERMAL=y
-CONFIG_RCAR_GEN3_THERMAL=y
-CONFIG_QCOM_TSENS=y
-CONFIG_ROCKCHIP_THERMAL=m
CONFIG_TEGRA_BPMP_THERMAL=m
+CONFIG_QCOM_TSENS=y
CONFIG_UNIPHIER_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_S3C2410_WATCHDOG=y
@@ -395,9 +382,9 @@ CONFIG_MFD_MAX77620=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_RK808=y
CONFIG_MFD_SEC_CORE=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_AXP20X=y
CONFIG_REGULATOR_FAN53555=y
-CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_GPIO=y
CONFIG_REGULATOR_HI6421V530=y
CONFIG_REGULATOR_HI655X=y
@@ -407,16 +394,15 @@ CONFIG_REGULATOR_QCOM_SMD_RPM=y
CONFIG_REGULATOR_QCOM_SPMI=y
CONFIG_REGULATOR_RK808=y
CONFIG_REGULATOR_S2MPS11=y
+CONFIG_RC_CORE=m
+CONFIG_RC_DECODERS=y
+CONFIG_RC_DEVICES=y
+CONFIG_IR_MESON=m
CONFIG_MEDIA_SUPPORT=m
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_ANALOG_TV_SUPPORT=y
CONFIG_MEDIA_DIGITAL_TV_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
-CONFIG_MEDIA_RC_SUPPORT=y
-CONFIG_RC_CORE=m
-CONFIG_RC_DEVICES=y
-CONFIG_RC_DECODERS=y
-CONFIG_IR_MESON=m
CONFIG_VIDEO_V4L2_SUBDEV_API=y
# CONFIG_DVB_NET is not set
CONFIG_V4L_MEM2MEM_DRIVERS=y
@@ -441,8 +427,7 @@ CONFIG_ROCKCHIP_DW_HDMI=y
CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_DRM_RCAR_DU=m
-CONFIG_DRM_RCAR_LVDS=y
-CONFIG_DRM_RCAR_VSP=y
+CONFIG_DRM_RCAR_LVDS=m
CONFIG_DRM_TEGRA=m
CONFIG_DRM_PANEL_SIMPLE=m
CONFIG_DRM_I2C_ADV7511=m
@@ -455,7 +440,6 @@ CONFIG_FB_ARMCLCD=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_LP855X=m
-CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -468,6 +452,7 @@ CONFIG_SND_SOC_RCAR=m
CONFIG_SND_SOC_AK4613=m
CONFIG_SND_SIMPLE_CARD=m
CONFIG_SND_AUDIO_GRAPH_CARD=m
+CONFIG_I2C_HID=m
CONFIG_USB=y
CONFIG_USB_OTG=y
CONFIG_USB_XHCI_HCD=y
@@ -501,12 +486,12 @@ CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_ARMMMCI=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_ACPI=y
-CONFIG_MMC_SDHCI_F_SDH30=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_ARASAN=y
CONFIG_MMC_SDHCI_OF_ESDHC=y
CONFIG_MMC_SDHCI_CADENCE=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_MMC_SDHCI_F_SDH30=y
CONFIG_MMC_MESON_GX=y
CONFIG_MMC_SDHCI_MSM=y
CONFIG_MMC_SPI=y
@@ -524,11 +509,11 @@ CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_PWM=y
CONFIG_LEDS_SYSCON=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_LEDS_TRIGGER_PANIC=y
-CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_EDAC=y
CONFIG_EDAC_GHES=y
CONFIG_RTC_CLASS=y
@@ -537,13 +522,13 @@ CONFIG_RTC_DRV_RK808=m
CONFIG_RTC_DRV_S5M=y
CONFIG_RTC_DRV_DS3232=y
CONFIG_RTC_DRV_EFI=y
+CONFIG_RTC_DRV_CROS_EC=y
CONFIG_RTC_DRV_S3C=y
CONFIG_RTC_DRV_PL031=y
CONFIG_RTC_DRV_SUN6I=y
CONFIG_RTC_DRV_ARMADA38X=y
CONFIG_RTC_DRV_TEGRA=y
CONFIG_RTC_DRV_XGENE=y
-CONFIG_RTC_DRV_CROS_EC=y
CONFIG_DMADEVICES=y
CONFIG_DMA_BCM2835=m
CONFIG_K3_DMA=y
@@ -579,7 +564,6 @@ CONFIG_HWSPINLOCK_QCOM=y
CONFIG_ARM_MHU=y
CONFIG_PLATFORM_MHU=y
CONFIG_BCM2835_MBOX=y
-CONFIG_HI6220_MBOX=y
CONFIG_QCOM_APCS_IPC=y
CONFIG_ROCKCHIP_IOMMU=y
CONFIG_TEGRA_IOMMU_SMMU=y
@@ -602,7 +586,6 @@ CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_EXTCON_USB_GPIO=y
CONFIG_EXTCON_USBC_CROS_EC=y
CONFIG_MEMORY=y
-CONFIG_TEGRA_MC=y
CONFIG_IIO=y
CONFIG_EXYNOS_ADC=y
CONFIG_ROCKCHIP_SARADC=m
@@ -618,27 +601,27 @@ CONFIG_PWM_RCAR=m
CONFIG_PWM_ROCKCHIP=y
CONFIG_PWM_SAMSUNG=y
CONFIG_PWM_TEGRA=m
+CONFIG_PHY_XGENE=y
+CONFIG_PHY_SUN4I_USB=y
+CONFIG_PHY_HI6220_USB=y
CONFIG_PHY_HISTB_COMBPHY=y
CONFIG_PHY_HISI_INNO_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB2=y
-CONFIG_PHY_RCAR_GEN3_USB3=m
-CONFIG_PHY_HI6220_USB=y
-CONFIG_PHY_QCOM_USB_HS=y
-CONFIG_PHY_SUN4I_USB=y
CONFIG_PHY_MVEBU_CP110_COMPHY=y
CONFIG_PHY_QCOM_QMP=m
-CONFIG_PHY_ROCKCHIP_INNO_USB2=y
+CONFIG_PHY_QCOM_USB_HS=y
+CONFIG_PHY_RCAR_GEN3_USB2=y
+CONFIG_PHY_RCAR_GEN3_USB3=m
CONFIG_PHY_ROCKCHIP_EMMC=y
+CONFIG_PHY_ROCKCHIP_INNO_USB2=y
CONFIG_PHY_ROCKCHIP_PCIE=m
CONFIG_PHY_ROCKCHIP_TYPEC=y
-CONFIG_PHY_XGENE=y
CONFIG_PHY_TEGRA_XUSB=y
CONFIG_QCOM_L2_PMU=y
CONFIG_QCOM_L3_PMU=y
-CONFIG_MESON_EFUSE=m
CONFIG_QCOM_QFPROM=y
CONFIG_ROCKCHIP_EFUSE=y
CONFIG_UNIPHIER_EFUSE=y
+CONFIG_MESON_EFUSE=m
CONFIG_TEE=y
CONFIG_OPTEE=y
CONFIG_ARM_SCPI_PROTOCOL=y
@@ -647,7 +630,6 @@ CONFIG_EFI_CAPSULE_LOADER=y
CONFIG_ACPI=y
CONFIG_ACPI_APEI=y
CONFIG_ACPI_APEI_GHES=y
-CONFIG_ACPI_APEI_PCIEAER=y
CONFIG_ACPI_APEI_MEMORY_FAILURE=y
CONFIG_ACPI_APEI_EINJ=y
CONFIG_EXT2_FS=y
@@ -682,7 +664,6 @@ CONFIG_DEBUG_INFO=y
CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
-CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
@@ -691,20 +672,15 @@ CONFIG_SECURITY=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
-CONFIG_CRYPTO_SHA256_ARM64=m
-CONFIG_CRYPTO_SHA512_ARM64=m
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
+CONFIG_CRYPTO_SHA512_ARM64_CE=m
+CONFIG_CRYPTO_SHA3_ARM64=m
+CONFIG_CRYPTO_SM3_ARM64_CE=m
CONFIG_CRYPTO_GHASH_ARM64_CE=y
CONFIG_CRYPTO_CRCT10DIF_ARM64_CE=m
CONFIG_CRYPTO_CRC32_ARM64_CE=m
-CONFIG_CRYPTO_AES_ARM64=m
-CONFIG_CRYPTO_AES_ARM64_CE=m
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
-CONFIG_CRYPTO_AES_ARM64_NEON_BLK=m
CONFIG_CRYPTO_CHACHA20_NEON=m
CONFIG_CRYPTO_AES_ARM64_BS=m
-CONFIG_CRYPTO_SHA512_ARM64_CE=m
-CONFIG_CRYPTO_SHA3_ARM64=m
-CONFIG_CRYPTO_SM3_ARM64_CE=m
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 253188fb8cb0..e3e50950a863 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -223,8 +223,8 @@ static int ctr_encrypt(struct skcipher_request *req)
kernel_neon_begin();
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
- err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
kernel_neon_end();
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
diff --git a/arch/arm64/include/asm/alternative.h b/arch/arm64/include/asm/alternative.h
index a91933b1e2e6..4b650ec1d7dd 100644
--- a/arch/arm64/include/asm/alternative.h
+++ b/arch/arm64/include/asm/alternative.h
@@ -28,7 +28,12 @@ typedef void (*alternative_cb_t)(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void __init apply_alternatives_all(void);
-void apply_alternatives(void *start, size_t length);
+
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length);
+#else
+static inline void apply_alternatives_module(void *start, size_t length) { }
+#endif
#define ALTINSTR_ENTRY(feature,cb) \
" .word 661b - .\n" /* label */ \
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fda9a8ca48be..fe8777b12f86 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -306,6 +306,7 @@ struct kvm_vcpu_arch {
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9f82d6b53851..1bdeca8918a6 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -224,10 +224,8 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
* Only if the new pte is valid and kernel, otherwise TLB maintenance
* or update_mmu_cache() have the necessary barriers.
*/
- if (pte_valid_not_user(pte)) {
+ if (pte_valid_not_user(pte))
dsb(ishst);
- isb();
- }
}
extern void __sync_icache_dcache(pte_t pteval);
@@ -434,7 +432,6 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
WRITE_ONCE(*pmdp, pmd);
dsb(ishst);
- isb();
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -485,7 +482,6 @@ static inline void set_pud(pud_t *pudp, pud_t pud)
{
WRITE_ONCE(*pudp, pud);
dsb(ishst);
- isb();
}
static inline void pud_clear(pud_t *pudp)
diff --git a/arch/arm64/include/asm/simd.h b/arch/arm64/include/asm/simd.h
index fa8b3fe932e6..6495cc51246f 100644
--- a/arch/arm64/include/asm/simd.h
+++ b/arch/arm64/include/asm/simd.h
@@ -29,20 +29,15 @@ DECLARE_PER_CPU(bool, kernel_neon_busy);
static __must_check inline bool may_use_simd(void)
{
/*
- * The raw_cpu_read() is racy if called with preemption enabled.
- * This is not a bug: kernel_neon_busy is only set when
- * preemption is disabled, so we cannot migrate to another CPU
- * while it is set, nor can we migrate to a CPU where it is set.
- * So, if we find it clear on some CPU then we're guaranteed to
- * find it clear on any CPU we could migrate to.
- *
- * If we are in between kernel_neon_begin()...kernel_neon_end(),
- * the flag will be set, but preemption is also disabled, so we
- * can't migrate to another CPU and spuriously see it become
- * false.
+ * kernel_neon_busy is only set while preemption is disabled,
+ * and is clear whenever preemption is enabled. Since
+ * this_cpu_read() is atomic w.r.t. preemption, kernel_neon_busy
+ * cannot change under our feet -- if it's set we cannot be
+ * migrated, and if it's clear we cannot be migrated to a CPU
+ * where it is set.
*/
return !in_irq() && !irqs_disabled() && !in_nmi() &&
- !raw_cpu_read(kernel_neon_busy);
+ !this_cpu_read(kernel_neon_busy);
}
#else /* ! CONFIG_KERNEL_MODE_NEON */
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6171178075dc..a8f84812c6e8 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -728,6 +728,17 @@ asm(
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg(__scs_new, sysreg); \
+} while (0)
+
static inline void config_sctlr_el1(u32 clear, u32 set)
{
u32 val;
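The sysreg_clear_set() helper added above does a read-modify-write: bits in the clear mask are zeroed, bits in the set mask are ORed in, and the register write is skipped when the value would not change. A minimal userspace model of that update rule (reg_model() and the sample values are illustrative only, not kernel API):

#include <stdint.h>
#include <stdio.h>

/* Model of the update rule: zero the 'clear' bits, OR in the 'set' bits,
 * and report whether a write would have been issued at all. */
static uint64_t reg_model(uint64_t val, uint64_t clear, uint64_t set, int *wrote)
{
    uint64_t new_val = (val & ~clear) | set;

    *wrote = (new_val != val);
    return new_val;
}

int main(void)
{
    int wrote;
    uint64_t v = reg_model(0xf0, 0x30, 0x01, &wrote);  /* 0xf0 -> 0xc1, write issued */
    printf("%#llx wrote=%d\n", (unsigned long long)v, wrote);

    v = reg_model(v, 0x00, 0x01, &wrote);              /* bit already set, write skipped */
    printf("%#llx wrote=%d\n", (unsigned long long)v, wrote);
    return 0;
}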
diff --git a/arch/arm64/include/asm/tlb.h b/arch/arm64/include/asm/tlb.h
index ffdaea7954bb..d87f2d646caa 100644
--- a/arch/arm64/include/asm/tlb.h
+++ b/arch/arm64/include/asm/tlb.h
@@ -37,7 +37,9 @@ static inline void __tlb_remove_table(void *_table)
static inline void tlb_flush(struct mmu_gather *tlb)
{
- struct vm_area_struct vma = { .vm_mm = tlb->mm, };
+ struct vm_area_struct vma;
+
+ vma_init(&vma, tlb->mm);
/*
* The ASID allocator will either invalidate the ASID or mark
diff --git a/arch/arm64/kernel/alternative.c b/arch/arm64/kernel/alternative.c
index 5c4bce4ac381..36fb069fd049 100644
--- a/arch/arm64/kernel/alternative.c
+++ b/arch/arm64/kernel/alternative.c
@@ -122,7 +122,30 @@ static void patch_alternative(struct alt_instr *alt,
}
}
-static void __apply_alternatives(void *alt_region, bool use_linear_alias)
+/*
+ * We provide our own, private D-cache cleaning function so that we don't
+ * accidentally call into the cache.S code, which is patched by us at
+ * runtime.
+ */
+static void clean_dcache_range_nopatch(u64 start, u64 end)
+{
+ u64 cur, d_size, ctr_el0;
+
+ ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
+ d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
+ CTR_DMINLINE_SHIFT);
+ cur = start & ~(d_size - 1);
+ do {
+ /*
+ * We must clean+invalidate to the PoC in order to avoid
+ * Cortex-A53 errata 826319, 827319, 824069 and 819472
+ * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
+ */
+ asm volatile("dc civac, %0" : : "r" (cur) : "memory");
+ } while (cur += d_size, cur < end);
+}
+
+static void __apply_alternatives(void *alt_region, bool is_module)
{
struct alt_instr *alt;
struct alt_region *region = alt_region;
@@ -145,7 +168,7 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
pr_info_once("patching kernel code\n");
origptr = ALT_ORIG_PTR(alt);
- updptr = use_linear_alias ? lm_alias(origptr) : origptr;
+ updptr = is_module ? origptr : lm_alias(origptr);
nr_inst = alt->orig_len / AARCH64_INSN_SIZE;
if (alt->cpufeature < ARM64_CB_PATCH)
@@ -155,8 +178,20 @@ static void __apply_alternatives(void *alt_region, bool use_linear_alias)
alt_cb(alt, origptr, updptr, nr_inst);
- flush_icache_range((uintptr_t)origptr,
- (uintptr_t)(origptr + nr_inst));
+ if (!is_module) {
+ clean_dcache_range_nopatch((u64)origptr,
+ (u64)(origptr + nr_inst));
+ }
+ }
+
+ /*
+ * The core module code takes care of cache maintenance in
+ * flush_module_icache().
+ */
+ if (!is_module) {
+ dsb(ish);
+ __flush_icache_all();
+ isb();
}
}
@@ -178,7 +213,7 @@ static int __apply_alternatives_multi_stop(void *unused)
isb();
} else {
BUG_ON(alternatives_applied);
- __apply_alternatives(&region, true);
+ __apply_alternatives(&region, false);
/* Barriers provided by the cache flushing */
WRITE_ONCE(alternatives_applied, 1);
}
@@ -192,12 +227,14 @@ void __init apply_alternatives_all(void)
stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}
-void apply_alternatives(void *start, size_t length)
+#ifdef CONFIG_MODULES
+void apply_alternatives_module(void *start, size_t length)
{
struct alt_region region = {
.begin = start,
.end = start + length,
};
- __apply_alternatives(&region, false);
+ __apply_alternatives(&region, true);
}
+#endif
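The clean_dcache_range_nopatch() helper introduced above sizes its loop from CTR_EL0.DminLine, which encodes log2 of the smallest D-cache line in 4-byte words. A hedged userspace sketch of that computation (the CTR_EL0 value and the address are made up; the assumed bit layout is DminLine at bits [19:16]):

#include <stdio.h>
#include <stdint.h>

/* Userspace model of the line-size computation: line bytes = 4 << DminLine. */
int main(void)
{
    uint64_t ctr_el0 = 0x84448004;                  /* hypothetical CTR_EL0 value */
    unsigned int dminline = (ctr_el0 >> 16) & 0xf;  /* extract bits [19:16] */
    unsigned int d_size = 4u << dminline;           /* 4 << 4 = 64-byte lines */

    uint64_t start = 0xffff000010081234ull;         /* arbitrary patch address */
    uint64_t cur = start & ~(uint64_t)(d_size - 1); /* round down to a line boundary */

    printf("line size %u bytes, first line cleaned at %#llx\n",
           d_size, (unsigned long long)cur);
    return 0;
}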
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d2856b129097..c6d80743f4ed 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -937,7 +937,7 @@ static int __init parse_kpti(char *str)
__kpti_forced = enabled ? 1 : -1;
return 0;
}
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#ifdef CONFIG_ARM64_HW_AFDBM
@@ -1351,9 +1351,9 @@ static void __update_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
static void update_cpu_capabilities(u16 scope_mask)
{
- __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
__update_cpu_capabilities(arm64_errata, scope_mask,
"enabling workaround for");
+ __update_cpu_capabilities(arm64_features, scope_mask, "detected:");
}
static int __enable_cpu_capability(void *arg)
@@ -1408,8 +1408,8 @@ __enable_cpu_capabilities(const struct arm64_cpu_capabilities *caps,
static void __init enable_cpu_capabilities(u16 scope_mask)
{
- __enable_cpu_capabilities(arm64_features, scope_mask);
__enable_cpu_capabilities(arm64_errata, scope_mask);
+ __enable_cpu_capabilities(arm64_features, scope_mask);
}
/*
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index 155fd91e78f4..f0f27aeefb73 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -448,9 +448,8 @@ int module_finalize(const Elf_Ehdr *hdr,
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
- if (strcmp(".altinstructions", secstrs + s->sh_name) == 0) {
- apply_alternatives((void *)s->sh_addr, s->sh_size);
- }
+ if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
+ apply_alternatives_module((void *)s->sh_addr, s->sh_size);
#ifdef CONFIG_ARM64_MODULE_PLTS
if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
!strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f3e2e3aec0b0..2faa9863d2e5 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -179,7 +179,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *idle)
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index dc6ecfa5a2d2..aac7808ce216 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -5,13 +5,14 @@
* Copyright 2018 Arm Limited
* Author: Dave Martin <Dave.Martin@arm.com>
*/
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@ void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu)
{
BUG_ON(!current->mm);
- vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+ vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+ KVM_ARM64_HOST_SVE_IN_USE |
+ KVM_ARM64_HOST_SVE_ENABLED);
vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
if (test_thread_flag(TIF_SVE))
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+ if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+ vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}
/*
@@ -92,19 +99,30 @@ void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu)
*/
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
- local_bh_disable();
+ unsigned long flags;
- update_thread_flag(TIF_SVE,
- vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+ local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
/* Clean guest FP state to memory and invalidate cpu view */
fpsimd_save();
fpsimd_flush_cpu_state();
- } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
- /* Ensure user trap controls are correctly restored */
- fpsimd_bind_task_to_cpu();
+ } else if (system_supports_sve()) {
+ /*
+ * The FPSIMD/SVE state in the CPU has not been touched, and we
+ * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+ * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+ * for EL0. To avoid spurious traps, restore the trap state
+ * seen by kvm_arch_vcpu_load_fp():
+ */
+ if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+ sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+ else
+ sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
- local_bh_enable();
+ update_thread_flag(TIF_SVE,
+ vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+ local_irq_restore(flags);
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 49e217ac7e1e..61e93f0b5482 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -583,13 +583,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
size >> PAGE_SHIFT);
return NULL;
}
- if (!coherent)
- __dma_flush_area(page_to_virt(page), iosize);
-
addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
prot,
__builtin_return_address(0));
- if (!addr) {
+ if (addr) {
+ memset(addr, 0, size);
+ if (!coherent)
+ __dma_flush_area(page_to_virt(page), iosize);
+ } else {
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
dma_release_from_contiguous(dev, page,
size >> PAGE_SHIFT);
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index ecc6818191df..1854e49aa18a 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -108,11 +108,13 @@ static pte_t get_clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- struct vm_area_struct vma = { .vm_mm = mm };
+ struct vm_area_struct vma;
pte_t orig_pte = huge_ptep_get(ptep);
bool valid = pte_valid(orig_pte);
unsigned long i, saddr = addr;
+ vma_init(&vma, mm);
+
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) {
pte_t pte = ptep_get_and_clear(mm, addr, ptep);
@@ -145,9 +147,10 @@ static void clear_flush(struct mm_struct *mm,
unsigned long pgsize,
unsigned long ncontig)
{
- struct vm_area_struct vma = { .vm_mm = mm };
+ struct vm_area_struct vma;
unsigned long i, saddr = addr;
+ vma_init(&vma, mm);
for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
pte_clear(mm, addr, ptep);
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 325cfb3b858a..9abf8a1e7b25 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -611,11 +611,13 @@ void __init mem_init(void)
BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
* Make sure we chose the upper bound of sizeof(struct page)
- * correctly.
+ * correctly when sizing the VMEMMAP array.
*/
BUILD_BUG_ON(sizeof(struct page) > (1 << STRUCT_PAGE_MAX_SHIFT));
+#endif
if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
extern int sysctl_overcommit_memory;
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5f9a73a4452c..03646e6a2ef4 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -217,8 +217,9 @@ ENDPROC(idmap_cpu_replace_ttbr1)
.macro __idmap_kpti_put_pgtable_ent_ng, type
orr \type, \type, #PTE_NG // Same bit for blocks and pages
- str \type, [cur_\()\type\()p] // Update the entry and ensure it
- dc civac, cur_\()\type\()p // is visible to all CPUs.
+ str \type, [cur_\()\type\()p] // Update the entry and ensure
+ dmb sy // that it is visible to all
+ dc civac, cur_\()\type\()p // CPUs.
.endm
/*
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 44f0ac0df308..db89e7306081 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -120,7 +120,7 @@ ia64_tlb_flush_mmu_tlbonly(struct mmu_gather *tlb, unsigned long start, unsigned
*/
struct vm_area_struct vma;
- vma.vm_mm = tlb->mm;
+ vma_init(&vma, tlb->mm);
/* flush the address range from the tlb: */
flush_tlb_range(&vma, start, end);
/* now flush the virt. page-table area mapping the address range: */
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index 3b38c717008a..46bff1661836 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -2278,17 +2278,15 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
DPRINT(("smpl_buf @%p\n", smpl_buf));
/* allocate vma */
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(mm);
if (!vma) {
DPRINT(("Cannot allocate vma\n"));
goto error_kmem;
}
- INIT_LIST_HEAD(&vma->anon_vma_chain);
/*
* partially initialize the vma for the sampling buffer
*/
- vma->vm_mm = mm;
vma->vm_file = get_file(filp);
vma->vm_flags = VM_READ|VM_MAYREAD|VM_DONTEXPAND|VM_DONTDUMP;
vma->vm_page_prot = PAGE_READONLY; /* XXX may need to change */
@@ -2346,7 +2344,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
return 0;
error:
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
error_kmem:
pfm_rvfree(smpl_buf, size);
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 18278b448530..e6c6dfd98de2 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -114,10 +114,8 @@ ia64_init_addr_space (void)
* the problem. When the process attempts to write to the register backing store
* for the first time, it will get a SEGFAULT in this case.
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(current->mm);
if (vma) {
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = current->mm;
vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
vma->vm_end = vma->vm_start + PAGE_SIZE;
vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
@@ -125,7 +123,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
@@ -133,10 +131,8 @@ ia64_init_addr_space (void)
/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
if (!(current->personality & MMAP_PAGE_ZERO)) {
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(current->mm);
if (vma) {
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = current->mm;
vma->vm_end = PAGE_SIZE;
vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
@@ -144,7 +140,7 @@ ia64_init_addr_space (void)
down_write(&current->mm->mmap_sem);
if (insert_vm_struct(current->mm, vma)) {
up_write(&current->mm->mmap_sem);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return;
}
up_write(&current->mm->mmap_sem);
@@ -277,7 +273,7 @@ static struct vm_area_struct gate_vma;
static int __init gate_vma_init(void)
{
- gate_vma.vm_mm = NULL;
+ vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/arch/m68k/include/asm/mcf_pgalloc.h b/arch/m68k/include/asm/mcf_pgalloc.h
index 8b707c249026..12fe700632f4 100644
--- a/arch/m68k/include/asm/mcf_pgalloc.h
+++ b/arch/m68k/include/asm/mcf_pgalloc.h
@@ -44,6 +44,7 @@ extern inline pmd_t *pmd_alloc_kernel(pgd_t *pgd, unsigned long address)
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t page,
unsigned long address)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
@@ -74,8 +75,9 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
return page;
}
-extern inline void pte_free(struct mm_struct *mm, struct page *page)
+static inline void pte_free(struct mm_struct *mm, struct page *page)
{
+ pgtable_page_dtor(page);
__free_page(page);
}
diff --git a/arch/microblaze/Kconfig.debug b/arch/microblaze/Kconfig.debug
index 331a3bb66297..93a737c8d1a6 100644
--- a/arch/microblaze/Kconfig.debug
+++ b/arch/microblaze/Kconfig.debug
@@ -8,11 +8,4 @@ config TRACE_IRQFLAGS_SUPPORT
source "lib/Kconfig.debug"
-config HEART_BEAT
- bool "Heart beat function for kernel"
- default n
- help
- This option turns on/off heart beat kernel functionality.
- First GPIO node is taken.
-
endmenu
diff --git a/arch/microblaze/include/asm/setup.h b/arch/microblaze/include/asm/setup.h
index d5384f6f36f7..ce9b7b786156 100644
--- a/arch/microblaze/include/asm/setup.h
+++ b/arch/microblaze/include/asm/setup.h
@@ -19,15 +19,10 @@ extern char cmd_line[COMMAND_LINE_SIZE];
extern char *klimit;
-void microblaze_heartbeat(void);
-void microblaze_setup_heartbeat(void);
-
# ifdef CONFIG_MMU
extern void mmu_reset(void);
# endif /* CONFIG_MMU */
-extern void of_platform_reset_gpio_probe(void);
-
void time_init(void);
void init_IRQ(void);
void machine_early_init(const char *cmdline, unsigned int ram,
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index 9774e1d9507b..a62d09420a47 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
#endif /* __ASSEMBLY__ */
-#define __NR_syscalls 399
+#define __NR_syscalls 401
#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index eb156f914793..7a9f16a76413 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -415,5 +415,7 @@
#define __NR_pkey_alloc 396
#define __NR_pkey_free 397
#define __NR_statx 398
+#define __NR_io_pgetevents 399
+#define __NR_rseq 400
#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
index 7e99cf6984a1..dd71637437f4 100644
--- a/arch/microblaze/kernel/Makefile
+++ b/arch/microblaze/kernel/Makefile
@@ -8,7 +8,6 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_timer.o = -pg
CFLAGS_REMOVE_intc.o = -pg
CFLAGS_REMOVE_early_printk.o = -pg
-CFLAGS_REMOVE_heartbeat.o = -pg
CFLAGS_REMOVE_ftrace.o = -pg
CFLAGS_REMOVE_process.o = -pg
endif
@@ -17,12 +16,11 @@ extra-y := head.o vmlinux.lds
obj-y += dma.o exceptions.o \
hw_exception_handler.o irq.o \
- platform.o process.o prom.o ptrace.o \
+ process.o prom.o ptrace.o \
reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
obj-y += cpu/
-obj-$(CONFIG_HEART_BEAT) += heartbeat.o
obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
obj-$(CONFIG_MMU) += misc.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/arch/microblaze/kernel/heartbeat.c b/arch/microblaze/kernel/heartbeat.c
deleted file mode 100644
index 2022130139d2..000000000000
--- a/arch/microblaze/kernel/heartbeat.c
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2007-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/sched.h>
-#include <linux/sched/loadavg.h>
-#include <linux/io.h>
-
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-
-static unsigned int base_addr;
-
-void microblaze_heartbeat(void)
-{
- static unsigned int cnt, period, dist;
-
- if (base_addr) {
- if (cnt == 0 || cnt == dist)
- out_be32(base_addr, 1);
- else if (cnt == 7 || cnt == dist + 7)
- out_be32(base_addr, 0);
-
- if (++cnt > period) {
- cnt = 0;
- /*
- * The hyperbolic function below modifies the heartbeat
- * period length in dependency of the current (5min)
- * load. It goes through the points f(0)=126, f(1)=86,
- * f(5)=51, f(inf)->30.
- */
- period = ((672 << FSHIFT) / (5 * avenrun[0] +
- (7 << FSHIFT))) + 30;
- dist = period / 4;
- }
- }
-}
-
-void microblaze_setup_heartbeat(void)
-{
- struct device_node *gpio = NULL;
- int *prop;
- int j;
- const char * const gpio_list[] = {
- "xlnx,xps-gpio-1.00.a",
- NULL
- };
-
- for (j = 0; gpio_list[j] != NULL; j++) {
- gpio = of_find_compatible_node(NULL, NULL, gpio_list[j]);
- if (gpio)
- break;
- }
-
- if (gpio) {
- base_addr = be32_to_cpup(of_get_property(gpio, "reg", NULL));
- base_addr = (unsigned long) ioremap(base_addr, PAGE_SIZE);
- pr_notice("Heartbeat GPIO at 0x%x\n", base_addr);
-
- /* GPIO is configured as output */
- prop = (int *) of_get_property(gpio, "xlnx,is-bidir", NULL);
- if (prop)
- out_be32(base_addr + 4, 0);
- }
-}
diff --git a/arch/microblaze/kernel/platform.c b/arch/microblaze/kernel/platform.c
deleted file mode 100644
index 2540d60610d9..000000000000
--- a/arch/microblaze/kernel/platform.c
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Copyright 2008 Michal Simek <monstr@monstr.eu>
- *
- * based on virtex.c file
- *
- * Copyright 2007 Secret Lab Technologies Ltd.
- *
- * This file is licensed under the terms of the GNU General Public License
- * version 2. This program is licensed "as is" without any warranty of any
- * kind, whether express or implied.
- */
-
-#include <linux/init.h>
-#include <linux/of_platform.h>
-#include <asm/setup.h>
-
-static struct of_device_id xilinx_of_bus_ids[] __initdata = {
- { .compatible = "simple-bus", },
- { .compatible = "xlnx,compound", },
- {}
-};
-
-static int __init microblaze_device_probe(void)
-{
- of_platform_bus_probe(NULL, xilinx_of_bus_ids, NULL);
- of_platform_reset_gpio_probe();
- return 0;
-}
-device_initcall(microblaze_device_probe);
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
index bab4c8330ef4..fcbe1daf6316 100644
--- a/arch/microblaze/kernel/reset.c
+++ b/arch/microblaze/kernel/reset.c
@@ -18,7 +18,7 @@
static int handle; /* reset pin handle */
static unsigned int reset_val;
-void of_platform_reset_gpio_probe(void)
+static int of_platform_reset_gpio_probe(void)
{
int ret;
handle = of_get_named_gpio(of_find_node_by_path("/"),
@@ -27,13 +27,13 @@ void of_platform_reset_gpio_probe(void)
if (!gpio_is_valid(handle)) {
pr_info("Skipping unavailable RESET gpio %d (%s)\n",
handle, "reset");
- return;
+ return -ENODEV;
}
ret = gpio_request(handle, "reset");
if (ret < 0) {
pr_info("GPIO pin is already allocated\n");
- return;
+ return ret;
}
/* get current setup value */
@@ -51,11 +51,12 @@ void of_platform_reset_gpio_probe(void)
pr_info("RESET: Registered gpio device: %d, current val: %d\n",
handle, reset_val);
- return;
+ return 0;
err:
gpio_free(handle);
- return;
+ return ret;
}
+device_initcall(of_platform_reset_gpio_probe);
static void gpio_system_reset(void)
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index 56bcf313121f..6ab650593792 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -400,3 +400,5 @@ ENTRY(sys_call_table)
.long sys_pkey_alloc
.long sys_pkey_free
.long sys_statx
+ .long sys_io_pgetevents
+ .long sys_rseq
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
index 7de941cbbd94..a6683484b3a1 100644
--- a/arch/microblaze/kernel/timer.c
+++ b/arch/microblaze/kernel/timer.c
@@ -156,9 +156,6 @@ static inline void timer_ack(void)
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
struct clock_event_device *evt = &clockevent_xilinx_timer;
-#ifdef CONFIG_HEART_BEAT
- microblaze_heartbeat();
-#endif
timer_ack();
evt->event_handler(evt);
return IRQ_HANDLED;
@@ -318,10 +315,6 @@ static int __init xilinx_timer_init(struct device_node *timer)
return ret;
}
-#ifdef CONFIG_HEART_BEAT
- microblaze_setup_heartbeat();
-#endif
-
ret = xilinx_clocksource_init();
if (ret)
return ret;
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3f9deec70b92..08c10c518f83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@ config MIPS
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 10a405d593df..c782b10ddf50 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
void ath79_ddr_wb_flush(u32 reg)
{
- void __iomem *flush_reg = ath79_ddr_wb_flush_base + reg;
+ void __iomem *flush_reg = ath79_ddr_wb_flush_base + (reg * 4);
/* Flush the DDR write buffer. */
__raw_writel(0x1, flush_reg);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index 6b2c6f3baefa..75fb96ca61db 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -34,7 +34,7 @@
#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
- .dev_id = "i2c-gpio",
+ .dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index a7d0b836f2f7..cea8ad864b3f 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -414,6 +414,8 @@ static inline type pfx##in##bwlq##p(unsigned long port) \
__val = *__addr; \
slow; \
\
+ /* prevent prefetching of coherent DMA data prematurely */ \
+ rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index bb05e9916a5f..f25dd1d83fb7 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -388,17 +388,19 @@
#define __NR_pkey_alloc (__NR_Linux + 364)
#define __NR_pkey_free (__NR_Linux + 365)
#define __NR_statx (__NR_Linux + 366)
+#define __NR_rseq (__NR_Linux + 367)
+#define __NR_io_pgetevents (__NR_Linux + 368)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 366
+#define __NR_Linux_syscalls 368
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 366
+#define __NR_O32_Linux_syscalls 368
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -733,16 +735,18 @@
#define __NR_pkey_alloc (__NR_Linux + 324)
#define __NR_pkey_free (__NR_Linux + 325)
#define __NR_statx (__NR_Linux + 326)
+#define __NR_rseq (__NR_Linux + 327)
+#define __NR_io_pgetevents (__NR_Linux + 328)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 326
+#define __NR_Linux_syscalls 328
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 326
+#define __NR_64_Linux_syscalls 328
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1081,15 +1085,17 @@
#define __NR_pkey_alloc (__NR_Linux + 328)
#define __NR_pkey_free (__NR_Linux + 329)
#define __NR_statx (__NR_Linux + 330)
+#define __NR_rseq (__NR_Linux + 331)
+#define __NR_io_pgetevents (__NR_Linux + 332)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 330
+#define __NR_Linux_syscalls 332
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 330
+#define __NR_N32_Linux_syscalls 332
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 38a302919e6b..d7de8adcfcc8 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,6 +79,10 @@ FEXPORT(ret_from_fork)
jal schedule_tail # a0 = struct task_struct *prev
FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
local_irq_disable # make sure need_resched and
# signals dont change between
# sampling and return
@@ -141,6 +145,10 @@ work_notifysig: # deal with pending signals and
j resume_userspace_check
FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
local_irq_disable # make sure need_resched doesn't
# change between and return
LONG_L a2, TI_FLAGS($28) # current->work
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index f2ee7e1e3342..cff52b283e03 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -119,10 +119,20 @@ NESTED(_mcount, PT_SIZE, ra)
EXPORT_SYMBOL(_mcount)
PTR_LA t1, ftrace_stub
PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
- bne t1, t2, static_trace
+ beq t1, t2, fgraph_trace
nop
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+
+fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_LA t1, ftrace_stub
PTR_L t3, ftrace_graph_return
bne t1, t3, ftrace_graph_caller
nop
@@ -131,24 +141,11 @@ EXPORT_SYMBOL(_mcount)
bne t1, t3, ftrace_graph_caller
nop
#endif
- b ftrace_stub
-#ifdef CONFIG_32BIT
- addiu sp, sp, 8
-#else
- nop
-#endif
-static_trace:
- MCOUNT_SAVE_REGS
-
- move a0, ra /* arg1: self return address */
- jalr t2 /* (1) call *ftrace_trace_function */
- move a1, AT /* arg2: parent's return address */
-
- MCOUNT_RESTORE_REGS
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
+
.globl ftrace_stub
ftrace_stub:
RETURN_BACK
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 8d85046adcc8..9670e70139fd 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -29,6 +29,7 @@
#include <linux/kallsyms.h>
#include <linux/random.h>
#include <linux/prctl.h>
+#include <linux/nmi.h>
#include <asm/asm.h>
#include <asm/bootinfo.h>
@@ -655,28 +656,42 @@ unsigned long arch_align_stack(unsigned long sp)
return sp & ALMASK;
}
-static void arch_dump_stack(void *info)
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
{
- struct pt_regs *regs;
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
- regs = get_irq_regs();
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
- if (regs)
- show_regs(regs);
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
- dump_stack();
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = handle_backtrace;
+ smp_call_function_single_async(cpu, csd);
+ }
}
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
- long this_cpu = get_cpu();
-
- if (cpumask_test_cpu(this_cpu, mask) && !exclude_self)
- dump_stack();
-
- smp_call_function_many(mask, arch_dump_stack, NULL, 1);
-
- put_cpu();
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
}
int mips_get_process_fp_mode(struct task_struct *task)
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a9a7d78803cd..91d3c8c46097 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -590,3 +590,5 @@ EXPORT(sys_call_table)
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx
+ PTR sys_rseq
+ PTR sys_io_pgetevents
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 65d5aeeb9bdb..358d9599983d 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -439,4 +439,6 @@ EXPORT(sys_call_table)
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
PTR sys_statx
+ PTR sys_rseq
+ PTR sys_io_pgetevents
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index cbf190ef9e8a..c65eaacc1abf 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -434,4 +434,6 @@ EXPORT(sysn32_call_table)
PTR sys_pkey_alloc
PTR sys_pkey_free
PTR sys_statx /* 6330 */
+ PTR sys_rseq
+ PTR compat_sys_io_pgetevents
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 9ebe3e2403b1..73913f072e39 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -583,4 +583,6 @@ EXPORT(sys32_call_table)
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx
+ PTR sys_rseq
+ PTR compat_sys_io_pgetevents
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 9e224469c788..0a9cfe7a0372 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -801,6 +801,8 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->regs[0] = 0; /* Don't deal with this again. */
}
+ rseq_signal_deliver(ksig, regs);
+
if (sig_uses_siginfo(&ksig->ka, abi))
ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
ksig, regs, oldset);
@@ -868,6 +870,7 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
user_enter();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d67fa74622ee..8d505a21396e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -351,6 +351,7 @@ static void __show_regs(const struct pt_regs *regs)
void show_regs(struct pt_regs *regs)
{
__show_regs((struct pt_regs *)regs);
+ dump_stack();
}
void show_registers(struct pt_regs *regs)
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 1986e09fb457..1601d90b087b 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -9,6 +9,7 @@
#include <linux/export.h>
#include <asm/addrspace.h>
#include <asm/byteorder.h>
+#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -98,6 +99,20 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
return error;
}
+static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
+ void *arg)
+{
+ unsigned long i;
+
+ for (i = 0; i < nr_pages; i++) {
+ if (pfn_valid(start_pfn + i) &&
+ !PageReserved(pfn_to_page(start_pfn + i)))
+ return 1;
+ }
+
+ return 0;
+}
+
/*
* Generic mapping function (not visible outside):
*/
@@ -116,8 +131,8 @@ static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
{
+ unsigned long offset, pfn, last_pfn;
struct vm_struct * area;
- unsigned long offset;
phys_addr_t last_addr;
void * addr;
@@ -137,18 +152,16 @@ void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long
return (void __iomem *) CKSEG1ADDR(phys_addr);
/*
- * Don't allow anybody to remap normal RAM that we're using..
+ * Don't allow anybody to remap RAM that may be allocated by the page
+ * allocator, since that could lead to races & data clobbering.
*/
- if (phys_addr < virt_to_phys(high_memory)) {
- char *t_addr, *t_end;
- struct page *page;
-
- t_addr = __va(phys_addr);
- t_end = t_addr + (size - 1);
-
- for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
- if(!PageReserved(page))
- return NULL;
+ pfn = PFN_DOWN(phys_addr);
+ last_pfn = PFN_DOWN(last_addr);
+ if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
+ __ioremap_check_ram) == 1) {
+ WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
+ &phys_addr, &last_addr);
+ return NULL;
}
/*
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 9632436d74d7..c2e94cf5ecda 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -54,5 +54,5 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar,
phys_addr_t size = resource_size(rsrc);
*start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
+ *end = rsrc->start + size - 1;
}
diff --git a/arch/nds32/Kconfig b/arch/nds32/Kconfig
index 6aed974276d8..34f7222c5efe 100644
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -12,17 +12,17 @@ config NDS32
select CLONE_BACKWARDS
select COMMON_CLK
select DMA_NONCOHERENT_OPS
- select GENERIC_ASHLDI3
- select GENERIC_ASHRDI3
- select GENERIC_LSHRDI3
- select GENERIC_CMPDI2
- select GENERIC_MULDI3
- select GENERIC_UCMPDI2
select GENERIC_ATOMIC64
select GENERIC_CPU_DEVICES
select GENERIC_CLOCKEVENTS
select GENERIC_IRQ_CHIP
select GENERIC_IRQ_SHOW
+ select GENERIC_LIB_ASHLDI3
+ select GENERIC_LIB_ASHRDI3
+ select GENERIC_LIB_CMPDI2
+ select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_MULDI3
+ select GENERIC_LIB_UCMPDI2
select GENERIC_STRNCPY_FROM_USER
select GENERIC_STRNLEN_USER
select GENERIC_TIME_VSYSCALL
diff --git a/arch/nds32/Makefile b/arch/nds32/Makefile
index 513bb2e9baf9..031c676821ff 100644
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN
KBUILD_CFLAGS += $(call cc-option, -EL)
KBUILD_AFLAGS += $(call cc-option, -EL)
LDFLAGS += $(call cc-option, -EL)
+CHECKFLAGS += -D__NDS32_EL__
else
KBUILD_CFLAGS += $(call cc-option, -EB)
KBUILD_AFLAGS += $(call cc-option, -EB)
LDFLAGS += $(call cc-option, -EB)
+CHECKFLAGS += -D__NDS32_EB__
endif
boot := arch/nds32/boot
diff --git a/arch/nds32/include/asm/cacheflush.h b/arch/nds32/include/asm/cacheflush.h
index 10b48f0d8e85..8b26198d51bb 100644
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -8,6 +8,8 @@
#define PG_dcache_dirty PG_arch_1
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#ifdef CONFIG_CPU_CACHE_ALIASING
void flush_cache_mm(struct mm_struct *mm);
void flush_cache_dup_mm(struct mm_struct *mm);
@@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma,
void flush_kernel_dcache_page(struct page *page);
void flush_kernel_vmap_range(void *addr, int size);
void invalidate_kernel_vmap_range(void *addr, int size);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&(mapping)->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
#else
#include <asm-generic/cacheflush.h>
+#undef flush_icache_range
+#undef flush_icache_page
+#undef flush_icache_user_range
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len);
#endif
#endif /* __NDS32_CACHEFLUSH_H__ */
diff --git a/arch/nds32/include/asm/futex.h b/arch/nds32/include/asm/futex.h
index eab5e84bd991..cb6cb91cfdf8 100644
--- a/arch/nds32/include/asm/futex.h
+++ b/arch/nds32/include/asm/futex.h
@@ -16,7 +16,7 @@
" .popsection\n" \
" .pushsection .fixup,\"ax\"\n" \
"4: move %0, " err_reg "\n" \
- " j 3b\n" \
+ " b 3b\n" \
" .popsection"
#define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg) \
diff --git a/arch/nds32/kernel/setup.c b/arch/nds32/kernel/setup.c
index 2f5b2ccebe47..63a1a5ef5219 100644
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -278,7 +278,8 @@ static void __init setup_memory(void)
void __init setup_arch(char **cmdline_p)
{
- early_init_devtree( __dtb_start);
+ early_init_devtree(__atags_pointer ? \
+ phys_to_virt(__atags_pointer) : __dtb_start);
setup_cpuinfo();
diff --git a/arch/nds32/mm/cacheflush.c b/arch/nds32/mm/cacheflush.c
index ce8fd34497bf..254703653b6f 100644
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -13,7 +13,39 @@
extern struct cache_info L1_cache_info[2];
-#ifndef CONFIG_CPU_CACHE_ALIASING
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+ unsigned long line_size, flags;
+ line_size = L1_cache_info[DCACHE].line_size;
+ start = start & ~(line_size - 1);
+ end = (end + line_size - 1) & ~(line_size - 1);
+ local_irq_save(flags);
+ cpu_cache_wbinval_range(start, end, 1);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+ unsigned long flags;
+ unsigned long kaddr;
+ local_irq_save(flags);
+ kaddr = (unsigned long)kmap_atomic(page);
+ cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+ kunmap_atomic((void *)kaddr);
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+ unsigned long addr, int len)
+{
+ unsigned long kaddr;
+ kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
+ flush_icache_range(kaddr, kaddr + len);
+ kunmap_atomic((void *)kaddr);
+}
+
void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
pte_t * pte)
{
@@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
(vma->vm_flags & VM_EXEC)) {
-
- if (!PageHighMem(page)) {
- cpu_cache_wbinval_page((unsigned long)
- page_address(page),
- vma->vm_flags & VM_EXEC);
- } else {
- unsigned long kaddr = (unsigned long)kmap_atomic(page);
- cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
- kunmap_atomic((void *)kaddr);
- }
+ unsigned long kaddr;
+ local_irq_save(flags);
+ kaddr = (unsigned long)kmap_atomic(page);
+ cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+ kunmap_atomic((void *)kaddr);
+ local_irq_restore(flags);
}
}
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
static inline unsigned long aliasing(unsigned long addr, unsigned long page)
@@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
local_irq_restore(flags);
}
EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
- unsigned long line_size, flags;
- line_size = L1_cache_info[DCACHE].line_size;
- start = start & ~(line_size - 1);
- end = (end + line_size - 1) & ~(line_size - 1);
- local_irq_save(flags);
- cpu_cache_wbinval_range(start, end, 1);
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- unsigned long flags;
- local_irq_save(flags);
- cpu_cache_wbinval_page((unsigned long)page_address(page),
- vma->vm_flags & VM_EXEC);
- local_irq_restore(flags);
-}
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
- pte_t * pte)
-{
- struct page *page;
- unsigned long flags;
- unsigned long pfn = pte_pfn(*pte);
-
- if (!pfn_valid(pfn))
- return;
-
- if (vma->vm_mm == current->active_mm) {
- local_irq_save(flags);
- __nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
- __nds32__tlbop_rwr(*pte);
- __nds32__isb();
- local_irq_restore(flags);
- }
-
- page = pfn_to_page(pfn);
- if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
- (vma->vm_flags & VM_EXEC)) {
- local_irq_save(flags);
- cpu_dcache_wbinval_page((unsigned long)page_address(page));
- local_irq_restore(flags);
- }
-}
#endif
diff --git a/arch/openrisc/include/asm/pgalloc.h b/arch/openrisc/include/asm/pgalloc.h
index 3e1a46615120..8999b9226512 100644
--- a/arch/openrisc/include/asm/pgalloc.h
+++ b/arch/openrisc/include/asm/pgalloc.h
@@ -98,8 +98,12 @@ static inline void pte_free(struct mm_struct *mm, struct page *pte)
__free_page(pte);
}
+#define __pte_free_tlb(tlb, pte, addr) \
+do { \
+ pgtable_page_dtor(pte); \
+ tlb_remove_page((tlb), (pte)); \
+} while (0)
-#define __pte_free_tlb(tlb, pte, addr) tlb_remove_page((tlb), (pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
#define check_pgt_cache() do { } while (0)
diff --git a/arch/openrisc/kernel/entry.S b/arch/openrisc/kernel/entry.S
index 690d55272ba6..0c826ad6e994 100644
--- a/arch/openrisc/kernel/entry.S
+++ b/arch/openrisc/kernel/entry.S
@@ -277,12 +277,6 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
l.addi r3,r1,0 // pt_regs
/* r4 set be EXCEPTION_HANDLE */ // effective address of fault
- /*
- * __PHX__: TODO
- *
- * all this can be written much simpler. look at
- * DTLB miss handler in the CONFIG_GUARD_PROTECTED_CORE part
- */
#ifdef CONFIG_OPENRISC_NO_SPR_SR_DSX
l.lwz r6,PT_PC(r3) // address of an offending insn
l.lwz r6,0(r6) // instruction that caused pf
@@ -314,7 +308,7 @@ EXCEPTION_ENTRY(_data_page_fault_handler)
#else
- l.lwz r6,PT_SR(r3) // SR
+ l.mfspr r6,r0,SPR_SR // SR
l.andi r6,r6,SPR_SR_DSX // check for delay slot exception
l.sfne r6,r0 // exception happened in delay slot
l.bnf 7f
diff --git a/arch/openrisc/kernel/head.S b/arch/openrisc/kernel/head.S
index fb02b2a1d6f2..9fc6b60140f0 100644
--- a/arch/openrisc/kernel/head.S
+++ b/arch/openrisc/kernel/head.S
@@ -210,8 +210,7 @@
* r4 - EEAR exception EA
* r10 - current pointing to current_thread_info struct
* r12 - syscall 0, since we didn't come from syscall
- * r13 - temp it actually contains new SR, not needed anymore
- * r31 - handler address of the handler we'll jump to
+ * r30 - handler address of the handler we'll jump to
*
* handler has to save remaining registers to the exception
* ksp frame *before* tainting them!
@@ -244,6 +243,7 @@
/* r1 is KSP, r30 is __pa(KSP) */ ;\
tophys (r30,r1) ;\
l.sw PT_GPR12(r30),r12 ;\
+ /* r4 use for tmp before EA */ ;\
l.mfspr r12,r0,SPR_EPCR_BASE ;\
l.sw PT_PC(r30),r12 ;\
l.mfspr r12,r0,SPR_ESR_BASE ;\
@@ -263,7 +263,10 @@
/* r12 == 1 if we come from syscall */ ;\
CLEAR_GPR(r12) ;\
/* ----- turn on MMU ----- */ ;\
- l.ori r30,r0,(EXCEPTION_SR) ;\
+ /* Carry DSX into exception SR */ ;\
+ l.mfspr r30,r0,SPR_SR ;\
+ l.andi r30,r30,SPR_SR_DSX ;\
+ l.ori r30,r30,(EXCEPTION_SR) ;\
l.mtspr r0,r30,SPR_ESR_BASE ;\
/* r30: EA address of handler */ ;\
LOAD_SYMBOL_2_GPR(r30,handler) ;\
diff --git a/arch/openrisc/kernel/traps.c b/arch/openrisc/kernel/traps.c
index fac246e6f37a..d8981cbb852a 100644
--- a/arch/openrisc/kernel/traps.c
+++ b/arch/openrisc/kernel/traps.c
@@ -300,7 +300,7 @@ static inline int in_delay_slot(struct pt_regs *regs)
return 0;
}
#else
- return regs->sr & SPR_SR_DSX;
+ return mfspr(SPR_SR) & SPR_SR_DSX;
#endif
}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c480770fabcd..17526bebcbd2 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -244,11 +244,11 @@ config PARISC_PAGE_SIZE_4KB
config PARISC_PAGE_SIZE_16KB
bool "16KB"
- depends on PA8X00
+ depends on PA8X00 && BROKEN
config PARISC_PAGE_SIZE_64KB
bool "64KB"
- depends on PA8X00
+ depends on PA8X00 && BROKEN
endchoice
@@ -347,7 +347,7 @@ config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
depends on SMP
- default "32"
+ default "4"
endmenu
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index 714284ea6cc2..5ce030266e7d 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -65,10 +65,6 @@ endif
# kernel.
cflags-y += -mdisable-fpregs
-# Without this, "ld -r" results in .text sections that are too big
-# (> 0x40000) for branches to reach stubs.
-cflags-y += -ffunction-sections
-
# Use long jumps instead of long branches (needed if your linker fails to
# link a too big vmlinux executable). Not enabled for building modules.
ifdef CONFIG_MLONGCALLS
diff --git a/arch/parisc/include/asm/signal.h b/arch/parisc/include/asm/signal.h
index eeb5c8858663..715c96ba2ec8 100644
--- a/arch/parisc/include/asm/signal.h
+++ b/arch/parisc/include/asm/signal.h
@@ -21,14 +21,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-#ifndef __KERNEL__
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-#endif
-
#include <asm/sigcontext.h>
#endif /* !__ASSEMBLY */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 4872e77aa96b..dc77c5a51db7 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -364,8 +364,9 @@
#define __NR_preadv2 (__NR_Linux + 347)
#define __NR_pwritev2 (__NR_Linux + 348)
#define __NR_statx (__NR_Linux + 349)
+#define __NR_io_pgetevents (__NR_Linux + 350)
-#define __NR_Linux_syscalls (__NR_statx + 1)
+#define __NR_Linux_syscalls (__NR_io_pgetevents + 1)
#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/drivers.c b/arch/parisc/kernel/drivers.c
index e0e1c9775c32..5eb979d04b90 100644
--- a/arch/parisc/kernel/drivers.c
+++ b/arch/parisc/kernel/drivers.c
@@ -154,17 +154,14 @@ int register_parisc_driver(struct parisc_driver *driver)
{
/* FIXME: we need this because apparently the sti
* driver can be registered twice */
- if(driver->drv.name) {
- printk(KERN_WARNING
- "BUG: skipping previously registered driver %s\n",
- driver->name);
+ if (driver->drv.name) {
+ pr_warn("BUG: skipping previously registered driver %s\n",
+ driver->name);
return 1;
}
if (!driver->probe) {
- printk(KERN_WARNING
- "BUG: driver %s has no probe routine\n",
- driver->name);
+ pr_warn("BUG: driver %s has no probe routine\n", driver->name);
return 1;
}
@@ -491,12 +488,9 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
dev = create_parisc_device(mod_path);
if (dev->id.hw_type != HPHW_FAULTY) {
- printk(KERN_ERR "Two devices have hardware path [%s]. "
- "IODC data for second device: "
- "%02x%02x%02x%02x%02x%02x\n"
- "Rearranging GSC cards sometimes helps\n",
- parisc_pathname(dev), iodc_data[0], iodc_data[1],
- iodc_data[3], iodc_data[4], iodc_data[5], iodc_data[6]);
+ pr_err("Two devices have hardware path [%s]. IODC data for second device: %7phN\n"
+ "Rearranging GSC cards sometimes helps\n",
+ parisc_pathname(dev), iodc_data);
return NULL;
}
@@ -528,8 +522,7 @@ alloc_pa_dev(unsigned long hpa, struct hardware_path *mod_path)
* the keyboard controller
*/
if ((hpa & 0xfff) == 0 && insert_resource(&iomem_resource, &dev->hpa))
- printk("Unable to claim HPA %lx for device %s\n",
- hpa, name);
+ pr_warn("Unable to claim HPA %lx for device %s\n", hpa, name);
return dev;
}
@@ -875,7 +868,7 @@ static void print_parisc_device(struct parisc_device *dev)
static int count;
print_pa_hwpath(dev, hw_path);
- printk(KERN_INFO "%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
+ pr_info("%d. %s at 0x%px [%s] { %d, 0x%x, 0x%.3x, 0x%.5x }",
++count, dev->name, (void*) dev->hpa.start, hw_path, dev->id.hw_type,
dev->id.hversion_rev, dev->id.hversion, dev->id.sversion);
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 6308749359e4..fe3f2a49d2b1 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -445,6 +445,7 @@
ENTRY_COMP(preadv2)
ENTRY_COMP(pwritev2)
ENTRY_SAME(statx)
+ ENTRY_COMP(io_pgetevents) /* 350 */
.ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b))
diff --git a/arch/parisc/kernel/unwind.c b/arch/parisc/kernel/unwind.c
index 143f90e2f9f3..2ef83d78eec4 100644
--- a/arch/parisc/kernel/unwind.c
+++ b/arch/parisc/kernel/unwind.c
@@ -25,7 +25,7 @@
/* #define DEBUG 1 */
#ifdef DEBUG
-#define dbg(x...) printk(x)
+#define dbg(x...) pr_debug(x)
#else
#define dbg(x...)
#endif
@@ -182,7 +182,7 @@ int __init unwind_init(void)
start = (long)&__start___unwind[0];
stop = (long)&__stop___unwind[0];
- printk("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
+ dbg("unwind_init: start = 0x%lx, end = 0x%lx, entries = %lu\n",
start, stop,
(stop - start) / sizeof(struct unwind_table_entry));
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index bd06a3ccda31..fb96206de317 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -243,7 +243,9 @@ endif
cpu-as-$(CONFIG_4xx) += -Wa,-m405
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
+cpu-as-$(CONFIG_E500) += -Wa,-me500
cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
KBUILD_CFLAGS += $(cpu-as-y)
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 6a6673907e45..82e44b1a00ae 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -108,6 +108,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
}
#define check_pgt_cache() do { } while (0)
+#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -137,7 +138,6 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_BOOK3S_32_PGALLOC_H */
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index af5f2baac80f..a069dfcac9a9 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -49,6 +49,27 @@ static inline int hugepd_ok(hugepd_t hpd)
}
#define is_hugepd(hpd) (hugepd_ok(hpd))
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX \
+ (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+ switch (index) {
+ case H_16M_CACHE_INDEX:
+ return HTLB_16M_INDEX;
+ case H_16G_CACHE_INDEX:
+ return HTLB_16G_INDEX;
+ default:
+ BUG();
+ }
+ /* should not reach */
+}
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index fb4b3ba52339..d7ee249d6890 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -45,8 +45,17 @@ static inline int hugepd_ok(hugepd_t hpd)
{
return 0;
}
+
#define is_hugepd(pdep) 0
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+ BUG();
+}
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 63cee159022b..42aafba7a308 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -287,6 +287,11 @@ enum pgtable_index {
PMD_INDEX,
PUD_INDEX,
PGD_INDEX,
+ /*
+ * Below are used with 4k page size and hugetlb
+ */
+ HTLB_16M_INDEX,
+ HTLB_16G_INDEX,
};
extern unsigned long __vmalloc_start;
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h
index 896efa559996..79d570cbf332 100644
--- a/arch/powerpc/include/asm/mmu_context.h
+++ b/arch/powerpc/include/asm/mmu_context.h
@@ -35,9 +35,9 @@ extern struct mm_iommu_table_group_mem_t *mm_iommu_lookup_rm(
extern struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
unsigned long ua, unsigned long entries);
extern long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa);
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa);
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa);
extern long mm_iommu_mapped_inc(struct mm_iommu_table_group_mem_t *mem);
extern void mm_iommu_mapped_dec(struct mm_iommu_table_group_mem_t *mem);
#endif
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 0f571e0ebca1..bd9ba8defd72 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -8,7 +8,7 @@ extern void arch_touch_nmi_watchdog(void);
static inline void arch_touch_nmi_watchdog(void) {}
#endif
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 1707781d2f20..8825953c225b 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -109,6 +109,7 @@ static inline void pgtable_free(void *table, unsigned index_size)
}
#define check_pgt_cache() do { } while (0)
+#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
@@ -139,7 +140,6 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(table);
pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0e693f322cb2..e2d62d033708 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -141,6 +141,7 @@ static inline void pgtable_free(void *table, int shift)
}
}
+#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index cfcf6a874cfa..01b5171ea189 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -393,3 +393,4 @@ SYSCALL(pkey_alloc)
SYSCALL(pkey_free)
SYSCALL(pkey_mprotect)
SYSCALL(rseq)
+COMPAT_SYS(io_pgetevents)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1e9708632dce..c19379f0a32e 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 388
+#define NR_syscalls 389
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index ac5ba55066dd..985534d0b448 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -399,5 +399,6 @@
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
#define __NR_rseq 387
+#define __NR_io_pgetevents 388
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 4be1c0de9406..96dd3d871986 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -711,7 +711,8 @@ static __init void cpufeatures_cpu_quirks(void)
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
- } else /* DD2.1 and up have DD2_1 */
+ } else if ((version & 0xffff0000) == 0x004e0000)
+ /* DD2.1 and up have DD2_1 */
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
if ((version & 0xffff0000) == 0x004e0000) {
diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S
index e734f6e45abc..689306118b48 100644
--- a/arch/powerpc/kernel/idle_book3s.S
+++ b/arch/powerpc/kernel/idle_book3s.S
@@ -144,7 +144,9 @@ power9_restore_additional_sprs:
mtspr SPRN_MMCR1, r4
ld r3, STOP_MMCR2(r13)
+ ld r4, PACA_SPRG_VDSO(r13)
mtspr SPRN_MMCR2, r3
+ mtspr SPRN_SPRG3, r4
blr
/*
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 4f861055a852..d63b488d34d7 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -285,9 +285,6 @@ pci_bus_to_hose(int bus)
* Note that the returned IO or memory base is a physical address
*/
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
SYSCALL_DEFINE3(pciconfig_iobase, long, which,
unsigned long, bus, unsigned long, devfn)
{
@@ -313,4 +310,3 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which,
return result;
}
-#pragma GCC diagnostic pop
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 812171c09f42..dff28f903512 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -203,9 +203,6 @@ void pcibios_setup_phb_io_space(struct pci_controller *hose)
#define IOBASE_ISA_IO 3
#define IOBASE_ISA_MEM 4
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
unsigned long, in_devfn)
{
@@ -259,7 +256,6 @@ SYSCALL_DEFINE3(pciconfig_iobase, long, which, unsigned long, in_bus,
return -EOPNOTSUPP;
}
-#pragma GCC diagnostic pop
#ifdef CONFIG_NUMA
int pcibus_to_node(struct pci_bus *bus)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 7fb9f83dcde8..8afd146bc9c7 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1051,9 +1051,6 @@ struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
}
/* We assume to be passed big endian arguments */
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
{
struct rtas_args args;
@@ -1140,7 +1137,6 @@ SYSCALL_DEFINE1(rtas, struct rtas_args __user *, uargs)
return 0;
}
-#pragma GCC diagnostic pop
/*
* Call early during boot, before mem init, to retrieve the RTAS
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 62b1a40d8957..40b44bb53a4e 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -701,11 +701,18 @@ static int ppc_panic_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
/*
+ * panic does a local_irq_disable, but we really
+ * want interrupts to be hard disabled.
+ */
+ hard_irq_disable();
+
+ /*
* If firmware-assisted dump has been registered then trigger
* firmware-assisted dump and let firmware handle everything else.
*/
crash_fadump(NULL, ptr);
- ppc_md.panic(ptr); /* May not return */
+ if (ppc_md.panic)
+ ppc_md.panic(ptr); /* May not return */
return NOTIFY_DONE;
}
@@ -716,7 +723,8 @@ static struct notifier_block ppc_panic_block = {
void __init setup_panic(void)
{
- if (!ppc_md.panic)
+ /* PPC64 always does a hard irq disable in its panic handler */
+ if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
return;
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7a7ce8ad455e..225bc5f91049 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -387,6 +387,14 @@ void early_setup_secondary(void)
#endif /* CONFIG_SMP */
+void panic_smp_self_stop(void)
+{
+ hard_irq_disable();
+ spin_begin();
+ while (1)
+ spin_cpu_relax();
+}
+
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 17fe4339ba59..b3e8db376ecd 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -134,7 +134,7 @@ static void do_signal(struct task_struct *tsk)
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(tsk, tsk->thread.regs);
- rseq_signal_deliver(tsk->thread.regs);
+ rseq_signal_deliver(&ksig, tsk->thread.regs);
if (is32) {
if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
user_enter();
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 5eedbb282d42..e6474a45cef5 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -1038,9 +1038,6 @@ static int do_setcontext_tm(struct ucontext __user *ucp,
}
#endif
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
struct ucontext __user *, new_ctx, int, ctx_size)
@@ -1134,7 +1131,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
set_thread_flag(TIF_RESTOREALL);
return 0;
}
-#pragma GCC diagnostic pop
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
@@ -1231,9 +1227,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
return 0;
}
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
#ifdef CONFIG_PPC32
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
int, ndbg, struct sig_dbg_op __user *, dbg)
@@ -1337,7 +1330,6 @@ SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
return 0;
}
#endif
-#pragma GCC diagnostic pop
/*
* OK, we're invoking a handler
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d42b60020389..83d51bf586c7 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -625,9 +625,6 @@ static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
/*
* Handle {get,set,swap}_context operations
*/
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
struct ucontext __user *, new_ctx, long, ctx_size)
{
@@ -693,7 +690,6 @@ SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
set_thread_flag(TIF_RESTOREALL);
return 0;
}
-#pragma GCC diagnostic pop
/*
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5eadfffabe35..4794d6b4f4d2 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -600,9 +600,6 @@ static void nmi_stop_this_cpu(struct pt_regs *regs)
nmi_ipi_busy_count--;
nmi_ipi_unlock();
- /* Remove this CPU */
- set_cpu_online(smp_processor_id(), false);
-
spin_begin();
while (1)
spin_cpu_relax();
@@ -617,9 +614,6 @@ void smp_send_stop(void)
static void stop_this_cpu(void *dummy)
{
- /* Remove this CPU */
- set_cpu_online(smp_processor_id(), false);
-
hard_irq_disable();
spin_begin();
while (1)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 07e97f289c52..e2c50b55138f 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -196,7 +196,7 @@ save_stack_trace_tsk_reliable(struct task_struct *tsk,
EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@ void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index 083fa06962fd..466216506eb2 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -62,9 +62,6 @@ out:
return ret;
}
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
SYSCALL_DEFINE6(mmap2, unsigned long, addr, size_t, len,
unsigned long, prot, unsigned long, flags,
unsigned long, fd, unsigned long, pgoff)
@@ -78,7 +75,6 @@ SYSCALL_DEFINE6(mmap, unsigned long, addr, size_t, len,
{
return do_mmap2(addr, len, prot, flags, fd, offset, PAGE_SHIFT);
}
-#pragma GCC diagnostic pop
#ifdef CONFIG_PPC32
/*
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index d066e37551ec..8c456fa691a5 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -449,7 +449,7 @@ long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
return H_TOO_HARD;
- if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, &hpa)))
+ if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
return H_HARDWARE;
if (mm_iommu_mapped_inc(mem))
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 925fc316a104..5b298f5a1a14 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -279,7 +279,8 @@ static long kvmppc_rm_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
if (!mem)
return H_TOO_HARD;
- if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, &hpa)))
+ if (WARN_ON_ONCE_RM(mm_iommu_ua_to_hpa_rm(mem, ua, tbl->it_page_shift,
+ &hpa)))
return H_HARDWARE;
pua = (void *) vmalloc_to_phys(pua);
@@ -469,7 +470,8 @@ long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
mem = mm_iommu_lookup_rm(vcpu->kvm->mm, ua, IOMMU_PAGE_SIZE_4K);
if (mem)
- prereg = mm_iommu_ua_to_hpa_rm(mem, ua, &tces) == 0;
+ prereg = mm_iommu_ua_to_hpa_rm(mem, ua,
+ IOMMU_PAGE_SHIFT_4K, &tces) == 0;
}
if (!prereg) {
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c5f479c5c00..8a9a49c13865 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -337,7 +337,8 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
else
- pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+ pgtable_free_tlb(tlb, hugepte,
+ get_hugepd_cache_index(pdshift - shift));
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
diff --git a/arch/powerpc/mm/mmu_context_iommu.c b/arch/powerpc/mm/mmu_context_iommu.c
index abb43646927a..a4ca57612558 100644
--- a/arch/powerpc/mm/mmu_context_iommu.c
+++ b/arch/powerpc/mm/mmu_context_iommu.c
@@ -19,6 +19,7 @@
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <asm/mmu_context.h>
+#include <asm/pte-walk.h>
static DEFINE_MUTEX(mem_list_mutex);
@@ -27,6 +28,7 @@ struct mm_iommu_table_group_mem_t {
struct rcu_head rcu;
unsigned long used;
atomic64_t mapped;
+ unsigned int pageshift;
u64 ua; /* userspace address */
u64 entries; /* number of entries in hpas[] */
u64 *hpas; /* vmalloc'ed */
@@ -125,6 +127,8 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
{
struct mm_iommu_table_group_mem_t *mem;
long i, j, ret = 0, locked_entries = 0;
+ unsigned int pageshift;
+ unsigned long flags;
struct page *page = NULL;
mutex_lock(&mem_list_mutex);
@@ -159,6 +163,12 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
goto unlock_exit;
}
+ /*
+ * As a starting point for the maximum page size calculation,
+ * we use the natural alignment of @ua and @entries to allow IOMMU
+ * pages smaller than huge pages but still bigger than PAGE_SIZE.
+ */
+ mem->pageshift = __ffs(ua | (entries << PAGE_SHIFT));
mem->hpas = vzalloc(array_size(entries, sizeof(mem->hpas[0])));
if (!mem->hpas) {
kfree(mem);
@@ -199,6 +209,23 @@ long mm_iommu_get(struct mm_struct *mm, unsigned long ua, unsigned long entries,
}
}
populate:
+ pageshift = PAGE_SHIFT;
+ if (PageCompound(page)) {
+ pte_t *pte;
+ struct page *head = compound_head(page);
+ unsigned int compshift = compound_order(head);
+
+ local_irq_save(flags); /* disables as well */
+ pte = find_linux_pte(mm->pgd, ua, NULL, &pageshift);
+ local_irq_restore(flags);
+
+ /* Double check it is still the same pinned page */
+ if (pte && pte_page(*pte) == head &&
+ pageshift == compshift)
+ pageshift = max_t(unsigned int, pageshift,
+ PAGE_SHIFT);
+ }
+ mem->pageshift = min(mem->pageshift, pageshift);
mem->hpas[i] = page_to_pfn(page) << PAGE_SHIFT;
}
@@ -349,7 +376,7 @@ struct mm_iommu_table_group_mem_t *mm_iommu_find(struct mm_struct *mm,
EXPORT_SYMBOL_GPL(mm_iommu_find);
long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa)
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
u64 *va = &mem->hpas[entry];
@@ -357,6 +384,9 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
if (entry >= mem->entries)
return -EFAULT;
+ if (pageshift > mem->pageshift)
+ return -EFAULT;
+
*hpa = *va | (ua & ~PAGE_MASK);
return 0;
@@ -364,7 +394,7 @@ long mm_iommu_ua_to_hpa(struct mm_iommu_table_group_mem_t *mem,
EXPORT_SYMBOL_GPL(mm_iommu_ua_to_hpa);
long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
- unsigned long ua, unsigned long *hpa)
+ unsigned long ua, unsigned int pageshift, unsigned long *hpa)
{
const long entry = (ua - mem->ua) >> PAGE_SHIFT;
void *va = &mem->hpas[entry];
@@ -373,6 +403,9 @@ long mm_iommu_ua_to_hpa_rm(struct mm_iommu_table_group_mem_t *mem,
if (entry >= mem->entries)
return -EFAULT;
+ if (pageshift > mem->pageshift)
+ return -EFAULT;
+
pa = (void *) vmalloc_to_phys(va);
if (!pa)
return -EFAULT;
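The pageshift logic added above starts from the natural alignment of @ua and the region size, then clamps mem->pageshift per pinned page so the new checks in mm_iommu_ua_to_hpa()/_rm() can reject IOMMU page sizes larger than what was actually pinned. A minimal userspace sketch of the starting-point calculation (illustration only: the helper name is made up, PAGE_SHIFT is assumed to be 12, and __builtin_ctzl stands in for the kernel's __ffs):

#include <stdio.h>

#define PAGE_SHIFT 12

/* lowest set bit of (address | size) == natural alignment of the region */
static unsigned int region_max_pageshift(unsigned long ua, unsigned long entries)
{
        return __builtin_ctzl(ua | (entries << PAGE_SHIFT));
}

int main(void)
{
        /* 1 GiB-aligned VA with 512 x 4K entries (2 MiB) => shift 21 (2 MiB) */
        printf("%u\n", region_max_pageshift(0x40000000UL, 512));
        return 0;
}

Each pinned page can then only lower that bound via the min() at the end of the loop.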
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca45c93a..4afbfbb64bfd 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -409,6 +409,18 @@ static inline void pgtable_free(void *table, int index)
case PUD_INDEX:
kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+ /* 16M hugepd directory at pud level */
+ case HTLB_16M_INDEX:
+ BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+ kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+ break;
+ /* 16G hugepd directory at the pgd level */
+ case HTLB_16G_INDEX:
+ BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+ kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+ break;
+#endif
/* We don't free pgd table via RCU callback */
default:
BUG();
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index 75cb646a79c3..9d16ee251fc0 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -186,9 +186,6 @@ static void subpage_mark_vma_nohuge(struct mm_struct *mm, unsigned long addr,
* in a 2-bit field won't allow writes to a page that is otherwise
* write-protected.
*/
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wpragmas"
-#pragma GCC diagnostic ignored "-Wattribute-alias"
SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
unsigned long, len, u32 __user *, map)
{
@@ -272,4 +269,3 @@ SYSCALL_DEFINE3(subpage_prot, unsigned long, addr,
up_write(&mm->mmap_sem);
return err;
}
-#pragma GCC diagnostic pop
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 67a6e86d3e7e..1135b43a597c 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -689,22 +689,17 @@ EXPORT_SYMBOL(radix__flush_tlb_kernel_range);
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ bool flush_all_sizes)
{
- struct mm_struct *mm = vma->vm_mm;
unsigned long pid;
unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
bool local, full;
-#ifdef CONFIG_HUGETLB_PAGE
- if (is_vm_hugetlb_page(vma))
- return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
return;
@@ -738,37 +733,64 @@ is_local:
_tlbie_pid(pid, RIC_FLUSH_TLB);
}
} else {
- bool hflush = false;
+ bool hflush = flush_all_sizes;
+ bool gflush = flush_all_sizes;
unsigned long hstart, hend;
+ unsigned long gstart, gend;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
- hend = end >> HPAGE_PMD_SHIFT;
- if (hstart < hend) {
- hstart <<= HPAGE_PMD_SHIFT;
- hend <<= HPAGE_PMD_SHIFT;
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
hflush = true;
+
+ if (hflush) {
+ hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+ hend = end & PMD_MASK;
+ if (hstart == hend)
+ hflush = false;
+ }
+
+ if (gflush) {
+ gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+ gend = end & PUD_MASK;
+ if (gstart == gend)
+ gflush = false;
}
-#endif
asm volatile("ptesync": : :"memory");
if (local) {
__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbiel_va_range(hstart, hend, pid,
- HPAGE_PMD_SIZE, MMU_PAGE_2M);
+ PMD_SIZE, MMU_PAGE_2M);
+ if (gflush)
+ __tlbiel_va_range(gstart, gend, pid,
+ PUD_SIZE, MMU_PAGE_1G);
asm volatile("ptesync": : :"memory");
} else {
__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbie_va_range(hstart, hend, pid,
- HPAGE_PMD_SIZE, MMU_PAGE_2M);
+ PMD_SIZE, MMU_PAGE_2M);
+ if (gflush)
+ __tlbie_va_range(gstart, gend, pid,
+ PUD_SIZE, MMU_PAGE_1G);
fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
}
preempt_enable();
}
+
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ if (is_vm_hugetlb_page(vma))
+ return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+ __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
EXPORT_SYMBOL(radix__flush_tlb_range);
static int radix_get_mmu_psize(int page_size)
@@ -837,6 +859,8 @@ void radix__tlb_flush(struct mmu_gather *tlb)
int psize = 0;
struct mm_struct *mm = tlb->mm;
int page_size = tlb->page_size;
+ unsigned long start = tlb->start;
+ unsigned long end = tlb->end;
/*
* if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@ void radix__tlb_flush(struct mmu_gather *tlb)
*/
if (tlb->fullmm) {
__flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+ } else if (mm_tlb_flush_nested(mm)) {
+ /*
+ * If there is a concurrent invalidation that is clearing ptes,
+ * then it's possible this invalidation will miss one of those
+ * cleared ptes and miss flushing the TLB. If this invalidate
+ * returns before the other one flushes TLBs, that can result
+ * in it returning while there are still valid TLBs inside the
+ * range to be invalidated.
+ *
+ * See mm/memory.c:tlb_finish_mmu() for more details.
+ *
+ * The solution to this is to ensure the entire range is always
+ * flushed here. The problem for powerpc is that the flushes
+ * are page size specific, so this "forced flush" would not
+ * do the right thing if there is a mix of page sizes in
+ * the range to be invalidated. So use __flush_tlb_range,
+ * which invalidates all possible page sizes in the range.
+ *
+ * A PWC flush is probably not required because the core code
+ * shouldn't free page tables in this path, but accounting
+ * for the possibility makes us a bit more robust.
+ *
+ * need_flush_all is an uncommon case because page table
+ * teardown should be done with exclusive locks held (but
+ * after locks are dropped another invalidate could come
+ * in); it could be optimized further if necessary.
+ */
+ if (!tlb->need_flush_all)
+ __radix__flush_tlb_range(mm, start, end, true);
+ else
+ radix__flush_all_mm(mm);
+#endif
} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
if (!tlb->need_flush_all)
radix__flush_tlb_mm(mm);
else
radix__flush_all_mm(mm);
} else {
- unsigned long start = tlb->start;
- unsigned long end = tlb->end;
-
if (!tlb->need_flush_all)
radix__flush_tlb_range_psize(mm, start, end, psize);
else
@@ -1043,6 +1097,8 @@ extern void radix_kvm_prefetch_workaround(struct mm_struct *mm)
for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
if (sib == cpu)
continue;
+ if (!cpu_possible(sib))
+ continue;
if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
flush = true;
}
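The reworked range flush above only issues 2M/1G invalidations for the PMD- and PUD-aligned sub-range of [start, end), clearing hflush/gflush when no fully aligned block fits. A small standalone sketch of that rounding, with example addresses (illustration only, not kernel code):

#include <stdio.h>

#define PMD_SIZE (1UL << 21)
#define PMD_MASK (~(PMD_SIZE - 1))

int main(void)
{
        unsigned long start = 0x10001000UL, end = 0x10600000UL;   /* example range */
        unsigned long hstart = (start + PMD_SIZE - 1) & PMD_MASK; /* round up */
        unsigned long hend = end & PMD_MASK;                      /* round down */

        if (hstart != hend)     /* mirrors the hstart == hend check above */
                printf("2M-aligned sub-range: [0x%lx, 0x%lx)\n", hstart, hend);
        return 0;
}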
diff --git a/arch/powerpc/net/bpf_jit_comp64.c b/arch/powerpc/net/bpf_jit_comp64.c
index 380cbf9a40d9..c0a9bcd28356 100644
--- a/arch/powerpc/net/bpf_jit_comp64.c
+++ b/arch/powerpc/net/bpf_jit_comp64.c
@@ -286,6 +286,7 @@ static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
u64 imm64;
u8 *func;
u32 true_cond;
+ u32 tmp_idx;
/*
* addrs[] maps a BPF bytecode address into a real offset from
@@ -637,11 +638,7 @@ emit_clear:
case BPF_STX | BPF_XADD | BPF_W:
/* Get EA into TMP_REG_1 */
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
- /* error if EA is not word-aligned */
- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x03);
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + 12);
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_JMP(exit_addr);
+ tmp_idx = ctx->idx * 4;
/* load value from memory into TMP_REG_2 */
PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
/* add value from src_reg into this */
@@ -649,32 +646,16 @@ emit_clear:
/* store result back */
PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
/* we're done if this succeeded */
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
- /* otherwise, let's try once more */
- PPC_BPF_LWARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- PPC_BPF_STWCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- /* exit if the store was not successful */
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_BCC(COND_NE, exit_addr);
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/* *(u64 *)(dst + off) += src */
case BPF_STX | BPF_XADD | BPF_DW:
PPC_ADDI(b2p[TMP_REG_1], dst_reg, off);
- /* error if EA is not doubleword-aligned */
- PPC_ANDI(b2p[TMP_REG_2], b2p[TMP_REG_1], 0x07);
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (3*4));
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_JMP(exit_addr);
- PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
- PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
- PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- PPC_BCC_SHORT(COND_EQ, (ctx->idx * 4) + (7*4));
+ tmp_idx = ctx->idx * 4;
PPC_BPF_LDARX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1], 0);
PPC_ADD(b2p[TMP_REG_2], b2p[TMP_REG_2], src_reg);
PPC_BPF_STDCX(b2p[TMP_REG_2], 0, b2p[TMP_REG_1]);
- PPC_LI(b2p[BPF_REG_0], 0);
- PPC_BCC(COND_NE, exit_addr);
+ PPC_BCC_SHORT(COND_NE, tmp_idx);
break;
/*
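The BPF_XADD change above drops the alignment trap and the single bounded retry: on a failed stwcx. the JIT now branches back to the lwarx recorded at tmp_idx, i.e. a plain retry-until-success atomic add. A conceptual sketch of that pattern using C11 atomics (illustration only; the JIT emits lwarx/stwcx., not this code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static void xadd32(_Atomic uint32_t *dst, uint32_t src)
{
        uint32_t old = atomic_load_explicit(dst, memory_order_relaxed);

        /* a failed "store conditional" loops back, like the COND_NE branch to tmp_idx */
        while (!atomic_compare_exchange_weak_explicit(dst, &old, old + src,
                                                      memory_order_relaxed,
                                                      memory_order_relaxed))
                ;
}

int main(void)
{
        _Atomic uint32_t v = 40;

        xadd32(&v, 2);
        printf("%u\n", (unsigned)atomic_load(&v));      /* 42 */
        return 0;
}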
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c
index 7c968e46736f..12e6e4d30602 100644
--- a/arch/powerpc/platforms/powermac/time.c
+++ b/arch/powerpc/platforms/powermac/time.c
@@ -42,7 +42,11 @@
#define DBG(x...)
#endif
-/* Apparently the RTC stores seconds since 1 Jan 1904 */
+/*
+ * Offset between Unix time (1970-based) and Mac time (1904-based). Cuda and PMU
+ * times wrap in 2040. If we need to handle later times, the read_time functions
+ * need to be changed to interpret wrapped times as post-2040.
+ */
#define RTC_OFFSET 2082844800
/*
@@ -97,8 +101,11 @@ static time64_t cuda_get_time(void)
if (req.reply_len != 7)
printk(KERN_ERR "cuda_get_time: got %d byte reply\n",
req.reply_len);
- now = (req.reply[3] << 24) + (req.reply[4] << 16)
- + (req.reply[5] << 8) + req.reply[6];
+ now = (u32)((req.reply[3] << 24) + (req.reply[4] << 16) +
+ (req.reply[5] << 8) + req.reply[6]);
+ /* it's either after year 2040, or the RTC has gone backwards */
+ WARN_ON(now < RTC_OFFSET);
+
return now - RTC_OFFSET;
}
@@ -106,10 +113,10 @@ static time64_t cuda_get_time(void)
static int cuda_set_rtc_time(struct rtc_time *tm)
{
- time64_t nowtime;
+ u32 nowtime;
struct adb_request req;
- nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+ nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
if (cuda_request(&req, NULL, 6, CUDA_PACKET, CUDA_SET_TIME,
nowtime >> 24, nowtime >> 16, nowtime >> 8,
nowtime) < 0)
@@ -140,8 +147,12 @@ static time64_t pmu_get_time(void)
if (req.reply_len != 4)
printk(KERN_ERR "pmu_get_time: got %d byte reply from PMU\n",
req.reply_len);
- now = (req.reply[0] << 24) + (req.reply[1] << 16)
- + (req.reply[2] << 8) + req.reply[3];
+ now = (u32)((req.reply[0] << 24) + (req.reply[1] << 16) +
+ (req.reply[2] << 8) + req.reply[3]);
+
+ /* it's either after year 2040, or the RTC has gone backwards */
+ WARN_ON(now < RTC_OFFSET);
+
return now - RTC_OFFSET;
}
@@ -149,10 +160,10 @@ static time64_t pmu_get_time(void)
static int pmu_set_rtc_time(struct rtc_time *tm)
{
- time64_t nowtime;
+ u32 nowtime;
struct adb_request req;
- nowtime = rtc_tm_to_time64(tm) + RTC_OFFSET;
+ nowtime = lower_32_bits(rtc_tm_to_time64(tm) + RTC_OFFSET);
if (pmu_request(&req, NULL, 5, PMU_SET_RTC, nowtime >> 24,
nowtime >> 16, nowtime >> 8, nowtime) < 0)
return -ENXIO;
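RTC_OFFSET above is the number of seconds between the Mac epoch (1904) and the Unix epoch (1970); the Cuda/PMU counters are 32-bit, so they wrap 2^32 seconds after 1904, early in 2040, which is what the new WARN_ON and lower_32_bits() handling are about. A small sketch of the conversion and the wrap point (illustration only; the helper name is made up):

#include <stdio.h>
#include <stdint.h>

#define RTC_OFFSET 2082844800UL         /* 1904-01-01 .. 1970-01-01, in seconds */

static int64_t mac_rtc_to_unix(uint32_t mac_seconds)
{
        /* values below RTC_OFFSET are either wrapped (post-2040) or bogus */
        return (int64_t)mac_seconds - (int64_t)RTC_OFFSET;
}

int main(void)
{
        /* last second the 32-bit counter can represent: early February 2040 */
        printf("%lld seconds after the Unix epoch\n",
               (long long)mac_rtc_to_unix(UINT32_MAX));
        return 0;
}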
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 47166ad2a669..196978733e64 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2734,7 +2734,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
{
int nr, dotted;
unsigned long first_adr;
- unsigned long inst, last_inst = 0;
+ unsigned int inst, last_inst = 0;
unsigned char val[4];
dotted = 0;
@@ -2758,7 +2758,7 @@ generic_inst_dump(unsigned long adr, long count, int praddr,
dotted = 0;
last_inst = inst;
if (praddr)
- printf(REG" %.8lx", adr, inst);
+ printf(REG" %.8x", adr, inst);
printf("\t");
dump_func(inst, adr);
printf("\n");
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index f12680c9b947..4764fdeb4f1f 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -107,6 +107,7 @@ config ARCH_RV32I
select GENERIC_LIB_ASHLDI3
select GENERIC_LIB_ASHRDI3
select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_UCMPDI2
config ARCH_RV64I
bool "RV64I"
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index 5cae4c30cd8e..1e0dfc36aab9 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -21,8 +21,13 @@ typedef struct user_regs_struct elf_gregset_t;
typedef union __riscv_fp_state elf_fpregset_t;
-#define ELF_RISCV_R_SYM(r_info) ((r_info) >> 32)
-#define ELF_RISCV_R_TYPE(r_info) ((r_info) & 0xffffffff)
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info)
+#endif
/*
* RISC-V relocation types
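The ELF_RISCV_R_SYM/R_TYPE change above stops hard-coding the 64-bit r_info layout and defers to the generic ELF32/ELF64 macros, since the two ELF classes pack the symbol index and relocation type differently. A quick sketch of the two layouts (illustration only; these follow the standard <elf.h> definitions):

#include <stdio.h>
#include <stdint.h>

/* standard ELF r_info packing */
#define ELF64_R_SYM(i)  ((i) >> 32)
#define ELF64_R_TYPE(i) ((i) & 0xffffffff)
#define ELF32_R_SYM(i)  ((i) >> 8)
#define ELF32_R_TYPE(i) ((i) & 0xff)

int main(void)
{
        uint64_t r64 = ((uint64_t)7 << 32) | 2; /* symbol 7, type 2 */
        uint32_t r32 = (7U << 8) | 2;           /* symbol 7, type 2 */

        printf("ELF64: sym %llu type %llu, ELF32: sym %u type %u\n",
               (unsigned long long)ELF64_R_SYM(r64),
               (unsigned long long)ELF64_R_TYPE(r64),
               ELF32_R_SYM(r32), ELF32_R_TYPE(r32));
        return 0;
}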
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index b74cbfbce2d0..7bcdaed15703 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -16,10 +16,6 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
-#ifdef CONFIG_RISCV_INTC
-#include <linux/irqchip/irq-riscv-intc.h>
-#endif
-
void __init init_IRQ(void)
{
irqchip_init();
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 1d5e9b934b8c..3303ed2cd419 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -37,7 +37,7 @@ static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u32 imm12 = (offset & 0x1000) << (31 - 12);
u32 imm11 = (offset & 0x800) >> (11 - 7);
u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
@@ -50,7 +50,7 @@ static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u32 imm20 = (offset & 0x100000) << (31 - 20);
u32 imm19_12 = (offset & 0xff000);
u32 imm11 = (offset & 0x800) << (20 - 11);
@@ -63,7 +63,7 @@ static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u16 imm8 = (offset & 0x100) << (12 - 8);
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
u16 imm5 = (offset & 0x20) >> (5 - 2);
@@ -78,7 +78,7 @@ static int apply_r_riscv_rcv_branch_rela(struct module *me, u32 *location,
static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
u16 imm11 = (offset & 0x800) << (12 - 11);
u16 imm10 = (offset & 0x400) >> (10 - 8);
u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -96,7 +96,7 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 hi20;
if (offset != (s32)offset) {
@@ -178,7 +178,7 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 hi20;
/* Always emit the got entry */
@@ -200,7 +200,7 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 fill_v = offset;
u32 hi20, lo12;
@@ -227,7 +227,7 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
static int apply_r_riscv_call_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- s64 offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - (void *)location;
s32 fill_v = offset;
u32 hi20, lo12;
@@ -263,14 +263,14 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- *(u32 *)location += (*(u32 *)v);
+ *(u32 *)location += (u32)v;
return 0;
}
static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
Elf_Addr v)
{
- *(u32 *)location -= (*(u32 *)v);
+ *(u32 *)location -= (u32)v;
return 0;
}
@@ -347,7 +347,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int j;
for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
- u64 hi20_loc =
+ unsigned long hi20_loc =
sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[j].r_offset;
u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
@@ -360,12 +360,12 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf_Sym *hi20_sym =
(Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_RISCV_R_SYM(rel[j].r_info);
- u64 hi20_sym_val =
+ unsigned long hi20_sym_val =
hi20_sym->st_value
+ rel[j].r_addend;
/* Calculate lo12 */
- u64 offset = hi20_sym_val - hi20_loc;
+ size_t offset = hi20_sym_val - hi20_loc;
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
&& hi20_type == R_RISCV_GOT_HI20) {
offset = module_emit_got_entry(
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index ba3e80712797..9f82a7e34c64 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -50,7 +50,7 @@ static int riscv_gpr_set(struct task_struct *target,
struct pt_regs *regs;
regs = task_pt_regs(target);
- ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0, -1);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
return ret;
}
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index ee44a48faf79..f0d2070866d4 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -220,8 +220,3 @@ void __init setup_arch(char **cmdline_p)
riscv_fill_hwcap();
}
-static int __init riscv_device_init(void)
-{
- return of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-subsys_initcall_sync(riscv_device_init);
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index c77df8142be2..58a522f9bcc3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -28,7 +28,9 @@ static void __init zone_sizes_init(void)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
+#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init_nodes(max_zone_pfns);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index baed39772c84..8a1863d9ed53 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -140,7 +140,7 @@ config S390
select HAVE_FUNCTION_GRAPH_TRACER
select HAVE_FUNCTION_TRACER
select HAVE_FUTEX_CMPXCHG if FUTEX
- select HAVE_GCC_PLUGINS
+ select HAVE_GCC_PLUGINS if BROKEN
select HAVE_KERNEL_BZIP2
select HAVE_KERNEL_GZIP
select HAVE_KERNEL_LZ4
@@ -160,6 +160,7 @@ config S390
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING
select MODULES_USE_ELF_RELA
diff --git a/arch/s390/include/asm/css_chars.h b/arch/s390/include/asm/css_chars.h
index 0563fd3e8458..480bb02ccacd 100644
--- a/arch/s390/include/asm/css_chars.h
+++ b/arch/s390/include/asm/css_chars.h
@@ -6,36 +6,38 @@
struct css_general_char {
u64 : 12;
- u32 dynio : 1; /* bit 12 */
- u32 : 4;
- u32 eadm : 1; /* bit 17 */
- u32 : 23;
- u32 aif : 1; /* bit 41 */
- u32 : 3;
- u32 mcss : 1; /* bit 45 */
- u32 fcs : 1; /* bit 46 */
- u32 : 1;
- u32 ext_mb : 1; /* bit 48 */
- u32 : 7;
- u32 aif_tdd : 1; /* bit 56 */
- u32 : 1;
- u32 qebsm : 1; /* bit 58 */
- u32 : 2;
- u32 aiv : 1; /* bit 61 */
- u32 : 5;
- u32 aif_osa : 1; /* bit 67 */
- u32 : 12;
- u32 eadm_rf : 1; /* bit 80 */
- u32 : 1;
- u32 cib : 1; /* bit 82 */
- u32 : 5;
- u32 fcx : 1; /* bit 88 */
- u32 : 19;
- u32 alt_ssi : 1; /* bit 108 */
- u32 : 1;
- u32 narf : 1; /* bit 110 */
- u32 : 12;
- u32 util_str : 1;/* bit 123 */
+ u64 dynio : 1; /* bit 12 */
+ u64 : 4;
+ u64 eadm : 1; /* bit 17 */
+ u64 : 23;
+ u64 aif : 1; /* bit 41 */
+ u64 : 3;
+ u64 mcss : 1; /* bit 45 */
+ u64 fcs : 1; /* bit 46 */
+ u64 : 1;
+ u64 ext_mb : 1; /* bit 48 */
+ u64 : 7;
+ u64 aif_tdd : 1; /* bit 56 */
+ u64 : 1;
+ u64 qebsm : 1; /* bit 58 */
+ u64 : 2;
+ u64 aiv : 1; /* bit 61 */
+ u64 : 2;
+
+ u64 : 3;
+ u64 aif_osa : 1; /* bit 67 */
+ u64 : 12;
+ u64 eadm_rf : 1; /* bit 80 */
+ u64 : 1;
+ u64 cib : 1; /* bit 82 */
+ u64 : 5;
+ u64 fcx : 1; /* bit 88 */
+ u64 : 19;
+ u64 alt_ssi : 1; /* bit 108 */
+ u64 : 1;
+ u64 narf : 1; /* bit 110 */
+ u64 : 12;
+ u64 util_str : 1;/* bit 123 */
} __packed;
extern struct css_general_char css_general_characteristics;
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 607c5e9fba3d..2ce28bf0c5ec 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -183,3 +183,4 @@ COMPAT_SYSCALL_WRAP2(s390_guarded_storage, int, command, struct gs_cb *, gs_cb);
COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer);
COMPAT_SYSCALL_WRAP4(s390_sthyi, unsigned long, code, void __user *, info, u64 __user *, rc, unsigned long, flags);
COMPAT_SYSCALL_WRAP5(kexec_file_load, int, kernel_fd, int, initrd_fd, unsigned long, cmdline_len, const char __user *, cmdline_ptr, unsigned long, flags)
+COMPAT_SYSCALL_WRAP4(rseq, struct rseq __user *, rseq, u32, rseq_len, int, flags, u32, sig)
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index f03402efab4b..150130c897c3 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -357,6 +357,10 @@ ENTRY(system_call)
stg %r2,__PT_R2(%r11) # store return value
.Lsysc_return:
+#ifdef CONFIG_DEBUG_RSEQ
+ lgr %r2,%r11
+ brasl %r14,rseq_syscall
+#endif
LOCKDEP_SYS_EXIT
.Lsysc_tif:
TSTMSK __PT_FLAGS(%r11),_PIF_WORK
@@ -1265,7 +1269,7 @@ cleanup_critical:
jl 0f
clg %r9,BASED(.Lcleanup_table+104) # .Lload_fpu_regs_end
jl .Lcleanup_load_fpu_regs
-0: BR_EX %r14
+0: BR_EX %r14,%r11
.align 8
.Lcleanup_table:
@@ -1301,7 +1305,7 @@ cleanup_critical:
ni __SIE_PROG0C+3(%r9),0xfe # no longer in SIE
lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
larl %r9,sie_exit # skip forward to sie_exit
- BR_EX %r14
+ BR_EX %r14,%r11
#endif
.Lcleanup_system_call:
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 2d2960ab3e10..22f08245aa5d 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -498,7 +498,7 @@ void do_signal(struct pt_regs *regs)
}
/* No longer in a system call */
clear_pt_regs_flag(regs, PIF_SYSCALL);
-
+ rseq_signal_deliver(&ksig, regs);
if (is_compat_task())
handle_signal32(&ksig, oldset, regs);
else
@@ -537,4 +537,5 @@ void do_notify_resume(struct pt_regs *regs)
{
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
diff --git a/arch/s390/kernel/syscalls/syscall.tbl b/arch/s390/kernel/syscalls/syscall.tbl
index 8b210ead7956..022fc099b628 100644
--- a/arch/s390/kernel/syscalls/syscall.tbl
+++ b/arch/s390/kernel/syscalls/syscall.tbl
@@ -389,3 +389,5 @@
379 common statx sys_statx compat_sys_statx
380 common s390_sthyi sys_s390_sthyi compat_sys_s390_sthyi
381 common kexec_file_load sys_kexec_file_load compat_sys_kexec_file_load
+382 common io_pgetevents sys_io_pgetevents compat_sys_io_pgetevents
+383 common rseq sys_rseq compat_sys_rseq
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 84bd6329a88d..e3bd5627afef 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -252,6 +252,8 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
spin_unlock_bh(&mm->context.lock);
if (mask != 0)
return;
+ } else {
+ atomic_xor_bits(&page->_refcount, 3U << 24);
}
pgtable_page_dtor(page);
@@ -304,6 +306,8 @@ static void __tlb_remove_table(void *_table)
break;
/* fallthrough */
case 3: /* 4K page table with pgstes */
+ if (mask & 3)
+ atomic_xor_bits(&page->_refcount, 3 << 24);
pgtable_page_dtor(page);
__free_page(page);
break;
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index d2db8acb1a55..5f0234ec8038 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -1286,6 +1286,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *fp)
goto free_addrs;
}
if (bpf_jit_prog(&jit, fp)) {
+ bpf_jit_binary_free(header);
fp = orig_fp;
goto free_addrs;
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 7a34fdf8daf0..6b8065d718bd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -63,7 +63,7 @@ config X86
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_REFCOUNT
select ARCH_HAS_UACCESS_FLUSHCACHE if X86_64
- select ARCH_HAS_UACCESS_MCSAFE if X86_64
+ select ARCH_HAS_UACCESS_MCSAFE if X86_64 && X86_MCE
select ARCH_HAS_SET_MEMORY
select ARCH_HAS_SG_CHAIN
select ARCH_HAS_STRICT_KERNEL_RWX
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f0a6ea22429d..a08e82856563 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -258,11 +258,6 @@ archscripts: scripts_basic
archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
- $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
###
# Kernel objects
@@ -327,7 +322,6 @@ archclean:
$(Q)rm -rf $(objtree)/arch/x86_64
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=arch/x86/tools
- $(Q)$(MAKE) $(clean)=arch/x86/purgatory
define archhelp
echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)'
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index a8a8642d2b0b..e98522ea6f09 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -114,18 +114,12 @@ __setup_efi_pci(efi_pci_io_protocol_t *pci, struct pci_setup_rom **__rom)
struct pci_setup_rom *rom = NULL;
efi_status_t status;
unsigned long size;
- uint64_t attributes, romsize;
+ uint64_t romsize;
void *romimage;
- status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
- EfiPciIoAttributeOperationGet, 0, 0,
- &attributes);
- if (status != EFI_SUCCESS)
- return status;
-
/*
- * Some firmware images contain EFI function pointers at the place where the
- * romimage and romsize fields are supposed to be. Typically the EFI
+ * Some firmware images contain EFI function pointers at the place where
+ * the romimage and romsize fields are supposed to be. Typically the EFI
* code is mapped at high addresses, translating to an unrealistically
* large romsize. The UEFI spec limits the size of option ROMs to 16
* MiB so we reject any ROMs over 16 MiB in size to catch this.
diff --git a/arch/x86/crypto/aegis128-aesni-asm.S b/arch/x86/crypto/aegis128-aesni-asm.S
index 9254e0b6cc06..717bf0776421 100644
--- a/arch/x86/crypto/aegis128-aesni-asm.S
+++ b/arch/x86/crypto/aegis128-aesni-asm.S
@@ -535,6 +535,7 @@ ENTRY(crypto_aegis128_aesni_enc_tail)
movdqu STATE3, 0x40(STATEP)
FRAME_END
+ ret
ENDPROC(crypto_aegis128_aesni_enc_tail)
.macro decrypt_block a s0 s1 s2 s3 s4 i
diff --git a/arch/x86/crypto/aegis128l-aesni-asm.S b/arch/x86/crypto/aegis128l-aesni-asm.S
index 9263c344f2c7..4eda2b8db9e1 100644
--- a/arch/x86/crypto/aegis128l-aesni-asm.S
+++ b/arch/x86/crypto/aegis128l-aesni-asm.S
@@ -645,6 +645,7 @@ ENTRY(crypto_aegis128l_aesni_enc_tail)
state_store0
FRAME_END
+ ret
ENDPROC(crypto_aegis128l_aesni_enc_tail)
/*
diff --git a/arch/x86/crypto/aegis256-aesni-asm.S b/arch/x86/crypto/aegis256-aesni-asm.S
index 1d977d515bf9..32aae8397268 100644
--- a/arch/x86/crypto/aegis256-aesni-asm.S
+++ b/arch/x86/crypto/aegis256-aesni-asm.S
@@ -543,6 +543,7 @@ ENTRY(crypto_aegis256_aesni_enc_tail)
state_store0
FRAME_END
+ ret
ENDPROC(crypto_aegis256_aesni_enc_tail)
/*
diff --git a/arch/x86/crypto/morus1280-avx2-asm.S b/arch/x86/crypto/morus1280-avx2-asm.S
index 37d422e77931..07653d4582a6 100644
--- a/arch/x86/crypto/morus1280-avx2-asm.S
+++ b/arch/x86/crypto/morus1280-avx2-asm.S
@@ -453,6 +453,7 @@ ENTRY(crypto_morus1280_avx2_enc_tail)
vmovdqu STATE4, (4 * 32)(%rdi)
FRAME_END
+ ret
ENDPROC(crypto_morus1280_avx2_enc_tail)
/*
diff --git a/arch/x86/crypto/morus1280-sse2-asm.S b/arch/x86/crypto/morus1280-sse2-asm.S
index 1fe637c7be9d..bd1aa1b60869 100644
--- a/arch/x86/crypto/morus1280-sse2-asm.S
+++ b/arch/x86/crypto/morus1280-sse2-asm.S
@@ -652,6 +652,7 @@ ENTRY(crypto_morus1280_sse2_enc_tail)
movdqu STATE4_HI, (9 * 16)(%rdi)
FRAME_END
+ ret
ENDPROC(crypto_morus1280_sse2_enc_tail)
/*
diff --git a/arch/x86/crypto/morus640-sse2-asm.S b/arch/x86/crypto/morus640-sse2-asm.S
index 71c72a0a0862..efa02816d921 100644
--- a/arch/x86/crypto/morus640-sse2-asm.S
+++ b/arch/x86/crypto/morus640-sse2-asm.S
@@ -437,6 +437,7 @@ ENTRY(crypto_morus640_sse2_enc_tail)
movdqu STATE4, (4 * 16)(%rdi)
FRAME_END
+ ret
ENDPROC(crypto_morus640_sse2_enc_tail)
/*
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 92190879b228..3b2490b81918 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -164,7 +164,7 @@ static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags)
if (cached_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
if (cached_flags & _TIF_USER_RETURN_NOTIFY)
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 2582881d19ce..c371bfee137a 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -477,7 +477,7 @@ ENTRY(entry_SYSENTER_32)
* whereas POPF does not.)
*/
addl $PT_EFLAGS-PT_DS, %esp /* point esp at pt_regs->flags */
- btr $X86_EFLAGS_IF_BIT, (%esp)
+ btrl $X86_EFLAGS_IF_BIT, (%esp)
popfl
/*
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index 9de7f1e1dede..7d0df78db727 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -84,13 +84,13 @@ ENTRY(entry_SYSENTER_compat)
pushq %rdx /* pt_regs->dx */
pushq %rcx /* pt_regs->cx */
pushq $-ENOSYS /* pt_regs->ax */
- pushq %r8 /* pt_regs->r8 */
+ pushq $0 /* pt_regs->r8 = 0 */
xorl %r8d, %r8d /* nospec r8 */
- pushq %r9 /* pt_regs->r9 */
+ pushq $0 /* pt_regs->r9 = 0 */
xorl %r9d, %r9d /* nospec r9 */
- pushq %r10 /* pt_regs->r10 */
+ pushq $0 /* pt_regs->r10 = 0 */
xorl %r10d, %r10d /* nospec r10 */
- pushq %r11 /* pt_regs->r11 */
+ pushq $0 /* pt_regs->r11 = 0 */
xorl %r11d, %r11d /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx */
@@ -374,13 +374,13 @@ ENTRY(entry_INT80_compat)
pushq %rcx /* pt_regs->cx */
xorl %ecx, %ecx /* nospec cx */
pushq $-ENOSYS /* pt_regs->ax */
- pushq $0 /* pt_regs->r8 = 0 */
+ pushq %r8 /* pt_regs->r8 */
xorl %r8d, %r8d /* nospec r8 */
- pushq $0 /* pt_regs->r9 = 0 */
+ pushq %r9 /* pt_regs->r9 */
xorl %r9d, %r9d /* nospec r9 */
- pushq $0 /* pt_regs->r10 = 0 */
+ pushq %r10 /* pt_regs->r10 */
xorl %r10d, %r10d /* nospec r10 */
- pushq $0 /* pt_regs->r11 = 0 */
+ pushq %r11 /* pt_regs->r11 */
xorl %r11d, %r11d /* nospec r11 */
pushq %rbx /* pt_regs->rbx */
xorl %ebx, %ebx /* nospec rbx */
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 8a10a045b57b..8cf03f101938 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -408,9 +408,11 @@ static int alloc_bts_buffer(int cpu)
ds->bts_buffer_base = (unsigned long) cea;
ds_update_cea(cea, buffer, BTS_BUFFER_SIZE, PAGE_KERNEL);
ds->bts_index = ds->bts_buffer_base;
- max = BTS_RECORD_SIZE * (BTS_BUFFER_SIZE / BTS_RECORD_SIZE);
- ds->bts_absolute_maximum = ds->bts_buffer_base + max;
- ds->bts_interrupt_threshold = ds->bts_absolute_maximum - (max / 16);
+ max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
+ ds->bts_absolute_maximum = ds->bts_buffer_base +
+ max * BTS_RECORD_SIZE;
+ ds->bts_interrupt_threshold = ds->bts_absolute_maximum -
+ (max / 16) * BTS_RECORD_SIZE;
return 0;
}
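The alloc_bts_buffer() fix above keeps max in units of records and multiplies by BTS_RECORD_SIZE at the point of use, so the interrupt threshold stays a whole number of records below the absolute maximum; the old code subtracted max/16 bytes, which is not record-aligned since BTS_RECORD_SIZE is 24. A small sketch of the difference (illustration only; the buffer size is an assumed example value):

#include <stdio.h>

#define BTS_RECORD_SIZE 24UL
#define BTS_BUFFER_SIZE (16 * 4096UL)   /* assumed for the example */

int main(void)
{
        unsigned long records = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        unsigned long old_gap = (records * BTS_RECORD_SIZE) / 16;      /* bytes */
        unsigned long new_gap = (records / 16) * BTS_RECORD_SIZE;      /* bytes */

        printf("old gap %lu (%% 24 = %lu), new gap %lu (%% 24 = %lu)\n",
               old_gap, old_gap % BTS_RECORD_SIZE,
               new_gap, new_gap % BTS_RECORD_SIZE);
        return 0;
}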
diff --git a/arch/x86/hyperv/hv_apic.c b/arch/x86/hyperv/hv_apic.c
index f68855499391..402338365651 100644
--- a/arch/x86/hyperv/hv_apic.c
+++ b/arch/x86/hyperv/hv_apic.c
@@ -114,6 +114,8 @@ static bool __send_ipi_mask_ex(const struct cpumask *mask, int vector)
ipi_arg->vp_set.format = HV_GENERIC_SET_SPARSE_4K;
nr_bank = cpumask_to_vpset(&(ipi_arg->vp_set), mask);
}
+ if (nr_bank < 0)
+ goto ipi_mask_ex_done;
if (!nr_bank)
ipi_arg->vp_set.format = HV_GENERIC_SET_ALL;
@@ -158,6 +160,9 @@ static bool __send_ipi_mask(const struct cpumask *mask, int vector)
for_each_cpu(cur_cpu, mask) {
vcpu = hv_cpu_number_to_vp_number(cur_cpu);
+ if (vcpu == VP_INVAL)
+ goto ipi_mask_done;
+
/*
* This particular version of the IPI hypercall can
* only target up to 64 CPUs.
diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c
index 4c431e1c1eff..1ff420217298 100644
--- a/arch/x86/hyperv/hv_init.c
+++ b/arch/x86/hyperv/hv_init.c
@@ -265,7 +265,7 @@ void __init hyperv_init(void)
{
u64 guest_id, required_msrs;
union hv_x64_msr_hypercall_contents hypercall_msr;
- int cpuhp;
+ int cpuhp, i;
if (x86_hyper_type != X86_HYPER_MS_HYPERV)
return;
@@ -293,6 +293,9 @@ void __init hyperv_init(void)
if (!hv_vp_index)
return;
+ for (i = 0; i < num_possible_cpus(); i++)
+ hv_vp_index[i] = VP_INVAL;
+
hv_vp_assist_page = kcalloc(num_possible_cpus(),
sizeof(*hv_vp_assist_page), GFP_KERNEL);
if (!hv_vp_assist_page) {
diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
index c356098b6fb9..4d4015ddcf26 100644
--- a/arch/x86/include/asm/apm.h
+++ b/arch/x86/include/asm/apm.h
@@ -7,8 +7,6 @@
#ifndef _ASM_X86_MACH_DEFAULT_APM_H
#define _ASM_X86_MACH_DEFAULT_APM_H
-#include <asm/nospec-branch.h>
-
#ifdef APM_ZERO_SEGS
# define APM_DO_ZERO_SEGS \
"pushl %%ds\n\t" \
@@ -34,7 +32,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
- firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -47,7 +44,6 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
"=S" (*esi)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
- firmware_restrict_branch_speculation_end();
}
static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
@@ -60,7 +56,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
* N.B. We do NOT need a cld after the BIOS call
* because we always save and restore the flags.
*/
- firmware_restrict_branch_speculation_start();
__asm__ __volatile__(APM_DO_ZERO_SEGS
"pushl %%edi\n\t"
"pushl %%ebp\n\t"
@@ -73,7 +68,6 @@ static inline bool apm_bios_call_simple_asm(u32 func, u32 ebx_in,
"=S" (si)
: "a" (func), "b" (ebx_in), "c" (ecx_in)
: "memory", "cc");
- firmware_restrict_branch_speculation_end();
return error;
}
diff --git a/arch/x86/include/asm/asm.h b/arch/x86/include/asm/asm.h
index 219faaec51df..990770f9e76b 100644
--- a/arch/x86/include/asm/asm.h
+++ b/arch/x86/include/asm/asm.h
@@ -46,6 +46,65 @@
#define _ASM_SI __ASM_REG(si)
#define _ASM_DI __ASM_REG(di)
+#ifndef __x86_64__
+/* 32 bit */
+
+#define _ASM_ARG1 _ASM_AX
+#define _ASM_ARG2 _ASM_DX
+#define _ASM_ARG3 _ASM_CX
+
+#define _ASM_ARG1L eax
+#define _ASM_ARG2L edx
+#define _ASM_ARG3L ecx
+
+#define _ASM_ARG1W ax
+#define _ASM_ARG2W dx
+#define _ASM_ARG3W cx
+
+#define _ASM_ARG1B al
+#define _ASM_ARG2B dl
+#define _ASM_ARG3B cl
+
+#else
+/* 64 bit */
+
+#define _ASM_ARG1 _ASM_DI
+#define _ASM_ARG2 _ASM_SI
+#define _ASM_ARG3 _ASM_DX
+#define _ASM_ARG4 _ASM_CX
+#define _ASM_ARG5 r8
+#define _ASM_ARG6 r9
+
+#define _ASM_ARG1Q rdi
+#define _ASM_ARG2Q rsi
+#define _ASM_ARG3Q rdx
+#define _ASM_ARG4Q rcx
+#define _ASM_ARG5Q r8
+#define _ASM_ARG6Q r9
+
+#define _ASM_ARG1L edi
+#define _ASM_ARG2L esi
+#define _ASM_ARG3L edx
+#define _ASM_ARG4L ecx
+#define _ASM_ARG5L r8d
+#define _ASM_ARG6L r9d
+
+#define _ASM_ARG1W di
+#define _ASM_ARG2W si
+#define _ASM_ARG3W dx
+#define _ASM_ARG4W cx
+#define _ASM_ARG5W r8w
+#define _ASM_ARG6W r9w
+
+#define _ASM_ARG1B dil
+#define _ASM_ARG2B sil
+#define _ASM_ARG3B dl
+#define _ASM_ARG4B cl
+#define _ASM_ARG5B r8b
+#define _ASM_ARG6B r9b
+
+#endif
+
/*
* Macros to generate condition code outputs from inline assembly,
* The output operand must be type "bool".
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 042b5e892ed1..14de0432d288 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -38,7 +38,7 @@ static inline unsigned long array_index_mask_nospec(unsigned long index,
{
unsigned long mask;
- asm ("cmp %1,%2; sbb %0,%0;"
+ asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index 89f08955fff7..c4fc17220df9 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -13,7 +13,7 @@
* Interrupt control:
*/
-static inline unsigned long native_save_fl(void)
+extern inline unsigned long native_save_fl(void)
{
unsigned long flags;
diff --git a/arch/x86/include/asm/mshyperv.h b/arch/x86/include/asm/mshyperv.h
index 3cd14311edfa..5a7375ed5f7c 100644
--- a/arch/x86/include/asm/mshyperv.h
+++ b/arch/x86/include/asm/mshyperv.h
@@ -9,6 +9,8 @@
#include <asm/hyperv-tlfs.h>
#include <asm/nospec-branch.h>
+#define VP_INVAL U32_MAX
+
struct ms_hyperv_info {
u32 features;
u32 misc_features;
@@ -20,7 +22,6 @@ struct ms_hyperv_info {
extern struct ms_hyperv_info ms_hyperv;
-
/*
* Generate the guest ID.
*/
@@ -281,6 +282,8 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
*/
for_each_cpu(cpu, cpus) {
vcpu = hv_cpu_number_to_vp_number(cpu);
+ if (vcpu == VP_INVAL)
+ return -1;
vcpu_bank = vcpu / 64;
vcpu_offset = vcpu % 64;
__set_bit(vcpu_offset, (unsigned long *)
diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
index ada6410fd2ec..fbd578daa66e 100644
--- a/arch/x86/include/asm/pgalloc.h
+++ b/arch/x86/include/asm/pgalloc.h
@@ -184,6 +184,9 @@ static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
+ if (!pgtable_l5_enabled())
+ return;
+
BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
free_page((unsigned long)p4d);
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 26fd42a91946..df58cdbad841 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -916,7 +916,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
/* to find an entry in a page-table-directory. */
-static __always_inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
{
if (!pgtable_l5_enabled())
return (p4d_t *)pgd;
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 3c687e9a95a4..82ff20b0ae45 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -216,7 +216,7 @@ static inline pgd_t pti_set_user_pgd(pgd_t *pgdp, pgd_t pgd)
}
#endif
-static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
+static inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
{
pgd_t pgd;
@@ -230,7 +230,7 @@ static __always_inline void native_set_p4d(p4d_t *p4dp, p4d_t p4d)
*p4dp = native_make_p4d(native_pgd_val(pgd));
}
-static __always_inline void native_p4d_clear(p4d_t *p4d)
+static inline void native_p4d_clear(p4d_t *p4d)
{
native_set_p4d(p4d, native_make_p4d(0));
}
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 62acb613114b..a9d637bc301d 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -52,7 +52,12 @@ copy_to_user_mcsafe(void *to, const void *from, unsigned len)
unsigned long ret;
__uaccess_begin();
- ret = memcpy_mcsafe(to, from, len);
+ /*
+ * Note, __memcpy_mcsafe() is explicitly used since it can
+ * handle exceptions / faults. memcpy_mcsafe() may fall back to
+ * memcpy() which lacks this handling.
+ */
+ ret = __memcpy_mcsafe(to, from, len);
__uaccess_end();
return ret;
}
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 94a8547d915b..91241ccd4cfa 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -114,6 +114,7 @@
#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
#define VMX_MISC_SAVE_EFER_LMA 0x00000020
#define VMX_MISC_ACTIVITY_HLT 0x00000040
+#define VMX_MISC_ZERO_LEN_INS 0x40000000
/* VMFUNC functions */
#define VMX_VMFUNC_EPTP_SWITCHING 0x00000001
@@ -351,11 +352,13 @@ enum vmcs_field {
#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED (1 << 8) /* reserved */
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT (7 << 8) /* other event */
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI 0x00000001
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 02d6f5cf4e70..8824d01c0c35 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -61,6 +61,7 @@ obj-y += alternative.o i8253.o hw_breakpoint.o
obj-y += tsc.o tsc_msr.o io_delay.o rtc.o
obj-y += pci-iommu_table.o
obj-y += resource.o
+obj-y += irqflags.o
obj-y += process.o
obj-y += fpu/
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index efaf2d4f9c3c..d492752f79e1 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/reboot.h>
+#include <linux/memory.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@ extern int uv_hub_info_version(void)
}
EXPORT_SYMBOL(uv_hub_info_version);
+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int parse_mem_block_size(char *ptr)
+{
+ unsigned long size = memparse(ptr, NULL);
+
+ /* Size will be rounded down by set_block_size() below */
+ mem_block_size = size;
+ return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+ unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+ unsigned long size;
+
+ for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+ if (IS_ALIGNED(base, size))
+ break;
+
+ if (size >= mem_block_size)
+ return 0;
+
+ mem_block_size = size;
+ return 1;
+}
+
+static __init void set_block_size(void)
+{
+ unsigned int order = ffs(mem_block_size);
+
+ if (order) {
+ /* adjust for ffs return of 1..64 */
+ set_memory_block_size_order(order - 1);
+ pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+ } else {
+ /* bad or zero value, default to 1UL << 31 (2GB) */
+ pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+ set_memory_block_size_order(31);
+ }
+}
+
/* Build GAM range lookup table: */
static __init void build_uv_gr_table(void)
{
@@ -1180,23 +1226,30 @@ static void __init decode_gam_rng_tbl(unsigned long ptr)
<< UV_GAM_RANGE_SHFT);
int order = 0;
char suffix[] = " KMGTPE";
+ int flag = ' ';
while (size > 9999 && order < sizeof(suffix)) {
size /= 1024;
order++;
}
+ /* adjust max block size to current range start */
+ if (gre->type == 1 || gre->type == 2)
+ if (adj_blksize(lgre))
+ flag = '*';
+
if (!index) {
pr_info("UV: GAM Range Table...\n");
- pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+ pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
}
- pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
+ pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
index++,
(unsigned long)lgre << UV_GAM_RANGE_SHFT,
(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
- size, suffix[order],
+ flag, size, suffix[order],
gre->type, gre->nasid, gre->sockid, gre->pnode);
+ /* update to next range start */
lgre = gre->limit;
if (sock_min > gre->sockid)
sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@ static void __init uv_system_init_hub(void)
build_socket_tables();
build_uv_gr_table();
+ set_block_size();
uv_init_hub_info(&hub_info);
uv_possible_blades = num_possible_nodes();
if (!_node_to_pnode)
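adj_blksize() above halves the configured UV block size until it evenly divides the start of a GAM range (never dropping below MIN_MEMORY_BLOCK_SIZE), and set_block_size() then passes the resulting power-of-two order to set_memory_block_size_order(). A small sketch of that fitting loop (illustration only; the minimum block size is an assumed example value):

#include <stdio.h>

#define MIN_MEMORY_BLOCK_SIZE (1UL << 27)       /* 128 MiB, assumed for the example */

static unsigned long fit_block_size(unsigned long base, unsigned long size)
{
        for (; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
                if ((base & (size - 1)) == 0)   /* IS_ALIGNED(base, size) */
                        break;
        return size;
}

int main(void)
{
        /* 2 GiB default block size against a range starting at 1.5 GiB */
        printf("0x%lx\n", fit_block_size(0x60000000UL, 2UL << 30));
        return 0;
}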
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index 5d0de79fdab0..ec00d1ff5098 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -240,6 +240,7 @@
#include <asm/olpc.h>
#include <asm/paravirt.h>
#include <asm/reboot.h>
+#include <asm/nospec-branch.h>
#if defined(CONFIG_APM_DISPLAY_BLANK) && defined(CONFIG_VT)
extern int (*console_blank_hook)(int);
@@ -614,11 +615,13 @@ static long __apm_bios_call(void *_call)
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
+ firmware_restrict_branch_speculation_start();
APM_DO_SAVE_SEGS;
apm_bios_call_asm(call->func, call->ebx, call->ecx,
&call->eax, &call->ebx, &call->ecx, &call->edx,
&call->esi);
APM_DO_RESTORE_SEGS;
+ firmware_restrict_branch_speculation_end();
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
@@ -690,10 +693,12 @@ static long __apm_bios_call_simple(void *_call)
gdt[0x40 / 8] = bad_bios_desc;
apm_irq_save(flags);
+ firmware_restrict_branch_speculation_start();
APM_DO_SAVE_SEGS;
error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
&call->eax);
APM_DO_RESTORE_SEGS;
+ firmware_restrict_branch_speculation_end();
apm_irq_restore(flags);
gdt[0x40 / 8] = save_desc_40;
put_cpu();
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 74061e421e62..97e962afb967 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -549,7 +549,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
nodes_per_socket = ((value >> 3) & 7) + 1;
}
- if (c->x86 >= 0x15 && c->x86 <= 0x17) {
+ if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
+ !boot_cpu_has(X86_FEATURE_VIRT_SSBD) &&
+ c->x86 >= 0x15 && c->x86 <= 0x17) {
unsigned int bit;
switch (c->x86) {
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index fa6123bdd032..eb4b574be237 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -29,6 +29,7 @@
#include <asm/set_memory.h>
#include <asm/intel-family.h>
#include <asm/e820/api.h>
+#include <asm/hypervisor.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
@@ -165,7 +166,8 @@ x86_virt_spec_ctrl(u64 guest_spec_ctrl, u64 guest_virt_spec_ctrl, bool setguest)
guestval |= guest_spec_ctrl & x86_spec_ctrl_mask;
/* SSBD controlled in MSR_SPEC_CTRL */
- if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD))
+ if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+ static_cpu_has(X86_FEATURE_AMD_SSBD))
hostval |= ssbd_tif_to_spec_ctrl(ti->flags);
if (hostval != guestval) {
@@ -543,9 +545,10 @@ static enum ssb_mitigation __init __ssb_select_mitigation(void)
* Intel uses the SPEC CTRL MSR Bit(2) for this, while AMD may
* use a completely different MSR and bit dependent on family.
*/
- if (!static_cpu_has(X86_FEATURE_MSR_SPEC_CTRL))
+ if (!static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) &&
+ !static_cpu_has(X86_FEATURE_AMD_SSBD)) {
x86_amd_ssb_disable();
- else {
+ } else {
x86_spec_ctrl_base |= SPEC_CTRL_SSBD;
x86_spec_ctrl_mask |= SPEC_CTRL_SSBD;
wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
@@ -781,6 +784,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
if (boot_cpu_has(X86_FEATURE_PTI))
return sprintf(buf, "Mitigation: PTI\n");
+ if (hypervisor_is_type(X86_HYPER_XEN_PV))
+ return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
break;
case X86_BUG_SPECTRE_V1:
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 38354c66df81..0c5fcbd998cf 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -671,7 +671,7 @@ void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
if (num_sharing_cache) {
- int bits = get_count_order(num_sharing_cache) - 1;
+ int bits = get_count_order(num_sharing_cache);
per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
}
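The cacheinfo fix above drops the "- 1": get_count_order() already returns the order after rounding the sharing count up to the next power of two, and shifting the APIC ID by that full order is what keeps all CPUs sharing the LLC on the same cpu_llc_id. A small sketch with a non-power-of-two sharing count (illustration only; get_count_order() is naively re-implemented here):

#include <stdio.h>

/* smallest order such that (1 << order) >= count, like get_count_order() */
static int count_order(unsigned int count)
{
        int order = 0;

        while ((1U << order) < count)
                order++;
        return order;
}

int main(void)
{
        unsigned int num_sharing_cache = 6;     /* e.g. 3 cores x 2 threads */
        unsigned int apicid = 5;

        printf("old llc_id %u, fixed llc_id %u\n",
               apicid >> (count_order(num_sharing_cache) - 1),  /* 1 */
               apicid >> count_order(num_sharing_cache));       /* 0 */
        return 0;
}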
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0768492f4687..9eda6f730ec4 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 5bbd06f38ff6..f34d89c01edc 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -160,6 +160,11 @@ static struct severity {
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
USER
),
+ MCESEV(
+ PANIC, "Data load in unrecoverable area of kernel",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+ KERNEL
+ ),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index e4cf6ff1c2e1..8c50754c09c1 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -772,23 +772,25 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
struct pt_regs *regs)
{
- int i, ret = 0;
char *tmp;
+ int i;
for (i = 0; i < mca_cfg.banks; i++) {
m->status = mce_rdmsrl(msr_ops.status(i));
- if (m->status & MCI_STATUS_VAL) {
- __set_bit(i, validp);
- if (quirk_no_way_out)
- quirk_no_way_out(i, m, regs);
- }
+ if (!(m->status & MCI_STATUS_VAL))
+ continue;
+
+ __set_bit(i, validp);
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ mce_read_aux(m, i);
*msg = tmp;
- ret = 1;
+ return 1;
}
}
- return ret;
+ return 0;
}
/*
@@ -1205,13 +1207,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
lmce = m.mcgstatus & MCG_STATUS_LMCES;
/*
+ * Local machine check may already know that we have to panic.
+ * Broadcast machine check begins rendezvous in mce_start().
* Go through all banks in exclusion of the other CPUs. This way we
* don't report duplicated events on shared banks because the first one
- * to see it will clear it. If this is a Local MCE, then no need to
- * perform rendezvous.
+ * to see it will clear it.
*/
- if (!lmce)
+ if (lmce) {
+ if (no_way_out)
+ mce_panic("Fatal local machine check", &m, msg);
+ } else {
order = mce_start(&no_way_out);
+ }
for (i = 0; i < cfg->banks; i++) {
__clear_bit(i, toclear);
@@ -1287,12 +1294,17 @@ void do_machine_check(struct pt_regs *regs, long error_code)
no_way_out = worst >= MCE_PANIC_SEVERITY;
} else {
/*
- * Local MCE skipped calling mce_reign()
- * If we found a fatal error, we need to panic here.
+ * If there was a fatal machine check we should have
+ * already called mce_panic earlier in this function.
+ * Since we re-read the banks, we might have found
+ * something new. Check again to see if we found a
+ * fatal error. We call "mce_severity()" again to
+ * make sure we have the right "msg".
*/
- if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
- mce_panic("Machine check from unknown source",
- NULL, NULL);
+ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+ mce_severity(&m, cfg->tolerant, &msg, true);
+ mce_panic("Local fatal machine check!", &m, msg);
+ }
}
/*
@@ -2153,9 +2165,6 @@ static ssize_t store_int_with_restart(struct device *s,
if (check_interval == old_check_interval)
return ret;
- if (check_interval < 1)
- check_interval = 1;
-
mutex_lock(&mce_sysfs_mutex);
mce_restart();
mutex_unlock(&mce_sysfs_mutex);
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 1c2cfa0644aa..97ccf4c3b45b 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -190,8 +190,11 @@ static void save_microcode_patch(void *data, unsigned int size)
p = memdup_patch(data, size);
if (!p)
pr_err("Error allocating buffer %p\n", data);
- else
+ else {
list_replace(&iter->plist, &p->plist);
+ kfree(iter->data);
+ kfree(iter);
+ }
}
}
diff --git a/arch/x86/kernel/cpu/mtrr/if.c b/arch/x86/kernel/cpu/mtrr/if.c
index 4021d3859499..40eee6cc4124 100644
--- a/arch/x86/kernel/cpu/mtrr/if.c
+++ b/arch/x86/kernel/cpu/mtrr/if.c
@@ -106,7 +106,8 @@ mtrr_write(struct file *file, const char __user *buf, size_t len, loff_t * ppos)
memset(line, 0, LINE_SIZE);
- length = strncpy_from_user(line, buf, LINE_SIZE - 1);
+ len = min_t(size_t, len, LINE_SIZE - 1);
+ length = strncpy_from_user(line, buf, len);
if (length < 0)
return length;
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index d1f25c831447..c88c23c658c1 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -1248,6 +1248,7 @@ void __init e820__memblock_setup(void)
{
int i;
u64 end;
+ u64 addr = 0;
/*
* The bootstrap memblock region count maximum is 128 entries
@@ -1264,13 +1265,21 @@ void __init e820__memblock_setup(void)
struct e820_entry *entry = &e820_table->entries[i];
end = entry->addr + entry->size;
+ if (addr < entry->addr)
+ memblock_reserve(addr, entry->addr - addr);
+ addr = end;
if (end != (resource_size_t)end)
continue;
+ /*
+ * all !E820_TYPE_RAM ranges (including gap ranges) are put
+ * into memblock.reserved to make sure that struct pages in
+ * such regions are not left uninitialized after bootup.
+ */
if (entry->type != E820_TYPE_RAM && entry->type != E820_TYPE_RESERVED_KERN)
- continue;
-
- memblock_add(entry->addr, entry->size);
+ memblock_reserve(entry->addr, entry->size);
+ else
+ memblock_add(entry->addr, entry->size);
}
/* Throw away partial pages: */
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a21d6ace648e..8047379e575a 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -44,7 +44,7 @@ static unsigned int __initdata next_early_pgt;
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
#ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
diff --git a/arch/x86/kernel/irqflags.S b/arch/x86/kernel/irqflags.S
new file mode 100644
index 000000000000..ddeeaac8adda
--- /dev/null
+++ b/arch/x86/kernel/irqflags.S
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <linux/linkage.h>
+
+/*
+ * unsigned long native_save_fl(void)
+ */
+ENTRY(native_save_fl)
+ pushf
+ pop %_ASM_AX
+ ret
+ENDPROC(native_save_fl)
+EXPORT_SYMBOL(native_save_fl)
+
+/*
+ * void native_restore_fl(unsigned long flags)
+ * %eax/%rdi: flags
+ */
+ENTRY(native_restore_fl)
+ push %_ASM_ARG1
+ popf
+ ret
+ENDPROC(native_restore_fl)
+EXPORT_SYMBOL(native_restore_fl)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index bf8d1eb7fca3..3b8e7c13c614 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -138,6 +138,7 @@ static unsigned long kvm_get_tsc_khz(void)
src = &hv_clock[cpu].pvti;
tsc_khz = pvclock_tsc_khz(src);
put_cpu();
+ setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ);
return tsc_khz;
}
@@ -319,6 +320,8 @@ void __init kvmclock_init(void)
printk(KERN_INFO "kvm-clock: Using msrs %x and %x",
msr_kvm_system_time, msr_kvm_wall_clock);
+ pvclock_set_pvti_cpu0_va(hv_clock);
+
if (kvm_para_has_feature(KVM_FEATURE_CLOCKSOURCE_STABLE_BIT))
pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT);
@@ -366,14 +369,11 @@ int __init kvm_setup_vsyscall_timeinfo(void)
vcpu_time = &hv_clock[cpu].pvti;
flags = pvclock_read_flags(vcpu_time);
- if (!(flags & PVCLOCK_TSC_STABLE_BIT)) {
- put_cpu();
- return 1;
- }
-
- pvclock_set_pvti_cpu0_va(hv_clock);
put_cpu();
+ if (!(flags & PVCLOCK_TSC_STABLE_BIT))
+ return 1;
+
kvm_clock.archdata.vclock_mode = VCLOCK_PVCLOCK;
#endif
return 0;
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 697a4ce04308..736348ead421 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -645,12 +645,19 @@ static void quirk_intel_brickland_xeon_ras_cap(struct pci_dev *pdev)
/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
- u32 capid0;
+ u32 capid0, capid5;
pci_read_config_dword(pdev, 0x84, &capid0);
+ pci_read_config_dword(pdev, 0x98, &capid5);
- if ((capid0 & 0xc0) == 0xc0)
+ /*
+ * CAPID0{7:6} indicate whether this is an advanced RAS SKU
+ * CAPID5{8:5} indicate that various NVDIMM usage modes are
+ * enabled, so memory machine check recovery is also enabled.
+ */
+ if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
static_branch_inc(&mcsafe_key);
+
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 445ca11ff863..92a3b312a53c 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -692,7 +692,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
* Increment event counter and perform fixup for the pre-signal
* frame.
*/
- rseq_signal_deliver(regs);
+ rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
if (is_ia32_frame(ksig)) {
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f5d30c68fd09..f02ecaf97904 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -222,6 +222,11 @@ static void notrace start_secondary(void *unused)
#ifdef CONFIG_X86_32
/* switch away from the initial page table */
load_cr3(swapper_pg_dir);
+ /*
+ * Initialize the CR4 shadow before doing anything that could
+ * try to read it.
+ */
+ cr4_init_shadow();
__flush_tlb_all();
#endif
load_current_idt();
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a535dd64de63..e6db475164ed 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -835,16 +835,18 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
"simd exception";
- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
- return;
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
- if (!fixup_exception(regs, trapnr)) {
- task->thread.error_code = error_code;
- task->thread.trap_nr = trapnr;
+ if (fixup_exception(regs, trapnr))
+ return;
+
+ task->thread.error_code = error_code;
+ task->thread.trap_nr = trapnr;
+
+ if (notify_die(DIE_TRAP, str, regs, error_code,
+ trapnr, SIGFPE) != NOTIFY_STOP)
die(str, regs, error_code);
- }
return;
}
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 58d8d800875d..deb576b23b7c 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -293,7 +293,7 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
/* has the side-effect of processing the entire instruction */
insn_get_length(insn);
- if (WARN_ON_ONCE(!insn_complete(insn)))
+ if (!insn_complete(insn))
return -ENOEXEC;
if (is_prefix_bad(insn))
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 92fd433c50b9..1bbec387d289 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -85,7 +85,7 @@ config KVM_AMD_SEV
def_bool y
bool "AMD Secure Encrypted Virtualization (SEV) support"
depends on KVM_AMD && X86_64
- depends on CRYPTO_DEV_CCP && CRYPTO_DEV_CCP_DD && CRYPTO_DEV_SP_PSP
+ depends on CRYPTO_DEV_SP_PSP && !(KVM_AMD=y && CRYPTO_DEV_CCP_DD=m)
---help---
Provides support for launching Encrypted VMs on AMD processors.
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9beb772b9eb6..a44e568363a4 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -890,7 +890,7 @@ static int mmu_topup_memory_cache_page(struct kvm_mmu_memory_cache *cache,
if (cache->nobjs >= min)
return 0;
while (cache->nobjs < ARRAY_SIZE(cache->objects)) {
- page = (void *)__get_free_page(GFP_KERNEL);
+ page = (void *)__get_free_page(GFP_KERNEL_ACCOUNT);
if (!page)
return -ENOMEM;
cache->objects[cache->nobjs++] = page;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c6594fe38437..8384f7cb30d8 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1843,6 +1843,17 @@ static inline bool nested_cpu_has_vmwrite_any_field(struct kvm_vcpu *vcpu)
MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+ CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
return vmcs12->cpu_based_vm_exec_control & bit;
@@ -2719,6 +2730,7 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
struct vcpu_vmx *vmx = to_vmx(vcpu);
#ifdef CONFIG_X86_64
int cpu = raw_smp_processor_id();
+ unsigned long fs_base, kernel_gs_base;
#endif
int i;
@@ -2734,12 +2746,20 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
#ifdef CONFIG_X86_64
- save_fsgs_for_kvm();
- vmx->host_state.fs_sel = current->thread.fsindex;
- vmx->host_state.gs_sel = current->thread.gsindex;
-#else
- savesegment(fs, vmx->host_state.fs_sel);
- savesegment(gs, vmx->host_state.gs_sel);
+ if (likely(is_64bit_mm(current->mm))) {
+ save_fsgs_for_kvm();
+ vmx->host_state.fs_sel = current->thread.fsindex;
+ vmx->host_state.gs_sel = current->thread.gsindex;
+ fs_base = current->thread.fsbase;
+ kernel_gs_base = current->thread.gsbase;
+ } else {
+#endif
+ savesegment(fs, vmx->host_state.fs_sel);
+ savesegment(gs, vmx->host_state.gs_sel);
+#ifdef CONFIG_X86_64
+ fs_base = read_msr(MSR_FS_BASE);
+ kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+ }
#endif
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
@@ -2759,10 +2779,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
savesegment(ds, vmx->host_state.ds_sel);
savesegment(es, vmx->host_state.es_sel);
- vmcs_writel(HOST_FS_BASE, current->thread.fsbase);
+ vmcs_writel(HOST_FS_BASE, fs_base);
vmcs_writel(HOST_GS_BASE, cpu_kernelmode_gs_base(cpu));
- vmx->msr_host_kernel_gs_base = current->thread.gsbase;
+ vmx->msr_host_kernel_gs_base = kernel_gs_base;
if (is_long_mode(&vmx->vcpu))
wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#else
@@ -4470,11 +4490,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
vmcs_conf->order = get_order(vmcs_conf->size);
vmcs_conf->basic_cap = vmx_msr_high & ~0x1fff;
- /* KVM supports Enlightened VMCS v1 only */
- if (static_branch_unlikely(&enable_evmcs))
- vmcs_conf->revision_id = KVM_EVMCS_VERSION;
- else
- vmcs_conf->revision_id = vmx_msr_low;
+ vmcs_conf->revision_id = vmx_msr_low;
vmcs_conf->pin_based_exec_ctrl = _pin_based_exec_control;
vmcs_conf->cpu_based_exec_ctrl = _cpu_based_exec_control;
@@ -4544,7 +4560,13 @@ static struct vmcs *alloc_vmcs_cpu(int cpu)
return NULL;
vmcs = page_address(pages);
memset(vmcs, 0, vmcs_config.size);
- vmcs->revision_id = vmcs_config.revision_id; /* vmcs revision id */
+
+ /* KVM supports Enlightened VMCS v1 only */
+ if (static_branch_unlikely(&enable_evmcs))
+ vmcs->revision_id = KVM_EVMCS_VERSION;
+ else
+ vmcs->revision_id = vmcs_config.revision_id;
+
return vmcs;
}
@@ -4712,6 +4734,19 @@ static __init int alloc_kvm_area(void)
return -ENOMEM;
}
+ /*
+ * When eVMCS is enabled, alloc_vmcs_cpu() sets
+ * vmcs->revision_id to KVM_EVMCS_VERSION instead of
+ * revision_id reported by MSR_IA32_VMX_BASIC.
+ *
+ * However, even though not explicitly documented by
+ * TLFS, VMXArea passed as VMXON argument should
+ * still be marked with revision_id reported by
+ * physical CPU.
+ */
+ if (static_branch_unlikely(&enable_evmcs))
+ vmcs->revision_id = vmcs_config.revision_id;
+
per_cpu(vmxarea, cpu) = vmcs;
}
return 0;
@@ -11882,6 +11917,62 @@ static int check_vmentry_prereqs(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+ /*
+ * From the Intel SDM, volume 3:
+ * Fields relevant to VM-entry event injection must be set properly.
+ * These fields are the VM-entry interruption-information field, the
+ * VM-entry exception error code, and the VM-entry instruction length.
+ */
+ if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+ u32 intr_info = vmcs12->vm_entry_intr_info_field;
+ u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+ u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+ bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+ bool should_have_error_code;
+ bool urg = nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_UNRESTRICTED_GUEST);
+ bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+ /* VM-entry interruption-info field: interruption type */
+ if (intr_type == INTR_TYPE_RESERVED ||
+ (intr_type == INTR_TYPE_OTHER_EVENT &&
+ !nested_cpu_supports_monitor_trap_flag(vcpu)))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry interruption-info field: vector */
+ if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+ (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+ (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry interruption-info field: deliver error code */
+ should_have_error_code =
+ intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+ x86_exception_has_error_code(vector);
+ if (has_error_code != should_have_error_code)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry exception error code */
+ if (has_error_code &&
+ vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry interruption-info field: reserved bits */
+ if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry instruction length */
+ switch (intr_type) {
+ case INTR_TYPE_SOFT_EXCEPTION:
+ case INTR_TYPE_SOFT_INTR:
+ case INTR_TYPE_PRIV_SW_EXCEPTION:
+ if ((vmcs12->vm_entry_instruction_len > 15) ||
+ (vmcs12->vm_entry_instruction_len == 0 &&
+ !nested_cpu_has_zero_length_injection(vcpu)))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ }
+ }
+
return 0;
}
@@ -11948,7 +12039,6 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
- u32 msr_entry_idx;
u32 exit_qual;
int r;
@@ -11970,10 +12060,10 @@ static int enter_vmx_non_root_mode(struct kvm_vcpu *vcpu)
nested_get_vmcs12_pages(vcpu, vmcs12);
r = EXIT_REASON_MSR_LOAD_FAIL;
- msr_entry_idx = nested_vmx_load_msr(vcpu,
- vmcs12->vm_entry_msr_load_addr,
- vmcs12->vm_entry_msr_load_count);
- if (msr_entry_idx)
+ exit_qual = nested_vmx_load_msr(vcpu,
+ vmcs12->vm_entry_msr_load_addr,
+ vmcs12->vm_entry_msr_load_count);
+ if (exit_qual)
goto fail;
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 902d535dff8f..79c8ca2c2ad9 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1098,6 +1098,7 @@ static u32 msr_based_features[] = {
MSR_F10H_DECFG,
MSR_IA32_UCODE_REV,
+ MSR_IA32_ARCH_CAPABILITIES,
};
static unsigned int num_msr_based_features;
@@ -1106,7 +1107,8 @@ static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
case MSR_IA32_UCODE_REV:
- rdmsrl(msr->index, msr->data);
+ case MSR_IA32_ARCH_CAPABILITIES:
+ rdmsrl_safe(msr->index, &msr->data);
break;
default:
if (kvm_x86_ops->get_msr_feature(msr))
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 331993c49dae..257f27620bc2 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -110,6 +110,15 @@ static inline bool is_la57_mode(struct kvm_vcpu *vcpu)
#endif
}
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+ static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+ BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+ BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+ return (1U << vector) & exception_has_error_code;
+}
+
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9a84a0d08727..2aafa6ab6103 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -641,11 +641,6 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
return 0;
}
-static const char nx_warning[] = KERN_CRIT
-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
-static const char smep_warning[] = KERN_CRIT
-"unable to execute userspace code (SMEP?) (uid: %d)\n";
-
static void
show_fault_oops(struct pt_regs *regs, unsigned long error_code,
unsigned long address)
@@ -664,20 +659,18 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
pte = lookup_address_in_pgd(pgd, address, &level);
if (pte && pte_present(*pte) && !pte_exec(*pte))
- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
+ pr_crit("kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n",
+ from_kuid(&init_user_ns, current_uid()));
if (pte && pte_present(*pte) && pte_exec(*pte) &&
(pgd_flags(*pgd) & _PAGE_USER) &&
(__read_cr4() & X86_CR4_SMEP))
- printk(smep_warning, from_kuid(&init_user_ns, current_uid()));
+ pr_crit("unable to execute userspace code (SMEP?) (uid: %d)\n",
+ from_kuid(&init_user_ns, current_uid()));
}
- printk(KERN_ALERT "BUG: unable to handle kernel ");
- if (address < PAGE_SIZE)
- printk(KERN_CONT "NULL pointer dereference");
- else
- printk(KERN_CONT "paging request");
-
- printk(KERN_CONT " at %px\n", (void *) address);
+ pr_alert("BUG: unable to handle kernel %s at %px\n",
+ address < PAGE_SIZE ? "NULL pointer dereference" : "paging request",
+ (void *)address);
dump_pagetable(address);
}
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 045f492d5f68..a688617c727e 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1350,16 +1350,28 @@ int kern_addr_valid(unsigned long addr)
/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+ unsigned long size = 1UL << order;
+
+ if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+ return -EINVAL;
+
+ set_memory_block_size = size;
+ return 0;
+}
+
static unsigned long probe_memory_block_size(void)
{
unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
unsigned long bz;
- /* If this is UV system, always set 2G block size */
- if (is_uv_system()) {
- bz = MAX_BLOCK_SIZE;
+ /* If memory block size has been set, then use it */
+ bz = set_memory_block_size;
+ if (bz)
goto done;
- }
/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index e01f7ceb9e7a..77873ce700ae 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -166,14 +166,14 @@ void __init efi_call_phys_epilog(pgd_t *save_pgd)
pgd = pgd_offset_k(pgd_idx * PGDIR_SIZE);
set_pgd(pgd_offset_k(pgd_idx * PGDIR_SIZE), save_pgd[pgd_idx]);
- if (!(pgd_val(*pgd) & _PAGE_PRESENT))
+ if (!pgd_present(*pgd))
continue;
for (i = 0; i < PTRS_PER_P4D; i++) {
p4d = p4d_offset(pgd,
pgd_idx * PGDIR_SIZE + i * P4D_SIZE);
- if (!(p4d_val(*p4d) & _PAGE_PRESENT))
+ if (!p4d_present(*p4d))
continue;
pud = (pud_t *)p4d_page_vaddr(*p4d);
diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile
index 2e9ee023e6bc..81a8e33115ad 100644
--- a/arch/x86/purgatory/Makefile
+++ b/arch/x86/purgatory/Makefile
@@ -6,7 +6,7 @@ purgatory-y := purgatory.o stack.o setup-x86_$(BITS).o sha256.o entry64.o string
targets += $(purgatory-y)
PURGATORY_OBJS = $(addprefix $(obj)/,$(purgatory-y))
-$(obj)/sha256.o: $(srctree)/lib/sha256.c
+$(obj)/sha256.o: $(srctree)/lib/sha256.c FORCE
$(call if_changed_rule,cc_o_c)
LDFLAGS_purgatory.ro := -e purgatory_start -r --no-undefined -nostdlib -z nodefaultlib
diff --git a/arch/x86/um/mem_32.c b/arch/x86/um/mem_32.c
index 744afdc18cf3..56c44d865f7b 100644
--- a/arch/x86/um/mem_32.c
+++ b/arch/x86/um/mem_32.c
@@ -16,7 +16,7 @@ static int __init gate_vma_init(void)
if (!FIXADDR_USER_START)
return 0;
- gate_vma.vm_mm = NULL;
+ vma_init(&gate_vma, NULL);
gate_vma.vm_start = FIXADDR_USER_START;
gate_vma.vm_end = FIXADDR_USER_END;
gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index df208af3cd74..2eeddd814653 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -66,6 +66,13 @@ __read_mostly int xen_have_vector_callback;
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
+/*
* Point at some empty memory to start with. We map the real shared_info
* page as soon as fixmap is up and running.
*/
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 357969a3697c..439a94bf89ad 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1203,15 +1203,24 @@ asmlinkage __visible void __init xen_start_kernel(void)
return;
xen_domain_type = XEN_PV_DOMAIN;
+ xen_start_flags = xen_start_info->flags;
xen_setup_features();
- xen_setup_machphys_mapping();
-
/* Install Xen paravirt ops */
pv_info = xen_info;
pv_init_ops.patch = paravirt_patch_default;
pv_cpu_ops = xen_cpu_ops;
+ xen_init_irq_ops();
+
+ /*
+ * Setup xen_vcpu early because it is needed for
+ * local_irq_disable(), irqs_disabled(), e.g. in printk().
+ *
+ * Don't do the full vcpu_info placement stuff until we have
+ * the cpu_possible_mask and a non-dummy shared_info.
+ */
+ xen_vcpu_info_reset(0);
x86_platform.get_nmi_reason = xen_get_nmi_reason;
@@ -1224,10 +1233,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
* Set up some pagetable state before starting to set any ptes.
*/
+ xen_setup_machphys_mapping();
xen_init_mmu_ops();
/* Prevent unwanted bits from being set in PTEs. */
__supported_pte_mask &= ~_PAGE_GLOBAL;
+ __default_kernel_pte_mask &= ~_PAGE_GLOBAL;
/*
* Prevent page tables from being allocated in highmem, even
@@ -1248,20 +1259,9 @@ asmlinkage __visible void __init xen_start_kernel(void)
get_cpu_cap(&boot_cpu_data);
x86_configure_nx();
- xen_init_irq_ops();
-
/* Let's presume PV guests always boot on vCPU with id 0. */
per_cpu(xen_vcpu_id, 0) = 0;
- /*
- * Setup xen_vcpu early because idt_setup_early_handler needs it for
- * local_irq_disable(), irqs_disabled().
- *
- * Don't do the full vcpu_info placement stuff until we have
- * the cpu_possible_mask and a non-dummy shared_info.
- */
- xen_vcpu_info_reset(0);
-
idt_setup_early_handler();
xen_init_capabilities();
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index aa1c6a6831a9..c85d1a88f476 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -97,6 +97,7 @@ void __init xen_prepare_pvh(void)
}
xen_pvh = 1;
+ xen_start_flags = pvh_start_info.flags;
msr = cpuid_ebx(xen_cpuid_base() + 2);
pfn = __pa(hypercall_page);
diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c
index 74179852e46c..7515a19fd324 100644
--- a/arch/x86/xen/irq.c
+++ b/arch/x86/xen/irq.c
@@ -128,8 +128,6 @@ static const struct pv_irq_ops xen_irq_ops __initconst = {
void __init xen_init_irq_ops(void)
{
- /* For PVH we use default pv_irq_ops settings. */
- if (!xen_feature(XENFEAT_hvm_callback_vector))
- pv_irq_ops = xen_irq_ops;
+ pv_irq_ops = xen_irq_ops;
x86_init.irqs.intr_init = xen_init_IRQ;
}
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 2e20ae2fa2d6..e3b18ad49889 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -32,6 +32,7 @@
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>
+#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
@@ -70,6 +71,8 @@ static void cpu_bringup(void)
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
+ speculative_store_bypass_ht_init();
+
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@ static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
}
set_cpu_sibling_map(0);
+ speculative_store_bypass_ht_init();
+
xen_pmu_init(0);
if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
diff --git a/block/bio.c b/block/bio.c
index 9710e275f230..047c5dca6d90 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -903,25 +903,27 @@ int bio_add_page(struct bio *bio, struct page *page,
EXPORT_SYMBOL(bio_add_page);
/**
- * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * __bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
* @bio: bio to add pages to
* @iter: iov iterator describing the region to be mapped
*
- * Pins as many pages from *iter and appends them to @bio's bvec array. The
+ * Pins pages from *iter and appends them to @bio's bvec array. The
* pages will have to be released using put_page() when done.
+ * For multi-segment *iter, this function only adds pages from the
+ * next non-empty segment of the iov iterator.
*/
-int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+static int __bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
{
- unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt;
+ unsigned short nr_pages = bio->bi_max_vecs - bio->bi_vcnt, idx;
struct bio_vec *bv = bio->bi_io_vec + bio->bi_vcnt;
struct page **pages = (struct page **)bv;
- size_t offset, diff;
+ size_t offset;
ssize_t size;
size = iov_iter_get_pages(iter, pages, LONG_MAX, nr_pages, &offset);
if (unlikely(size <= 0))
return size ? size : -EFAULT;
- nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
+ idx = nr_pages = (size + offset + PAGE_SIZE - 1) / PAGE_SIZE;
/*
* Deep magic below: We need to walk the pinned pages backwards
@@ -934,21 +936,46 @@ int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
bio->bi_iter.bi_size += size;
bio->bi_vcnt += nr_pages;
- diff = (nr_pages * PAGE_SIZE - offset) - size;
- while (nr_pages--) {
- bv[nr_pages].bv_page = pages[nr_pages];
- bv[nr_pages].bv_len = PAGE_SIZE;
- bv[nr_pages].bv_offset = 0;
+ while (idx--) {
+ bv[idx].bv_page = pages[idx];
+ bv[idx].bv_len = PAGE_SIZE;
+ bv[idx].bv_offset = 0;
}
bv[0].bv_offset += offset;
bv[0].bv_len -= offset;
- if (diff)
- bv[bio->bi_vcnt - 1].bv_len -= diff;
+ bv[nr_pages - 1].bv_len -= nr_pages * PAGE_SIZE - offset - size;
iov_iter_advance(iter, size);
return 0;
}
+
+/**
+ * bio_iov_iter_get_pages - pin user or kernel pages and add them to a bio
+ * @bio: bio to add pages to
+ * @iter: iov iterator describing the region to be mapped
+ *
+ * Pins pages from *iter and appends them to @bio's bvec array. The
+ * pages will have to be released using put_page() when done.
+ * The function tries, but does not guarantee, to pin as many pages as
+ * fit into the bio, or are requested in *iter, whichever is smaller.
+ * If MM encounters an error pinning the requested pages, it stops.
+ * An error is returned only if no pages could be pinned.
+ */
+int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter)
+{
+ unsigned short orig_vcnt = bio->bi_vcnt;
+
+ do {
+ int ret = __bio_iov_iter_get_pages(bio, iter);
+
+ if (unlikely(ret))
+ return bio->bi_vcnt > orig_vcnt ? 0 : ret;
+
+ } while (iov_iter_count(iter) && !bio_full(bio));
+
+ return 0;
+}
EXPORT_SYMBOL_GPL(bio_iov_iter_get_pages);
static void submit_bio_wait_endio(struct bio *bio)
@@ -1807,9 +1834,6 @@ again:
if (!bio_integrity_endio(bio))
return;
- if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
- bio->bi_next = NULL;
-
/*
* Need to have a real endio function for chained bios, otherwise
* various corner cases will break (like stacking block devices that
@@ -1869,6 +1893,7 @@ struct bio *bio_split(struct bio *bio, int sectors,
bio_integrity_trim(split);
bio_advance(bio, split->bi_iter.bi_size);
+ bio->bi_iter.bi_done = 0;
if (bio_flagged(bio, BIO_TRACE_COMPLETION))
bio_set_flag(split, BIO_TRACE_COMPLETION);
diff --git a/block/blk-core.c b/block/blk-core.c
index cf0ee764b908..f84a9b7b6f5a 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -273,10 +273,6 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- /*
- * XXX this code looks suspicious - it's not consistent with advancing
- * req->bio in caller
- */
if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio_endio(bio);
}
@@ -3081,10 +3077,8 @@ bool blk_update_request(struct request *req, blk_status_t error,
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_iter.bi_size) {
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
- bio->bi_next = NULL;
- }
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
@@ -3479,6 +3473,10 @@ static void __blk_rq_prep_clone(struct request *dst, struct request *src)
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
+ if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
+ dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
+ dst->special_vec = src->special_vec;
+ }
dst->nr_phys_segments = src->nr_phys_segments;
dst->ioprio = src->ioprio;
dst->extra_len = src->extra_len;
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ffa622366922..1c4532e92938 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -356,7 +356,7 @@ static const char *const blk_mq_rq_state_name_array[] = {
static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
- if (WARN_ON_ONCE((unsigned int)rq_state >
+ if (WARN_ON_ONCE((unsigned int)rq_state >=
ARRAY_SIZE(blk_mq_rq_state_name_array)))
return "(?)";
return blk_mq_rq_state_name_array[rq_state];
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 70c65bb6c013..654b0dc7e001 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -558,10 +558,8 @@ static void __blk_mq_complete_request(struct request *rq)
bool shared = false;
int cpu;
- if (cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) !=
- MQ_RQ_IN_FLIGHT)
+ if (!blk_mq_mark_complete(rq))
return;
-
if (rq->internal_tag != -1)
blk_mq_sched_completed_request(rq);
@@ -781,7 +779,6 @@ static void blk_mq_rq_timed_out(struct request *req, bool reserved)
WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
}
- req->rq_flags &= ~RQF_TIMED_OUT;
blk_add_timer(req);
}
@@ -1076,6 +1073,9 @@ static bool blk_mq_mark_tag_wait(struct blk_mq_hw_ctx **hctx,
#define BLK_MQ_RESOURCE_DELAY 3 /* ms units */
+/*
+ * Returns true if we did some work AND can potentially do more.
+ */
bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
bool got_budget)
{
@@ -1206,8 +1206,17 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
blk_mq_run_hw_queue(hctx, true);
else if (needs_restart && (ret == BLK_STS_RESOURCE))
blk_mq_delay_run_hw_queue(hctx, BLK_MQ_RESOURCE_DELAY);
+
+ return false;
}
+ /*
+ * If the host/device is unable to accept more work, inform the
+ * caller of that.
+ */
+ if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
+ return false;
+
return (queued + errors) != 0;
}
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 01e2b353a2b9..15c1f5e12eb8 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -144,6 +144,7 @@ do_local:
local_irq_restore(flags);
}
+EXPORT_SYMBOL(__blk_complete_request);
/**
* blk_complete_request - end I/O on a request
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 4b8a48d48ba1..f2cfd56e1606 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -210,6 +210,7 @@ void blk_add_timer(struct request *req)
if (!req->timeout)
req->timeout = q->rq_timeout;
+ req->rq_flags &= ~RQF_TIMED_OUT;
blk_rq_set_deadline(req, jiffies + req->timeout);
/*
diff --git a/block/bsg.c b/block/bsg.c
index 66602c489956..3da540faf673 100644
--- a/block/bsg.c
+++ b/block/bsg.c
@@ -267,8 +267,6 @@ bsg_map_hdr(struct request_queue *q, struct sg_io_v4 *hdr, fmode_t mode)
} else if (hdr->din_xfer_len) {
ret = blk_rq_map_user(q, rq, NULL, uptr64(hdr->din_xferp),
hdr->din_xfer_len, GFP_KERNEL);
- } else {
- ret = blk_rq_map_user(q, rq, NULL, NULL, 0, GFP_KERNEL);
}
if (ret)
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 945f4b8610e0..e0de4dd448b3 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -877,7 +877,7 @@ static size_t response_get_string(const struct parsed_resp *resp, int n,
return 0;
}
- if (n > resp->num) {
+ if (n >= resp->num) {
pr_debug("Response has %d tokens. Can't access %d\n",
resp->num, n);
return 0;
@@ -916,7 +916,7 @@ static u64 response_get_u64(const struct parsed_resp *resp, int n)
return 0;
}
- if (n > resp->num) {
+ if (n >= resp->num) {
pr_debug("Response has %d tokens. Can't access %d\n",
resp->num, n);
return 0;
diff --git a/certs/blacklist.h b/certs/blacklist.h
index 150d82da8e99..1efd6fa0dc60 100644
--- a/certs/blacklist.h
+++ b/certs/blacklist.h
@@ -1,3 +1,3 @@
#include <linux/kernel.h>
-extern const char __initdata *const blacklist_hashes[];
+extern const char __initconst *const blacklist_hashes[];
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 49fa8582138b..c166f424871c 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -1060,12 +1060,19 @@ void af_alg_async_cb(struct crypto_async_request *_req, int err)
}
EXPORT_SYMBOL_GPL(af_alg_async_cb);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
+/**
+ * af_alg_poll - poll system call handler
+ */
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
- __poll_t mask = 0;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
if (!ctx->more || ctx->used)
mask |= EPOLLIN | EPOLLRDNORM;
@@ -1075,7 +1082,7 @@ __poll_t af_alg_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-EXPORT_SYMBOL_GPL(af_alg_poll_mask);
+EXPORT_SYMBOL_GPL(af_alg_poll);
/**
* af_alg_alloc_areq - allocate struct af_alg_async_req
@@ -1148,8 +1155,10 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
/* make one iovec available as scatterlist */
err = af_alg_make_sg(&rsgl->sgl, &msg->msg_iter, seglen);
- if (err < 0)
+ if (err < 0) {
+ rsgl->sg_num_bytes = 0;
return err;
+ }
/* chain the new scatterlist with previous one */
if (areq->last_rsgl)
diff --git a/crypto/algif_aead.c b/crypto/algif_aead.c
index 825524f27438..c40a8c7ee8ae 100644
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -375,7 +375,7 @@ static struct proto_ops algif_aead_ops = {
.sendmsg = aead_sendmsg,
.sendpage = af_alg_sendpage,
.recvmsg = aead_recvmsg,
- .poll_mask = af_alg_poll_mask,
+ .poll = af_alg_poll,
};
static int aead_check_key(struct socket *sock)
@@ -471,7 +471,7 @@ static struct proto_ops algif_aead_ops_nokey = {
.sendmsg = aead_sendmsg_nokey,
.sendpage = aead_sendpage_nokey,
.recvmsg = aead_recvmsg_nokey,
- .poll_mask = af_alg_poll_mask,
+ .poll = af_alg_poll,
};
static void *aead_bind(const char *name, u32 type, u32 mask)
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 4c04eb9888ad..cfdaab2b7d76 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -206,7 +206,7 @@ static struct proto_ops algif_skcipher_ops = {
.sendmsg = skcipher_sendmsg,
.sendpage = af_alg_sendpage,
.recvmsg = skcipher_recvmsg,
- .poll_mask = af_alg_poll_mask,
+ .poll = af_alg_poll,
};
static int skcipher_check_key(struct socket *sock)
@@ -302,7 +302,7 @@ static struct proto_ops algif_skcipher_ops_nokey = {
.sendmsg = skcipher_sendmsg_nokey,
.sendpage = skcipher_sendpage_nokey,
.recvmsg = skcipher_recvmsg_nokey,
- .poll_mask = af_alg_poll_mask,
+ .poll = af_alg_poll,
};
static void *skcipher_bind(const char *name, u32 type, u32 mask)
diff --git a/crypto/asymmetric_keys/x509_cert_parser.c b/crypto/asymmetric_keys/x509_cert_parser.c
index 7d81e6bb461a..b6cabac4b62b 100644
--- a/crypto/asymmetric_keys/x509_cert_parser.c
+++ b/crypto/asymmetric_keys/x509_cert_parser.c
@@ -249,6 +249,15 @@ int x509_note_signature(void *context, size_t hdrlen,
return -EINVAL;
}
+ if (strcmp(ctx->cert->sig->pkey_algo, "rsa") == 0) {
+ /* Discard the BIT STRING metadata */
+ if (vlen < 1 || *(const u8 *)value != 0)
+ return -EBADMSG;
+
+ value++;
+ vlen--;
+ }
+
ctx->cert->raw_sig = value;
ctx->cert->raw_sig_size = vlen;
return 0;
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 9fbcde307daf..5eede3749e64 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -274,8 +274,9 @@ static void crypto_morus640_decrypt_chunk(struct morus640_state *state, u8 *dst,
union morus640_block_in tail;
memcpy(tail.bytes, src, size);
+ memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
- crypto_morus640_load_a(&m, src);
+ crypto_morus640_load_a(&m, tail.bytes);
crypto_morus640_core(state, &m);
crypto_morus640_store_a(tail.bytes, &m);
memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 264ec12c0b9c..7f6735d9003f 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -152,7 +152,7 @@ static SHA3_INLINE void keccakf_round(u64 st[25])
st[24] ^= bc[ 4];
}
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
{
int round;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 38a286975c31..f8fecfec5df9 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -22,6 +22,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
+#include <linux/suspend.h>
#include <linux/delay.h>
#include "internal.h"
@@ -946,9 +947,10 @@ static void lpss_iosf_exit_d3_state(void)
mutex_unlock(&lpss_iosf_mutex);
}
-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ bool wakeup = runtime || device_may_wakeup(dev);
int ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -961,13 +963,14 @@ static int acpi_lpss_suspend(struct device *dev, bool wakeup)
* wrong status for devices being about to be powered off. See
* lpss_iosf_enter_d3_state() for further information.
*/
- if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ if ((runtime || !pm_suspend_via_firmware()) &&
+ lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_enter_d3_state();
return ret;
}
-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -976,7 +979,8 @@ static int acpi_lpss_resume(struct device *dev)
* This call is kept first to be in symmetry with
* acpi_lpss_runtime_suspend() one.
*/
- if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ if ((runtime || !pm_resume_via_firmware()) &&
+ lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
ret = acpi_dev_resume(dev);
@@ -1000,12 +1004,12 @@ static int acpi_lpss_suspend_late(struct device *dev)
return 0;
ret = pm_generic_suspend_late(dev);
- return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+ return ret ? ret : acpi_lpss_suspend(dev, false);
}
static int acpi_lpss_resume_early(struct device *dev)
{
- int ret = acpi_lpss_resume(dev);
+ int ret = acpi_lpss_resume(dev, false);
return ret ? ret : pm_generic_resume_early(dev);
}
@@ -1020,7 +1024,7 @@ static int acpi_lpss_runtime_suspend(struct device *dev)
static int acpi_lpss_runtime_resume(struct device *dev)
{
- int ret = acpi_lpss_resume(dev);
+ int ret = acpi_lpss_resume(dev, true);
return ret ? ret : pm_generic_runtime_resume(dev);
}
diff --git a/drivers/acpi/acpica/hwsleep.c b/drivers/acpi/acpica/hwsleep.c
index fc0c2e2328cd..fe9d46d81750 100644
--- a/drivers/acpi/acpica/hwsleep.c
+++ b/drivers/acpi/acpica/hwsleep.c
@@ -51,16 +51,23 @@ acpi_status acpi_hw_legacy_sleep(u8 sleep_state)
return_ACPI_STATUS(status);
}
- /*
- * 1) Disable all GPEs
- * 2) Enable all wakeup GPEs
- */
+ /* Disable all GPEs */
status = acpi_hw_disable_all_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
+ /*
+ * If the target sleep state is S5, clear all GPEs and fixed events too
+ */
+ if (sleep_state == ACPI_STATE_S5) {
+ status = acpi_hw_clear_acpi_status();
+ if (ACPI_FAILURE(status)) {
+ return_ACPI_STATUS(status);
+ }
+ }
acpi_gbl_system_awake_and_running = FALSE;
+ /* Enable all wakeup GPEs */
status = acpi_hw_enable_all_wakeup_gpes();
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/psloop.c b/drivers/acpi/acpica/psloop.c
index bc5f05906bd1..ee840be150b5 100644
--- a/drivers/acpi/acpica/psloop.c
+++ b/drivers/acpi/acpica/psloop.c
@@ -497,6 +497,18 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
status =
acpi_ps_create_op(walk_state, aml_op_start, &op);
if (ACPI_FAILURE(status)) {
+ /*
+ * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+ * executing it as a control method. However, if we encounter
+ * an error while loading the table, we need to keep trying to
+ * load the table rather than aborting the table load. Set the
+ * status to AE_OK to proceed with the table load.
+ */
+ if ((walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)
+ && status == AE_ALREADY_EXISTS) {
+ status = AE_OK;
+ }
if (status == AE_CTRL_PARSE_CONTINUE) {
continue;
}
@@ -694,6 +706,20 @@ acpi_status acpi_ps_parse_loop(struct acpi_walk_state *walk_state)
acpi_ps_next_parse_state(walk_state, op, status);
if (status == AE_CTRL_PENDING) {
status = AE_OK;
+ } else
+ if ((walk_state->
+ parse_flags & ACPI_PARSE_MODULE_LEVEL)
+ && ACPI_FAILURE(status)) {
+ /*
+ * ACPI_PARSE_MODULE_LEVEL means that we are loading a table by
+ * executing it as a control method. However, if we encounter
+ * an error while loading the table, we need to keep trying to
+ * load the table rather than aborting the table load. Set the
+ * status to AE_OK to proceed with the table load. If we get a
+ * failure at this point, it means that the dispatcher got an
+ * error while processing Op (most likely an AML operand error).
+ */
+ status = AE_OK;
}
}
diff --git a/drivers/acpi/acpica/uterror.c b/drivers/acpi/acpica/uterror.c
index 5a64ddaed8a3..e47430272692 100644
--- a/drivers/acpi/acpica/uterror.c
+++ b/drivers/acpi/acpica/uterror.c
@@ -182,19 +182,19 @@ acpi_ut_prefixed_namespace_error(const char *module_name,
switch (lookup_status) {
case AE_ALREADY_EXISTS:
- acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
message = "Failure creating";
break;
case AE_NOT_FOUND:
- acpi_os_printf("\n" ACPI_MSG_BIOS_ERROR);
+ acpi_os_printf(ACPI_MSG_BIOS_ERROR);
message = "Could not resolve";
break;
default:
- acpi_os_printf("\n" ACPI_MSG_ERROR);
+ acpi_os_printf(ACPI_MSG_ERROR);
message = "Failure resolving";
break;
}
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index b0113a5802a3..d79ad844c78f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -717,10 +717,11 @@ void battery_hook_register(struct acpi_battery_hook *hook)
*/
pr_err("extension failed to load: %s", hook->name);
__battery_hook_unregister(hook, 0);
- return;
+ goto end;
}
}
pr_info("new extension: %s\n", hook->name);
+end:
mutex_unlock(&hook_mutex);
}
EXPORT_SYMBOL_GPL(battery_hook_register);
@@ -732,7 +733,7 @@ EXPORT_SYMBOL_GPL(battery_hook_register);
*/
static void battery_hook_add_battery(struct acpi_battery *battery)
{
- struct acpi_battery_hook *hook_node;
+ struct acpi_battery_hook *hook_node, *tmp;
mutex_lock(&hook_mutex);
INIT_LIST_HEAD(&battery->list);
@@ -744,15 +745,15 @@ static void battery_hook_add_battery(struct acpi_battery *battery)
* when a battery gets hotplugged or initialized
* during the battery module initialization.
*/
- list_for_each_entry(hook_node, &battery_hook_list, list) {
+ list_for_each_entry_safe(hook_node, tmp, &battery_hook_list, list) {
if (hook_node->add_battery(battery->bat)) {
/*
* The notification of the extensions has failed, to
* prevent further errors we will unload the extension.
*/
- __battery_hook_unregister(hook_node, 0);
pr_err("error in extension, unloading: %s",
hook_node->name);
+ __battery_hook_unregister(hook_node, 0);
}
}
mutex_unlock(&hook_mutex);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index bb94cf0731fe..917f77f4cb55 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2037,6 +2037,17 @@ static inline void acpi_ec_query_exit(void)
}
}
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+ {
+ .ident = "Thinkpad X1 Carbon 6th",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_FAMILY, "Thinkpad X1 Carbon 6th"),
+ },
+ },
+ { },
+};
+
int __init acpi_ec_init(void)
{
int result;
@@ -2047,6 +2058,15 @@ int __init acpi_ec_init(void)
if (result)
return result;
+ /*
+ * Disable EC wakeup on the following systems to prevent periodic
+ * wakeup from EC GPE.
+ */
+ if (dmi_check_system(acpi_ec_no_wakeup)) {
+ ec_no_wakeup = true;
+ pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+ }
+
/* Drivers must be started after acpi_ec_query_init() */
dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
/*
diff --git a/drivers/acpi/nfit/core.c b/drivers/acpi/nfit/core.c
index d15814e1727f..7c479002e798 100644
--- a/drivers/acpi/nfit/core.c
+++ b/drivers/acpi/nfit/core.c
@@ -408,6 +408,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
const guid_t *guid;
int rc, i;
+ if (cmd_rc)
+ *cmd_rc = -EINVAL;
func = cmd;
if (cmd == ND_CMD_CALL) {
call_pkg = buf;
@@ -518,6 +520,8 @@ int acpi_nfit_ctl(struct nvdimm_bus_descriptor *nd_desc, struct nvdimm *nvdimm,
* If we return an error (like elsewhere) then caller wouldn't
* be able to rely upon data returned to make calculation.
*/
+ if (cmd_rc)
+ *cmd_rc = 0;
return 0;
}
@@ -1273,7 +1277,7 @@ static ssize_t scrub_show(struct device *dev,
mutex_lock(&acpi_desc->init_mutex);
rc = sprintf(buf, "%d%s", acpi_desc->scrub_count,
- work_busy(&acpi_desc->dwork.work)
+ acpi_desc->scrub_busy
&& !acpi_desc->cancel ? "+\n" : "\n");
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -2939,6 +2943,32 @@ static unsigned int __acpi_nfit_scrub(struct acpi_nfit_desc *acpi_desc,
return 0;
}
+static void __sched_ars(struct acpi_nfit_desc *acpi_desc, unsigned int tmo)
+{
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
+ acpi_desc->scrub_busy = 1;
+ /* note this should only be set from within the workqueue */
+ if (tmo)
+ acpi_desc->scrub_tmo = tmo;
+ queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
+}
+
+static void sched_ars(struct acpi_nfit_desc *acpi_desc)
+{
+ __sched_ars(acpi_desc, 0);
+}
+
+static void notify_ars_done(struct acpi_nfit_desc *acpi_desc)
+{
+ lockdep_assert_held(&acpi_desc->init_mutex);
+
+ acpi_desc->scrub_busy = 0;
+ acpi_desc->scrub_count++;
+ if (acpi_desc->scrub_count_state)
+ sysfs_notify_dirent(acpi_desc->scrub_count_state);
+}
+
static void acpi_nfit_scrub(struct work_struct *work)
{
struct acpi_nfit_desc *acpi_desc;
@@ -2949,14 +2979,10 @@ static void acpi_nfit_scrub(struct work_struct *work)
mutex_lock(&acpi_desc->init_mutex);
query_rc = acpi_nfit_query_poison(acpi_desc);
tmo = __acpi_nfit_scrub(acpi_desc, query_rc);
- if (tmo) {
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, tmo * HZ);
- acpi_desc->scrub_tmo = tmo;
- } else {
- acpi_desc->scrub_count++;
- if (acpi_desc->scrub_count_state)
- sysfs_notify_dirent(acpi_desc->scrub_count_state);
- }
+ if (tmo)
+ __sched_ars(acpi_desc, tmo);
+ else
+ notify_ars_done(acpi_desc);
memset(acpi_desc->ars_status, 0, acpi_desc->max_ars);
mutex_unlock(&acpi_desc->init_mutex);
}
@@ -3037,7 +3063,7 @@ static int acpi_nfit_register_regions(struct acpi_nfit_desc *acpi_desc)
break;
}
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+ sched_ars(acpi_desc);
return 0;
}
@@ -3239,7 +3265,7 @@ int acpi_nfit_ars_rescan(struct acpi_nfit_desc *acpi_desc, unsigned long flags)
}
}
if (scheduled) {
- queue_delayed_work(nfit_wq, &acpi_desc->dwork, 0);
+ sched_ars(acpi_desc);
dev_dbg(dev, "ars_scan triggered\n");
}
mutex_unlock(&acpi_desc->init_mutex);
diff --git a/drivers/acpi/nfit/nfit.h b/drivers/acpi/nfit/nfit.h
index 7d15856a739f..a97ff42fe311 100644
--- a/drivers/acpi/nfit/nfit.h
+++ b/drivers/acpi/nfit/nfit.h
@@ -203,6 +203,7 @@ struct acpi_nfit_desc {
unsigned int max_ars;
unsigned int scrub_count;
unsigned int scrub_mode;
+ unsigned int scrub_busy:1;
unsigned int cancel:1;
unsigned long dimm_cmd_force_en;
unsigned long bus_cmd_force_en;
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 7ca41bf023c9..8df9abfa947b 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -45,6 +45,8 @@
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include "acpica/accommon.h"
+#include "acpica/acnamesp.h"
#include "internal.h"
#define _COMPONENT ACPI_OS_SERVICES
@@ -1490,6 +1492,76 @@ int acpi_check_region(resource_size_t start, resource_size_t n,
}
EXPORT_SYMBOL(acpi_check_region);
+static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
+ void *_res, void **return_value)
+{
+ struct acpi_mem_space_context **mem_ctx;
+ union acpi_operand_object *handler_obj;
+ union acpi_operand_object *region_obj2;
+ union acpi_operand_object *region_obj;
+ struct resource *res = _res;
+ acpi_status status;
+
+ region_obj = acpi_ns_get_attached_object(handle);
+ if (!region_obj)
+ return AE_OK;
+
+ handler_obj = region_obj->region.handler;
+ if (!handler_obj)
+ return AE_OK;
+
+ if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
+ return AE_OK;
+
+ if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
+ return AE_OK;
+
+ region_obj2 = acpi_ns_get_secondary_object(region_obj);
+ if (!region_obj2)
+ return AE_OK;
+
+ mem_ctx = (void *)&region_obj2->extra.region_context;
+
+ if (!(mem_ctx[0]->address >= res->start &&
+ mem_ctx[0]->address < res->end))
+ return AE_OK;
+
+ status = handler_obj->address_space.setup(region_obj,
+ ACPI_REGION_DEACTIVATE,
+ NULL, (void **)mem_ctx);
+ if (ACPI_SUCCESS(status))
+ region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);
+
+ return status;
+}
+
+/**
+ * acpi_release_memory - Release any mappings done to a memory region
+ * @handle: Handle to namespace node
+ * @res: Memory resource
+ * @level: A level that terminates the search
+ *
+ * Walks through @handle and unmaps all SystemMemory Operation Regions that
+ * overlap with @res and that have already been activated (mapped).
+ *
+ * This is a helper that allows drivers to place special requirements on memory
+ * regions that may overlap with operation regions, primarily allowing them to
+ * safely map the region as non-cached memory.
+ *
+ * The unmapped Operation Regions will be automatically remapped next time they
+ * are called, so the drivers do not need to do anything else.
+ */
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+ u32 level)
+{
+ if (!(res->flags & IORESOURCE_MEM))
+ return AE_TYPE;
+
+ return acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
+ acpi_deactivate_mem_region, NULL, res, NULL);
+}
+EXPORT_SYMBOL_GPL(acpi_release_memory);
+
/*
* Let drivers know whether the resource checks are effective
*/
diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c
index e5ea1974d1e3..d1e26cb599bf 100644
--- a/drivers/acpi/pptt.c
+++ b/drivers/acpi/pptt.c
@@ -481,8 +481,14 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table,
if (cpu_node) {
cpu_node = acpi_find_processor_package_id(table, cpu_node,
level, flag);
- /* Only the first level has a guaranteed id */
- if (level == 0)
+ /*
+ * As per the specification, if the processor structure represents
+ * an actual processor, then the ACPI processor ID must be valid.
+ * For processor containers, ACPI_PPTT_ACPI_PROCESSOR_ID_VALID
+ * should be set if the UID is valid.
+ */
+ if (level == 0 ||
+ cpu_node->flags & ACPI_PPTT_ACPI_PROCESSOR_ID_VALID)
return cpu_node->acpi_processor_id;
return ACPI_PTR_DIFF(cpu_node, table);
}
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 2b16e7c8fff3..39b181d6bd0d 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -398,7 +398,6 @@ config SATA_DWC_VDEBUG
config SATA_HIGHBANK
tristate "Calxeda Highbank SATA support"
- depends on HAS_DMA
depends on ARCH_HIGHBANK || COMPILE_TEST
help
This option enables support for the Calxeda Highbank SoC's
@@ -408,7 +407,6 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
- depends on HAS_DMA
depends on PCI || ARCH_DOVE || ARCH_MV78XX0 || \
ARCH_MVEBU || ARCH_ORION5X || COMPILE_TEST
select GENERIC_PHY
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 738fb22978dd..b2b9eba1d214 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -400,6 +400,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x0f23), board_ahci_mobile }, /* Bay Trail AHCI */
{ PCI_VDEVICE(INTEL, 0x22a3), board_ahci_mobile }, /* Cherry Tr. AHCI */
{ PCI_VDEVICE(INTEL, 0x5ae3), board_ahci_mobile }, /* ApolloLake AHCI */
+ { PCI_VDEVICE(INTEL, 0x34d3), board_ahci_mobile }, /* Ice Lake LP AHCI */
/* JMicron 360/1/3/5/6, match class to avoid IDE function */
{ PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -1280,6 +1281,59 @@ static bool ahci_broken_suspend(struct pci_dev *pdev)
return strcmp(buf, dmi->driver_data) < 0;
}
+static bool ahci_broken_lpm(struct pci_dev *pdev)
+{
+ static const struct dmi_system_id sysids[] = {
+ /* Various Lenovo 50 series have LPM issues with older BIOSen */
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X250"),
+ },
+ .driver_data = "20180406", /* 1.31 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad L450"),
+ },
+ .driver_data = "20180420", /* 1.28 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T450s"),
+ },
+ .driver_data = "20180315", /* 1.33 */
+ },
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad W541"),
+ },
+ /*
+			 * Note: date based on release notes; 2.35 has been
+ * reported to be good, but I've been unable to get
+ * a hold of the reporter to get the DMI BIOS date.
+ * TODO: fix this.
+ */
+ .driver_data = "20180310", /* 2.35 */
+ },
+ { } /* terminate list */
+ };
+ const struct dmi_system_id *dmi = dmi_first_match(sysids);
+ int year, month, date;
+ char buf[9];
+
+ if (!dmi)
+ return false;
+
+ dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+ snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
+
+ return strcmp(buf, dmi->driver_data) < 0;
+}
+
static bool ahci_broken_online(struct pci_dev *pdev)
{
#define ENCODE_BUSDEVFN(bus, slot, func) \
@@ -1694,6 +1748,12 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
"quirky BIOS, skipping spindown on poweroff\n");
}
+ if (ahci_broken_lpm(pdev)) {
+ pi.flags |= ATA_FLAG_NO_LPM;
+ dev_warn(&pdev->dev,
+ "BIOS update required for Link Power Management support\n");
+ }
+
if (ahci_broken_suspend(pdev)) {
hpriv->flags |= AHCI_HFLAG_NO_SUSPEND;
dev_warn(&pdev->dev,
diff --git a/drivers/ata/ahci_mvebu.c b/drivers/ata/ahci_mvebu.c
index 0045dacd814b..72d90b4c3aae 100644
--- a/drivers/ata/ahci_mvebu.c
+++ b/drivers/ata/ahci_mvebu.c
@@ -82,7 +82,7 @@ static void ahci_mvebu_regret_option(struct ahci_host_priv *hpriv)
*
* Return: 0 on success; Error code otherwise.
*/
-int ahci_mvebu_stop_engine(struct ata_port *ap)
+static int ahci_mvebu_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
u32 tmp, port_fbs;
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 965842a08743..09620c2ffa0f 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -35,6 +35,7 @@
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/module.h>
+#include <linux/nospec.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
@@ -1146,10 +1147,12 @@ static ssize_t ahci_led_store(struct ata_port *ap, const char *buf,
/* get the slot number from the message */
pmp = (state & EM_MSG_LED_PMP_SLOT) >> 8;
- if (pmp < EM_MAX_SLOTS)
+ if (pmp < EM_MAX_SLOTS) {
+ pmp = array_index_nospec(pmp, EM_MAX_SLOTS);
emp = &pp->em_priv[pmp];
- else
+ } else {
return -EINVAL;
+ }
/* mask off the activity bits if we are in sw_activity
* mode, user should turn off sw_activity before setting
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 27d15ed7fa3d..cc71c63df381 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2493,6 +2493,9 @@ int ata_dev_configure(struct ata_device *dev)
(id[ATA_ID_SATA_CAPABILITY] & 0xe) == 0x2)
dev->horkage |= ATA_HORKAGE_NOLPM;
+ if (ap->flags & ATA_FLAG_NO_LPM)
+ dev->horkage |= ATA_HORKAGE_NOLPM;
+
if (dev->horkage & ATA_HORKAGE_NOLPM) {
ata_dev_warn(dev, "LPM support broken, forcing max_power\n");
dev->link->ap->target_lpm_policy = ATA_LPM_MAX_POWER;
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index d5412145d76d..01306c018398 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -614,8 +614,7 @@ void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap,
list_for_each_entry_safe(scmd, tmp, eh_work_q, eh_entry) {
struct ata_queued_cmd *qc;
- for (i = 0; i < ATA_MAX_QUEUE; i++) {
- qc = __ata_qc_from_tag(ap, i);
+ ata_qc_for_each_raw(ap, qc, i) {
if (qc->flags & ATA_QCFLAG_ACTIVE &&
qc->scsicmd == scmd)
break;
@@ -818,14 +817,13 @@ EXPORT_SYMBOL_GPL(ata_port_wait_eh);
static int ata_eh_nr_in_flight(struct ata_port *ap)
{
+ struct ata_queued_cmd *qc;
unsigned int tag;
int nr = 0;
/* count only non-internal commands */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- if (ata_tag_internal(tag))
- continue;
- if (ata_qc_from_tag(ap, tag))
+ ata_qc_for_each(ap, qc, tag) {
+ if (qc)
nr++;
}
@@ -847,13 +845,13 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
goto out_unlock;
if (cnt == ap->fastdrain_cnt) {
+ struct ata_queued_cmd *qc;
unsigned int tag;
/* No progress during the last interval, tag all
* in-flight qcs as timed out and freeze the port.
*/
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
+ ata_qc_for_each(ap, qc, tag) {
if (qc)
qc->err_mask |= AC_ERR_TIMEOUT;
}
@@ -999,6 +997,7 @@ void ata_port_schedule_eh(struct ata_port *ap)
static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
+ struct ata_queued_cmd *qc;
int tag, nr_aborted = 0;
WARN_ON(!ap->ops->error_handler);
@@ -1007,9 +1006,7 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
ata_eh_set_pending(ap, 0);
/* include internal tag in iteration */
- for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && (!link || qc->dev->link == link)) {
qc->flags |= ATA_QCFLAG_FAILED;
ata_qc_complete(qc);
@@ -1712,9 +1709,7 @@ void ata_eh_analyze_ncq_error(struct ata_link *link)
return;
/* has LLDD analyzed already? */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED))
continue;
@@ -2136,6 +2131,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_queued_cmd *qc;
struct ata_device *dev;
unsigned int all_err_mask = 0, eflags = 0;
int tag, nr_failed = 0, nr_quiet = 0;
@@ -2168,9 +2164,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
all_err_mask |= ehc->i.err_mask;
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
ata_dev_phys_link(qc->dev) != link)
continue;
@@ -2436,6 +2430,7 @@ static void ata_eh_link_report(struct ata_link *link)
{
struct ata_port *ap = link->ap;
struct ata_eh_context *ehc = &link->eh_context;
+ struct ata_queued_cmd *qc;
const char *frozen, *desc;
char tries_buf[6] = "";
int tag, nr_failed = 0;
@@ -2447,9 +2442,7 @@ static void ata_eh_link_report(struct ata_link *link)
if (ehc->i.desc[0] != '\0')
desc = ehc->i.desc;
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED) ||
ata_dev_phys_link(qc->dev) != link ||
((qc->flags & ATA_QCFLAG_QUIET) &&
@@ -2511,8 +2504,7 @@ static void ata_eh_link_report(struct ata_link *link)
ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
#endif
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
+ ata_qc_for_each_raw(ap, qc, tag) {
struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
char data_buf[20] = "";
char cdb_buf[70] = "";
@@ -3992,12 +3984,11 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
*/
void ata_eh_finish(struct ata_port *ap)
{
+ struct ata_queued_cmd *qc;
int tag;
/* retry or finish qcs */
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
-
+ ata_qc_for_each_raw(ap, qc, tag) {
if (!(qc->flags & ATA_QCFLAG_FAILED))
continue;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 6a91d04351d9..aad1b01447de 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3805,10 +3805,20 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
*/
goto invalid_param_len;
}
- if (block > dev->n_sectors)
- goto out_of_range;
all = cdb[14] & 0x1;
+ if (all) {
+ /*
+ * Ignore the block address (zone ID) as defined by ZBC.
+ */
+ block = 0;
+ } else if (block >= dev->n_sectors) {
+ /*
+ * Block must be a valid zone ID (a zone start LBA).
+ */
+ fp = 2;
+ goto invalid_fld;
+ }
if (ata_ncq_enabled(qc->dev) &&
ata_fpdma_zac_mgmt_out_supported(qc->dev)) {
@@ -3837,10 +3847,6 @@ static unsigned int ata_scsi_zbc_out_xlat(struct ata_queued_cmd *qc)
invalid_fld:
ata_scsi_set_invalid_field(qc->dev, scmd, fp, 0xff);
return 1;
- out_of_range:
- /* "Logical Block Address out of range" */
- ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x21, 0x00);
- return 1;
invalid_param_len:
/* "Parameter list length error" */
ata_scsi_set_sense(qc->dev, scmd, ILLEGAL_REQUEST, 0x1a, 0x0);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index b8d9cfc60374..4dc528bf8e85 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -395,12 +395,6 @@ static inline unsigned int sata_fsl_tag(unsigned int tag,
{
/* We let libATA core do actual (queue) tag allocation */
- /* all non NCQ/queued commands should have tag#0 */
- if (ata_tag_internal(tag)) {
- DPRINTK("mapping internal cmds to tag#0\n");
- return 0;
- }
-
if (unlikely(tag >= SATA_FSL_QUEUE_DEPTH)) {
DPRINTK("tag %d invalid : out of range\n", tag);
return 0;
@@ -1229,8 +1223,7 @@ static void sata_fsl_host_intr(struct ata_port *ap)
/* Workaround for data length mismatch errata */
if (unlikely(hstatus & INT_ON_DATA_LENGTH_MISMATCH)) {
- for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
- qc = ata_qc_from_tag(ap, tag);
+ ata_qc_for_each_with_internal(ap, qc, tag) {
if (qc && ata_is_atapi(qc->tf.protocol)) {
u32 hcontrol;
/* Set HControl[27] to clear error registers */
diff --git a/drivers/ata/sata_nv.c b/drivers/ata/sata_nv.c
index 10ae11aa1926..72c9b922a77b 100644
--- a/drivers/ata/sata_nv.c
+++ b/drivers/ata/sata_nv.c
@@ -675,7 +675,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
struct ata_port *ap = ata_shost_to_port(sdev->host);
struct nv_adma_port_priv *pp = ap->private_data;
struct nv_adma_port_priv *port0, *port1;
- struct scsi_device *sdev0, *sdev1;
struct pci_dev *pdev = to_pci_dev(ap->host->dev);
unsigned long segment_boundary, flags;
unsigned short sg_tablesize;
@@ -736,8 +735,6 @@ static int nv_adma_slave_config(struct scsi_device *sdev)
port0 = ap->host->ports[0]->private_data;
port1 = ap->host->ports[1]->private_data;
- sdev0 = ap->host->ports[0]->link.device[0].sdev;
- sdev1 = ap->host->ports[1]->link.device[0].sdev;
if ((port0->flags & NV_ADMA_ATAPI_SETUP_COMPLETE) ||
(port1->flags & NV_ADMA_ATAPI_SETUP_COMPLETE)) {
/*
diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
index ff81a576347e..82532c299bb5 100644
--- a/drivers/atm/iphase.c
+++ b/drivers/atm/iphase.c
@@ -1618,7 +1618,7 @@ static int rx_init(struct atm_dev *dev)
skb_queue_head_init(&iadev->rx_dma_q);
iadev->rx_free_desc_qhead = NULL;
- iadev->rx_open = kcalloc(4, iadev->num_vc, GFP_KERNEL);
+ iadev->rx_open = kcalloc(iadev->num_vc, sizeof(void *), GFP_KERNEL);
if (!iadev->rx_open) {
printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n",
dev->number);
diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
index a8d2eb0ceb8d..2c288d1f42bb 100644
--- a/drivers/atm/zatm.c
+++ b/drivers/atm/zatm.c
@@ -1483,6 +1483,8 @@ static int zatm_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
return -EFAULT;
if (pool < 0 || pool > ZATM_LAST_POOL)
return -EINVAL;
+ pool = array_index_nospec(pool,
+ ZATM_LAST_POOL + 1);
if (copy_from_user(&info,
&((struct zatm_pool_req __user *) arg)->info,
sizeof(info))) return -EFAULT;
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b074f242a435..704f44295810 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -8,10 +8,7 @@ obj-y := component.o core.o bus.o dd.o syscore.o \
topology.o container.o property.o cacheinfo.o \
devcon.o
obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
-obj-$(CONFIG_DMA_CMA) += dma-contiguous.o
obj-y += power/
-obj-$(CONFIG_HAS_DMA) += dma-mapping.o
-obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_ISA_BUS_API) += isa.o
obj-y += firmware_loader/
obj-$(CONFIG_NUMA) += node.o
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 36622b52e419..df3e1a44707a 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -236,6 +236,13 @@ struct device_link *device_link_add(struct device *consumer,
link->rpm_active = true;
}
pm_runtime_new_link(consumer);
+ /*
+ * If the link is being added by the consumer driver at probe
+ * time, balance the decrementation of the supplier's runtime PM
+ * usage counter after consumer probe in driver_probe_device().
+ */
+ if (consumer->links.status == DL_DEV_PROBING)
+ pm_runtime_get_noresume(supplier);
}
get_device(supplier);
link->supplier = supplier;
@@ -255,12 +262,12 @@ struct device_link *device_link_add(struct device *consumer,
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
- * Balance the decrementation of the supplier's
- * runtime PM usage counter after consumer probe
- * in driver_probe_device().
+ * Some callers expect the link creation during
+ * consumer driver probe to resume the supplier
+ * even without DL_FLAG_RPM_ACTIVE.
*/
if (flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_get_sync(supplier);
+ pm_runtime_resume(supplier);
link->status = DL_STATE_CONSUMER_PROBE;
break;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1435d7281c66..6ebcd65d64b6 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -434,14 +434,6 @@ re_probe:
goto probe_failed;
}
- /*
- * Ensure devices are listed in devices_kset in correct order
- * It's important to move Dev to the end of devices_kset before
- * calling .probe, because it could be recursive and parent Dev
- * should always go first
- */
- devices_kset_move_last(dev);
-
if (dev->bus->probe) {
ret = dev->bus->probe(dev);
if (ret)
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 4925af5c4cf0..9e8484189034 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2235,7 +2235,7 @@ static void genpd_dev_pm_sync(struct device *dev)
}
static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
- unsigned int index)
+ unsigned int index, bool power_on)
{
struct of_phandle_args pd_args;
struct generic_pm_domain *pd;
@@ -2271,9 +2271,11 @@ static int __genpd_dev_pm_attach(struct device *dev, struct device_node *np,
dev->pm_domain->detach = genpd_dev_pm_detach;
dev->pm_domain->sync = genpd_dev_pm_sync;
- genpd_lock(pd);
- ret = genpd_power_on(pd, 0);
- genpd_unlock(pd);
+ if (power_on) {
+ genpd_lock(pd);
+ ret = genpd_power_on(pd, 0);
+ genpd_unlock(pd);
+ }
if (ret)
genpd_remove_device(pd, dev);
@@ -2307,7 +2309,7 @@ int genpd_dev_pm_attach(struct device *dev)
"#power-domain-cells") != 1)
return 0;
- return __genpd_dev_pm_attach(dev, dev->of_node, 0);
+ return __genpd_dev_pm_attach(dev, dev->of_node, 0, true);
}
EXPORT_SYMBOL_GPL(genpd_dev_pm_attach);
@@ -2359,14 +2361,14 @@ struct device *genpd_dev_pm_attach_by_id(struct device *dev,
}
/* Try to attach the device to the PM domain at the specified index. */
- ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index);
+ ret = __genpd_dev_pm_attach(genpd_dev, dev->of_node, index, false);
if (ret < 1) {
device_unregister(genpd_dev);
return ret ? ERR_PTR(ret) : NULL;
}
- pm_runtime_set_active(genpd_dev);
pm_runtime_enable(genpd_dev);
+ genpd_queue_power_off_work(dev_to_genpd(genpd_dev));
return genpd_dev;
}
@@ -2487,10 +2489,9 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
* power domain corresponding to a DT node's "required-opps" property.
*
* @dev: Device for which the performance-state needs to be found.
- * @opp_node: DT node where the "required-opps" property is present. This can be
+ * @np: DT node where the "required-opps" property is present. This can be
* the device node itself (if it doesn't have an OPP table) or a node
* within the OPP table of a device (if device has an OPP table).
- * @state: Pointer to return performance state.
*
* Returns performance state corresponding to the "required-opps" property of
* a DT node. This calls platform specific genpd->opp_to_performance_state()
@@ -2499,7 +2500,7 @@ EXPORT_SYMBOL_GPL(of_genpd_parse_idle_states);
* Returns performance state on success and 0 on failure.
*/
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node)
+ struct device_node *np)
{
struct generic_pm_domain *genpd;
struct dev_pm_opp *opp;
@@ -2514,7 +2515,7 @@ unsigned int of_genpd_opp_to_performance_state(struct device *dev,
genpd_lock(genpd);
- opp = of_dev_pm_opp_find_required_opp(&genpd->dev, opp_node);
+ opp = of_dev_pm_opp_find_required_opp(&genpd->dev, np);
if (IS_ERR(opp)) {
dev_err(dev, "Failed to find required OPP: %ld\n",
PTR_ERR(opp));
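
The kernel-doc updated above describes the lookup by device-tree node; a small sketch of a caller follows, under the assumption that of_genpd_opp_to_performance_state() is declared in <linux/pm_domain.h> (the function name below is illustrative):

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/pm_domain.h>

/* Illustrative only: resolve the performance state named by a node's
 * "required-opps" property; a return of 0 means the lookup failed.
 */
static int example_required_perf_state(struct device *dev,
				       struct device_node *np)
{
	unsigned int pstate = of_genpd_opp_to_performance_state(dev, np);

	return pstate ? (int)pstate : -EINVAL;
}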
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index a47e4987ee46..d146fedc38bb 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -1244,8 +1244,8 @@ drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long
_drbd_start_io_acct(device, req);
/* process discards always from our submitter thread */
- if ((bio_op(bio) & REQ_OP_WRITE_ZEROES) ||
- (bio_op(bio) & REQ_OP_DISCARD))
+ if (bio_op(bio) == REQ_OP_WRITE_ZEROES ||
+ bio_op(bio) == REQ_OP_DISCARD)
goto queue_for_submitter_thread;
if (rw == WRITE && req->private_bio && req->i.size
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 1476cb3439f4..5e793dd7adfb 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -282,8 +282,8 @@ void drbd_request_endio(struct bio *bio)
what = COMPLETED_OK;
}
- bio_put(req->private_bio);
req->private_bio = ERR_PTR(blk_status_to_errno(bio->bi_status));
+ bio_put(bio);
/* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->resource->req_lock, flags);
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index d6b6f434fd4b..4cb1d1be3cfb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1613,6 +1613,7 @@ static int lo_compat_ioctl(struct block_device *bdev, fmode_t mode,
arg = (unsigned long) compat_ptr(arg);
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
+ case LOOP_SET_BLOCK_SIZE:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3b7083b8ecbb..3fb95c8d9fd8 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -76,6 +76,7 @@ struct link_dead_args {
#define NBD_HAS_CONFIG_REF 4
#define NBD_BOUND 5
#define NBD_DESTROY_ON_DISCONNECT 6
+#define NBD_DISCONNECT_ON_CLOSE 7
struct nbd_config {
u32 flags;
@@ -111,12 +112,16 @@ struct nbd_device {
struct task_struct *task_setup;
};
+#define NBD_CMD_REQUEUED 1
+
struct nbd_cmd {
struct nbd_device *nbd;
+ struct mutex lock;
int index;
int cookie;
- struct completion send_complete;
blk_status_t status;
+ unsigned long flags;
+ u32 cmd_cookie;
};
#if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -138,12 +143,42 @@ static void nbd_config_put(struct nbd_device *nbd);
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
return disk_to_dev(nbd->disk);
}
+static void nbd_requeue_cmd(struct nbd_cmd *cmd)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+
+ if (!test_and_set_bit(NBD_CMD_REQUEUED, &cmd->flags))
+ blk_mq_requeue_request(req, true);
+}
+
+#define NBD_COOKIE_BITS 32
+
+static u64 nbd_cmd_handle(struct nbd_cmd *cmd)
+{
+ struct request *req = blk_mq_rq_from_pdu(cmd);
+ u32 tag = blk_mq_unique_tag(req);
+ u64 cookie = cmd->cmd_cookie;
+
+ return (cookie << NBD_COOKIE_BITS) | tag;
+}
+
+static u32 nbd_handle_to_tag(u64 handle)
+{
+ return (u32)handle;
+}
+
+static u32 nbd_handle_to_cookie(u64 handle)
+{
+ return (u32)(handle >> NBD_COOKIE_BITS);
+}
+
static const char *nbdcmd_to_ascii(int cmd)
{
switch (cmd) {
@@ -317,6 +352,9 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
config = nbd->config;
+ if (!mutex_trylock(&cmd->lock))
+ return BLK_EH_RESET_TIMER;
+
if (config->num_connections > 1) {
dev_err_ratelimited(nbd_to_dev(nbd),
"Connection timed out, retrying (%d/%d alive)\n",
@@ -341,7 +379,8 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
nbd_mark_nsock_dead(nbd, nsock, 1);
mutex_unlock(&nsock->tx_lock);
}
- blk_mq_requeue_request(req, true);
+ mutex_unlock(&cmd->lock);
+ nbd_requeue_cmd(cmd);
nbd_config_put(nbd);
return BLK_EH_DONE;
}
@@ -351,6 +390,7 @@ static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req,
}
set_bit(NBD_TIMEDOUT, &config->runtime_flags);
cmd->status = BLK_STS_IOERR;
+ mutex_unlock(&cmd->lock);
sock_shutdown(nbd);
nbd_config_put(nbd);
done:
@@ -428,9 +468,9 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
struct iov_iter from;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
+ u64 handle;
u32 type;
u32 nbd_cmd_flags = 0;
- u32 tag = blk_mq_unique_tag(req);
int sent = nsock->sent, skip = 0;
iov_iter_kvec(&from, WRITE | ITER_KVEC, &iov, 1, sizeof(request));
@@ -472,6 +512,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
goto send_pages;
}
iov_iter_advance(&from, sent);
+ } else {
+ cmd->cmd_cookie++;
}
cmd->index = index;
cmd->cookie = nsock->cookie;
@@ -480,7 +522,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9);
request.len = htonl(size);
}
- memcpy(request.handle, &tag, sizeof(tag));
+ handle = nbd_cmd_handle(cmd);
+ memcpy(request.handle, &handle, sizeof(handle));
dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n",
req, nbdcmd_to_ascii(type),
@@ -498,6 +541,7 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
nsock->pending = req;
nsock->sent = sent;
}
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err_ratelimited(disk_to_dev(nbd->disk),
@@ -539,6 +583,7 @@ send_pages:
*/
nsock->pending = req;
nsock->sent = sent;
+ set_bit(NBD_CMD_REQUEUED, &cmd->flags);
return BLK_STS_RESOURCE;
}
dev_err(disk_to_dev(nbd->disk),
@@ -571,10 +616,12 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
struct nbd_reply reply;
struct nbd_cmd *cmd;
struct request *req = NULL;
+ u64 handle;
u16 hwq;
u32 tag;
struct kvec iov = {.iov_base = &reply, .iov_len = sizeof(reply)};
struct iov_iter to;
+ int ret = 0;
reply.magic = 0;
iov_iter_kvec(&to, READ | ITER_KVEC, &iov, 1, sizeof(reply));
@@ -592,8 +639,8 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-EPROTO);
}
- memcpy(&tag, reply.handle, sizeof(u32));
-
+ memcpy(&handle, reply.handle, sizeof(handle));
+ tag = nbd_handle_to_tag(handle);
hwq = blk_mq_unique_tag_to_hwq(tag);
if (hwq < nbd->tag_set.nr_hw_queues)
req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq],
@@ -604,11 +651,25 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
return ERR_PTR(-ENOENT);
}
cmd = blk_mq_rq_to_pdu(req);
+
+ mutex_lock(&cmd->lock);
+ if (cmd->cmd_cookie != nbd_handle_to_cookie(handle)) {
+ dev_err(disk_to_dev(nbd->disk), "Double reply on req %p, cmd_cookie %u, handle cookie %u\n",
+ req, cmd->cmd_cookie, nbd_handle_to_cookie(handle));
+ ret = -ENOENT;
+ goto out;
+ }
+ if (test_bit(NBD_CMD_REQUEUED, &cmd->flags)) {
+ dev_err(disk_to_dev(nbd->disk), "Raced with timeout on req %p\n",
+ req);
+ ret = -ENOENT;
+ goto out;
+ }
if (ntohl(reply.error)) {
dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n",
ntohl(reply.error));
cmd->status = BLK_STS_IOERR;
- return cmd;
+ goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req);
@@ -633,18 +694,18 @@ static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd, int index)
if (nbd_disconnected(config) ||
config->num_connections <= 1) {
cmd->status = BLK_STS_IOERR;
- return cmd;
+ goto out;
}
- return ERR_PTR(-EIO);
+ ret = -EIO;
+ goto out;
}
dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n",
req, bvec.bv_len);
}
- } else {
- /* See the comment in nbd_queue_rq. */
- wait_for_completion(&cmd->send_complete);
}
- return cmd;
+out:
+ mutex_unlock(&cmd->lock);
+ return ret ? ERR_PTR(ret) : cmd;
}
static void recv_work(struct work_struct *work)
@@ -803,7 +864,7 @@ again:
*/
blk_mq_start_request(req);
if (unlikely(nsock->pending && nsock->pending != req)) {
- blk_mq_requeue_request(req, true);
+ nbd_requeue_cmd(cmd);
ret = 0;
goto out;
}
@@ -816,7 +877,7 @@ again:
dev_err_ratelimited(disk_to_dev(nbd->disk),
"Request send failed, requeueing\n");
nbd_mark_nsock_dead(nbd, nsock, 1);
- blk_mq_requeue_request(req, true);
+ nbd_requeue_cmd(cmd);
ret = 0;
}
out:
@@ -840,7 +901,8 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
* that the server is misbehaving (or there was an error) before we're
* done sending everything over the wire.
*/
- init_completion(&cmd->send_complete);
+ mutex_lock(&cmd->lock);
+ clear_bit(NBD_CMD_REQUEUED, &cmd->flags);
/* We can be called directly from the user space process, which means we
* could possibly have signals pending so our sendmsg will fail. In
@@ -852,7 +914,7 @@ static blk_status_t nbd_queue_rq(struct blk_mq_hw_ctx *hctx,
ret = BLK_STS_IOERR;
else if (!ret)
ret = BLK_STS_OK;
- complete(&cmd->send_complete);
+ mutex_unlock(&cmd->lock);
return ret;
}
@@ -1305,6 +1367,12 @@ out:
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
struct nbd_device *nbd = disk->private_data;
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+ bdev->bd_openers == 0)
+ nbd_disconnect_and_put(nbd);
+
nbd_config_put(nbd);
nbd_put(nbd);
}
@@ -1452,6 +1520,8 @@ static int nbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
{
struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq);
cmd->nbd = set->driver_data;
+ cmd->flags = 0;
+ mutex_init(&cmd->lock);
return 0;
}
@@ -1705,6 +1775,10 @@ again:
&config->runtime_flags);
put_dev = true;
}
+ if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+ set_bit(NBD_DISCONNECT_ON_CLOSE,
+ &config->runtime_flags);
+ }
}
if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1823,17 @@ out:
return ret;
}
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+ mutex_lock(&nbd->config_lock);
+ nbd_disconnect(nbd);
+ nbd_clear_sock(nbd);
+ mutex_unlock(&nbd->config_lock);
+ if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+ &nbd->config->runtime_flags))
+ nbd_config_put(nbd);
+}
+
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct nbd_device *nbd;
@@ -1781,13 +1866,7 @@ static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
nbd_put(nbd);
return 0;
}
- mutex_lock(&nbd->config_lock);
- nbd_disconnect(nbd);
- nbd_clear_sock(nbd);
- mutex_unlock(&nbd->config_lock);
- if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
- &nbd->config->runtime_flags))
- nbd_config_put(nbd);
+ nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
nbd_put(nbd);
return 0;
@@ -1798,7 +1877,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
struct nbd_device *nbd = NULL;
struct nbd_config *config;
int index;
- int ret = -EINVAL;
+ int ret = 0;
bool put_dev = false;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1917,7 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
!nbd->task_recv) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
+ ret = -EINVAL;
goto out;
}
@@ -1862,6 +1942,14 @@ static int nbd_genl_reconfigure(struct sk_buff *skb, struct genl_info *info)
&config->runtime_flags))
refcount_inc(&nbd->refs);
}
+
+ if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+ set_bit(NBD_DISCONNECT_ON_CLOSE,
+ &config->runtime_flags);
+ } else {
+ clear_bit(NBD_DISCONNECT_ON_CLOSE,
+ &config->runtime_flags);
+ }
}
if (info->attrs[NBD_ATTR_SOCKETS]) {
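
The nbd change above stops trusting the bare blk-mq tag in a reply and instead round-trips a 64-bit handle that also carries a per-command cookie, so a late reply to a previous incarnation of a requeued command can be rejected. A standalone sketch of the same packing, with illustrative names:

#include <linux/types.h>

#define EXAMPLE_COOKIE_BITS 32

/* Illustrative only: mirrors nbd_cmd_handle() and its inverse helpers.
 * The cookie occupies the upper 32 bits, the blk-mq tag the lower 32.
 */
static u64 example_pack_handle(u32 cookie, u32 tag)
{
	return ((u64)cookie << EXAMPLE_COOKIE_BITS) | tag;
}

static u32 example_handle_to_tag(u64 handle)
{
	return (u32)handle;				/* low 32 bits */
}

static u32 example_handle_to_cookie(u64 handle)
{
	return (u32)(handle >> EXAMPLE_COOKIE_BITS);	/* high 32 bits */
}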
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 7948049f6c43..042c778e5a4e 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1365,7 +1365,7 @@ static blk_qc_t null_queue_bio(struct request_queue *q, struct bio *bio)
static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
pr_info("null: rq %p timed out\n", rq);
- blk_mq_complete_request(rq);
+ __blk_complete_request(rq);
return BLK_EH_DONE;
}
diff --git a/drivers/bluetooth/hci_nokia.c b/drivers/bluetooth/hci_nokia.c
index 14d159e2042d..2dc33e65d2d0 100644
--- a/drivers/bluetooth/hci_nokia.c
+++ b/drivers/bluetooth/hci_nokia.c
@@ -29,7 +29,7 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
-#include <linux/unaligned/le_struct.h>
+#include <asm/unaligned.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index 1cc29629d238..80d60f43db56 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -169,9 +169,9 @@ static int sysc_get_clocks(struct sysc *ddata)
const char *name;
int nr_fck = 0, nr_ick = 0, i, error = 0;
- ddata->clock_roles = devm_kzalloc(ddata->dev,
- sizeof(*ddata->clock_roles) *
+ ddata->clock_roles = devm_kcalloc(ddata->dev,
SYSC_MAX_CLOCKS,
+ sizeof(*ddata->clock_roles),
GFP_KERNEL);
if (!ddata->clock_roles)
return -ENOMEM;
@@ -200,8 +200,8 @@ static int sysc_get_clocks(struct sysc *ddata)
return -EINVAL;
}
- ddata->clocks = devm_kzalloc(ddata->dev,
- sizeof(*ddata->clocks) * ddata->nr_clocks,
+ ddata->clocks = devm_kcalloc(ddata->dev,
+ ddata->nr_clocks, sizeof(*ddata->clocks),
GFP_KERNEL);
if (!ddata->clocks)
return -ENOMEM;
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 53fe633df1e8..c9bf2c219841 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -11,7 +11,7 @@
#include "agp.h"
-static int alpha_core_agp_vm_fault(struct vm_fault *vmf)
+static vm_fault_t alpha_core_agp_vm_fault(struct vm_fault *vmf)
{
alpha_agp_info *agp = agp_bridge->dev_private_data;
dma_addr_t dma_addr;
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index e50c29c97ca7..c69e39fdd02b 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -156,7 +156,7 @@ static u64 amd64_configure(struct pci_dev *hammer, u64 gatt_table)
/* Address to map to */
pci_read_config_dword(hammer, AMD64_GARTAPERTUREBASE, &tmp);
- aperturebase = tmp << 25;
+ aperturebase = (u64)tmp << 25;
aper_base = (aperturebase & PCI_BASE_ADDRESS_MEM_MASK);
enable_gart_translation(hammer, gatt_table);
@@ -277,7 +277,7 @@ static int fix_northbridge(struct pci_dev *nb, struct pci_dev *agp, u16 cap)
pci_read_config_dword(nb, AMD64_GARTAPERTURECTL, &nb_order);
nb_order = (nb_order >> 1) & 7;
pci_read_config_dword(nb, AMD64_GARTAPERTUREBASE, &nb_base);
- nb_aper = nb_base << 25;
+ nb_aper = (u64)nb_base << 25;
/* Northbridge seems to contain crap. Try the AGP bridge. */
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 91bb98c42a1c..aaf9e5afaad4 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -516,11 +516,18 @@ EXPORT_SYMBOL_GPL(hwrng_register);
void hwrng_unregister(struct hwrng *rng)
{
+ int err;
+
mutex_lock(&rng_mutex);
list_del(&rng->list);
- if (current_rng == rng)
- enable_best_rng();
+ if (current_rng == rng) {
+ err = enable_best_rng();
+ if (err) {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ }
+ }
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index ad353be871bf..90ec010bffbd 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -2088,8 +2088,10 @@ static int try_smi_init(struct smi_info *new_smi)
return 0;
out_err:
- ipmi_unregister_smi(new_smi->intf);
- new_smi->intf = NULL;
+ if (new_smi->intf) {
+ ipmi_unregister_smi(new_smi->intf);
+ new_smi->intf = NULL;
+ }
kfree(init_name);
diff --git a/drivers/char/ipmi/kcs_bmc.c b/drivers/char/ipmi/kcs_bmc.c
index fbfc05e3f3d1..bb882ab161fe 100644
--- a/drivers/char/ipmi/kcs_bmc.c
+++ b/drivers/char/ipmi/kcs_bmc.c
@@ -210,34 +210,23 @@ static void kcs_bmc_handle_cmd(struct kcs_bmc *kcs_bmc)
int kcs_bmc_handle_event(struct kcs_bmc *kcs_bmc)
{
unsigned long flags;
- int ret = 0;
+ int ret = -ENODATA;
u8 status;
spin_lock_irqsave(&kcs_bmc->lock, flags);
- if (!kcs_bmc->running) {
- kcs_force_abort(kcs_bmc);
- ret = -ENODEV;
- goto out_unlock;
- }
-
- status = read_status(kcs_bmc) & (KCS_STATUS_IBF | KCS_STATUS_CMD_DAT);
-
- switch (status) {
- case KCS_STATUS_IBF | KCS_STATUS_CMD_DAT:
- kcs_bmc_handle_cmd(kcs_bmc);
- break;
-
- case KCS_STATUS_IBF:
- kcs_bmc_handle_data(kcs_bmc);
- break;
+ status = read_status(kcs_bmc);
+ if (status & KCS_STATUS_IBF) {
+ if (!kcs_bmc->running)
+ kcs_force_abort(kcs_bmc);
+ else if (status & KCS_STATUS_CMD_DAT)
+ kcs_bmc_handle_cmd(kcs_bmc);
+ else
+ kcs_bmc_handle_data(kcs_bmc);
- default:
- ret = -ENODATA;
- break;
+ ret = 0;
}
-out_unlock:
spin_unlock_irqrestore(&kcs_bmc->lock, flags);
return ret;
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index ffeb60d3434c..df66a9dd0aae 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -708,6 +708,7 @@ static int mmap_zero(struct file *file, struct vm_area_struct *vma)
#endif
if (vma->vm_flags & VM_SHARED)
return shmem_zero_setup(vma);
+ vma_set_anonymous(vma);
return 0;
}
diff --git a/drivers/char/random.c b/drivers/char/random.c
index a8fb0020ba5c..bd449ad52442 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -402,7 +402,8 @@ static struct poolinfo {
/*
* Static global variables
*/
-static DECLARE_WAIT_QUEUE_HEAD(random_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_read_wait);
+static DECLARE_WAIT_QUEUE_HEAD(random_write_wait);
static struct fasync_struct *fasync;
static DEFINE_SPINLOCK(random_ready_list_lock);
@@ -721,8 +722,8 @@ retry:
/* should we wake readers? */
if (entropy_bits >= random_read_wakeup_bits &&
- wq_has_sleeper(&random_wait)) {
- wake_up_interruptible_poll(&random_wait, POLLIN);
+ wq_has_sleeper(&random_read_wait)) {
+ wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
/* If the input pool is getting full, send some
@@ -1396,7 +1397,7 @@ retry:
trace_debit_entropy(r->name, 8 * ibytes);
if (ibytes &&
(r->entropy_count >> ENTROPY_SHIFT) < random_write_wakeup_bits) {
- wake_up_interruptible_poll(&random_wait, POLLOUT);
+ wake_up_interruptible(&random_write_wait);
kill_fasync(&fasync, SIGIO, POLL_OUT);
}
@@ -1838,7 +1839,7 @@ _random_read(int nonblock, char __user *buf, size_t nbytes)
if (nonblock)
return -EAGAIN;
- wait_event_interruptible(random_wait,
+ wait_event_interruptible(random_read_wait,
ENTROPY_BITS(&input_pool) >=
random_read_wakeup_bits);
if (signal_pending(current))
@@ -1875,17 +1876,14 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
return ret;
}
-static struct wait_queue_head *
-random_get_poll_head(struct file *file, __poll_t events)
-{
- return &random_wait;
-}
-
static __poll_t
-random_poll_mask(struct file *file, __poll_t events)
+random_poll(struct file *file, poll_table * wait)
{
- __poll_t mask = 0;
+ __poll_t mask;
+ poll_wait(file, &random_read_wait, wait);
+ poll_wait(file, &random_write_wait, wait);
+ mask = 0;
if (ENTROPY_BITS(&input_pool) >= random_read_wakeup_bits)
mask |= EPOLLIN | EPOLLRDNORM;
if (ENTROPY_BITS(&input_pool) < random_write_wakeup_bits)
@@ -1897,14 +1895,22 @@ static int
write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
{
size_t bytes;
- __u32 buf[16];
+ __u32 t, buf[16];
const char __user *p = buffer;
while (count > 0) {
+ int b, i = 0;
+
bytes = min(count, sizeof(buf));
if (copy_from_user(&buf, p, bytes))
return -EFAULT;
+ for (b = bytes ; b > 0 ; b -= sizeof(__u32), i++) {
+ if (!arch_get_random_int(&t))
+ break;
+ buf[i] ^= t;
+ }
+
count -= bytes;
p += bytes;
@@ -1992,8 +1998,7 @@ static int random_fasync(int fd, struct file *filp, int on)
const struct file_operations random_fops = {
.read = random_read,
.write = random_write,
- .get_poll_head = random_get_poll_head,
- .poll_mask = random_poll_mask,
+ .poll = random_poll,
.unlocked_ioctl = random_ioctl,
.fasync = random_fasync,
.llseek = noop_llseek,
@@ -2326,7 +2331,7 @@ void add_hwgenerator_randomness(const char *buffer, size_t count,
* We'll be woken up again once below random_write_wakeup_thresh,
* or when the calling thread is about to terminate.
*/
- wait_event_interruptible(random_wait, kthread_should_stop() ||
+ wait_event_interruptible(random_write_wait, kthread_should_stop() ||
ENTROPY_BITS(&input_pool) <= random_write_wakeup_bits);
mix_pool_bytes(poolp, buffer, count);
credit_entropy_bits(poolp, entropy);
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index ae40cbe770f0..0bb25dd009d1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -96,7 +96,7 @@ obj-$(CONFIG_ARCH_SPRD) += sprd/
obj-$(CONFIG_ARCH_STI) += st/
obj-$(CONFIG_ARCH_STRATIX10) += socfpga/
obj-$(CONFIG_ARCH_SUNXI) += sunxi/
-obj-$(CONFIG_ARCH_SUNXI) += sunxi-ng/
+obj-$(CONFIG_SUNXI_CCU) += sunxi-ng/
obj-$(CONFIG_ARCH_TEGRA) += tegra/
obj-y += ti/
obj-$(CONFIG_CLK_UNIPHIER) += uniphier/
diff --git a/drivers/clk/clk-aspeed.c b/drivers/clk/clk-aspeed.c
index 38b366b00c57..7b70a074095d 100644
--- a/drivers/clk/clk-aspeed.c
+++ b/drivers/clk/clk-aspeed.c
@@ -24,7 +24,7 @@
#define ASPEED_MPLL_PARAM 0x20
#define ASPEED_HPLL_PARAM 0x24
#define AST2500_HPLL_BYPASS_EN BIT(20)
-#define AST2400_HPLL_STRAPPED BIT(18)
+#define AST2400_HPLL_PROGRAMMED BIT(18)
#define AST2400_HPLL_BYPASS_EN BIT(17)
#define ASPEED_MISC_CTRL 0x2c
#define UART_DIV13_EN BIT(12)
@@ -91,8 +91,8 @@ static const struct aspeed_gate_data aspeed_gates[] = {
[ASPEED_CLK_GATE_GCLK] = { 1, 7, "gclk-gate", NULL, 0 }, /* 2D engine */
[ASPEED_CLK_GATE_MCLK] = { 2, -1, "mclk-gate", "mpll", CLK_IS_CRITICAL }, /* SDRAM */
[ASPEED_CLK_GATE_VCLK] = { 3, 6, "vclk-gate", NULL, 0 }, /* Video Capture */
- [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", 0 }, /* PCIe/PCI */
- [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, 0 }, /* DAC */
+ [ASPEED_CLK_GATE_BCLK] = { 4, 8, "bclk-gate", "bclk", CLK_IS_CRITICAL }, /* PCIe/PCI */
+ [ASPEED_CLK_GATE_DCLK] = { 5, -1, "dclk-gate", NULL, CLK_IS_CRITICAL }, /* DAC */
[ASPEED_CLK_GATE_REFCLK] = { 6, -1, "refclk-gate", "clkin", CLK_IS_CRITICAL },
[ASPEED_CLK_GATE_USBPORT2CLK] = { 7, 3, "usb-port2-gate", NULL, 0 }, /* USB2.0 Host port 2 */
[ASPEED_CLK_GATE_LCLK] = { 8, 5, "lclk-gate", NULL, 0 }, /* LPC */
@@ -212,9 +212,22 @@ static int aspeed_clk_is_enabled(struct clk_hw *hw)
{
struct aspeed_clk_gate *gate = to_aspeed_clk_gate(hw);
u32 clk = BIT(gate->clock_idx);
+ u32 rst = BIT(gate->reset_idx);
u32 enval = (gate->flags & CLK_GATE_SET_TO_DISABLE) ? 0 : clk;
u32 reg;
+ /*
+	 * If the IP is in reset, treat the clock as not enabled; this
+	 * happens with some clocks, such as the USB one, when coming
+	 * from cold reset. Without this, aspeed_clk_enable()
+ * will fail to lift the reset.
+ */
+ if (gate->reset_idx >= 0) {
+ regmap_read(gate->map, ASPEED_RESET_CTRL, &reg);
+ if (reg & rst)
+ return 0;
+ }
+
regmap_read(gate->map, ASPEED_CLK_STOP_CTRL, &reg);
return ((reg & clk) == enval) ? 1 : 0;
@@ -565,29 +578,45 @@ builtin_platform_driver(aspeed_clk_driver);
static void __init aspeed_ast2400_cc(struct regmap *map)
{
struct clk_hw *hw;
- u32 val, freq, div;
+ u32 val, div, clkin, hpll;
+ const u16 hpll_rates[][4] = {
+ {384, 360, 336, 408},
+ {400, 375, 350, 425},
+ };
+ int rate;
/*
* CLKIN is the crystal oscillator, 24, 48 or 25MHz selected by
* strapping
*/
regmap_read(map, ASPEED_STRAP, &val);
- if (val & CLKIN_25MHZ_EN)
- freq = 25000000;
- else if (val & AST2400_CLK_SOURCE_SEL)
- freq = 48000000;
- else
- freq = 24000000;
- hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, freq);
- pr_debug("clkin @%u MHz\n", freq / 1000000);
+ rate = (val >> 8) & 3;
+ if (val & CLKIN_25MHZ_EN) {
+ clkin = 25000000;
+ hpll = hpll_rates[1][rate];
+ } else if (val & AST2400_CLK_SOURCE_SEL) {
+ clkin = 48000000;
+ hpll = hpll_rates[0][rate];
+ } else {
+ clkin = 24000000;
+ hpll = hpll_rates[0][rate];
+ }
+ hw = clk_hw_register_fixed_rate(NULL, "clkin", NULL, 0, clkin);
+ pr_debug("clkin @%u MHz\n", clkin / 1000000);
/*
	 * High-speed PLL clock derived from the crystal. This is the CPU clock,
- * and we assume that it is enabled
+ * and we assume that it is enabled. It can be configured through the
+ * HPLL_PARAM register, or set to a specified frequency by strapping.
*/
regmap_read(map, ASPEED_HPLL_PARAM, &val);
- WARN(val & AST2400_HPLL_STRAPPED, "hpll is strapped not configured");
- aspeed_clk_data->hws[ASPEED_CLK_HPLL] = aspeed_ast2400_calc_pll("hpll", val);
+ if (val & AST2400_HPLL_PROGRAMMED)
+ hw = aspeed_ast2400_calc_pll("hpll", val);
+ else
+ hw = clk_hw_register_fixed_rate(NULL, "hpll", "clkin", 0,
+ hpll * 1000000);
+
+ aspeed_clk_data->hws[ASPEED_CLK_HPLL] = hw;
/*
* Strap bits 11:10 define the CPU/AHB clock frequency ratio (aka HCLK)
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9760b526ca31..e2ed078abd90 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -24,7 +24,6 @@
#include <linux/pm_runtime.h>
#include <linux/sched.h>
#include <linux/clkdev.h>
-#include <linux/stringify.h>
#include "clk.h"
@@ -2559,7 +2558,7 @@ static const struct {
unsigned long flag;
const char *name;
} clk_flags[] = {
-#define ENTRY(f) { f, __stringify(f) }
+#define ENTRY(f) { f, #f }
ENTRY(CLK_SET_RATE_GATE),
ENTRY(CLK_SET_PARENT_GATE),
ENTRY(CLK_SET_RATE_PARENT),
diff --git a/drivers/clk/davinci/da8xx-cfgchip.c b/drivers/clk/davinci/da8xx-cfgchip.c
index aae62a5b8734..d1bbee19ed0f 100644
--- a/drivers/clk/davinci/da8xx-cfgchip.c
+++ b/drivers/clk/davinci/da8xx-cfgchip.c
@@ -672,7 +672,7 @@ static int of_da8xx_usb_phy_clk_init(struct device *dev, struct regmap *regmap)
usb1 = da8xx_cfgchip_register_usb1_clk48(dev, regmap);
if (IS_ERR(usb1)) {
- if (PTR_ERR(usb0) == -EPROBE_DEFER)
+ if (PTR_ERR(usb1) == -EPROBE_DEFER)
return -EPROBE_DEFER;
dev_warn(dev, "Failed to register usb1_clk48 (%ld)\n",
diff --git a/drivers/clk/davinci/psc.h b/drivers/clk/davinci/psc.h
index 6a42529d31a9..cc5614567a70 100644
--- a/drivers/clk/davinci/psc.h
+++ b/drivers/clk/davinci/psc.h
@@ -107,7 +107,7 @@ extern const struct davinci_psc_init_data of_da850_psc1_init_data;
#ifdef CONFIG_ARCH_DAVINCI_DM355
extern const struct davinci_psc_init_data dm355_psc_init_data;
#endif
-#ifdef CONFIG_ARCH_DAVINCI_DM356
+#ifdef CONFIG_ARCH_DAVINCI_DM365
extern const struct davinci_psc_init_data dm365_psc_init_data;
#endif
#ifdef CONFIG_ARCH_DAVINCI_DM644x
diff --git a/drivers/clk/meson/clk-audio-divider.c b/drivers/clk/meson/clk-audio-divider.c
index 58f546e04807..e4cf96ba704e 100644
--- a/drivers/clk/meson/clk-audio-divider.c
+++ b/drivers/clk/meson/clk-audio-divider.c
@@ -51,7 +51,7 @@ static unsigned long audio_divider_recalc_rate(struct clk_hw *hw,
struct meson_clk_audio_div_data *adiv = meson_clk_audio_div_data(clk);
unsigned long divider;
- divider = meson_parm_read(clk->map, &adiv->div);
+ divider = meson_parm_read(clk->map, &adiv->div) + 1;
return DIV_ROUND_UP_ULL((u64)parent_rate, divider);
}
diff --git a/drivers/clk/meson/gxbb.c b/drivers/clk/meson/gxbb.c
index 240658404367..177fffb9ebef 100644
--- a/drivers/clk/meson/gxbb.c
+++ b/drivers/clk/meson/gxbb.c
@@ -498,6 +498,7 @@ static struct clk_regmap gxbb_fclk_div2 = {
.ops = &clk_regmap_gate_ops,
.parent_names = (const char *[]){ "fclk_div2_div" },
.num_parents = 1,
+ .flags = CLK_IS_CRITICAL,
},
};
diff --git a/drivers/clk/mvebu/armada-37xx-periph.c b/drivers/clk/mvebu/armada-37xx-periph.c
index 6860bd5a37c5..44e4e27eddad 100644
--- a/drivers/clk/mvebu/armada-37xx-periph.c
+++ b/drivers/clk/mvebu/armada-37xx-periph.c
@@ -35,6 +35,7 @@
#define CLK_SEL 0x10
#define CLK_DIS 0x14
+#define ARMADA_37XX_DVFS_LOAD_1 1
#define LOAD_LEVEL_NR 4
#define ARMADA_37XX_NB_L0L1 0x18
@@ -507,6 +508,40 @@ static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL;
}
+/*
+ * Switching the CPU from the L2 or L3 frequencies (300 and 200 MHz
+ * respectively) to the L0 frequency (1.2 GHz) requires a significant
+ * amount of time to let VDD stabilize to the appropriate
+ * voltage. This amount of time is large enough that it cannot be
+ * covered by the hardware countdown register. Due to this, the CPU
+ * might start operating at L0 before the voltage is stabilized,
+ * leading to CPU stalls.
+ *
+ * To work around this problem, we prevent switching directly from the
+ * L2/L3 frequencies to the L0 frequency, and instead switch to the L1
+ * frequency in-between. The sequence therefore becomes:
+ * 1. First switch from L2/L3 (200/300 MHz) to L1 (600 MHz)
+ * 2. Sleep 20 ms to let the VDD voltage stabilize
+ * 3. Then switch from L1 (600 MHz) to L0 (1200 MHz).
+ */
+static void clk_pm_cpu_set_rate_wa(unsigned long rate, struct regmap *base)
+{
+ unsigned int cur_level;
+
+ if (rate != 1200 * 1000 * 1000)
+ return;
+
+ regmap_read(base, ARMADA_37XX_NB_CPU_LOAD, &cur_level);
+ cur_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
+ if (cur_level <= ARMADA_37XX_DVFS_LOAD_1)
+ return;
+
+ regmap_update_bits(base, ARMADA_37XX_NB_CPU_LOAD,
+ ARMADA_37XX_NB_CPU_LOAD_MASK,
+ ARMADA_37XX_DVFS_LOAD_1);
+ msleep(20);
+}
+
static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
unsigned long parent_rate)
{
@@ -537,6 +572,9 @@ static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
*/
reg = ARMADA_37XX_NB_CPU_LOAD;
mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
+
+ clk_pm_cpu_set_rate_wa(rate, base);
+
regmap_update_bits(base, reg, mask, load_level);
return rate;
diff --git a/drivers/clk/qcom/gcc-msm8996.c b/drivers/clk/qcom/gcc-msm8996.c
index 9f35b3fe1d97..ff8d66fd94e6 100644
--- a/drivers/clk/qcom/gcc-msm8996.c
+++ b/drivers/clk/qcom/gcc-msm8996.c
@@ -2781,6 +2781,7 @@ static struct clk_branch gcc_ufs_rx_cfg_clk = {
static struct clk_branch gcc_ufs_tx_symbol_0_clk = {
.halt_reg = 0x75018,
+ .halt_check = BRANCH_HALT_SKIP,
.clkr = {
.enable_reg = 0x75018,
.enable_mask = BIT(0),
diff --git a/drivers/clk/qcom/mmcc-msm8996.c b/drivers/clk/qcom/mmcc-msm8996.c
index 1a25ee4f3658..4b20d1b67a1b 100644
--- a/drivers/clk/qcom/mmcc-msm8996.c
+++ b/drivers/clk/qcom/mmcc-msm8996.c
@@ -2910,6 +2910,7 @@ static struct gdsc mmagic_bimc_gdsc = {
.name = "mmagic_bimc",
},
.pwrsts = PWRSTS_OFF_ON,
+ .flags = ALWAYS_ON,
};
static struct gdsc mmagic_video_gdsc = {
diff --git a/drivers/clk/sunxi-ng/Makefile b/drivers/clk/sunxi-ng/Makefile
index acaa14cfa25c..49454700f2e5 100644
--- a/drivers/clk/sunxi-ng/Makefile
+++ b/drivers/clk/sunxi-ng/Makefile
@@ -1,24 +1,24 @@
# SPDX-License-Identifier: GPL-2.0
# Common objects
-lib-$(CONFIG_SUNXI_CCU) += ccu_common.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mmc_timing.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_reset.o
+obj-y += ccu_common.o
+obj-y += ccu_mmc_timing.o
+obj-y += ccu_reset.o
# Base clock types
-lib-$(CONFIG_SUNXI_CCU) += ccu_div.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_frac.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_gate.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mux.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mult.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_phase.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_sdm.o
+obj-y += ccu_div.o
+obj-y += ccu_frac.o
+obj-y += ccu_gate.o
+obj-y += ccu_mux.o
+obj-y += ccu_mult.o
+obj-y += ccu_phase.o
+obj-y += ccu_sdm.o
# Multi-factor clocks
-lib-$(CONFIG_SUNXI_CCU) += ccu_nk.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nkmp.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_nm.o
-lib-$(CONFIG_SUNXI_CCU) += ccu_mp.o
+obj-y += ccu_nk.o
+obj-y += ccu_nkm.o
+obj-y += ccu_nkmp.o
+obj-y += ccu_nm.o
+obj-y += ccu_mp.o
# SoC support
obj-$(CONFIG_SUN50I_A64_CCU) += ccu-sun50i-a64.o
@@ -38,12 +38,3 @@ obj-$(CONFIG_SUN8I_R40_CCU) += ccu-sun8i-r40.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-de.o
obj-$(CONFIG_SUN9I_A80_CCU) += ccu-sun9i-a80-usb.o
-
-# The lib-y file goals is supposed to work only in arch/*/lib or lib/. In our
-# case, we want to use that goal, but even though lib.a will be properly
-# generated, it will not be linked in, eventually resulting in a linker error
-# for missing symbols.
-#
-# We can work around that by explicitly adding lib.a to the obj-y goal. This is
-# an undocumented behaviour, but works well for now.
-obj-$(CONFIG_SUNXI_CCU) += lib.a
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 57cb2f00fc07..d8c7f5750cdb 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -735,7 +735,7 @@ static void __arch_timer_setup(unsigned type,
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
clk->name = "arch_mem_timer";
clk->rating = 400;
- clk->cpumask = cpu_all_mask;
+ clk->cpumask = cpu_possible_mask;
if (arch_timer_mem_use_virtual) {
clk->set_state_shutdown = arch_timer_shutdown_virt_mem;
clk->set_state_oneshot_stopped = arch_timer_shutdown_virt_mem;
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index e5cdc3af684c..2717f88c7904 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -304,8 +304,10 @@ static int __init stm32_timer_init(struct device_node *node)
to->private_data = kzalloc(sizeof(struct stm32_timer_private),
GFP_KERNEL);
- if (!to->private_data)
+ if (!to->private_data) {
+ ret = -ENOMEM;
goto deinit;
+ }
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc)) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1de5ec8d5ea3..3c3971256130 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -294,6 +294,7 @@ struct pstate_funcs {
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
@@ -1413,7 +1414,15 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
- cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ if (hwp_active && !hwp_mode_bdw) {
+ unsigned int phy_max, current_max;
+
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ }
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2385,6 +2394,18 @@ static bool __init intel_pstate_no_acpi_pss(void)
return true;
}
+static bool __init intel_pstate_no_acpi_pcch(void)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ status = acpi_get_handle(NULL, "\\_SB", &handle);
+ if (ACPI_FAILURE(status))
+ return true;
+
+ return !acpi_has_method(handle, "PCCH");
+}
+
static bool __init intel_pstate_has_acpi_ppc(void)
{
int i;
@@ -2444,7 +2465,10 @@ static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
switch (plat_info[idx].data) {
case PSS:
- return intel_pstate_no_acpi_pss();
+ if (!intel_pstate_no_acpi_pss())
+ return false;
+
+ return intel_pstate_no_acpi_pcch();
case PPC:
return intel_pstate_has_acpi_ppc() && !force_load;
}
@@ -2467,28 +2491,36 @@ static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
+#define INTEL_PSTATE_HWP_BROADWELL 0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(X86_MODEL_ANY, 0),
{}
};
static int __init intel_pstate_init(void)
{
+ const struct x86_cpu_id *id;
int rc;
if (no_load)
return -ENODEV;
- if (x86_match_cpu(hwp_support_ids)) {
+ id = x86_match_cpu(hwp_support_ids);
+ if (id) {
copy_cpu_funcs(&core_funcs);
if (!no_hwp) {
hwp_active++;
+ hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
} else {
- const struct x86_cpu_id *id;
-
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
diff --git a/drivers/cpufreq/pcc-cpufreq.c b/drivers/cpufreq/pcc-cpufreq.c
index 3f0ce2ae35ee..0c56c9759672 100644
--- a/drivers/cpufreq/pcc-cpufreq.c
+++ b/drivers/cpufreq/pcc-cpufreq.c
@@ -580,6 +580,10 @@ static int __init pcc_cpufreq_init(void)
{
int ret;
+	/* Skip initialization if another cpufreq driver is already registered. */
+ if (cpufreq_get_current_driver())
+ return 0;
+
if (acpi_disabled)
return 0;
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index d049fe4b80c4..efc9a7ae4857 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,6 +42,8 @@ enum _msm8996_version {
NUM_OF_MSM8996_VERSIONS,
};
+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
{
size_t len;
@@ -74,7 +76,6 @@ static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
{
struct opp_table *opp_tables[NR_CPUS] = {0};
- struct platform_device *cpufreq_dt_pdev;
enum _msm8996_version msm8996_version;
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
@@ -86,8 +87,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
int ret;
cpu_dev = get_cpu_device(0);
- if (NULL == cpu_dev)
- ret = -ENODEV;
+ if (!cpu_dev)
+ return -ENODEV;
msm8996_version = qcom_cpufreq_kryo_get_msm_id();
if (NUM_OF_MSM8996_VERSIONS == msm8996_version) {
@@ -96,8 +97,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
}
np = dev_pm_opp_of_get_opp_desc_node(cpu_dev);
- if (IS_ERR(np))
- return PTR_ERR(np);
+ if (!np)
+ return -ENOENT;
ret = of_device_is_compatible(np, "operating-points-v2-kryo-cpu");
if (!ret) {
@@ -115,6 +116,8 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
speedbin = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
switch (msm8996_version) {
case MSM8996_V3:
@@ -127,6 +130,7 @@ static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
BUG();
break;
}
+ kfree(speedbin);
for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@ free_opp:
return ret;
}
+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+ platform_device_unregister(cpufreq_dt_pdev);
+ return 0;
+}
+
static struct platform_driver qcom_cpufreq_kryo_driver = {
.probe = qcom_cpufreq_kryo_probe,
+ .remove = qcom_cpufreq_kryo_remove,
.driver = {
.name = "qcom-cpufreq-kryo",
},
@@ -172,6 +183,7 @@ static struct platform_driver qcom_cpufreq_kryo_driver = {
static const struct of_device_id qcom_cpufreq_kryo_match_list[] __initconst = {
{ .compatible = "qcom,apq8096", },
{ .compatible = "qcom,msm8996", },
+ {}
};
/*
@@ -198,8 +210,9 @@ static int __init qcom_cpufreq_kryo_init(void)
if (unlikely(ret < 0))
return ret;
- ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
- "qcom-cpufreq-kryo", -1, NULL, 0));
+ kryo_cpufreq_pdev = platform_device_register_simple(
+ "qcom-cpufreq-kryo", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
if (0 == ret)
return 0;
@@ -208,5 +221,12 @@ static int __init qcom_cpufreq_kryo_init(void)
}
module_init(qcom_cpufreq_kryo_init);
+static void __init qcom_cpufreq_kryo_exit(void)
+{
+ platform_device_unregister(kryo_cpufreq_pdev);
+ platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 00c7aab8e7d0..afebbd87c4aa 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1548,15 +1548,14 @@ skip_copy:
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
- if (likely(skb))
- chtls_free_skb(sk, skb);
- buffers_freed++;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
} else {
tp->copied_seq += hws->rcvpld;
}
+ chtls_free_skb(sk, skb);
+ buffers_freed++;
hws->copied_seq = 0;
if (copied >= target &&
!skb_peek(&sk->sk_receive_queue))
diff --git a/drivers/dax/device.c b/drivers/dax/device.c
index de2f8297a210..108c37fca782 100644
--- a/drivers/dax/device.c
+++ b/drivers/dax/device.c
@@ -189,14 +189,16 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
/* prevent private mappings from being established */
if ((vma->vm_flags & VM_MAYSHARE) != VM_MAYSHARE) {
- dev_info(dev, "%s: %s: fail, attempted private mapping\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, attempted private mapping\n",
current->comm, func);
return -EINVAL;
}
mask = dax_region->align - 1;
if (vma->vm_start & mask || vma->vm_end & mask) {
- dev_info(dev, "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, unaligned vma (%#lx - %#lx, %#lx)\n",
current->comm, func, vma->vm_start, vma->vm_end,
mask);
return -EINVAL;
@@ -204,13 +206,15 @@ static int check_vma(struct dev_dax *dev_dax, struct vm_area_struct *vma,
if ((dax_region->pfn_flags & (PFN_DEV|PFN_MAP)) == PFN_DEV
&& (vma->vm_flags & VM_DONTCOPY) == 0) {
- dev_info(dev, "%s: %s: fail, dax range requires MADV_DONTFORK\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, dax range requires MADV_DONTFORK\n",
current->comm, func);
return -EINVAL;
}
if (!vma_is_dax(vma)) {
- dev_info(dev, "%s: %s: fail, vma is not DAX capable\n",
+ dev_info_ratelimited(dev,
+ "%s: %s: fail, vma is not DAX capable\n",
current->comm, func);
return -EINVAL;
}
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 903d9c473749..45276abf03aa 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -86,6 +86,7 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
{
struct dax_device *dax_dev;
bool dax_enabled = false;
+ struct request_queue *q;
pgoff_t pgoff;
int err, id;
void *kaddr;
@@ -99,6 +100,13 @@ bool __bdev_dax_supported(struct block_device *bdev, int blocksize)
return false;
}
+ q = bdev_get_queue(bdev);
+ if (!q || !blk_queue_dax(q)) {
+ pr_debug("%s: error: request queue doesn't support dax\n",
+ bdevname(bdev, buf));
+ return false;
+ }
+
err = bdev_dax_pgoff(bdev, 0, PAGE_SIZE, &pgoff);
if (err) {
pr_debug("%s: error: unaligned partition for dax\n",
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index fa31cccbe04f..6bfa217ed6d0 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -794,7 +794,7 @@ static struct dma_chan *k3_of_dma_simple_xlate(struct of_phandle_args *dma_spec,
struct k3_dma_dev *d = ofdma->of_dma_data;
unsigned int request = dma_spec->args[0];
- if (request > d->dma_requests)
+ if (request >= d->dma_requests)
return NULL;
return dma_get_slave_channel(&(d->chans[request].vc.chan));
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index defcdde4d358..de0957fe9668 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -3033,7 +3033,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
pd->src_addr_widths = PL330_DMA_BUSWIDTHS;
pd->dst_addr_widths = PL330_DMA_BUSWIDTHS;
pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
+ pd->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
pd->max_burst = ((pl330->quirks & PL330_QUIRK_BROKEN_NO_FLUSHP) ?
1 : PL330_MAX_BURST);
diff --git a/drivers/dma/ti/omap-dma.c b/drivers/dma/ti/omap-dma.c
index 9b5ca8691f27..a4a931ddf6f6 100644
--- a/drivers/dma/ti/omap-dma.c
+++ b/drivers/dma/ti/omap-dma.c
@@ -1485,7 +1485,11 @@ static int omap_dma_probe(struct platform_device *pdev)
od->ddev.src_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.dst_addr_widths = OMAP_DMA_BUSWIDTHS;
od->ddev.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
- od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
+ if (__dma_omap15xx(od->plat->dma_attr))
+ od->ddev.residue_granularity =
+ DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
+ else
+ od->ddev.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
od->ddev.max_burst = SZ_16M - 1; /* CCEN: 24bit unsigned */
od->ddev.dev = &pdev->dev;
INIT_LIST_HEAD(&od->ddev.channels);
diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
index 951b6c79f166..624a11cb07e2 100644
--- a/drivers/firmware/dmi-id.c
+++ b/drivers/firmware/dmi-id.c
@@ -47,6 +47,7 @@ DEFINE_DMI_ATTR_WITH_SHOW(product_name, 0444, DMI_PRODUCT_NAME);
DEFINE_DMI_ATTR_WITH_SHOW(product_version, 0444, DMI_PRODUCT_VERSION);
DEFINE_DMI_ATTR_WITH_SHOW(product_serial, 0400, DMI_PRODUCT_SERIAL);
DEFINE_DMI_ATTR_WITH_SHOW(product_uuid, 0400, DMI_PRODUCT_UUID);
+DEFINE_DMI_ATTR_WITH_SHOW(product_sku, 0444, DMI_PRODUCT_SKU);
DEFINE_DMI_ATTR_WITH_SHOW(product_family, 0444, DMI_PRODUCT_FAMILY);
DEFINE_DMI_ATTR_WITH_SHOW(board_vendor, 0444, DMI_BOARD_VENDOR);
DEFINE_DMI_ATTR_WITH_SHOW(board_name, 0444, DMI_BOARD_NAME);
@@ -193,6 +194,7 @@ static void __init dmi_id_init_attr_table(void)
ADD_DMI_ATTR(product_serial, DMI_PRODUCT_SERIAL);
ADD_DMI_ATTR(product_uuid, DMI_PRODUCT_UUID);
ADD_DMI_ATTR(product_family, DMI_PRODUCT_FAMILY);
+ ADD_DMI_ATTR(product_sku, DMI_PRODUCT_SKU);
ADD_DMI_ATTR(board_vendor, DMI_BOARD_VENDOR);
ADD_DMI_ATTR(board_name, DMI_BOARD_NAME);
ADD_DMI_ATTR(board_version, DMI_BOARD_VERSION);
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index 54e66adef252..f2483548cde9 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -447,6 +447,7 @@ static void __init dmi_decode(const struct dmi_header *dm, void *dummy)
dmi_save_ident(dm, DMI_PRODUCT_VERSION, 6);
dmi_save_ident(dm, DMI_PRODUCT_SERIAL, 7);
dmi_save_uuid(dm, DMI_PRODUCT_UUID, 8);
+ dmi_save_ident(dm, DMI_PRODUCT_SKU, 25);
dmi_save_ident(dm, DMI_PRODUCT_FAMILY, 26);
break;
case 2: /* Base Board Information */
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index caa37a6dd9d4..a90b0b8fc69a 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -64,7 +64,7 @@ static void efi_retrieve_tpm2_eventlog_1_2(efi_system_table_t *sys_table_arg)
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
efi_status_t status;
- efi_physical_addr_t log_location, log_last_entry;
+ efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
diff --git a/drivers/fpga/altera-cvp.c b/drivers/fpga/altera-cvp.c
index dd4edd8f22ce..7fa793672a7a 100644
--- a/drivers/fpga/altera-cvp.c
+++ b/drivers/fpga/altera-cvp.c
@@ -455,8 +455,10 @@ static int altera_cvp_probe(struct pci_dev *pdev,
mgr = fpga_mgr_create(&pdev->dev, conf->mgr_name,
&altera_cvp_ops, conf);
- if (!mgr)
- return -ENOMEM;
+ if (!mgr) {
+ ret = -ENOMEM;
+ goto err_unmap;
+ }
pci_set_drvdata(pdev, mgr);
diff --git a/drivers/gpio/gpio-uniphier.c b/drivers/gpio/gpio-uniphier.c
index d3cf9502e7e7..58faeb1cef63 100644
--- a/drivers/gpio/gpio-uniphier.c
+++ b/drivers/gpio/gpio-uniphier.c
@@ -181,7 +181,11 @@ static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
fwspec.param_count = 2;
fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
- fwspec.param[1] = IRQ_TYPE_NONE;
+ /*
+ * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
+ * temporarily. Anyway, ->irq_set_type() will override it later.
+ */
+ fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
return irq_create_fwspec_mapping(&fwspec);
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 28d968088131..53a14ee8ad6d 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -64,7 +64,8 @@ static void of_gpio_flags_quirks(struct device_node *np,
* Note that active low is the default.
*/
if (IS_ENABLED(CONFIG_REGULATOR) &&
- (of_device_is_compatible(np, "reg-fixed-voltage") ||
+ (of_device_is_compatible(np, "regulator-fixed") ||
+ of_device_is_compatible(np, "reg-fixed-voltage") ||
of_device_is_compatible(np, "regulator-gpio"))) {
/*
* The regulator GPIO handles are specified such that the
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index a59c07590cee..7dcbac8af9a7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -190,6 +190,7 @@ struct amdgpu_job;
struct amdgpu_irq_src;
struct amdgpu_fpriv;
struct amdgpu_bo_va_mapping;
+struct amdgpu_atif;
enum amdgpu_cp_irq {
AMDGPU_CP_IRQ_GFX_EOP = 0,
@@ -1269,43 +1270,6 @@ struct amdgpu_vram_scratch {
/*
* ACPI
*/
-struct amdgpu_atif_notification_cfg {
- bool enabled;
- int command_code;
-};
-
-struct amdgpu_atif_notifications {
- bool display_switch;
- bool expansion_mode_change;
- bool thermal_state;
- bool forced_power_state;
- bool system_power_state;
- bool display_conf_change;
- bool px_gfx_switch;
- bool brightness_change;
- bool dgpu_display_event;
-};
-
-struct amdgpu_atif_functions {
- bool system_params;
- bool sbios_requests;
- bool select_active_disp;
- bool lid_state;
- bool get_tv_standard;
- bool set_tv_standard;
- bool get_panel_expansion_mode;
- bool set_panel_expansion_mode;
- bool temperature_change;
- bool graphics_device_types;
-};
-
-struct amdgpu_atif {
- struct amdgpu_atif_notifications notifications;
- struct amdgpu_atif_functions functions;
- struct amdgpu_atif_notification_cfg notification_cfg;
- struct amdgpu_encoder *encoder_for_bl;
-};
-
struct amdgpu_atcs_functions {
bool get_ext_state;
bool pcie_perf_req;
@@ -1466,7 +1430,7 @@ struct amdgpu_device {
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_regs[AMDGPU_DEBUGFS_MAX_COMPONENTS];
#endif
- struct amdgpu_atif atif;
+ struct amdgpu_atif *atif;
struct amdgpu_atcs atcs;
struct mutex srbm_mutex;
/* GRBM index mutex. Protects concurrent access to GRBM index */
@@ -1894,6 +1858,12 @@ static inline bool amdgpu_atpx_dgpu_req_power_for_displays(void) { return false;
static inline bool amdgpu_has_atpx(void) { return false; }
#endif
+#if defined(CONFIG_VGA_SWITCHEROO) && defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void);
+#else
+static inline void *amdgpu_atpx_get_dhandle(void) { return NULL; }
+#endif
+
/*
* KMS
*/
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index f4c474a95875..71efcf38f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -57,6 +57,10 @@
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+#define ACP_BT_PLAY_REGS_START 0x14970
+#define ACP_BT_PLAY_REGS_END 0x14a24
+#define ACP_BT_COMP1_REG_OFFSET 0xac
+#define ACP_BT_COMP2_REG_OFFSET 0xa8
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
@@ -77,7 +81,7 @@
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
+#define ACP_DEVS 4
#define ACP_SRC_ID 162
enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
if (adev->acp.acp_cell == NULL)
return -ENOMEM;
- adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+ adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
- i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+ i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ break;
+ }
+
+ i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+ i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+ i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
adev->acp.acp_res[0].name = "acp2x_dma";
adev->acp.acp_res[0].flags = IORESOURCE_MEM;
adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
- adev->acp.acp_res[3].name = "acp2x_dma_irq";
- adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
- adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
- adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+ adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+ adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+ adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+ adev->acp.acp_res[4].name = "acp2x_dma_irq";
+ adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+ adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
- adev->acp.acp_cell[0].num_resources = 4;
+ adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+ adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].num_resources = 1;
+ adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+ adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+ adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 8fa850a070e0..0d8c3fc6eace 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -34,6 +34,45 @@
#include "amd_acpi.h"
#include "atom.h"
+struct amdgpu_atif_notification_cfg {
+ bool enabled;
+ int command_code;
+};
+
+struct amdgpu_atif_notifications {
+ bool display_switch;
+ bool expansion_mode_change;
+ bool thermal_state;
+ bool forced_power_state;
+ bool system_power_state;
+ bool display_conf_change;
+ bool px_gfx_switch;
+ bool brightness_change;
+ bool dgpu_display_event;
+};
+
+struct amdgpu_atif_functions {
+ bool system_params;
+ bool sbios_requests;
+ bool select_active_disp;
+ bool lid_state;
+ bool get_tv_standard;
+ bool set_tv_standard;
+ bool get_panel_expansion_mode;
+ bool set_panel_expansion_mode;
+ bool temperature_change;
+ bool graphics_device_types;
+};
+
+struct amdgpu_atif {
+ acpi_handle handle;
+
+ struct amdgpu_atif_notifications notifications;
+ struct amdgpu_atif_functions functions;
+ struct amdgpu_atif_notification_cfg notification_cfg;
+ struct amdgpu_encoder *encoder_for_bl;
+};
+
/* Call the ATIF method
*/
/**
@@ -46,8 +85,9 @@
* Executes the requested ATIF function (all asics).
* Returns a pointer to the acpi output buffer.
*/
-static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
- struct acpi_buffer *params)
+static union acpi_object *amdgpu_atif_call(struct amdgpu_atif *atif,
+ int function,
+ struct acpi_buffer *params)
{
acpi_status status;
union acpi_object atif_arg_elements[2];
@@ -70,7 +110,8 @@ static union acpi_object *amdgpu_atif_call(acpi_handle handle, int function,
atif_arg_elements[1].integer.value = 0;
}
- status = acpi_evaluate_object(handle, "ATIF", &atif_arg, &buffer);
+ status = acpi_evaluate_object(atif->handle, NULL, &atif_arg,
+ &buffer);
/* Fail only if calling the method fails and ATIF is supported */
if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
@@ -141,15 +182,14 @@ static void amdgpu_atif_parse_functions(struct amdgpu_atif_functions *f, u32 mas
* (all asics).
* returns 0 on success, error on failure.
*/
-static int amdgpu_atif_verify_interface(acpi_handle handle,
- struct amdgpu_atif *atif)
+static int amdgpu_atif_verify_interface(struct amdgpu_atif *atif)
{
union acpi_object *info;
struct atif_verify_interface output;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_VERIFY_INTERFACE, NULL);
if (!info)
return -EIO;
@@ -176,6 +216,35 @@ out:
return err;
}
+static acpi_handle amdgpu_atif_probe_handle(acpi_handle dhandle)
+{
+ acpi_handle handle = NULL;
+ char acpi_method_name[255] = { 0 };
+ struct acpi_buffer buffer = { sizeof(acpi_method_name), acpi_method_name };
+ acpi_status status;
+
+ /* For PX/HG systems, ATIF and ATPX are in the iGPU's namespace; on dGPU-only
+ * systems, ATIF is in the dGPU's namespace.
+ */
+ status = acpi_get_handle(dhandle, "ATIF", &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+
+ if (amdgpu_has_atpx()) {
+ status = acpi_get_handle(amdgpu_atpx_get_dhandle(), "ATIF",
+ &handle);
+ if (ACPI_SUCCESS(status))
+ goto out;
+ }
+
+ DRM_DEBUG_DRIVER("No ATIF handle found\n");
+ return NULL;
+out:
+ acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
+ DRM_DEBUG_DRIVER("Found ATIF handle %s\n", acpi_method_name);
+ return handle;
+}
+
/**
* amdgpu_atif_get_notification_params - determine notify configuration
*
@@ -188,15 +257,16 @@ out:
* where n is specified in the result if a notifier is used.
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_notification_params(acpi_handle handle,
- struct amdgpu_atif_notification_cfg *n)
+static int amdgpu_atif_get_notification_params(struct amdgpu_atif *atif)
{
union acpi_object *info;
+ struct amdgpu_atif_notification_cfg *n = &atif->notification_cfg;
struct atif_system_params params;
size_t size;
int err = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_PARAMETERS,
+ NULL);
if (!info) {
err = -EIO;
goto out;
@@ -250,14 +320,15 @@ out:
* (all asics).
* Returns 0 on success, error on failure.
*/
-static int amdgpu_atif_get_sbios_requests(acpi_handle handle,
- struct atif_sbios_requests *req)
+static int amdgpu_atif_get_sbios_requests(struct amdgpu_atif *atif,
+ struct atif_sbios_requests *req)
{
union acpi_object *info;
size_t size;
int count = 0;
- info = amdgpu_atif_call(handle, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS, NULL);
+ info = amdgpu_atif_call(atif, ATIF_FUNCTION_GET_SYSTEM_BIOS_REQUESTS,
+ NULL);
if (!info)
return -EIO;
@@ -290,11 +361,10 @@ out:
* Returns NOTIFY code
*/
static int amdgpu_atif_handler(struct amdgpu_device *adev,
- struct acpi_bus_event *event)
+ struct acpi_bus_event *event)
{
- struct amdgpu_atif *atif = &adev->atif;
+ struct amdgpu_atif *atif = adev->atif;
struct atif_sbios_requests req;
- acpi_handle handle;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -303,14 +373,14 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0)
return NOTIFY_DONE;
- if (!atif->notification_cfg.enabled ||
+ if (!atif ||
+ !atif->notification_cfg.enabled ||
event->type != atif->notification_cfg.command_code)
/* Not our event */
return NOTIFY_DONE;
/* Check pending SBIOS requests */
- handle = ACPI_HANDLE(&adev->pdev->dev);
- count = amdgpu_atif_get_sbios_requests(handle, &req);
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
if (count <= 0)
return NOTIFY_DONE;
@@ -641,8 +711,8 @@ static int amdgpu_acpi_event(struct notifier_block *nb,
*/
int amdgpu_acpi_init(struct amdgpu_device *adev)
{
- acpi_handle handle;
- struct amdgpu_atif *atif = &adev->atif;
+ acpi_handle handle, atif_handle;
+ struct amdgpu_atif *atif;
struct amdgpu_atcs *atcs = &adev->atcs;
int ret;
@@ -658,12 +728,26 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
DRM_DEBUG_DRIVER("Call to ATCS verify_interface failed: %d\n", ret);
}
+ /* Probe for ATIF, and initialize it if found */
+ atif_handle = amdgpu_atif_probe_handle(handle);
+ if (!atif_handle)
+ goto out;
+
+ atif = kzalloc(sizeof(*atif), GFP_KERNEL);
+ if (!atif) {
+ DRM_WARN("Not enough memory to initialize ATIF\n");
+ goto out;
+ }
+ atif->handle = atif_handle;
+
/* Call the ATIF method */
- ret = amdgpu_atif_verify_interface(handle, atif);
+ ret = amdgpu_atif_verify_interface(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to ATIF verify_interface failed: %d\n", ret);
+ kfree(atif);
goto out;
}
+ adev->atif = atif;
if (atif->notifications.brightness_change) {
struct drm_encoder *tmp;
@@ -693,8 +777,7 @@ int amdgpu_acpi_init(struct amdgpu_device *adev)
}
if (atif->functions.system_params) {
- ret = amdgpu_atif_get_notification_params(handle,
- &atif->notification_cfg);
+ ret = amdgpu_atif_get_notification_params(atif);
if (ret) {
DRM_DEBUG_DRIVER("Call to GET_SYSTEM_PARAMS failed: %d\n",
ret);
@@ -720,4 +803,6 @@ out:
void amdgpu_acpi_fini(struct amdgpu_device *adev)
{
unregister_acpi_notifier(&adev->acpi_nb);
+ if (adev->atif)
+ kfree(adev->atif);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index daa06e7c5bb7..ca8bf1c9a98e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -90,6 +90,12 @@ bool amdgpu_atpx_dgpu_req_power_for_displays(void) {
return amdgpu_atpx_priv.atpx.dgpu_req_power_for_displays;
}
+#if defined(CONFIG_ACPI)
+void *amdgpu_atpx_get_dhandle(void) {
+ return amdgpu_atpx_priv.dhandle;
+}
+#endif
+
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -569,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 82312a7bc6ad..9c85a90be293 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -927,6 +927,10 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
r = amdgpu_bo_vm_update_pte(p);
if (r)
return r;
+
+ r = reservation_object_reserve_shared(vm->root.base.bo->tbo.resv);
+ if (r)
+ return r;
}
return amdgpu_cs_sync_rings(p);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3317d1536f4f..2c5f093e79e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2158,10 +2158,18 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_BONAIRE:
- case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
+ /*
+ * We have systems in the wild with these ASICs that require
+ * LVDS and VGA support which is not supported with DC.
+ *
+ * Fall back to the non-DC driver here by default so as not to
+ * cause regressions.
+ */
+ return amdgpu_dc > 0;
+ case CHIP_HAWAII:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS10:
@@ -2739,6 +2747,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
return r;
+ /* Make sure the IB tests have been flushed */
+ flush_delayed_work(&adev->late_init_work);
+
/* blat the mode back in */
if (fbcon) {
if (!amdgpu_device_has_dc_support(adev)) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 39ec6b8890a1..e74d620d9699 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -376,7 +376,7 @@ int amdgpu_fence_driver_start_ring(struct amdgpu_ring *ring,
struct amdgpu_device *adev = ring->adev;
uint64_t index;
- if (ring != &adev->uvd.inst[ring->me].ring) {
+ if (ring->funcs->type != AMDGPU_RING_TYPE_UVD) {
ring->fence_drv.cpu_addr = &adev->wb.wb[ring->fence_offs];
ring->fence_drv.gpu_addr = adev->wb.gpu_addr + (ring->fence_offs * 4);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index f70eeed9ed76..7aaa263ad8c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -231,6 +231,12 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ib->flags & AMDGPU_IB_FLAG_TC_WB_NOT_INVALIDATE)
fence_flags |= AMDGPU_FENCE_FLAG_TC_WB_ONLY;
+ /* wrap the last IB with fence */
+ if (job && job->uf_addr) {
+ amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
+ fence_flags | AMDGPU_FENCE_FLAG_64BIT);
+ }
+
r = amdgpu_fence_emit(ring, f, fence_flags);
if (r) {
dev_err(adev->dev, "failed to emit fence (%d)\n", r);
@@ -243,12 +249,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
if (ring->funcs->insert_end)
ring->funcs->insert_end(ring);
- /* wrap the last IB with fence */
- if (job && job->uf_addr) {
- amdgpu_ring_emit_fence(ring, job->uf_addr, job->uf_sequence,
- fence_flags | AMDGPU_FENCE_FLAG_64BIT);
- }
-
if (patch_offset != ~0 && ring->funcs->patch_cond_exec)
amdgpu_ring_patch_cond_exec(ring, patch_offset);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5e4e1bd90383..3526efa8960e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -762,8 +762,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
adev->gart_pin_size += amdgpu_bo_size(bo);
}
@@ -790,25 +789,22 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
bo->pin_count--;
if (bo->pin_count)
return 0;
- for (i = 0; i < bo->placement.num_placement; i++) {
- bo->placements[i].lpfn = 0;
- bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
- }
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r)) {
- dev_err(adev->dev, "%p validate failed for unpin\n", bo);
- goto error;
- }
if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
adev->vram_pin_size -= amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- adev->invisible_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
} else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
adev->gart_pin_size -= amdgpu_bo_size(bo);
}
-error:
+ for (i = 0; i < bo->placement.num_placement; i++) {
+ bo->placements[i].lpfn = 0;
+ bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
+ }
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (unlikely(r))
+ dev_err(adev->dev, "%p validate failed for unpin\n", bo);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b455da487782..fc818b4d849c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1882,7 +1882,7 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtcs;
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
/* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e969c879d87e..e5da4654b630 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,6 +73,7 @@ bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_mem_reg *mem);
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index bcf68f80bbf0..3ff08e326838 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
unsigned version_major, version_minor, family_id;
int i, j, r;
- INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+ INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
void *ptr;
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
- cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@ int amdgpu_uvd_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle,
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+ container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
AMD_CG_STATE_GATE);
}
} else {
- schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
}
@@ -1179,7 +1179,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
if (amdgpu_sriov_vf(adev))
return;
- set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+ set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@ void amdgpu_uvd_ring_begin_use(struct amdgpu_ring *ring)
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev))
- schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fba134c..8b23a1b00c76 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@ struct amdgpu_uvd_inst {
void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
- struct delayed_work idle_work;
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
@@ -62,6 +61,7 @@ struct amdgpu_uvd {
bool address_64_bit;
bool use_ctx_buf;
struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct delayed_work idle_work;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 127e87b470ff..1b4ad9b2a755 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -52,7 +52,7 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
- unsigned version_major, version_minor, family_id;
+ unsigned char fw_check;
int r;
INIT_DELAYED_WORK(&adev->vcn.idle_work, amdgpu_vcn_idle_work_handler);
@@ -83,12 +83,33 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version);
- family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
- version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
- version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
- DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
- version_major, version_minor, family_id);
+ /* Bits 20-23 hold the encode major version and are non-zero under the new
+ * naming convention. Under the old naming convention this field overlaps the
+ * version minor and DRM_DISABLED_FLAG; since the latest version minor is 0x5B
+ * and DRM_DISABLED_FLAG is zero there, the field is always zero so far.
+ * These four bits are therefore used to tell the two conventions apart.
+ */
+ fw_check = (le32_to_cpu(hdr->ucode_version) >> 20) & 0xf;
+ if (fw_check) {
+ unsigned int dec_ver, enc_major, enc_minor, vep, fw_rev;
+
+ fw_rev = le32_to_cpu(hdr->ucode_version) & 0xfff;
+ enc_minor = (le32_to_cpu(hdr->ucode_version) >> 12) & 0xff;
+ enc_major = fw_check;
+ dec_ver = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xf;
+ vep = (le32_to_cpu(hdr->ucode_version) >> 28) & 0xf;
+ DRM_INFO("Found VCN firmware Version ENC: %hu.%hu DEC: %hu VEP: %hu Revision: %hu\n",
+ enc_major, enc_minor, dec_ver, vep, fw_rev);
+ } else {
+ unsigned int version_major, version_minor, family_id;
+
+ family_id = le32_to_cpu(hdr->ucode_version) & 0xff;
+ version_major = (le32_to_cpu(hdr->ucode_version) >> 24) & 0xff;
+ version_minor = (le32_to_cpu(hdr->ucode_version) >> 8) & 0xff;
+ DRM_INFO("Found VCN firmware Version: %hu.%hu Family ID: %hu\n",
+ version_major, version_minor, family_id);
+ }
bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
+ AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
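For reference, the version decoding added above can be exercised in isolation. The following minimal userspace sketch mirrors the bit layout from the hunk; the function name, sample value and message wording are illustrative assumptions, not part of the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative decoder mirroring the bit layout added in amdgpu_vcn_sw_init().
 * Bits 20-23 are non-zero only under the new naming convention.
 */
static void decode_vcn_fw_version(uint32_t ucode_version)
{
    uint32_t fw_check = (ucode_version >> 20) & 0xf;

    if (fw_check) {
        uint32_t fw_rev    = ucode_version & 0xfff;
        uint32_t enc_minor = (ucode_version >> 12) & 0xff;
        uint32_t enc_major = fw_check;
        uint32_t dec_ver   = (ucode_version >> 24) & 0xf;
        uint32_t vep       = (ucode_version >> 28) & 0xf;

        printf("VCN fw ENC %u.%u DEC %u VEP %u rev %u\n",
               enc_major, enc_minor, dec_ver, vep, fw_rev);
    } else {
        uint32_t family_id = ucode_version & 0xff;
        uint32_t ver_major = (ucode_version >> 24) & 0xff;
        uint32_t ver_minor = (ucode_version >> 8) & 0xff;

        printf("VCN fw %u.%u family %u\n",
               ver_major, ver_minor, family_id);
    }
}

int main(void)
{
    decode_vcn_fw_version(0x01105001);  /* made-up sample value, not from a real blob */
    return 0;
}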
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index b0eb2f537392..fdcb498f6d19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -107,6 +107,9 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
return;
list_add_tail(&base->bo_list, &bo->va);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&base->vm_status, &vm->relocated);
+
if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
return;
@@ -468,7 +471,6 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
pt->parent = amdgpu_bo_ref(parent->base.bo);
amdgpu_vm_bo_base_init(&entry->base, vm, pt);
- list_move(&entry->base.vm_status, &vm->relocated);
}
if (level < AMDGPU_VM_PTB) {
@@ -1463,7 +1465,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t count;
max_entries = min(max_entries, 16ull * 1024ull);
- for (count = 1; count < max_entries; ++count) {
+ for (count = 1;
+ count < max_entries / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ ++count) {
uint64_t idx = pfn + count;
if (pages_addr[idx] !=
@@ -1476,7 +1480,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count;
+ max_entries = count * (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1491,7 +1495,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
- pfn += last - start + 1;
+ pfn += (last - start + 1) / (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
if (nodes && nodes->size == pfn) {
pfn = 0;
++nodes;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9aca653bec07..b6333f92ba45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,6 +97,38 @@ static u64 amdgpu_vram_mgr_vis_size(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_mem_reg *mem = &bo->tbo.mem;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+ u64 usage = 0;
+
+ if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+ return 0;
+
+ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ return amdgpu_bo_size(bo);
+
+ while (nodes && pages) {
+ usage += nodes->size << PAGE_SHIFT;
+ usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+ pages -= nodes->size;
+ ++nodes;
+ }
+
+ return usage;
+}
+
+/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -135,7 +167,8 @@ static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
- nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+ GFP_KERNEL | __GFP_ZERO);
if (!nodes)
return -ENOMEM;
@@ -190,7 +223,7 @@ error:
drm_mm_remove_node(&nodes[i]);
spin_unlock(&mgr->lock);
- kfree(nodes);
+ kvfree(nodes);
return r == -ENOSPC ? 0 : r;
}
@@ -229,7 +262,7 @@ static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
- kfree(mem->mm_node);
+ kvfree(mem->mm_node);
mem->mm_node = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 0999c843f623..a71b97519cc0 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -900,7 +900,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_phys_funcs = {
.emit_frame_size =
4 + /* vce_v3_0_emit_pipeline_sync */
6, /* amdgpu_vce_ring_emit_fence x1 no user fence */
- .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
+ .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
.emit_ib = amdgpu_vce_ring_emit_ib,
.emit_fence = amdgpu_vce_ring_emit_fence,
.test_ring = amdgpu_vce_ring_test_ring,
@@ -924,7 +924,7 @@ static const struct amdgpu_ring_funcs vce_v3_0_ring_vm_funcs = {
6 + /* vce_v3_0_emit_vm_flush */
4 + /* vce_v3_0_emit_pipeline_sync */
6 + 6, /* amdgpu_vce_ring_emit_fence x2 vm fence */
- .emit_ib_size = 4, /* amdgpu_vce_ring_emit_ib */
+ .emit_ib_size = 5, /* vce_v3_0_ring_emit_ib */
.emit_ib = vce_v3_0_ring_emit_ib,
.emit_vm_flush = vce_v3_0_emit_vm_flush,
.emit_pipeline_sync = vce_v3_0_emit_pipeline_sync,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f9add85157e7..770c6b24be0b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2175,6 +2175,46 @@ get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing)
return color_space;
}
+static void reduce_mode_colour_depth(struct dc_crtc_timing *timing_out)
+{
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+
+ timing_out->display_color_depth--;
+}
+
+static void adjust_colour_depth_from_display_info(struct dc_crtc_timing *timing_out,
+ const struct drm_display_info *info)
+{
+ int normalized_clk;
+ if (timing_out->display_color_depth <= COLOR_DEPTH_888)
+ return;
+ do {
+ normalized_clk = timing_out->pix_clk_khz;
+ /* YCbCr 4:2:0 requires additional adjustment of 1/2 */
+ if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420)
+ normalized_clk /= 2;
+ /* Adjust the pixel clock per the HDMI spec for the given colour depth */
+ switch (timing_out->display_color_depth) {
+ case COLOR_DEPTH_101010:
+ normalized_clk = (normalized_clk * 30) / 24;
+ break;
+ case COLOR_DEPTH_121212:
+ normalized_clk = (normalized_clk * 36) / 24;
+ break;
+ case COLOR_DEPTH_161616:
+ normalized_clk = (normalized_clk * 48) / 24;
+ break;
+ default:
+ return;
+ }
+ if (normalized_clk <= info->max_tmds_clock)
+ return;
+ reduce_mode_colour_depth(timing_out);
+
+ } while (timing_out->display_color_depth > COLOR_DEPTH_888);
+
+}
/*****************************************************************************/
static void
@@ -2183,6 +2223,7 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
const struct drm_connector *connector)
{
struct dc_crtc_timing *timing_out = &stream->timing;
+ const struct drm_display_info *info = &connector->display_info;
memset(timing_out, 0, sizeof(struct dc_crtc_timing));
@@ -2191,8 +2232,10 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
timing_out->v_border_top = 0;
timing_out->v_border_bottom = 0;
/* TODO: un-hardcode */
-
- if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ if (drm_mode_is_420_only(info, mode_in)
+ && stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420;
+ else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCRCB444)
&& stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444;
else
@@ -2228,6 +2271,8 @@ fill_stream_properties_from_drm_display_mode(struct dc_stream_state *stream,
stream->out_transfer_func->type = TF_TYPE_PREDEFINED;
stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+ if (stream->sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ adjust_colour_depth_from_display_info(timing_out, info);
}
static void fill_audio_info(struct audio_info *audio_info,
@@ -3928,10 +3973,11 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
if (acrtc->base.state->event)
prepare_flip_isr(acrtc);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
surface_updates->surface = dc_stream_get_status(acrtc_state->stream)->plane_states[0];
surface_updates->flip_addr = &addr;
-
dc_commit_updates_for_stream(adev->dm.dc,
surface_updates,
1,
@@ -3944,9 +3990,6 @@ static void amdgpu_dm_do_flip(struct drm_crtc *crtc,
__func__,
addr.address.grph.addr.high_part,
addr.address.grph.addr.low_part);
-
-
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
/*
@@ -4206,6 +4249,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct drm_connector *connector;
struct drm_connector_state *old_con_state, *new_con_state;
struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state;
+ int crtc_disable_count = 0;
drm_atomic_helper_update_legacy_modeset_state(dev, state);
@@ -4410,6 +4454,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
bool modeset_needed;
+ if (old_crtc_state->active && !new_crtc_state->active)
+ crtc_disable_count++;
+
dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
modeset_needed = modeset_required(
@@ -4463,11 +4510,9 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
* so we can put the GPU into runtime suspend if we're not driving any
* displays anymore
*/
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
pm_runtime_mark_last_busy(dev->dev);
- for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
- if (old_crtc_state->active && !new_crtc_state->active)
- pm_runtime_put_autosuspend(dev->dev);
- }
}
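The adjust_colour_depth_from_display_info() hunk above scales the nominal 24 bpp pixel clock by 30/24, 36/24 or 48/24 for 10, 12 or 16 bpc and steps the depth back down while the result exceeds the sink's TMDS limit. A small standalone sketch of that arithmetic; the clock and limit figures are made-up examples, not taken from the patch:

#include <stdio.h>

/* Mirror the HDMI deep-colour scaling from the hunk: the nominal 24 bpp
 * pixel clock grows by 30/24, 36/24 or 48/24 for 10, 12 or 16 bpc.
 */
static int normalized_clk_khz(int pix_clk_khz, int bpc, int ycbcr420)
{
    int clk = pix_clk_khz;

    if (ycbcr420)
        clk /= 2;       /* 4:2:0 halves the TMDS character rate */

    switch (bpc) {
    case 10: return clk * 30 / 24;
    case 12: return clk * 36 / 24;
    case 16: return clk * 48 / 24;
    default: return clk;        /* 8 bpc: no scaling */
    }
}

int main(void)
{
    int pix_clk_khz = 594000;   /* e.g. a 4K@60 RGB mode, illustrative */
    int max_tmds_khz = 600000;  /* hypothetical sink limit */

    /* 10 bpc would need 742500 kHz, above the 600000 kHz limit, so the
     * driver would step the colour depth back down to 8 bpc for this mode.
     */
    printf("10 bpc needs %d kHz (limit %d kHz)\n",
           normalized_clk_khz(pix_clk_khz, 10, 0), max_tmds_khz);
    return 0;
}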
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 4304d9e408b8..ace9ad578ca0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -83,22 +83,21 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
enum i2c_mot_mode mot = (msg->request & DP_AUX_I2C_MOT) ?
I2C_MOT_TRUE : I2C_MOT_FALSE;
enum ddc_result res;
- uint32_t read_bytes = msg->size;
+ ssize_t read_bytes;
if (WARN_ON(msg->size > 16))
return -E2BIG;
switch (msg->request & ~DP_AUX_I2C_MOT) {
case DP_AUX_NATIVE_READ:
- res = dal_ddc_service_read_dpcd_data(
+ read_bytes = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
false,
I2C_MOT_UNDEF,
msg->address,
msg->buffer,
- msg->size,
- &read_bytes);
- break;
+ msg->size);
+ return read_bytes;
case DP_AUX_NATIVE_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -109,15 +108,14 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
msg->size);
break;
case DP_AUX_I2C_READ:
- res = dal_ddc_service_read_dpcd_data(
+ read_bytes = dal_ddc_service_read_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
true,
mot,
msg->address,
msg->buffer,
- msg->size,
- &read_bytes);
- break;
+ msg->size);
+ return read_bytes;
case DP_AUX_I2C_WRITE:
res = dal_ddc_service_write_dpcd_data(
TO_DM_AUX(aux)->ddc_service,
@@ -139,9 +137,7 @@ static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
r == DDC_RESULT_SUCESSFULL);
#endif
- if (res != DDC_RESULT_SUCESSFULL)
- return -EIO;
- return read_bytes;
+ return msg->size;
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 5a3346124a01..5a2e952c5bea 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -255,8 +255,9 @@ static void pp_to_dc_clock_levels_with_latency(
DC_DECODE_PP_CLOCK_TYPE(dc_clk_type));
for (i = 0; i < clk_level_info->num_levels; i++) {
- DRM_DEBUG("DM_PPLIB:\t %d\n", pp_clks->data[i].clocks_in_khz);
- clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz;
+ DRM_DEBUG("DM_PPLIB:\t %d in 10kHz\n", pp_clks->data[i].clocks_in_khz);
+ /* convert from units of 10 kHz to kHz */
+ clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz * 10;
clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us;
}
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index ae48d603ebd6..49c2face1e7a 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -629,14 +629,13 @@ bool dal_ddc_service_query_ddc_data(
return ret;
}
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
- uint32_t len,
- uint32_t *read)
+ uint32_t len)
{
struct aux_payload read_payload = {
.i2c_over_aux = i2c,
@@ -653,8 +652,6 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
.mot = mot
};
- *read = 0;
-
if (len > DEFAULT_AUX_MAX_DATA_SIZE) {
BREAK_TO_DEBUGGER();
return DDC_RESULT_FAILED_INVALID_OPERATION;
@@ -664,8 +661,7 @@ enum ddc_result dal_ddc_service_read_dpcd_data(
ddc->ctx->i2caux,
ddc->ddc_pin,
&command)) {
- *read = command.payloads->length;
- return DDC_RESULT_SUCESSFULL;
+ return (ssize_t)command.payloads->length;
}
return DDC_RESULT_FAILED_OPERATION;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 7857cb42b3e6..bdd121485cbc 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -1767,12 +1767,10 @@ static void dp_test_send_link_training(struct dc_link *link)
dp_retrain_link_dp_test(link, &link_settings, false);
}
-/* TODO hbr2 compliance eye output is unstable
+/* TODO Raven hbr2 compliance eye output is unstable
* (toggling on and off) with debugger break
* This causes intermittent PHY automation failure
* Need to look into the root cause */
-static uint8_t force_tps4_for_cp2520 = 1;
-
static void dp_test_send_phy_test_pattern(struct dc_link *link)
{
union phy_test_pattern dpcd_test_pattern;
@@ -1832,13 +1830,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
break;
case PHY_TEST_PATTERN_CP2520_1:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
case PHY_TEST_PATTERN_CP2520_2:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 9cfde0ccf4e9..53c71296f3dd 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -76,6 +76,7 @@ struct dc_caps {
bool is_apu;
bool dual_link_dvi;
bool post_blend_color_processing;
+ bool force_dp_tps4_for_cp2520;
};
struct dc_dcc_surface_param {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index b235a75355b8..bae752332a9f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -741,6 +741,29 @@ static struct mem_input_funcs dce_mi_funcs = {
.mem_input_is_flip_pending = dce_mi_is_flip_pending
};
+static struct mem_input_funcs dce112_mi_funcs = {
+ .mem_input_program_display_marks = dce112_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
+
+static struct mem_input_funcs dce120_mi_funcs = {
+ .mem_input_program_display_marks = dce120_mi_program_display_marks,
+ .allocate_mem_input = dce_mi_allocate_dmif,
+ .free_mem_input = dce_mi_free_dmif,
+ .mem_input_program_surface_flip_and_addr =
+ dce_mi_program_surface_flip_and_addr,
+ .mem_input_program_pte_vm = dce_mi_program_pte_vm,
+ .mem_input_program_surface_config =
+ dce_mi_program_surface_config,
+ .mem_input_is_flip_pending = dce_mi_is_flip_pending
+};
void dce_mem_input_construct(
struct dce_mem_input *dce_mi,
@@ -769,7 +792,7 @@ void dce112_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
- dce_mi->base.funcs->mem_input_program_display_marks = dce112_mi_program_display_marks;
+ dce_mi->base.funcs = &dce112_mi_funcs;
}
void dce120_mem_input_construct(
@@ -781,5 +804,5 @@ void dce120_mem_input_construct(
const struct dce_mem_input_mask *mi_mask)
{
dce_mem_input_construct(dce_mi, ctx, inst, regs, mi_shift, mi_mask);
- dce_mi->base.funcs->mem_input_program_display_marks = dce120_mi_program_display_marks;
+ dce_mi->base.funcs = &dce120_mi_funcs;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 38ec0d609297..344dd2e69e7c 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -678,9 +678,22 @@ bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO: implement when needed; for now hard-code the max value */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index df5cb2d1d164..34dac84066a0 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -1027,6 +1027,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+ dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
index 30b3a08b91be..090b7a8dd67b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_ddc.h
@@ -102,14 +102,13 @@ bool dal_ddc_service_query_ddc_data(
uint8_t *read_buf,
uint32_t read_size);
-enum ddc_result dal_ddc_service_read_dpcd_data(
+ssize_t dal_ddc_service_read_dpcd_data(
struct ddc_service *ddc,
bool i2c,
enum i2c_mot_mode mot,
uint32_t address,
uint8_t *data,
- uint32_t len,
- uint32_t *read);
+ uint32_t len);
enum ddc_result dal_ddc_service_write_dpcd_data(
struct ddc_service *ddc,
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 092d800b703a..33b4de4ad66e 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1433,7 +1433,10 @@ struct atom_smc_dpm_info_v4_1
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
- uint32_t boardreserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t boardreserved[9];
};
/*
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
index 5325661fedff..d27c1c9df286 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.c
@@ -512,14 +512,82 @@ int pp_atomfwctrl_get_clk_information_by_clkid(struct pp_hwmgr *hwmgr, BIOS_CLKI
return 0;
}
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_2(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_2 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU11_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
+static void pp_atomfwctrl_copy_vbios_bootup_values_3_1(struct pp_hwmgr *hwmgr,
+ struct pp_atomfwctrl_bios_boot_up_values *boot_values,
+ struct atom_firmware_info_v3_1 *fw_info)
+{
+ uint32_t frequency = 0;
+
+ boot_values->ulRevision = fw_info->firmware_revision;
+ boot_values->ulGfxClk = fw_info->bootup_sclk_in10khz;
+ boot_values->ulUClk = fw_info->bootup_mclk_in10khz;
+ boot_values->usVddc = fw_info->bootup_vddc_mv;
+ boot_values->usVddci = fw_info->bootup_vddci_mv;
+ boot_values->usMvddc = fw_info->bootup_mvddc_mv;
+ boot_values->usVddGfx = fw_info->bootup_vddgfx_mv;
+ boot_values->ucCoolingID = fw_info->coolingsolution_id;
+ boot_values->ulSocClk = 0;
+ boot_values->ulDCEFClk = 0;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_SOCCLK_ID, &frequency))
+ boot_values->ulSocClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCEFCLK_ID, &frequency))
+ boot_values->ulDCEFClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_ECLK_ID, &frequency))
+ boot_values->ulEClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_VCLK_ID, &frequency))
+ boot_values->ulVClk = frequency;
+
+ if (!pp_atomfwctrl_get_clk_information_by_clkid(hwmgr, SMU9_SYSPLL0_DCLK_ID, &frequency))
+ boot_values->ulDClk = frequency;
+}
+
int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
struct pp_atomfwctrl_bios_boot_up_values *boot_values)
{
- struct atom_firmware_info_v3_1 *info = NULL;
+ struct atom_firmware_info_v3_2 *fwinfo_3_2;
+ struct atom_firmware_info_v3_1 *fwinfo_3_1;
+ struct atom_common_table_header *info = NULL;
uint16_t ix;
ix = GetIndexIntoMasterDataTable(firmwareinfo);
- info = (struct atom_firmware_info_v3_1 *)
+ info = (struct atom_common_table_header *)
smu_atom_get_data_table(hwmgr->adev,
ix, NULL, NULL, NULL);
@@ -528,16 +596,18 @@ int pp_atomfwctrl_get_vbios_bootup_values(struct pp_hwmgr *hwmgr,
return -EINVAL;
}
- boot_values->ulRevision = info->firmware_revision;
- boot_values->ulGfxClk = info->bootup_sclk_in10khz;
- boot_values->ulUClk = info->bootup_mclk_in10khz;
- boot_values->usVddc = info->bootup_vddc_mv;
- boot_values->usVddci = info->bootup_vddci_mv;
- boot_values->usMvddc = info->bootup_mvddc_mv;
- boot_values->usVddGfx = info->bootup_vddgfx_mv;
- boot_values->ucCoolingID = info->coolingsolution_id;
- boot_values->ulSocClk = 0;
- boot_values->ulDCEFClk = 0;
+ if ((info->format_revision == 3) && (info->content_revision == 2)) {
+ fwinfo_3_2 = (struct atom_firmware_info_v3_2 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_2(hwmgr,
+ boot_values, fwinfo_3_2);
+ } else if ((info->format_revision == 3) && (info->content_revision == 1)) {
+ fwinfo_3_1 = (struct atom_firmware_info_v3_1 *)info;
+ pp_atomfwctrl_copy_vbios_bootup_values_3_1(hwmgr,
+ boot_values, fwinfo_3_1);
+ } else {
+ pr_info("Fw info table revision does not match!");
+ return -EINVAL;
+ }
return 0;
}
@@ -629,5 +699,7 @@ int pp_atomfwctrl_get_smc_dpm_information(struct pp_hwmgr *hwmgr,
param->acggfxclkspreadpercent = info->acggfxclkspreadpercent;
param->acggfxclkspreadfreq = info->acggfxclkspreadfreq;
+ param->Vr2_I2C_address = info->Vr2_I2C_address;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
index fe10aa4db5e6..22e21668c93a 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/ppatomfwctrl.h
@@ -136,6 +136,9 @@ struct pp_atomfwctrl_bios_boot_up_values {
uint32_t ulUClk;
uint32_t ulSocClk;
uint32_t ulDCEFClk;
+ uint32_t ulEClk;
+ uint32_t ulVClk;
+ uint32_t ulDClk;
uint16_t usVddc;
uint16_t usVddci;
uint16_t usMvddc;
@@ -207,6 +210,8 @@ struct pp_atomfwctrl_smc_dpm_parameters
uint8_t acggfxclkspreadenabled;
uint8_t acggfxclkspreadpercent;
uint16_t acggfxclkspreadfreq;
+
+ uint8_t Vr2_I2C_address;
};
int pp_atomfwctrl_get_gpu_pll_dividers_vega10(struct pp_hwmgr *hwmgr,
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index dbe4b1f66784..22364875a943 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@ static int vega10_disable_se_edc_config(struct pp_hwmgr *hwmgr)
static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- int result;
+ int result = 0;
uint32_t num_se = 0;
uint32_t count, data;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 782e2098824d..c98e5de777cd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -81,6 +81,7 @@ static void vega12_set_default_registry_data(struct pp_hwmgr *hwmgr)
data->registry_data.disallowed_features = 0x0;
data->registry_data.od_state_in_dc_support = 0;
+ data->registry_data.thermal_support = 1;
data->registry_data.skip_baco_hardware = 0;
data->registry_data.log_avfs_param = 0;
@@ -803,6 +804,9 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;
+ data->vbios_boot_state.eclock = boot_up_values.ulEClk;
+ data->vbios_boot_state.dclock = boot_up_values.ulDClk;
+ data->vbios_boot_state.vclock = boot_up_values.ulVClk;
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetMinDeepSleepDcefclk,
(uint32_t)(data->vbios_boot_state.dcef_clock / 100));
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
index e81ded1ec198..49b38df8c7f2 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.h
@@ -167,6 +167,9 @@ struct vega12_vbios_boot_state {
uint32_t mem_clock;
uint32_t soc_clock;
uint32_t dcef_clock;
+ uint32_t eclock;
+ uint32_t dclock;
+ uint32_t vclock;
};
#define DPMTABLE_OD_UPDATE_SCLK 0x00000001
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index 888ddca902d8..29914700ee82 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -230,6 +230,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->AcgThresholdFreqLow = 0xFFFF;
}
+ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
index 2f8a3b983cce..b08526fd1619 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/vega12/smu9_driver_if.h
@@ -499,7 +499,10 @@ typedef struct {
uint8_t AcgGfxclkSpreadPercent;
uint16_t AcgGfxclkSpreadFreq;
- uint32_t BoardReserved[10];
+ uint8_t Vr2_I2C_address;
+ uint8_t padding_vr2[3];
+
+ uint32_t BoardReserved[9];
uint32_t MmHubPadding[7];
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
index d644a9bb9078..9f407c48d4f0 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smu7_smumgr.c
@@ -381,6 +381,7 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
uint32_t fw_to_load;
int result = 0;
struct SMU_DRAMData_TOC *toc;
+ uint32_t num_entries = 0;
if (!hwmgr->reload_fw) {
pr_info("skip reloading...\n");
@@ -422,41 +423,41 @@ int smu7_request_smu_load_fw(struct pp_hwmgr *hwmgr)
}
toc = (struct SMU_DRAMData_TOC *)smu_data->header;
- toc->num_entries = 0;
toc->structure_version = 1;
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_RLC_G, &toc->entry[toc->num_entries++]),
+ UCODE_ID_RLC_G, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_CE, &toc->entry[toc->num_entries++]),
+ UCODE_ID_CP_CE, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_PFP, &toc->entry[toc->num_entries++]),
+ UCODE_ID_CP_PFP, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_ME, &toc->entry[toc->num_entries++]),
+ UCODE_ID_CP_ME, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_MEC, &toc->entry[toc->num_entries++]),
+ UCODE_ID_CP_MEC, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_MEC_JT1, &toc->entry[toc->num_entries++]),
+ UCODE_ID_CP_MEC_JT1, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_CP_MEC_JT2, &toc->entry[toc->num_entries++]),
+ UCODE_ID_CP_MEC_JT2, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_SDMA0, &toc->entry[toc->num_entries++]),
+ UCODE_ID_SDMA0, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_SDMA1, &toc->entry[toc->num_entries++]),
+ UCODE_ID_SDMA1, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
if (!hwmgr->not_vf)
PP_ASSERT_WITH_CODE(0 == smu7_populate_single_firmware_entry(hwmgr,
- UCODE_ID_MEC_STORAGE, &toc->entry[toc->num_entries++]),
+ UCODE_ID_MEC_STORAGE, &toc->entry[num_entries++]),
"Failed to Get Firmware Entry.", return -EINVAL);
+ toc->num_entries = num_entries;
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_HI, upper_32_bits(smu_data->header_buffer.mc_addr));
smu7_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DRV_DRAM_ADDR_LO, lower_32_bits(smu_data->header_buffer.mc_addr));
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 8d20faa198cf..0a788d76ed5f 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -278,7 +278,6 @@ static int malidp_init(struct drm_device *drm)
static void malidp_fini(struct drm_device *drm)
{
- drm_atomic_helper_shutdown(drm);
drm_mode_config_cleanup(drm);
}
@@ -646,6 +645,7 @@ vblank_fail:
malidp_de_irq_fini(drm);
drm->irq_enabled = false;
irq_init_fail:
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
bind_fail:
of_node_put(malidp->crtc.port);
@@ -681,6 +681,7 @@ static void malidp_unbind(struct device *dev)
malidp_se_irq_fini(drm);
malidp_de_irq_fini(drm);
drm->irq_enabled = false;
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
of_node_put(malidp->crtc.port);
malidp->crtc.port = NULL;
diff --git a/drivers/gpu/drm/arm/malidp_hw.c b/drivers/gpu/drm/arm/malidp_hw.c
index d789b46dc817..069783e715f1 100644
--- a/drivers/gpu/drm/arm/malidp_hw.c
+++ b/drivers/gpu/drm/arm/malidp_hw.c
@@ -634,7 +634,8 @@ const struct malidp_hw malidp_device[MALIDP_MAX_DEVICES] = {
.vsync_irq = MALIDP500_DE_IRQ_VSYNC,
},
.se_irq_map = {
- .irq_mask = MALIDP500_SE_IRQ_CONF_MODE,
+ .irq_mask = MALIDP500_SE_IRQ_CONF_MODE |
+ MALIDP500_SE_IRQ_GLOBAL,
.vsync_irq = 0,
},
.dc_irq_map = {
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 7a44897c50fe..29409a65d864 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -23,6 +23,7 @@
/* Layer specific register offsets */
#define MALIDP_LAYER_FORMAT 0x000
+#define LAYER_FORMAT_MASK 0x3f
#define MALIDP_LAYER_CONTROL 0x004
#define LAYER_ENABLE (1 << 0)
#define LAYER_FLOWCFG_MASK 7
@@ -235,8 +236,8 @@ static int malidp_de_plane_check(struct drm_plane *plane,
if (state->rotation & MALIDP_ROTATED_MASK) {
int val;
- val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_h,
- state->crtc_w,
+ val = mp->hwdev->hw->rotmem_required(mp->hwdev, state->crtc_w,
+ state->crtc_h,
fb->format->format);
if (val < 0)
return val;
@@ -337,7 +338,9 @@ static void malidp_de_plane_update(struct drm_plane *plane,
dest_w = plane->state->crtc_w;
dest_h = plane->state->crtc_h;
- malidp_hw_write(mp->hwdev, ms->format, mp->layer->base);
+ val = malidp_hw_read(mp->hwdev, mp->layer->base);
+ val = (val & ~LAYER_FORMAT_MASK) | ms->format;
+ malidp_hw_write(mp->hwdev, val, mp->layer->base);
for (i = 0; i < ms->n_planes; i++) {
/* calculate the offset for the layer's plane registers */
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 03eeee11dd5b..42a40daff132 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -519,8 +519,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
/*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
+ * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+ * is set. Writing has some other effect to acknowledge the IRQ -
+ * without this, we only get a single IRQ.
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
@@ -1116,16 +1117,22 @@ armada_drm_crtc_set_property(struct drm_crtc *crtc,
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
return 0;
}
static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
static const struct drm_crtc_funcs armada_crtc_funcs = {
@@ -1415,6 +1422,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e2..345dc4d0851e 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x) ((x) << 8)
CFG_PIXCMD_MASK = 0xff,
};
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c391955009d6..afa7ded3ae31 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -28,6 +28,7 @@ struct armada_ovl_plane_properties {
uint16_t contrast;
uint16_t saturation;
uint32_t colorkey_mode;
+ uint32_t colorkey_enable;
};
struct armada_ovl_plane {
@@ -54,11 +55,13 @@ armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
-
- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
+ armada_updatel(prop->colorkey_mode,
+ CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ dcrtc->base + LCD_SPU_DMA_CTRL1);
+ if (dcrtc->variant->has_spu_adv_reg)
+ armada_updatel(prop->colorkey_enable,
+ ADV_GRACOLORKEY | ADV_VIDCOLORKEY,
+ dcrtc->base + LCD_SPU_ADV_REG);
spin_unlock_irq(&dcrtc->irq_lock);
}
@@ -321,8 +324,17 @@ static int armada_ovl_plane_set_property(struct drm_plane *plane,
dplane->prop.colorkey_vb |= K2B(val);
update_attr = true;
} else if (property == priv->colorkey_mode_prop) {
- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
+ if (val == CKMODE_DISABLE) {
+ dplane->prop.colorkey_mode =
+ CFG_CKMODE(CKMODE_DISABLE) |
+ CFG_ALPHAM_CFG | CFG_ALPHA(255);
+ dplane->prop.colorkey_enable = 0;
+ } else {
+ dplane->prop.colorkey_mode =
+ CFG_CKMODE(val) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
+ }
update_attr = true;
} else if (property == priv->brightness_prop) {
dplane->prop.brightness = val - 256;
@@ -453,7 +465,9 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
dplane->prop.colorkey_yr = 0xfefefe00;
dplane->prop.colorkey_ug = 0x01010100;
dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
+ dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ dplane->prop.colorkey_enable = ADV_GRACOLORKEY;
dplane->prop.brightness = 0;
dplane->prop.contrast = 0x4000;
dplane->prop.saturation = 0x4000;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 73c875db45f4..47e0992f3908 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -839,7 +839,7 @@ static int atmel_hlcdc_plane_init_properties(struct atmel_hlcdc_plane *plane)
return ret;
}
- if (desc->layout.xstride && desc->layout.pstride) {
+ if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
int ret;
ret = drm_plane_create_rotation_property(&plane->base,
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 7ab36042a822..a6e8f4591e63 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -14,6 +14,7 @@
#include <drm/bridge/mhl.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -36,8 +37,11 @@
#define SII8620_BURST_BUF_LEN 288
#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
enum sii8620_mode {
CM_DISCONNECTED,
@@ -69,9 +73,7 @@ struct sii8620 {
struct regulator_bulk_data supplies[2];
struct mutex lock; /* context lock, protects fields below */
int error;
- int pixel_clock;
unsigned int use_packed_pixel:1;
- int video_code;
enum sii8620_mode mode;
enum sii8620_sink_type sink_type;
u8 cbus_status;
@@ -79,7 +81,9 @@ struct sii8620 {
u8 xstat[MHL_XDS_SIZE];
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
- u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+ bool feature_complete;
+ bool devcap_read;
+ bool sink_detected;
struct edid *edid;
unsigned int gen2_write_burst:1;
enum sii8620_mt_state mt_state;
@@ -476,7 +480,7 @@ static void sii8620_update_array(u8 *dst, u8 *src, int count)
}
}
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
{
static const char * const sink_str[] = {
[SINK_NONE] = "NONE",
@@ -487,7 +491,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
char sink_name[20];
struct device *dev = ctx->dev;
- if (ret < 0)
+ if (!ctx->sink_detected || !ctx->devcap_read)
return;
sii8620_fetch_edid(ctx);
@@ -496,6 +500,7 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
sii8620_mhl_disconnected(ctx);
return;
}
+ sii8620_set_upstream_edid(ctx);
if (drm_detect_hdmi_monitor(ctx->edid))
ctx->sink_type = SINK_HDMI;
@@ -508,53 +513,6 @@ static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
sink_str[ctx->sink_type], sink_name);
}
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
- if (!sii8620_is_mhl3(ctx))
- return;
-
- sii8620_write(ctx, REG_FCGC,
- BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
- sii8620_setbits(ctx, REG_HRXCTRL3,
- BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
- sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
- sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
- sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
- sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
- sii8620_write_seq_static(ctx,
- REG_TDMLLCTL, 0,
- REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
- BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
- REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
- REG_HRXINTL, 0xff,
- REG_HRXINTH, 0xff,
- REG_TTXINTL, 0xff,
- REG_TTXINTH, 0xff,
- REG_TRXINTL, 0xff,
- REG_TRXINTH, 0xff,
- REG_HTXINTL, 0xff,
- REG_HTXINTH, 0xff,
- REG_FCINTR0, 0xff,
- REG_FCINTR1, 0xff,
- REG_FCINTR2, 0xff,
- REG_FCINTR3, 0xff,
- REG_FCINTR4, 0xff,
- REG_FCINTR5, 0xff,
- REG_FCINTR6, 0xff,
- REG_FCINTR7, 0xff
- );
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
- if (ret < 0)
- return;
-
- sii8620_set_upstream_edid(ctx);
- sii8620_hsic_init(ctx);
- sii8620_enable_hpd(ctx);
-}
-
static void sii8620_mr_devcap(struct sii8620 *ctx)
{
u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +528,8 @@ static void sii8620_mr_devcap(struct sii8620 *ctx)
dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+ ctx->devcap_read = true;
+ sii8620_identify_sink(ctx);
}
static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +767,7 @@ static void sii8620_burst_rx_all(struct sii8620 *ctx)
static void sii8620_fetch_edid(struct sii8620 *ctx)
{
u8 lm_ddc, ddc_cmd, int3, cbus;
+ unsigned long timeout;
int fetched, i;
int edid_len = EDID_LENGTH;
u8 *edid;
@@ -856,23 +817,31 @@ static void sii8620_fetch_edid(struct sii8620 *ctx)
REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
);
- do {
- int3 = sii8620_readb(ctx, REG_INTR3);
+ int3 = 0;
+ timeout = jiffies + msecs_to_jiffies(200);
+ for (;;) {
cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
- if (int3 & BIT_DDC_CMD_DONE)
- break;
-
- if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
+ if (int3 & BIT_DDC_CMD_DONE) {
+ if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+ >= FETCH_SIZE)
+ break;
+ } else {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ }
+ if (time_is_before_jiffies(timeout)) {
+ ctx->error = -ETIMEDOUT;
+ dev_err(ctx->dev, "timeout during EDID read\n");
kfree(edid);
edid = NULL;
goto end;
}
- } while (1);
-
- sii8620_readb(ctx, REG_DDC_STATUS);
- while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
usleep_range(10, 20);
+ }
sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
if (fetched + FETCH_SIZE == EDID_LENGTH) {
@@ -971,8 +940,17 @@ static int sii8620_hw_on(struct sii8620 *ctx)
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret)
return ret;
+
usleep_range(10000, 20000);
- return clk_prepare_enable(ctx->clk_xtal);
+ ret = clk_prepare_enable(ctx->clk_xtal);
+ if (ret)
+ return ret;
+
+ msleep(100);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(100);
+
+ return 0;
}
static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +960,6 @@ static int sii8620_hw_off(struct sii8620 *ctx)
return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
- usleep_range(10000, 20000);
- gpiod_set_value(ctx->gpio_reset, 0);
- usleep_range(5000, 20000);
- gpiod_set_value(ctx->gpio_reset, 1);
- usleep_range(10000, 20000);
- gpiod_set_value(ctx->gpio_reset, 0);
- msleep(300);
-}
-
static void sii8620_cbus_reset(struct sii8620 *ctx)
{
sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1055,23 +1022,23 @@ static void sii8620_set_format(struct sii8620 *ctx)
BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
ctx->use_packed_pixel ? ~0 : 0);
} else {
- if (ctx->use_packed_pixel)
+ if (ctx->use_packed_pixel) {
sii8620_write_seq_static(ctx,
REG_VID_MODE, BIT_VID_MODE_M1080P,
REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
REG_MHLTX_CTL6, 0x60
);
- else
+ } else {
sii8620_write_seq_static(ctx,
REG_VID_MODE, 0,
REG_MHL_TOP_CTL, 1,
REG_MHLTX_CTL6, 0xa0
);
+ }
}
if (ctx->use_packed_pixel)
- out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
- BIT_TPI_OUTPUT_CSCMODE709;
+ out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
else
out_fmt = VAL_TPI_FORMAT(RGB, FULL);
@@ -1128,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
return frm_len;
}
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+ struct drm_display_mode *mode)
{
struct mhl3_infoframe mhl_frm;
union hdmi_infoframe frm;
u8 buf[31];
int ret;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ mode,
+ true);
+ if (ctx->use_packed_pixel)
+ frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+ if (ret > 0)
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
sii8620_write(ctx, REG_TPI_SC,
BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
- ARRAY_SIZE(ctx->avif) - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1148,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
return;
}
- ret = hdmi_avi_infoframe_init(&frm.avi);
- frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
- frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
- frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
- frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
- frm.avi.video_code = ctx->video_code;
- if (!ret)
- ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
- if (ret > 0)
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1177,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
static void sii8620_start_video(struct sii8620 *ctx)
{
+ struct drm_display_mode *mode =
+ &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
if (!sii8620_is_mhl3(ctx))
sii8620_stop_video(ctx);
@@ -1195,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
sii8620_set_format(ctx);
if (!sii8620_is_mhl3(ctx)) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+ u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+ if (ctx->use_packed_pixel)
+ link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
sii8620_set_auto_zone(ctx);
} else {
static const struct {
@@ -1213,10 +1189,10 @@ static void sii8620_start_video(struct sii8620 *ctx)
MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
};
u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
- int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+ int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
int i;
- for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+ for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
if (clk < clk_spec[i].max_clk)
break;
@@ -1242,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
clk_spec[i].link_rate);
}
- sii8620_set_infoframes(ctx);
+ sii8620_set_infoframes(ctx, mode);
}
static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1534,6 +1510,16 @@ static void sii8620_set_mode(struct sii8620 *ctx, enum sii8620_mode mode)
);
}
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+ sii8620_disable_hpd(ctx);
+ ctx->sink_type = SINK_NONE;
+ ctx->sink_detected = false;
+ ctx->feature_complete = false;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+}
+
static void sii8620_disconnect(struct sii8620 *ctx)
{
sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1547,7 @@ static void sii8620_disconnect(struct sii8620 *ctx)
REG_MHL_DP_CTL6, 0x2A,
REG_MHL_DP_CTL7, 0x03
);
- sii8620_disable_hpd(ctx);
+ sii8620_hpd_unplugged(ctx);
sii8620_write_seq_static(ctx,
REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1595,8 @@ static void sii8620_disconnect(struct sii8620 *ctx)
memset(ctx->xstat, 0, sizeof(ctx->xstat));
memset(ctx->devcap, 0, sizeof(ctx->devcap));
memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->devcap_read = false;
ctx->cbus_status = 0;
- ctx->sink_type = SINK_NONE;
- kfree(ctx->edid);
- ctx->edid = NULL;
sii8620_mt_cleanup(ctx);
}
@@ -1699,17 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
static void sii8620_status_changed_path(struct sii8620 *ctx)
{
- if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL
- | MHL_DST_LM_PATH_ENABLED);
- if (!sii8620_is_mhl3(ctx))
- sii8620_mt_read_devcap(ctx, false);
- sii8620_mt_set_cont(ctx, sii8620_sink_detected);
- } else {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL);
- }
+ u8 link_mode;
+
+ if (ctx->use_packed_pixel)
+ link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ link_mode);
}
static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -1722,9 +1707,14 @@ static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
- if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+ MHL_DST_CONN_DCAP_RDY) {
sii8620_status_dcap_ready(ctx);
+ if (!sii8620_is_mhl3(ctx))
+ sii8620_mt_read_devcap(ctx, false);
+ }
+
if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
sii8620_status_changed_path(ctx);
}
@@ -1808,8 +1798,11 @@ static void sii8620_msc_mr_set_int(struct sii8620 *ctx)
}
if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
sii8620_send_features(ctx);
- if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
- sii8620_edid_read(ctx, 0);
+ if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+ ctx->feature_complete = true;
+ if (ctx->edid)
+ sii8620_enable_hpd(ctx);
+ }
}
static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1877,15 @@ static void sii8620_irq_msc(struct sii8620 *ctx)
if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
sii8620_msc_mr_write_stat(ctx);
+ if (stat & BIT_CBUS_HPD_CHG) {
+ if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+ ctx->sink_detected = true;
+ sii8620_identify_sink(ctx);
+ } else {
+ sii8620_hpd_unplugged(ctx);
+ }
+ }
+
if (stat & BIT_CBUS_MSC_MR_SET_INT)
sii8620_msc_mr_set_int(ctx);
@@ -1931,14 +1933,6 @@ static void sii8620_irq_edid(struct sii8620 *ctx)
ctx->mt_state = MT_STATE_DONE;
}
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
- sii8620_write_seq_static(ctx,
- REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
- REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
- );
-}
-
static void sii8620_irq_scdt(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1940,13 @@ static void sii8620_irq_scdt(struct sii8620 *ctx)
if (stat & BIT_INTR_SCDT_CHANGE) {
u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
- if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
- if (ctx->sink_type == SINK_HDMI)
- /* enable infoframe interrupt */
- sii8620_scdt_high(ctx);
- else
- sii8620_start_video(ctx);
- }
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_start_video(ctx);
}
sii8620_write(ctx, REG_INTR5, stat);
}
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
- u8 vsif[11];
-
- sii8620_write(ctx, REG_RX_HDMI_CTRL2,
- VAL_RX_HDMI_CTRL2_DEFVAL |
- BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
- sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
- ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
- sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
- sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
- ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
- u8 stat = sii8620_readb(ctx, REG_INTR8)
- & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
- sii8620_write(ctx, REG_INTR8, stat);
-
- if (stat & BIT_CEA_NEW_VSI)
- sii8620_new_vsi(ctx);
-
- if (stat & BIT_CEA_NEW_AVI)
- sii8620_new_avi(ctx);
-
- if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
- sii8620_start_video(ctx);
-}
-
static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
{
if (ret < 0)
@@ -2043,11 +1997,11 @@ static void sii8620_irq_ddc(struct sii8620 *ctx)
if (stat & BIT_DDC_CMD_DONE) {
sii8620_write(ctx, REG_INTR3_MASK, 0);
- if (sii8620_is_mhl3(ctx))
+ if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
MHL_INT_RC_FEAT_REQ);
else
- sii8620_edid_read(ctx, 0);
+ sii8620_enable_hpd(ctx);
}
sii8620_write(ctx, REG_INTR3, stat);
}
@@ -2074,7 +2028,6 @@ static irqreturn_t sii8620_irq_thread(int irq, void *data)
{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
- { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
};
struct sii8620 *ctx = data;
u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2065,6 @@ static void sii8620_cable_in(struct sii8620 *ctx)
dev_err(dev, "Error powering on, %d.\n", ret);
return;
}
- sii8620_hw_reset(ctx);
sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
ret = sii8620_clear_error(ctx);
@@ -2268,17 +2220,43 @@ static void sii8620_detach(struct drm_bridge *bridge)
rc_unregister_device(ctx->rc_dev);
}
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+ const struct drm_display_mode *mode)
+{
+ int max_pclk, max_pclk_pp_mode;
+
+ if (sii8620_is_mhl3(ctx)) {
+ max_pclk = MHL3_MAX_PCLK;
+ max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+ } else {
+ max_pclk = MHL1_MAX_PCLK;
+ max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+ }
+
+ if (mode->clock < max_pclk)
+ return 0;
+ else if (mode->clock < max_pclk_pp_mode)
+ return 1;
+ else
+ return -1;
+}
+
static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ int pack_required = sii8620_is_packing_required(ctx, mode);
bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
MHL_DCAP_VID_LINK_PPIXEL;
- unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
- MHL1_MAX_LCLK;
- max_pclk /= can_pack ? 2 : 3;
- return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+ switch (pack_required) {
+ case 0:
+ return MODE_OK;
+ case 1:
+ return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+ default:
+ return MODE_CLOCK_HIGH;
+ }
}
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2264,14 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
struct drm_display_mode *adjusted_mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
- int max_lclk;
- bool ret = true;
mutex_lock(&ctx->lock);
- max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
- if (max_lclk > 3 * adjusted_mode->clock) {
- ctx->use_packed_pixel = 0;
- goto end;
- }
- if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
- max_lclk > 2 * adjusted_mode->clock) {
- ctx->use_packed_pixel = 1;
- goto end;
- }
- ret = false;
-end:
- if (ret) {
- u8 vic = drm_match_cea_mode(adjusted_mode);
-
- if (!vic) {
- union hdmi_infoframe frm;
- u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
- /* FIXME: We need the connector here */
- drm_hdmi_vendor_infoframe_from_display_mode(
- &frm.vendor.hdmi, NULL, adjusted_mode);
- vic = frm.vendor.hdmi.vic;
- if (vic >= ARRAY_SIZE(mhl_vic))
- vic = 0;
- vic = mhl_vic[vic];
- }
- ctx->video_code = vic;
- ctx->pixel_clock = adjusted_mode->clock;
- }
+ ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+
mutex_unlock(&ctx->lock);
- return ret;
+
+ return true;
}
static const struct drm_bridge_funcs sii8620_bridge_funcs = {
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b553a6f2ff0e..7af748ed1c58 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -369,13 +369,6 @@ EXPORT_SYMBOL(drm_dev_exit);
*/
void drm_dev_unplug(struct drm_device *dev)
{
- drm_dev_unregister(dev);
-
- mutex_lock(&drm_global_mutex);
- if (dev->open_count == 0)
- drm_dev_put(dev);
- mutex_unlock(&drm_global_mutex);
-
/*
* After synchronizing any critical read section is guaranteed to see
* the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@ void drm_dev_unplug(struct drm_device *dev)
*/
dev->unplugged = true;
synchronize_srcu(&drm_unplug_srcu);
+
+ drm_dev_unregister(dev);
+
+ mutex_lock(&drm_global_mutex);
+ if (dev->open_count == 0)
+ drm_dev_put(dev);
+ mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_dev_unplug);
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 50c73c0a20b9..d638c0fb3418 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Clone the lessor file to create a new file for us */
DRM_DEBUG_LEASE("Allocating lease file\n");
- path_get(&lessor_file->f_path);
- lessee_file = alloc_file(&lessor_file->f_path,
- lessor_file->f_mode,
- fops_get(lessor_file->f_inode->i_fop));
-
+ lessee_file = filp_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
}
- /* Initialize the new file for DRM */
- DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
- ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
- if (ret)
- goto out_lessee_file;
-
lessee_priv = lessee_file->private_data;
-
/* Change the file to a master one */
drm_master_put(&lessee_priv->master);
lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
return 0;
-out_lessee_file:
- fput(lessee_file);
-
out_lessee:
drm_master_put(&lessee);
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 1f8031e30f53..cdb10f885a4f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
drm_mode_object_unregister(blob->dev, &blob->base);
- kfree(blob);
+ kvfree(blob);
}
/**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return ERR_PTR(-ENOMEM);
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
true, drm_property_free_blob);
if (ret) {
- kfree(blob);
+ kvfree(blob);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index e5013a999147..540b59fb4103 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -631,8 +631,11 @@ static struct platform_driver etnaviv_platform_driver = {
},
};
+static struct platform_device *etnaviv_drm;
+
static int __init etnaviv_init(void)
{
+ struct platform_device *pdev;
int ret;
struct device_node *np;
@@ -644,7 +647,7 @@ static int __init etnaviv_init(void)
ret = platform_driver_register(&etnaviv_platform_driver);
if (ret != 0)
- platform_driver_unregister(&etnaviv_gpu_driver);
+ goto unregister_gpu_driver;
/*
* If the DT contains at least one available GPU device, instantiate
@@ -653,20 +656,33 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
-
- platform_device_register_simple("etnaviv", -1, NULL, 0);
+ pdev = platform_device_register_simple("etnaviv", -1,
+ NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ etnaviv_drm = pdev;
of_node_put(np);
break;
}
+ return 0;
+
+unregister_platform_driver:
+ platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+ platform_driver_unregister(&etnaviv_gpu_driver);
return ret;
}
module_init(etnaviv_init);
static void __exit etnaviv_exit(void)
{
- platform_driver_unregister(&etnaviv_gpu_driver);
+ platform_device_unregister(etnaviv_drm);
platform_driver_unregister(&etnaviv_platform_driver);
+ platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index dd430f0f8ff5..90f17ff7888e 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
struct work_struct sync_point_work;
int sync_point_event;
+ /* hang detection */
+ u32 hangcheck_dma_addr;
+
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..50d6b88cb7aa 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -10,6 +10,7 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
+#include "state.xml.h"
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
+ u32 dma_addr;
+ int change;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (fence_completed(gpu, submit->out_fence->seqno))
+ return;
+
+ /*
+ * If the GPU is still making forward progress on the front-end (which
+ * should never loop) we shift out the timeout to give it a chance to
+ * finish the job.
+ */
+ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ change = dma_addr - gpu->hangcheck_dma_addr;
+ if (change < 0 || change > 16) {
+ gpu->hangcheck_dma_addr = dma_addr;
+ schedule_delayed_work(&sched_job->work_tdr,
+ sched_job->sched->timeout);
+ return;
+ }
/* block scheduler */
kthread_park(gpu->sched.thread);
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 82c95c34447f..e868773ea509 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
- val &= ~WINCONx_BPPMODE_MASK;
+ val &= WINCONx_ENWIN_F;
switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDOSDxB(win));
}
- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
- VIDOSD_Wx_ALPHA_B_F(0x0);
+ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+ VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a81b4a5e24a7..ed3cc2989f93 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -420,7 +420,7 @@ err_mode_config_cleanup:
err_free_private:
kfree(private);
err_free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -444,7 +444,7 @@ static void exynos_drm_unbind(struct device *dev)
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops exynos_drm_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7fcc1a7ab1a0..27b7d34d776c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -138,7 +138,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err:
while (i--)
- drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+ drm_gem_object_put_unlocked(&exynos_gem[i]->base);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6127ef25acd6..e8d0670bb5f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
static void fimc_set_window(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, h1, h2, v1, v2;
/* cropped image */
h1 = buf->rect.x;
- h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+ h2 = real_width - buf->rect.w - buf->rect.x;
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- buf->buf.width, buf->buf.height);
+ real_width, buf->buf.height);
DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
/*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
static void fimc_src_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
* for now, we support only ITU601 8 bit mode
*/
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
- EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
static void fimc_dst_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 6e1494fa71b4..bdf5a7655228 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -186,7 +186,7 @@ unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
exynos_gem = to_exynos_gem(obj);
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return exynos_gem->size;
}
@@ -329,13 +329,13 @@ void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
return;
}
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
/*
* decrease obj->refcount one more time because we have already
* increased it at exynos_drm_gem_get_dma_addr().
*/
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +383,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 35ac66730563..7ba414b52faa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV61:
- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_IN_YUV420_3P;
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
}
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
switch (degree) {
case DRM_MODE_ROTATE_0:
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_90:
cfg |= GSC_IN_ROT_90;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_180:
cfg |= GSC_IN_ROT_180;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_270:
cfg |= GSC_IN_ROT_270;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
}
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
GSC_SRCIMG_WIDTH_MASK);
- cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+ cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_SRCIMG_HEIGHT(buf->buf.height));
gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+ break;
case DRM_FORMAT_YUV422:
+ cfg |= GSC_OUT_YUV422_3P;
+ break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_OUT_YUV420_3P;
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
- GSC_OUT_YUV420_2P);
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
}
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
/* original size */
cfg = gsc_read(GSC_DSTIMG_SIZE);
cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
- cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+ cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_DSTIMG_HEIGHT(buf->buf.height);
gsc_write(cfg, GSC_DSTIMG_SIZE);
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
};
static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
- { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+ { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 26374e58c557..b435db8fc916 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -345,27 +345,6 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
int ret = 0;
int i;
- /* basic checks */
- if (buf->buf.width == 0 || buf->buf.height == 0)
- return -EINVAL;
- buf->format = drm_format_info(buf->buf.fourcc);
- for (i = 0; i < buf->format->num_planes; i++) {
- unsigned int width = (i == 0) ? buf->buf.width :
- DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
- if (buf->buf.pitch[i] == 0)
- buf->buf.pitch[i] = width * buf->format->cpp[i];
- if (buf->buf.pitch[i] < width * buf->format->cpp[i])
- return -EINVAL;
- if (!buf->buf.gem_id[i])
- return -ENOENT;
- }
-
- /* pitch for additional planes must match */
- if (buf->format->num_planes > 2 &&
- buf->buf.pitch[1] != buf->buf.pitch[2])
- return -EINVAL;
-
/* get GEM buffers and check their size */
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
struct drm_ipp_limit l;
struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+ int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
if (!limits)
return 0;
__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
- if (!__size_limit_check(buf->buf.width, &l.h) ||
+ if (!__size_limit_check(real_width, &l.h) ||
!__size_limit_check(buf->buf.height, &l.v))
return -EINVAL;
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
return 0;
}
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+ struct exynos_drm_ipp_buffer *buf,
+ struct exynos_drm_ipp_buffer *src,
+ struct exynos_drm_ipp_buffer *dst,
+ bool rotate, bool swap)
+{
+ const struct exynos_drm_ipp_formats *fmt;
+ int ret, i;
+
+ fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+ buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+ DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+ if (!fmt) {
+ DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+ buf == src ? "src" : "dst");
+ return -EINVAL;
+ }
+
+ /* basic checks */
+ if (buf->buf.width == 0 || buf->buf.height == 0)
+ return -EINVAL;
+
+ buf->format = drm_format_info(buf->buf.fourcc);
+ for (i = 0; i < buf->format->num_planes; i++) {
+ unsigned int width = (i == 0) ? buf->buf.width :
+ DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+ if (buf->buf.pitch[i] == 0)
+ buf->buf.pitch[i] = width * buf->format->cpp[i];
+ if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+ return -EINVAL;
+ if (!buf->buf.gem_id[i])
+ return -ENOENT;
+ }
+
+ /* pitch for additional planes must match */
+ if (buf->format->num_planes > 2 &&
+ buf->buf.pitch[1] != buf->buf.pitch[2])
+ return -EINVAL;
+
+ /* check driver limits */
+ ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+ fmt->num_limits,
+ rotate,
+ buf == dst ? swap : false);
+ if (ret)
+ return ret;
+ ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+ fmt->limits,
+ fmt->num_limits, swap);
+ return ret;
+}
+
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
struct exynos_drm_ipp *ipp = task->ipp;
- const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
unsigned int rotation = task->transform.rotation;
int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
return -EINVAL;
}
- src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_SOURCE);
- if (!src_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
- src_fmt->num_limits,
- rotate, false);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- src_fmt->limits,
- src_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
if (ret)
return ret;
- dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_DESTINATION);
- if (!dst_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
- dst_fmt->num_limits,
- false, swap);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- dst_fmt->limits,
- dst_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index 38a2a7f1204b..7098c6d35266 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
if (plane->state) {
exynos_state = to_exynos_plane_state(plane->state);
if (exynos_state->base.fb)
- drm_framebuffer_unreference(exynos_state->base.fb);
+ drm_framebuffer_put(exynos_state->base.fb);
kfree(exynos_state);
plane->state = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1a76dd3d52e1..a820a68429b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
val &= ~ROT_CONTROL_FLIP_MASK;
if (rotation & DRM_MODE_REFLECT_X)
- val |= ROT_CONTROL_FLIP_HORIZONTAL;
- if (rotation & DRM_MODE_REFLECT_Y)
val |= ROT_CONTROL_FLIP_VERTICAL;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
val &= ~ROT_CONTROL_ROT_MASK;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 91d4382343d0..0ddb6eec7b11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -30,6 +30,7 @@
#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK 4
#define SCALER_AUTOSUSPEND_DELAY 2000
+#define SCALER_RESET_WAIT_RETRIES 100
struct scaler_data {
const char *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
static u32 scaler_get_format(u32 drm_fmt)
{
switch (drm_fmt) {
- case DRM_FORMAT_NV21:
- return SCALER_YUV420_2P_UV;
case DRM_FORMAT_NV12:
+ return SCALER_YUV420_2P_UV;
+ case DRM_FORMAT_NV21:
return SCALER_YUV420_2P_VU;
case DRM_FORMAT_YUV420:
return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
return SCALER_YUV422_1P_UYVY;
case DRM_FORMAT_YVYU:
return SCALER_YUV422_1P_YVYU;
- case DRM_FORMAT_NV61:
- return SCALER_YUV422_2P_UV;
case DRM_FORMAT_NV16:
+ return SCALER_YUV422_2P_UV;
+ case DRM_FORMAT_NV61:
return SCALER_YUV422_2P_VU;
case DRM_FORMAT_YUV422:
return SCALER_YUV422_3P;
- case DRM_FORMAT_NV42:
- return SCALER_YUV444_2P_UV;
case DRM_FORMAT_NV24:
+ return SCALER_YUV444_2P_UV;
+ case DRM_FORMAT_NV42:
return SCALER_YUV444_2P_VU;
case DRM_FORMAT_YUV444:
return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
return 0;
}
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+ int retry = SCALER_RESET_WAIT_RETRIES;
+
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+ } while (retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+ } while (retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+}
+
static inline void scaler_enable_int(struct scaler_context *scaler)
{
u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
- scaler->task = task;
-
pm_runtime_get_sync(scaler->dev);
+ if (scaler_reset(scaler)) {
+ pm_runtime_put(scaler->dev);
+ return -EIO;
+ }
+
+ scaler->task = task;
scaler_set_src_fmt(scaler, src_fmt);
scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
- return scaler_read(SCALER_INT_STATUS);
+ u32 val = scaler_read(SCALER_INT_STATUS);
+
+ scaler_write(val, SCALER_INT_STATUS);
+
+ return val;
}
static inline int scaler_task_done(u32 val)
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 4704a993cbb7..16b39734115c 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV422_3P (6 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index b51c05d03f14..7f562410f9cf 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -862,6 +862,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ u32 ctx_sr_ctl;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -894,6 +895,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ /* TODO
+ * Right now only scan LRI command on KBL and in inhibit context.
+ * It's good enough to support initializing mmio by lri command in
+ * vgpu inhibit context on KBL.
+ */
+ if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
+ intel_gvt_hypervisor_read_gpa(s->vgpu,
+ s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+ /* check inhibit context */
+ if (ctx_sr_ctl & 1) {
+ u32 data = cmd_val(s, index + 1);
+
+ if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+ intel_vgpu_mask_mmio_write(vgpu,
+ offset, &data, 4);
+ else
+ vgpu_vreg(vgpu, offset) = data;
+ }
+ }
+
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 6d8180e8d1e2..4b072ade8c38 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -196,7 +196,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -216,7 +216,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -236,7 +236,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 23296547da95..4efec8fa6c1d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1592,6 +1592,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
+ mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1616,6 +1617,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
+ mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -1868,6 +1870,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ /* If ggtt entry size is 8 bytes, and it's split into two 4 bytes
+ * write, we assume the two 4 bytes writes are consecutive.
+ * Otherwise, we abort and report error
+ */
+ if (bytes < info->gtt_entry_size) {
+ if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+ /* the first partial part*/
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ return 0;
+ } else if ((g_gtt_index ==
+ (ggtt_mm->ggtt_mm.last_partial_off >>
+ info->gtt_entry_size_shift)) &&
+ (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+ /* the second partial part */
+
+ int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+ (info->gtt_entry_size - 1);
+
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+ last_off, bytes);
+
+ ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+ } else {
+ int last_offset;
+
+ gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+ ggtt_mm->ggtt_mm.last_partial_off, off,
+ bytes, info->gtt_entry_size);
+
+ /* set host ggtt entry to scratch page and clear
+ * virtual ggtt entry as not present for last
+ * partially write offset
+ */
+ last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+ (~(info->gtt_entry_size - 1));
+
+ ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate_pte(vgpu, &m);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate(gvt->dev_priv);
+
+ ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+ ops->clear_present(&e);
+ ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+ return 0;
+ }
+ }
+
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index 3792f2b7f4ff..97e62647418a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -150,6 +150,8 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
+ unsigned long last_partial_off;
+ u64 last_partial_data;
} ggtt_mm;
};
};
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 05d15a095310..858967daf04b 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -268,6 +268,8 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -639,6 +641,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
+/**
+ * intel_gvt_mmio_is_in_ctx - check if a MMIO has in-ctx mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if a MMIO has a in-context mask, false if it isn't.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mask a MMIO in logical context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index bcbc47a88a70..8f1caacdc78a 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3046,6 +3046,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mask, old_vreg;
+
+ old_vreg = vgpu_vreg(vgpu, offset);
+ write_vreg(vgpu, offset, p_data, bytes);
+ mask = vgpu_vreg(vgpu, offset) >> 16;
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+ (vgpu_vreg(vgpu, offset) & mask);
+
+ return 0;
+}
+
+/**
* intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
* force-nopriv register
*
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 71b620875943..dac8c6401e26 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -98,4 +98,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index 0f949554d118..5ca9caf7552a 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -581,7 +581,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->in_context)
+ if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 34c125e2d90c..71e1aa54f774 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -340,14 +340,21 @@ struct drm_i915_file_private {
unsigned int bsd_engine;
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments per client ban score. Also
+ * hangs in short succession increments ban score. If ban threshold
+ * is reached, client is considered banned and submitting more work
+ * will fail. This is a stop gap measure to limit the badly behaving
+ * clients access to gpu. Note that unbannable contexts never increment
+ * the client ban score.
*/
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
- atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST 1
+#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN 3
+#define I915_CLIENT_SCORE_BANNED 9
+ /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+ atomic_t ban_score;
+ unsigned long hang_timestamp;
};
/* Interface history:
@@ -645,6 +652,7 @@ enum intel_sbi_destination {
#define QUIRK_BACKLIGHT_PRESENT (1<<3)
#define QUIRK_PIN_SWIZZLED_PAGES (1<<5)
#define QUIRK_INCREASE_T12_DELAY (1<<6)
+#define QUIRK_INCREASE_DDI_DISABLED_TIME (1<<7)
struct intel_fbdev;
struct intel_fbc_work;
@@ -2238,9 +2246,6 @@ static inline struct scatterlist *____sg_next(struct scatterlist *sg)
**/
static inline struct scatterlist *__sg_next(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
return sg_is_last(sg) ? NULL : ____sg_next(sg);
}
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3704f4c0c2c9..17c5097721e8 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2002,7 +2002,6 @@ int i915_gem_fault(struct vm_fault *vmf)
bool write = !!(vmf->flags & FAULT_FLAG_WRITE);
struct i915_vma *vma;
pgoff_t page_offset;
- unsigned int flags;
int ret;
/* We don't use vmf->pgoff since that has the fake offset */
@@ -2038,27 +2037,34 @@ int i915_gem_fault(struct vm_fault *vmf)
goto err_unlock;
}
- /* If the object is smaller than a couple of partial vma, it is
- * not worth only creating a single partial vma - we may as well
- * clear enough space for the full object.
- */
- flags = PIN_MAPPABLE;
- if (obj->base.size > 2 * MIN_CHUNK_PAGES << PAGE_SHIFT)
- flags |= PIN_NONBLOCK | PIN_NONFAULT;
/* Now pin it into the GTT as needed */
- vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, flags);
+ vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
+ PIN_MAPPABLE |
+ PIN_NONBLOCK |
+ PIN_NONFAULT);
if (IS_ERR(vma)) {
/* Use a partial view if it is bigger than available space */
struct i915_ggtt_view view =
compute_partial_view(obj, page_offset, MIN_CHUNK_PAGES);
+ unsigned int flags;
- /* Userspace is now writing through an untracked VMA, abandon
+ flags = PIN_MAPPABLE;
+ if (view.type == I915_GGTT_VIEW_NORMAL)
+ flags |= PIN_NONBLOCK; /* avoid warnings for pinned */
+
+ /*
+ * Userspace is now writing through an untracked VMA, abandon
* all hope that the hardware is able to track future writes.
*/
obj->frontbuffer_ggtt_origin = ORIGIN_CPU;
- vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, PIN_MAPPABLE);
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ if (IS_ERR(vma) && !view.type) {
+ flags = PIN_MAPPABLE;
+ view.type = I915_GGTT_VIEW_PARTIAL;
+ vma = i915_gem_object_ggtt_pin(obj, &view, 0, 0, flags);
+ }
}
if (IS_ERR(vma)) {
ret = PTR_ERR(vma);
@@ -2933,32 +2939,54 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
return 0;
}
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+ const struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ unsigned long prev_hang;
+
+ if (i915_gem_context_is_banned(ctx))
+ score = I915_CLIENT_SCORE_CONTEXT_BAN;
+ else
+ score = 0;
+
+ prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+ if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+ score += I915_CLIENT_SCORE_HANG_FAST;
+
+ if (score) {
+ atomic_add(score, &file_priv->ban_score);
+
+ DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
+ }
+}
+
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
- bool banned;
+ unsigned int score;
+ bool banned, bannable;
atomic_inc(&ctx->guilty_count);
- banned = false;
- if (i915_gem_context_is_bannable(ctx)) {
- unsigned int score;
+ bannable = i915_gem_context_is_bannable(ctx);
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+ banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- score = atomic_add_return(CONTEXT_SCORE_GUILTY,
- &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score, yesno(banned && bannable));
- DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
- ctx->name, score, yesno(banned));
- }
- if (!banned)
+ /* Cool contexts don't accumulate client ban score */
+ if (!bannable)
return;
- i915_gem_context_set_banned(ctx);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- atomic_inc(&ctx->file_priv->context_bans);
- DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
- ctx->name, atomic_read(&ctx->file_priv->context_bans));
- }
+ if (banned)
+ i915_gem_context_set_banned(ctx);
+
+ if (!IS_ERR_OR_NULL(ctx->file_priv))
+ i915_gem_client_mark_guilty(ctx->file_priv, ctx);
}
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5764,7 @@ int i915_gem_open(struct drm_i915_private *i915, struct drm_file *file)
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
+ file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b3c981..060335d3d9e0 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -652,7 +652,7 @@ int i915_gem_switch_to_kernel_context(struct drm_i915_private *dev_priv)
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
- return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+ return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f627a8c47c58..22df17c8ca9b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -489,7 +489,9 @@ eb_validate_vma(struct i915_execbuffer *eb,
}
static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+ unsigned int i, unsigned batch_idx,
+ struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
int err;
@@ -522,6 +524,24 @@ eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
eb->flags[i] = entry->flags;
vma->exec_flags = &eb->flags[i];
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ if (i == batch_idx) {
+ if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ if (eb->reloc_cache.has_fence)
+ eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+ eb->batch = vma;
+ }
+
err = 0;
if (eb_pin_vma(eb, entry, vma)) {
if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
struct drm_i915_gem_object *obj;
- unsigned int i;
+ unsigned int i, batch;
int err;
if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
+ batch = eb_batch_index(eb);
+
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
lut->handle = handle;
add_vma:
- err = eb_add_vma(eb, i, vma);
+ err = eb_add_vma(eb, i, batch, vma);
if (unlikely(err))
goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
- /* take note of the batch buffer before we might reorder the lists */
- i = eb_batch_index(eb);
- eb->batch = eb->vma[i];
- GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
- /*
- * SNA is doing fancy tricks with compressing batch buffers, which leads
- * to negative relocation deltas. Usually that works out ok since the
- * relocate address is still positive, except when the batch is placed
- * very low in the GTT. Ensure this doesn't happen.
- *
- * Note that actual hangs have only been observed on gen7, but for
- * paranoia do it everywhere.
- */
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
- if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f9bc3aaa90d0..c16cb025755e 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1893,9 +1893,17 @@ static void i9xx_pipestat_irq_ack(struct drm_i915_private *dev_priv,
/*
* Clear the PIPE*STAT regs before the IIR
+ *
+ * Toggle the enable bits to make sure we get an
+ * edge in the ISR pipe event bit if we don't clear
+ * all the enabled status bits. Otherwise the edge
+ * triggered IIR on i965/g4x wouldn't notice that
+ * an interrupt is still pending.
*/
- if (pipe_stats[pipe])
- I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+ if (pipe_stats[pipe]) {
+ I915_WRITE(reg, pipe_stats[pipe]);
+ I915_WRITE(reg, enable_mask);
+ }
}
spin_unlock(&dev_priv->irq_lock);
}
@@ -1990,10 +1998,38 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
{
- u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
+ u32 hotplug_status = 0, hotplug_status_mask;
+ int i;
- if (hotplug_status)
+ if (IS_G4X(dev_priv) ||
+ IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ hotplug_status_mask = HOTPLUG_INT_STATUS_G4X |
+ DP_AUX_CHANNEL_MASK_INT_STATUS_G4X;
+ else
+ hotplug_status_mask = HOTPLUG_INT_STATUS_I915;
+
+ /*
+ * We absolutely have to clear all the pending interrupt
+ * bits in PORT_HOTPLUG_STAT. Otherwise the ISR port
+ * interrupt bit won't have an edge, and the i965/g4x
+ * edge triggered IIR will not notice that an interrupt
+ * is still pending. We can't use PORT_HOTPLUG_EN to
+ * guarantee the edge as the act of toggling the enable
+ * bits can itself generate a new hotplug interrupt :(
+ */
+ for (i = 0; i < 10; i++) {
+ u32 tmp = I915_READ(PORT_HOTPLUG_STAT) & hotplug_status_mask;
+
+ if (tmp == 0)
+ return hotplug_status;
+
+ hotplug_status |= tmp;
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
+ }
+
+ WARN_ONCE(1,
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+ I915_READ(PORT_HOTPLUG_STAT));
return hotplug_status;
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f11bb213ec07..7720569f2024 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2425,12 +2425,17 @@ enum i915_power_well_id {
#define _3D_CHICKEN _MMIO(0x2084)
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN _MMIO(0x2088)
+#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
+
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
+#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 9324d476e0a7..0531c01c3604 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -109,7 +109,7 @@ vma_create(struct drm_i915_gem_object *obj,
obj->base.size >> PAGE_SHIFT));
vma->size = view->partial.size;
vma->size <<= PAGE_SHIFT;
- GEM_BUG_ON(vma->size >= obj->base.size);
+ GEM_BUG_ON(vma->size > obj->base.size);
} else if (view->type == I915_GGTT_VIEW_ROTATED) {
vma->size = intel_rotation_info_size(&view->rotated);
vma->size <<= PAGE_SHIFT;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index de0e22322c76..072b326d5ee0 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -304,6 +304,9 @@ intel_crt_mode_valid(struct drm_connector *connector,
int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
@@ -337,6 +340,12 @@ static bool intel_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
return true;
}
@@ -344,6 +353,12 @@ static bool pch_crt_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_pch_encoder = true;
return true;
@@ -354,6 +369,11 @@ static bool hsw_crt_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
pipe_config->has_pch_encoder = true;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index f4a8598a2d39..fed26d6e4e27 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1782,15 +1782,24 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder)
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
i915_reg_t reg = TRANS_DDI_FUNC_CTL(cpu_transcoder);
uint32_t val = I915_READ(reg);
val &= ~(TRANS_DDI_FUNC_ENABLE | TRANS_DDI_PORT_MASK | TRANS_DDI_DP_VC_PAYLOAD_ALLOC);
val |= TRANS_DDI_PORT_NONE;
I915_WRITE(reg, val);
+
+ if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
+ DRM_DEBUG_KMS("Quirk Increase DDI disabled time\n");
+ /* Quirk time at 100ms for reliable operation */
+ msleep(100);
+ }
}
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dee3a8e659f1..dec0d60921bf 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -5809,7 +5809,7 @@ static void haswell_crtc_disable(struct intel_crtc_state *old_crtc_state,
intel_ddi_set_vc_payload_alloc(intel_crtc->config, false);
if (!transcoder_is_dsi(cpu_transcoder))
- intel_ddi_disable_transcoder_func(dev_priv, cpu_transcoder);
+ intel_ddi_disable_transcoder_func(old_crtc_state);
if (INTEL_GEN(dev_priv) >= 9)
skylake_scaler_disable(intel_crtc);
@@ -14469,12 +14469,22 @@ static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
+ /*
+ * Can't reject DBLSCAN here because Xorg ddxen can add piles
+ * of DBLSCAN modes to the output's mode list when they detect
+ * the scaling mode property on the connector. And they don't
+ * ask the kernel to validate those modes in any way until
+ * modeset time at which point the client gets a protocol error.
+ * So in order to not upset those clients we silently ignore the
+ * DBLSCAN flag on such connectors. For other connectors we will
+ * reject modes with the DBLSCAN flag in encoder->compute_config().
+ * And we always reject DBLSCAN modes in connector->mode_valid()
+ * as we never want such modes on the connector's mode list.
+ */
+
if (mode->vscan > 1)
return MODE_NO_VSCAN;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->flags & DRM_MODE_FLAG_HSKEW)
return MODE_H_ILLEGAL;
@@ -14636,6 +14646,18 @@ static void quirk_increase_t12_delay(struct drm_device *dev)
DRM_INFO("Applying T12 delay quirk\n");
}
+/*
+ * GeminiLake NUC HDMI outputs require additional off time
+ * this allows the onboard retimer to correctly sync to signal
+ */
+static void quirk_increase_ddi_disabled_time(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+
+ dev_priv->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
+ DRM_INFO("Applying Increase DDI Disabled quirk\n");
+}
+
struct intel_quirk {
int device;
int subsystem_vendor;
@@ -14722,6 +14744,13 @@ static struct intel_quirk intel_quirks[] = {
/* Toshiba Satellite P50-C-18C */
{ 0x191B, 0x1179, 0xF840, quirk_increase_t12_delay },
+
+ /* GeminiLake NUC */
+ { 0x3185, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x8086, 0x2072, quirk_increase_ddi_disabled_time },
+ /* ASRock ITX*/
+ { 0x3185, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
+ { 0x3184, 0x1849, 0x2212, quirk_increase_ddi_disabled_time },
};
static void intel_init_quirks(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8320f0e8e3be..16faea30114a 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -420,6 +420,9 @@ intel_dp_mode_valid(struct drm_connector *connector,
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@ intel_dp_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return false;
@@ -2784,16 +2790,6 @@ static void g4x_disable_dp(struct intel_encoder *encoder,
const struct drm_connector_state *old_conn_state)
{
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
- /* disable the port before the pipe on g4x */
- intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
static void vlv_disable_dp(struct intel_encoder *encoder,
@@ -2807,13 +2803,19 @@ static void vlv_disable_dp(struct intel_encoder *encoder,
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
+ /*
+ * Bspec does not list a specific disable sequence for g4x DP.
+ * Follow the ilk+ sequence (disable pipe before the port) for
+ * g4x DP as it does not suffer from underruns like the normal
+ * g4x modeset sequence (disable pipe after the port).
+ */
intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_GMCH_DISPLAY(dev_priv))
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -6436,15 +6438,11 @@ bool intel_dp_init(struct drm_i915_private *dev_priv,
intel_encoder->enable = vlv_enable_dp;
intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
- } else if (INTEL_GEN(dev_priv) >= 5) {
- intel_encoder->pre_enable = g4x_pre_enable_dp;
- intel_encoder->enable = g4x_enable_dp;
- intel_encoder->disable = ilk_disable_dp;
- intel_encoder->post_disable = ilk_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->disable = g4x_disable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
}
intel_dig_port->dp.output_reg = output_reg;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9e6956c08688..5890500a3a8b 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -48,6 +48,9 @@ static bool intel_dp_mst_compute_config(struct intel_encoder *encoder,
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
if (!intel_dp)
return MODE_ERROR;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 0361130500a6..b8eefbffc77d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1388,8 +1388,7 @@ void hsw_fdi_link_train(struct intel_crtc *crtc,
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port);
bool intel_ddi_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe);
void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state);
-void intel_ddi_disable_transcoder_func(struct drm_i915_private *dev_priv,
- enum transcoder cpu_transcoder);
+void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state);
void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state);
void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state);
struct intel_encoder *
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index cf39ca90d887..f349b3920199 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -326,6 +326,9 @@ static bool intel_dsi_compute_config(struct intel_encoder *encoder,
conn_state->scaling_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
@@ -1266,6 +1269,9 @@ intel_dsi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_KMS("\n");
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a70d767313aa..61d908e0df0e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -219,6 +219,9 @@ intel_dvo_mode_valid(struct drm_connector *connector,
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
/* XXX: Validate clock range */
if (fixed_mode) {
@@ -254,6 +257,9 @@ static bool intel_dvo_compute_config(struct intel_encoder *encoder,
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee929f31f7db..d8cb53ef4351 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1557,6 +1557,9 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
bool force_dvi =
READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
clock = mode->clock;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15434cad5430..7c4c8fb1dae4 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1545,11 +1545,21 @@ static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ *batch++ = MI_LOAD_REGISTER_IMM(3);
+
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
- *batch++ = MI_LOAD_REGISTER_IMM(1);
*batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
*batch++ = _MASKED_BIT_DISABLE(
GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+ /* BSpec: 11391 */
+ *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+ *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+ /* BSpec: 11299 */
+ *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+ *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
*batch++ = MI_NOOP;
/* WaClearSlmSpaceAtContextSwitch:kbl */
@@ -2641,10 +2651,8 @@ static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
- if (IS_ERR(ctx_obj)) {
- ret = PTR_ERR(ctx_obj);
- goto error_deref_obj;
- }
+ if (IS_ERR(ctx_obj))
+ return PTR_ERR(ctx_obj);
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d278f24ba6ae..48f618dc9abb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,6 +380,8 @@ intel_lvds_mode_valid(struct drm_connector *connector,
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 25005023c243..26975df4e593 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1160,6 +1160,9 @@ static bool intel_sdvo_compute_config(struct intel_encoder *encoder,
adjusted_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
/*
* Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@ intel_sdvo_mode_valid(struct drm_connector *connector,
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 885fc3809f7f..b55b5c157e38 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -850,6 +850,9 @@ intel_tv_mode_valid(struct drm_connector *connector,
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -877,16 +880,21 @@ intel_tv_compute_config(struct intel_encoder *encoder,
struct drm_connector_state *conn_state)
{
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
if (!tv_mode)
return false;
- pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
+ adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
/* TV has it's own notion of sync and other mode flags, so clear them. */
- pipe_config->base.adjusted_mode.flags = 0;
+ adjusted_mode->flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 56dd7a9a8e25..dd5312b02a8d 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -612,6 +612,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;
if (of_id)
@@ -652,14 +655,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
+ if (!of_device_is_available(child))
+ continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
- if (!of_device_is_available(child))
- continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 32b1a6cdecfc..d3443125e661 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -197,8 +197,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
priv->io_base = regs;
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "hhi");
- if (!res)
- return -EINVAL;
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
if (!regs) {
@@ -215,8 +217,10 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
}
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dmc");
- if (!res)
- return -EINVAL;
+ if (!res) {
+ ret = -EINVAL;
+ goto free_drm;
+ }
/* Simply ioremap since it may be a shared register zone */
regs = devm_ioremap(dev, res->start, resource_size(res));
if (!regs) {
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 501d2d290e9c..70dce544984e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
nouveau_display(dev)->init = nv04_display_init;
nouveau_display(dev)->fini = nv04_display_fini;
+ /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+ dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
nouveau_hw_save_vga_fonts(dev, 1);
nv04_crtc_create(dev, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 291c08117ab6..397143b639c6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -132,7 +132,7 @@ curs507a_new_(const struct nv50_wimm_func *func, struct nouveau_drm *drm,
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
wndw->immd = func;
- wndw->ctxdma.parent = &disp->core->chan.base.user;
+ wndw->ctxdma.parent = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index b83465ae7c1b..9bae4db84cfb 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1585,8 +1585,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
*****************************************************************************/
static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
+ struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
@@ -1618,6 +1619,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
}
static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+ if (wndw->func->update)
+ wndw->func->update(wndw, interlock);
+ }
+ }
+}
+
+static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -1684,7 +1701,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
help->disable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1693,15 +1711,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
-
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1762,18 +1773,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Flush update. */
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
+ nv50_disp_atomic_commit_wndw(state, interlock);
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+ interlock[NV50_DISP_INTERLOCK_OVLY] ||
+ interlock[NV50_DISP_INTERLOCK_WNDW] ||
!atom->state.legacy_cursor_update)
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
@@ -1871,7 +1878,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
nv50_disp_atomic_commit_tail(state);
drm_for_each_crtc(crtc, dev) {
- if (crtc->state->enable) {
+ if (crtc->state->active) {
if (!drm->have_disp_power_ref) {
drm->have_disp_power_ref = true;
return 0;
@@ -2119,10 +2126,6 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
int
nv50_display_create(struct drm_device *dev)
{
@@ -2147,8 +2150,6 @@ nv50_display_create(struct drm_device *dev)
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
- if (nouveau_atomic)
- dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 224963b533a6..c5a9bc1af5af 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -444,14 +444,17 @@ nv50_wndw_prepare_fb(struct drm_plane *plane, struct drm_plane_state *state)
if (ret)
return ret;
- ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
- if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(fb->nvbo);
- return PTR_ERR(ctxdma);
+ if (wndw->ctxdma.parent) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(fb->nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ asyw->image.handle[0] = ctxdma->object.handle;
}
asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
- asyw->image.handle[0] = ctxdma->object.handle;
asyw->image.offset[0] = fb->nvbo->bo.offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index debbbf0fd4bd..408b955e5c39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
INIT_LIST_HEAD(&drm->bl_connectors);
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
return 0;
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
break;
}
}
-
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 7b557c354307..af68eae4c626 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -1208,14 +1208,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int type, ret = 0;
bool dummy;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
nv_connector = nouveau_connector(connector);
- if (nv_connector->index == index)
+ if (nv_connector->index == index) {
+ drm_connector_list_iter_end(&conn_iter);
return connector;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a4d1a059bd3d..dc7454e7f19a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -33,6 +33,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
struct nvkm_i2c_port;
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+ const struct nouveau_encoder *nv_encoder;
+ const struct drm_encoder *encoder;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+ if (!nv_encoder)
+ return false;
+
+ encoder = &nv_encoder->base.base;
+ return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+ drm_for_each_connector_iter(connector, iter) \
+ for_each_if(!nouveau_connector_is_mst(connector))
+
static inline struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct nouveau_connector *nv_connector = NULL;
struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder && connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ if (connector->encoder && connector->encoder->crtc == crtc) {
+ nv_connector = nouveau_connector(connector);
+ break;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
- return NULL;
+ return nv_connector;
}
struct drm_connector *
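
The nouveau hunks above all move from open-coded walks of dev->mode_config.connector_list to the drm_connector_list_iter API. A minimal sketch of that pattern, using only the iterator calls that appear in the hunks (the helper name and body are illustrative):

#include <drm/drm_connector.h>
#include <drm/drm_device.h>

/* Sketch: walk the connector list under the iterator's own locking.  The
 * iterator must always be ended, including on early exits. */
static void walk_connectors(struct drm_device *dev)
{
	struct drm_connector *connector;
	struct drm_connector_list_iter conn_iter;

	drm_connector_list_iter_begin(dev, &conn_iter);
	drm_for_each_connector_iter(connector, &conn_iter) {
		/* use 'connector' here; breaking out is fine, but do not
		 * return before drm_connector_list_iter_end() has run */
	}
	drm_connector_list_iter_end(&conn_iter);
}
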
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 774b429142bc..ec7861457b84 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int ret;
ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_get(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
/* enable flip completion events */
nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
nvif_notify_put(&drm->flip);
/* disable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_put(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 775443c9af94..f5d3158f0378 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+ if (nouveau_atomic)
+ driver_pci.driver_features |= DRIVER_ATOMIC;
+
ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct drm_crtc *crtc;
-
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 300daee74209..e6ccafcb9c41 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct nouveau_bo *nvbo;
uint32_t data;
- if (unlikely(r->bo_index > req->nr_buffers)) {
+ if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (b->presumed.valid)
continue;
- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 73b5d46104bd..434d2fc5bb1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_remapper)
+ fb->func->init_remapper(fb);
+
if (fb->func->init_page) {
ret = fb->func->init_page(fb);
if (WARN_ON(ret))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index dffe1f5e1071..8205ce436b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
}
void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ /* Disable address remapper. */
+ nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
+void
gp100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
@@ -56,6 +64,7 @@ gp100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b84b9861ef26..b4d74e815674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -31,6 +31,7 @@ gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 2857f31466bf..1e4ad61c19e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
u32 (*tags)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_remapper)(struct nvkm_fb *);
int (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
int gm200_fb_init_page(struct nvkm_fb *);
+void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index b8cda9449241..768207fbbae3 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -623,7 +623,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
struct qxl_cursor_cmd *cmd;
struct qxl_cursor *cursor;
struct drm_gem_object *obj;
- struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+ struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
int ret;
void *user_ptr;
int size = 64*64*4;
@@ -677,7 +677,7 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
cursor_bo, 0);
cmd->type = QXL_CURSOR_SET;
- qxl_bo_unref(&qcrtc->cursor_bo);
+ old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = cursor_bo;
cursor_bo = NULL;
} else {
@@ -697,6 +697,9 @@ static void qxl_cursor_atomic_update(struct drm_plane *plane,
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ if (old_cursor_bo)
+ qxl_bo_unref(&old_cursor_bo);
+
qxl_bo_unref(&cursor_bo);
return;
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index 2589f4acd5ae..9c81301d0eed 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o
+endif
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 08747fc3ee71..8232b39e16ca 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,7 +17,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@@ -418,9 +417,6 @@ static void sun4i_tcon0_mode_set_lvds(struct sun4i_tcon *tcon,
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
- struct drm_panel *panel = tcon->panel;
- struct drm_connector *connector = panel->connector;
- struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -478,27 +474,6 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
- /*
- * On A20 and similar SoCs, the only way to achieve Positive Edge
- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
- * By default TCON works in Negative Edge(Falling Edge),
- * this is why phase is set to 0 in that case.
- * Unfortunately there's no way to logically invert dclk through
- * IO_POL register.
- * The only acceptable way to work, triple checked with scope,
- * is using clock phase set to 0° for Negative Edge and set to 240°
- * for Positive Edge.
- * On A33 and similar SoCs there would be a 90° phase option,
- * but it divides also dclk by 2.
- * Following code is a way to avoid quirks all around TCON
- * and DOTCLOCK drivers.
- */
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
-
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 776c1513e582..a2bd5876c633 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
* unaligned offset is malformed and cause commands stream
* corruption on the buffer address relocation.
*/
- if (offset & 3 || offset >= obj->gem.size) {
+ if (offset & 3 || offset > obj->gem.size) {
err = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 2ebdc6d5a76e..d5583190f3e4 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -137,7 +137,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
+ int len;
+ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
+ len = cmd - (char *) urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
bytes_sent += len;
} else
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 0c87b1ac6b68..b992644c17e6 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -153,11 +153,11 @@ static void udl_compress_hline16(
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
- cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
- min((int)(pixel_end - pixel) / bpp,
- (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+ cmd_pixel_end = pixel + min3(MAX_CMD_PIXELS + 1UL,
+ (unsigned long)(pixel_end - pixel) / bpp,
+ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) * bpp;
- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
+ prefetch_range((void *) pixel, cmd_pixel_end - pixel);
pixel_val16 = get_pixel_val16(pixel, bpp);
while (pixel < cmd_pixel_end) {
@@ -193,6 +193,9 @@ static void udl_compress_hline16(
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
*raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+ } else {
+ /* undo unused byte */
+ cmd--;
}
*cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
index f1d5f76e9c33..d88073e7d22d 100644
--- a/drivers/gpu/host1x/dev.c
+++ b/drivers/gpu/host1x/dev.c
@@ -218,6 +218,9 @@ static int host1x_probe(struct platform_device *pdev)
return err;
}
+ if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
+ goto skip_iommu;
+
host->group = iommu_group_get(&pdev->dev);
if (host->group) {
struct iommu_domain_geometry *geometry;
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index e2f4a4d93d20..527a1cddb14f 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -569,7 +569,8 @@ void host1x_job_unpin(struct host1x_job *job)
for (i = 0; i < job->num_unpins; i++) {
struct host1x_job_unpin_data *unpin = &job->unpins[i];
- if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
+ if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
+ unpin->size && host->domain) {
iommu_unmap(host->domain, job->addr_phys[i],
unpin->size);
free_iova(&host->iova,
diff --git a/drivers/gpu/ipu-v3/ipu-csi.c b/drivers/gpu/ipu-v3/ipu-csi.c
index caa05b0702e1..5450a2db1219 100644
--- a/drivers/gpu/ipu-v3/ipu-csi.c
+++ b/drivers/gpu/ipu-v3/ipu-csi.c
@@ -339,7 +339,8 @@ static void fill_csi_bus_cfg(struct ipu_csi_bus_config *csicfg,
break;
case V4L2_MBUS_BT656:
csicfg->ext_vsync = 0;
- if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field))
+ if (V4L2_FIELD_HAS_BOTH(mbus_fmt->field) ||
+ mbus_fmt->field == V4L2_FIELD_ALTERNATE)
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_INTERLACED;
else
csicfg->clk_mode = IPU_CSI_CLK_MODE_CCIR656_PROGRESSIVE;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index f858cc72011d..3942ee61bd1c 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1952,6 +1952,8 @@ static int hid_device_probe(struct device *dev)
}
hdev->io_started = false;
+ clear_bit(ffs(HID_STAT_REPROBED), &hdev->status);
+
if (!hdev->driver) {
id = hid_match_device(hdev, hdrv);
if (id == NULL) {
@@ -2215,7 +2217,8 @@ static int __hid_bus_reprobe_drivers(struct device *dev, void *data)
struct hid_device *hdev = to_hid_device(dev);
if (hdev->driver == hdrv &&
- !hdrv->match(hdev, hid_ignore_special_drivers))
+ !hdrv->match(hdev, hid_ignore_special_drivers) &&
+ !test_and_set_bit(ffs(HID_STAT_REPROBED), &hdev->status))
return device_reprobe(dev);
return 0;
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 8469b6964ff6..b48100236df8 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -1154,6 +1154,8 @@ copy_rest:
goto out;
if (list->tail > list->head) {
len = list->tail - list->head;
+ if (len > count)
+ len = count;
if (copy_to_user(buffer + ret, &list->hid_debug_buf[list->head], len)) {
ret = -EFAULT;
@@ -1163,6 +1165,8 @@ copy_rest:
list->head += len;
} else {
len = HID_DEBUG_BUFSIZE - list->head;
+ if (len > count)
+ len = count;
if (copy_to_user(buffer, &list->hid_debug_buf[list->head], len)) {
ret = -EFAULT;
@@ -1170,7 +1174,9 @@ copy_rest:
}
list->head = 0;
ret += len;
- goto copy_rest;
+ count -= len;
+ if (count > 0)
+ goto copy_rest;
}
}
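
The fix above clamps every chunk copied out of the debug ring buffer by the space remaining in the caller's buffer, and only keeps looping while that space is non-zero. A user-space analogue of the same clamping, with all names hypothetical:

#include <stddef.h>
#include <string.h>

/* Illustration: drain data[*head..tail) into dst without ever writing more
 * than 'count' bytes, mirroring the "len > count" clamp added above. */
static size_t ring_drain(char *dst, size_t count,
			 const char *data, size_t size,
			 size_t *head, size_t tail)
{
	size_t copied = 0;

	while (*head != tail && count > 0) {
		size_t len = (tail > *head) ? tail - *head : size - *head;

		if (len > count)
			len = count;		/* respect the caller's buffer */
		memcpy(dst + copied, data + *head, len);
		*head = (*head + len) % size;	/* may wrap to 0 */
		copied += len;
		count -= len;
	}

	return copied;
}
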
diff --git a/drivers/hid/hid-google-hammer.c b/drivers/hid/hid-google-hammer.c
index 7b8e17b03cb8..6bf4da7ad63a 100644
--- a/drivers/hid/hid-google-hammer.c
+++ b/drivers/hid/hid-google-hammer.c
@@ -124,6 +124,8 @@ static const struct hid_device_id hammer_devices[] = {
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_STAFF) },
{ HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WAND) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_GOOGLE, USB_DEVICE_ID_GOOGLE_WHISKERS) },
{ }
};
MODULE_DEVICE_TABLE(hid, hammer_devices);
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index a85634fe033f..c7981ddd8776 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -452,6 +452,7 @@
#define USB_DEVICE_ID_GOOGLE_TOUCH_ROSE 0x5028
#define USB_DEVICE_ID_GOOGLE_STAFF 0x502b
#define USB_DEVICE_ID_GOOGLE_WAND 0x502d
+#define USB_DEVICE_ID_GOOGLE_WHISKERS 0x5030
#define USB_VENDOR_ID_GOTOP 0x08f2
#define USB_DEVICE_ID_SUPER_Q2 0x007f
diff --git a/drivers/hid/hid-steam.c b/drivers/hid/hid-steam.c
index cb86cc834201..0422ec2b13d2 100644
--- a/drivers/hid/hid-steam.c
+++ b/drivers/hid/hid-steam.c
@@ -573,7 +573,7 @@ static bool steam_is_valve_interface(struct hid_device *hdev)
static int steam_client_ll_parse(struct hid_device *hdev)
{
- struct steam_device *steam = hid_get_drvdata(hdev);
+ struct steam_device *steam = hdev->driver_data;
return hid_parse_report(hdev, steam->hdev->dev_rdesc,
steam->hdev->dev_rsize);
@@ -590,7 +590,7 @@ static void steam_client_ll_stop(struct hid_device *hdev)
static int steam_client_ll_open(struct hid_device *hdev)
{
- struct steam_device *steam = hid_get_drvdata(hdev);
+ struct steam_device *steam = hdev->driver_data;
int ret;
ret = hid_hw_open(steam->hdev);
@@ -605,7 +605,7 @@ static int steam_client_ll_open(struct hid_device *hdev)
static void steam_client_ll_close(struct hid_device *hdev)
{
- struct steam_device *steam = hid_get_drvdata(hdev);
+ struct steam_device *steam = hdev->driver_data;
mutex_lock(&steam->mutex);
steam->client_opened = false;
@@ -623,7 +623,7 @@ static int steam_client_ll_raw_request(struct hid_device *hdev,
size_t count, unsigned char report_type,
int reqtype)
{
- struct steam_device *steam = hid_get_drvdata(hdev);
+ struct steam_device *steam = hdev->driver_data;
return hid_hw_raw_request(steam->hdev, reportnum, buf, count,
report_type, reqtype);
@@ -710,7 +710,7 @@ static int steam_probe(struct hid_device *hdev,
ret = PTR_ERR(steam->client_hdev);
goto client_hdev_fail;
}
- hid_set_drvdata(steam->client_hdev, steam);
+ steam->client_hdev->driver_data = steam;
/*
* With the real steam controller interface, do not connect hidraw.
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index c1652bb7bd15..eae0cb3ddec6 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -484,7 +484,7 @@ static void i2c_hid_get_input(struct i2c_hid *ihid)
return;
}
- if ((ret_size > size) || (ret_size <= 2)) {
+ if ((ret_size > size) || (ret_size < 2)) {
dev_err(&ihid->client->dev, "%s: incomplete report (%d/%d)\n",
__func__, size, ret_size);
return;
diff --git a/drivers/hid/intel-ish-hid/ipc/pci-ish.c b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
index 582e449be9fe..a2c53ea3b5ed 100644
--- a/drivers/hid/intel-ish-hid/ipc/pci-ish.c
+++ b/drivers/hid/intel-ish-hid/ipc/pci-ish.c
@@ -205,8 +205,7 @@ static void ish_remove(struct pci_dev *pdev)
kfree(ishtp_dev);
}
-#ifdef CONFIG_PM
-static struct device *ish_resume_device;
+static struct device __maybe_unused *ish_resume_device;
/* 50ms to get resume response */
#define WAIT_FOR_RESUME_ACK_MS 50
@@ -220,7 +219,7 @@ static struct device *ish_resume_device;
* in that case a simple resume message is enough, others we need
* a reset sequence.
*/
-static void ish_resume_handler(struct work_struct *work)
+static void __maybe_unused ish_resume_handler(struct work_struct *work)
{
struct pci_dev *pdev = to_pci_dev(ish_resume_device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -262,7 +261,7 @@ static void ish_resume_handler(struct work_struct *work)
*
* Return: 0 to the pm core
*/
-static int ish_suspend(struct device *device)
+static int __maybe_unused ish_suspend(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -288,7 +287,7 @@ static int ish_suspend(struct device *device)
return 0;
}
-static DECLARE_WORK(resume_work, ish_resume_handler);
+static __maybe_unused DECLARE_WORK(resume_work, ish_resume_handler);
/**
* ish_resume() - ISH resume callback
* @device: device pointer
@@ -297,7 +296,7 @@ static DECLARE_WORK(resume_work, ish_resume_handler);
*
* Return: 0 to the pm core
*/
-static int ish_resume(struct device *device)
+static int __maybe_unused ish_resume(struct device *device)
{
struct pci_dev *pdev = to_pci_dev(device);
struct ishtp_device *dev = pci_get_drvdata(pdev);
@@ -311,21 +310,14 @@ static int ish_resume(struct device *device)
return 0;
}
-static const struct dev_pm_ops ish_pm_ops = {
- .suspend = ish_suspend,
- .resume = ish_resume,
-};
-#define ISHTP_ISH_PM_OPS (&ish_pm_ops)
-#else
-#define ISHTP_ISH_PM_OPS NULL
-#endif /* CONFIG_PM */
+static SIMPLE_DEV_PM_OPS(ish_pm_ops, ish_suspend, ish_resume);
static struct pci_driver ish_driver = {
.name = KBUILD_MODNAME,
.id_table = ish_pci_tbl,
.probe = ish_probe,
.remove = ish_remove,
- .driver.pm = ISHTP_ISH_PM_OPS,
+ .driver.pm = &ish_pm_ops,
};
module_pci_driver(ish_driver);
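
The conversion above drops the #ifdef CONFIG_PM plumbing in favour of __maybe_unused callbacks plus SIMPLE_DEV_PM_OPS(). A stripped-down sketch of the same shape; the foo_* names are placeholders:

#include <linux/pm.h>
#include <linux/pci.h>

static int __maybe_unused foo_suspend(struct device *device)
{
	/* quiesce the hardware; built unconditionally, discarded if unused */
	return 0;
}

static int __maybe_unused foo_resume(struct device *device)
{
	/* bring the hardware back up */
	return 0;
}

/* Expands to empty ops when CONFIG_PM_SLEEP is off, so neither an #ifdef
 * nor a NULL .driver.pm fallback is needed. */
static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);

static struct pci_driver foo_driver = {
	.name      = "foo",
	.driver.pm = &foo_pm_ops,
	/* .id_table, .probe, .remove as usual */
};
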
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index e3ce233f8bdc..23872d08308c 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -36,6 +36,7 @@
#include <linux/hiddev.h>
#include <linux/compat.h>
#include <linux/vmalloc.h>
+#include <linux/nospec.h>
#include "usbhid.h"
#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -469,10 +470,14 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
if (uref->usage_index >= field->maxusage)
goto inval;
+ uref->usage_index = array_index_nospec(uref->usage_index,
+ field->maxusage);
uref->usage_code = field->usage[uref->usage_index].hid;
@@ -499,6 +504,8 @@ static noinline int hiddev_ioctl_usage(struct hiddev *hiddev, unsigned int cmd,
if (uref->field_index >= report->maxfield)
goto inval;
+ uref->field_index = array_index_nospec(uref->field_index,
+ report->maxfield);
field = report->field[uref->field_index];
@@ -753,6 +760,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (finfo.field_index >= report->maxfield)
break;
+ finfo.field_index = array_index_nospec(finfo.field_index,
+ report->maxfield);
field = report->field[finfo.field_index];
memset(&finfo, 0, sizeof(finfo));
@@ -797,6 +806,8 @@ static long hiddev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (cinfo.index >= hid->maxcollection)
break;
+ cinfo.index = array_index_nospec(cinfo.index,
+ hid->maxcollection);
cinfo.type = hid->collection[cinfo.index].type;
cinfo.usage = hid->collection[cinfo.index].usage;
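
Each hiddev hunk above applies the same Spectre-v1 mitigation: after the bounds check, the user-controlled index is passed through array_index_nospec() before it indexes kernel memory. A minimal sketch of that pattern (the helper is illustrative, the HID structures are the ones used above):

#include <linux/hid.h>
#include <linux/nospec.h>

/* Illustration: the range check alone is not enough, since a mispredicted
 * branch could still dereference report->field[idx] speculatively. */
static struct hid_field *lookup_field(struct hid_report *report,
				      unsigned int idx)
{
	if (idx >= report->maxfield)
		return NULL;

	idx = array_index_nospec(idx, report->maxfield);
	return report->field[idx];
}
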
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index c101369b51de..d6797535fff9 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -395,6 +395,14 @@ static void wacom_usage_mapping(struct hid_device *hdev,
}
}
+ /* 2nd-generation Intuos Pro Large has incorrect Y maximum */
+ if (hdev->vendor == USB_VENDOR_ID_WACOM &&
+ hdev->product == 0x0358 &&
+ WACOM_PEN_FIELD(field) &&
+ wacom_equivalent_usage(usage->hid) == HID_GD_Y) {
+ field->logical_maximum = 43200;
+ }
+
switch (usage->hid) {
case HID_GD_X:
features->x_max = field->logical_maximum;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 0bb44d0088ed..ad7afa74d365 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -3365,8 +3365,14 @@ void wacom_setup_device_quirks(struct wacom *wacom)
if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
features->device_type |= WACOM_DEVICETYPE_PAD;
- features->x_max = 4096;
- features->y_max = 4096;
+ if (features->type == INTUOSHT2) {
+ features->x_max = features->x_max / 10;
+ features->y_max = features->y_max / 10;
+ }
+ else {
+ features->x_max = 4096;
+ features->y_max = 4096;
+ }
}
else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
features->device_type |= WACOM_DEVICETYPE_PAD;
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index bf3bb7e1adab..9d3ef879dc51 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1074,6 +1074,13 @@ static struct dmi_system_id i8k_blacklist_fan_support_dmi_table[] __initdata = {
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
},
},
+ {
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ },
{ }
};
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 155d4d1d1585..f9d1349c3286 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -4175,7 +4175,7 @@ static int nct6775_probe(struct platform_device *pdev)
* The temperature is already monitored if the respective bit in <mask>
* is set.
*/
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < 31; i++) {
if (!(data->temp_mask & BIT(i + 1)))
continue;
if (!reg_temp_alternate[i])
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 4a34f311e1ff..6ec65adaba49 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -647,10 +647,10 @@ static int __i2c_bit_add_bus(struct i2c_adapter *adap,
if (bit_adap->getscl == NULL)
adap->quirks = &i2c_bit_quirk_no_clk_stretch;
- /* Bring bus to a known state. Looks like STOP if bus is not free yet */
- setscl(bit_adap, 1);
- udelay(bit_adap->udelay);
- setsda(bit_adap, 1);
+ /*
+ * We tried forcing SCL/SDA to an initial state here. But that caused a
+ * regression, sadly. Check Bugzilla #200045 for details.
+ */
ret = add_adapter(adap);
if (ret < 0)
diff --git a/drivers/i2c/busses/i2c-cht-wc.c b/drivers/i2c/busses/i2c-cht-wc.c
index 44cffad43701..c4d176f5ed79 100644
--- a/drivers/i2c/busses/i2c-cht-wc.c
+++ b/drivers/i2c/busses/i2c-cht-wc.c
@@ -234,7 +234,8 @@ static const struct irq_chip cht_wc_i2c_irq_chip = {
.name = "cht_wc_ext_chrg_irq_chip",
};
-static const char * const bq24190_suppliers[] = { "fusb302-typec-source" };
+static const char * const bq24190_suppliers[] = {
+ "tcpm-source-psy-i2c-fusb302" };
static const struct property_entry bq24190_props[] = {
PROPERTY_ENTRY_STRING_ARRAY("supplied-from", bq24190_suppliers),
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 75d6ab177055..7379043711df 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -237,12 +237,16 @@ static void i2c_davinci_calc_clk_dividers(struct davinci_i2c_dev *dev)
/*
* It's not always possible to have 1 to 2 ratio when d=7, so fall back
* to minimal possible clkh in this case.
+ *
+ * Note:
+ * CLKH is not allowed to be 0, in this case I2C clock is not generated
+ * at all
*/
- if (clk >= clkl + d) {
+ if (clk > clkl + d) {
clkh = clk - clkl - d;
clkl -= d;
} else {
- clkh = 0;
+ clkh = 1;
clkl = clk - (d << 1);
}
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index 005e6e0330c2..66f85bbf3591 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -279,9 +279,9 @@ static int i2c_gpio_probe(struct platform_device *pdev)
* required for an I2C bus.
*/
if (pdata->scl_is_open_drain)
- gflags = GPIOD_OUT_LOW;
+ gflags = GPIOD_OUT_HIGH;
else
- gflags = GPIOD_OUT_LOW_OPEN_DRAIN;
+ gflags = GPIOD_OUT_HIGH_OPEN_DRAIN;
priv->scl = i2c_gpio_get_desc(dev, "scl", 1, gflags);
if (IS_ERR(priv->scl))
return PTR_ERR(priv->scl);
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 0207e194f84b..498c5e891649 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -368,6 +368,7 @@ static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx,
goto err_desc;
}
+ reinit_completion(&dma->cmd_complete);
txdesc->callback = i2c_imx_dma_callback;
txdesc->callback_param = i2c_imx;
if (dma_submit_error(dmaengine_submit(txdesc))) {
@@ -622,7 +623,6 @@ static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx,
* The first byte must be transmitted by the CPU.
*/
imx_i2c_write_reg(i2c_8bit_addr_from_msg(msgs), i2c_imx, IMX_I2C_I2DR);
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -681,7 +681,6 @@ static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx,
if (result)
return result;
- reinit_completion(&i2c_imx->dma->cmd_complete);
time_left = wait_for_completion_timeout(
&i2c_imx->dma->cmd_complete,
msecs_to_jiffies(DMA_TIMEOUT));
@@ -1010,7 +1009,7 @@ static int i2c_imx_init_recovery_info(struct imx_i2c_struct *i2c_imx,
i2c_imx->pinctrl_pins_gpio = pinctrl_lookup_state(i2c_imx->pinctrl,
"gpio");
rinfo->sda_gpiod = devm_gpiod_get(&pdev->dev, "sda", GPIOD_IN);
- rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH);
+ rinfo->scl_gpiod = devm_gpiod_get(&pdev->dev, "scl", GPIOD_OUT_HIGH_OPEN_DRAIN);
if (PTR_ERR(rinfo->sda_gpiod) == -EPROBE_DEFER ||
PTR_ERR(rinfo->scl_gpiod) == -EPROBE_DEFER) {
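
The i2c-imx reordering above arms the completion before the DMA descriptor is submitted instead of just before the wait, since the callback may fire as soon as the transfer is issued. A generic sketch of that ordering; the helper and its parameters are illustrative:

#include <linux/completion.h>
#include <linux/dmaengine.h>
#include <linux/errno.h>

/* Illustration: reinit_completion() must run before dmaengine_submit() /
 * dma_async_issue_pending(), otherwise an early callback can be lost. */
static int start_dma(struct dma_chan *chan,
		     struct dma_async_tx_descriptor *txdesc,
		     struct completion *done)
{
	reinit_completion(done);	/* arm before submission */

	if (dma_submit_error(dmaengine_submit(txdesc)))
		return -EINVAL;
	dma_async_issue_pending(chan);

	/* caller then does wait_for_completion_timeout(done, ...) */
	return 0;
}
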
diff --git a/drivers/i2c/busses/i2c-rcar.c b/drivers/i2c/busses/i2c-rcar.c
index 5e310efd9446..3c1c817f6968 100644
--- a/drivers/i2c/busses/i2c-rcar.c
+++ b/drivers/i2c/busses/i2c-rcar.c
@@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
+#include <linux/reset.h>
#include <linux/slab.h>
/* register offsets */
@@ -111,8 +112,9 @@
#define ID_ARBLOST (1 << 3)
#define ID_NACK (1 << 4)
/* persistent flags */
+#define ID_P_NO_RXDMA (1 << 30) /* HW forbids RXDMA sometimes */
#define ID_P_PM_BLOCKED (1 << 31)
-#define ID_P_MASK ID_P_PM_BLOCKED
+#define ID_P_MASK (ID_P_PM_BLOCKED | ID_P_NO_RXDMA)
enum rcar_i2c_type {
I2C_RCAR_GEN1,
@@ -141,6 +143,8 @@ struct rcar_i2c_priv {
struct dma_chan *dma_rx;
struct scatterlist sg;
enum dma_data_direction dma_direction;
+
+ struct reset_control *rstc;
};
#define rcar_i2c_priv_to_dev(p) ((p)->adap.dev.parent)
@@ -370,6 +374,11 @@ static void rcar_i2c_dma_unmap(struct rcar_i2c_priv *priv)
dma_unmap_single(chan->device->dev, sg_dma_address(&priv->sg),
sg_dma_len(&priv->sg), priv->dma_direction);
+ /* Gen3 can only do one RXDMA per transfer and we just completed it */
+ if (priv->devtype == I2C_RCAR_GEN3 &&
+ priv->dma_direction == DMA_FROM_DEVICE)
+ priv->flags |= ID_P_NO_RXDMA;
+
priv->dma_direction = DMA_NONE;
}
@@ -407,8 +416,9 @@ static void rcar_i2c_dma(struct rcar_i2c_priv *priv)
unsigned char *buf;
int len;
- /* Do not use DMA if it's not available or for messages < 8 bytes */
- if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE))
+ /* Do various checks to see if DMA is feasible at all */
+ if (IS_ERR(chan) || msg->len < 8 || !(msg->flags & I2C_M_DMA_SAFE) ||
+ (read && priv->flags & ID_P_NO_RXDMA))
return;
if (read) {
@@ -739,6 +749,25 @@ static void rcar_i2c_release_dma(struct rcar_i2c_priv *priv)
}
}
+/* I2C is a special case, we need to poll the status of a reset */
+static int rcar_i2c_do_reset(struct rcar_i2c_priv *priv)
+{
+ int i, ret;
+
+ ret = reset_control_reset(priv->rstc);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < LOOP_TIMEOUT; i++) {
+ ret = reset_control_status(priv->rstc);
+ if (ret == 0)
+ return 0;
+ udelay(1);
+ }
+
+ return -ETIMEDOUT;
+}
+
static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
struct i2c_msg *msgs,
int num)
@@ -750,6 +779,16 @@ static int rcar_i2c_master_xfer(struct i2c_adapter *adap,
pm_runtime_get_sync(dev);
+ /* Gen3 needs a reset before allowing RXDMA once */
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->flags |= ID_P_NO_RXDMA;
+ if (!IS_ERR(priv->rstc)) {
+ ret = rcar_i2c_do_reset(priv);
+ if (ret == 0)
+ priv->flags &= ~ID_P_NO_RXDMA;
+ }
+ }
+
rcar_i2c_init(priv);
ret = rcar_i2c_bus_barrier(priv);
@@ -920,6 +959,15 @@ static int rcar_i2c_probe(struct platform_device *pdev)
if (ret < 0)
goto out_pm_put;
+ if (priv->devtype == I2C_RCAR_GEN3) {
+ priv->rstc = devm_reset_control_get_exclusive(&pdev->dev, NULL);
+ if (!IS_ERR(priv->rstc)) {
+ ret = reset_control_status(priv->rstc);
+ if (ret < 0)
+ priv->rstc = ERR_PTR(-ENOTSUPP);
+ }
+ }
+
/* Stay always active when multi-master to keep arbitration working */
if (of_property_read_bool(dev->of_node, "multi-master"))
priv->flags |= ID_P_PM_BLOCKED;
diff --git a/drivers/i2c/busses/i2c-stu300.c b/drivers/i2c/busses/i2c-stu300.c
index e866c481bfc3..fce52bdab2b7 100644
--- a/drivers/i2c/busses/i2c-stu300.c
+++ b/drivers/i2c/busses/i2c-stu300.c
@@ -127,7 +127,7 @@ enum stu300_error {
/*
* The number of address send athemps tried before giving up.
- * If the first one failes it seems like 5 to 8 attempts are required.
+ * If the first one fails it seems like 5 to 8 attempts are required.
*/
#define NUM_ADDR_RESEND_ATTEMPTS 12
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 5fccd1f1bca8..797def5319f1 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -545,6 +545,14 @@ static int tegra_i2c_disable_packet_mode(struct tegra_i2c_dev *i2c_dev)
{
u32 cnfg;
+ /*
+ * NACK interrupt is generated before the I2C controller generates
+ * the STOP condition on the bus. So wait for 2 clock periods
+ * before disabling the controller so that the STOP condition has
+ * been delivered properly.
+ */
+ udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
+
cnfg = i2c_readl(i2c_dev, I2C_CNFG);
if (cnfg & I2C_CNFG_PACKET_MODE_EN)
i2c_writel(i2c_dev, cnfg & ~I2C_CNFG_PACKET_MODE_EN, I2C_CNFG);
@@ -706,15 +714,6 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
if (likely(i2c_dev->msg_err == I2C_ERR_NONE))
return 0;
- /*
- * NACK interrupt is generated before the I2C controller generates
- * the STOP condition on the bus. So wait for 2 clock periods
- * before resetting the controller so that the STOP condition has
- * been delivered properly.
- */
- if (i2c_dev->msg_err == I2C_ERR_NO_ACK)
- udelay(DIV_ROUND_UP(2 * 1000000, i2c_dev->bus_clk_rate));
-
tegra_i2c_init(i2c_dev);
if (i2c_dev->msg_err == I2C_ERR_NO_ACK) {
if (msg->flags & I2C_M_IGNORE_NAK)
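
As a worked example of the udelay() moved above, assuming common bus rates: at a 100 kHz bus clock, DIV_ROUND_UP(2 * 1000000, 100000) = 20, i.e. a 20 µs wait spanning exactly two SCL periods; at 400 kHz the same expression evaluates to 5 µs.
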
diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
index 31d16ada6e7d..301285c54603 100644
--- a/drivers/i2c/i2c-core-base.c
+++ b/drivers/i2c/i2c-core-base.c
@@ -198,7 +198,16 @@ int i2c_generic_scl_recovery(struct i2c_adapter *adap)
val = !val;
bri->set_scl(adap, val);
- ndelay(RECOVERY_NDELAY);
+
+ /*
+ * If we can set SDA, we will always create STOP here to ensure
+ * the additional pulses will do no harm. This is achieved by
+ * letting SDA follow SCL half a cycle later.
+ */
+ ndelay(RECOVERY_NDELAY / 2);
+ if (bri->set_sda)
+ bri->set_sda(adap, val);
+ ndelay(RECOVERY_NDELAY / 2);
}
/* check if recovery actually succeeded */
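
A condensed sketch of the recovery toggling as it behaves after the change above; adap/bri and the clock count are illustrative, while set_scl/set_sda and RECOVERY_NDELAY are the callbacks and constant used in the hunk:

#include <linux/delay.h>
#include <linux/i2c.h>

/* Sketch: SDA mirrors SCL half a period later, so during each high phase
 * SDA rises while SCL is already high, which the bus sees as a STOP. */
static void toggle_bus(struct i2c_adapter *adap,
		       struct i2c_bus_recovery_info *bri, int num_clocks)
{
	int i = 0, val = 1;

	while (i++ < num_clocks) {
		val = !val;
		bri->set_scl(adap, val);
		ndelay(RECOVERY_NDELAY / 2);
		if (bri->set_sda)
			bri->set_sda(adap, val);
		ndelay(RECOVERY_NDELAY / 2);
	}
}
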
diff --git a/drivers/i2c/i2c-core-smbus.c b/drivers/i2c/i2c-core-smbus.c
index f3f683041e7f..51970bae3c4a 100644
--- a/drivers/i2c/i2c-core-smbus.c
+++ b/drivers/i2c/i2c-core-smbus.c
@@ -465,15 +465,18 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
status = i2c_transfer(adapter, msg, num);
if (status < 0)
- return status;
- if (status != num)
- return -EIO;
+ goto cleanup;
+ if (status != num) {
+ status = -EIO;
+ goto cleanup;
+ }
+ status = 0;
/* Check PEC if last message is a read */
if (i && (msg[num-1].flags & I2C_M_RD)) {
status = i2c_smbus_check_pec(partial_pec, &msg[num-1]);
if (status < 0)
- return status;
+ goto cleanup;
}
if (read_write == I2C_SMBUS_READ)
@@ -499,12 +502,13 @@ static s32 i2c_smbus_xfer_emulated(struct i2c_adapter *adapter, u16 addr,
break;
}
+cleanup:
if (msg[0].flags & I2C_M_DMA_SAFE)
kfree(msg[0].buf);
if (msg[1].flags & I2C_M_DMA_SAFE)
kfree(msg[1].buf);
- return 0;
+ return status;
}
/**
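
The error-path rework above funnels every exit through one cleanup label so the DMA-safe buffers are freed exactly once and the final status is preserved. A generic sketch of that shape, with all names (including the do_hw_transfer() helper) hypothetical:

#include <linux/slab.h>
#include <linux/errno.h>

extern int do_hw_transfer(void *buf, int num);	/* hypothetical helper */

static int emulated_xfer(int num)
{
	int status;
	void *buf = kmalloc(64, GFP_KERNEL);

	if (!buf)
		return -ENOMEM;

	status = do_hw_transfer(buf, num);
	if (status < 0)
		goto cleanup;		/* propagate the error */
	if (status != num) {
		status = -EIO;		/* short transfer */
		goto cleanup;
	}
	status = 0;			/* success reports 0, not 'num' */

cleanup:
	kfree(buf);
	return status;
}
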
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7e3d82cff3d5..c149c9c360fc 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -1053,7 +1053,7 @@ static irqreturn_t mma8452_interrupt(int irq, void *p)
if (src < 0)
return IRQ_NONE;
- if (!(src & data->chip_info->enabled_events))
+ if (!(src & (data->chip_info->enabled_events | MMA8452_INT_DRDY)))
return IRQ_NONE;
if (src & MMA8452_INT_DRDY) {
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index f9c0624505a2..42618fe4f83e 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -959,6 +959,8 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
}
irq_type = irqd_get_trigger_type(desc);
+ if (!irq_type)
+ irq_type = IRQF_TRIGGER_RISING;
if (irq_type == IRQF_TRIGGER_RISING)
st->irq_mask = INV_MPU6050_ACTIVE_HIGH;
else if (irq_type == IRQF_TRIGGER_FALLING)
diff --git a/drivers/iio/light/tsl2772.c b/drivers/iio/light/tsl2772.c
index 34d42a2504c9..df5b2a0da96c 100644
--- a/drivers/iio/light/tsl2772.c
+++ b/drivers/iio/light/tsl2772.c
@@ -582,6 +582,8 @@ static int tsl2772_als_calibrate(struct iio_dev *indio_dev)
"%s: failed to get lux\n", __func__);
return lux_val;
}
+ if (lux_val == 0)
+ return -ERANGE;
ret = (chip->settings.als_cal_target * chip->settings.als_gain_trim) /
lux_val;
diff --git a/drivers/iio/pressure/bmp280-core.c b/drivers/iio/pressure/bmp280-core.c
index 5ec3e41b65f2..fe87d27779d9 100644
--- a/drivers/iio/pressure/bmp280-core.c
+++ b/drivers/iio/pressure/bmp280-core.c
@@ -415,10 +415,9 @@ static int bmp280_read_humid(struct bmp280_data *data, int *val, int *val2)
}
comp_humidity = bmp280_compensate_humidity(data, adc_humidity);
- *val = comp_humidity;
- *val2 = 1024;
+ *val = comp_humidity * 1000 / 1024;
- return IIO_VAL_FRACTIONAL;
+ return IIO_VAL_INT;
}
static int bmp280_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 3e90b6a1d9d2..cc06e8404e9b 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -3488,8 +3488,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
struct ib_flow_attr *flow_attr;
struct ib_qp *qp;
struct ib_uflow_resources *uflow_res;
+ struct ib_uverbs_flow_spec_hdr *kern_spec;
int err = 0;
- void *kern_spec;
void *ib_spec;
int i;
@@ -3538,8 +3538,8 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (!kern_flow_attr)
return -ENOMEM;
- memcpy(kern_flow_attr, &cmd.flow_attr, sizeof(*kern_flow_attr));
- err = ib_copy_from_udata(kern_flow_attr + 1, ucore,
+ *kern_flow_attr = cmd.flow_attr;
+ err = ib_copy_from_udata(&kern_flow_attr->flow_specs, ucore,
cmd.flow_attr.size);
if (err)
goto err_free_attr;
@@ -3559,6 +3559,11 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
goto err_uobj;
}
+ if (qp->qp_type != IB_QPT_UD && qp->qp_type != IB_QPT_RAW_PACKET) {
+ err = -EINVAL;
+ goto err_put;
+ }
+
flow_attr = kzalloc(struct_size(flow_attr, flows,
cmd.flow_attr.num_of_specs), GFP_KERNEL);
if (!flow_attr) {
@@ -3578,21 +3583,22 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
flow_attr->flags = kern_flow_attr->flags;
flow_attr->size = sizeof(*flow_attr);
- kern_spec = kern_flow_attr + 1;
+ kern_spec = kern_flow_attr->flow_specs;
ib_spec = flow_attr + 1;
for (i = 0; i < flow_attr->num_of_specs &&
- cmd.flow_attr.size > offsetof(struct ib_uverbs_flow_spec, reserved) &&
- cmd.flow_attr.size >=
- ((struct ib_uverbs_flow_spec *)kern_spec)->size; i++) {
- err = kern_spec_to_ib_spec(file->ucontext, kern_spec, ib_spec,
- uflow_res);
+ cmd.flow_attr.size >= sizeof(*kern_spec) &&
+ cmd.flow_attr.size >= kern_spec->size;
+ i++) {
+ err = kern_spec_to_ib_spec(
+ file->ucontext, (struct ib_uverbs_flow_spec *)kern_spec,
+ ib_spec, uflow_res);
if (err)
goto err_free;
flow_attr->size +=
((union ib_flow_spec *) ib_spec)->size;
- cmd.flow_attr.size -= ((struct ib_uverbs_flow_spec *)kern_spec)->size;
- kern_spec += ((struct ib_uverbs_flow_spec *) kern_spec)->size;
+ cmd.flow_attr.size -= kern_spec->size;
+ kern_spec = ((void *)kern_spec) + kern_spec->size;
ib_spec += ((union ib_flow_spec *) ib_spec)->size;
}
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3ae2339dd27a..2094d136513d 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -736,10 +736,6 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if (ret)
return ret;
- if (!file->ucontext &&
- (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended))
- return -EINVAL;
-
if (extended) {
if (count < (sizeof(hdr) + sizeof(ex_hdr)))
return -EINVAL;
@@ -759,6 +755,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
goto out;
}
+ /*
+ * Must be after the ib_dev check, as once the RCU clears ib_dev ==
+ * NULL means ucontext == NULL
+ */
+ if (!file->ucontext &&
+ (command != IB_USER_VERBS_CMD_GET_CONTEXT || extended)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
if (!verify_command_mask(ib_dev, command, extended)) {
ret = -EOPNOTSUPP;
goto out;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 0b56828c1319..9d6beb948535 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1562,11 +1562,12 @@ EXPORT_SYMBOL(ib_destroy_qp);
/* Completion queues */
-struct ib_cq *ib_create_cq(struct ib_device *device,
- ib_comp_handler comp_handler,
- void (*event_handler)(struct ib_event *, void *),
- void *cq_context,
- const struct ib_cq_init_attr *cq_attr)
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr,
+ const char *caller)
{
struct ib_cq *cq;
@@ -1580,12 +1581,13 @@ struct ib_cq *ib_create_cq(struct ib_device *device,
cq->cq_context = cq_context;
atomic_set(&cq->usecnt, 0);
cq->res.type = RDMA_RESTRACK_CQ;
+ cq->res.kern_name = caller;
rdma_restrack_add(&cq->res);
}
return cq;
}
-EXPORT_SYMBOL(ib_create_cq);
+EXPORT_SYMBOL(__ib_create_cq);
int rdma_set_cq_moderation(struct ib_cq *cq, u16 cq_count, u16 cq_period)
{
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 1445918e3239..7b76e6f81aeb 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -774,7 +774,7 @@ static int c4iw_set_page(struct ib_mr *ibmr, u64 addr)
{
struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
- if (unlikely(mhp->mpl_len == mhp->max_mpl_len))
+ if (unlikely(mhp->mpl_len == mhp->attr.pbl_size))
return -ENOMEM;
mhp->mpl[mhp->mpl_len++] = addr;
diff --git a/drivers/infiniband/hw/hfi1/rc.c b/drivers/infiniband/hw/hfi1/rc.c
index 1a1a47ac53c6..f15c93102081 100644
--- a/drivers/infiniband/hw/hfi1/rc.c
+++ b/drivers/infiniband/hw/hfi1/rc.c
@@ -271,7 +271,7 @@ int hfi1_make_rc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
lockdep_assert_held(&qp->s_lock);
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (priv->hdr_type == HFI1_PKT_TYPE_9B) {
diff --git a/drivers/infiniband/hw/hfi1/uc.c b/drivers/infiniband/hw/hfi1/uc.c
index b7b671017e59..e254dcec6f64 100644
--- a/drivers/infiniband/hw/hfi1/uc.c
+++ b/drivers/infiniband/hw/hfi1/uc.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -72,7 +72,7 @@ int hfi1_make_uc_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
int middle = 0;
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 1ab332f1866e..70d39fc450a1 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2015, 2016 Intel Corporation.
+ * Copyright(c) 2015 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -503,7 +503,7 @@ int hfi1_make_ud_req(struct rvt_qp *qp, struct hfi1_pkt_state *ps)
u32 lid;
ps->s_txreq = get_txreq(ps->dev, qp);
- if (IS_ERR(ps->s_txreq))
+ if (!ps->s_txreq)
goto bail_no_tx;
if (!(ib_rvt_state_ops[qp->state] & RVT_PROCESS_NEXT_SEND_OK)) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.c b/drivers/infiniband/hw/hfi1/verbs_txreq.c
index 873e48ea923f..c4ab2d5b4502 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.c
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.c
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 - 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -94,7 +94,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
struct rvt_qp *qp)
__must_hold(&qp->s_lock)
{
- struct verbs_txreq *tx = ERR_PTR(-EBUSY);
+ struct verbs_txreq *tx = NULL;
write_seqlock(&dev->txwait_lock);
if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
diff --git a/drivers/infiniband/hw/hfi1/verbs_txreq.h b/drivers/infiniband/hw/hfi1/verbs_txreq.h
index 729244c3086c..1c19bbc764b2 100644
--- a/drivers/infiniband/hw/hfi1/verbs_txreq.h
+++ b/drivers/infiniband/hw/hfi1/verbs_txreq.h
@@ -1,5 +1,5 @@
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -83,7 +83,7 @@ static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
if (unlikely(!tx)) {
/* call slow path to get the lock */
tx = __get_txreq(dev, qp);
- if (IS_ERR(tx))
+ if (!tx)
return tx;
}
tx->qp = qp;
diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c
index ed1f253faf97..c7c85c22e4e3 100644
--- a/drivers/infiniband/hw/mlx4/mr.c
+++ b/drivers/infiniband/hw/mlx4/mr.c
@@ -486,8 +486,11 @@ int mlx4_ib_rereg_user_mr(struct ib_mr *mr, int flags,
}
if (flags & IB_MR_REREG_ACCESS) {
- if (ib_access_writable(mr_access_flags) && !mmr->umem->writable)
- return -EPERM;
+ if (ib_access_writable(mr_access_flags) &&
+ !mmr->umem->writable) {
+ err = -EPERM;
+ goto release_mpt_entry;
+ }
err = mlx4_mr_hw_change_access(dev->dev, *pmpt_entry,
convert_access(mr_access_flags));
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e52dd21519b4..b3ba9a222550 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -3199,8 +3199,8 @@ static int flow_counters_set_data(struct ib_counters *ibcounters,
if (!mcounters->hw_cntrs_hndl) {
mcounters->hw_cntrs_hndl = mlx5_fc_create(
to_mdev(ibcounters->device)->mdev, false);
- if (!mcounters->hw_cntrs_hndl) {
- ret = -ENOMEM;
+ if (IS_ERR(mcounters->hw_cntrs_hndl)) {
+ ret = PTR_ERR(mcounters->hw_cntrs_hndl);
goto free;
}
hw_hndl = true;
@@ -3546,29 +3546,35 @@ static struct ib_flow *mlx5_ib_create_flow(struct ib_qp *qp,
return ERR_PTR(-ENOMEM);
err = ib_copy_from_udata(ucmd, udata, required_ucmd_sz);
- if (err) {
- kfree(ucmd);
- return ERR_PTR(err);
- }
+ if (err)
+ goto free_ucmd;
}
- if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO)
- return ERR_PTR(-ENOMEM);
+ if (flow_attr->priority > MLX5_IB_FLOW_LAST_PRIO) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
if (domain != IB_FLOW_DOMAIN_USER ||
flow_attr->port > dev->num_ports ||
(flow_attr->flags & ~(IB_FLOW_ATTR_FLAGS_DONT_TRAP |
- IB_FLOW_ATTR_FLAGS_EGRESS)))
- return ERR_PTR(-EINVAL);
+ IB_FLOW_ATTR_FLAGS_EGRESS))) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
if (is_egress &&
(flow_attr->type == IB_FLOW_ATTR_ALL_DEFAULT ||
- flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT))
- return ERR_PTR(-EINVAL);
+ flow_attr->type == IB_FLOW_ATTR_MC_DEFAULT)) {
+ err = -EINVAL;
+ goto free_ucmd;
+ }
dst = kzalloc(sizeof(*dst), GFP_KERNEL);
- if (!dst)
- return ERR_PTR(-ENOMEM);
+ if (!dst) {
+ err = -ENOMEM;
+ goto free_ucmd;
+ }
mutex_lock(&dev->flow_db->lock);
@@ -3637,8 +3643,8 @@ destroy_ft:
unlock:
mutex_unlock(&dev->flow_db->lock);
kfree(dst);
+free_ucmd:
kfree(ucmd);
- kfree(handler);
return ERR_PTR(err);
}
@@ -6107,7 +6113,7 @@ static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
dev->num_ports = max(MLX5_CAP_GEN(mdev, num_ports),
MLX5_CAP_GEN(mdev, num_vhca_ports));
- if (MLX5_VPORT_MANAGER(mdev) &&
+ if (MLX5_ESWITCH_MANAGER(mdev) &&
mlx5_ib_eswitch_mode(mdev->priv.eswitch) == SRIOV_OFFLOADS) {
dev->rep = mlx5_ib_vport_rep(mdev->priv.eswitch, 0);
diff --git a/drivers/infiniband/hw/mlx5/srq.c b/drivers/infiniband/hw/mlx5/srq.c
index 0af7b7905550..f5de5adc9b1a 100644
--- a/drivers/infiniband/hw/mlx5/srq.c
+++ b/drivers/infiniband/hw/mlx5/srq.c
@@ -266,18 +266,24 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
desc_size = sizeof(struct mlx5_wqe_srq_next_seg) +
srq->msrq.max_gs * sizeof(struct mlx5_wqe_data_seg);
- if (desc_size == 0 || srq->msrq.max_gs > desc_size)
- return ERR_PTR(-EINVAL);
+ if (desc_size == 0 || srq->msrq.max_gs > desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
desc_size = roundup_pow_of_two(desc_size);
desc_size = max_t(size_t, 32, desc_size);
- if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg))
- return ERR_PTR(-EINVAL);
+ if (desc_size < sizeof(struct mlx5_wqe_srq_next_seg)) {
+ err = -EINVAL;
+ goto err_srq;
+ }
srq->msrq.max_avail_gather = (desc_size - sizeof(struct mlx5_wqe_srq_next_seg)) /
sizeof(struct mlx5_wqe_data_seg);
srq->msrq.wqe_shift = ilog2(desc_size);
buf_size = srq->msrq.max * desc_size;
- if (buf_size < desc_size)
- return ERR_PTR(-EINVAL);
+ if (buf_size < desc_size) {
+ err = -EINVAL;
+ goto err_srq;
+ }
in.type = init_attr->srq_type;
if (pd->uobject)
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index f7ac8fc9b531..f07b8df96f43 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -1957,6 +1957,9 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
}
if (attr_mask & (IB_QP_AV | IB_QP_PATH_MTU)) {
+ if (rdma_protocol_iwarp(&dev->ibdev, 1))
+ return -EINVAL;
+
if (attr_mask & IB_QP_PATH_MTU) {
if (attr->path_mtu < IB_MTU_256 ||
attr->path_mtu > IB_MTU_4096) {
diff --git a/drivers/infiniband/sw/rxe/rxe_req.c b/drivers/infiniband/sw/rxe/rxe_req.c
index f30eeba3f772..8be27238a86e 100644
--- a/drivers/infiniband/sw/rxe/rxe_req.c
+++ b/drivers/infiniband/sw/rxe/rxe_req.c
@@ -645,6 +645,9 @@ next_wqe:
} else {
goto exit;
}
+ if ((wqe->wr.send_flags & IB_SEND_SIGNALED) ||
+ qp->sq_sig_type == IB_SIGNAL_ALL_WR)
+ rxe_run_task(&qp->comp.task, 1);
qp->req.wqe_index = next_index(qp->sq.queue,
qp->req.wqe_index);
goto next_wqe;
@@ -709,6 +712,7 @@ next_wqe:
if (fill_packet(qp, wqe, &pkt, skb, payload)) {
pr_debug("qp#%d Error during fill packet\n", qp_num(qp));
+ kfree_skb(skb);
goto err;
}
@@ -740,7 +744,6 @@ next_wqe:
goto next_wqe;
err:
- kfree_skb(skb);
wqe->status = IB_WC_LOC_PROT_ERR;
wqe->state = wqe_state_error;
__rxe_do_task(&qp->comp.task);
diff --git a/drivers/input/input-mt.c b/drivers/input/input-mt.c
index cf30523c6ef6..6c7326c93721 100644
--- a/drivers/input/input-mt.c
+++ b/drivers/input/input-mt.c
@@ -131,8 +131,10 @@ EXPORT_SYMBOL(input_mt_destroy_slots);
* inactive, or if the tool type is changed, a new tracking id is
* assigned to the slot. The tool type is only reported if the
* corresponding absbit field is set.
+ *
+ * Returns true if contact is active.
*/
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active)
{
struct input_mt *mt = dev->mt;
@@ -140,22 +142,24 @@ void input_mt_report_slot_state(struct input_dev *dev,
int id;
if (!mt)
- return;
+ return false;
slot = &mt->slots[mt->slot];
slot->frame = mt->frame;
if (!active) {
input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, -1);
- return;
+ return false;
}
id = input_mt_get_value(slot, ABS_MT_TRACKING_ID);
- if (id < 0 || input_mt_get_value(slot, ABS_MT_TOOL_TYPE) != tool_type)
+ if (id < 0)
id = input_mt_new_trkid(mt);
input_event(dev, EV_ABS, ABS_MT_TRACKING_ID, id);
input_event(dev, EV_ABS, ABS_MT_TOOL_TYPE, tool_type);
+
+ return true;
}
EXPORT_SYMBOL(input_mt_report_slot_state);
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 48e36acbeb49..cd620e009bad 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -125,7 +125,7 @@ static const struct xpad_device {
u8 mapping;
u8 xtype;
} xpad_device[] = {
- { 0x0079, 0x18d4, "GPD Win 2 Controller", 0, XTYPE_XBOX360 },
+ { 0x0079, 0x18d4, "GPD Win 2 X-Box Controller", 0, XTYPE_XBOX360 },
{ 0x044f, 0x0f00, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f03, "Thrustmaster Wheel", 0, XTYPE_XBOX },
{ 0x044f, 0x0f07, "Thrustmaster, Inc. Controller", 0, XTYPE_XBOX },
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index f6e643b589b6..e8dae6195b30 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -45,7 +45,7 @@ struct event_dev {
static irqreturn_t events_interrupt(int irq, void *dev_id)
{
struct event_dev *edev = dev_id;
- unsigned type, code, value;
+ unsigned int type, code, value;
type = __raw_readl(edev->addr + REG_READ);
code = __raw_readl(edev->addr + REG_READ);
@@ -57,7 +57,7 @@ static irqreturn_t events_interrupt(int irq, void *dev_id)
}
static void events_import_bits(struct event_dev *edev,
- unsigned long bits[], unsigned type, size_t count)
+ unsigned long bits[], unsigned int type, size_t count)
{
void __iomem *addr = edev->addr;
int i, j;
@@ -99,6 +99,7 @@ static void events_import_abs_params(struct event_dev *edev)
for (j = 0; j < ARRAY_SIZE(val); j++) {
int offset = (i * ARRAY_SIZE(val) + j) * sizeof(u32);
+
val[j] = __raw_readl(edev->addr + REG_DATA + offset);
}
@@ -112,7 +113,7 @@ static int events_probe(struct platform_device *pdev)
struct input_dev *input_dev;
struct event_dev *edev;
struct resource *res;
- unsigned keymapnamelen;
+ unsigned int keymapnamelen;
void __iomem *addr;
int irq;
int i;
@@ -150,7 +151,7 @@ static int events_probe(struct platform_device *pdev)
for (i = 0; i < keymapnamelen; i++)
edev->name[i] = __raw_readb(edev->addr + REG_DATA + i);
- pr_debug("events_probe() keymap=%s\n", edev->name);
+ pr_debug("%s: keymap=%s\n", __func__, edev->name);
input_dev->name = edev->name;
input_dev->id.bustype = BUS_HOST;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index c25606e00693..ca59a2be9bc5 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -841,4 +841,14 @@ config INPUT_RAVE_SP_PWRBUTTON
To compile this driver as a module, choose M here: the
module will be called rave-sp-pwrbutton.
+config INPUT_SC27XX_VIBRA
+ tristate "Spreadtrum sc27xx vibrator support"
+ depends on MFD_SC27XX_PMIC || COMPILE_TEST
+ select INPUT_FF_MEMLESS
+ help
+ This option enables support for Spreadtrum sc27xx vibrator driver.
+
+ To compile this driver as a module, choose M here. The module will
+ be called sc27xx_vibra.
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 72cde28649e2..9d0f9d1ff68f 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -66,6 +66,7 @@ obj-$(CONFIG_INPUT_RETU_PWRBUTTON) += retu-pwrbutton.o
obj-$(CONFIG_INPUT_AXP20X_PEK) += axp20x-pek.o
obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o
obj-$(CONFIG_INPUT_RK805_PWRKEY) += rk805-pwrkey.o
+obj-$(CONFIG_INPUT_SC27XX_VIBRA) += sc27xx-vibra.o
obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o
obj-$(CONFIG_INPUT_SIRFSOC_ONKEY) += sirfsoc-onkey.o
obj-$(CONFIG_INPUT_SOC_BUTTON_ARRAY) += soc_button_array.o
diff --git a/drivers/input/misc/sc27xx-vibra.c b/drivers/input/misc/sc27xx-vibra.c
new file mode 100644
index 000000000000..295251abbdac
--- /dev/null
+++ b/drivers/input/misc/sc27xx-vibra.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Spreadtrum Communications Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/input.h>
+#include <linux/workqueue.h>
+
+#define CUR_DRV_CAL_SEL GENMASK(13, 12)
+#define SLP_LDOVIBR_PD_EN BIT(9)
+#define LDO_VIBR_PD BIT(8)
+
+struct vibra_info {
+ struct input_dev *input_dev;
+ struct work_struct play_work;
+ struct regmap *regmap;
+ u32 base;
+ u32 strength;
+ bool enabled;
+};
+
+static void sc27xx_vibra_set(struct vibra_info *info, bool on)
+{
+ if (on) {
+ regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD, 0);
+ regmap_update_bits(info->regmap, info->base,
+ SLP_LDOVIBR_PD_EN, 0);
+ info->enabled = true;
+ } else {
+ regmap_update_bits(info->regmap, info->base, LDO_VIBR_PD,
+ LDO_VIBR_PD);
+ regmap_update_bits(info->regmap, info->base,
+ SLP_LDOVIBR_PD_EN, SLP_LDOVIBR_PD_EN);
+ info->enabled = false;
+ }
+}
+
+static int sc27xx_vibra_hw_init(struct vibra_info *info)
+{
+ return regmap_update_bits(info->regmap, info->base, CUR_DRV_CAL_SEL, 0);
+}
+
+static void sc27xx_vibra_play_work(struct work_struct *work)
+{
+ struct vibra_info *info = container_of(work, struct vibra_info,
+ play_work);
+
+ if (info->strength && !info->enabled)
+ sc27xx_vibra_set(info, true);
+ else if (info->strength == 0 && info->enabled)
+ sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_play(struct input_dev *input, void *data,
+ struct ff_effect *effect)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+
+ info->strength = effect->u.rumble.weak_magnitude;
+ schedule_work(&info->play_work);
+
+ return 0;
+}
+
+static void sc27xx_vibra_close(struct input_dev *input)
+{
+ struct vibra_info *info = input_get_drvdata(input);
+
+ cancel_work_sync(&info->play_work);
+ if (info->enabled)
+ sc27xx_vibra_set(info, false);
+}
+
+static int sc27xx_vibra_probe(struct platform_device *pdev)
+{
+ struct vibra_info *info;
+ int error;
+
+ info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
+ if (!info)
+ return -ENOMEM;
+
+ info->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!info->regmap) {
+ dev_err(&pdev->dev, "failed to get vibrator regmap.\n");
+ return -ENODEV;
+ }
+
+ error = device_property_read_u32(&pdev->dev, "reg", &info->base);
+ if (error) {
+ dev_err(&pdev->dev, "failed to get vibrator base address.\n");
+ return error;
+ }
+
+ info->input_dev = devm_input_allocate_device(&pdev->dev);
+ if (!info->input_dev) {
+ dev_err(&pdev->dev, "failed to allocate input device.\n");
+ return -ENOMEM;
+ }
+
+ info->input_dev->name = "sc27xx:vibrator";
+ info->input_dev->id.version = 0;
+ info->input_dev->close = sc27xx_vibra_close;
+
+ input_set_drvdata(info->input_dev, info);
+ input_set_capability(info->input_dev, EV_FF, FF_RUMBLE);
+ INIT_WORK(&info->play_work, sc27xx_vibra_play_work);
+ info->enabled = false;
+
+ error = sc27xx_vibra_hw_init(info);
+ if (error) {
+ dev_err(&pdev->dev, "failed to initialize the vibrator.\n");
+ return error;
+ }
+
+ error = input_ff_create_memless(info->input_dev, NULL,
+ sc27xx_vibra_play);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register vibrator to FF.\n");
+ return error;
+ }
+
+ error = input_register_device(info->input_dev);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register input device.\n");
+ return error;
+ }
+
+ return 0;
+}
+
+static const struct of_device_id sc27xx_vibra_of_match[] = {
+ { .compatible = "sprd,sc2731-vibrator", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sc27xx_vibra_of_match);
+
+static struct platform_driver sc27xx_vibra_driver = {
+ .driver = {
+ .name = "sc27xx-vibrator",
+ .of_match_table = sc27xx_vibra_of_match,
+ },
+ .probe = sc27xx_vibra_probe,
+};
+
+module_platform_driver(sc27xx_vibra_driver);
+
+MODULE_DESCRIPTION("Spreadtrum SC27xx Vibrator Driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Xiaotong Lu <xiaotong.lu@spreadtrum.com>");
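
The new driver registers a single FF_RUMBLE capability through the memless force-feedback core; its play() callback looks only at the effect's weak_magnitude, and a workqueue toggles the vibrator LDO on or off accordingly. For reference, a hedged user-space sketch of driving such a device through the standard evdev force-feedback interface follows; the event-node path is an assumption and error handling is kept minimal.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/input.h>

int main(void)
{
	struct ff_effect effect;
	struct input_event play;
	int fd = open("/dev/input/event0", O_RDWR);	/* assumed node */

	if (fd < 0)
		return 1;

	memset(&effect, 0, sizeof(effect));
	effect.type = FF_RUMBLE;
	effect.id = -1;					/* kernel assigns an id */
	effect.u.rumble.weak_magnitude = 0x8000;	/* the only field this driver uses */
	effect.replay.length = 1000;			/* ms */
	if (ioctl(fd, EVIOCSFF, &effect) < 0)		/* upload the effect */
		return 1;

	memset(&play, 0, sizeof(play));
	play.type = EV_FF;
	play.code = effect.id;
	play.value = 1;					/* start playback */
	write(fd, &play, sizeof(play));

	sleep(2);
	close(fd);
	return 0;
}
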
diff --git a/drivers/input/mouse/elan_i2c.h b/drivers/input/mouse/elan_i2c.h
index 599544c1a91c..243e0fa6e3e3 100644
--- a/drivers/input/mouse/elan_i2c.h
+++ b/drivers/input/mouse/elan_i2c.h
@@ -27,6 +27,8 @@
#define ETP_DISABLE_POWER 0x0001
#define ETP_PRESSURE_OFFSET 25
+#define ETP_CALIBRATE_MAX_LEN 3
+
/* IAP Firmware handling */
#define ETP_PRODUCT_ID_FORMAT_STRING "%d.0"
#define ETP_FW_NAME "elan_i2c_" ETP_PRODUCT_ID_FORMAT_STRING ".bin"
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 8ff75114e762..f5ae24865355 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -613,7 +613,7 @@ static ssize_t calibrate_store(struct device *dev,
int tries = 20;
int retval;
int error;
- u8 val[3];
+ u8 val[ETP_CALIBRATE_MAX_LEN];
retval = mutex_lock_interruptible(&data->sysfs_mutex);
if (retval)
@@ -1345,6 +1345,9 @@ static const struct acpi_device_id elan_acpi_id[] = {
{ "ELAN060C", 0 },
{ "ELAN0611", 0 },
{ "ELAN0612", 0 },
+ { "ELAN0618", 0 },
+ { "ELAN061D", 0 },
+ { "ELAN0622", 0 },
{ "ELAN1000", 0 },
{ }
};
diff --git a/drivers/input/mouse/elan_i2c_smbus.c b/drivers/input/mouse/elan_i2c_smbus.c
index cfcb32559925..c060d270bc4d 100644
--- a/drivers/input/mouse/elan_i2c_smbus.c
+++ b/drivers/input/mouse/elan_i2c_smbus.c
@@ -56,7 +56,7 @@
static int elan_smbus_initialize(struct i2c_client *client)
{
u8 check[ETP_SMBUS_HELLOPACKET_LEN] = { 0x55, 0x55, 0x55, 0x55, 0x55 };
- u8 values[ETP_SMBUS_HELLOPACKET_LEN] = { 0, 0, 0, 0, 0 };
+ u8 values[I2C_SMBUS_BLOCK_MAX] = {0};
int len, error;
/* Get hello packet */
@@ -117,12 +117,16 @@ static int elan_smbus_calibrate(struct i2c_client *client)
static int elan_smbus_calibrate_result(struct i2c_client *client, u8 *val)
{
int error;
+ u8 buf[I2C_SMBUS_BLOCK_MAX] = {0};
+
+ BUILD_BUG_ON(ETP_CALIBRATE_MAX_LEN > sizeof(buf));
error = i2c_smbus_read_block_data(client,
- ETP_SMBUS_CALIBRATE_QUERY, val);
+ ETP_SMBUS_CALIBRATE_QUERY, buf);
if (error < 0)
return error;
+ memcpy(val, buf, ETP_CALIBRATE_MAX_LEN);
return 0;
}
@@ -472,6 +476,8 @@ static int elan_smbus_get_report(struct i2c_client *client, u8 *report)
{
int len;
+ BUILD_BUG_ON(I2C_SMBUS_BLOCK_MAX > ETP_SMBUS_REPORT_LEN);
+
len = i2c_smbus_read_block_data(client,
ETP_SMBUS_PACKET_QUERY,
&report[ETP_SMBUS_REPORT_OFFSET]);
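
Both changes here guard against i2c_smbus_read_block_data() writing more than the caller expected: the helper may store up to I2C_SMBUS_BLOCK_MAX (32) bytes into the buffer it is handed, so short destinations such as the 3-byte calibration result must be filled via a full-sized scratch buffer. A minimal sketch of that pattern, with an illustrative helper name:

#include <linux/i2c.h>
#include <linux/string.h>
#include <linux/bug.h>

/* Illustrative helper: read a block register safely and hand back only the
 * first @outlen bytes to the caller's small buffer. */
static int smbus_read_prefix(struct i2c_client *client, u8 cmd,
			     u8 *out, size_t outlen)
{
	u8 buf[I2C_SMBUS_BLOCK_MAX] = { 0 };
	int len;

	if (WARN_ON(outlen > sizeof(buf)))
		return -EINVAL;

	len = i2c_smbus_read_block_data(client, cmd, buf);
	if (len < 0)
		return len;

	memcpy(out, buf, outlen);
	return 0;
}
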
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index fb4d902c4403..dd85b16dc6f8 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -799,7 +799,7 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
else if (ic_version == 7 && etd->info.samples[1] == 0x2A)
sanity_check = ((packet[3] & 0x1c) == 0x10);
else
- sanity_check = ((packet[0] & 0x0c) == 0x04 &&
+ sanity_check = ((packet[0] & 0x08) == 0x00 &&
(packet[3] & 0x1c) == 0x10);
if (!sanity_check)
@@ -1175,6 +1175,12 @@ static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
{ }
};
+static const char * const middle_button_pnp_ids[] = {
+ "LEN2131", /* ThinkPad P52 w/ NFC */
+ "LEN2132", /* ThinkPad P52 */
+ NULL
+};
+
/*
* Set the appropriate event bits for the input subsystem
*/
@@ -1194,7 +1200,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
__clear_bit(EV_REL, dev->evbit);
__set_bit(BTN_LEFT, dev->keybit);
- if (dmi_check_system(elantech_dmi_has_middle_button))
+ if (dmi_check_system(elantech_dmi_has_middle_button) ||
+ psmouse_matches_pnp_id(psmouse, middle_button_pnp_ids))
__set_bit(BTN_MIDDLE, dev->keybit);
__set_bit(BTN_RIGHT, dev->keybit);
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 5ff5b1952be0..d3ff1fc09af7 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -192,8 +192,8 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
else
input_report_rel(dev, REL_WHEEL, -wheel);
- input_report_key(dev, BTN_SIDE, BIT(4));
- input_report_key(dev, BTN_EXTRA, BIT(5));
+ input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));
+ input_report_key(dev, BTN_EXTRA, packet[3] & BIT(5));
break;
}
break;
@@ -203,13 +203,13 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
input_report_rel(dev, REL_WHEEL, -(s8) packet[3]);
/* Extra buttons on Genius NewNet 3D */
- input_report_key(dev, BTN_SIDE, BIT(6));
- input_report_key(dev, BTN_EXTRA, BIT(7));
+ input_report_key(dev, BTN_SIDE, packet[0] & BIT(6));
+ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(7));
break;
case PSMOUSE_THINKPS:
/* Extra button on ThinkingMouse */
- input_report_key(dev, BTN_EXTRA, BIT(3));
+ input_report_key(dev, BTN_EXTRA, packet[0] & BIT(3));
/*
* Without this bit of weirdness moving up gives wildly
@@ -223,7 +223,7 @@ psmouse_ret_t psmouse_process_byte(struct psmouse *psmouse)
* Cortron PS2 Trackball reports SIDE button in the
* 4th bit of the first byte.
*/
- input_report_key(dev, BTN_SIDE, BIT(3));
+ input_report_key(dev, BTN_SIDE, packet[0] & BIT(3));
packet[0] |= BIT(3);
break;
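
The value argument of input_report_key() is interpreted as a boolean press state, so passing the constant BIT(n) reported these side/extra buttons as permanently pressed; masking the actual packet byte restores real press/release events. In isolation:

/* Wrong: BIT(4) is always non-zero, so the button reads as held forever. */
input_report_key(dev, BTN_SIDE, BIT(4));
/* Right: report the actual state carried in the packet byte. */
input_report_key(dev, BTN_SIDE, packet[3] & BIT(4));
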
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 7172b88cd064..fad2eae4a118 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -3,6 +3,7 @@
#
config RMI4_CORE
tristate "Synaptics RMI4 bus support"
+ select IRQ_DOMAIN
help
Say Y here if you want to support the Synaptics RMI4 bus. This is
required for all RMI4 device support.
diff --git a/drivers/input/rmi4/rmi_2d_sensor.c b/drivers/input/rmi4/rmi_2d_sensor.c
index 8bb866c7b985..8eeffa066022 100644
--- a/drivers/input/rmi4/rmi_2d_sensor.c
+++ b/drivers/input/rmi4/rmi_2d_sensor.c
@@ -32,15 +32,15 @@ void rmi_2d_sensor_abs_process(struct rmi_2d_sensor *sensor,
if (obj->type == RMI_2D_OBJECT_NONE)
return;
- if (axis_align->swap_axes)
- swap(obj->x, obj->y);
-
if (axis_align->flip_x)
obj->x = sensor->max_x - obj->x;
if (axis_align->flip_y)
obj->y = sensor->max_y - obj->y;
+ if (axis_align->swap_axes)
+ swap(obj->x, obj->y);
+
/*
* Here checking if X offset or y offset are specified is
* redundant. We just add the offsets or clip the values.
@@ -120,15 +120,15 @@ void rmi_2d_sensor_rel_report(struct rmi_2d_sensor *sensor, int x, int y)
x = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)x));
y = min(RMI_2D_REL_POS_MAX, max(RMI_2D_REL_POS_MIN, (int)y));
- if (axis_align->swap_axes)
- swap(x, y);
-
if (axis_align->flip_x)
x = min(RMI_2D_REL_POS_MAX, -x);
if (axis_align->flip_y)
y = min(RMI_2D_REL_POS_MAX, -y);
+ if (axis_align->swap_axes)
+ swap(x, y);
+
if (x || y) {
input_report_rel(sensor->input, REL_X, x);
input_report_rel(sensor->input, REL_Y, y);
@@ -141,17 +141,10 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
struct input_dev *input = sensor->input;
int res_x;
int res_y;
+ int max_x, max_y;
int input_flags = 0;
if (sensor->report_abs) {
- if (sensor->axis_align.swap_axes) {
- swap(sensor->max_x, sensor->max_y);
- swap(sensor->axis_align.clip_x_low,
- sensor->axis_align.clip_y_low);
- swap(sensor->axis_align.clip_x_high,
- sensor->axis_align.clip_y_high);
- }
-
sensor->min_x = sensor->axis_align.clip_x_low;
if (sensor->axis_align.clip_x_high)
sensor->max_x = min(sensor->max_x,
@@ -163,14 +156,19 @@ static void rmi_2d_sensor_set_input_params(struct rmi_2d_sensor *sensor)
sensor->axis_align.clip_y_high);
set_bit(EV_ABS, input->evbit);
- input_set_abs_params(input, ABS_MT_POSITION_X, 0, sensor->max_x,
- 0, 0);
- input_set_abs_params(input, ABS_MT_POSITION_Y, 0, sensor->max_y,
- 0, 0);
+
+ max_x = sensor->max_x;
+ max_y = sensor->max_y;
+ if (sensor->axis_align.swap_axes)
+ swap(max_x, max_y);
+ input_set_abs_params(input, ABS_MT_POSITION_X, 0, max_x, 0, 0);
+ input_set_abs_params(input, ABS_MT_POSITION_Y, 0, max_y, 0, 0);
if (sensor->x_mm && sensor->y_mm) {
res_x = (sensor->max_x - sensor->min_x) / sensor->x_mm;
res_y = (sensor->max_y - sensor->min_y) / sensor->y_mm;
+ if (sensor->axis_align.swap_axes)
+ swap(res_x, res_y);
input_abs_set_res(input, ABS_X, res_x);
input_abs_set_res(input, ABS_Y, res_y);
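
The reordering matters because flipping is defined per physical axis: flip X and flip Y within their own ranges first, then swap the axes; likewise, only local copies of the maxima and resolutions are swapped when setting input parameters, so the sensor's stored geometry is no longer mutated on every report. A minimal sketch of the corrected per-point transform, with a hypothetical helper name:

#include <linux/kernel.h>
#include <linux/rmi.h>

/* Hypothetical helper mirroring the corrected order: flip each axis in its
 * own range first, then swap. max_x/max_y are the unswapped sensor maxima. */
static void transform_point(const struct rmi_2d_axis_alignment *align,
			    u16 max_x, u16 max_y, u16 *x, u16 *y)
{
	if (align->flip_x)
		*x = max_x - *x;
	if (align->flip_y)
		*y = max_y - *y;
	if (align->swap_axes)
		swap(*x, *y);
}
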
diff --git a/drivers/input/rmi4/rmi_bus.c b/drivers/input/rmi4/rmi_bus.c
index c5fa53adba8d..bd0d5ff01b08 100644
--- a/drivers/input/rmi4/rmi_bus.c
+++ b/drivers/input/rmi4/rmi_bus.c
@@ -9,6 +9,8 @@
#include <linux/kernel.h>
#include <linux/device.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
#include <linux/list.h>
#include <linux/pm.h>
#include <linux/rmi.h>
@@ -167,6 +169,39 @@ static inline void rmi_function_of_probe(struct rmi_function *fn)
{}
#endif
+static struct irq_chip rmi_irq_chip = {
+ .name = "rmi4",
+};
+
+static int rmi_create_function_irq(struct rmi_function *fn,
+ struct rmi_function_handler *handler)
+{
+ struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
+ int i, error;
+
+ for (i = 0; i < fn->num_of_irqs; i++) {
+ set_bit(fn->irq_pos + i, fn->irq_mask);
+
+ fn->irq[i] = irq_create_mapping(drvdata->irqdomain,
+ fn->irq_pos + i);
+
+ irq_set_chip_data(fn->irq[i], fn);
+ irq_set_chip_and_handler(fn->irq[i], &rmi_irq_chip,
+ handle_simple_irq);
+ irq_set_nested_thread(fn->irq[i], 1);
+
+ error = devm_request_threaded_irq(&fn->dev, fn->irq[i], NULL,
+ handler->attention, IRQF_ONESHOT,
+ dev_name(&fn->dev), fn);
+ if (error) {
+ dev_err(&fn->dev, "Error %d registering IRQ\n", error);
+ return error;
+ }
+ }
+
+ return 0;
+}
+
static int rmi_function_probe(struct device *dev)
{
struct rmi_function *fn = to_rmi_function(dev);
@@ -178,7 +213,14 @@ static int rmi_function_probe(struct device *dev)
if (handler->probe) {
error = handler->probe(fn);
- return error;
+ if (error)
+ return error;
+ }
+
+ if (fn->num_of_irqs && handler->attention) {
+ error = rmi_create_function_irq(fn, handler);
+ if (error)
+ return error;
}
return 0;
@@ -230,12 +272,18 @@ err_put_device:
void rmi_unregister_function(struct rmi_function *fn)
{
+ int i;
+
rmi_dbg(RMI_DEBUG_CORE, &fn->dev, "Unregistering F%02X.\n",
fn->fd.function_number);
device_del(&fn->dev);
of_node_put(fn->dev.of_node);
put_device(&fn->dev);
+
+ for (i = 0; i < fn->num_of_irqs; i++)
+ irq_dispose_mapping(fn->irq[i]);
+
}
/**
diff --git a/drivers/input/rmi4/rmi_bus.h b/drivers/input/rmi4/rmi_bus.h
index b7625a9ac66a..96383eab41ba 100644
--- a/drivers/input/rmi4/rmi_bus.h
+++ b/drivers/input/rmi4/rmi_bus.h
@@ -14,6 +14,12 @@
struct rmi_device;
+/*
+ * The interrupt source count in the function descriptor can represent up to
+ * 6 interrupt sources in the normal manner.
+ */
+#define RMI_FN_MAX_IRQS 6
+
/**
* struct rmi_function - represents the implementation of an RMI4
* function for a particular device (basically, a driver for that RMI4 function)
@@ -26,6 +32,7 @@ struct rmi_device;
* @irq_pos: The position in the irq bitfield this function holds
* @irq_mask: For convenience, can be used to mask IRQ bits off during ATTN
* interrupt handling.
+ * @irq: assigned virq numbers (up to num_of_irqs)
*
* @node: entry in device's list of functions
*/
@@ -36,6 +43,7 @@ struct rmi_function {
struct list_head node;
unsigned int num_of_irqs;
+ int irq[RMI_FN_MAX_IRQS];
unsigned int irq_pos;
unsigned long irq_mask[];
};
@@ -76,7 +84,7 @@ struct rmi_function_handler {
void (*remove)(struct rmi_function *fn);
int (*config)(struct rmi_function *fn);
int (*reset)(struct rmi_function *fn);
- int (*attention)(struct rmi_function *fn, unsigned long *irq_bits);
+ irqreturn_t (*attention)(int irq, void *ctx);
int (*suspend)(struct rmi_function *fn);
int (*resume)(struct rmi_function *fn);
};
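
The prototype change is the core of this series: attention callbacks become ordinary threaded-IRQ handlers that receive the virq and the rmi_function as their context and return an irqreturn_t, instead of being handed the driver's IRQ bitmap. A sketch of a handler written against the new signature (the name and body are illustrative; rmi_bus.h is the in-tree header that declares struct rmi_function):

#include <linux/interrupt.h>
#include "rmi_bus.h"

static irqreturn_t example_fn_attention(int irq, void *ctx)
{
	struct rmi_function *fn = ctx;

	/* read this function's data registers and push input events here */
	dev_dbg(&fn->dev, "F%02X attention on irq %d\n",
		fn->fd.function_number, irq);

	return IRQ_HANDLED;
}
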
diff --git a/drivers/input/rmi4/rmi_driver.c b/drivers/input/rmi4/rmi_driver.c
index 7d29053dfb0f..fc3ab93b7aea 100644
--- a/drivers/input/rmi4/rmi_driver.c
+++ b/drivers/input/rmi4/rmi_driver.c
@@ -21,6 +21,7 @@
#include <linux/pm.h>
#include <linux/slab.h>
#include <linux/of.h>
+#include <linux/irqdomain.h>
#include <uapi/linux/input.h>
#include <linux/rmi.h>
#include "rmi_bus.h"
@@ -127,28 +128,11 @@ static int rmi_driver_process_config_requests(struct rmi_device *rmi_dev)
return 0;
}
-static void process_one_interrupt(struct rmi_driver_data *data,
- struct rmi_function *fn)
-{
- struct rmi_function_handler *fh;
-
- if (!fn || !fn->dev.driver)
- return;
-
- fh = to_rmi_function_handler(fn->dev.driver);
- if (fh->attention) {
- bitmap_and(data->fn_irq_bits, data->irq_status, fn->irq_mask,
- data->irq_count);
- if (!bitmap_empty(data->fn_irq_bits, data->irq_count))
- fh->attention(fn, data->fn_irq_bits);
- }
-}
-
static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
{
struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
struct device *dev = &rmi_dev->dev;
- struct rmi_function *entry;
+ int i;
int error;
if (!data)
@@ -173,16 +157,8 @@ static int rmi_process_interrupt_requests(struct rmi_device *rmi_dev)
*/
mutex_unlock(&data->irq_mutex);
- /*
- * It would be nice to be able to use irq_chip to handle these
- * nested IRQs. Unfortunately, most of the current customers for
- * this driver are using older kernels (3.0.x) that don't support
- * the features required for that. Once they've shifted to more
- * recent kernels (say, 3.3 and higher), this should be switched to
- * use irq_chip.
- */
- list_for_each_entry(entry, &data->function_list, node)
- process_one_interrupt(data, entry);
+ for_each_set_bit(i, data->irq_status, data->irq_count)
+ handle_nested_irq(irq_find_mapping(data->irqdomain, i));
if (data->input)
input_sync(data->input);
@@ -1001,9 +977,13 @@ EXPORT_SYMBOL_GPL(rmi_driver_resume);
static int rmi_driver_remove(struct device *dev)
{
struct rmi_device *rmi_dev = to_rmi_device(dev);
+ struct rmi_driver_data *data = dev_get_drvdata(&rmi_dev->dev);
rmi_disable_irq(rmi_dev, false);
+ irq_domain_remove(data->irqdomain);
+ data->irqdomain = NULL;
+
rmi_f34_remove_sysfs(rmi_dev);
rmi_free_function_list(rmi_dev);
@@ -1035,7 +1015,8 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
{
struct rmi_device *rmi_dev = data->rmi_dev;
struct device *dev = &rmi_dev->dev;
- int irq_count;
+ struct fwnode_handle *fwnode = rmi_dev->xport->dev->fwnode;
+ int irq_count = 0;
size_t size;
int retval;
@@ -1046,7 +1027,6 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
* being accessed.
*/
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Counting IRQs.\n", __func__);
- irq_count = 0;
data->bootloader_mode = false;
retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_count_irqs);
@@ -1058,6 +1038,15 @@ int rmi_probe_interrupts(struct rmi_driver_data *data)
if (data->bootloader_mode)
dev_warn(dev, "Device in bootloader mode.\n");
+ /* Allocate and register a linear revmap irq_domain */
+ data->irqdomain = irq_domain_create_linear(fwnode, irq_count,
+ &irq_domain_simple_ops,
+ data);
+ if (!data->irqdomain) {
+ dev_err(&rmi_dev->dev, "Failed to create IRQ domain\n");
+ return -ENOMEM;
+ }
+
data->irq_count = irq_count;
data->num_of_irq_regs = (data->irq_count + 7) / 8;
@@ -1080,10 +1069,9 @@ int rmi_init_functions(struct rmi_driver_data *data)
{
struct rmi_device *rmi_dev = data->rmi_dev;
struct device *dev = &rmi_dev->dev;
- int irq_count;
+ int irq_count = 0;
int retval;
- irq_count = 0;
rmi_dbg(RMI_DEBUG_CORE, dev, "%s: Creating functions.\n", __func__);
retval = rmi_scan_pdt(rmi_dev, &irq_count, rmi_create_function);
if (retval < 0) {
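
On the driver side, a linear irq_domain sized to the device's interrupt-source count is created over the transport's fwnode, and the ATTN path reduces to walking the status bitmap and letting handle_nested_irq() invoke the per-function threaded handlers. A condensed sketch of that create-and-dispatch shape, assuming the fwnode, IRQ count and status bitmap are already available:

#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_domain *make_domain(struct fwnode_handle *fwnode,
				      unsigned int irq_count, void *host_data)
{
	return irq_domain_create_linear(fwnode, irq_count,
					&irq_domain_simple_ops, host_data);
}

static void dispatch_status(struct irq_domain *domain,
			    unsigned long *status, unsigned int irq_count)
{
	unsigned int bit;

	for_each_set_bit(bit, status, irq_count)
		handle_nested_irq(irq_find_mapping(domain, bit));
}
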
diff --git a/drivers/input/rmi4/rmi_f01.c b/drivers/input/rmi4/rmi_f01.c
index 8a07ae147df6..4edaa14fe878 100644
--- a/drivers/input/rmi4/rmi_f01.c
+++ b/drivers/input/rmi4/rmi_f01.c
@@ -681,9 +681,9 @@ static int rmi_f01_resume(struct rmi_function *fn)
return 0;
}
-static int rmi_f01_attention(struct rmi_function *fn,
- unsigned long *irq_bits)
+static irqreturn_t rmi_f01_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
int error;
u8 device_status;
@@ -692,7 +692,7 @@ static int rmi_f01_attention(struct rmi_function *fn,
if (error) {
dev_err(&fn->dev,
"Failed to read device status: %d.\n", error);
- return error;
+ return IRQ_RETVAL(error);
}
if (RMI_F01_STATUS_BOOTLOADER(device_status))
@@ -704,11 +704,11 @@ static int rmi_f01_attention(struct rmi_function *fn,
error = rmi_dev->driver->reset_handler(rmi_dev);
if (error) {
dev_err(&fn->dev, "Device reset failed: %d\n", error);
- return error;
+ return IRQ_RETVAL(error);
}
}
- return 0;
+ return IRQ_HANDLED;
}
struct rmi_function_handler rmi_f01_handler = {
diff --git a/drivers/input/rmi4/rmi_f03.c b/drivers/input/rmi4/rmi_f03.c
index 88822196d6b7..aaa1edc95522 100644
--- a/drivers/input/rmi4/rmi_f03.c
+++ b/drivers/input/rmi4/rmi_f03.c
@@ -244,8 +244,9 @@ static int rmi_f03_config(struct rmi_function *fn)
return 0;
}
-static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f03_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f03_data *f03 = dev_get_drvdata(&fn->dev);
@@ -262,7 +263,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
/* First grab the data passed by the transport device */
if (drvdata->attn_data.size < ob_len) {
dev_warn(&fn->dev, "F03 interrupted, but data is missing!\n");
- return 0;
+ return IRQ_HANDLED;
}
memcpy(obs, drvdata->attn_data.data, ob_len);
@@ -277,7 +278,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
"%s: Failed to read F03 output buffers: %d\n",
__func__, error);
serio_interrupt(f03->serio, 0, SERIO_TIMEOUT);
- return error;
+ return IRQ_RETVAL(error);
}
}
@@ -303,7 +304,7 @@ static int rmi_f03_attention(struct rmi_function *fn, unsigned long *irq_bits)
serio_interrupt(f03->serio, ob_data, serio_flags);
}
- return 0;
+ return IRQ_HANDLED;
}
static void rmi_f03_remove(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f11.c b/drivers/input/rmi4/rmi_f11.c
index 12a233251793..df64d6aed4f7 100644
--- a/drivers/input/rmi4/rmi_f11.c
+++ b/drivers/input/rmi4/rmi_f11.c
@@ -570,9 +570,7 @@ static inline u8 rmi_f11_parse_finger_state(const u8 *f_state, u8 n_finger)
}
static void rmi_f11_finger_handler(struct f11_data *f11,
- struct rmi_2d_sensor *sensor,
- unsigned long *irq_bits, int num_irq_regs,
- int size)
+ struct rmi_2d_sensor *sensor, int size)
{
const u8 *f_state = f11->data.f_state;
u8 finger_state;
@@ -581,12 +579,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
int rel_fingers;
int abs_size = sensor->nbr_fingers * RMI_F11_ABS_BYTES;
- int abs_bits = bitmap_and(f11->result_bits, irq_bits, f11->abs_mask,
- num_irq_regs * 8);
- int rel_bits = bitmap_and(f11->result_bits, irq_bits, f11->rel_mask,
- num_irq_regs * 8);
-
- if (abs_bits) {
+ if (sensor->report_abs) {
if (abs_size > size)
abs_fingers = size / RMI_F11_ABS_BYTES;
else
@@ -604,19 +597,7 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
rmi_f11_abs_pos_process(f11, sensor, &sensor->objs[i],
finger_state, i);
}
- }
- if (rel_bits) {
- if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
- rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
- else
- rel_fingers = sensor->nbr_fingers;
-
- for (i = 0; i < rel_fingers; i++)
- rmi_f11_rel_pos_report(f11, i);
- }
-
- if (abs_bits) {
/*
* the absolute part is made in 2 parts to allow the kernel
* tracking to take place.
@@ -638,7 +619,16 @@ static void rmi_f11_finger_handler(struct f11_data *f11,
}
input_mt_sync_frame(sensor->input);
+ } else if (sensor->report_rel) {
+ if ((abs_size + sensor->nbr_fingers * RMI_F11_REL_BYTES) > size)
+ rel_fingers = (size - abs_size) / RMI_F11_REL_BYTES;
+ else
+ rel_fingers = sensor->nbr_fingers;
+
+ for (i = 0; i < rel_fingers; i++)
+ rmi_f11_rel_pos_report(f11, i);
}
+
}
static int f11_2d_construct_data(struct f11_data *f11)
@@ -1276,8 +1266,9 @@ static int rmi_f11_config(struct rmi_function *fn)
return 0;
}
-static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f11_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f11_data *f11 = dev_get_drvdata(&fn->dev);
@@ -1303,13 +1294,12 @@ static int rmi_f11_attention(struct rmi_function *fn, unsigned long *irq_bits)
data_base_addr, f11->sensor.data_pkt,
f11->sensor.pkt_size);
if (error < 0)
- return error;
+ return IRQ_RETVAL(error);
}
- rmi_f11_finger_handler(f11, &f11->sensor, irq_bits,
- drvdata->num_of_irq_regs, valid_bytes);
+ rmi_f11_finger_handler(f11, &f11->sensor, valid_bytes);
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f11_resume(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f12.c b/drivers/input/rmi4/rmi_f12.c
index a3d1aa88f2a9..5c7f48915779 100644
--- a/drivers/input/rmi4/rmi_f12.c
+++ b/drivers/input/rmi4/rmi_f12.c
@@ -197,10 +197,10 @@ static void rmi_f12_process_objects(struct f12_data *f12, u8 *data1, int size)
rmi_2d_sensor_abs_report(sensor, &sensor->objs[i], i);
}
-static int rmi_f12_attention(struct rmi_function *fn,
- unsigned long *irq_nr_regs)
+static irqreturn_t rmi_f12_attention(int irq, void *ctx)
{
int retval;
+ struct rmi_function *fn = ctx;
struct rmi_device *rmi_dev = fn->rmi_dev;
struct rmi_driver_data *drvdata = dev_get_drvdata(&rmi_dev->dev);
struct f12_data *f12 = dev_get_drvdata(&fn->dev);
@@ -222,7 +222,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
if (retval < 0) {
dev_err(&fn->dev, "Failed to read object data. Code: %d.\n",
retval);
- return retval;
+ return IRQ_RETVAL(retval);
}
}
@@ -232,7 +232,7 @@ static int rmi_f12_attention(struct rmi_function *fn,
input_mt_sync_frame(sensor->input);
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f12_write_control_regs(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f30.c b/drivers/input/rmi4/rmi_f30.c
index 82e0f0d43d55..5e3ed5ac0c3e 100644
--- a/drivers/input/rmi4/rmi_f30.c
+++ b/drivers/input/rmi4/rmi_f30.c
@@ -122,8 +122,9 @@ static void rmi_f30_report_button(struct rmi_function *fn,
}
}
-static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f30_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct f30_data *f30 = dev_get_drvdata(&fn->dev);
struct rmi_driver_data *drvdata = dev_get_drvdata(&fn->rmi_dev->dev);
int error;
@@ -134,7 +135,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
if (drvdata->attn_data.size < f30->register_count) {
dev_warn(&fn->dev,
"F30 interrupted, but data is missing\n");
- return 0;
+ return IRQ_HANDLED;
}
memcpy(f30->data_regs, drvdata->attn_data.data,
f30->register_count);
@@ -147,7 +148,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
dev_err(&fn->dev,
"%s: Failed to read F30 data registers: %d\n",
__func__, error);
- return error;
+ return IRQ_RETVAL(error);
}
}
@@ -159,7 +160,7 @@ static int rmi_f30_attention(struct rmi_function *fn, unsigned long *irq_bits)
rmi_f03_commit_buttons(f30->f03);
}
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f30_config(struct rmi_function *fn)
diff --git a/drivers/input/rmi4/rmi_f34.c b/drivers/input/rmi4/rmi_f34.c
index f1f5ac539d5d..87a7d4ba382d 100644
--- a/drivers/input/rmi4/rmi_f34.c
+++ b/drivers/input/rmi4/rmi_f34.c
@@ -100,8 +100,9 @@ static int rmi_f34_command(struct f34_data *f34, u8 command,
return 0;
}
-static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
+static irqreturn_t rmi_f34_attention(int irq, void *ctx)
{
+ struct rmi_function *fn = ctx;
struct f34_data *f34 = dev_get_drvdata(&fn->dev);
int ret;
u8 status;
@@ -126,7 +127,7 @@ static int rmi_f34_attention(struct rmi_function *fn, unsigned long *irq_bits)
complete(&f34->v7.cmd_done);
}
- return 0;
+ return IRQ_HANDLED;
}
static int rmi_f34_write_blocks(struct f34_data *f34, const void *data,
diff --git a/drivers/input/rmi4/rmi_f54.c b/drivers/input/rmi4/rmi_f54.c
index e8a59d164019..a6f515bcab22 100644
--- a/drivers/input/rmi4/rmi_f54.c
+++ b/drivers/input/rmi4/rmi_f54.c
@@ -610,11 +610,6 @@ error:
mutex_unlock(&f54->data_mutex);
}
-static int rmi_f54_attention(struct rmi_function *fn, unsigned long *irqbits)
-{
- return 0;
-}
-
static int rmi_f54_config(struct rmi_function *fn)
{
struct rmi_driver *drv = fn->rmi_dev->driver;
@@ -756,6 +751,5 @@ struct rmi_function_handler rmi_f54_handler = {
.func = 0x54,
.probe = rmi_f54_probe,
.config = rmi_f54_config,
- .attention = rmi_f54_attention,
.remove = rmi_f54_remove,
};
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index b353d494ad40..136f6e7bf797 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -527,6 +527,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "N24_25BU"),
},
},
+ {
+ /* Lenovo LaVie Z */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo LaVie Z"),
+ },
+ },
{ }
};
diff --git a/drivers/input/touchscreen/silead.c b/drivers/input/touchscreen/silead.c
index ff7043f74a3d..d196ac3d8b8c 100644
--- a/drivers/input/touchscreen/silead.c
+++ b/drivers/input/touchscreen/silead.c
@@ -603,6 +603,7 @@ static const struct acpi_device_id silead_ts_acpi_match[] = {
{ "GSL3692", 0 },
{ "MSSL1680", 0 },
{ "MSSL0001", 0 },
+ { "MSSL0002", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, silead_ts_acpi_match);
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index e055d228bfb9..689ffe538370 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -142,7 +142,6 @@ config DMAR_TABLE
config INTEL_IOMMU
bool "Support for Intel IOMMU using DMA Remapping Devices"
depends on PCI_MSI && ACPI && (X86 || IA64_GENERIC)
- select DMA_DIRECT_OPS
select IOMMU_API
select IOMMU_IOVA
select NEED_DMA_MAP_STATE
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 14e4b3722428..115ff26e9ced 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -31,7 +31,6 @@
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/dma-mapping.h>
-#include <linux/dma-direct.h>
#include <linux/mempool.h>
#include <linux/memory.h>
#include <linux/cpu.h>
@@ -485,14 +484,37 @@ static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int intel_iommu_ecs = 1;
+static int intel_iommu_pasid28;
static int iommu_identity_mapping;
#define IDENTMAP_ALL 1
#define IDENTMAP_GFX 2
#define IDENTMAP_AZALIA 4
-#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap))
-#define pasid_enabled(iommu) (ecs_enabled(iommu) && ecap_pasid(iommu->ecap))
+/* Broadwell and Skylake have broken ECS support — normal so-called "second
+ * level" translation of DMA requests-without-PASID doesn't actually happen
+ * unless you also set the NESTE bit in an extended context-entry. Which of
+ * course means that SVM doesn't work because it's trying to do nested
+ * translation of the physical addresses it finds in the process page tables,
+ * through the IOVA->phys mapping found in the "second level" page tables.
+ *
+ * The VT-d specification was retroactively changed to change the definition
+ * of the capability bits and pretend that Broadwell/Skylake never happened...
+ * but unfortunately the wrong bit was changed. It's ECS which is broken, but
+ * for some reason it was the PASID capability bit which was redefined (from
+ * bit 28 on BDW/SKL to bit 40 in future).
+ *
+ * So our test for ECS needs to eschew those implementations which set the old
+ * PASID capability bit 28, since those are the ones on which ECS is broken.
+ * Unless we are working around the 'pasid28' limitations, that is, by putting
+ * the device into passthrough mode for normal DMA and thus masking the bug.
+ */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+ (intel_iommu_pasid28 || !ecap_broken_pasid(iommu->ecap)))
+/* PASID support is thus enabled if ECS is enabled and *either* the old
+ * or the new capability bit is set. */
+#define pasid_enabled(iommu) (ecs_enabled(iommu) && \
+ (ecap_pasid(iommu->ecap) || ecap_broken_pasid(iommu->ecap)))
int intel_iommu_gfx_mapped;
EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -555,6 +577,11 @@ static int __init intel_iommu_setup(char *str)
printk(KERN_INFO
"Intel-IOMMU: disable extended context table support\n");
intel_iommu_ecs = 0;
+ } else if (!strncmp(str, "pasid28", 7)) {
+ printk(KERN_INFO
+ "Intel-IOMMU: enable pre-production PASID support\n");
+ intel_iommu_pasid28 = 1;
+ iommu_identity_mapping |= IDENTMAP_GFX;
} else if (!strncmp(str, "tboot_noforce", 13)) {
printk(KERN_INFO
"Intel-IOMMU: not forcing on after tboot. This could expose security risk for tboot\n");
@@ -3713,30 +3740,61 @@ static void *intel_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *dma_handle, gfp_t flags,
unsigned long attrs)
{
- void *vaddr;
+ struct page *page = NULL;
+ int order;
- vaddr = dma_direct_alloc(dev, size, dma_handle, flags, attrs);
- if (iommu_no_mapping(dev) || !vaddr)
- return vaddr;
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
- *dma_handle = __intel_map_single(dev, virt_to_phys(vaddr),
- PAGE_ALIGN(size), DMA_BIDIRECTIONAL,
- dev->coherent_dma_mask);
- if (!*dma_handle)
- goto out_free_pages;
- return vaddr;
+ if (!iommu_no_mapping(dev))
+ flags &= ~(GFP_DMA | GFP_DMA32);
+ else if (dev->coherent_dma_mask < dma_get_required_mask(dev)) {
+ if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ flags |= GFP_DMA;
+ else
+ flags |= GFP_DMA32;
+ }
+
+ if (gfpflags_allow_blocking(flags)) {
+ unsigned int count = size >> PAGE_SHIFT;
+
+ page = dma_alloc_from_contiguous(dev, count, order, flags);
+ if (page && iommu_no_mapping(dev) &&
+ page_to_phys(page) + size > dev->coherent_dma_mask) {
+ dma_release_from_contiguous(dev, page, count);
+ page = NULL;
+ }
+ }
+
+ if (!page)
+ page = alloc_pages(flags, order);
+ if (!page)
+ return NULL;
+ memset(page_address(page), 0, size);
+
+ *dma_handle = __intel_map_single(dev, page_to_phys(page), size,
+ DMA_BIDIRECTIONAL,
+ dev->coherent_dma_mask);
+ if (*dma_handle)
+ return page_address(page);
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, order);
-out_free_pages:
- dma_direct_free(dev, size, vaddr, *dma_handle, attrs);
return NULL;
}
static void intel_free_coherent(struct device *dev, size_t size, void *vaddr,
dma_addr_t dma_handle, unsigned long attrs)
{
- if (!iommu_no_mapping(dev))
- intel_unmap(dev, dma_handle, PAGE_ALIGN(size));
- dma_direct_free(dev, size, vaddr, dma_handle, attrs);
+ int order;
+ struct page *page = virt_to_page(vaddr);
+
+ size = PAGE_ALIGN(size);
+ order = get_order(size);
+
+ intel_unmap(dev, dma_handle, size);
+ if (!dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT))
+ __free_pages(page, order);
}
static void intel_unmap_sg(struct device *dev, struct scatterlist *sglist,
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 0f52d44b3f69..f5fe0100f9ff 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -199,7 +199,7 @@ static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
fail:
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
- gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+ gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
return err;
}
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5377d7e2afba..d7842d312d3e 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -182,6 +182,22 @@ static struct its_collection *dev_event_to_col(struct its_device *its_dev,
return its->collections + its_dev->event_map.col_map[event];
}
+static struct its_collection *valid_col(struct its_collection *col)
+{
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(0, 15)))
+ return NULL;
+
+ return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+ if (valid_col(its->collections + vpe->col_idx))
+ return vpe;
+
+ return NULL;
+}
+
/*
* ITS command descriptors - parameters to be encoded in a command
* block.
@@ -439,7 +455,7 @@ static struct its_collection *its_build_mapti_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@ static struct its_collection *its_build_movi_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@ static struct its_collection *its_build_discard_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@ static struct its_collection *its_build_inv_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@ static struct its_collection *its_build_int_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@ static struct its_collection *its_build_clear_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@ static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return desc->its_vinvall_cmd.vpe;
+ return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}
static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@ static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return desc->its_vmapp_cmd.vpe;
+ return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@ static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return desc->its_vmapti_cmd.vpe;
+ return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}
static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@ static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return desc->its_vmovi_cmd.vpe;
+ return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}
static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@ static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
its_fixup_cmd(cmd);
- return desc->its_vmovp_cmd.vpe;
+ return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1824,11 +1840,16 @@ static int its_alloc_tables(struct its_node *its)
static int its_alloc_collections(struct its_node *its)
{
+ int i;
+
its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
GFP_KERNEL);
if (!its->collections)
return -ENOMEM;
+ for (i = 0; i < nr_cpu_ids; i++)
+ its->collections[i].target_address = ~0ULL;
+
return 0;
}
@@ -2310,7 +2331,14 @@ static int its_irq_domain_activate(struct irq_domain *domain,
cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- cpu = cpumask_first(cpu_mask);
+ cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+ return -EINVAL;
+
+ cpu = cpumask_first(cpu_online_mask);
+ }
+
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -3399,6 +3427,16 @@ static int redist_disable_lpis(void)
u64 timeout = USEC_PER_SEC;
u64 val;
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world. Detect this case
+ * by checking the allocation of the pending table for the
+ * current CPU.
+ */
+ if (gic_data_rdist()->pend_page)
+ return 0;
+
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
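
Two defensive changes are combined in this file: collections start out with target_address set to ~0ULL so the new valid_col()/valid_vpe() helpers can WARN and refuse to build a command aimed at a redistributor that was never mapped, and LPIs are only ever bound to CPUs that are actually online. The CPU-selection pattern in isolation, with illustrative names:

#include <linux/cpumask.h>

/* Prefer a CPU from the device's NUMA node, but never an offline one. */
static unsigned int pick_target_cpu(const struct cpumask *node_mask)
{
	unsigned int cpu = cpumask_first_and(node_mask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_online_mask);

	return cpu;
}
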
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 1ec3bfe56693..c671b3212010 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -93,8 +93,12 @@ static void ls_scfg_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = lower_32_bits(msi_data->msiir_addr);
msg->data = data->hwirq;
- if (msi_affinity_flag)
- msg->data |= cpumask_first(data->common->affinity);
+ if (msi_affinity_flag) {
+ const struct cpumask *mask;
+
+ mask = irq_data_get_effective_affinity_mask(data);
+ msg->data |= cpumask_first(mask);
+ }
iommu_dma_map_msi_msg(data->irq, msg);
}
@@ -121,7 +125,7 @@ static int ls_scfg_msi_set_affinity(struct irq_data *irq_data,
return -EINVAL;
}
- cpumask_copy(irq_data->common->affinity, mask);
+ irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index 98f90aadd141..18c0a1281914 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -588,7 +588,7 @@ static const struct proto_ops data_sock_ops = {
.getname = data_sock_getname,
.sendmsg = mISDN_sock_sendmsg,
.recvmsg = mISDN_sock_recvmsg,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = data_sock_setsockopt,
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 10c08982185a..9c03f35d9df1 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK && HAS_DMA && PCI
+ depends on BLOCK && PCI
select BLK_DEV_NVME
help
Say Y here to get to enable Open-channel SSDs.
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index ab13fcec3fca..75df4c9d8b54 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -588,7 +588,7 @@ static const char *raid10_md_layout_to_format(int layout)
}
/* Return md raid10 algorithm for @name */
-static const int raid10_name_to_format(const char *name)
+static int raid10_name_to_format(const char *name)
{
if (!strcasecmp(name, "near"))
return ALGORITHM_RAID10_NEAR;
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 938766794c2e..3d0e2c198f06 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -885,9 +885,7 @@ EXPORT_SYMBOL_GPL(dm_table_set_type);
static int device_supports_dax(struct dm_target *ti, struct dm_dev *dev,
sector_t start, sector_t len, void *data)
{
- struct request_queue *q = bdev_get_queue(dev->bdev);
-
- return q && blk_queue_dax(q);
+ return bdev_dax_supported(dev->bdev, PAGE_SIZE);
}
static bool dm_table_supports_dax(struct dm_table *t)
@@ -1907,6 +1905,9 @@ void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
if (dm_table_supports_dax(t))
blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ else
+ blk_queue_flag_clear(QUEUE_FLAG_DAX, q);
+
if (dm_table_supports_dax_write_cache(t))
dax_write_cache(t->md->dax_dev, true);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 36ef284ad086..72142021b5c9 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -776,7 +776,6 @@ static int __write_changed_details(struct dm_pool_metadata *pmd)
static int __commit_transaction(struct dm_pool_metadata *pmd)
{
int r;
- size_t metadata_len, data_len;
struct thin_disk_superblock *disk_super;
struct dm_block *sblock;
@@ -797,14 +796,6 @@ static int __commit_transaction(struct dm_pool_metadata *pmd)
if (r < 0)
return r;
- r = dm_sm_root_size(pmd->metadata_sm, &metadata_len);
- if (r < 0)
- return r;
-
- r = dm_sm_root_size(pmd->data_sm, &data_len);
- if (r < 0)
- return r;
-
r = save_sm_roots(pmd);
if (r < 0)
return r;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 7945238df1c0..b900723bbd0f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1386,6 +1386,8 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+static void requeue_bios(struct pool *pool);
+
static void check_for_space(struct pool *pool)
{
int r;
@@ -1398,8 +1400,10 @@ static void check_for_space(struct pool *pool)
if (r)
return;
- if (nr_free)
+ if (nr_free) {
set_pool_mode(pool, PM_WRITE);
+ requeue_bios(pool);
+ }
}
/*
@@ -1476,7 +1480,10 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
r = dm_pool_alloc_data_block(pool->pmd, result);
if (r) {
- metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
+ if (r == -ENOSPC)
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
+ else
+ metadata_operation_failed(pool, "dm_pool_alloc_data_block", r);
return r;
}
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index 5961c7794ef3..87107c995cb5 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -136,6 +136,7 @@ struct dm_writecache {
struct dm_target *ti;
struct dm_dev *dev;
struct dm_dev *ssd_dev;
+ sector_t start_sector;
void *memory_map;
uint64_t memory_map_size;
size_t metadata_sectors;
@@ -259,7 +260,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
if (da != p) {
long i;
wc->memory_map = NULL;
- pages = kvmalloc(p * sizeof(struct page *), GFP_KERNEL);
+ pages = kvmalloc_array(p, sizeof(struct page *), GFP_KERNEL);
if (!pages) {
r = -ENOMEM;
goto err2;
@@ -293,6 +294,10 @@ static int persistent_memory_claim(struct dm_writecache *wc)
}
dax_read_unlock(id);
+
+ wc->memory_map += (size_t)wc->start_sector << SECTOR_SHIFT;
+ wc->memory_map_size -= (size_t)wc->start_sector << SECTOR_SHIFT;
+
return 0;
err3:
kvfree(pages);
@@ -311,7 +316,7 @@ static int persistent_memory_claim(struct dm_writecache *wc)
static void persistent_memory_release(struct dm_writecache *wc)
{
if (wc->memory_vmapped)
- vunmap(wc->memory_map);
+ vunmap(wc->memory_map - ((size_t)wc->start_sector << SECTOR_SHIFT));
}
static struct page *persistent_memory_page(void *addr)
@@ -359,7 +364,7 @@ static void *memory_data(struct dm_writecache *wc, struct wc_entry *e)
static sector_t cache_sector(struct dm_writecache *wc, struct wc_entry *e)
{
- return wc->metadata_sectors +
+ return wc->start_sector + wc->metadata_sectors +
((sector_t)e->index << (wc->block_size_bits - SECTOR_SHIFT));
}
@@ -471,6 +476,7 @@ static void ssd_commit_flushed(struct dm_writecache *wc)
if (unlikely(region.sector + region.count > wc->metadata_sectors))
region.count = wc->metadata_sectors - region.sector;
+ region.sector += wc->start_sector;
atomic_inc(&endio.count);
req.bi_op = REQ_OP_WRITE;
req.bi_op_flags = REQ_SYNC;
@@ -859,7 +865,7 @@ static int writecache_alloc_entries(struct dm_writecache *wc)
if (wc->entries)
return 0;
- wc->entries = vmalloc(sizeof(struct wc_entry) * wc->n_blocks);
+ wc->entries = vmalloc(array_size(sizeof(struct wc_entry), wc->n_blocks));
if (!wc->entries)
return -ENOMEM;
for (b = 0; b < wc->n_blocks; b++) {
@@ -1481,9 +1487,9 @@ static void __writecache_writeback_pmem(struct dm_writecache *wc, struct writeba
wb->bio.bi_iter.bi_sector = read_original_sector(wc, e);
wb->page_offset = PAGE_SIZE;
if (max_pages <= WB_LIST_INLINE ||
- unlikely(!(wb->wc_list = kmalloc(max_pages * sizeof(struct wc_entry *),
- GFP_NOIO | __GFP_NORETRY |
- __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
+ unlikely(!(wb->wc_list = kmalloc_array(max_pages, sizeof(struct wc_entry *),
+ GFP_NOIO | __GFP_NORETRY |
+ __GFP_NOMEMALLOC | __GFP_NOWARN)))) {
wb->wc_list = wb->wc_list_inline;
max_pages = WB_LIST_INLINE;
}
@@ -1946,14 +1952,6 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
wc->memory_map_size = i_size_read(wc->ssd_dev->bdev->bd_inode);
- if (WC_MODE_PMEM(wc)) {
- r = persistent_memory_claim(wc);
- if (r) {
- ti->error = "Unable to map persistent memory for cache";
- goto bad;
- }
- }
-
/*
* Parse the cache block size
*/
@@ -1982,7 +1980,16 @@ static int writecache_ctr(struct dm_target *ti, unsigned argc, char **argv)
while (opt_params) {
string = dm_shift_arg(&as), opt_params--;
- if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
+ if (!strcasecmp(string, "start_sector") && opt_params >= 1) {
+ unsigned long long start_sector;
+ string = dm_shift_arg(&as), opt_params--;
+ if (sscanf(string, "%llu%c", &start_sector, &dummy) != 1)
+ goto invalid_optional;
+ wc->start_sector = start_sector;
+ if (wc->start_sector != start_sector ||
+ wc->start_sector >= wc->memory_map_size >> SECTOR_SHIFT)
+ goto invalid_optional;
+ } else if (!strcasecmp(string, "high_watermark") && opt_params >= 1) {
string = dm_shift_arg(&as), opt_params--;
if (sscanf(string, "%d%c", &high_wm_percent, &dummy) != 1)
goto invalid_optional;
@@ -2039,12 +2046,20 @@ invalid_optional:
goto bad;
}
- if (!WC_MODE_PMEM(wc)) {
+ if (WC_MODE_PMEM(wc)) {
+ r = persistent_memory_claim(wc);
+ if (r) {
+ ti->error = "Unable to map persistent memory for cache";
+ goto bad;
+ }
+ } else {
struct dm_io_region region;
struct dm_io_request req;
size_t n_blocks, n_metadata_blocks;
uint64_t n_bitmap_bits;
+ wc->memory_map_size -= (uint64_t)wc->start_sector << SECTOR_SHIFT;
+
bio_list_init(&wc->flush_list);
wc->flush_thread = kthread_create(writecache_flush_thread, wc, "dm_writecache_flush");
if (IS_ERR(wc->flush_thread)) {
@@ -2097,7 +2112,7 @@ invalid_optional:
}
region.bdev = wc->ssd_dev->bdev;
- region.sector = 0;
+ region.sector = wc->start_sector;
region.count = wc->metadata_sectors;
req.bi_op = REQ_OP_READ;
req.bi_op_flags = REQ_SYNC;
@@ -2265,7 +2280,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
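
The optional "start_sector <n>" argument (together with the version bump to 1.1.0) lets the metadata and cache blocks begin at an offset into the cache device rather than at sector 0; the claim, I/O and cache_sector() paths are all shifted accordingly. Going by the documented table layout (writecache <p|s> <origin dev> <cache dev> <block size> <#opt args> <args...>), a table line using it might look like the following, where the device names and sizes are placeholders:

	0 409600 writecache s /dev/mapper/origin /dev/sdb 4096 2 start_sector 2048

The offset is given in 512-byte sectors and must lie inside the cache device, as the new range check in the constructor enforces.
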
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 3c0e45f4dcf5..a44183ff4be0 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -787,7 +787,7 @@ static int dmz_ctr(struct dm_target *ti, unsigned int argc, char **argv)
/* Chunk BIO work */
mutex_init(&dmz->chunk_lock);
- INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_KERNEL);
+ INIT_RADIX_TREE(&dmz->chunk_rxtree, GFP_NOIO);
dmz->chunk_wq = alloc_workqueue("dmz_cwq_%s", WQ_MEM_RECLAIM | WQ_UNBOUND,
0, dev->name);
if (!dmz->chunk_wq) {
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index e65429a29c06..b0dd7027848b 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1056,8 +1056,7 @@ static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
if (len < 1)
goto out;
nr_pages = min(len, nr_pages);
- if (ti->type->direct_access)
- ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
+ ret = ti->type->direct_access(ti, pgoff, nr_pages, kaddr, pfn);
out:
dm_put_live_table(md, srcu_idx);
@@ -1606,10 +1605,9 @@ static blk_qc_t __split_and_process_bio(struct mapped_device *md,
* the usage of io->orig_bio in dm_remap_zone_report()
* won't be affected by this reassignment.
*/
- struct bio *b = bio_clone_bioset(bio, GFP_NOIO,
- &md->queue->bio_split);
+ struct bio *b = bio_split(bio, bio_sectors(bio) - ci.sector_count,
+ GFP_NOIO, &md->queue->bio_split);
ci.io->orig_bio = b;
- bio_advance(bio, (bio_sectors(bio) - ci.sector_count) << 9);
bio_chain(b, bio);
ret = generic_make_request(bio);
break;
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 29b0cd9ec951..994aed2f9dff 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5547,7 +5547,8 @@ int md_run(struct mddev *mddev)
else
pr_warn("md: personality for level %s is not loaded!\n",
mddev->clevel);
- return -EINVAL;
+ err = -EINVAL;
+ goto abort;
}
spin_unlock(&pers_lock);
if (mddev->level != pers->level) {
@@ -5560,7 +5561,8 @@ int md_run(struct mddev *mddev)
pers->start_reshape == NULL) {
/* This personality cannot handle reshaping... */
module_put(pers->owner);
- return -EINVAL;
+ err = -EINVAL;
+ goto abort;
}
if (pers->sync_request) {
@@ -5629,7 +5631,7 @@ int md_run(struct mddev *mddev)
mddev->private = NULL;
module_put(pers->owner);
bitmap_destroy(mddev);
- return err;
+ goto abort;
}
if (mddev->queue) {
bool nonrot = true;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 478cf446827f..35bd3a62451b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3893,6 +3893,13 @@ static int raid10_run(struct mddev *mddev)
disk->rdev->saved_raid_disk < 0)
conf->fullsync = 1;
}
+
+ if (disk->replacement &&
+ !test_bit(In_sync, &disk->replacement->flags) &&
+ disk->replacement->saved_raid_disk < 0) {
+ conf->fullsync = 1;
+ }
+
disk->recovery_disabled = mddev->recovery_disabled - 1;
}
diff --git a/drivers/media/rc/bpf-lirc.c b/drivers/media/rc/bpf-lirc.c
index 40826bba06b6..fcfab6635f9c 100644
--- a/drivers/media/rc/bpf-lirc.c
+++ b/drivers/media/rc/bpf-lirc.c
@@ -207,29 +207,19 @@ void lirc_bpf_free(struct rc_dev *rcdev)
bpf_prog_array_free(rcdev->raw->progs);
}
-int lirc_prog_attach(const union bpf_attr *attr)
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog)
{
- struct bpf_prog *prog;
struct rc_dev *rcdev;
int ret;
if (attr->attach_flags)
return -EINVAL;
- prog = bpf_prog_get_type(attr->attach_bpf_fd,
- BPF_PROG_TYPE_LIRC_MODE2);
- if (IS_ERR(prog))
- return PTR_ERR(prog);
-
rcdev = rc_dev_get_from_fd(attr->target_fd);
- if (IS_ERR(rcdev)) {
- bpf_prog_put(prog);
+ if (IS_ERR(rcdev))
return PTR_ERR(rcdev);
- }
ret = lirc_bpf_attach(rcdev, prog);
- if (ret)
- bpf_prog_put(prog);
put_device(&rcdev->dev);
diff --git a/drivers/misc/cxl/api.c b/drivers/misc/cxl/api.c
index 753b1a698fc4..6b16946f9b05 100644
--- a/drivers/misc/cxl/api.c
+++ b/drivers/misc/cxl/api.c
@@ -103,15 +103,15 @@ static struct file *cxl_getfile(const char *name,
d_instantiate(path.dentry, inode);
file = alloc_file(&path, OPEN_FMODE(flags), fops);
- if (IS_ERR(file))
- goto err_dput;
+ if (IS_ERR(file)) {
+ path_put(&path);
+ goto err_fs;
+ }
file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
return file;
-err_dput:
- path_put(&path);
err_inode:
iput(inode);
err_fs:
diff --git a/drivers/misc/ibmasm/ibmasmfs.c b/drivers/misc/ibmasm/ibmasmfs.c
index e05c3245930a..fa840666bdd1 100644
--- a/drivers/misc/ibmasm/ibmasmfs.c
+++ b/drivers/misc/ibmasm/ibmasmfs.c
@@ -507,35 +507,14 @@ static int remote_settings_file_close(struct inode *inode, struct file *file)
static ssize_t remote_settings_file_read(struct file *file, char __user *buf, size_t count, loff_t *offset)
{
void __iomem *address = (void __iomem *)file->private_data;
- unsigned char *page;
- int retval;
int len = 0;
unsigned int value;
-
- if (*offset < 0)
- return -EINVAL;
- if (count == 0 || count > 1024)
- return 0;
- if (*offset != 0)
- return 0;
-
- page = (unsigned char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
+ char lbuf[20];
value = readl(address);
- len = sprintf(page, "%d\n", value);
-
- if (copy_to_user(buf, page, len)) {
- retval = -EFAULT;
- goto exit;
- }
- *offset += len;
- retval = len;
+ len = snprintf(lbuf, sizeof(lbuf), "%d\n", value);
-exit:
- free_page((unsigned long)page);
- return retval;
+ return simple_read_from_buffer(buf, count, offset, lbuf, len);
}
static ssize_t remote_settings_file_write(struct file *file, const char __user *ubuff, size_t count, loff_t *offset)
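
The rewritten read handler replaces the hand-rolled page allocation, copy_to_user() and offset bookkeeping with simple_read_from_buffer(), which clamps the count against the available data and advances *offset itself. A minimal sketch of a read fop built on the same helper (the function name and the value being formatted are illustrative):

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/uaccess.h>

static ssize_t my_value_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	char tmp[20];
	int len = scnprintf(tmp, sizeof(tmp), "%d\n", 42);

	return simple_read_from_buffer(buf, count, ppos, tmp, len);
}
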
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index b0b8f18a85e3..6649f0d56d2f 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -310,8 +310,11 @@ int mei_irq_read_handler(struct mei_device *dev,
if (&cl->link == &dev->file_list) {
/* A message for not connected fixed address clients
* should be silently discarded
+ * On power down, clients may have been force-cleaned;
+ * silently discard such messages as well
*/
- if (hdr_is_fixed(mei_hdr)) {
+ if (hdr_is_fixed(mei_hdr) ||
+ dev->dev_state == MEI_DEV_POWER_DOWN) {
mei_irq_discard_msg(dev, mei_hdr);
ret = 0;
goto reset_slots;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index efd733472a35..56c6f79a5c5a 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -467,7 +467,7 @@ static int vmballoon_send_batched_lock(struct vmballoon *b,
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
unsigned long status;
- unsigned long pfn = page_to_pfn(b->page);
+ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
STATS_INC(b->stats.lock[is_2m_pages]);
@@ -515,7 +515,7 @@ static bool vmballoon_send_batched_unlock(struct vmballoon *b,
unsigned int num_pages, bool is_2m_pages, unsigned int *target)
{
unsigned long status;
- unsigned long pfn = page_to_pfn(b->page);
+ unsigned long pfn = PHYS_PFN(virt_to_phys(b->batch_page));
STATS_INC(b->stats.unlock[is_2m_pages]);
diff --git a/drivers/mmc/core/slot-gpio.c b/drivers/mmc/core/slot-gpio.c
index ef05e0039378..2a833686784b 100644
--- a/drivers/mmc/core/slot-gpio.c
+++ b/drivers/mmc/core/slot-gpio.c
@@ -27,8 +27,8 @@ struct mmc_gpio {
bool override_cd_active_level;
irqreturn_t (*cd_gpio_isr)(int irq, void *dev_id);
char *ro_label;
- char cd_label[0];
u32 cd_debounce_delay_ms;
+ char cd_label[];
};
static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 623f4d27fa01..80dc2fd6576c 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -1065,8 +1065,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
* It's used when HS400 mode is enabled.
*/
if (data->flags & MMC_DATA_WRITE &&
- !(host->timing != MMC_TIMING_MMC_HS400))
- return;
+ host->timing != MMC_TIMING_MMC_HS400)
+ goto disable;
if (data->flags & MMC_DATA_WRITE)
enable = SDMMC_CARD_WR_THR_EN;
@@ -1074,7 +1074,8 @@ static void dw_mci_ctrl_thld(struct dw_mci *host, struct mmc_data *data)
enable = SDMMC_CARD_RD_THR_EN;
if (host->timing != MMC_TIMING_MMC_HS200 &&
- host->timing != MMC_TIMING_UHS_SDR104)
+ host->timing != MMC_TIMING_UHS_SDR104 &&
+ host->timing != MMC_TIMING_MMC_HS400)
goto disable;
blksz_depth = blksz / (1 << host->data_shift);
diff --git a/drivers/mmc/host/renesas_sdhi_internal_dmac.c b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
index f7f9773d161f..d032bd63444d 100644
--- a/drivers/mmc/host/renesas_sdhi_internal_dmac.c
+++ b/drivers/mmc/host/renesas_sdhi_internal_dmac.c
@@ -139,8 +139,7 @@ renesas_sdhi_internal_dmac_abort_dma(struct tmio_mmc_host *host) {
renesas_sdhi_internal_dmac_dm_write(host, DM_CM_RST,
RST_RESERVED_BITS | val);
- if (host->data && host->data->flags & MMC_DATA_READ)
- clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
+ clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
renesas_sdhi_internal_dmac_enable_dma(host, true);
}
@@ -164,17 +163,14 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
goto force_pio;
/* This DMAC cannot handle if buffer is not 8-bytes alignment */
- if (!IS_ALIGNED(sg_dma_address(sg), 8)) {
- dma_unmap_sg(&host->pdev->dev, sg, host->sg_len,
- mmc_get_dma_dir(data));
- goto force_pio;
- }
+ if (!IS_ALIGNED(sg_dma_address(sg), 8))
+ goto force_pio_with_unmap;
if (data->flags & MMC_DATA_READ) {
dtran_mode |= DTRAN_MODE_CH_NUM_CH1;
if (test_bit(SDHI_INTERNAL_DMAC_ONE_RX_ONLY, &global_flags) &&
test_and_set_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags))
- goto force_pio;
+ goto force_pio_with_unmap;
} else {
dtran_mode |= DTRAN_MODE_CH_NUM_CH0;
}
@@ -189,6 +185,9 @@ renesas_sdhi_internal_dmac_start_dma(struct tmio_mmc_host *host,
return;
+force_pio_with_unmap:
+ dma_unmap_sg(&host->pdev->dev, sg, host->sg_len, mmc_get_dma_dir(data));
+
force_pio:
host->force_pio = true;
renesas_sdhi_internal_dmac_enable_dma(host, false);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d6aef70d34fa..4eb3d29ecde1 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -312,6 +312,15 @@ static u32 esdhc_readl_le(struct sdhci_host *host, int reg)
if (imx_data->socdata->flags & ESDHC_FLAG_HS400)
val |= SDHCI_SUPPORT_HS400;
+
+ /*
+ * Do not advertise faster UHS modes if there are no
+ * pinctrl states for 100MHz/200MHz.
+ */
+ if (IS_ERR_OR_NULL(imx_data->pins_100mhz) ||
+ IS_ERR_OR_NULL(imx_data->pins_200mhz))
+ val &= ~(SDHCI_SUPPORT_SDR50 | SDHCI_SUPPORT_DDR50
+ | SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_HS400);
}
}
@@ -1158,18 +1167,6 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
ESDHC_PINCTRL_STATE_100MHZ);
imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
ESDHC_PINCTRL_STATE_200MHZ);
- if (IS_ERR(imx_data->pins_100mhz) ||
- IS_ERR(imx_data->pins_200mhz)) {
- dev_warn(mmc_dev(host->mmc),
- "could not get ultra high speed state, work on normal mode\n");
- /*
- * fall back to not supporting uhs by specifying no
- * 1.8v quirk
- */
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
- }
- } else {
- host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
}
/* call to generic mmc_of_parse to support additional capabilities */
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index e7472590f2ed..8e7f3e35ee3d 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1446,6 +1446,7 @@ static int sunxi_mmc_runtime_resume(struct device *dev)
sunxi_mmc_init_host(host);
sunxi_mmc_set_bus_width(host, mmc->ios.bus_width);
sunxi_mmc_set_clk(host, &mmc->ios);
+ enable_irq(host->irq);
return 0;
}
@@ -1455,6 +1456,12 @@ static int sunxi_mmc_runtime_suspend(struct device *dev)
struct mmc_host *mmc = dev_get_drvdata(dev);
struct sunxi_mmc_host *host = mmc_priv(mmc);
+ /*
+ * When clocks are off, it's possible receiving
+ * fake interrupts, which will stall the system.
+ * Disabling the irq will prevent this.
+ */
+ disable_irq(host->irq);
sunxi_mmc_reset_host(host);
sunxi_mmc_disable(host);
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index a0c655628d6d..1b64ac8c5bc8 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -2526,7 +2526,7 @@ static int cfi_atmel_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
struct ppb_lock {
struct flchip *chip;
- loff_t offset;
+ unsigned long adr;
int locked;
};
@@ -2544,8 +2544,9 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
unsigned long timeo;
int ret;
+ adr += chip->start;
mutex_lock(&chip->mutex);
- ret = get_chip(map, chip, adr + chip->start, FL_LOCKING);
+ ret = get_chip(map, chip, adr, FL_LOCKING);
if (ret) {
mutex_unlock(&chip->mutex);
return ret;
@@ -2563,8 +2564,8 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
if (thunk == DO_XXLOCK_ONEBLOCK_LOCK) {
chip->state = FL_LOCKING;
- map_write(map, CMD(0xA0), chip->start + adr);
- map_write(map, CMD(0x00), chip->start + adr);
+ map_write(map, CMD(0xA0), adr);
+ map_write(map, CMD(0x00), adr);
} else if (thunk == DO_XXLOCK_ONEBLOCK_UNLOCK) {
/*
* Unlocking of one specific sector is not supported, so we
@@ -2602,7 +2603,7 @@ static int __maybe_unused do_ppb_xxlock(struct map_info *map,
map_write(map, CMD(0x00), chip->start);
chip->state = FL_READY;
- put_chip(map, chip, adr + chip->start);
+ put_chip(map, chip, adr);
mutex_unlock(&chip->mutex);
return ret;
@@ -2659,9 +2660,9 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
* sectors shall be unlocked, so lets keep their locking
* status at "unlocked" (locked=0) for the final re-locking.
*/
- if ((adr < ofs) || (adr >= (ofs + len))) {
+ if ((offset < ofs) || (offset >= (ofs + len))) {
sect[sectors].chip = &cfi->chips[chipnum];
- sect[sectors].offset = offset;
+ sect[sectors].adr = adr;
sect[sectors].locked = do_ppb_xxlock(
map, &cfi->chips[chipnum], adr, 0,
DO_XXLOCK_ONEBLOCK_GETLOCK);
@@ -2675,6 +2676,8 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
i++;
if (adr >> cfi->chipshift) {
+ if (offset >= (ofs + len))
+ break;
adr = 0;
chipnum++;
@@ -2705,7 +2708,7 @@ static int __maybe_unused cfi_ppb_unlock(struct mtd_info *mtd, loff_t ofs,
*/
for (i = 0; i < sectors; i++) {
if (sect[i].locked)
- do_ppb_xxlock(map, sect[i].chip, sect[i].offset, 0,
+ do_ppb_xxlock(map, sect[i].chip, sect[i].adr, 0,
DO_XXLOCK_ONEBLOCK_LOCK);
}
diff --git a/drivers/mtd/devices/mtd_dataflash.c b/drivers/mtd/devices/mtd_dataflash.c
index 3a6f450d1093..53febe8a68c3 100644
--- a/drivers/mtd/devices/mtd_dataflash.c
+++ b/drivers/mtd/devices/mtd_dataflash.c
@@ -733,8 +733,8 @@ static struct flash_info dataflash_data[] = {
{ "AT45DB642x", 0x1f2800, 8192, 1056, 11, SUP_POW2PS},
{ "at45db642d", 0x1f2800, 8192, 1024, 10, SUP_POW2PS | IS_POW2PS},
- { "AT45DB641E", 0x1f28000100, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
- { "at45db641e", 0x1f28000100, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
+ { "AT45DB641E", 0x1f28000100ULL, 32768, 264, 9, SUP_EXTID | SUP_POW2PS},
+ { "at45db641e", 0x1f28000100ULL, 32768, 256, 8, SUP_EXTID | SUP_POW2PS | IS_POW2PS},
};
static struct flash_info *jedec_lookup(struct spi_device *spi,
diff --git a/drivers/mtd/nand/raw/denali_dt.c b/drivers/mtd/nand/raw/denali_dt.c
index cfd33e6ca77f..5869e90cc14b 100644
--- a/drivers/mtd/nand/raw/denali_dt.c
+++ b/drivers/mtd/nand/raw/denali_dt.c
@@ -123,7 +123,11 @@ static int denali_dt_probe(struct platform_device *pdev)
if (ret)
return ret;
- denali->clk_x_rate = clk_get_rate(dt->clk);
+ /*
+ * Hardcode the clock rate for the backward compatibility.
+ * This works for both SOCFPGA and UniPhier.
+ */
+ denali->clk_x_rate = 200000000;
ret = denali_init(denali);
if (ret)
diff --git a/drivers/mtd/nand/raw/mxc_nand.c b/drivers/mtd/nand/raw/mxc_nand.c
index 45786e707b7b..26cef218bb43 100644
--- a/drivers/mtd/nand/raw/mxc_nand.c
+++ b/drivers/mtd/nand/raw/mxc_nand.c
@@ -48,7 +48,7 @@
#define NFC_V1_V2_CONFIG (host->regs + 0x0a)
#define NFC_V1_V2_ECC_STATUS_RESULT (host->regs + 0x0c)
#define NFC_V1_V2_RSLTMAIN_AREA (host->regs + 0x0e)
-#define NFC_V1_V2_RSLTSPARE_AREA (host->regs + 0x10)
+#define NFC_V21_RSLTSPARE_AREA (host->regs + 0x10)
#define NFC_V1_V2_WRPROT (host->regs + 0x12)
#define NFC_V1_UNLOCKSTART_BLKADDR (host->regs + 0x14)
#define NFC_V1_UNLOCKEND_BLKADDR (host->regs + 0x16)
@@ -1274,6 +1274,9 @@ static void preset_v2(struct mtd_info *mtd)
writew(config1, NFC_V1_V2_CONFIG1);
/* preset operation */
+ /* spare area size in 16-bit half-words */
+ writew(mtd->oobsize / 2, NFC_V21_RSLTSPARE_AREA);
+
/* Unlock the internal RAM Buffer */
writew(0x2, NFC_V1_V2_CONFIG);
diff --git a/drivers/mtd/nand/raw/nand_base.c b/drivers/mtd/nand/raw/nand_base.c
index 10c4f9919850..b01d15ec4c56 100644
--- a/drivers/mtd/nand/raw/nand_base.c
+++ b/drivers/mtd/nand/raw/nand_base.c
@@ -440,7 +440,7 @@ static int nand_block_bad(struct mtd_info *mtd, loff_t ofs)
for (; page < page_end; page++) {
res = chip->ecc.read_oob(mtd, chip, page);
- if (res)
+ if (res < 0)
return res;
bad = chip->oob_poi[chip->badblockpos];
diff --git a/drivers/mtd/nand/raw/nand_macronix.c b/drivers/mtd/nand/raw/nand_macronix.c
index 7ed1f87e742a..49c546c97c6f 100644
--- a/drivers/mtd/nand/raw/nand_macronix.c
+++ b/drivers/mtd/nand/raw/nand_macronix.c
@@ -17,23 +17,47 @@
#include <linux/mtd/rawnand.h>
+/*
+ * Macronix AC series does not support using SET/GET_FEATURES to change
+ * the timings unlike what is declared in the parameter page. Unflag
+ * this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+ unsigned int i;
+ static const char * const broken_get_timings[] = {
+ "MX30LF1G18AC",
+ "MX30LF1G28AC",
+ "MX30LF2G18AC",
+ "MX30LF2G28AC",
+ "MX30LF4G18AC",
+ "MX30LF4G28AC",
+ "MX60LF8G18AC",
+ };
+
+ if (!chip->parameters.supports_set_get_features)
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(broken_get_timings); i++) {
+ if (!strcmp(broken_get_timings[i], chip->parameters.model))
+ break;
+ }
+
+ if (i == ARRAY_SIZE(broken_get_timings))
+ return;
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_clear(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
static int macronix_nand_init(struct nand_chip *chip)
{
if (nand_is_slc(chip))
chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
- /*
- * MX30LF2G18AC chip does not support using SET/GET_FEATURES to change
- * the timings unlike what is declared in the parameter page. Unflag
- * this feature to avoid unnecessary downturns.
- */
- if (chip->parameters.supports_set_get_features &&
- !strcmp("MX30LF2G18AC", chip->parameters.model)) {
- bitmap_clear(chip->parameters.get_feature_list,
- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
- bitmap_clear(chip->parameters.set_feature_list,
- ONFI_FEATURE_ADDR_TIMING_MODE, 1);
- }
+ macronix_nand_fix_broken_get_timings(chip);
return 0;
}
diff --git a/drivers/mtd/nand/raw/nand_micron.c b/drivers/mtd/nand/raw/nand_micron.c
index 0af45b134c0c..5ec4c90a637d 100644
--- a/drivers/mtd/nand/raw/nand_micron.c
+++ b/drivers/mtd/nand/raw/nand_micron.c
@@ -66,7 +66,9 @@ static int micron_nand_onfi_init(struct nand_chip *chip)
if (p->supports_set_get_features) {
set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
}
return 0;
diff --git a/drivers/mtd/spi-nor/cadence-quadspi.c b/drivers/mtd/spi-nor/cadence-quadspi.c
index c3f7aaa5d18f..d7e10b36a0b9 100644
--- a/drivers/mtd/spi-nor/cadence-quadspi.c
+++ b/drivers/mtd/spi-nor/cadence-quadspi.c
@@ -926,10 +926,12 @@ static ssize_t cqspi_write(struct spi_nor *nor, loff_t to,
if (ret)
return ret;
- if (f_pdata->use_direct_mode)
+ if (f_pdata->use_direct_mode) {
memcpy_toio(cqspi->ahb_base + to, buf, len);
- else
+ ret = cqspi_wait_idle(cqspi);
+ } else {
ret = cqspi_indirect_write_execute(nor, to, buf, len);
+ }
if (ret)
return ret;
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 98663c50ded0..4d5d01cb8141 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -743,15 +743,20 @@ const struct bond_option *bond_opt_get(unsigned int option)
static int bond_option_mode_set(struct bonding *bond,
const struct bond_opt_value *newval)
{
- if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
- netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
- newval->string);
- /* disable arp monitoring */
- bond->params.arp_interval = 0;
- /* set miimon to default value */
- bond->params.miimon = BOND_DEFAULT_MIIMON;
- netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
- bond->params.miimon);
+ if (!bond_mode_uses_arp(newval->value)) {
+ if (bond->params.arp_interval) {
+ netdev_dbg(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+ newval->string);
+ /* disable arp monitoring */
+ bond->params.arp_interval = 0;
+ }
+
+ if (!bond->params.miimon) {
+ /* set miimon to default value */
+ bond->params.miimon = BOND_DEFAULT_MIIMON;
+ netdev_dbg(bond->dev, "Setting MII monitoring interval to %d\n",
+ bond->params.miimon);
+ }
}
if (newval->value == BOND_MODE_ALB)
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index b397a33f3d32..9b449400376b 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
int err;
err = pm_runtime_get_sync(priv->device);
- if (err)
+ if (err < 0) {
pm_runtime_put_noidle(priv->device);
+ return err;
+ }
- return err;
+ return 0;
}
static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
} else {
/* Version 3.1.x or 3.2.x */
- cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+ cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+ CCCR_NISO);
/* Only 3.2.x has NISO Bit implemented */
if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
priv->can.clock.freq = clk_get_rate(cclk);
priv->mram_base = mram_addr;
- m_can_of_parse_mram(priv, mram_config_vals);
-
platform_set_drvdata(pdev, dev);
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
goto clk_disable;
}
+ m_can_of_parse_mram(priv, mram_config_vals);
+
devm_can_led_init(dev);
of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ failed_ret:
return ret;
}
-/* TODO: runtime PM with power down or sleep mode */
-
static __maybe_unused int m_can_suspend(struct device *dev)
{
struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
pinctrl_pm_select_default_state(dev);
- m_can_init_ram(priv);
-
priv->can.state = CAN_STATE_ERROR_ACTIVE;
if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
if (ret)
return ret;
+ m_can_init_ram(priv);
m_can_start(ndev);
netif_device_attach(ndev);
netif_start_queue(ndev);
diff --git a/drivers/net/can/mscan/mpc5xxx_can.c b/drivers/net/can/mscan/mpc5xxx_can.c
index c7427bdd3a4b..2949a381a94d 100644
--- a/drivers/net/can/mscan/mpc5xxx_can.c
+++ b/drivers/net/can/mscan/mpc5xxx_can.c
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
return 0;
}
cdm = of_iomap(np_cdm, 0);
+ if (!cdm) {
+ of_node_put(np_cdm);
+ dev_err(&ofdev->dev, "can't map clock node!\n");
+ return 0;
+ }
if (in_8(&cdm->ipb_clk_sel) & 0x1)
freq *= 2;
diff --git a/drivers/net/can/peak_canfd/peak_pciefd_main.c b/drivers/net/can/peak_canfd/peak_pciefd_main.c
index b9e28578bc7b..455a3797a200 100644
--- a/drivers/net/can/peak_canfd/peak_pciefd_main.c
+++ b/drivers/net/can/peak_canfd/peak_pciefd_main.c
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
#define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */
#define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */
+#define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \
+ ((u32)(y) << 16) | \
+ ((u32)(z) << 8))
+
/* System Control Registers Bits */
#define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */
#define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
"%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
hw_ver_major, hw_ver_minor, hw_ver_sub);
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+ /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
+ * 64-bit logical addresses: this workaround forces usage of 32-bit
+ * DMA addresses only when such a fw is detected.
+ */
+ if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+ PCIEFD_FW_VERSION(3, 3, 0)) {
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (err)
+ dev_warn(&pdev->dev,
+ "warning: can't set DMA mask %llxh (err %d)\n",
+ DMA_BIT_MASK(32), err);
+ }
+#endif
+
/* stop system clock */
pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
PCIEFD_REG_SYS_CTL_CLR);
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 89aec07c225f..5a24039733ef 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -2,6 +2,7 @@
*
* Copyright (C) 2012 - 2014 Xilinx, Inc.
* Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
*
* Description:
* This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>
+#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
#define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
- XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+ XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
/* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
#define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
/**
* struct xcan_priv - This definition define CAN driver instance
* @can: CAN private data structure.
+ * @tx_lock: Lock for synchronizing TX interrupt handling
* @tx_head: Tx CAN packets ready to send on the queue
* @tx_tail: Tx CAN packets successfully sended on the queue
* @tx_max: Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
*/
struct xcan_priv {
struct can_priv can;
+ spinlock_t tx_lock;
unsigned int tx_head;
unsigned int tx_tail;
unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
.brp_inc = 1,
};
+#define XCAN_CAP_WATERMARK 0x0001
+struct xcan_devtype_data {
+ unsigned int caps;
+};
+
/**
* xcan_write_reg_le - Write a value to the device register little endian
* @priv: Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
usleep_range(500, 10000);
}
+ /* reset clears FIFOs */
+ priv->tx_head = 0;
+ priv->tx_tail = 0;
+
return 0;
}
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf = (struct can_frame *)skb->data;
u32 id, dlc, data[2] = {0, 0};
+ unsigned long flags;
if (can_dropped_invalid_skb(ndev, skb))
return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
priv->tx_head++;
/* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
stats->tx_bytes += cf->can_dlc;
}
+ /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+ if (priv->tx_max > 1)
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
/* Check if the TX buffer is full */
if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
netif_stop_queue(ndev);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
return NETDEV_TX_OK;
}
@@ -530,6 +554,123 @@ static int xcan_rx(struct net_device *ndev)
}
/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev: Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+ if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+ return CAN_STATE_ERROR_PASSIVE;
+ else if (status & XCAN_SR_ERRWRN_MASK)
+ return CAN_STATE_ERROR_WARNING;
+ else
+ return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev: Pointer to net_device structure
+ * @new_state: The new CAN state to be set
+ * @cf: Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+ enum can_state new_state,
+ struct can_frame *cf)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+ u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+ u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+ priv->can.state = new_state;
+
+ if (cf) {
+ cf->can_id |= CAN_ERR_CRTL;
+ cf->data[6] = txerr;
+ cf->data[7] = rxerr;
+ }
+
+ switch (new_state) {
+ case CAN_STATE_ERROR_PASSIVE:
+ priv->can.can_stats.error_passive++;
+ if (cf)
+ cf->data[1] = (rxerr > 127) ?
+ CAN_ERR_CRTL_RX_PASSIVE :
+ CAN_ERR_CRTL_TX_PASSIVE;
+ break;
+ case CAN_STATE_ERROR_WARNING:
+ priv->can.can_stats.error_warning++;
+ if (cf)
+ cf->data[1] |= (txerr > rxerr) ?
+ CAN_ERR_CRTL_TX_WARNING :
+ CAN_ERR_CRTL_RX_WARNING;
+ break;
+ case CAN_STATE_ERROR_ACTIVE:
+ if (cf)
+ cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+ break;
+ default:
+ /* non-ERROR states are handled elsewhere */
+ WARN_ON(1);
+ break;
+ }
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev: Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+ struct xcan_priv *priv = netdev_priv(ndev);
+ enum can_state old_state = priv->can.state;
+ enum can_state new_state;
+
+ /* changing error state due to successful frame RX/TX can only
+ * occur from these states
+ */
+ if (old_state != CAN_STATE_ERROR_WARNING &&
+ old_state != CAN_STATE_ERROR_PASSIVE)
+ return;
+
+ new_state = xcan_current_error_state(ndev);
+
+ if (new_state != old_state) {
+ struct sk_buff *skb;
+ struct can_frame *cf;
+
+ skb = alloc_can_err_skb(ndev, &cf);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+ if (skb) {
+ struct net_device_stats *stats = &ndev->stats;
+
+ stats->rx_packets++;
+ stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
+ }
+ }
+}
+
+/**
* xcan_err_interrupt - error frame Isr
* @ndev: net_device pointer
* @isr: interrupt status register value
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
struct net_device_stats *stats = &ndev->stats;
struct can_frame *cf;
struct sk_buff *skb;
- u32 err_status, status, txerr = 0, rxerr = 0;
+ u32 err_status;
skb = alloc_can_err_skb(ndev, &cf);
err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
- txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
- rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
- XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
if (isr & XCAN_IXR_BSOFF_MASK) {
priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
can_bus_off(ndev);
if (skb)
cf->can_id |= CAN_ERR_BUSOFF;
- } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- priv->can.can_stats.error_passive++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] = (rxerr > 127) ?
- CAN_ERR_CRTL_RX_PASSIVE :
- CAN_ERR_CRTL_TX_PASSIVE;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- priv->can.can_stats.error_warning++;
- if (skb) {
- cf->can_id |= CAN_ERR_CRTL;
- cf->data[1] |= (txerr > rxerr) ?
- CAN_ERR_CRTL_TX_WARNING :
- CAN_ERR_CRTL_RX_WARNING;
- cf->data[6] = txerr;
- cf->data[7] = rxerr;
- }
+ } else {
+ enum can_state new_state = xcan_current_error_state(ndev);
+
+ xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
}
/* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
if (isr & XCAN_IXR_RXOFLW_MASK) {
stats->rx_over_errors++;
stats->rx_errors++;
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
if (skb) {
cf->can_id |= CAN_ERR_CRTL;
cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
- if (isr & XCAN_IXR_RXOK_MASK) {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXOK_MASK);
- work_done += xcan_rx(ndev);
- } else {
- priv->write_reg(priv, XCAN_ICR_OFFSET,
- XCAN_IXR_RXNEMP_MASK);
- break;
- }
+ work_done += xcan_rx(ndev);
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- if (work_done)
+ if (work_done) {
can_led_event(ndev, CAN_LED_EVENT_RX);
+ xcan_update_error_state_after_rxtx(ndev);
+ }
if (work_done < quota) {
napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+ ier |= XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
}
return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
{
struct xcan_priv *priv = netdev_priv(ndev);
struct net_device_stats *stats = &ndev->stats;
+ unsigned int frames_in_fifo;
+ int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+ unsigned long flags;
+ int retries = 0;
+
+ /* Synchronize with xmit as we need to know the exact number
+ * of frames in the FIFO to stay in sync due to the TXFEMP
+ * handling.
+ * This also prevents a race between netif_wake_queue() and
+ * netif_stop_queue().
+ */
+ spin_lock_irqsave(&priv->tx_lock, flags);
+
+ frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+ if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+ /* clear TXOK anyway to avoid getting back here */
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+ return;
+ }
+
+ /* Check if 2 frames were sent (TXOK only means that at least 1
+ * frame was sent).
+ */
+ if (frames_in_fifo > 1) {
+ WARN_ON(frames_in_fifo > priv->tx_max);
+
+ /* Synchronize TXOK and isr so that after the loop:
+ * (1) isr variable is up-to-date at least up to TXOK clear
+ * time. This avoids us clearing a TXOK of a second frame
+ * but not noticing that the FIFO is now empty and thus
+ * marking only a single frame as sent.
+ * (2) No TXOK is left. Having one could mean leaving a
+ * stray TXOK as we might process the associated frame
+ * via TXFEMP handling as we read TXFEMP *after* TXOK
+ * clear to satisfy (1).
+ */
+ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+ }
- while ((priv->tx_head - priv->tx_tail > 0) &&
- (isr & XCAN_IXR_TXOK_MASK)) {
+ if (isr & XCAN_IXR_TXFEMP_MASK) {
+ /* nothing in FIFO anymore */
+ frames_sent = frames_in_fifo;
+ }
+ } else {
+ /* single frame in fifo, just clear TXOK */
priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+ }
+
+ while (frames_sent--) {
can_get_echo_skb(ndev, priv->tx_tail %
priv->tx_max);
priv->tx_tail++;
stats->tx_packets++;
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
}
- can_led_event(ndev, CAN_LED_EVENT_TX);
+
netif_wake_queue(ndev);
+
+ spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+ can_led_event(ndev, CAN_LED_EVENT_TX);
+ xcan_update_error_state_after_rxtx(ndev);
}
/**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
struct net_device *ndev = (struct net_device *)dev_id;
struct xcan_priv *priv = netdev_priv(ndev);
u32 isr, ier;
+ u32 isr_errors;
/* Get the interrupt status from Xilinx CAN */
isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
xcan_tx_interrupt(ndev, isr);
/* Check for the type of error interrupt and Processing it */
- if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
- XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
- priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
- XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
- XCAN_IXR_ARBLST_MASK));
+ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+ XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+ if (isr_errors) {
+ priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
xcan_err_interrupt(ndev, isr);
}
/* Check for the type of receive interrupt and Processing it */
- if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+ if (isr & XCAN_IXR_RXNEMP_MASK) {
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+ ier &= ~XCAN_IXR_RXNEMP_MASK;
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
napi_schedule(&priv->napi);
}
@@ -819,13 +984,9 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
static void xcan_chip_stop(struct net_device *ndev)
{
struct xcan_priv *priv = netdev_priv(ndev);
- u32 ier;
/* Disable interrupts and leave the can in configuration mode */
- ier = priv->read_reg(priv, XCAN_IER_OFFSET);
- ier &= ~XCAN_INTR_ALL;
- priv->write_reg(priv, XCAN_IER_OFFSET, ier);
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+ set_reset_mode(ndev);
priv->can.state = CAN_STATE_STOPPED;
}
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
*/
static int __maybe_unused xcan_suspend(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_suspend(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
- return 0;
+ if (netif_running(ndev)) {
+ netif_stop_queue(ndev);
+ netif_device_detach(ndev);
+ xcan_chip_stop(ndev);
+ }
+
+ return pm_runtime_force_suspend(dev);
}
/**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
*/
static int __maybe_unused xcan_resume(struct device *dev)
{
- if (!device_may_wakeup(dev))
- return pm_runtime_force_resume(dev);
+ struct net_device *ndev = dev_get_drvdata(dev);
+ int ret;
- return 0;
+ ret = pm_runtime_force_resume(dev);
+ if (ret) {
+ dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+ return ret;
+ }
+
+ if (netif_running(ndev)) {
+ ret = xcan_chip_start(ndev);
+ if (ret) {
+ dev_err(dev, "xcan_chip_start failed on resume\n");
+ return ret;
+ }
+
+ netif_device_attach(ndev);
+ netif_start_queue(ndev);
+ }
+ return 0;
}
/**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
- if (netif_running(ndev)) {
- netif_stop_queue(ndev);
- netif_device_detach(ndev);
- }
-
- priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
- priv->can.state = CAN_STATE_SLEEPING;
-
clk_disable_unprepare(priv->bus_clk);
clk_disable_unprepare(priv->can_clk);
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
struct net_device *ndev = dev_get_drvdata(dev);
struct xcan_priv *priv = netdev_priv(ndev);
int ret;
- u32 isr, status;
ret = clk_prepare_enable(priv->bus_clk);
if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
return ret;
}
- priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
- isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
- status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
- if (netif_running(ndev)) {
- if (isr & XCAN_IXR_BSOFF_MASK) {
- priv->can.state = CAN_STATE_BUS_OFF;
- priv->write_reg(priv, XCAN_SRR_OFFSET,
- XCAN_SRR_RESET_MASK);
- } else if ((status & XCAN_SR_ESTAT_MASK) ==
- XCAN_SR_ESTAT_MASK) {
- priv->can.state = CAN_STATE_ERROR_PASSIVE;
- } else if (status & XCAN_SR_ERRWRN_MASK) {
- priv->can.state = CAN_STATE_ERROR_WARNING;
- } else {
- priv->can.state = CAN_STATE_ERROR_ACTIVE;
- }
- netif_device_attach(ndev);
- netif_start_queue(ndev);
- }
-
return 0;
}
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
};
+static const struct xcan_devtype_data xcan_zynq_data = {
+ .caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+ { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+ { .compatible = "xlnx,axi-can-1.00.a", },
+ { /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
/**
* xcan_probe - Platform registration call
* @pdev: Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
struct resource *res; /* IO mem resources */
struct net_device *ndev;
struct xcan_priv *priv;
+ const struct of_device_id *of_id;
+ int caps = 0;
void __iomem *addr;
- int ret, rx_max, tx_max;
+ int ret, rx_max, tx_max, tx_fifo_depth;
/* Get the virtual base address for the device */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
goto err;
}
- ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+ ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+ &tx_fifo_depth);
if (ret < 0)
goto err;
@@ -1094,6 +1261,30 @@ static int xcan_probe(struct platform_device *pdev)
if (ret < 0)
goto err;
+ of_id = of_match_device(xcan_of_match, &pdev->dev);
+ if (of_id) {
+ const struct xcan_devtype_data *devtype_data = of_id->data;
+
+ if (devtype_data)
+ caps = devtype_data->caps;
+ }
+
+ /* There is no way to directly figure out how many frames have been
+ * sent when the TXOK interrupt is processed. If watermark programming
+ * is supported, we can have 2 frames in the FIFO and use TXFEMP
+ * to determine if 1 or 2 frames have been sent.
+ * Theoretically we should be able to use TXFWMEMP to determine up
+ * to 3 frames, but it seems that after putting a second frame in the
+ * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+ * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+ * sent), which is not a sensible state - possibly TXFWMEMP is not
+ * completely synchronized with the rest of the bits?
+ */
+ if (caps & XCAN_CAP_WATERMARK)
+ tx_max = min(tx_fifo_depth, 2);
+ else
+ tx_max = 1;
+
/* Create a CAN device instance */
ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
if (!ndev)
@@ -1108,6 +1299,7 @@ static int xcan_probe(struct platform_device *pdev)
CAN_CTRLMODE_BERR_REPORTING;
priv->reg_base = addr;
priv->tx_max = tx_max;
+ spin_lock_init(&priv->tx_lock);
/* Get IRQ for the device */
ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@ static int xcan_probe(struct platform_device *pdev)
pm_runtime_put(&pdev->dev);
- netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+ netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
priv->reg_base, ndev->irq, priv->can.clock.freq,
- priv->tx_max);
+ tx_fifo_depth, priv->tx_max);
return 0;
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
return 0;
}
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
- { .compatible = "xlnx,zynq-can-1.0", },
- { .compatible = "xlnx,axi-can-1.00.a", },
- { /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
static struct platform_driver xcan_driver = {
.probe = xcan_probe,
.remove = xcan_remove,
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 437cd6eb4faa..9ef07a06aceb 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -343,6 +343,7 @@ static const struct irq_domain_ops mv88e6xxx_g1_irq_domain_ops = {
.xlate = irq_domain_xlate_twocell,
};
+/* To be called with reg_lock held */
static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
{
int irq, virq;
@@ -362,9 +363,15 @@ static void mv88e6xxx_g1_irq_free_common(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_g1_irq_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g1_irq_free_common(chip);
-
+ /*
+ * free_irq must be called without reg_lock taken because the irq
+ * handler takes this lock, too.
+ */
free_irq(chip->irq, chip);
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mutex_unlock(&chip->reg_lock);
}
static int mv88e6xxx_g1_irq_setup_common(struct mv88e6xxx_chip *chip)
@@ -469,10 +476,12 @@ static int mv88e6xxx_irq_poll_setup(struct mv88e6xxx_chip *chip)
static void mv88e6xxx_irq_poll_free(struct mv88e6xxx_chip *chip)
{
- mv88e6xxx_g1_irq_free_common(chip);
-
kthread_cancel_delayed_work_sync(&chip->irq_poll_work);
kthread_destroy_worker(chip->kworker);
+
+ mutex_lock(&chip->reg_lock);
+ mv88e6xxx_g1_irq_free_common(chip);
+ mutex_unlock(&chip->reg_lock);
}
int mv88e6xxx_wait(struct mv88e6xxx_chip *chip, int addr, int reg, u16 mask)
@@ -4506,12 +4515,10 @@ out_g2_irq:
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
out_g1_irq:
- mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
- mutex_unlock(&chip->reg_lock);
out:
if (pdata)
dev_put(pdata->netdev);
@@ -4539,12 +4546,10 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
if (chip->info->g2_irqs > 0)
mv88e6xxx_g2_irq_free(chip);
- mutex_lock(&chip->reg_lock);
if (chip->irq > 0)
mv88e6xxx_g1_irq_free(chip);
else
mv88e6xxx_irq_poll_free(chip);
- mutex_unlock(&chip->reg_lock);
}
static const struct of_device_id mv88e6xxx_of_match[] = {
diff --git a/drivers/net/ethernet/3com/Kconfig b/drivers/net/ethernet/3com/Kconfig
index 5b7658bcf020..5c3ef9fc8207 100644
--- a/drivers/net/ethernet/3com/Kconfig
+++ b/drivers/net/ethernet/3com/Kconfig
@@ -32,7 +32,7 @@ config EL3
config 3C515
tristate "3c515 ISA \"Fast EtherLink\""
- depends on ISA && ISA_DMA_API
+ depends on ISA && ISA_DMA_API && !PPC32
---help---
If you have a 3Com ISA EtherLink XL "Corkscrew" 3c515 Fast Ethernet
network card, say Y here.
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index d5c15e8bb3de..9e5cf5583c87 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -44,7 +44,7 @@ config AMD8111_ETH
config LANCE
tristate "AMD LANCE and PCnet (AT1500 and NE2100) support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
Some LinkSys cards are of this type.
@@ -138,7 +138,7 @@ config PCMCIA_NMCLAN
config NI65
tristate "NI6510 support"
- depends on ISA && ISA_DMA_API && !ARM
+ depends on ISA && ISA_DMA_API && !ARM && !PPC32
---help---
If you have a network (Ethernet) card of this type, say Y here.
@@ -173,7 +173,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM && HAS_DMA
+ depends on ((OF_NET && OF_ADDRESS) || ACPI || PCI) && HAS_IOMEM
depends on X86 || ARM64 || COMPILE_TEST
select BITREVERSE
select CRC32
diff --git a/drivers/net/ethernet/apm/xgene-v2/Kconfig b/drivers/net/ethernet/apm/xgene-v2/Kconfig
index 1205861b6318..eedd3f3dd22e 100644
--- a/drivers/net/ethernet/apm/xgene-v2/Kconfig
+++ b/drivers/net/ethernet/apm/xgene-v2/Kconfig
@@ -1,6 +1,5 @@
config NET_XGENE_V2
tristate "APM X-Gene SoC Ethernet-v2 Driver"
- depends on HAS_DMA
depends on ARCH_XGENE || COMPILE_TEST
help
This is the Ethernet driver for the on-chip ethernet interface
diff --git a/drivers/net/ethernet/apm/xgene/Kconfig b/drivers/net/ethernet/apm/xgene/Kconfig
index afccb033177b..e4e33c900b57 100644
--- a/drivers/net/ethernet/apm/xgene/Kconfig
+++ b/drivers/net/ethernet/apm/xgene/Kconfig
@@ -1,6 +1,5 @@
config NET_XGENE
tristate "APM X-Gene SoC Ethernet Driver"
- depends on HAS_DMA
depends on ARCH_XGENE || COMPILE_TEST
select PHYLIB
select MDIO_XGENE
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
index fc7383106946..91eb8910b1c9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -63,8 +63,6 @@
#define AQ_CFG_NAPI_WEIGHT 64U
-#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
-
/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
#define AQ_NIC_FC_OFF 0U
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index a2d416b24ffc..2c6ebd91a9f2 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -98,6 +98,8 @@ struct aq_stats_s {
#define AQ_HW_MEDIA_TYPE_TP 1U
#define AQ_HW_MEDIA_TYPE_FIBRE 2U
+#define AQ_HW_MULTICAST_ADDRESS_MAX 32U
+
struct aq_hw_s {
atomic_t flags;
u8 rbl_enabled:1;
@@ -177,7 +179,7 @@ struct aq_hw_ops {
unsigned int packet_filter);
int (*hw_multicast_list_set)(struct aq_hw_s *self,
- u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+ u8 ar_mac[AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
index ba5fe8c4125d..e3ae29e523f0 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_main.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -135,17 +135,10 @@ err_exit:
static void aq_ndev_set_multicast_settings(struct net_device *ndev)
{
struct aq_nic_s *aq_nic = netdev_priv(ndev);
- int err = 0;
- err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
- if (err < 0)
- return;
+ aq_nic_set_packet_filter(aq_nic, ndev->flags);
- if (netdev_mc_count(ndev)) {
- err = aq_nic_set_multicast_list(aq_nic, ndev);
- if (err < 0)
- return;
- }
+ aq_nic_set_multicast_list(aq_nic, ndev);
}
static const struct net_device_ops aq_ndev_ops = {
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index 1a1a6380c128..7a22d0257e04 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -563,34 +563,41 @@ err_exit:
int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
{
+ unsigned int packet_filter = self->packet_filter;
struct netdev_hw_addr *ha = NULL;
unsigned int i = 0U;
- self->mc_list.count = 0U;
-
- netdev_for_each_mc_addr(ha, ndev) {
- ether_addr_copy(self->mc_list.ar[i++], ha->addr);
- ++self->mc_list.count;
+ self->mc_list.count = 0;
+ if (netdev_uc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_PROMISC;
+ } else {
+ netdev_for_each_uc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
- if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX)
- break;
+ if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+ break;
+ }
}
- if (i >= AQ_CFG_MULTICAST_ADDRESS_MAX) {
- /* Number of filters is too big: atlantic does not support this.
- * Force all multi filter to support this.
- * With this we disable all UC filters and setup "all pass"
- * multicast mask
- */
- self->packet_filter |= IFF_ALLMULTI;
- self->aq_nic_cfg.mc_list_count = 0;
- return self->aq_hw_ops->hw_packet_filter_set(self->aq_hw,
- self->packet_filter);
+ if (i + netdev_mc_count(ndev) > AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_ALLMULTI;
} else {
- return self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
- self->mc_list.ar,
- self->mc_list.count);
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+
+ if (i >= AQ_HW_MULTICAST_ADDRESS_MAX)
+ break;
+ }
+ }
+
+ if (i > 0 && i < AQ_HW_MULTICAST_ADDRESS_MAX) {
+ packet_filter |= IFF_MULTICAST;
+ self->mc_list.count = i;
+ self->aq_hw_ops->hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
}
+ return aq_nic_set_packet_filter(self, packet_filter);
}
int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
index faa533a0ec47..fecfc401f95d 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -75,7 +75,7 @@ struct aq_nic_s {
struct aq_hw_link_status_s link_status;
struct {
u32 count;
- u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ u8 ar[AQ_HW_MULTICAST_ADDRESS_MAX][ETH_ALEN];
} mc_list;
struct pci_dev *pdev;
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
index 67e2f9fb9402..8cc6abadc03b 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -765,7 +765,7 @@ static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
u8 ar_mac
- [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count)
{
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index 819f6bcf9b4e..956860a69797 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -784,7 +784,7 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
u8 ar_mac
- [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [AQ_HW_MULTICAST_ADDRESS_MAX]
[ETH_ALEN],
u32 count)
{
@@ -812,7 +812,7 @@ static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
hw_atl_rpfl2_uc_flr_en_set(self,
(self->aq_nic_cfg->is_mc_list_enabled),
- HW_ATL_B0_MAC_MIN + i);
+ HW_ATL_B0_MAC_MIN + i);
}
err = aq_hw_err_from_flags(self);
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index e743ddf46343..5d0ab8e74b68 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -24,7 +24,8 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && HAS_DMA && (ARC || COMPILE_TEST)
+ depends on OF_IRQ && OF_NET
+ depends on ARC || COMPILE_TEST
---help---
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -33,7 +34,8 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA && (ARCH_ROCKCHIP || COMPILE_TEST)
+ depends on OF_IRQ && OF_NET && REGULATOR
+ depends on ARCH_ROCKCHIP || COMPILE_TEST
---help---
Support for Rockchip RK3036/RK3066/RK3188 EMAC ethernet controllers.
This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 567ee54504bc..5e5022fa1d04 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1897,13 +1897,19 @@ static int alx_resume(struct device *dev)
struct pci_dev *pdev = to_pci_dev(dev);
struct alx_priv *alx = pci_get_drvdata(pdev);
struct alx_hw *hw = &alx->hw;
+ int err;
alx_reset_phy(hw);
if (!netif_running(alx->dev))
return 0;
netif_device_attach(alx->dev);
- return __alx_open(alx, true);
+
+ rtnl_lock();
+ err = __alx_open(alx, true);
+ rtnl_unlock();
+
+ return err;
}
static SIMPLE_DEV_PM_OPS(alx_pm_ops, alx_suspend, alx_resume);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 94270f654b3b..7087b88550db 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1686,6 +1686,7 @@ static struct sk_buff *atl1c_alloc_skb(struct atl1c_adapter *adapter)
skb = build_skb(page_address(page) + adapter->rx_page_offset,
adapter->rx_frag_size);
if (likely(skb)) {
+ skb_reserve(skb, NET_SKB_PAD);
adapter->rx_page_offset += adapter->rx_frag_size;
if (adapter->rx_page_offset >= PAGE_SIZE)
adapter->rx_page = NULL;
diff --git a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig
index af75156919ed..4c3bfde6e8de 100644
--- a/drivers/net/ethernet/broadcom/Kconfig
+++ b/drivers/net/ethernet/broadcom/Kconfig
@@ -157,7 +157,6 @@ config BGMAC
config BGMAC_BCMA
tristate "Broadcom iProc GBit BCMA support"
depends on BCMA && BCMA_HOST_SOC
- depends on HAS_DMA
depends on BCM47XX || ARCH_BCM_5301X || COMPILE_TEST
select BGMAC
select PHYLIB
@@ -170,7 +169,6 @@ config BGMAC_BCMA
config BGMAC_PLATFORM
tristate "Broadcom iProc GBit platform support"
- depends on HAS_DMA
depends on ARCH_BCM_IPROC || COMPILE_TEST
depends on OF
select BGMAC
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index d5fca2e5a9bc..a1f60f89e059 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1946,8 +1946,8 @@ static int bcm_sysport_open(struct net_device *dev)
if (!priv->is_lite)
priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
else
- priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
- GIB_FCS_STRIP);
+ priv->crc_fwd = !((gib_readl(priv, GIB_CONTROL) &
+ GIB_FCS_STRIP) >> GIB_FCS_STRIP_SHIFT);
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
0, priv->phy_interface);
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index d6e5d0cbf3a3..cf440b91fd04 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -278,7 +278,8 @@ struct bcm_rsb {
#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
-#define GIB_FCS_STRIP (1 << 6)
+#define GIB_FCS_STRIP_SHIFT 6
+#define GIB_FCS_STRIP (1 << GIB_FCS_STRIP_SHIFT)
#define GIB_LCL_LOOP_EN (1 << 7)
#define GIB_LCL_LOOP_TXEN (1 << 8)
#define GIB_RMT_LOOP_EN (1 << 9)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index d847e1b9c37b..be1506169076 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1533,6 +1533,7 @@ struct bnx2x {
struct link_vars link_vars;
u32 link_cnt;
struct bnx2x_link_report_data last_reported_link;
+ bool force_link_down;
struct mdio_if_info mdio;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 8cd73ff5debc..af7b5a4d8ba0 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1261,6 +1261,11 @@ void __bnx2x_link_report(struct bnx2x *bp)
{
struct bnx2x_link_report_data cur_data;
+ if (bp->force_link_down) {
+ bp->link_vars.link_up = 0;
+ return;
+ }
+
/* reread mf_cfg */
if (IS_PF(bp) && !CHIP_IS_E1(bp))
bnx2x_read_mf_cfg(bp);
@@ -2817,6 +2822,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
bp->pending_max = 0;
}
+ bp->force_link_down = false;
if (bp->port.pmf) {
rc = bnx2x_initial_phy_init(bp, load_mode);
if (rc)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index da18aa239acb..a4a90b6cdb46 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -3388,14 +3388,18 @@ static int bnx2x_set_rss_flags(struct bnx2x *bp, struct ethtool_rxnfc *info)
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
} else if ((info->flow_type == UDP_V6_FLOW) &&
(bp->rss_conf_obj.udp_rss_v6 != udp_rss_requested)) {
bp->rss_conf_obj.udp_rss_v6 = udp_rss_requested;
DP(BNX2X_MSG_ETHTOOL,
"rss re-configured, UDP 4-tupple %s\n",
udp_rss_requested ? "enabled" : "disabled");
- return bnx2x_rss(bp, &bp->rss_conf_obj, false, true);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_rss(bp, &bp->rss_conf_obj, false,
+ true);
}
return 0;
@@ -3509,7 +3513,10 @@ static int bnx2x_set_rxfh(struct net_device *dev, const u32 *indir,
bp->rss_conf_obj.ind_table[i] = indir[i] + bp->fp->cl_id;
}
- return bnx2x_config_rss_eth(bp, false);
+ if (bp->state == BNX2X_STATE_OPEN)
+ return bnx2x_config_rss_eth(bp, false);
+
+ return 0;
}
/**
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 5b1ed240bf18..57348f2b49a3 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -10279,6 +10279,12 @@ static void bnx2x_sp_rtnl_task(struct work_struct *work)
bp->sp_rtnl_state = 0;
smp_mb();
+ /* Immediately indicate link as down */
+ bp->link_vars.link_up = 0;
+ bp->force_link_down = true;
+ netif_carrier_off(bp->dev);
+ BNX2X_ERR("Indicating link is down due to Tx-timeout\n");
+
bnx2x_nic_unload(bp, UNLOAD_NORMAL, true);
/* When ret value shows failure of allocation failure,
* the nic is rebooted again. If open still fails, a error
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 176fc9f4d7de..4394c1162be4 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -5712,7 +5712,9 @@ static int bnxt_init_chip(struct bnxt *bp, bool irq_re_init)
}
vnic->uc_filter_count = 1;
- vnic->rx_mask = CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
+ vnic->rx_mask = 0;
+ if (bp->dev->flags & IFF_BROADCAST)
+ vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
if ((bp->dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
vnic->rx_mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
@@ -5917,7 +5919,7 @@ unsigned int bnxt_get_max_func_irqs(struct bnxt *bp)
return min_t(unsigned int, hw_resc->max_irqs, hw_resc->max_cp_rings);
}
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
+static void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max_irqs)
{
bp->hw_resc.max_irqs = max_irqs;
}
@@ -6888,7 +6890,7 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
rc = bnxt_request_irq(bp);
if (rc) {
netdev_err(bp->dev, "bnxt_request_irq err: %x\n", rc);
- goto open_err;
+ goto open_err_irq;
}
}
@@ -6928,6 +6930,8 @@ static int __bnxt_open_nic(struct bnxt *bp, bool irq_re_init, bool link_re_init)
open_err:
bnxt_debug_dev_exit(bp);
bnxt_disable_napi(bp);
+
+open_err_irq:
bnxt_del_napi(bp);
open_err_free_mem:
@@ -7214,13 +7218,16 @@ static void bnxt_set_rx_mode(struct net_device *dev)
mask &= ~(CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS |
CFA_L2_SET_RX_MASK_REQ_MASK_MCAST |
- CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST);
+ CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST |
+ CFA_L2_SET_RX_MASK_REQ_MASK_BCAST);
if ((dev->flags & IFF_PROMISC) && bnxt_promisc_ok(bp))
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_PROMISCUOUS;
uc_update = bnxt_uc_list_updated(bp);
+ if (dev->flags & IFF_BROADCAST)
+ mask |= CFA_L2_SET_RX_MASK_REQ_MASK_BCAST;
if (dev->flags & IFF_ALLMULTI) {
mask |= CFA_L2_SET_RX_MASK_REQ_MASK_ALL_MCAST;
vnic->mc_list_count = 0;
@@ -8502,11 +8509,11 @@ int bnxt_get_max_rings(struct bnxt *bp, int *max_rx, int *max_tx, bool shared)
int rx, tx, cp;
_bnxt_get_max_rings(bp, &rx, &tx, &cp);
+ *max_rx = rx;
+ *max_tx = tx;
if (!rx || !tx || !cp)
return -ENOMEM;
- *max_rx = rx;
- *max_tx = tx;
return bnxt_trim_rings(bp, max_rx, max_tx, cp, shared);
}
@@ -8520,8 +8527,11 @@ static int bnxt_get_dflt_rings(struct bnxt *bp, int *max_rx, int *max_tx,
/* Not enough rings, try disabling agg rings. */
bp->flags &= ~BNXT_FLAG_AGG_RINGS;
rc = bnxt_get_max_rings(bp, max_rx, max_tx, shared);
- if (rc)
+ if (rc) {
+ /* set BNXT_FLAG_AGG_RINGS back for consistency */
+ bp->flags |= BNXT_FLAG_AGG_RINGS;
return rc;
+ }
bp->flags |= BNXT_FLAG_NO_AGG_RINGS;
bp->dev->hw_features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
bp->dev->features &= ~(NETIF_F_LRO | NETIF_F_GRO_HW);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 9b14eb610b9f..91575ef97c8c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -1470,7 +1470,6 @@ void bnxt_set_max_func_stat_ctxs(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_cp_rings(struct bnxt *bp);
void bnxt_set_max_func_cp_rings(struct bnxt *bp, unsigned int max);
unsigned int bnxt_get_max_func_irqs(struct bnxt *bp);
-void bnxt_set_max_func_irqs(struct bnxt *bp, unsigned int max);
int bnxt_get_avail_msix(struct bnxt *bp, int num);
int bnxt_reserve_rings(struct bnxt *bp);
void bnxt_tx_disable(struct bnxt *bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 795f45024c20..491bd40a254d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -27,6 +27,15 @@
#define BNXT_FID_INVALID 0xffff
#define VLAN_TCI(vid, prio) ((vid) | ((prio) << VLAN_PRIO_SHIFT))
+#define is_vlan_pcp_wildcarded(vlan_tci_mask) \
+ ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vlan_pcp_exactmatch(vlan_tci_mask) \
+ ((ntohs(vlan_tci_mask) & VLAN_PRIO_MASK) == VLAN_PRIO_MASK)
+#define is_vlan_pcp_zero(vlan_tci) \
+ ((ntohs(vlan_tci) & VLAN_PRIO_MASK) == 0x0000)
+#define is_vid_exactmatch(vlan_tci_mask) \
+ ((ntohs(vlan_tci_mask) & VLAN_VID_MASK) == VLAN_VID_MASK)
+
/* Return the dst fid of the func for flow forwarding
* For PFs: src_fid is the fid of the PF
* For VF-reps: src_fid the fid of the VF
@@ -389,6 +398,21 @@ static bool is_exactmatch(void *mask, int len)
return true;
}
+static bool is_vlan_tci_allowed(__be16 vlan_tci_mask,
+ __be16 vlan_tci)
+{
+ /* VLAN priority must be either exactly zero or fully wildcarded and
+	 * VLAN id must be an exact match.
+ */
+ if (is_vid_exactmatch(vlan_tci_mask) &&
+ ((is_vlan_pcp_exactmatch(vlan_tci_mask) &&
+ is_vlan_pcp_zero(vlan_tci)) ||
+ is_vlan_pcp_wildcarded(vlan_tci_mask)))
+ return true;
+
+ return false;
+}
+
static bool bits_set(void *key, int len)
{
const u8 *p = key;
@@ -803,9 +827,9 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
/* Currently VLAN fields cannot be partial wildcard */
if (bits_set(&flow->l2_key.inner_vlan_tci,
sizeof(flow->l2_key.inner_vlan_tci)) &&
- !is_exactmatch(&flow->l2_mask.inner_vlan_tci,
- sizeof(flow->l2_mask.inner_vlan_tci))) {
- netdev_info(bp->dev, "Wildcard match unsupported for VLAN TCI\n");
+ !is_vlan_tci_allowed(flow->l2_mask.inner_vlan_tci,
+ flow->l2_key.inner_vlan_tci)) {
+ netdev_info(bp->dev, "Unsupported VLAN TCI\n");
return false;
}
if (bits_set(&flow->l2_key.inner_vlan_tpid,
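The VLAN TCI checks added above only accept filters whose VLAN id is fully masked and whose priority bits are either completely wildcarded or exactly matched against zero. A minimal userspace sketch of that logic, using host-order values instead of the driver's __be16 fields and the standard mask constants from if_vlan.h:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VLAN_PRIO_MASK 0xe000
#define VLAN_VID_MASK  0x0fff

static bool vlan_tci_allowed(uint16_t tci_mask, uint16_t tci)
{
        bool vid_exact = (tci_mask & VLAN_VID_MASK) == VLAN_VID_MASK;
        bool pcp_wild  = (tci_mask & VLAN_PRIO_MASK) == 0;
        bool pcp_exact = (tci_mask & VLAN_PRIO_MASK) == VLAN_PRIO_MASK;
        bool pcp_zero  = (tci & VLAN_PRIO_MASK) == 0;

        /* VLAN id must be an exact match; priority must be fully
         * wildcarded or exactly matched against a zero priority.
         */
        return vid_exact && (pcp_wild || (pcp_exact && pcp_zero));
}

int main(void)
{
        printf("%d\n", vlan_tci_allowed(0x0fff, 0x0064)); /* vid exact, pcp wildcarded -> 1 */
        printf("%d\n", vlan_tci_allowed(0xefff, 0x0064)); /* vid+pcp exact, pcp is 0   -> 1 */
        printf("%d\n", vlan_tci_allowed(0xefff, 0x2064)); /* pcp exact but non-zero    -> 0 */
        printf("%d\n", vlan_tci_allowed(0x00ff, 0x0064)); /* partially masked vid      -> 0 */
        return 0;
}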
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
index 347e4f946eb2..840f6e505f73 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ulp.c
@@ -169,7 +169,6 @@ static int bnxt_req_msix_vecs(struct bnxt_en_dev *edev, int ulp_id,
edev->ulp_tbl[ulp_id].msix_requested = avail_msix;
}
bnxt_fill_msix_vecs(bp, ent);
- bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) - avail_msix);
bnxt_set_max_func_cp_rings(bp, max_cp_rings - avail_msix);
edev->flags |= BNXT_EN_FLAG_MSIX_REQUESTED;
return avail_msix;
@@ -192,7 +191,6 @@ static int bnxt_free_msix_vecs(struct bnxt_en_dev *edev, int ulp_id)
msix_requested = edev->ulp_tbl[ulp_id].msix_requested;
bnxt_set_max_func_cp_rings(bp, max_cp_rings + msix_requested);
edev->ulp_tbl[ulp_id].msix_requested = 0;
- bnxt_set_max_func_irqs(bp, bnxt_get_max_func_irqs(bp) + msix_requested);
edev->flags &= ~BNXT_EN_FLAG_MSIX_REQUESTED;
if (netif_running(dev)) {
bnxt_close_nic(bp, true, false);
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 30273a7717e2..4fd829b5e65d 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -660,7 +660,7 @@ static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id,
id_tbl->max = size;
id_tbl->next = next;
spin_lock_init(&id_tbl->lock);
- id_tbl->table = kcalloc(DIV_ROUND_UP(size, 32), 4, GFP_KERNEL);
+ id_tbl->table = kcalloc(BITS_TO_LONGS(size), sizeof(long), GFP_KERNEL);
if (!id_tbl->table)
return -ENOMEM;
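The kcalloc() change above sizes the ID bitmap in whole unsigned longs, which is what the bitmap helpers (find_next_zero_bit() and friends) actually read on 64-bit; the old DIV_ROUND_UP(size, 32) * 4 arithmetic could under-allocate. A small userspace sketch of the two calculations, not driver code, with 129 IDs chosen to show the difference:

#include <stdio.h>
#include <stddef.h>

#define BITS_PER_LONG      (8 * (int)sizeof(long))
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#define BITS_TO_LONGS(n)   DIV_ROUND_UP(n, BITS_PER_LONG)

int main(void)
{
        unsigned int size = 129;        /* bits in the ID table */

        size_t old_bytes = DIV_ROUND_UP(size, 32) * 4;         /* 20 bytes */
        size_t new_bytes = BITS_TO_LONGS(size) * sizeof(long); /* 24 bytes on 64-bit */

        /* Bitmap helpers walk the table one long at a time, so the buffer
         * must be a multiple of sizeof(long) or the final word is read
         * past the end of the allocation.
         */
        printf("old=%zu bytes, new=%zu bytes\n", old_bytes, new_bytes);
        return 0;
}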
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 3be87efdc93d..aa1374d0af93 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -6,11 +6,15 @@
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2005-2016 Broadcom Corporation.
* Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
*
* Firmware is:
* Derived from proprietary unpublished source code,
* Copyright (C) 2000-2016 Broadcom Corporation.
* Copyright (C) 2016-2017 Broadcom Ltd.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
*
* Permission is hereby granted for the distribution of this firmware
* data in hexadecimal or equivalent format, provided this copyright
@@ -9290,6 +9294,15 @@ static int tg3_chip_reset(struct tg3 *tp)
tg3_restore_clk(tp);
+ /* Increase the core clock speed to fix tx timeout issue for 5762
+ * with 100Mbps link speed.
+ */
+ if (tg3_asic_rev(tp) == ASIC_REV_5762) {
+ val = tr32(TG3_CPMU_CLCK_ORIDE_ENABLE);
+ tw32(TG3_CPMU_CLCK_ORIDE_ENABLE, val |
+ TG3_CPMU_MAC_ORIDE_ENABLE);
+ }
+
/* Reprobe ASF enable state. */
tg3_flag_clear(tp, ENABLE_ASF);
tp->phy_flags &= ~(TG3_PHYFLG_1G_ON_VAUX_OK |
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index 1d61aa3efda1..a772a33b685c 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -7,6 +7,8 @@
* Copyright (C) 2004 Sun Microsystems Inc.
* Copyright (C) 2007-2016 Broadcom Corporation.
* Copyright (C) 2016-2017 Broadcom Limited.
+ * Copyright (C) 2018 Broadcom. All Rights Reserved. The term "Broadcom"
+ * refers to Broadcom Inc. and/or its subsidiaries.
*/
#ifndef _T3_H
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 86659823b259..3d45f4c92cf6 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -166,6 +166,7 @@
#define GEM_DCFG6 0x0294 /* Design Config 6 */
#define GEM_DCFG7 0x0298 /* Design Config 7 */
#define GEM_DCFG8 0x029C /* Design Config 8 */
+#define GEM_DCFG10 0x02A4 /* Design Config 10 */
#define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
#define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */
@@ -490,6 +491,12 @@
#define GEM_SCR2CMP_OFFSET 0
#define GEM_SCR2CMP_SIZE 8
+/* Bitfields in DCFG10 */
+#define GEM_TXBD_RDBUFF_OFFSET 12
+#define GEM_TXBD_RDBUFF_SIZE 4
+#define GEM_RXBD_RDBUFF_OFFSET 8
+#define GEM_RXBD_RDBUFF_SIZE 4
+
/* Bitfields in TISUBN */
#define GEM_SUBNSINCR_OFFSET 0
#define GEM_SUBNSINCR_SIZE 16
@@ -635,6 +642,7 @@
#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
+#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1203,6 +1211,9 @@ struct macb {
unsigned int max_tuples;
struct tasklet_struct hresp_err_tasklet;
+
+ int rx_bd_rd_prefetch;
+ int tx_bd_rd_prefetch;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 3e93df5d4e3b..a6c911bb5ce2 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -1811,23 +1811,25 @@ static void macb_free_consistent(struct macb *bp)
{
struct macb_queue *queue;
unsigned int q;
+ int size;
- queue = &bp->queues[0];
bp->macbgem_ops.mog_free_rx_buffers(bp);
- if (queue->rx_ring) {
- dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
- queue->rx_ring, queue->rx_ring_dma);
- queue->rx_ring = NULL;
- }
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
kfree(queue->tx_skb);
queue->tx_skb = NULL;
if (queue->tx_ring) {
- dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
+ size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
+ dma_free_coherent(&bp->pdev->dev, size,
queue->tx_ring, queue->tx_ring_dma);
queue->tx_ring = NULL;
}
+ if (queue->rx_ring) {
+ size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
+ dma_free_coherent(&bp->pdev->dev, size,
+ queue->rx_ring, queue->rx_ring_dma);
+ queue->rx_ring = NULL;
+ }
}
}
@@ -1874,7 +1876,7 @@ static int macb_alloc_consistent(struct macb *bp)
int size;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- size = TX_RING_BYTES(bp);
+ size = TX_RING_BYTES(bp) + bp->tx_bd_rd_prefetch;
queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&queue->tx_ring_dma,
GFP_KERNEL);
@@ -1890,7 +1892,7 @@ static int macb_alloc_consistent(struct macb *bp)
if (!queue->tx_skb)
goto out_err;
- size = RX_RING_BYTES(bp);
+ size = RX_RING_BYTES(bp) + bp->rx_bd_rd_prefetch;
queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
&queue->rx_ring_dma, GFP_KERNEL);
if (!queue->rx_ring)
@@ -3726,6 +3728,8 @@ static int at91ether_init(struct platform_device *pdev)
int err;
u32 reg;
+ bp->queues[0].bp = bp;
+
dev->netdev_ops = &at91ether_netdev_ops;
dev->ethtool_ops = &macb_ethtool_ops;
@@ -3795,7 +3799,7 @@ static const struct macb_config np4_config = {
static const struct macb_config zynqmp_config = {
.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
MACB_CAPS_JUMBO |
- MACB_CAPS_GEM_HAS_PTP,
+ MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -3856,7 +3860,7 @@ static int macb_probe(struct platform_device *pdev)
void __iomem *mem;
const char *mac;
struct macb *bp;
- int err;
+ int err, val;
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mem = devm_ioremap_resource(&pdev->dev, regs);
@@ -3945,6 +3949,18 @@ static int macb_probe(struct platform_device *pdev)
else
dev->max_mtu = ETH_DATA_LEN;
+ if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
+ val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
+ if (val)
+ bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
+ macb_dma_desc_get_size(bp);
+
+ val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
+ if (val)
+ bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
+ macb_dma_desc_get_size(bp);
+ }
+
mac = of_get_mac_address(np);
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
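For reference, the DCFG10 fields read above encode the descriptor read-prefetch depth as a power of two, so the driver appends 2 << (val - 1) extra descriptors' worth of bytes to each ring allocation. A quick sketch of that sizing; the 16-byte descriptor size is only an example of what macb_dma_desc_get_size() might return:

#include <stdio.h>

int main(void)
{
        unsigned int desc_size = 16;    /* example descriptor size in bytes */
        unsigned int val;

        for (val = 0; val <= 4; val++) {
                /* field value 0 means no prefetch; otherwise 2^val descriptors */
                unsigned int extra = val ? (2u << (val - 1)) * desc_size : 0;

                printf("DCFG10 field=%u -> %u extra ring bytes\n", val, extra);
        }
        return 0;
}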
diff --git a/drivers/net/ethernet/cadence/macb_ptp.c b/drivers/net/ethernet/cadence/macb_ptp.c
index 2220c771092b..678835136bf8 100644
--- a/drivers/net/ethernet/cadence/macb_ptp.c
+++ b/drivers/net/ethernet/cadence/macb_ptp.c
@@ -170,10 +170,7 @@ static int gem_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
if (delta > TSU_NSEC_MAX_VAL) {
gem_tsu_get_time(&bp->ptp_clock_info, &now);
- if (sign)
- now = timespec64_sub(now, then);
- else
- now = timespec64_add(now, then);
+ now = timespec64_add(now, then);
gem_tsu_set_time(&bp->ptp_clock_info,
(const struct timespec64 *)&now);
diff --git a/drivers/net/ethernet/calxeda/Kconfig b/drivers/net/ethernet/calxeda/Kconfig
index 07d2201530d2..9fdd496b90ff 100644
--- a/drivers/net/ethernet/calxeda/Kconfig
+++ b/drivers/net/ethernet/calxeda/Kconfig
@@ -1,6 +1,6 @@
config NET_CALXEDA_XGMAC
tristate "Calxeda 1G/10G XGMAC Ethernet driver"
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
depends on ARCH_HIGHBANK || COMPILE_TEST
select CRC32
help
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index 043e3c11c42b..92d88c5f76fb 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -15,7 +15,7 @@ if NET_VENDOR_CAVIUM
config THUNDER_NIC_PF
tristate "Thunder Physical function driver"
- depends on 64BIT
+ depends on 64BIT && PCI
select THUNDER_NIC_BGX
---help---
This driver supports Thunder's NIC physical function.
@@ -28,13 +28,13 @@ config THUNDER_NIC_PF
config THUNDER_NIC_VF
tristate "Thunder Virtual function driver"
imply CAVIUM_PTP
- depends on 64BIT
+ depends on 64BIT && PCI
---help---
This driver supports Thunder's NIC virtual function
config THUNDER_NIC_BGX
tristate "Thunder MAC interface driver (BGX)"
- depends on 64BIT
+ depends on 64BIT && PCI
select PHYLIB
select MDIO_THUNDER
select THUNDER_NIC_RGX
@@ -44,7 +44,7 @@ config THUNDER_NIC_BGX
config THUNDER_NIC_RGX
tristate "Thunder MAC interface driver (RGX)"
- depends on 64BIT
+ depends on 64BIT && PCI
select PHYLIB
select MDIO_THUNDER
---help---
@@ -53,7 +53,7 @@ config THUNDER_NIC_RGX
config CAVIUM_PTP
tristate "Cavium PTP coprocessor as PTP clock"
- depends on 64BIT
+ depends on 64BIT && PCI
imply PTP_1588_CLOCK
default y
---help---
@@ -65,7 +65,7 @@ config CAVIUM_PTP
config LIQUIDIO
tristate "Cavium LiquidIO support"
- depends on 64BIT
+ depends on 64BIT && PCI
depends on MAY_USE_DEVLINK
imply PTP_1588_CLOCK
select FW_LOADER
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8a815bb57177..7e8454d3b1ad 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -91,6 +91,9 @@ static int octeon_console_debug_enabled(u32 console)
*/
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000
+/* time to wait for possible in-flight requests in milliseconds */
+#define WAIT_INFLIGHT_REQUEST msecs_to_jiffies(1000)
+
struct lio_trusted_vf_ctx {
struct completion complete;
int status;
@@ -259,7 +262,7 @@ static inline void pcierror_quiesce_device(struct octeon_device *oct)
force_io_queues_off(oct);
/* To allow for in-flight requests */
- schedule_timeout_uninterruptible(100);
+ schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);
if (wait_for_pending_requests(oct))
dev_err(&oct->pci_dev->dev, "There were pending requests\n");
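The new macro matters because schedule_timeout_uninterruptible() takes jiffies, not milliseconds, so the old literal 100 waited a HZ-dependent amount of time. A simplified userspace model of the conversion; the real msecs_to_jiffies() adds HZ-specific fast paths:

#include <stdio.h>

static unsigned long msecs_to_jiffies_ex(unsigned int ms, unsigned int hz)
{
        return ((unsigned long)ms * hz + 999) / 1000;   /* round up */
}

int main(void)
{
        unsigned int hz_values[] = { 100, 250, 1000 };
        int i;

        for (i = 0; i < 3; i++)
                printf("HZ=%-4u: raw 100 jiffies = %u ms, msecs_to_jiffies(1000) = %lu jiffies\n",
                       hz_values[i], 100 * 1000 / hz_values[i],
                       msecs_to_jiffies_ex(1000, hz_values[i]));
        return 0;
}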
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 3f6afb54a5eb..bb43ddb7539e 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -643,13 +643,21 @@ static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
struct octeon_mgmt *p = netdev_priv(netdev);
- int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;
+ int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;
netdev->mtu = new_mtu;
- cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
+ /* HW lifts the limit if the frame is VLAN tagged
+ * (+4 bytes per each tag, up to two tags)
+ */
+ cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
+ /* Set the hardware to truncate packets larger than the MTU. The jabber
+ * register must be set to a multiple of 8 bytes, so round up. JABBER is
+ * an unconditional limit, so we need to account for two possible VLAN
+ * tags.
+ */
cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
- (size_without_fcs + 7) & 0xfff8);
+ (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
return 0;
}
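A worked example of the register values programmed above, using the standard header sizes from if_ether.h/if_vlan.h and an illustrative 1500-byte MTU; the & 0xfff8 keeps JABBER a multiple of 8 while leaving room for two VLAN tags:

#include <stdio.h>

#define ETH_HLEN    14
#define ETH_FCS_LEN  4
#define VLAN_HLEN    4

int main(void)
{
        int new_mtu = 1500;
        int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;      /* 1518 */
        int jabber = (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8; /* 1528 */

        printf("AGL_GMX_RX_FRM_MAX = %d\n", max_packet);
        printf("AGL_GMX_RX_JABBER  = %d\n", jabber);
        return 0;
}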
diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
index 7b795edd9d3a..a19172dbe6be 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
@@ -51,6 +51,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
+#include <linux/nospec.h>
#include "common.h"
#include "cxgb3_ioctl.h"
@@ -2268,6 +2269,7 @@ static int cxgb_extension_ioctl(struct net_device *dev, void __user *useraddr)
if (t.qset_idx >= nqsets)
return -EINVAL;
+ t.qset_idx = array_index_nospec(t.qset_idx, nqsets);
q = &adapter->params.sge.qset[q1 + t.qset_idx];
t.rspq_size = q->rspq_size;
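array_index_nospec() in the hunk above clamps the user-supplied index so the bounds check also holds under speculative execution. A rough userspace model of its architectural behaviour only; the real helper is branchless, and the qset values here are made up:

#include <stdio.h>
#include <stddef.h>

static size_t index_nospec_model(size_t index, size_t size)
{
        /* the kernel helper yields 0 for any out-of-range index */
        return index < size ? index : 0;
}

int main(void)
{
        int qset_rspq_size[4] = { 64, 128, 256, 512 };
        size_t idx = 7;                         /* user-controlled, out of range */

        idx = index_nospec_model(idx, 4);       /* forced back in bounds */
        printf("rspq_size = %d\n", qset_rspq_size[idx]);
        return 0;
}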
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index dd04a2f89ce6..bc03c175a3cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -263,7 +263,7 @@ static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
"Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
enable ? "set" : "unset", pi->port_id, i, -err);
else
- txq->dcb_prio = value;
+ txq->dcb_prio = enable ? value : 0;
}
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 974a868a4824..3720c3e11ebb 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -8702,7 +8702,7 @@ static int t4_get_flash_params(struct adapter *adap)
};
unsigned int part, manufacturer;
- unsigned int density, size;
+ unsigned int density, size = 0;
u32 flashid = 0;
int ret;
@@ -8772,11 +8772,6 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x22: /* 256MB */
size = 1 << 28;
break;
-
- default:
- dev_err(adap->pdev_dev, "Micron Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
@@ -8792,10 +8787,6 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x17: /* 64MB */
size = 1 << 26;
break;
- default:
- dev_err(adap->pdev_dev, "ISSI Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
@@ -8811,10 +8802,6 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x18: /* 16MB */
size = 1 << 24;
break;
- default:
- dev_err(adap->pdev_dev, "Macronix Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
@@ -8830,17 +8817,21 @@ static int t4_get_flash_params(struct adapter *adap)
case 0x18: /* 16MB */
size = 1 << 24;
break;
- default:
- dev_err(adap->pdev_dev, "Winbond Flash Part has bad size, ID = %#x, Density code = %#x\n",
- flashid, density);
- return -EINVAL;
}
break;
}
- default:
- dev_err(adap->pdev_dev, "Unsupported Flash Part, ID = %#x\n",
- flashid);
- return -EINVAL;
+ }
+
+ /* If we didn't recognize the FLASH part, that's no real issue: the
+ * Hardware/Software contract says that Hardware will _*ALWAYS*_
+ * use a FLASH part which is at least 4MB in size and has 64KB
+ * sectors. The unrecognized FLASH part is likely to be much larger
+ * than 4MB, but that's all we really need.
+ */
+ if (size == 0) {
+ dev_warn(adap->pdev_dev, "Unknown Flash Part, ID = %#x, assuming 4MB\n",
+ flashid);
+ size = 1 << 22;
}
/* Store decoded Flash size and fall through into vetting code. */
diff --git a/drivers/net/ethernet/cirrus/Kconfig b/drivers/net/ethernet/cirrus/Kconfig
index 5ab912937aff..ec0b545197e2 100644
--- a/drivers/net/ethernet/cirrus/Kconfig
+++ b/drivers/net/ethernet/cirrus/Kconfig
@@ -19,6 +19,7 @@ if NET_VENDOR_CIRRUS
config CS89x0
tristate "CS89x0 support"
depends on ISA || EISA || ARM
+ depends on !PPC32
---help---
Support for CS89x0 chipset based Ethernet cards. If you have a
network (Ethernet) card of this type, say Y and read the file
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 973c1fb70d09..99038dfc7fbe 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -79,7 +79,6 @@ void enic_rfs_flw_tbl_init(struct enic *enic)
enic->rfs_h.max = enic->config.num_arfs;
enic->rfs_h.free = enic->rfs_h.max;
enic->rfs_h.toclean = 0;
- enic_rfs_timer_start(enic);
}
void enic_rfs_flw_tbl_free(struct enic *enic)
@@ -88,7 +87,6 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
enic_rfs_timer_stop(enic);
spin_lock_bh(&enic->rfs_h.lock);
- enic->rfs_h.free = 0;
for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
struct hlist_head *hhead;
struct hlist_node *tmp;
@@ -99,6 +97,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
enic_delfltr(enic, n->fltr_id);
hlist_del(&n->node);
kfree(n);
+ enic->rfs_h.free++;
}
}
spin_unlock_bh(&enic->rfs_h.lock);
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 30d2eaa18c04..90c645b8538e 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1920,7 +1920,7 @@ static int enic_open(struct net_device *netdev)
{
struct enic *enic = netdev_priv(netdev);
unsigned int i;
- int err;
+ int err, ret;
err = enic_request_intr(enic);
if (err) {
@@ -1971,16 +1971,15 @@ static int enic_open(struct net_device *netdev)
vnic_intr_unmask(&enic->intr[i]);
enic_notify_timer_start(enic);
- enic_rfs_flw_tbl_init(enic);
+ enic_rfs_timer_start(enic);
return 0;
err_out_free_rq:
for (i = 0; i < enic->rq_count; i++) {
- err = vnic_rq_disable(&enic->rq[i]);
- if (err)
- return err;
- vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
+ ret = vnic_rq_disable(&enic->rq[i]);
+ if (!ret)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
}
enic_dev_notify_unset(enic);
err_out_free_intr:
@@ -2904,6 +2903,7 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
timer_setup(&enic->notify_timer, enic_notify_timer, 0);
+ enic_rfs_flw_tbl_init(enic);
enic_set_rx_coal_setting(enic);
INIT_WORK(&enic->reset, enic_reset);
INIT_WORK(&enic->tx_hang_reset, enic_tx_hang_reset);
diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
index 78db8e62a83f..ed6c76d20b45 100644
--- a/drivers/net/ethernet/faraday/ftgmac100.c
+++ b/drivers/net/ethernet/faraday/ftgmac100.c
@@ -1735,8 +1735,8 @@ static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
if (unlikely(nd->state != ncsi_dev_state_functional))
return;
- netdev_info(nd->dev, "NCSI interface %s\n",
- nd->link_up ? "up" : "down");
+ netdev_dbg(nd->dev, "NCSI interface %s\n",
+ nd->link_up ? "up" : "down");
}
static void ftgmac100_setup_clk(struct ftgmac100 *priv)
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index 5f4e1ffa7b95..ab02057ac730 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16
+/* The DPAA requires 256 bytes reserved and mapped for the SGT */
+#define DPAA_SGT_SIZE 256
+
/* Values for the L3R field of the FM Parse Results
*/
/* L3 Type field: First IP Present IPv4 */
@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags;
- dma_unmap_single(dev, addr, qm_fd_get_offset(fd) +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
+ dma_unmap_single(dev, addr,
+ qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
dma_dir);
/* The sgt buffer has been allocated with netdev_alloc_frag(),
@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
void *sgt_buf;
/* get a page frag to store the SGTable */
- sz = SKB_DATA_ALIGN(priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags));
+ sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
sgt_buf = netdev_alloc_frag(sz);
if (unlikely(!sgt_buf)) {
netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
skbh = (struct sk_buff **)buffer_start;
*skbh = skb;
- addr = dma_map_single(dev, buffer_start, priv->tx_headroom +
- sizeof(struct qm_sg_entry) * (1 + nr_frags),
- dma_dir);
+ addr = dma_map_single(dev, buffer_start,
+ priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
if (unlikely(dma_mapping_error(dev, addr))) {
dev_err(dev, "DMA mapping failed");
err = -EINVAL;
diff --git a/drivers/net/ethernet/freescale/fman/fman_port.c b/drivers/net/ethernet/freescale/fman/fman_port.c
index ce6e24c74978..ecbf6187e13a 100644
--- a/drivers/net/ethernet/freescale/fman/fman_port.c
+++ b/drivers/net/ethernet/freescale/fman/fman_port.c
@@ -324,6 +324,10 @@ struct fman_port_qmi_regs {
#define HWP_HXS_PHE_REPORT 0x00000800
#define HWP_HXS_PCAC_PSTAT 0x00000100
#define HWP_HXS_PCAC_PSTOP 0x00000001
+#define HWP_HXS_TCP_OFFSET 0xA
+#define HWP_HXS_UDP_OFFSET 0xB
+#define HWP_HXS_SH_PAD_REM 0x80000000
+
struct fman_port_hwp_regs {
struct {
u32 ssa; /* Soft Sequence Attachment */
@@ -728,6 +732,10 @@ static void init_hwp(struct fman_port *port)
iowrite32be(0xffffffff, &regs->pmda[i].lcv);
}
+ /* Short packet padding removal from checksum calculation */
+ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_TCP_OFFSET].ssa);
+ iowrite32be(HWP_HXS_SH_PAD_REM, &regs->pmda[HWP_HXS_UDP_OFFSET].ssa);
+
start_port_hwp(port);
}
diff --git a/drivers/net/ethernet/hisilicon/Kconfig b/drivers/net/ethernet/hisilicon/Kconfig
index 8bcf470ff5f3..fb1a7251f45d 100644
--- a/drivers/net/ethernet/hisilicon/Kconfig
+++ b/drivers/net/ethernet/hisilicon/Kconfig
@@ -5,7 +5,7 @@
config NET_VENDOR_HISILICON
bool "Hisilicon devices"
default y
- depends on (OF || ACPI) && HAS_DMA
+ depends on OF || ACPI
depends on ARM || ARM64 || COMPILE_TEST
---help---
If you have a network (Ethernet) card belonging to this class, say Y.
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index e2e5cdc7119c..4c0f7eda1166 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -439,6 +439,7 @@ static void rx_free_irq(struct hinic_rxq *rxq)
{
struct hinic_rq *rq = rxq->rq;
+ irq_set_affinity_hint(rq->irq, NULL);
free_irq(rq->irq, rxq);
rx_del_napi(rxq);
}
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index 9128858479c4..2353ec829c04 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -229,6 +229,7 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
txq->txq_stats.tx_busy++;
u64_stats_update_end(&txq->txq_stats.syncp);
err = NETDEV_TX_BUSY;
+ wqe_size = 0;
goto flush_skbs;
}
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index d0e196bff081..ffe7acbeaa22 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -329,7 +329,8 @@ static void replenish_rx_pool(struct ibmvnic_adapter *adapter,
return;
failure:
- dev_info(dev, "replenish pools failure\n");
+ if (lpar_rc != H_PARAMETER && lpar_rc != H_CLOSED)
+ dev_err_ratelimited(dev, "rx: replenish packet buffer failed\n");
pool->free_map[pool->next_free] = index;
pool->rx_buff[index].skb = NULL;
@@ -1617,7 +1618,8 @@ static int ibmvnic_xmit(struct sk_buff *skb, struct net_device *netdev)
&tx_crq);
}
if (lpar_rc != H_SUCCESS) {
- dev_err(dev, "tx failed with code %ld\n", lpar_rc);
+ if (lpar_rc != H_CLOSED && lpar_rc != H_PARAMETER)
+ dev_err_ratelimited(dev, "tx: send failed\n");
dev_kfree_skb_any(skb);
tx_buff->skb = NULL;
@@ -1825,8 +1827,8 @@ static int do_reset(struct ibmvnic_adapter *adapter,
rc = ibmvnic_login(netdev);
if (rc) {
- adapter->state = VNIC_PROBED;
- return 0;
+ adapter->state = reset_state;
+ return rc;
}
if (adapter->reset_reason == VNIC_RESET_CHANGE_PARAM ||
@@ -3204,6 +3206,25 @@ static union ibmvnic_crq *ibmvnic_next_crq(struct ibmvnic_adapter *adapter)
return crq;
}
+static void print_subcrq_error(struct device *dev, int rc, const char *func)
+{
+ switch (rc) {
+ case H_PARAMETER:
+ dev_warn_ratelimited(dev,
+ "%s failed: Send request is malformed or adapter failover pending. (rc=%d)\n",
+ func, rc);
+ break;
+ case H_CLOSED:
+ dev_warn_ratelimited(dev,
+ "%s failed: Backing queue closed. Adapter is down or failover pending. (rc=%d)\n",
+ func, rc);
+ break;
+ default:
+ dev_err_ratelimited(dev, "%s failed: (rc=%d)\n", func, rc);
+ break;
+ }
+}
+
static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
union sub_crq *sub_crq)
{
@@ -3230,11 +3251,8 @@ static int send_subcrq(struct ibmvnic_adapter *adapter, u64 remote_handle,
cpu_to_be64(u64_crq[2]),
cpu_to_be64(u64_crq[3]));
- if (rc) {
- if (rc == H_CLOSED)
- dev_warn(dev, "CRQ Queue closed\n");
- dev_err(dev, "Send error (rc=%d)\n", rc);
- }
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
return rc;
}
@@ -3252,11 +3270,8 @@ static int send_subcrq_indirect(struct ibmvnic_adapter *adapter,
cpu_to_be64(remote_handle),
ioba, num_entries);
- if (rc) {
- if (rc == H_CLOSED)
- dev_warn(dev, "CRQ Queue closed\n");
- dev_err(dev, "Send (indirect) error (rc=%d)\n", rc);
- }
+ if (rc)
+ print_subcrq_error(dev, rc, __func__);
return rc;
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 8ffb7454e67c..b151ae316546 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -2103,9 +2103,8 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
unsigned int truesize = i40e_rx_pg_size(rx_ring) / 2;
#else
unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) +
- SKB_DATA_ALIGN(I40E_SKB_PAD +
- (xdp->data_end -
- xdp->data_hard_start));
+ SKB_DATA_ALIGN(xdp->data_end -
+ xdp->data_hard_start);
#endif
struct sk_buff *skb;
@@ -2124,7 +2123,7 @@ static struct sk_buff *i40e_build_skb(struct i40e_ring *rx_ring,
return NULL;
/* update pointers within the skb to store the data */
- skb_reserve(skb, I40E_SKB_PAD + (xdp->data - xdp->data_hard_start));
+ skb_reserve(skb, xdp->data - xdp->data_hard_start);
__skb_put(skb, xdp->data_end - xdp->data);
if (metasize)
skb_metadata_set(skb, metasize);
@@ -2200,9 +2199,10 @@ static bool i40e_is_non_eop(struct i40e_ring *rx_ring,
return true;
}
-#define I40E_XDP_PASS 0
-#define I40E_XDP_CONSUMED 1
-#define I40E_XDP_TX 2
+#define I40E_XDP_PASS 0
+#define I40E_XDP_CONSUMED BIT(0)
+#define I40E_XDP_TX BIT(1)
+#define I40E_XDP_REDIR BIT(2)
static int i40e_xmit_xdp_ring(struct xdp_frame *xdpf,
struct i40e_ring *xdp_ring);
@@ -2249,7 +2249,7 @@ static struct sk_buff *i40e_run_xdp(struct i40e_ring *rx_ring,
break;
case XDP_REDIRECT:
err = xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog);
- result = !err ? I40E_XDP_TX : I40E_XDP_CONSUMED;
+ result = !err ? I40E_XDP_REDIR : I40E_XDP_CONSUMED;
break;
default:
bpf_warn_invalid_xdp_action(act);
@@ -2312,7 +2312,8 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
struct sk_buff *skb = rx_ring->skb;
u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
- bool failure = false, xdp_xmit = false;
+ unsigned int xdp_xmit = 0;
+ bool failure = false;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -2373,8 +2374,10 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
}
if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -I40E_XDP_TX) {
- xdp_xmit = true;
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (I40E_XDP_TX | I40E_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
i40e_rx_buffer_flip(rx_ring, rx_buffer, size);
} else {
rx_buffer->pagecnt_bias++;
@@ -2428,12 +2431,14 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
total_rx_packets++;
}
- if (xdp_xmit) {
+ if (xdp_xmit & I40E_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & I40E_XDP_TX) {
struct i40e_ring *xdp_ring =
rx_ring->vsi->xdp_rings[rx_ring->queue_index];
i40e_xdp_ring_update_tail(xdp_ring);
- xdp_do_flush_map();
}
rx_ring->skb = skb;
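The I40E_XDP_* values above (and the matching IXGBE_XDP_* change further down) become individual bits so that one NAPI poll can remember it saw both XDP_TX and XDP_REDIRECT results and flush each path exactly once. A small sketch of that accumulation with illustrative names:

#include <stdio.h>

#define XDP_RES_PASS     0u
#define XDP_RES_CONSUMED (1u << 0)
#define XDP_RES_TX       (1u << 1)
#define XDP_RES_REDIR    (1u << 2)

int main(void)
{
        unsigned int results[] = { XDP_RES_TX, XDP_RES_REDIR,
                                   XDP_RES_CONSUMED, XDP_RES_TX };
        unsigned int xdp_xmit = 0;
        unsigned int i;

        for (i = 0; i < 4; i++)
                if (results[i] & (XDP_RES_TX | XDP_RES_REDIR))
                        xdp_xmit |= results[i];

        if (xdp_xmit & XDP_RES_REDIR)
                printf("flush redirect maps once per poll\n");
        if (xdp_xmit & XDP_RES_TX)
                printf("bump the XDP TX ring tail once per poll\n");
        return 0;
}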
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 3f5c350716bb..0bd1294ba517 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -1871,7 +1871,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
if (enable_addr != 0)
rar_high |= IXGBE_RAH_AV;
+ /* Record lower 32 bits of MAC address and then make
+ * sure that write is flushed to hardware before writing
+ * the upper 16 bits and setting the valid bit.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low);
+ IXGBE_WRITE_FLUSH(hw);
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
return 0;
@@ -1903,8 +1908,13 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index)
rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(index));
rar_high &= ~(0x0000FFFF | IXGBE_RAH_AV);
- IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
+ /* Clear the address valid bit and upper 16 bits of the address
+ * before clearing the lower bits. This way we aren't updating
+ * a live filter.
+ */
IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high);
+ IXGBE_WRITE_FLUSH(hw);
+ IXGBE_WRITE_REG(hw, IXGBE_RAL(index), 0);
/* clear VMDq pool/queue selection for this RAR */
hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
index c116f459945d..da4322e4daed 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ipsec.c
@@ -839,7 +839,7 @@ int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
}
itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
- if (unlikely(itd->sa_idx > IXGBE_IPSEC_MAX_SA_COUNT)) {
+ if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
__func__, itd->sa_idx, xs->xso.offload_handle);
return 0;
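The > to >= change above is a plain off-by-one fix: with N SA slots the valid indices are 0..N-1. A trivial illustration; the 1024 count is only an example, not the driver's actual IXGBE_IPSEC_MAX_SA_COUNT:

#include <stdio.h>

#define MAX_SA_COUNT 1024       /* illustrative table size */

int main(void)
{
        unsigned int sa_idx = MAX_SA_COUNT;     /* first invalid index */

        printf("old check (>)  rejects it: %s\n", sa_idx >  MAX_SA_COUNT ? "yes" : "no");
        printf("new check (>=) rejects it: %s\n", sa_idx >= MAX_SA_COUNT ? "yes" : "no");
        return 0;
}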
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 3e87dbbc9024..62e57b05a0ae 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -2186,9 +2186,10 @@ static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring,
return skb;
}
-#define IXGBE_XDP_PASS 0
-#define IXGBE_XDP_CONSUMED 1
-#define IXGBE_XDP_TX 2
+#define IXGBE_XDP_PASS 0
+#define IXGBE_XDP_CONSUMED BIT(0)
+#define IXGBE_XDP_TX BIT(1)
+#define IXGBE_XDP_REDIR BIT(2)
static int ixgbe_xmit_xdp_ring(struct ixgbe_adapter *adapter,
struct xdp_frame *xdpf);
@@ -2225,7 +2226,7 @@ static struct sk_buff *ixgbe_run_xdp(struct ixgbe_adapter *adapter,
case XDP_REDIRECT:
err = xdp_do_redirect(adapter->netdev, xdp, xdp_prog);
if (!err)
- result = IXGBE_XDP_TX;
+ result = IXGBE_XDP_REDIR;
else
result = IXGBE_XDP_CONSUMED;
break;
@@ -2285,7 +2286,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
unsigned int mss = 0;
#endif /* IXGBE_FCOE */
u16 cleaned_count = ixgbe_desc_unused(rx_ring);
- bool xdp_xmit = false;
+ unsigned int xdp_xmit = 0;
struct xdp_buff xdp;
xdp.rxq = &rx_ring->xdp_rxq;
@@ -2328,8 +2329,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
}
if (IS_ERR(skb)) {
- if (PTR_ERR(skb) == -IXGBE_XDP_TX) {
- xdp_xmit = true;
+ unsigned int xdp_res = -PTR_ERR(skb);
+
+ if (xdp_res & (IXGBE_XDP_TX | IXGBE_XDP_REDIR)) {
+ xdp_xmit |= xdp_res;
ixgbe_rx_buffer_flip(rx_ring, rx_buffer, size);
} else {
rx_buffer->pagecnt_bias++;
@@ -2401,7 +2404,10 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
total_rx_packets++;
}
- if (xdp_xmit) {
+ if (xdp_xmit & IXGBE_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & IXGBE_XDP_TX) {
struct ixgbe_ring *ring = adapter->xdp_ring[smp_processor_id()];
/* Force memory writes to complete before letting h/w
@@ -2409,8 +2415,6 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
*/
wmb();
writel(ring->next_to_use, ring->tail);
-
- xdp_do_flush_map();
}
u64_stats_update_begin(&rx_ring->syncp);
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index cc2f7701e71e..f33fd22b351c 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -18,8 +18,8 @@ if NET_VENDOR_MARVELL
config MV643XX_ETH
tristate "Marvell Discovery (643XX) and Orion ethernet support"
- depends on (MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST) && INET
- depends on HAS_DMA
+ depends on MV64X60 || PPC32 || PLAT_ORION || COMPILE_TEST
+ depends on INET
select PHYLIB
select MVMDIO
---help---
@@ -58,7 +58,6 @@ config MVNETA_BM_ENABLE
config MVNETA
tristate "Marvell Armada 370/38x/XP/37xx network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
- depends on HAS_DMA
select MVMDIO
select PHYLINK
---help---
@@ -84,7 +83,6 @@ config MVNETA_BM
config MVPP2
tristate "Marvell Armada 375/7K/8K network interface support"
depends on ARCH_MVEBU || COMPILE_TEST
- depends on HAS_DMA
select MVMDIO
select PHYLINK
---help---
@@ -93,7 +91,7 @@ config MVPP2
config PXA168_ETH
tristate "Marvell pxa168 ethernet support"
- depends on HAS_IOMEM && HAS_DMA
+ depends on HAS_IOMEM
depends on CPU_PXA168 || ARCH_BERLIN || COMPILE_TEST
select PHYLIB
---help---
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 17a904cc6a5e..0ad2f3f7da85 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1932,7 +1932,7 @@ static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo,
rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
index = rx_desc - rxq->descs;
data = rxq->buf_virt_addr[index];
- phys_addr = rx_desc->buf_phys_addr;
+ phys_addr = rx_desc->buf_phys_addr - pp->rx_offset_correction;
if (!mvneta_rxq_desc_is_first_last(rx_status) ||
(rx_status & MVNETA_RXD_ERR_SUMMARY)) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 9f54ccbddea7..3360f7b9ee73 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -474,10 +474,10 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
{
const struct mlx4_en_frag_info *frag_info = priv->frag_info;
unsigned int truesize = 0;
+ bool release = true;
int nr, frag_size;
struct page *page;
dma_addr_t dma;
- bool release;
/* Collect used fragments while replacing them in the HW descriptors */
for (nr = 0;; frags++) {
@@ -500,7 +500,11 @@ static int mlx4_en_complete_rx_desc(struct mlx4_en_priv *priv,
release = page_count(page) != 1 ||
page_is_pfmemalloc(page) ||
page_to_nid(page) != numa_mem_id();
- } else {
+ } else if (!priv->rx_headroom) {
+ /* rx_headroom for non XDP setup is always 0.
+ * When XDP is set, the above condition will
+ * guarantee page is always released.
+ */
u32 sz_align = ALIGN(frag_size, SMP_CACHE_BYTES);
frags->page_offset += sz_align;
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 7b1b5ac986d0..31bd56727022 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2958,7 +2958,7 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
u32 srqn = qp_get_srqn(qpc) & 0xffffff;
int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
struct res_srq *srq;
- int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
+ int local_qpn = vhcr->in_modifier & 0xffffff;
err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
index 323ffe8bf7e4..456f30007ad6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/alloc.c
@@ -123,7 +123,7 @@ int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
int i;
buf->size = size;
- buf->npages = 1 << get_order(size);
+ buf->npages = DIV_ROUND_UP(size, PAGE_SIZE);
buf->page_shift = PAGE_SHIFT;
buf->frags = kcalloc(buf->npages, sizeof(struct mlx5_buf_list),
GFP_KERNEL);
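DIV_ROUND_UP(size, PAGE_SIZE) above allocates exactly one fragment per page, whereas 1 << get_order(size) rounds up to a power-of-two page count. A small sketch of the difference, assuming 4 KiB pages and a 5-page buffer:

#include <stdio.h>

#define EX_PAGE_SIZE 4096u
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

static unsigned int ex_get_order(unsigned int size)
{
        unsigned int order = 0;

        while ((EX_PAGE_SIZE << order) < size)
                order++;
        return order;
}

int main(void)
{
        unsigned int size = 5 * EX_PAGE_SIZE;

        printf("old npages = %u, new npages = %u\n",
               1u << ex_get_order(size), DIV_ROUND_UP(size, EX_PAGE_SIZE));
        /* prints "old npages = 8, new npages = 5" */
        return 0;
}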
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index 487388aed98f..384c1fa49081 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -807,6 +807,7 @@ static void cmd_work_handler(struct work_struct *work)
unsigned long flags;
bool poll_cmd = ent->polling;
int alloc_ret;
+ int cmd_mode;
sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
down(sem);
@@ -853,6 +854,7 @@ static void cmd_work_handler(struct work_struct *work)
set_signature(ent, !cmd->checksum_disabled);
dump_command(dev, ent, 1);
ent->ts1 = ktime_get_ns();
+ cmd_mode = cmd->mode;
if (ent->callback)
schedule_delayed_work(&ent->cb_timeout_work, cb_timeout);
@@ -877,7 +879,7 @@ static void cmd_work_handler(struct work_struct *work)
iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
mmiowb();
/* if not in polling don't use ent after this point */
- if (cmd->mode == CMD_MODE_POLLING || poll_cmd) {
+ if (cmd_mode == CMD_MODE_POLLING || poll_cmd) {
poll_timeout(ent);
/* make sure we read the descriptor after ownership is SW */
rmb();
@@ -1276,7 +1278,7 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
{
struct mlx5_core_dev *dev = filp->private_data;
struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
- char outlen_str[8];
+ char outlen_str[8] = {0};
int outlen;
void *ptr;
int err;
@@ -1291,8 +1293,6 @@ static ssize_t outlen_write(struct file *filp, const char __user *buf,
if (copy_from_user(outlen_str, buf, count))
return -EFAULT;
- outlen_str[7] = 0;
-
err = sscanf(outlen_str, "%d", &outlen);
if (err < 0)
return err;
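Zero-initializing outlen_str above guarantees sscanf() always sees a NUL-terminated string, since copy_from_user() does not terminate the buffer. A userspace stand-in for the same pattern:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char user_buf[7] = { '1', '2', '3', '4', '5', '6', '7' };     /* no NUL */
        char outlen_str[8] = {0};
        int outlen;

        memcpy(outlen_str, user_buf, sizeof(user_buf)); /* copy_from_user() stand-in */
        if (sscanf(outlen_str, "%d", &outlen) == 1)
                printf("outlen = %d\n", outlen);
        return 0;
}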
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
index 75e4308ba786..d258bb679271 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_arfs.c
@@ -381,14 +381,14 @@ static void arfs_may_expire_flow(struct mlx5e_priv *priv)
HLIST_HEAD(del_list);
spin_lock_bh(&priv->fs.arfs.arfs_lock);
mlx5e_for_each_arfs_rule(arfs_rule, htmp, priv->fs.arfs.arfs_tables, i, j) {
- if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
- break;
if (!work_pending(&arfs_rule->arfs_work) &&
rps_may_expire_flow(priv->netdev,
arfs_rule->rxq, arfs_rule->flow_id,
arfs_rule->filter_id)) {
hlist_del_init(&arfs_rule->hlist);
hlist_add_head(&arfs_rule->hlist, &del_list);
+ if (quota++ > MLX5E_ARFS_EXPIRY_QUOTA)
+ break;
}
}
spin_unlock_bh(&priv->fs.arfs.arfs_lock);
@@ -711,6 +711,9 @@ int mlx5e_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
skb->protocol != htons(ETH_P_IPV6))
return -EPROTONOSUPPORT;
+ if (skb->encapsulation)
+ return -EPROTONOSUPPORT;
+
arfs_t = arfs_get_table(arfs, arfs_get_ip_proto(skb), skb->protocol);
if (!arfs_t)
return -EPROTONOSUPPORT;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
index 0a52f31fef37..86bc9ac99586 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_dcbnl.c
@@ -275,7 +275,8 @@ int mlx5e_dcbnl_ieee_setets_core(struct mlx5e_priv *priv, struct ieee_ets *ets)
}
static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
- struct ieee_ets *ets)
+ struct ieee_ets *ets,
+ bool zero_sum_allowed)
{
bool have_ets_tc = false;
int bw_sum = 0;
@@ -300,8 +301,9 @@ static int mlx5e_dbcnl_validate_ets(struct net_device *netdev,
}
if (have_ets_tc && bw_sum != 100) {
- netdev_err(netdev,
- "Failed to validate ETS: BW sum is illegal\n");
+ if (bw_sum || (!bw_sum && !zero_sum_allowed))
+ netdev_err(netdev,
+ "Failed to validate ETS: BW sum is illegal\n");
return -EINVAL;
}
return 0;
@@ -316,7 +318,7 @@ static int mlx5e_dcbnl_ieee_setets(struct net_device *netdev,
if (!MLX5_CAP_GEN(priv->mdev, ets))
return -EOPNOTSUPP;
- err = mlx5e_dbcnl_validate_ets(netdev, ets);
+ err = mlx5e_dbcnl_validate_ets(netdev, ets, false);
if (err)
return err;
@@ -642,12 +644,9 @@ static u8 mlx5e_dcbnl_setall(struct net_device *netdev)
ets.prio_tc[i]);
}
- err = mlx5e_dbcnl_validate_ets(netdev, &ets);
- if (err) {
- netdev_err(netdev,
- "%s, Failed to validate ETS: %d\n", __func__, err);
+ err = mlx5e_dbcnl_validate_ets(netdev, &ets, true);
+ if (err)
goto out;
- }
err = mlx5e_dcbnl_ieee_setets_core(priv, &ets);
if (err) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 56c1b6f5593e..dae4156a710d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2846,7 +2846,7 @@ void mlx5e_activate_priv_channels(struct mlx5e_priv *priv)
mlx5e_activate_channels(&priv->channels);
netif_tx_start_all_queues(priv->netdev);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_add_sqs_fwd_rules(priv);
mlx5e_wait_channels_min_rx_wqes(&priv->channels);
@@ -2857,7 +2857,7 @@ void mlx5e_deactivate_priv_channels(struct mlx5e_priv *priv)
{
mlx5e_redirect_rqts_to_drop(priv);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_remove_sqs_fwd_rules(priv);
/* FIXME: This is a W/A only for tx timeout watch dog false alarm when
@@ -4597,7 +4597,7 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev)
mlx5e_set_netdev_dev_addr(netdev);
#if IS_ENABLED(CONFIG_MLX5_ESWITCH)
- if (MLX5_VPORT_MANAGER(mdev))
+ if (MLX5_ESWITCH_MANAGER(mdev))
netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
@@ -4753,7 +4753,7 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
mlx5e_enable_async_events(priv);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_register_vport_reps(priv);
if (netdev->reg_state != NETREG_REGISTERED)
@@ -4788,7 +4788,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
queue_work(priv->wq, &priv->set_rx_mode_work);
- if (MLX5_VPORT_MANAGER(priv->mdev))
+ if (MLX5_ESWITCH_MANAGER(priv->mdev))
mlx5e_unregister_vport_reps(priv);
mlx5e_disable_async_events(priv);
@@ -4972,7 +4972,7 @@ static void *mlx5e_add(struct mlx5_core_dev *mdev)
return NULL;
#ifdef CONFIG_MLX5_ESWITCH
- if (MLX5_VPORT_MANAGER(mdev)) {
+ if (MLX5_ESWITCH_MANAGER(mdev)) {
rpriv = mlx5e_alloc_nic_rep_priv(mdev);
if (!rpriv) {
mlx5_core_warn(mdev, "Failed to alloc NIC rep priv data\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 57987f6546e8..2b8040a3cdbd 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -823,7 +823,7 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
struct mlx5e_rep_priv *rpriv = priv->ppriv;
struct mlx5_eswitch_rep *rep;
- if (!MLX5_CAP_GEN(priv->mdev, vport_group_manager))
+ if (!MLX5_ESWITCH_MANAGER(priv->mdev))
return false;
rep = rpriv->rep;
@@ -837,8 +837,12 @@ bool mlx5e_is_uplink_rep(struct mlx5e_priv *priv)
static bool mlx5e_is_vf_vport_rep(struct mlx5e_priv *priv)
{
struct mlx5e_rep_priv *rpriv = priv->ppriv;
- struct mlx5_eswitch_rep *rep = rpriv->rep;
+ struct mlx5_eswitch_rep *rep;
+ if (!MLX5_ESWITCH_MANAGER(priv->mdev))
+ return false;
+
+ rep = rpriv->rep;
if (rep && rep->vport != FDB_UPLINK_VPORT)
return true;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index 0edf4751a8ba..3a2c4e548226 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -1957,6 +1957,10 @@ static bool actions_match_supported(struct mlx5e_priv *priv,
else
actions = flow->nic_attr->action;
+ if (flow->flags & MLX5E_TC_FLOW_EGRESS &&
+ !(actions & MLX5_FLOW_CONTEXT_ACTION_DECAP))
+ return false;
+
if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR)
return modify_header_match_supported(&parse_attr->spec, exts);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f63dfbcd29fe..dd01ad4c0b54 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -1594,17 +1594,15 @@ static void esw_disable_vport(struct mlx5_eswitch *esw, int vport_num)
}
/* Public E-Switch API */
-#define ESW_ALLOWED(esw) ((esw) && MLX5_VPORT_MANAGER((esw)->dev))
+#define ESW_ALLOWED(esw) ((esw) && MLX5_ESWITCH_MANAGER((esw)->dev))
+
int mlx5_eswitch_enable_sriov(struct mlx5_eswitch *esw, int nvfs, int mode)
{
int err;
int i, enabled_events;
- if (!ESW_ALLOWED(esw))
- return 0;
-
- if (!MLX5_CAP_GEN(esw->dev, eswitch_flow_table) ||
+ if (!ESW_ALLOWED(esw) ||
!MLX5_CAP_ESW_FLOWTABLE_FDB(esw->dev, ft_support)) {
esw_warn(esw->dev, "E-Switch FDB is not supported, aborting ...\n");
return -EOPNOTSUPP;
@@ -1806,7 +1804,7 @@ int mlx5_eswitch_set_vport_mac(struct mlx5_eswitch *esw,
u64 node_guid;
int err = 0;
- if (!ESW_ALLOWED(esw))
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport) || is_multicast_ether_addr(mac))
return -EINVAL;
@@ -1883,7 +1881,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
{
struct mlx5_vport *evport;
- if (!ESW_ALLOWED(esw))
+ if (!MLX5_CAP_GEN(esw->dev, vport_group_manager))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
@@ -2218,6 +2216,6 @@ free_out:
u8 mlx5_eswitch_mode(struct mlx5_eswitch *esw)
{
- return esw->mode;
+ return ESW_ALLOWED(esw) ? esw->mode : SRIOV_NONE;
}
EXPORT_SYMBOL_GPL(mlx5_eswitch_mode);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index cecd201f0b73..91f1209886ff 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1079,8 +1079,8 @@ static int mlx5_devlink_eswitch_check(struct devlink *devlink)
if (MLX5_CAP_GEN(dev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
return -EOPNOTSUPP;
- if (!MLX5_CAP_GEN(dev, vport_group_manager))
- return -EOPNOTSUPP;
+	if (!MLX5_ESWITCH_MANAGER(dev))
+ return -EPERM;
if (dev->priv.eswitch->mode == SRIOV_NONE)
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 49a75d31185e..6ddb2565884d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -32,6 +32,7 @@
#include <linux/mutex.h>
#include <linux/mlx5/driver.h>
+#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "fs_core.h"
@@ -1886,7 +1887,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
if (flow_act->action == MLX5_FLOW_CONTEXT_ACTION_FWD_NEXT_PRIO) {
if (!fwd_next_prio_supported(ft))
return ERR_PTR(-EOPNOTSUPP);
- if (dest)
+ if (dest_num)
return ERR_PTR(-EINVAL);
mutex_lock(&root->chain_lock);
next_ft = find_next_chained_ft(prio);
@@ -2652,7 +2653,7 @@ int mlx5_init_fs(struct mlx5_core_dev *dev)
goto err;
}
- if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ if (MLX5_ESWITCH_MANAGER(dev)) {
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, ft_support)) {
err = init_fdb_root_ns(steering);
if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index afd9f4fa22f4..41ad24f0de2c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -32,6 +32,7 @@
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
+#include <linux/mlx5/eswitch.h>
#include <linux/module.h>
#include "mlx5_core.h"
#include "../../mlxfw/mlxfw.h"
@@ -159,13 +160,13 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
}
if (MLX5_CAP_GEN(dev, vport_group_manager) &&
- MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH_FLOW_TABLE);
if (err)
return err;
}
- if (MLX5_CAP_GEN(dev, eswitch_flow_table)) {
+ if (MLX5_ESWITCH_MANAGER(dev)) {
err = mlx5_core_get_caps(dev, MLX5_CAP_ESWITCH);
if (err)
return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
index 1e062e6b2587..3f767cde4c1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/clock.c
@@ -488,6 +488,7 @@ void mlx5_pps_event(struct mlx5_core_dev *mdev,
void mlx5_init_clock(struct mlx5_core_dev *mdev)
{
struct mlx5_clock *clock = &mdev->clock;
+ u64 overflow_cycles;
u64 ns;
u64 frac = 0;
u32 dev_freq;
@@ -511,10 +512,17 @@ void mlx5_init_clock(struct mlx5_core_dev *mdev)
/* Calculate period in seconds to call the overflow watchdog - to make
* sure counter is checked at least once every wrap around.
+	 * The period is the minimum of the max HW cycle count (the clock
+	 * source mask) and the max number of cycles that can be
+	 * multiplied by the clock multiplier without the result
+	 * exceeding 64 bits.
*/
- ns = cyclecounter_cyc2ns(&clock->cycles, clock->cycles.mask,
+ overflow_cycles = div64_u64(~0ULL >> 1, clock->cycles.mult);
+ overflow_cycles = min(overflow_cycles, clock->cycles.mask >> 1);
+
+ ns = cyclecounter_cyc2ns(&clock->cycles, overflow_cycles,
frac, &frac);
- do_div(ns, NSEC_PER_SEC / 2 / HZ);
+ do_div(ns, NSEC_PER_SEC / HZ);
clock->overflow_period = ns;
mdev->clock_info_page = alloc_page(GFP_KERNEL);
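The watchdog period above is bounded by whichever is smaller: half the counter's cycle range, or the largest cycle count whose product with the multiplier still fits in 64 bits. A sketch of that arithmetic with made-up cyclecounter parameters (a 41-bit counter, mult = 2^24, shift = 24); real values come from the device frequency:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t mask = (1ull << 41) - 1;       /* counter wraps after 2^41 cycles */
        uint32_t mult = 1u << 24;
        uint32_t shift = 24;

        uint64_t overflow_cycles = (~0ull >> 1) / mult; /* keep cycles * mult < 2^63 */
        if (overflow_cycles > mask >> 1)
                overflow_cycles = mask >> 1;            /* and below half the wrap point */

        /* cyclecounter_cyc2ns() is essentially (cycles * mult) >> shift */
        uint64_t ns = (overflow_cycles * mult) >> shift;

        printf("check the counter at least every %llu ns\n",
               (unsigned long long)ns);
        return 0;
}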
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
index 7cb67122e8b5..98359559c77e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/mpfs.c
@@ -33,6 +33,7 @@
#include <linux/etherdevice.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/mlx5_ifc.h>
+#include <linux/mlx5/eswitch.h>
#include "mlx5_core.h"
#include "lib/mpfs.h"
@@ -98,7 +99,7 @@ int mlx5_mpfs_init(struct mlx5_core_dev *dev)
int l2table_size = 1 << MLX5_CAP_GEN(dev, log_max_l2_table);
struct mlx5_mpfs *mpfs;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
mpfs = kzalloc(sizeof(*mpfs), GFP_KERNEL);
@@ -122,7 +123,7 @@ void mlx5_mpfs_cleanup(struct mlx5_core_dev *dev)
{
struct mlx5_mpfs *mpfs = dev->priv.mpfs;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return;
WARN_ON(!hlist_empty(mpfs->hash));
@@ -137,7 +138,7 @@ int mlx5_mpfs_add_mac(struct mlx5_core_dev *dev, u8 *mac)
u32 index;
int err;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
mutex_lock(&mpfs->lock);
@@ -179,7 +180,7 @@ int mlx5_mpfs_del_mac(struct mlx5_core_dev *dev, u8 *mac)
int err = 0;
u32 index;
- if (!MLX5_VPORT_MANAGER(dev))
+ if (!MLX5_ESWITCH_MANAGER(dev))
return 0;
mutex_lock(&mpfs->lock);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index fa9d0760dd36..31a9cbd85689 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -701,7 +701,7 @@ EXPORT_SYMBOL_GPL(mlx5_query_port_prio_tc);
static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
int inlen)
{
- u32 out[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 out[MLX5_ST_SZ_DW(qetc_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
return -EOPNOTSUPP;
@@ -713,7 +713,7 @@ static int mlx5_set_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *in,
static int mlx5_query_port_qetcr_reg(struct mlx5_core_dev *mdev, u32 *out,
int outlen)
{
- u32 in[MLX5_ST_SZ_DW(qtct_reg)];
+ u32 in[MLX5_ST_SZ_DW(qetc_reg)];
if (!MLX5_CAP_GEN(mdev, ets))
return -EOPNOTSUPP;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
index 2a8b529ce6dd..a0674962f02c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/sriov.c
@@ -88,6 +88,9 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
return -EBUSY;
}
+ if (!MLX5_ESWITCH_MANAGER(dev))
+ goto enable_vfs_hca;
+
err = mlx5_eswitch_enable_sriov(dev->priv.eswitch, num_vfs, SRIOV_LEGACY);
if (err) {
mlx5_core_warn(dev,
@@ -95,6 +98,7 @@ static int mlx5_device_enable_sriov(struct mlx5_core_dev *dev, int num_vfs)
return err;
}
+enable_vfs_hca:
for (vf = 0; vf < num_vfs; vf++) {
err = mlx5_core_enable_hca(dev, vf + 1);
if (err) {
@@ -140,7 +144,8 @@ static void mlx5_device_disable_sriov(struct mlx5_core_dev *dev)
}
out:
- mlx5_eswitch_disable_sriov(dev->priv.eswitch);
+ if (MLX5_ESWITCH_MANAGER(dev))
+ mlx5_eswitch_disable_sriov(dev->priv.eswitch);
if (mlx5_wait_for_vf_pages(dev))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 719cecb182c6..7eecd5b07bb1 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -549,8 +549,6 @@ int mlx5_modify_nic_vport_node_guid(struct mlx5_core_dev *mdev,
return -EINVAL;
if (!MLX5_CAP_GEN(mdev, vport_group_manager))
return -EACCES;
- if (!MLX5_CAP_ESW(mdev, nic_vport_node_guid_modify))
- return -EOPNOTSUPP;
in = kvzalloc(inlen, GFP_KERNEL);
if (!in)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index b97bb72b4db4..86478a6b99c5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -113,35 +113,45 @@ err_db_free:
return err;
}
-static void mlx5e_qp_set_frag_buf(struct mlx5_frag_buf *buf,
- struct mlx5_wq_qp *qp)
+static void mlx5_qp_set_frag_buf(struct mlx5_frag_buf *buf,
+ struct mlx5_wq_qp *qp)
{
+ struct mlx5_frag_buf_ctrl *sq_fbc;
struct mlx5_frag_buf *rqb, *sqb;
- rqb = &qp->rq.fbc.frag_buf;
+ rqb = &qp->rq.fbc.frag_buf;
*rqb = *buf;
rqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- rqb->npages = 1 << get_order(rqb->size);
+ rqb->npages = DIV_ROUND_UP(rqb->size, PAGE_SIZE);
- sqb = &qp->sq.fbc.frag_buf;
- *sqb = *buf;
- sqb->size = mlx5_wq_cyc_get_byte_size(&qp->rq);
- sqb->npages = 1 << get_order(sqb->size);
+ sq_fbc = &qp->sq.fbc;
+ sqb = &sq_fbc->frag_buf;
+ *sqb = *buf;
+ sqb->size = mlx5_wq_cyc_get_byte_size(&qp->sq);
+ sqb->npages = DIV_ROUND_UP(sqb->size, PAGE_SIZE);
sqb->frags += rqb->npages; /* first part is for the rq */
+ if (sq_fbc->strides_offset)
+ sqb->frags--;
}
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
{
+ u32 sq_strides_offset;
int err;
mlx5_fill_fbc(MLX5_GET(qpc, qpc, log_rq_stride) + 4,
MLX5_GET(qpc, qpc, log_rq_size),
&wq->rq.fbc);
- mlx5_fill_fbc(ilog2(MLX5_SEND_WQE_BB),
- MLX5_GET(qpc, qpc, log_sq_size),
- &wq->sq.fbc);
+
+ sq_strides_offset =
+ ((wq->rq.fbc.frag_sz_m1 + 1) % PAGE_SIZE) / MLX5_SEND_WQE_BB;
+
+ mlx5_fill_fbc_offset(ilog2(MLX5_SEND_WQE_BB),
+ MLX5_GET(qpc, qpc, log_sq_size),
+ sq_strides_offset,
+ &wq->sq.fbc);
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -156,7 +166,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
goto err_db_free;
}
- mlx5e_qp_set_frag_buf(&wq_ctrl->buf, wq);
+ mlx5_qp_set_frag_buf(&wq_ctrl->buf, wq);
wq->rq.db = &wq_ctrl->db.db[MLX5_RCV_DBR];
wq->sq.db = &wq_ctrl->db.db[MLX5_SND_DBR];
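
The wq.c hunk sizes the SQ fragment buffer from the SQ (not the RQ) byte count and switches npages from 1 << get_order(size) to DIV_ROUND_UP(size, PAGE_SIZE). For a size that is not a power of two of pages, get_order() rounds up to the next power of two while DIV_ROUND_UP gives the exact page count. A small standalone illustration of the difference:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Round up to the next power-of-two number of pages, as get_order() does. */
static unsigned long pow2_pages(unsigned long size)
{
	unsigned long pages = 1;

	while (pages * PAGE_SIZE < size)
		pages <<= 1;
	return pages;
}

int main(void)
{
	unsigned long size = 5 * PAGE_SIZE;	/* e.g. a 20 KiB work queue */

	printf("exact pages: %lu, power-of-two pages: %lu\n",
	       DIV_ROUND_UP(size, PAGE_SIZE), pow2_pages(size));
	return 0;
}
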
diff --git a/drivers/net/ethernet/mellanox/mlxsw/Kconfig b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
index f4d9c9975ac3..82827a8d3d67 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlxsw/Kconfig
@@ -30,7 +30,7 @@ config MLXSW_CORE_THERMAL
config MLXSW_PCI
tristate "PCI bus implementation for Mellanox Technologies Switch ASICs"
- depends on PCI && HAS_DMA && HAS_IOMEM && MLXSW_CORE
+ depends on PCI && HAS_IOMEM && MLXSW_CORE
default m
---help---
This is PCI bus implementation for Mellanox Technologies Switch ASICs.
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 6aaaf3d9ba31..77b2adb29341 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -4756,6 +4756,12 @@ static void mlxsw_sp_rt6_destroy(struct mlxsw_sp_rt6 *mlxsw_sp_rt6)
kfree(mlxsw_sp_rt6);
}
+static bool mlxsw_sp_fib6_rt_can_mp(const struct fib6_info *rt)
+{
+ /* RTF_CACHE routes are ignored */
+ return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
+}
+
static struct fib6_info *
mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
{
@@ -4765,11 +4771,11 @@ mlxsw_sp_fib6_entry_rt(const struct mlxsw_sp_fib6_entry *fib6_entry)
static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
- const struct fib6_info *nrt, bool append)
+ const struct fib6_info *nrt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
- if (!append)
+ if (!mlxsw_sp_fib6_rt_can_mp(nrt) || replace)
return NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
@@ -4784,7 +4790,8 @@ mlxsw_sp_fib6_node_mp_entry_find(const struct mlxsw_sp_fib_node *fib_node,
break;
if (rt->fib6_metric < nrt->fib6_metric)
continue;
- if (rt->fib6_metric == nrt->fib6_metric)
+ if (rt->fib6_metric == nrt->fib6_metric &&
+ mlxsw_sp_fib6_rt_can_mp(rt))
return fib6_entry;
if (rt->fib6_metric > nrt->fib6_metric)
break;
@@ -5163,7 +5170,7 @@ static struct mlxsw_sp_fib6_entry *
mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
const struct fib6_info *nrt, bool replace)
{
- struct mlxsw_sp_fib6_entry *fib6_entry;
+ struct mlxsw_sp_fib6_entry *fib6_entry, *fallback = NULL;
list_for_each_entry(fib6_entry, &fib_node->entry_list, common.list) {
struct fib6_info *rt = mlxsw_sp_fib6_entry_rt(fib6_entry);
@@ -5172,13 +5179,18 @@ mlxsw_sp_fib6_node_entry_find(const struct mlxsw_sp_fib_node *fib_node,
continue;
if (rt->fib6_table->tb6_id != nrt->fib6_table->tb6_id)
break;
- if (replace && rt->fib6_metric == nrt->fib6_metric)
- return fib6_entry;
+ if (replace && rt->fib6_metric == nrt->fib6_metric) {
+ if (mlxsw_sp_fib6_rt_can_mp(rt) ==
+ mlxsw_sp_fib6_rt_can_mp(nrt))
+ return fib6_entry;
+ if (mlxsw_sp_fib6_rt_can_mp(nrt))
+ fallback = fallback ?: fib6_entry;
+ }
if (rt->fib6_metric > nrt->fib6_metric)
- return fib6_entry;
+ return fallback ?: fib6_entry;
}
- return NULL;
+ return fallback;
}
static int
@@ -5304,8 +5316,7 @@ static void mlxsw_sp_fib6_entry_replace(struct mlxsw_sp *mlxsw_sp,
}
static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
- struct fib6_info *rt, bool replace,
- bool append)
+ struct fib6_info *rt, bool replace)
{
struct mlxsw_sp_fib6_entry *fib6_entry;
struct mlxsw_sp_fib_node *fib_node;
@@ -5331,7 +5342,7 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
/* Before creating a new entry, try to append route to an existing
* multipath entry.
*/
- fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, append);
+ fib6_entry = mlxsw_sp_fib6_node_mp_entry_find(fib_node, rt, replace);
if (fib6_entry) {
err = mlxsw_sp_fib6_entry_nexthop_add(mlxsw_sp, fib6_entry, rt);
if (err)
@@ -5339,14 +5350,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
return 0;
}
- /* We received an append event, yet did not find any route to
- * append to.
- */
- if (WARN_ON(append)) {
- err = -EINVAL;
- goto err_fib6_entry_append;
- }
-
fib6_entry = mlxsw_sp_fib6_entry_create(mlxsw_sp, fib_node, rt);
if (IS_ERR(fib6_entry)) {
err = PTR_ERR(fib6_entry);
@@ -5364,7 +5367,6 @@ static int mlxsw_sp_router_fib6_add(struct mlxsw_sp *mlxsw_sp,
err_fib6_node_entry_link:
mlxsw_sp_fib6_entry_destroy(mlxsw_sp, fib6_entry);
err_fib6_entry_create:
-err_fib6_entry_append:
err_fib6_entry_nexthop_add:
mlxsw_sp_fib_node_put(mlxsw_sp, fib_node);
return err;
@@ -5715,7 +5717,7 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
struct mlxsw_sp_fib_event_work *fib_work =
container_of(work, struct mlxsw_sp_fib_event_work, work);
struct mlxsw_sp *mlxsw_sp = fib_work->mlxsw_sp;
- bool replace, append;
+ bool replace;
int err;
rtnl_lock();
@@ -5726,10 +5728,8 @@ static void mlxsw_sp_router_fib6_event_work(struct work_struct *work)
case FIB_EVENT_ENTRY_APPEND: /* fall through */
case FIB_EVENT_ENTRY_ADD:
replace = fib_work->event == FIB_EVENT_ENTRY_REPLACE;
- append = fib_work->event == FIB_EVENT_ENTRY_APPEND;
err = mlxsw_sp_router_fib6_add(mlxsw_sp,
- fib_work->fen6_info.rt, replace,
- append);
+ fib_work->fen6_info.rt, replace);
if (err)
mlxsw_sp_router_fib_abort(mlxsw_sp);
mlxsw_sp_rt6_release(fib_work->fen6_info.rt);
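
mlxsw_sp_fib6_rt_can_mp() above admits a route into a hardware multipath group only if it is a gateway route that was not generated by addrconf, and the find helpers then prefer entries with the same metric and the same multipath capability. A standalone sketch of the predicate, with the RTF_* flag values written out locally for illustration:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values mirror the kernel's RTF_* flags; declared here for illustration. */
#define RTF_GATEWAY	0x0002
#define RTF_ADDRCONF	0x040000

struct fib6_info_lite {
	uint32_t fib6_flags;
};

/* Only plain gateway routes (not addrconf-generated ones) may be merged
 * into a multipath entry; RTF_CACHE clones never reach this point. */
static bool fib6_rt_can_mp(const struct fib6_info_lite *rt)
{
	return (rt->fib6_flags & (RTF_GATEWAY | RTF_ADDRCONF)) == RTF_GATEWAY;
}

int main(void)
{
	struct fib6_info_lite gw = { .fib6_flags = RTF_GATEWAY };
	struct fib6_info_lite ra = { .fib6_flags = RTF_GATEWAY | RTF_ADDRCONF };

	printf("gateway route: %d, addrconf route: %d\n",
	       fib6_rt_can_mp(&gw), fib6_rt_can_mp(&ra));
	return 0;
}
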
diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c
index fb2c8f8071e6..776a8a9be8e3 100644
--- a/drivers/net/ethernet/mscc/ocelot.c
+++ b/drivers/net/ethernet/mscc/ocelot.c
@@ -344,10 +344,9 @@ static int ocelot_port_stop(struct net_device *dev)
static int ocelot_gen_ifh(u32 *ifh, struct frame_info *info)
{
ifh[0] = IFH_INJ_BYPASS;
- ifh[1] = (0xff00 & info->port) >> 8;
+ ifh[1] = (0xf00 & info->port) >> 8;
ifh[2] = (0xff & info->port) << 24;
- ifh[3] = IFH_INJ_POP_CNT_DISABLE | (info->cpuq << 20) |
- (info->tag_type << 16) | info->vid;
+ ifh[3] = (info->tag_type << 16) | info->vid;
return 0;
}
@@ -370,11 +369,13 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev)
QS_INJ_CTRL_SOF, QS_INJ_CTRL, grp);
info.port = BIT(port->chip_port);
- info.cpuq = 0xff;
+ info.tag_type = IFH_TAG_TYPE_C;
+ info.vid = skb_vlan_tag_get(skb);
ocelot_gen_ifh(ifh, &info);
for (i = 0; i < IFH_LEN; i++)
- ocelot_write_rix(ocelot, ifh[i], QS_INJ_WR, grp);
+ ocelot_write_rix(ocelot, (__force u32)cpu_to_be32(ifh[i]),
+ QS_INJ_WR, grp);
count = (skb->len + 3) / 4;
last = skb->len % 4;
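
The ocelot hunks narrow the port mask placed in ifh[1] to 0xf00, drop the cpuq and pop-count-disable bits from ifh[3], and convert each injection-header word to big endian before writing it to the QS_INJ_WR window. A rough user-space sketch of the mask/shift packing and byte-order conversion, with htonl standing in for cpu_to_be32 and all values hypothetical:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int port = 1u << 9;	/* hypothetical chip port 9 */
	unsigned int skb_len = 61;	/* frame length in bytes */
	uint32_t ifh1;

	ifh1 = (0xf00 & port) >> 8;	/* only bits 11:8 of the port mask */

	/* Round the payload up to whole 32-bit words, remember the tail. */
	unsigned int count = (skb_len + 3) / 4;
	unsigned int last = skb_len % 4;

	/* The injection FIFO expects big-endian words. */
	uint32_t wire = htonl(ifh1);

	printf("ifh[1]=0x%x wire=0x%08x words=%u last=%u\n",
	       ifh1, wire, count, last);
	return 0;
}
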
diff --git a/drivers/net/ethernet/netronome/nfp/bpf/main.c b/drivers/net/ethernet/netronome/nfp/bpf/main.c
index fcdfb8e7fdea..40216d56dddc 100644
--- a/drivers/net/ethernet/netronome/nfp/bpf/main.c
+++ b/drivers/net/ethernet/netronome/nfp/bpf/main.c
@@ -81,10 +81,10 @@ nfp_bpf_xdp_offload(struct nfp_app *app, struct nfp_net *nn,
ret = nfp_net_bpf_offload(nn, prog, running, extack);
/* Stop offload if replace not possible */
- if (ret && prog)
- nfp_bpf_xdp_offload(app, nn, NULL, extack);
+ if (ret)
+ return ret;
- nn->dp.bpf_offload_xdp = prog && !ret;
+ nn->dp.bpf_offload_xdp = !!prog;
return ret;
}
@@ -202,6 +202,9 @@ static int nfp_bpf_setup_tc_block(struct net_device *netdev,
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ if (tcf_block_shared(f->block))
+ return -EOPNOTSUPP;
+
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index 91935405f586..84f7a5dbea9d 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -123,6 +123,20 @@ nfp_flower_compile_mac(struct nfp_flower_mac_mpls *frame,
NFP_FLOWER_MASK_MPLS_Q;
frame->mpls_lse = cpu_to_be32(t_mpls);
+ } else if (dissector_uses_key(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC)) {
+ /* Check for an MPLS ether type and set the NFP_FLOWER_MASK_MPLS_Q
+ * bit, which indicates an MPLS ether type without any
+ * MPLS fields.
+ */
+ struct flow_dissector_key_basic *key_basic;
+
+ key_basic = skb_flow_dissector_target(flow->dissector,
+ FLOW_DISSECTOR_KEY_BASIC,
+ flow->key);
+ if (key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_UC) ||
+ key_basic->n_proto == cpu_to_be16(ETH_P_MPLS_MC))
+ frame->mpls_lse = cpu_to_be32(NFP_FLOWER_MASK_MPLS_Q);
}
}
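
The added branch above sets NFP_FLOWER_MASK_MPLS_Q when the basic dissector key carries an MPLS ether type even though no MPLS fields are present. The n_proto field is kept in network byte order, so the comparison uses big-endian constants. A plain C sketch of that check, with the ethertype values written out for illustration:

#include <arpa/inet.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ETH_P_MPLS_UC 0x8847	/* unicast MPLS */
#define ETH_P_MPLS_MC 0x8848	/* multicast MPLS */

/* n_proto is stored in network byte order, as in the flow dissector key. */
static bool is_mpls_proto(uint16_t n_proto_be)
{
	return n_proto_be == htons(ETH_P_MPLS_UC) ||
	       n_proto_be == htons(ETH_P_MPLS_MC);
}

int main(void)
{
	printf("0x8847 -> %d, 0x0800 -> %d\n",
	       is_mpls_proto(htons(ETH_P_MPLS_UC)),
	       is_mpls_proto(htons(0x0800)));
	return 0;
}
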
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index c42e64f32333..525057bee0ed 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -264,6 +264,14 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
case cpu_to_be16(ETH_P_ARP):
return -EOPNOTSUPP;
+ case cpu_to_be16(ETH_P_MPLS_UC):
+ case cpu_to_be16(ETH_P_MPLS_MC):
+ if (!(key_layer & NFP_FLOWER_LAYER_MAC)) {
+ key_layer |= NFP_FLOWER_LAYER_MAC;
+ key_size += sizeof(struct nfp_flower_mac_mpls);
+ }
+ break;
+
/* Will be included in layer 2. */
case cpu_to_be16(ETH_P_8021Q):
break;
@@ -623,6 +631,9 @@ static int nfp_flower_setup_tc_block(struct net_device *netdev,
if (f->binder_type != TCF_BLOCK_BINDER_TYPE_CLSACT_INGRESS)
return -EOPNOTSUPP;
+ if (tcf_block_shared(f->block))
+ return -EOPNOTSUPP;
+
switch (f->command) {
case TC_BLOCK_BIND:
return tcf_block_cb_register(f->block,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
index 78afe75129ab..382bb93cb090 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c
@@ -317,7 +317,7 @@ nfp_tun_write_neigh(struct net_device *netdev, struct nfp_app *app,
payload.dst_ipv4 = flow->daddr;
/* If entry has expired, send dst IP with all other fields 0. */
- if (!(neigh->nud_state & NUD_VALID)) {
+ if (!(neigh->nud_state & NUD_VALID) || neigh->dead) {
nfp_tun_del_route_from_cache(app, payload.dst_ipv4);
/* Trigger ARP to verify invalid neighbour state. */
neigh_event_send(neigh, NULL);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_main.c b/drivers/net/ethernet/netronome/nfp/nfp_main.c
index 46b76d5a726c..152283d7e59c 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_main.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_main.c
@@ -240,7 +240,6 @@ static int nfp_pcie_sriov_read_nfd_limit(struct nfp_pf *pf)
return pci_sriov_set_totalvfs(pf->pdev, pf->limit_vfs);
pf->limit_vfs = ~0;
- pci_sriov_set_totalvfs(pf->pdev, 0); /* 0 is unset */
/* Allow any setting for backwards compatibility if symbol not found */
if (err == -ENOENT)
return 0;
@@ -668,7 +667,7 @@ static int nfp_pci_probe(struct pci_dev *pdev,
err = nfp_net_pci_probe(pf);
if (err)
- goto err_sriov_unlimit;
+ goto err_fw_unload;
err = nfp_hwmon_register(pf);
if (err) {
@@ -680,8 +679,6 @@ static int nfp_pci_probe(struct pci_dev *pdev,
err_net_remove:
nfp_net_pci_remove(pf);
-err_sriov_unlimit:
- pci_sriov_set_totalvfs(pf->pdev, 0);
err_fw_unload:
kfree(pf->rtbl);
nfp_mip_close(pf->mip);
@@ -715,7 +712,6 @@ static void nfp_pci_remove(struct pci_dev *pdev)
nfp_hwmon_unregister(pf);
nfp_pcie_sriov_disable(pdev);
- pci_sriov_set_totalvfs(pf->pdev, 0);
nfp_net_pci_remove(pf);
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
index cd34097b79f1..37a6d7822a38 100644
--- a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c
@@ -232,7 +232,7 @@ struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
nfp_resource_address(state->res),
fwinf, sizeof(*fwinf));
- if (err < sizeof(*fwinf))
+ if (err < (int)sizeof(*fwinf))
goto err_release;
if (!nffw_res_flg_init_get(fwinf))
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index 00db3401b898..1dfaccd151f0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -502,6 +502,7 @@ enum BAR_ID {
struct qed_nvm_image_info {
u32 num_images;
struct bist_nvm_image_att *image_att;
+ bool valid;
};
#define DRV_MODULE_VERSION \
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
index 8f31406ec894..e0680ce91328 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dcbx.c
@@ -255,9 +255,8 @@ qed_dcbx_get_app_protocol_type(struct qed_hwfn *p_hwfn,
*type = DCBX_PROTOCOL_ROCE_V2;
} else {
*type = DCBX_MAX_PROTOCOL_TYPE;
- DP_ERR(p_hwfn,
- "No action required, App TLV id = 0x%x app_prio_bitmap = 0x%x\n",
- id, app_prio_bitmap);
+ DP_ERR(p_hwfn, "No action required, App TLV entry = 0x%x\n",
+ app_prio_bitmap);
return false;
}
@@ -710,9 +709,9 @@ qed_dcbx_get_local_lldp_params(struct qed_hwfn *p_hwfn,
p_local = &p_hwfn->p_dcbx_info->lldp_local[LLDP_NEAREST_BRIDGE];
memcpy(params->lldp_local.local_chassis_id, p_local->local_chassis_id,
- ARRAY_SIZE(p_local->local_chassis_id));
+ sizeof(p_local->local_chassis_id));
memcpy(params->lldp_local.local_port_id, p_local->local_port_id,
- ARRAY_SIZE(p_local->local_port_id));
+ sizeof(p_local->local_port_id));
}
static void
@@ -724,9 +723,9 @@ qed_dcbx_get_remote_lldp_params(struct qed_hwfn *p_hwfn,
p_remote = &p_hwfn->p_dcbx_info->lldp_remote[LLDP_NEAREST_BRIDGE];
memcpy(params->lldp_remote.peer_chassis_id, p_remote->peer_chassis_id,
- ARRAY_SIZE(p_remote->peer_chassis_id));
+ sizeof(p_remote->peer_chassis_id));
memcpy(params->lldp_remote.peer_port_id, p_remote->peer_port_id,
- ARRAY_SIZE(p_remote->peer_port_id));
+ sizeof(p_remote->peer_port_id));
}
static int
@@ -1479,8 +1478,8 @@ static u8 qed_dcbnl_getcap(struct qed_dev *cdev, int capid, u8 *cap)
*cap = 0x80;
break;
case DCB_CAP_ATTR_DCBX:
- *cap = (DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE |
- DCB_CAP_DCBX_VER_IEEE | DCB_CAP_DCBX_STATIC);
+ *cap = (DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_VER_IEEE |
+ DCB_CAP_DCBX_STATIC);
break;
default:
*cap = false;
@@ -1548,8 +1547,6 @@ static u8 qed_dcbnl_getdcbx(struct qed_dev *cdev)
if (!dcbx_info)
return 0;
- if (dcbx_info->operational.enabled)
- mode |= DCB_CAP_DCBX_LLD_MANAGED;
if (dcbx_info->operational.ieee)
mode |= DCB_CAP_DCBX_VER_IEEE;
if (dcbx_info->operational.cee)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_debug.c b/drivers/net/ethernet/qlogic/qed/qed_debug.c
index a14e48489029..4340c4c90bcb 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_debug.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_debug.c
@@ -6723,7 +6723,7 @@ static enum dbg_status qed_parse_mcp_trace_buf(u8 *trace_buf,
format_idx = header & MFW_TRACE_EVENTID_MASK;
/* Skip message if its index doesn't exist in the meta data */
- if (format_idx > s_mcp_trace_meta.formats_num) {
+ if (format_idx >= s_mcp_trace_meta.formats_num) {
u8 format_size =
(u8)((header & MFW_TRACE_PRM_SIZE_MASK) >>
MFW_TRACE_PRM_SIZE_SHIFT);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 329781cda77f..e5249b4741d0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -1804,7 +1804,7 @@ int qed_hw_init(struct qed_dev *cdev, struct qed_hw_init_params *p_params)
DP_INFO(p_hwfn, "Failed to update driver state\n");
rc = qed_mcp_ov_update_eswitch(p_hwfn, p_hwfn->p_main_ptt,
- QED_OV_ESWITCH_VEB);
+ QED_OV_ESWITCH_NONE);
if (rc)
DP_INFO(p_hwfn, "Failed to update eswitch mode\n");
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 99973e10b179..5ede6408649d 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -665,7 +665,7 @@ qed_sp_update_mcast_bin(struct qed_hwfn *p_hwfn,
p_ramrod->common.update_approx_mcast_flg = 1;
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
- u32 *p_bins = (u32 *)p_params->bins;
+ u32 *p_bins = p_params->bins;
p_ramrod->approx_mcast.bins[i] = cpu_to_le32(p_bins[i]);
}
@@ -1476,8 +1476,8 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_data)
{
- unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct vport_update_ramrod_data *p_ramrod = NULL;
+ u32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS];
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
u8 abs_vport_id = 0;
@@ -1513,26 +1513,25 @@ qed_sp_eth_filter_mcast(struct qed_hwfn *p_hwfn,
/* explicitly clear out the entire vector */
memset(&p_ramrod->approx_mcast.bins, 0,
sizeof(p_ramrod->approx_mcast.bins));
- memset(bins, 0, sizeof(unsigned long) *
- ETH_MULTICAST_MAC_BINS_IN_REGS);
+ memset(bins, 0, sizeof(bins));
/* filter ADD op is explicit set op and it removes
* any existing filters for the vport
*/
if (p_filter_cmd->opcode == QED_FILTER_ADD) {
for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) {
- u32 bit;
+ u32 bit, nbits;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- __set_bit(bit, bins);
+ nbits = sizeof(u32) * BITS_PER_BYTE;
+ bins[bit / nbits] |= 1 << (bit % nbits);
}
/* Convert to correct endianness */
for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) {
struct vport_update_ramrod_mcast *p_ramrod_bins;
- u32 *p_bins = (u32 *)bins;
p_ramrod_bins = &p_ramrod->approx_mcast;
- p_ramrod_bins->bins[i] = cpu_to_le32(p_bins[i]);
+ p_ramrod_bins->bins[i] = cpu_to_le32(bins[i]);
}
}
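
The qed hunks replace the unsigned long bins[8] plus __set_bit() combination with a u32 bins[8] array and open-coded bit arithmetic, so the bit layout matches the 32-bit words later copied into the ramrod regardless of the host's unsigned long width. A user-space sketch of the same bit placement:

#include <stdint.h>
#include <stdio.h>

#define BINS_WORDS	8
#define BITS_PER_WORD	(sizeof(uint32_t) * 8)

/* Set one multicast "approx bin" bit in an array of 32-bit words. */
static void set_mcast_bin(uint32_t *bins, unsigned int bit)
{
	bins[bit / BITS_PER_WORD] |= 1u << (bit % BITS_PER_WORD);
}

int main(void)
{
	uint32_t bins[BINS_WORDS] = { 0 };
	unsigned int i;

	set_mcast_bin(bins, 5);		/* lands in word 0 */
	set_mcast_bin(bins, 200);	/* lands in word 6 */

	for (i = 0; i < BINS_WORDS; i++)
		printf("bins[%u] = 0x%08x\n", i, bins[i]);
	return 0;
}
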
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 806a8da257e9..8d80f1095d17 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -215,7 +215,7 @@ struct qed_sp_vport_update_params {
u8 anti_spoofing_en;
u8 update_accept_any_vlan_flg;
u8 accept_any_vlan;
- unsigned long bins[8];
+ u32 bins[8];
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index c97ebd681c47..012973d75ad0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -201,8 +201,9 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
skb = build_skb(buffer->data, 0);
if (!skb) {
- rc = -ENOMEM;
- goto out_post;
+ DP_INFO(cdev, "Failed to build SKB\n");
+ kfree(buffer->data);
+ goto out_post1;
}
data->u.placement_offset += NET_SKB_PAD;
@@ -224,8 +225,14 @@ void qed_ll2b_complete_rx_packet(void *cxt, struct qed_ll2_comp_rx_data *data)
cdev->ll2->cbs->rx_cb(cdev->ll2->cb_cookie, skb,
data->opaque_data_0,
data->opaque_data_1);
+ } else {
+ DP_VERBOSE(p_hwfn, (NETIF_MSG_RX_STATUS | NETIF_MSG_PKTDATA |
+ QED_MSG_LL2 | QED_MSG_STORAGE),
+ "Dropping the packet\n");
+ kfree(buffer->data);
}
+out_post1:
/* Update Buffer information and update FW producer */
buffer->data = new_data;
buffer->phys_addr = new_phys_addr;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index b04d57ca5176..758a9a5127fa 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -371,7 +371,7 @@ static struct qed_dev *qed_probe(struct pci_dev *pdev,
goto err2;
}
- DP_INFO(cdev, "qed_probe completed successffuly\n");
+ DP_INFO(cdev, "qed_probe completed successfully\n");
return cdev;
@@ -567,8 +567,16 @@ static irqreturn_t qed_single_int(int irq, void *dev_instance)
/* Fastpath interrupts */
for (j = 0; j < 64; j++) {
if ((0x2ULL << j) & status) {
- hwfn->simd_proto_handler[j].func(
- hwfn->simd_proto_handler[j].token);
+ struct qed_simd_fp_handler *p_handler =
+ &hwfn->simd_proto_handler[j];
+
+ if (p_handler->func)
+ p_handler->func(p_handler->token);
+ else
+ DP_NOTICE(hwfn,
+ "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
+ j, status);
+
status &= ~(0x2ULL << j);
rc = IRQ_HANDLED;
}
@@ -781,6 +789,14 @@ static int qed_slowpath_setup_int(struct qed_dev *cdev,
/* We want a minimum of one slowpath and one fastpath vector per hwfn */
cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;
+ if (is_kdump_kernel()) {
+ DP_INFO(cdev,
+ "Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
+ cdev->int_params.in.min_msix_cnt);
+ cdev->int_params.in.num_vectors =
+ cdev->int_params.in.min_msix_cnt;
+ }
+
rc = qed_set_int_mode(cdev, false);
if (rc) {
DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index 4e0b443c9519..cdd645024a32 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -592,6 +592,9 @@ int qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
*o_mcp_resp = mb_params.mcp_resp;
*o_mcp_param = mb_params.mcp_param;
+ /* nvm_info needs to be updated */
+ p_hwfn->nvm_info.valid = false;
+
return 0;
}
@@ -1208,6 +1211,7 @@ static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
break;
default:
p_link->speed = 0;
+ p_link->link_up = 0;
}
if (p_link->link_up && p_link->speed)
@@ -1305,9 +1309,15 @@ int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
phy_cfg.adv_speed = params->speed.advertised_speeds;
phy_cfg.loopback_mode = params->loopback_mode;
- if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE) {
- if (params->eee.enable)
- phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
+
+ /* There are MFWs that share this capability regardless of whether
+ * it is actually feasible. Given that at the very least adv_caps
+ * would be set internally by qed, we want to make sure LFA would
+ * still work.
+ */
+ if ((p_hwfn->mcp_info->capabilities &
+ FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
+ phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
if (params->eee.tx_lpi_enable)
phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
if (params->eee.adv_caps & QED_EEE_1G_ADV)
@@ -2555,11 +2565,14 @@ int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
- struct qed_nvm_image_info *nvm_info = &p_hwfn->nvm_info;
+ struct qed_nvm_image_info nvm_info;
struct qed_ptt *p_ptt;
int rc;
u32 i;
+ if (p_hwfn->nvm_info.valid)
+ return 0;
+
p_ptt = qed_ptt_acquire(p_hwfn);
if (!p_ptt) {
DP_ERR(p_hwfn, "failed to acquire ptt\n");
@@ -2567,29 +2580,29 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
}
/* Acquire from MFW the amount of available images */
- nvm_info->num_images = 0;
+ nvm_info.num_images = 0;
rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
- p_ptt, &nvm_info->num_images);
+ p_ptt, &nvm_info.num_images);
if (rc == -EOPNOTSUPP) {
DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
goto out;
- } else if (rc || !nvm_info->num_images) {
+ } else if (rc || !nvm_info.num_images) {
DP_ERR(p_hwfn, "Failed getting number of images\n");
goto err0;
}
- nvm_info->image_att = kmalloc_array(nvm_info->num_images,
- sizeof(struct bist_nvm_image_att),
- GFP_KERNEL);
- if (!nvm_info->image_att) {
+ nvm_info.image_att = kmalloc_array(nvm_info.num_images,
+ sizeof(struct bist_nvm_image_att),
+ GFP_KERNEL);
+ if (!nvm_info.image_att) {
rc = -ENOMEM;
goto err0;
}
/* Iterate over images and get their attributes */
- for (i = 0; i < nvm_info->num_images; i++) {
+ for (i = 0; i < nvm_info.num_images; i++) {
rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
- &nvm_info->image_att[i], i);
+ &nvm_info.image_att[i], i);
if (rc) {
DP_ERR(p_hwfn,
"Failed getting image index %d attributes\n", i);
@@ -2597,14 +2610,22 @@ int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
}
DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
- nvm_info->image_att[i].len);
+ nvm_info.image_att[i].len);
}
out:
+ /* Update hwfn's nvm_info */
+ if (nvm_info.num_images) {
+ p_hwfn->nvm_info.num_images = nvm_info.num_images;
+ kfree(p_hwfn->nvm_info.image_att);
+ p_hwfn->nvm_info.image_att = nvm_info.image_att;
+ p_hwfn->nvm_info.valid = true;
+ }
+
qed_ptt_release(p_hwfn, p_ptt);
return 0;
err1:
- kfree(nvm_info->image_att);
+ kfree(nvm_info.image_att);
err0:
qed_ptt_release(p_hwfn, p_ptt);
return rc;
@@ -2641,6 +2662,7 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
+ qed_mcp_nvm_info_populate(p_hwfn);
for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
if (type == p_hwfn->nvm_info.image_att[i].image_type)
break;
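
qed_mcp_nvm_info_populate() above now fills a local qed_nvm_image_info and commits it to p_hwfn->nvm_info only on success, guarded by a valid flag that is cleared whenever an NVM write command completes; the attribute lookup repopulates the cache before searching it. A minimal sketch of that populate-into-a-local-then-commit caching pattern, with hypothetical names and a stubbed query:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct image_cache {
	bool valid;
	unsigned int num_images;
	int *image_att;			/* stands in for bist_nvm_image_att[] */
};

/* Pretend query of the management firmware; fixed data for the sketch. */
static int query_num_images(unsigned int *num) { *num = 3; return 0; }

static int cache_populate(struct image_cache *cache)
{
	struct image_cache tmp = { 0 };

	if (cache->valid)
		return 0;		/* still fresh, nothing to do */

	if (query_num_images(&tmp.num_images) || !tmp.num_images)
		return -1;

	tmp.image_att = calloc(tmp.num_images, sizeof(*tmp.image_att));
	if (!tmp.image_att)
		return -1;

	/* Commit only after everything succeeded; old data is replaced. */
	free(cache->image_att);
	*cache = tmp;
	cache->valid = true;
	return 0;
}

int main(void)
{
	struct image_cache cache = { 0 };

	cache_populate(&cache);
	printf("valid=%d num_images=%u\n", cache.valid, cache.num_images);
	free(cache.image_att);
	return 0;
}
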
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index f01bf52bc381..26e918d7f2f9 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -2831,7 +2831,7 @@ qed_iov_vp_update_mcast_bin_param(struct qed_hwfn *p_hwfn,
p_data->update_approx_mcast_flg = 1;
memcpy(p_data->bins, p_mcast_tlv->bins,
- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
*tlvs_mask |= 1 << QED_IOV_VP_UPDATE_MCAST;
}
@@ -4513,6 +4513,8 @@ static void qed_sriov_enable_qid_config(struct qed_hwfn *hwfn,
static int qed_sriov_enable(struct qed_dev *cdev, int num)
{
struct qed_iov_vf_init_params params;
+ struct qed_hwfn *hwfn;
+ struct qed_ptt *ptt;
int i, j, rc;
if (num >= RESC_NUM(&cdev->hwfns[0], QED_VPORT)) {
@@ -4525,8 +4527,8 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
/* Initialize HW for VF access */
for_each_hwfn(cdev, j) {
- struct qed_hwfn *hwfn = &cdev->hwfns[j];
- struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+ hwfn = &cdev->hwfns[j];
+ ptt = qed_ptt_acquire(hwfn);
/* Make sure not to use more than 16 queues per VF */
params.num_queues = min_t(int,
@@ -4562,6 +4564,19 @@ static int qed_sriov_enable(struct qed_dev *cdev, int num)
goto err;
}
+ hwfn = QED_LEADING_HWFN(cdev);
+ ptt = qed_ptt_acquire(hwfn);
+ if (!ptt) {
+ DP_ERR(hwfn, "Failed to acquire ptt\n");
+ rc = -EBUSY;
+ goto err;
+ }
+
+ rc = qed_mcp_ov_update_eswitch(hwfn, ptt, QED_OV_ESWITCH_VEB);
+ if (rc)
+ DP_INFO(cdev, "Failed to update eswitch mode\n");
+ qed_ptt_release(hwfn, ptt);
+
return num;
err:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index 2d7fcd6a0777..be6ddde1a104 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -1126,7 +1126,7 @@ int qed_vf_pf_vport_update(struct qed_hwfn *p_hwfn,
resp_size += sizeof(struct pfvf_def_resp_tlv);
memcpy(p_mcast_tlv->bins, p_params->bins,
- sizeof(unsigned long) * ETH_MULTICAST_MAC_BINS_IN_REGS);
+ sizeof(u32) * ETH_MULTICAST_MAC_BINS_IN_REGS);
}
update_rx = p_params->accept_flags.update_rx_mode_config;
@@ -1272,7 +1272,7 @@ void qed_vf_pf_filter_mcast(struct qed_hwfn *p_hwfn,
u32 bit;
bit = qed_mcast_bin_from_mac(p_filter_cmd->mac[i]);
- __set_bit(bit, sp_params.bins);
+ sp_params.bins[bit / 32] |= 1 << (bit % 32);
}
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.h b/drivers/net/ethernet/qlogic/qed/qed_vf.h
index 4f05d5eb3cf5..033409db86ae 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.h
@@ -392,7 +392,12 @@ struct vfpf_vport_update_mcast_bin_tlv {
struct channel_tlv tl;
u8 padding[4];
- u64 bins[8];
+ /* There are only 256 approx bins, and in HSI they're divided into
+ * 32-bit values. As old VFs used to set bits in these values on their side,
+ * the upper half of the array is never expected to contain any data.
+ */
+ u64 bins[4];
+ u64 obsolete_bins[4];
};
struct vfpf_vport_update_accept_param_tlv {
diff --git a/drivers/net/ethernet/qlogic/qede/qede_ptp.c b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
index 02adb513f475..013ff567283c 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_ptp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_ptp.c
@@ -337,8 +337,14 @@ int qede_ptp_get_ts_info(struct qede_dev *edev, struct ethtool_ts_info *info)
{
struct qede_ptp *ptp = edev->ptp;
- if (!ptp)
- return -EIO;
+ if (!ptp) {
+ info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+ SOF_TIMESTAMPING_RX_SOFTWARE |
+ SOF_TIMESTAMPING_SOFTWARE;
+ info->phc_index = -1;
+
+ return 0;
+ }
info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
SOF_TIMESTAMPING_RX_SOFTWARE |
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
index 891f03a7a33d..8d7b9bb910f2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sysfs.c
@@ -1128,6 +1128,8 @@ static ssize_t qlcnic_83xx_sysfs_flash_write_handler(struct file *filp,
struct qlcnic_adapter *adapter = dev_get_drvdata(dev);
ret = kstrtoul(buf, 16, &data);
+ if (ret)
+ return ret;
switch (data) {
case QLC_83XX_FLASH_SECTOR_ERASE_CMD:
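
The qlcnic sysfs handler previously ignored the return value of kstrtoul(), so a malformed buffer fell through to the switch even when parsing failed; the added check propagates the error. A user-space analogue of the same parse-and-bail pattern using strtoul and errno:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse a hexadecimal command word, failing loudly on bad input. */
static int parse_cmd(const char *buf, unsigned long *data)
{
	char *end;

	errno = 0;
	*data = strtoul(buf, &end, 16);
	if (errno || end == buf)
		return -EINVAL;
	return 0;
}

int main(void)
{
	unsigned long data;

	printf("\"0xfd\" -> %d\n", parse_cmd("0xfd", &data));
	printf("\"junk\" -> %d\n", parse_cmd("junk", &data));
	return 0;
}
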
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 5803cd6db406..206f0266463e 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -658,7 +658,7 @@ qcaspi_netdev_open(struct net_device *dev)
return ret;
}
- netif_start_queue(qca->net_dev);
+ /* SPI thread takes care of TX queue */
return 0;
}
@@ -760,6 +760,9 @@ qcaspi_netdev_tx_timeout(struct net_device *dev)
qca->net_dev->stats.tx_errors++;
/* Trigger tx queue flush and QCA7000 reset */
qca->sync = QCASPI_SYNC_UNKNOWN;
+
+ if (qca->spi_thread)
+ wake_up_process(qca->spi_thread);
}
static int
@@ -878,22 +881,22 @@ qca_spi_probe(struct spi_device *spi)
if ((qcaspi_clkspeed < QCASPI_CLK_SPEED_MIN) ||
(qcaspi_clkspeed > QCASPI_CLK_SPEED_MAX)) {
- dev_info(&spi->dev, "Invalid clkspeed: %d\n",
- qcaspi_clkspeed);
+ dev_err(&spi->dev, "Invalid clkspeed: %d\n",
+ qcaspi_clkspeed);
return -EINVAL;
}
if ((qcaspi_burst_len < QCASPI_BURST_LEN_MIN) ||
(qcaspi_burst_len > QCASPI_BURST_LEN_MAX)) {
- dev_info(&spi->dev, "Invalid burst len: %d\n",
- qcaspi_burst_len);
+ dev_err(&spi->dev, "Invalid burst len: %d\n",
+ qcaspi_burst_len);
return -EINVAL;
}
if ((qcaspi_pluggable < QCASPI_PLUGGABLE_MIN) ||
(qcaspi_pluggable > QCASPI_PLUGGABLE_MAX)) {
- dev_info(&spi->dev, "Invalid pluggable: %d\n",
- qcaspi_pluggable);
+ dev_err(&spi->dev, "Invalid pluggable: %d\n",
+ qcaspi_pluggable);
return -EINVAL;
}
@@ -955,8 +958,8 @@ qca_spi_probe(struct spi_device *spi)
}
if (register_netdev(qcaspi_devs)) {
- dev_info(&spi->dev, "Unable to register net device %s\n",
- qcaspi_devs->name);
+ dev_err(&spi->dev, "Unable to register net device %s\n",
+ qcaspi_devs->name);
free_netdev(qcaspi_devs);
return -EFAULT;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 75dfac0248f4..eaedc11ed686 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7148,7 +7148,7 @@ static void rtl8169_netpoll(struct net_device *dev)
{
struct rtl8169_private *tp = netdev_priv(dev);
- rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), dev);
+ rtl8169_interrupt(pci_irq_vector(tp->pci_dev, 0), tp);
}
#endif
@@ -7734,8 +7734,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
return rc;
}
- /* override BIOS settings, use userspace tools to enable WOL */
- __rtl8169_set_wol(tp, 0);
+ tp->saved_wolopts = __rtl8169_get_wol(tp);
if (rtl_tbi_enabled(tp)) {
tp->set_speed = rtl8169_set_speed_tbi;
@@ -7789,6 +7788,7 @@ static int rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
NETIF_F_HW_VLAN_CTAG_RX;
dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
NETIF_F_HIGHDMA;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
tp->cp_cmd |= RxChkSum | RxVlan;
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig
index 27be51f0a421..f3f7477043ce 100644
--- a/drivers/net/ethernet/renesas/Kconfig
+++ b/drivers/net/ethernet/renesas/Kconfig
@@ -17,7 +17,6 @@ if NET_VENDOR_RENESAS
config SH_ETH
tristate "Renesas SuperH Ethernet support"
- depends on HAS_DMA
depends on ARCH_RENESAS || SUPERH || COMPILE_TEST
select CRC32
select MII
@@ -31,7 +30,6 @@ config SH_ETH
config RAVB
tristate "Renesas Ethernet AVB support"
- depends on HAS_DMA
depends on ARCH_RENESAS || COMPILE_TEST
select CRC32
select MII
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 68f122140966..0d811c02ff34 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -980,6 +980,13 @@ static void ravb_adjust_link(struct net_device *ndev)
struct ravb_private *priv = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
bool new_state = false;
+ unsigned long flags;
+
+ spin_lock_irqsave(&priv->lock, flags);
+
+ /* Disable TX and RX right away if the E-MAC change is ignored */
+ if (priv->no_avb_link)
+ ravb_rcv_snd_disable(ndev);
if (phydev->link) {
if (phydev->duplex != priv->duplex) {
@@ -997,18 +1004,21 @@ static void ravb_adjust_link(struct net_device *ndev)
ravb_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = true;
priv->link = phydev->link;
- if (priv->no_avb_link)
- ravb_rcv_snd_enable(ndev);
}
} else if (priv->link) {
new_state = true;
priv->link = 0;
priv->speed = 0;
priv->duplex = -1;
- if (priv->no_avb_link)
- ravb_rcv_snd_disable(ndev);
}
+ /* Enable TX and RX again here if the E-MAC change is ignored */
+ if (priv->no_avb_link && phydev->link)
+ ravb_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&priv->lock, flags);
+
if (new_state && netif_msg_link(priv))
phy_print_status(phydev);
}
@@ -1096,75 +1106,6 @@ static int ravb_phy_start(struct net_device *ndev)
return 0;
}
-static int ravb_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *cmd)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned long flags;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&priv->lock, flags);
- phy_ethtool_ksettings_get(ndev->phydev, cmd);
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return 0;
-}
-
-static int ravb_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- unsigned long flags;
- int error;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&priv->lock, flags);
-
- /* Disable TX and RX */
- ravb_rcv_snd_disable(ndev);
-
- error = phy_ethtool_ksettings_set(ndev->phydev, cmd);
- if (error)
- goto error_exit;
-
- if (cmd->base.duplex == DUPLEX_FULL)
- priv->duplex = 1;
- else
- priv->duplex = 0;
-
- ravb_set_duplex(ndev);
-
-error_exit:
- mdelay(1);
-
- /* Enable TX and RX */
- ravb_rcv_snd_enable(ndev);
-
- mmiowb();
- spin_unlock_irqrestore(&priv->lock, flags);
-
- return error;
-}
-
-static int ravb_nway_reset(struct net_device *ndev)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- int error = -ENODEV;
- unsigned long flags;
-
- if (ndev->phydev) {
- spin_lock_irqsave(&priv->lock, flags);
- error = phy_start_aneg(ndev->phydev);
- spin_unlock_irqrestore(&priv->lock, flags);
- }
-
- return error;
-}
-
static u32 ravb_get_msglevel(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -1377,7 +1318,7 @@ static int ravb_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
}
static const struct ethtool_ops ravb_ethtool_ops = {
- .nway_reset = ravb_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_msglevel = ravb_get_msglevel,
.set_msglevel = ravb_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -1387,8 +1328,8 @@ static const struct ethtool_ops ravb_ethtool_ops = {
.get_ringparam = ravb_get_ringparam,
.set_ringparam = ravb_set_ringparam,
.get_ts_info = ravb_get_ts_info,
- .get_link_ksettings = ravb_get_link_ksettings,
- .set_link_ksettings = ravb_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_wol = ravb_get_wol,
.set_wol = ravb_set_wol,
};
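
ravb_adjust_link() now holds priv->lock for the whole handler, disables RX/TX up front when the E-MAC link signal is ignored, and re-enables them at the end only if the PHY reports link up; the private ksettings and nway_reset ethtool ops are dropped in favour of the generic phylib helpers. A simplified sketch of that disable-early/enable-late structure, with a pthread mutex standing in for the driver spinlock and all names hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static void rcv_snd_disable(void) { printf("RX/TX disabled\n"); }
static void rcv_snd_enable(void)  { printf("RX/TX enabled\n");  }

/* no_avb_link: the MAC's own link signal is ignored, software gates RX/TX. */
static void adjust_link(bool no_avb_link, bool phy_link)
{
	pthread_mutex_lock(&lock);

	if (no_avb_link)
		rcv_snd_disable();

	/* ... update speed/duplex from the PHY state here ... */

	if (no_avb_link && phy_link)
		rcv_snd_enable();

	pthread_mutex_unlock(&lock);
}

int main(void)
{
	adjust_link(true, true);	/* link up: disabled, then re-enabled */
	adjust_link(true, false);	/* link down: left disabled */
	return 0;
}
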
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index e9007b613f17..5614fd231bbe 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1927,8 +1927,15 @@ static void sh_eth_adjust_link(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
struct phy_device *phydev = ndev->phydev;
+ unsigned long flags;
int new_state = 0;
+ spin_lock_irqsave(&mdp->lock, flags);
+
+ /* Disable TX and RX right away if the E-MAC change is ignored */
+ if (mdp->cd->no_psr || mdp->no_ether_link)
+ sh_eth_rcv_snd_disable(ndev);
+
if (phydev->link) {
if (phydev->duplex != mdp->duplex) {
new_state = 1;
@@ -1947,18 +1954,21 @@ static void sh_eth_adjust_link(struct net_device *ndev)
sh_eth_modify(ndev, ECMR, ECMR_TXF, 0);
new_state = 1;
mdp->link = phydev->link;
- if (mdp->cd->no_psr || mdp->no_ether_link)
- sh_eth_rcv_snd_enable(ndev);
}
} else if (mdp->link) {
new_state = 1;
mdp->link = 0;
mdp->speed = 0;
mdp->duplex = -1;
- if (mdp->cd->no_psr || mdp->no_ether_link)
- sh_eth_rcv_snd_disable(ndev);
}
+ /* Enable TX and RX again here if the E-MAC change is ignored */
+ if ((mdp->cd->no_psr || mdp->no_ether_link) && phydev->link)
+ sh_eth_rcv_snd_enable(ndev);
+
+ mmiowb();
+ spin_unlock_irqrestore(&mdp->lock, flags);
+
if (new_state && netif_msg_link(mdp))
phy_print_status(phydev);
}
@@ -2030,60 +2040,6 @@ static int sh_eth_phy_start(struct net_device *ndev)
return 0;
}
-static int sh_eth_get_link_ksettings(struct net_device *ndev,
- struct ethtool_link_ksettings *cmd)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long flags;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&mdp->lock, flags);
- phy_ethtool_ksettings_get(ndev->phydev, cmd);
- spin_unlock_irqrestore(&mdp->lock, flags);
-
- return 0;
-}
-
-static int sh_eth_set_link_ksettings(struct net_device *ndev,
- const struct ethtool_link_ksettings *cmd)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long flags;
- int ret;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&mdp->lock, flags);
-
- /* disable tx and rx */
- sh_eth_rcv_snd_disable(ndev);
-
- ret = phy_ethtool_ksettings_set(ndev->phydev, cmd);
- if (ret)
- goto error_exit;
-
- if (cmd->base.duplex == DUPLEX_FULL)
- mdp->duplex = 1;
- else
- mdp->duplex = 0;
-
- if (mdp->cd->set_duplex)
- mdp->cd->set_duplex(ndev);
-
-error_exit:
- mdelay(1);
-
- /* enable tx and rx */
- sh_eth_rcv_snd_enable(ndev);
-
- spin_unlock_irqrestore(&mdp->lock, flags);
-
- return ret;
-}
-
/* If it is ever necessary to increase SH_ETH_REG_DUMP_MAX_REGS, the
* version must be bumped as well. Just adding registers up to that
* limit is fine, as long as the existing register indices don't
@@ -2263,22 +2219,6 @@ static void sh_eth_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
pm_runtime_put_sync(&mdp->pdev->dev);
}
-static int sh_eth_nway_reset(struct net_device *ndev)
-{
- struct sh_eth_private *mdp = netdev_priv(ndev);
- unsigned long flags;
- int ret;
-
- if (!ndev->phydev)
- return -ENODEV;
-
- spin_lock_irqsave(&mdp->lock, flags);
- ret = phy_start_aneg(ndev->phydev);
- spin_unlock_irqrestore(&mdp->lock, flags);
-
- return ret;
-}
-
static u32 sh_eth_get_msglevel(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
@@ -2429,7 +2369,7 @@ static int sh_eth_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_regs_len = sh_eth_get_regs_len,
.get_regs = sh_eth_get_regs,
- .nway_reset = sh_eth_nway_reset,
+ .nway_reset = phy_ethtool_nway_reset,
.get_msglevel = sh_eth_get_msglevel,
.set_msglevel = sh_eth_set_msglevel,
.get_link = ethtool_op_get_link,
@@ -2438,8 +2378,8 @@ static const struct ethtool_ops sh_eth_ethtool_ops = {
.get_sset_count = sh_eth_get_sset_count,
.get_ringparam = sh_eth_get_ringparam,
.set_ringparam = sh_eth_set_ringparam,
- .get_link_ksettings = sh_eth_get_link_ksettings,
- .set_link_ksettings = sh_eth_set_link_ksettings,
+ .get_link_ksettings = phy_ethtool_get_link_ksettings,
+ .set_link_ksettings = phy_ethtool_set_link_ksettings,
.get_wol = sh_eth_get_wol,
.set_wol = sh_eth_set_wol,
};
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 23f0785c0573..7eeac3d6cfe8 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -4288,9 +4288,9 @@ static int efx_ef10_filter_pri(struct efx_ef10_filter_table *table,
return -EPROTONOSUPPORT;
}
-static s32 efx_ef10_filter_insert(struct efx_nic *efx,
- struct efx_filter_spec *spec,
- bool replace_equal)
+static s32 efx_ef10_filter_insert_locked(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
{
DECLARE_BITMAP(mc_rem_map, EFX_EF10_FILTER_SEARCH_LIMIT);
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4307,7 +4307,7 @@ static s32 efx_ef10_filter_insert(struct efx_nic *efx,
bool is_mc_recip;
s32 rc;
- down_read(&efx->filter_sem);
+ WARN_ON(!rwsem_is_locked(&efx->filter_sem));
table = efx->filter_state;
down_write(&table->lock);
@@ -4498,10 +4498,22 @@ out_unlock:
if (rss_locked)
mutex_unlock(&efx->rss_lock);
up_write(&table->lock);
- up_read(&efx->filter_sem);
return rc;
}
+static s32 efx_ef10_filter_insert(struct efx_nic *efx,
+ struct efx_filter_spec *spec,
+ bool replace_equal)
+{
+ s32 ret;
+
+ down_read(&efx->filter_sem);
+ ret = efx_ef10_filter_insert_locked(efx, spec, replace_equal);
+ up_read(&efx->filter_sem);
+
+ return ret;
+}
+
static void efx_ef10_filter_update_rx_scatter(struct efx_nic *efx)
{
/* no need to do anything here on EF10 */
@@ -5285,7 +5297,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
EFX_WARN_ON_PARANOID(ids[i] != EFX_EF10_FILTER_ID_INVALID);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
efx_filter_set_eth_local(&spec, vlan->vid, addr_list[i].addr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
if (rollback) {
netif_info(efx, drv, efx->net_dev,
@@ -5314,7 +5326,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n", rc);
@@ -5370,7 +5382,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
if (vlan->vid != EFX_FILTER_VID_UNSPEC)
efx_filter_set_eth_local(&spec, vlan->vid, NULL);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
const char *um = multicast ? "Multicast" : "Unicast";
const char *encap_name = "";
@@ -5430,7 +5442,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, vlan->vid, baddr);
- rc = efx_ef10_filter_insert(efx, &spec, true);
+ rc = efx_ef10_filter_insert_locked(efx, &spec, true);
if (rc < 0) {
netif_warn(efx, drv, efx->net_dev,
"Broadcast filter insert failed rc=%d\n",
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index ad4a354ce570..ce3a177081a8 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -1871,12 +1871,6 @@ static void efx_remove_filters(struct efx_nic *efx)
up_write(&efx->filter_sem);
}
-static void efx_restore_filters(struct efx_nic *efx)
-{
- down_read(&efx->filter_sem);
- efx->type->filter_table_restore(efx);
- up_read(&efx->filter_sem);
-}
/**************************************************************************
*
@@ -2688,6 +2682,7 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
efx_disable_interrupts(efx);
mutex_lock(&efx->mac_lock);
+ down_write(&efx->filter_sem);
mutex_lock(&efx->rss_lock);
if (efx->port_initialized && method != RESET_TYPE_INVISIBLE &&
method != RESET_TYPE_DATAPATH)
@@ -2745,9 +2740,8 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
if (efx->type->rx_restore_rss_contexts)
efx->type->rx_restore_rss_contexts(efx);
mutex_unlock(&efx->rss_lock);
- down_read(&efx->filter_sem);
- efx_restore_filters(efx);
- up_read(&efx->filter_sem);
+ efx->type->filter_table_restore(efx);
+ up_write(&efx->filter_sem);
if (efx->type->sriov_reset)
efx->type->sriov_reset(efx);
@@ -2764,6 +2758,7 @@ fail:
efx->port_initialized = false;
mutex_unlock(&efx->rss_lock);
+ up_write(&efx->filter_sem);
mutex_unlock(&efx->mac_lock);
return rc;
@@ -3180,6 +3175,7 @@ bool efx_rps_check_rule(struct efx_arfs_rule *rule, unsigned int filter_idx,
return true;
}
+static
struct hlist_head *efx_rps_hash_bucket(struct efx_nic *efx,
const struct efx_filter_spec *spec)
{
@@ -3472,7 +3468,9 @@ static int efx_pci_probe_main(struct efx_nic *efx)
efx_init_napi(efx);
+ down_write(&efx->filter_sem);
rc = efx->type->init(efx);
+ up_write(&efx->filter_sem);
if (rc) {
netif_err(efx, probe, efx->net_dev,
"failed to initialise NIC\n");
@@ -3764,7 +3762,9 @@ static int efx_pm_resume(struct device *dev)
rc = efx->type->reset(efx, RESET_TYPE_ALL);
if (rc)
return rc;
+ down_write(&efx->filter_sem);
rc = efx->type->init(efx);
+ up_write(&efx->filter_sem);
if (rc)
return rc;
rc = efx_pm_thaw(dev);
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index 8edf20967c82..e045a5d6b938 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -2794,6 +2794,7 @@ int efx_farch_filter_table_probe(struct efx_nic *efx)
if (!state)
return -ENOMEM;
efx->filter_state = state;
+ init_rwsem(&state->lock);
table = &state->table[EFX_FARCH_FILTER_TABLE_RX_IP];
table->id = EFX_FARCH_FILTER_TABLE_RX_IP;
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index cb5b0f58c395..edf20361ea5f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -111,7 +111,7 @@ config DWMAC_ROCKCHIP
config DWMAC_SOCFPGA
tristate "SOCFPGA dwmac support"
default ARCH_SOCFPGA
- depends on OF && (ARCH_SOCFPGA || COMPILE_TEST)
+ depends on OF && (ARCH_SOCFPGA || ARCH_STRATIX10 || COMPILE_TEST)
select MFD_SYSCON
help
Support for ethernet controller on Altera SOCFPGA
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 6e359572b9f0..5b3b06a0a3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -55,6 +55,7 @@ struct socfpga_dwmac {
struct device *dev;
struct regmap *sys_mgr_base_addr;
struct reset_control *stmmac_rst;
+ struct reset_control *stmmac_ocp_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
struct tse_pcs pcs;
@@ -262,8 +263,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
val = SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII;
/* Assert reset to the enet controller before changing the phy mode */
- if (dwmac->stmmac_rst)
- reset_control_assert(dwmac->stmmac_rst);
+ reset_control_assert(dwmac->stmmac_ocp_rst);
+ reset_control_assert(dwmac->stmmac_rst);
regmap_read(sys_mgr_base_addr, reg_offset, &ctrl);
ctrl &= ~(SYSMGR_EMACGRP_CTRL_PHYSEL_MASK << reg_shift);
@@ -288,8 +289,8 @@ static int socfpga_dwmac_set_phy_mode(struct socfpga_dwmac *dwmac)
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (dwmac->stmmac_rst)
- reset_control_deassert(dwmac->stmmac_rst);
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+ reset_control_deassert(dwmac->stmmac_rst);
if (phymode == PHY_INTERFACE_MODE_SGMII) {
if (tse_pcs_init(dwmac->pcs.tse_pcs_base, &dwmac->pcs) != 0) {
dev_err(dwmac->dev, "Unable to initialize TSE PCS");
@@ -324,6 +325,15 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ dwmac->stmmac_ocp_rst = devm_reset_control_get_optional(dev, "stmmaceth-ocp");
+ if (IS_ERR(dwmac->stmmac_ocp_rst)) {
+ ret = PTR_ERR(dwmac->stmmac_ocp_rst);
+ dev_err(dev, "error getting reset control of ocp %d\n", ret);
+ goto err_remove_config_dt;
+ }
+
+ reset_control_deassert(dwmac->stmmac_ocp_rst);
+
ret = socfpga_dwmac_parse_data(dwmac, dev);
if (ret) {
dev_err(dev, "Unable to parse OF data\n");
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
index 2e6e2a96b4f2..f9a61f90cfbc 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sun8i.c
@@ -37,7 +37,7 @@
* is done in the "stmmac files"
*/
-/* struct emac_variant - Descrive dwmac-sun8i hardware variant
+/* struct emac_variant - Describe dwmac-sun8i hardware variant
* @default_syscon_value: The default value of the EMAC register in syscon
* This value is used for disabling properly EMAC
* and used as a good starting value in case of the
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
index d37f17ca62fe..65bc3556bd8f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.c
@@ -407,6 +407,16 @@ static void dwmac4_enable_tso(void __iomem *ioaddr, bool en, u32 chan)
}
}
+static void dwmac4_set_bfsize(void __iomem *ioaddr, int bfsize, u32 chan)
+{
+ u32 value = readl(ioaddr + DMA_CHAN_RX_CONTROL(chan));
+
+ value &= ~DMA_RBSZ_MASK;
+ value |= (bfsize << DMA_RBSZ_SHIFT) & DMA_RBSZ_MASK;
+
+ writel(value, ioaddr + DMA_CHAN_RX_CONTROL(chan));
+}
+
const struct stmmac_dma_ops dwmac4_dma_ops = {
.reset = dwmac4_dma_reset,
.init = dwmac4_dma_init,
@@ -431,6 +441,7 @@ const struct stmmac_dma_ops dwmac4_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .set_bfsize = dwmac4_set_bfsize,
};
const struct stmmac_dma_ops dwmac410_dma_ops = {
@@ -457,4 +468,5 @@ const struct stmmac_dma_ops dwmac410_dma_ops = {
.set_rx_tail_ptr = dwmac4_set_rx_tail_ptr,
.set_tx_tail_ptr = dwmac4_set_tx_tail_ptr,
.enable_tso = dwmac4_enable_tso,
+ .set_bfsize = dwmac4_set_bfsize,
};
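
dwmac4_set_bfsize() programs the RX buffer size into bits 14:1 of the per-channel RX control register with a read-modify-write, and both the GMAC4 and GMAC4.10 dma_ops tables gain the new .set_bfsize hook. A plain C sketch of the mask/shift update against an in-memory stand-in for the register:

#include <stdint.h>
#include <stdio.h>

/* Field occupies bits 14:1, mirroring DMA_RBSZ_MASK / DMA_RBSZ_SHIFT. */
#define RBSZ_SHIFT	1
#define RBSZ_MASK	(0x3fffu << RBSZ_SHIFT)

static void set_bfsize(uint32_t *reg, unsigned int bfsize)
{
	uint32_t value = *reg;		/* readl() in the driver */

	value &= ~RBSZ_MASK;
	value |= (bfsize << RBSZ_SHIFT) & RBSZ_MASK;
	*reg = value;			/* writel() in the driver */
}

int main(void)
{
	uint32_t rx_control = 0x1;	/* keep an unrelated bit (start RX) set */

	set_bfsize(&rx_control, 1536);
	printf("rx_control = 0x%08x\n", rx_control);
	return 0;
}
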
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
index c63c1fe3f26b..22a4a6dbb1a4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_dma.h
@@ -120,6 +120,8 @@
/* DMA Rx Channel X Control register defines */
#define DMA_CONTROL_SR BIT(0)
+#define DMA_RBSZ_MASK GENMASK(14, 1)
+#define DMA_RBSZ_SHIFT 1
/* Interrupt status per channel */
#define DMA_CHAN_STATUS_REB GENMASK(21, 19)
diff --git a/drivers/net/ethernet/stmicro/stmmac/hwif.h b/drivers/net/ethernet/stmicro/stmmac/hwif.h
index e44e7b26ce82..fe8b536b13f8 100644
--- a/drivers/net/ethernet/stmicro/stmmac/hwif.h
+++ b/drivers/net/ethernet/stmicro/stmmac/hwif.h
@@ -183,6 +183,7 @@ struct stmmac_dma_ops {
void (*set_rx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*set_tx_tail_ptr)(void __iomem *ioaddr, u32 tail_ptr, u32 chan);
void (*enable_tso)(void __iomem *ioaddr, bool en, u32 chan);
+ void (*set_bfsize)(void __iomem *ioaddr, int bfsize, u32 chan);
};
#define stmmac_reset(__priv, __args...) \
@@ -235,6 +236,8 @@ struct stmmac_dma_ops {
stmmac_do_void_callback(__priv, dma, set_tx_tail_ptr, __args)
#define stmmac_enable_tso(__priv, __args...) \
stmmac_do_void_callback(__priv, dma, enable_tso, __args)
+#define stmmac_set_dma_bfsize(__priv, __args...) \
+ stmmac_do_void_callback(__priv, dma, set_bfsize, __args)
struct mac_device_info;
struct net_device;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index e79b0d7b388a..60f59abab009 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -928,6 +928,7 @@ static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
static int stmmac_init_phy(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ u32 tx_cnt = priv->plat->tx_queues_to_use;
struct phy_device *phydev;
char phy_id_fmt[MII_BUS_ID_SIZE + 3];
char bus_id[MII_BUS_ID_SIZE];
@@ -969,6 +970,15 @@ static int stmmac_init_phy(struct net_device *dev)
SUPPORTED_1000baseT_Full);
/*
+ * Half-duplex mode is not supported with multiple queues;
+ * half-duplex can only work with a single queue.
+ */
+ if (tx_cnt > 1)
+ phydev->supported &= ~(SUPPORTED_1000baseT_Half |
+ SUPPORTED_100baseT_Half |
+ SUPPORTED_10baseT_Half);
+
+ /*
* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
* 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
@@ -1794,6 +1804,8 @@ static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
rxfifosz, qmode);
+ stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz,
+ chan);
}
for (chan = 0; chan < tx_channels_count; chan++) {
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 6d141f3931eb..72da77b94ecd 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -94,7 +94,6 @@ static int dwmac1000_validate_ucast_entries(int ucast_entries)
/**
* stmmac_axi_setup - parse DT parameters for programming the AXI register
* @pdev: platform device
- * @priv: driver private struct.
* Description:
* if required, from device-tree the AXI internal register can be tuned
* by using platform parameters.
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index 7a16d40a72d1..b9221fc1674d 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -60,8 +60,7 @@
#include <linux/sungem_phy.h>
#include "sungem.h"
-/* Stripping FCS is causing problems, disabled for now */
-#undef STRIP_FCS
+#define STRIP_FCS
#define DEFAULT_MSG (NETIF_MSG_DRV | \
NETIF_MSG_PROBE | \
@@ -435,7 +434,7 @@ static int gem_rxmac_reset(struct gem *gp)
writel(desc_dma & 0xffffffff, gp->regs + RXDMA_DBLOW);
writel(RX_RING_SIZE - 4, gp->regs + RXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
if (readl(gp->regs + GREG_BIFCFG) & GREG_BIFCFG_M66EN)
writel(((5 & RXDMA_BLANK_IPKTS) |
@@ -760,7 +759,6 @@ static int gem_rx(struct gem *gp, int work_to_do)
struct net_device *dev = gp->dev;
int entry, drops, work_done = 0;
u32 done;
- __sum16 csum;
if (netif_msg_rx_status(gp))
printk(KERN_DEBUG "%s: rx interrupt, done: %d, rx_new: %d\n",
@@ -855,9 +853,13 @@ static int gem_rx(struct gem *gp, int work_to_do)
skb = copy_skb;
}
- csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
- skb->csum = csum_unfold(csum);
- skb->ip_summed = CHECKSUM_COMPLETE;
+ if (likely(dev->features & NETIF_F_RXCSUM)) {
+ __sum16 csum;
+
+ csum = (__force __sum16)htons((status & RXDCTRL_TCPCSUM) ^ 0xffff);
+ skb->csum = csum_unfold(csum);
+ skb->ip_summed = CHECKSUM_COMPLETE;
+ }
skb->protocol = eth_type_trans(skb, gp->dev);
napi_gro_receive(&gp->napi, skb);
@@ -1761,7 +1763,7 @@ static void gem_init_dma(struct gem *gp)
writel(0, gp->regs + TXDMA_KICK);
val = (RXDMA_CFG_BASE | (RX_OFFSET << 10) |
- ((14 / 2) << 13) | RXDMA_CFG_FTHRESH_128);
+ (ETH_HLEN << 13) | RXDMA_CFG_FTHRESH_128);
writel(val, gp->regs + RXDMA_CFG);
writel(desc_dma >> 32, gp->regs + RXDMA_DBHI);
@@ -2985,8 +2987,8 @@ static int gem_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, dev);
/* We can do scatter/gather and HW checksum */
- dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
- dev->features |= dev->hw_features | NETIF_F_RXCSUM;
+ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+ dev->features = dev->hw_features;
if (pci_using_dac)
dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index cdbddf16dd29..4f1267477aa4 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -205,7 +205,7 @@ static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
* devices (e.g. cpsw switches) use plain old memory. Descriptor pools
* abstract out these details
*/
-int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
+static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
{
struct cpdma_params *cpdma_params = &ctlr->params;
struct cpdma_desc_pool *pool;
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 06d7c9e4dcda..f270beebb428 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1385,6 +1385,15 @@ static int emac_devioctl(struct net_device *ndev, struct ifreq *ifrq, int cmd)
return -EOPNOTSUPP;
}
+static int match_first_device(struct device *dev, void *data)
+{
+ if (dev->parent && dev->parent->of_node)
+ return of_device_is_compatible(dev->parent->of_node,
+ "ti,davinci_mdio");
+
+ return !strncmp(dev_name(dev), "davinci_mdio", 12);
+}
+
/**
* emac_dev_open - EMAC device open
* @ndev: The DaVinci EMAC network adapter
@@ -1484,8 +1493,14 @@ static int emac_dev_open(struct net_device *ndev)
/* use the first phy on the bus if pdata did not give us a phy id */
if (!phydev && !priv->phy_id) {
- phy = bus_find_device_by_name(&mdio_bus_type, NULL,
- "davinci_mdio");
+ /* NOTE: we can't use bus_find_device_by_name() here because
+ * the device name is not guaranteed to be 'davinci_mdio'. On
+ * some systems it can be 'davinci_mdio.0' so we need to use
+ * strncmp() against the first part of the string to correctly
+ * match it.
+ */
+ phy = bus_find_device(&mdio_bus_type, NULL, NULL,
+ match_first_device);
if (phy) {
priv->phy_id = dev_name(phy);
if (!priv->phy_id || !*priv->phy_id)
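
The comment in the hunk above explains the reasoning: the MDIO device may be registered as "davinci_mdio.0" rather than "davinci_mdio", so an exact-name lookup fails and a predicate-based bus_find_device() is used instead. A hedged sketch of that lookup pattern (the helper name here is illustrative, not the driver's):

#include <linux/device.h>
#include <linux/of.h>
#include <linux/string.h>

/* Match either by the parent's OF compatible or by a name prefix,
 * mirroring the shape of the fix above.
 */
static int match_mdio_prefix(struct device *dev, void *data)
{
	const char *prefix = data;

	if (dev->parent && dev->parent->of_node)
		return of_device_is_compatible(dev->parent->of_node,
					       "ti,davinci_mdio");

	return !strncmp(dev_name(dev), prefix, strlen(prefix));
}

/* Usage sketch (assumes mdio_bus_type is visible in this context):
 *	struct device *d = bus_find_device(&mdio_bus_type, NULL,
 *					   "davinci_mdio", match_mdio_prefix);
 *	if (d) {
 *		...
 *		put_device(d);	 bus_find_device() returns a held reference
 *	}
 */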
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
index 16c3bfbe1992..757a3b37ae8a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
@@ -218,6 +218,7 @@ issue:
ret = of_mdiobus_register(bus, np1);
if (ret) {
mdiobus_free(bus);
+ lp->mii_bus = NULL;
return ret;
}
return 0;
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 750eaa53bf0c..ada33c2d9ac2 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -476,7 +476,7 @@ static struct sk_buff **geneve_gro_receive(struct sock *sk,
out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final(skb, pp, flush);
return pp;
}
diff --git a/drivers/net/hamradio/bpqether.c b/drivers/net/hamradio/bpqether.c
index f347fd9c5b28..777fa59f5e0c 100644
--- a/drivers/net/hamradio/bpqether.c
+++ b/drivers/net/hamradio/bpqether.c
@@ -89,10 +89,6 @@
static const char banner[] __initconst = KERN_INFO \
"AX.25: bpqether driver version 004\n";
-static char bcast_addr[6]={0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
-
-static char bpq_eth_addr[6];
-
static int bpq_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
static int bpq_device_event(struct notifier_block *, unsigned long, void *);
@@ -501,8 +497,8 @@ static int bpq_new_device(struct net_device *edev)
bpq->ethdev = edev;
bpq->axdev = ndev;
- memcpy(bpq->dest_addr, bcast_addr, sizeof(bpq_eth_addr));
- memcpy(bpq->acpt_addr, bcast_addr, sizeof(bpq_eth_addr));
+ eth_broadcast_addr(bpq->dest_addr);
+ eth_broadcast_addr(bpq->acpt_addr);
err = register_netdevice(ndev);
if (err)
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 1a924b867b07..4b6e308199d2 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -210,7 +210,7 @@ int netvsc_recv_callback(struct net_device *net,
void netvsc_channel_cb(void *context);
int netvsc_poll(struct napi_struct *napi, int budget);
-void rndis_set_subchannel(struct work_struct *w);
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5d5bd513847f..31c3d77b4733 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -65,6 +65,41 @@ void netvsc_switch_datapath(struct net_device *ndev, bool vf)
VM_PKT_DATA_INBAND, 0);
}
+/* Worker to setup sub channels on initial setup
+ * Initial hotplug event occurs in softirq context
+ * and can't wait for channels.
+ */
+static void netvsc_subchan_work(struct work_struct *w)
+{
+ struct netvsc_device *nvdev =
+ container_of(w, struct netvsc_device, subchan_work);
+ struct rndis_device *rdev;
+ int i, ret;
+
+ /* Avoid deadlock with device removal already under RTNL */
+ if (!rtnl_trylock()) {
+ schedule_work(w);
+ return;
+ }
+
+ rdev = nvdev->extension;
+ if (rdev) {
+ ret = rndis_set_subchannel(rdev->ndev, nvdev);
+ if (ret == 0) {
+ netif_device_attach(rdev->ndev);
+ } else {
+ /* fallback to only primary channel */
+ for (i = 1; i < nvdev->num_chn; i++)
+ netif_napi_del(&nvdev->chan_table[i].napi);
+
+ nvdev->max_chn = 1;
+ nvdev->num_chn = 1;
+ }
+ }
+
+ rtnl_unlock();
+}
+
static struct netvsc_device *alloc_net_device(void)
{
struct netvsc_device *net_device;
@@ -81,7 +116,7 @@ static struct netvsc_device *alloc_net_device(void)
init_completion(&net_device->channel_init_wait);
init_waitqueue_head(&net_device->subchan_open);
- INIT_WORK(&net_device->subchan_work, rndis_set_subchannel);
+ INIT_WORK(&net_device->subchan_work, netvsc_subchan_work);
return net_device;
}
@@ -1239,6 +1274,7 @@ int netvsc_poll(struct napi_struct *napi, int budget)
struct hv_device *device = netvsc_channel_to_device(channel);
struct net_device *ndev = hv_get_drvdata(device);
int work_done = 0;
+ int ret;
/* If starting a new interval */
if (!nvchan->desc)
@@ -1250,16 +1286,18 @@ int netvsc_poll(struct napi_struct *napi, int budget)
nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
- /* If send of pending receive completions suceeded
- * and did not exhaust NAPI budget this time
- * and not doing busy poll
+ /* Send any pending receive completions */
+ ret = send_recv_completions(ndev, net_device, nvchan);
+
+ /* If it did not exhaust NAPI budget this time
+ * and not doing busy poll
* then re-enable host interrupts
- * and reschedule if ring is not empty.
+ * and reschedule if ring is not empty
+ * or sending receive completion failed.
*/
- if (send_recv_completions(ndev, net_device, nvchan) == 0 &&
- work_done < budget &&
+ if (work_done < budget &&
napi_complete_done(napi, work_done) &&
- hv_end_read(&channel->inbound) &&
+ (ret || hv_end_read(&channel->inbound)) &&
napi_schedule_prep(napi)) {
hv_begin_read(&channel->inbound);
__napi_schedule(napi);
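
netvsc_subchan_work() above uses rtnl_trylock() and reschedules itself instead of blocking, because the device-removal path already holds RTNL while it waits for outstanding work to finish; sleeping on the lock here would deadlock. A minimal sketch of that pattern, assuming illustrative structure and function names:

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct setup_work;
	/* ... */
};

static void my_setup_work(struct work_struct *w)
{
	struct my_dev *md = container_of(w, struct my_dev, setup_work);

	/* Never block on RTNL from a work item that an RTNL-holding
	 * teardown path may flush: back off and try again later.
	 */
	if (!rtnl_trylock()) {
		schedule_work(w);
		return;
	}

	/* ... perform the RTNL-protected setup using md ... */
	(void)md;

	rtnl_unlock();
}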
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index fe2256bf1d13..dd1d6e115145 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -905,8 +905,20 @@ static int netvsc_attach(struct net_device *ndev,
if (IS_ERR(nvdev))
return PTR_ERR(nvdev);
- /* Note: enable and attach happen when sub-channels setup */
+ if (nvdev->num_chn > 1) {
+ ret = rndis_set_subchannel(ndev, nvdev);
+
+ /* if unavailable, just proceed with one queue */
+ if (ret) {
+ nvdev->max_chn = 1;
+ nvdev->num_chn = 1;
+ }
+ }
+
+ /* In any case device is now ready */
+ netif_device_attach(ndev);
+ /* Note: enable and attach happen when sub-channels setup */
netif_carrier_off(ndev);
if (netif_running(ndev)) {
@@ -2089,6 +2101,9 @@ static int netvsc_probe(struct hv_device *dev,
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ if (nvdev->num_chn > 1)
+ schedule_work(&nvdev->subchan_work);
+
/* hw_features computed in rndis_netdev_set_hwcaps() */
net->features = net->hw_features |
NETIF_F_HIGHDMA | NETIF_F_SG |
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 5428bb261102..408ece27131c 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -1062,29 +1062,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
* This breaks overlap of processing the host message for the
* new primary channel with the initialization of sub-channels.
*/
-void rndis_set_subchannel(struct work_struct *w)
+int rndis_set_subchannel(struct net_device *ndev, struct netvsc_device *nvdev)
{
- struct netvsc_device *nvdev
- = container_of(w, struct netvsc_device, subchan_work);
struct nvsp_message *init_packet = &nvdev->channel_init_pkt;
- struct net_device_context *ndev_ctx;
- struct rndis_device *rdev;
- struct net_device *ndev;
- struct hv_device *hv_dev;
+ struct net_device_context *ndev_ctx = netdev_priv(ndev);
+ struct hv_device *hv_dev = ndev_ctx->device_ctx;
+ struct rndis_device *rdev = nvdev->extension;
int i, ret;
- if (!rtnl_trylock()) {
- schedule_work(w);
- return;
- }
-
- rdev = nvdev->extension;
- if (!rdev)
- goto unlock; /* device was removed */
-
- ndev = rdev->ndev;
- ndev_ctx = netdev_priv(ndev);
- hv_dev = ndev_ctx->device_ctx;
+ ASSERT_RTNL();
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG5_TYPE_SUBCHANNEL;
@@ -1100,13 +1086,13 @@ void rndis_set_subchannel(struct work_struct *w)
VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
if (ret) {
netdev_err(ndev, "sub channel allocate send failed: %d\n", ret);
- goto failed;
+ return ret;
}
wait_for_completion(&nvdev->channel_init_wait);
if (init_packet->msg.v5_msg.subchn_comp.status != NVSP_STAT_SUCCESS) {
netdev_err(ndev, "sub channel request failed\n");
- goto failed;
+ return -EIO;
}
nvdev->num_chn = 1 +
@@ -1125,21 +1111,7 @@ void rndis_set_subchannel(struct work_struct *w)
for (i = 0; i < VRSS_SEND_TAB_SIZE; i++)
ndev_ctx->tx_table[i] = i % nvdev->num_chn;
- netif_device_attach(ndev);
- rtnl_unlock();
- return;
-
-failed:
- /* fallback to only primary channel */
- for (i = 1; i < nvdev->num_chn; i++)
- netif_napi_del(&nvdev->chan_table[i].napi);
-
- nvdev->max_chn = 1;
- nvdev->num_chn = 1;
-
- netif_device_attach(ndev);
-unlock:
- rtnl_unlock();
+ return 0;
}
static int rndis_netdev_set_hwcaps(struct rndis_device *rndis_device,
@@ -1360,21 +1332,13 @@ struct netvsc_device *rndis_filter_device_add(struct hv_device *dev,
netif_napi_add(net, &net_device->chan_table[i].napi,
netvsc_poll, NAPI_POLL_WEIGHT);
- if (net_device->num_chn > 1)
- schedule_work(&net_device->subchan_work);
+ return net_device;
out:
- /* if unavailable, just proceed with one queue */
- if (ret) {
- net_device->max_chn = 1;
- net_device->num_chn = 1;
- }
-
- /* No sub channels, device is ready */
- if (net_device->num_chn == 1)
- netif_device_attach(net);
-
- return net_device;
+ /* setting up multiple channels failed */
+ net_device->max_chn = 1;
+ net_device->num_chn = 1;
+ return 0;
err_dev_remv:
rndis_filter_device_remove(dev, net_device);
diff --git a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c
index 64f1b1e77bc0..23a52b9293f3 100644
--- a/drivers/net/ieee802154/adf7242.c
+++ b/drivers/net/ieee802154/adf7242.c
@@ -275,6 +275,8 @@ struct adf7242_local {
struct spi_message stat_msg;
struct spi_transfer stat_xfer;
struct dentry *debugfs_root;
+ struct delayed_work work;
+ struct workqueue_struct *wqueue;
unsigned long flags;
int tx_stat;
bool promiscuous;
@@ -575,10 +577,26 @@ static int adf7242_cmd_rx(struct adf7242_local *lp)
/* Wait until the ACK is sent */
adf7242_wait_status(lp, RC_STATUS_PHY_RDY, RC_STATUS_MASK, __LINE__);
adf7242_clear_irqstat(lp);
+ mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
return adf7242_cmd(lp, CMD_RC_RX);
}
+static void adf7242_rx_cal_work(struct work_struct *work)
+{
+ struct adf7242_local *lp =
+ container_of(work, struct adf7242_local, work.work);
+
+ /* Reissuing RC_RX every 400ms - to adjust for offset
+ * drift in receiver (datasheet page 61, OCL section)
+ */
+
+ if (!test_bit(FLAG_XMIT, &lp->flags)) {
+ adf7242_cmd(lp, CMD_RC_PHY_RDY);
+ adf7242_cmd_rx(lp);
+ }
+}
+
static int adf7242_set_txpower(struct ieee802154_hw *hw, int mbm)
{
struct adf7242_local *lp = hw->priv;
@@ -686,7 +704,7 @@ static int adf7242_start(struct ieee802154_hw *hw)
enable_irq(lp->spi->irq);
set_bit(FLAG_START, &lp->flags);
- return adf7242_cmd(lp, CMD_RC_RX);
+ return adf7242_cmd_rx(lp);
}
static void adf7242_stop(struct ieee802154_hw *hw)
@@ -694,6 +712,7 @@ static void adf7242_stop(struct ieee802154_hw *hw)
struct adf7242_local *lp = hw->priv;
disable_irq(lp->spi->irq);
+ cancel_delayed_work_sync(&lp->work);
adf7242_cmd(lp, CMD_RC_IDLE);
clear_bit(FLAG_START, &lp->flags);
adf7242_clear_irqstat(lp);
@@ -719,7 +738,10 @@ static int adf7242_channel(struct ieee802154_hw *hw, u8 page, u8 channel)
adf7242_write_reg(lp, REG_CH_FREQ1, freq >> 8);
adf7242_write_reg(lp, REG_CH_FREQ2, freq >> 16);
- return adf7242_cmd(lp, CMD_RC_RX);
+ if (test_bit(FLAG_START, &lp->flags))
+ return adf7242_cmd_rx(lp);
+ else
+ return adf7242_cmd(lp, CMD_RC_PHY_RDY);
}
static int adf7242_set_hw_addr_filt(struct ieee802154_hw *hw,
@@ -814,6 +836,7 @@ static int adf7242_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
/* ensure existing instances of the IRQ handler have completed */
disable_irq(lp->spi->irq);
set_bit(FLAG_XMIT, &lp->flags);
+ cancel_delayed_work_sync(&lp->work);
reinit_completion(&lp->tx_complete);
adf7242_cmd(lp, CMD_RC_PHY_RDY);
adf7242_clear_irqstat(lp);
@@ -952,6 +975,7 @@ static irqreturn_t adf7242_isr(int irq, void *data)
unsigned int xmit;
u8 irq1;
+ mod_delayed_work(lp->wqueue, &lp->work, msecs_to_jiffies(400));
adf7242_read_reg(lp, REG_IRQ1_SRC1, &irq1);
if (!(irq1 & (IRQ_RX_PKT_RCVD | IRQ_CSMA_CA)))
@@ -1241,6 +1265,9 @@ static int adf7242_probe(struct spi_device *spi)
spi_message_add_tail(&lp->stat_xfer, &lp->stat_msg);
spi_set_drvdata(spi, lp);
+ INIT_DELAYED_WORK(&lp->work, adf7242_rx_cal_work);
+ lp->wqueue = alloc_ordered_workqueue(dev_name(&spi->dev),
+ WQ_MEM_RECLAIM);
ret = adf7242_hw_init(lp);
if (ret)
@@ -1284,6 +1311,9 @@ static int adf7242_remove(struct spi_device *spi)
if (!IS_ERR_OR_NULL(lp->debugfs_root))
debugfs_remove_recursive(lp->debugfs_root);
+ cancel_delayed_work_sync(&lp->work);
+ destroy_workqueue(lp->wqueue);
+
ieee802154_unregister_hw(lp->hw);
mutex_destroy(&lp->bmux);
ieee802154_free_hw(lp->hw);
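
The adf7242 change above introduces a delayed work that re-issues RC_RX roughly every 400 ms to compensate for receiver offset drift, re-armed from the RX/IRQ paths and cancelled around transmit and teardown. A reduced sketch of that reschedule-on-activity idiom, with illustrative names:

#include <linux/bitops.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define RX_CAL_INTERVAL_MS	400

struct rx_ctx {
	struct delayed_work cal_work;
	struct workqueue_struct *wq;
	unsigned long flags;		/* e.g. bit 0 = transmitting */
};

static void rx_cal_work(struct work_struct *work)
{
	struct rx_ctx *ctx = container_of(work, struct rx_ctx, cal_work.work);

	/* Skip recalibration while a transmit is in flight; the RX path
	 * re-arms the receiver and pushes the next deadline out again.
	 */
	if (!test_bit(0, &ctx->flags)) {
		/* ... issue the RX/recalibration command here ... */
	}
}

/* Call from the RX or IRQ path: pushes the next calibration 400 ms out. */
static void rx_cal_kick(struct rx_ctx *ctx)
{
	mod_delayed_work(ctx->wq, &ctx->cal_work,
			 msecs_to_jiffies(RX_CAL_INTERVAL_MS));
}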
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 77abedf0b524..3d9e91579866 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -940,7 +940,7 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
static int
at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
}
@@ -1121,8 +1121,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_SADDR_CHANGED) {
u16 addr = le16_to_cpu(filt->short_addr);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for saddr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for saddr\n", __func__);
__at86rf230_write(lp, RG_SHORT_ADDR_0, addr);
__at86rf230_write(lp, RG_SHORT_ADDR_1, addr >> 8);
}
@@ -1130,8 +1129,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
if (changed & IEEE802154_AFILT_PANID_CHANGED) {
u16 pan = le16_to_cpu(filt->pan_id);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for pan id\n");
+ dev_vdbg(&lp->spi->dev, "%s called for pan id\n", __func__);
__at86rf230_write(lp, RG_PAN_ID_0, pan);
__at86rf230_write(lp, RG_PAN_ID_1, pan >> 8);
}
@@ -1140,15 +1138,13 @@ at86rf230_set_hw_addr_filt(struct ieee802154_hw *hw,
u8 i, addr[8];
memcpy(addr, &filt->ieee_addr, 8);
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for IEEE addr\n");
+ dev_vdbg(&lp->spi->dev, "%s called for IEEE addr\n", __func__);
for (i = 0; i < 8; i++)
__at86rf230_write(lp, RG_IEEE_ADDR_0 + i, addr[i]);
}
if (changed & IEEE802154_AFILT_PANC_CHANGED) {
- dev_vdbg(&lp->spi->dev,
- "at86rf230_set_hw_addr_filt called for panc change\n");
+ dev_vdbg(&lp->spi->dev, "%s called for panc change\n", __func__);
if (filt->pan_coord)
at86rf230_write_subreg(lp, SR_AACK_I_AM_COORD, 1);
else
@@ -1252,7 +1248,6 @@ at86rf230_set_cca_mode(struct ieee802154_hw *hw,
return at86rf230_write_subreg(lp, SR_CCA_MODE, val);
}
-
static int
at86rf230_set_cca_ed_level(struct ieee802154_hw *hw, s32 mbm)
{
diff --git a/drivers/net/ieee802154/fakelb.c b/drivers/net/ieee802154/fakelb.c
index 0d673f7682ee..176395e4b7bb 100644
--- a/drivers/net/ieee802154/fakelb.c
+++ b/drivers/net/ieee802154/fakelb.c
@@ -49,7 +49,7 @@ struct fakelb_phy {
static int fakelb_hw_ed(struct ieee802154_hw *hw, u8 *level)
{
- BUG_ON(!level);
+ WARN_ON(!level);
*level = 0xbe;
return 0;
diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c
index de0d7f28a181..e428277781ac 100644
--- a/drivers/net/ieee802154/mcr20a.c
+++ b/drivers/net/ieee802154/mcr20a.c
@@ -15,10 +15,11 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/spi/spi.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
+#include <linux/irq.h>
#include <linux/skbuff.h>
#include <linux/of_gpio.h>
#include <linux/regmap.h>
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 4377c26f714d..4a949569ec4c 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -75,10 +75,23 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
{
struct ipvl_dev *ipvlan;
struct net_device *mdev = port->dev;
- int err = 0;
+ unsigned int flags;
+ int err;
ASSERT_RTNL();
if (port->mode != nval) {
+ list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
+ flags = ipvlan->dev->flags;
+ if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S) {
+ err = dev_change_flags(ipvlan->dev,
+ flags | IFF_NOARP);
+ } else {
+ err = dev_change_flags(ipvlan->dev,
+ flags & ~IFF_NOARP);
+ }
+ if (unlikely(err))
+ goto fail;
+ }
if (nval == IPVLAN_MODE_L3S) {
/* New mode is L3S */
err = ipvlan_register_nf_hook(read_pnet(&port->pnet));
@@ -86,21 +99,28 @@ static int ipvlan_set_port_mode(struct ipvl_port *port, u16 nval)
mdev->l3mdev_ops = &ipvl_l3mdev_ops;
mdev->priv_flags |= IFF_L3MDEV_MASTER;
} else
- return err;
+ goto fail;
} else if (port->mode == IPVLAN_MODE_L3S) {
/* Old mode was L3S */
mdev->priv_flags &= ~IFF_L3MDEV_MASTER;
ipvlan_unregister_nf_hook(read_pnet(&port->pnet));
mdev->l3mdev_ops = NULL;
}
- list_for_each_entry(ipvlan, &port->ipvlans, pnode) {
- if (nval == IPVLAN_MODE_L3 || nval == IPVLAN_MODE_L3S)
- ipvlan->dev->flags |= IFF_NOARP;
- else
- ipvlan->dev->flags &= ~IFF_NOARP;
- }
port->mode = nval;
}
+ return 0;
+
+fail:
+ /* Undo the flags changes that have been done so far. */
+ list_for_each_entry_continue_reverse(ipvlan, &port->ipvlans, pnode) {
+ flags = ipvlan->dev->flags;
+ if (port->mode == IPVLAN_MODE_L3 ||
+ port->mode == IPVLAN_MODE_L3S)
+ dev_change_flags(ipvlan->dev, flags | IFF_NOARP);
+ else
+ dev_change_flags(ipvlan->dev, flags & ~IFF_NOARP);
+ }
+
return err;
}
@@ -594,7 +614,8 @@ int ipvlan_link_new(struct net *src_net, struct net_device *dev,
ipvlan->phy_dev = phy_dev;
ipvlan->dev = dev;
ipvlan->sfeatures = IPVLAN_FEATURES;
- ipvlan_adjust_mtu(ipvlan, phy_dev);
+ if (!tb[IFLA_MTU])
+ ipvlan_adjust_mtu(ipvlan, phy_dev);
INIT_LIST_HEAD(&ipvlan->addrs);
spin_lock_init(&ipvlan->addrs_lock);
@@ -693,6 +714,7 @@ void ipvlan_link_setup(struct net_device *dev)
{
ether_setup(dev);
+ dev->max_mtu = ETH_MAX_MTU;
dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
dev->priv_flags |= IFF_UNICAST_FLT | IFF_NO_QUEUE;
dev->netdev_ops = &ipvlan_netdev_ops;
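
The ipvlan change above applies dev_change_flags() to every slave first and, if any call fails, walks back over the already-changed entries with list_for_each_entry_continue_reverse() to restore the previous flags before returning the error. A compact sketch of that apply-then-unwind idiom, using illustrative types:

#include <linux/list.h>
#include <linux/netdevice.h>

struct slave {
	struct list_head node;
	struct net_device *dev;
};

/* Apply IFF_NOARP to each slave; on failure, undo what was already done. */
static int set_all_noarp(struct list_head *slaves, bool enable)
{
	struct slave *s;
	unsigned int flags;
	int err;

	list_for_each_entry(s, slaves, node) {
		flags = s->dev->flags;
		err = dev_change_flags(s->dev, enable ? flags | IFF_NOARP
						      : flags & ~IFF_NOARP);
		if (err)
			goto unwind;
	}
	return 0;

unwind:
	/* Walk back only over the entries changed so far, in reverse. */
	list_for_each_entry_continue_reverse(s, slaves, node) {
		flags = s->dev->flags;
		dev_change_flags(s->dev, enable ? flags & ~IFF_NOARP
						: flags | IFF_NOARP);
	}
	return err;
}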
diff --git a/drivers/net/net_failover.c b/drivers/net/net_failover.c
index 83f7420ddea5..4f390fa557e4 100644
--- a/drivers/net/net_failover.c
+++ b/drivers/net/net_failover.c
@@ -527,7 +527,7 @@ static int net_failover_slave_register(struct net_device *slave_dev,
netif_addr_lock_bh(failover_dev);
dev_uc_sync_multiple(slave_dev, failover_dev);
- dev_uc_sync_multiple(slave_dev, failover_dev);
+ dev_mc_sync_multiple(slave_dev, failover_dev);
netif_addr_unlock_bh(failover_dev);
err = vlan_vids_add_by_dev(slave_dev, failover_dev);
diff --git a/drivers/net/phy/dp83tc811.c b/drivers/net/phy/dp83tc811.c
index 081d99aa3985..49ac678eb2dc 100644
--- a/drivers/net/phy/dp83tc811.c
+++ b/drivers/net/phy/dp83tc811.c
@@ -222,7 +222,7 @@ static int dp83811_config_intr(struct phy_device *phydev)
if (err < 0)
return err;
- err = phy_write(phydev, MII_DP83811_INT_STAT1, 0);
+ err = phy_write(phydev, MII_DP83811_INT_STAT2, 0);
}
return err;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index b8f57e9b9379..1cd439bdf608 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -130,8 +130,9 @@
#define MII_88E1318S_PHY_WOL_CTRL_CLEAR_WOL_STATUS BIT(12)
#define MII_88E1318S_PHY_WOL_CTRL_MAGIC_PACKET_MATCH_ENABLE BIT(14)
-#define MII_88E1121_PHY_LED_CTRL 16
+#define MII_PHY_LED_CTRL 16
#define MII_88E1121_PHY_LED_DEF 0x0030
+#define MII_88E1510_PHY_LED_DEF 0x1177
#define MII_M1011_PHY_STATUS 0x11
#define MII_M1011_PHY_STATUS_1000 0x8000
@@ -632,8 +633,40 @@ error:
return err;
}
+static void marvell_config_led(struct phy_device *phydev)
+{
+ u16 def_config;
+ int err;
+
+ switch (MARVELL_PHY_FAMILY_ID(phydev->phy_id)) {
+ /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1121R):
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1318S):
+ def_config = MII_88E1121_PHY_LED_DEF;
+ break;
+ /* Default PHY LED config:
+ * LED[0] .. 1000Mbps Link
+ * LED[1] .. 100Mbps Link
+ * LED[2] .. Blink, Activity
+ */
+ case MARVELL_PHY_FAMILY_ID(MARVELL_PHY_ID_88E1510):
+ def_config = MII_88E1510_PHY_LED_DEF;
+ break;
+ default:
+ return;
+ }
+
+ err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE, MII_PHY_LED_CTRL,
+ def_config);
+ if (err < 0)
+ pr_warn("Fail to config marvell phy LED.\n");
+}
+
static int marvell_config_init(struct phy_device *phydev)
{
+ /* Set default LED */
+ marvell_config_led(phydev);
+
/* Set registers from marvell,reg-init DT property */
return marvell_of_reg_init(phydev);
}
@@ -813,21 +846,6 @@ static int m88e1111_config_init(struct phy_device *phydev)
return genphy_soft_reset(phydev);
}
-static int m88e1121_config_init(struct phy_device *phydev)
-{
- int err;
-
- /* Default PHY LED config: LED[0] .. Link, LED[1] .. Activity */
- err = phy_write_paged(phydev, MII_MARVELL_LED_PAGE,
- MII_88E1121_PHY_LED_CTRL,
- MII_88E1121_PHY_LED_DEF);
- if (err < 0)
- return err;
-
- /* Set marvell,reg-init configuration from device tree */
- return marvell_config_init(phydev);
-}
-
static int m88e1318_config_init(struct phy_device *phydev)
{
if (phy_interrupt_is_valid(phydev)) {
@@ -841,7 +859,7 @@ static int m88e1318_config_init(struct phy_device *phydev)
return err;
}
- return m88e1121_config_init(phydev);
+ return marvell_config_init(phydev);
}
static int m88e1510_config_init(struct phy_device *phydev)
@@ -2087,7 +2105,7 @@ static struct phy_driver marvell_drivers[] = {
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = &m88e1121_probe,
- .config_init = &m88e1121_config_init,
+ .config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 537297d2b4b4..6c9b24fe3148 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -514,7 +514,7 @@ static int phy_start_aneg_priv(struct phy_device *phydev, bool sync)
* negotiation may already be done and aneg interrupt may not be
* generated.
*/
- if (phy_interrupt_is_valid(phydev) && (phydev->state == PHY_AN)) {
+ if (phydev->irq != PHY_POLL && phydev->state == PHY_AN) {
err = phy_aneg_done(phydev);
if (err > 0) {
trigger = true;
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bd0f339f69fd..b9f5f40a7ac1 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1724,11 +1724,8 @@ EXPORT_SYMBOL(genphy_loopback);
static int __set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- /* The default values for phydev->supported are provided by the PHY
- * driver "features" member, we want to reset to sane defaults first
- * before supporting higher speeds.
- */
- phydev->supported &= PHY_DEFAULT_FEATURES;
+ phydev->supported &= ~(PHY_1000BT_FEATURES | PHY_100BT_FEATURES |
+ PHY_10BT_FEATURES);
switch (max_speed) {
default:
diff --git a/drivers/net/phy/sfp-bus.c b/drivers/net/phy/sfp-bus.c
index d437f4f5ed52..740655261e5b 100644
--- a/drivers/net/phy/sfp-bus.c
+++ b/drivers/net/phy/sfp-bus.c
@@ -349,7 +349,6 @@ static int sfp_register_bus(struct sfp_bus *bus)
}
if (bus->started)
bus->socket_ops->start(bus->sfp);
- bus->netdev->sfp_bus = bus;
bus->registered = true;
return 0;
}
@@ -364,7 +363,6 @@ static void sfp_unregister_bus(struct sfp_bus *bus)
if (bus->phydev && ops && ops->disconnect_phy)
ops->disconnect_phy(bus->upstream);
}
- bus->netdev->sfp_bus = NULL;
bus->registered = false;
}
@@ -436,6 +434,14 @@ void sfp_upstream_stop(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_upstream_stop);
+static void sfp_upstream_clear(struct sfp_bus *bus)
+{
+ bus->upstream_ops = NULL;
+ bus->upstream = NULL;
+ bus->netdev->sfp_bus = NULL;
+ bus->netdev = NULL;
+}
+
/**
* sfp_register_upstream() - Register the neighbouring device
* @fwnode: firmware node for the SFP bus
@@ -461,9 +467,13 @@ struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
bus->upstream_ops = ops;
bus->upstream = upstream;
bus->netdev = ndev;
+ ndev->sfp_bus = bus;
- if (bus->sfp)
+ if (bus->sfp) {
ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_upstream_clear(bus);
+ }
rtnl_unlock();
}
@@ -488,8 +498,7 @@ void sfp_unregister_upstream(struct sfp_bus *bus)
rtnl_lock();
if (bus->sfp)
sfp_unregister_bus(bus);
- bus->upstream = NULL;
- bus->netdev = NULL;
+ sfp_upstream_clear(bus);
rtnl_unlock();
sfp_bus_put(bus);
@@ -561,6 +570,13 @@ void sfp_module_remove(struct sfp_bus *bus)
}
EXPORT_SYMBOL_GPL(sfp_module_remove);
+static void sfp_socket_clear(struct sfp_bus *bus)
+{
+ bus->sfp_dev = NULL;
+ bus->sfp = NULL;
+ bus->socket_ops = NULL;
+}
+
struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
const struct sfp_socket_ops *ops)
{
@@ -573,8 +589,11 @@ struct sfp_bus *sfp_register_socket(struct device *dev, struct sfp *sfp,
bus->sfp = sfp;
bus->socket_ops = ops;
- if (bus->netdev)
+ if (bus->netdev) {
ret = sfp_register_bus(bus);
+ if (ret)
+ sfp_socket_clear(bus);
+ }
rtnl_unlock();
}
@@ -592,9 +611,7 @@ void sfp_unregister_socket(struct sfp_bus *bus)
rtnl_lock();
if (bus->netdev)
sfp_unregister_bus(bus);
- bus->sfp_dev = NULL;
- bus->sfp = NULL;
- bus->socket_ops = NULL;
+ sfp_socket_clear(bus);
rtnl_unlock();
sfp_bus_put(bus);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index de51e8f70f44..ce61231e96ea 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -1107,7 +1107,7 @@ static const struct proto_ops pppoe_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pppoe_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index a192a017cc68..f5727baac84a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1688,7 +1688,7 @@ static struct sk_buff *tun_build_skb(struct tun_struct *tun,
case XDP_TX:
get_page(alloc_frag->page);
alloc_frag->offset += buflen;
- if (tun_xdp_tx(tun->dev, &xdp))
+ if (tun_xdp_tx(tun->dev, &xdp) < 0)
goto err_redirect;
rcu_read_unlock();
local_bh_enable();
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 3d4f7959dabb..b1b3d8f7e67d 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -642,10 +642,12 @@ static void ax88772_restore_phy(struct usbnet *dev)
priv->presvd_phy_advertise);
/* Restore BMCR */
+ if (priv->presvd_phy_bmcr & BMCR_ANENABLE)
+ priv->presvd_phy_bmcr |= BMCR_ANRESTART;
+
asix_mdio_write_nopm(dev->net, dev->mii.phy_id, MII_BMCR,
priv->presvd_phy_bmcr);
- mii_nway_restart(&dev->mii);
priv->presvd_phy_advertise = 0;
priv->presvd_phy_bmcr = 0;
}
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index b0e8b9613054..1eaec648bd1f 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -967,8 +967,7 @@ void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
atomic_set(&ctx->stop, 1);
- if (hrtimer_active(&ctx->tx_timer))
- hrtimer_cancel(&ctx->tx_timer);
+ hrtimer_cancel(&ctx->tx_timer);
tasklet_kill(&ctx->bh);
diff --git a/drivers/net/usb/lan78xx.c b/drivers/net/usb/lan78xx.c
index 8dff87ec6d99..ed10d49eb5e0 100644
--- a/drivers/net/usb/lan78xx.c
+++ b/drivers/net/usb/lan78xx.c
@@ -64,6 +64,7 @@
#define DEFAULT_RX_CSUM_ENABLE (true)
#define DEFAULT_TSO_CSUM_ENABLE (true)
#define DEFAULT_VLAN_FILTER_ENABLE (true)
+#define DEFAULT_VLAN_RX_OFFLOAD (true)
#define TX_OVERHEAD (8)
#define RXW_PADDING 2
@@ -2298,7 +2299,7 @@ static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
if ((ll_mtu % dev->maxpacket) == 0)
return -EDOM;
- ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + ETH_HLEN);
+ ret = lan78xx_set_rx_max_frame_length(dev, new_mtu + VLAN_ETH_HLEN);
netdev->mtu = new_mtu;
@@ -2364,6 +2365,11 @@ static int lan78xx_set_features(struct net_device *netdev,
}
if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
+ else
+ pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
+
+ if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
else
pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
@@ -2587,7 +2593,8 @@ static int lan78xx_reset(struct lan78xx_net *dev)
buf |= FCT_TX_CTL_EN_;
ret = lan78xx_write_reg(dev, FCT_TX_CTL, buf);
- ret = lan78xx_set_rx_max_frame_length(dev, dev->net->mtu + ETH_HLEN);
+ ret = lan78xx_set_rx_max_frame_length(dev,
+ dev->net->mtu + VLAN_ETH_HLEN);
ret = lan78xx_read_reg(dev, MAC_RX, &buf);
buf |= MAC_RX_RXEN_;
@@ -2975,6 +2982,12 @@ static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
if (DEFAULT_TSO_CSUM_ENABLE)
dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
+ if (DEFAULT_VLAN_RX_OFFLOAD)
+ dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+ if (DEFAULT_VLAN_FILTER_ENABLE)
+ dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
dev->net->hw_features = dev->net->features;
ret = lan78xx_setup_irq_domain(dev);
@@ -3039,8 +3052,13 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
struct sk_buff *skb,
u32 rx_cmd_a, u32 rx_cmd_b)
{
+ /* HW Checksum offload appears to be flawed if used when not stripping
+ * VLAN headers. Drop back to S/W checksums under these conditions.
+ */
if (!(dev->net->features & NETIF_F_RXCSUM) ||
- unlikely(rx_cmd_a & RX_CMD_A_ICSM_)) {
+ unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
+ ((rx_cmd_a & RX_CMD_A_FVTG_) &&
+ !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
skb->ip_summed = CHECKSUM_NONE;
} else {
skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
@@ -3048,6 +3066,16 @@ static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
}
}
+static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
+ struct sk_buff *skb,
+ u32 rx_cmd_a, u32 rx_cmd_b)
+{
+ if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
+ (rx_cmd_a & RX_CMD_A_FVTG_))
+ __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
+ (rx_cmd_b & 0xffff));
+}
+
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
int status;
@@ -3112,6 +3140,8 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
if (skb->len == size) {
lan78xx_rx_csum_offload(dev, skb,
rx_cmd_a, rx_cmd_b);
+ lan78xx_rx_vlan_offload(dev, skb,
+ rx_cmd_a, rx_cmd_b);
skb_trim(skb, skb->len - 4); /* remove fcs */
skb->truesize = size + sizeof(struct sk_buff);
@@ -3130,6 +3160,7 @@ static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb)
skb_set_tail_pointer(skb2, size);
lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
+ lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
skb_trim(skb2, skb2->len - 4); /* remove fcs */
skb2->truesize = size + sizeof(struct sk_buff);
@@ -3313,6 +3344,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
pkt_cnt = 0;
count = 0;
length = 0;
+ spin_lock_irqsave(&tqp->lock, flags);
for (skb = tqp->next; pkt_cnt < tqp->qlen; skb = skb->next) {
if (skb_is_gso(skb)) {
if (pkt_cnt) {
@@ -3321,7 +3353,8 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
}
count = 1;
length = skb->len - TX_OVERHEAD;
- skb2 = skb_dequeue(tqp);
+ __skb_unlink(skb, tqp);
+ spin_unlock_irqrestore(&tqp->lock, flags);
goto gso_skb;
}
@@ -3330,6 +3363,7 @@ static void lan78xx_tx_bh(struct lan78xx_net *dev)
skb_totallen = skb->len + roundup(skb_totallen, sizeof(u32));
pkt_cnt++;
}
+ spin_unlock_irqrestore(&tqp->lock, flags);
/* copy to a single skb */
skb = alloc_skb(skb_totallen, GFP_ATOMIC);
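
The lan78xx RX-path comment above gives the reasoning: hardware checksum offload is unreliable when a frame still carries a VLAN tag that was not stripped, so the driver falls back to software checksums in that case and otherwise feeds the stripped tag to the stack with __vlan_hwaccel_put_tag(). A reduced sketch of that per-frame decision; the status-word bits and checksum shift are passed in as parameters because their exact layout is hardware-specific (an assumption outside this driver):

#include <linux/if_vlan.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* rx_cmd_a/rx_cmd_b are the per-frame status words, as in the hunk above. */
static void rx_csum_and_vlan(struct net_device *net, struct sk_buff *skb,
			     u32 rx_cmd_a, u32 rx_cmd_b,
			     u32 csum_err_bit, u32 vlan_tagged_bit,
			     unsigned int csum_shift)
{
	bool hw_stripped = net->features & NETIF_F_HW_VLAN_CTAG_RX;

	if (!(net->features & NETIF_F_RXCSUM) ||
	    (rx_cmd_a & csum_err_bit) ||
	    ((rx_cmd_a & vlan_tagged_bit) && !hw_stripped)) {
		/* Hardware checksum unusable: let the stack recompute it. */
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> csum_shift));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}

	/* Hand the stripped VLAN tag to the stack only when stripping is on. */
	if (hw_stripped && (rx_cmd_a & vlan_tagged_bit))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       rx_cmd_b & 0xffff);
}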
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 8e8b51f171f4..cb0cc30c3d6a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -1246,12 +1246,14 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b3, 8)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card (rev3) */
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
{QMI_FIXED_INTF(0x22de, 0x9061, 3)}, /* WeTelecom WPD-600N */
{QMI_QUIRK_SET_DTR(0x1e0e, 0x9001, 5)}, /* SIMCom 7100E, 7230E, 7600E ++ */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0125, 4)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */
+ {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */
{QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */
{QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 86f7196f9d91..2a58607a6aea 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -3962,7 +3962,8 @@ static int rtl8152_close(struct net_device *netdev)
#ifdef CONFIG_PM_SLEEP
unregister_pm_notifier(&tp->pm_notifier);
#endif
- napi_disable(&tp->napi);
+ if (!test_bit(RTL8152_UNPLUG, &tp->flags))
+ napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c
index 5f565bd574da..48ba80a8ca5c 100644
--- a/drivers/net/usb/rtl8150.c
+++ b/drivers/net/usb/rtl8150.c
@@ -681,7 +681,7 @@ static void rtl8150_set_multicast(struct net_device *netdev)
(netdev->flags & IFF_ALLMULTI)) {
rx_creg &= 0xfffe;
rx_creg |= 0x0002;
- dev_info(&netdev->dev, "%s: allmulti set\n", netdev->name);
+ dev_dbg(&netdev->dev, "%s: allmulti set\n", netdev->name);
} else {
/* ~RX_MULTICAST, ~RX_PROMISCUOUS */
rx_creg &= 0x00fc;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 7a6a1fe79309..05553d252446 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -82,6 +82,9 @@ static bool turbo_mode = true;
module_param(turbo_mode, bool, 0644);
MODULE_PARM_DESC(turbo_mode, "Enable multiple frames per Rx transaction");
+static int smsc75xx_link_ok_nopm(struct usbnet *dev);
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev);
+
static int __must_check __smsc75xx_read_reg(struct usbnet *dev, u32 index,
u32 *data, int in_pm)
{
@@ -852,6 +855,9 @@ static int smsc75xx_phy_initialize(struct usbnet *dev)
return -EIO;
}
+ /* phy workaround for gig link */
+ smsc75xx_phy_gig_workaround(dev);
+
smsc75xx_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
ADVERTISE_ALL | ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
@@ -987,6 +993,62 @@ static int smsc75xx_wait_ready(struct usbnet *dev, int in_pm)
return -EIO;
}
+static int smsc75xx_phy_gig_workaround(struct usbnet *dev)
+{
+ struct mii_if_info *mii = &dev->mii;
+ int ret = 0, timeout = 0;
+ u32 buf, link_up = 0;
+
+ /* Set the phy in Gig loopback */
+ smsc75xx_mdio_write(dev->net, mii->phy_id, MII_BMCR, 0x4040);
+
+ /* Wait for the link up */
+ do {
+ link_up = smsc75xx_link_ok_nopm(dev);
+ usleep_range(10000, 20000);
+ timeout++;
+ } while ((!link_up) && (timeout < 1000));
+
+ if (timeout >= 1000) {
+ netdev_warn(dev->net, "Timeout waiting for PHY link up\n");
+ return -EIO;
+ }
+
+ /* phy reset */
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ buf |= PMT_CTL_PHY_RST;
+
+ ret = smsc75xx_write_reg(dev, PMT_CTL, buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to write PMT_CTL: %d\n", ret);
+ return ret;
+ }
+
+ timeout = 0;
+ do {
+ usleep_range(10000, 20000);
+ ret = smsc75xx_read_reg(dev, PMT_CTL, &buf);
+ if (ret < 0) {
+ netdev_warn(dev->net, "Failed to read PMT_CTL: %d\n",
+ ret);
+ return ret;
+ }
+ timeout++;
+ } while ((buf & PMT_CTL_PHY_RST) && (timeout < 100));
+
+ if (timeout >= 100) {
+ netdev_warn(dev->net, "timeout waiting for PHY Reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
static int smsc75xx_reset(struct usbnet *dev)
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
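
The gig-link workaround added above follows a common USB-NIC idiom: put the PHY into loopback, poll a condition every 10-20 ms until it holds or a bounded number of iterations elapses, then reset and poll the reset bit back to zero. A generic poll-with-timeout sketch (illustrative, not the smsc75xx register layer):

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Poll cond() every 10-20 ms until it returns true or max_iters elapse. */
static int poll_until(bool (*cond)(void *ctx), void *ctx, int max_iters)
{
	int i;

	for (i = 0; i < max_iters; i++) {
		if (cond(ctx))
			return 0;
		usleep_range(10000, 20000);
	}

	return -ETIMEDOUT;
}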
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6c9a2af3732..53085c63277b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -53,6 +53,10 @@ module_param(napi_tx, bool, 0644);
/* Amount of XDP headroom to prepend to packets for use by xdp_adjust_head */
#define VIRTIO_XDP_HEADROOM 256
+/* Separating two types of XDP xmit */
+#define VIRTIO_XDP_TX BIT(0)
+#define VIRTIO_XDP_REDIR BIT(1)
+
/* RX packet size EWMA. The average packet size is used to determine the packet
* buffer size when refilling RX rings. As the entire RX ring may be refilled
* at once, the weight is chosen so that the EWMA will be insensitive to short-
@@ -582,7 +586,7 @@ static struct sk_buff *receive_small(struct net_device *dev,
struct receive_queue *rq,
void *buf, void *ctx,
unsigned int len,
- bool *xdp_xmit)
+ unsigned int *xdp_xmit)
{
struct sk_buff *skb;
struct bpf_prog *xdp_prog;
@@ -654,14 +658,14 @@ static struct sk_buff *receive_small(struct net_device *dev,
trace_xdp_exception(vi->dev, xdp_prog, act);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_TX;
rcu_read_unlock();
goto xdp_xmit;
case XDP_REDIRECT:
err = xdp_do_redirect(dev, &xdp, xdp_prog);
if (err)
goto err_xdp;
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_REDIR;
rcu_read_unlock();
goto xdp_xmit;
default:
@@ -723,7 +727,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
void *buf,
void *ctx,
unsigned int len,
- bool *xdp_xmit)
+ unsigned int *xdp_xmit)
{
struct virtio_net_hdr_mrg_rxbuf *hdr = buf;
u16 num_buf = virtio16_to_cpu(vi->vdev, hdr->num_buffers);
@@ -818,7 +822,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(xdp_page);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_TX;
if (unlikely(xdp_page != page))
put_page(page);
rcu_read_unlock();
@@ -830,7 +834,7 @@ static struct sk_buff *receive_mergeable(struct net_device *dev,
put_page(xdp_page);
goto err_xdp;
}
- *xdp_xmit = true;
+ *xdp_xmit |= VIRTIO_XDP_REDIR;
if (unlikely(xdp_page != page))
put_page(page);
rcu_read_unlock();
@@ -939,7 +943,8 @@ xdp_xmit:
}
static int receive_buf(struct virtnet_info *vi, struct receive_queue *rq,
- void *buf, unsigned int len, void **ctx, bool *xdp_xmit)
+ void *buf, unsigned int len, void **ctx,
+ unsigned int *xdp_xmit)
{
struct net_device *dev = vi->dev;
struct sk_buff *skb;
@@ -1232,7 +1237,8 @@ static void refill_work(struct work_struct *work)
}
}
-static int virtnet_receive(struct receive_queue *rq, int budget, bool *xdp_xmit)
+static int virtnet_receive(struct receive_queue *rq, int budget,
+ unsigned int *xdp_xmit)
{
struct virtnet_info *vi = rq->vq->vdev->priv;
unsigned int len, received = 0, bytes = 0;
@@ -1321,7 +1327,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
struct virtnet_info *vi = rq->vq->vdev->priv;
struct send_queue *sq;
unsigned int received, qp;
- bool xdp_xmit = false;
+ unsigned int xdp_xmit = 0;
virtnet_poll_cleantx(rq);
@@ -1331,12 +1337,14 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
if (received < budget)
virtqueue_napi_complete(napi, rq->vq, received);
- if (xdp_xmit) {
+ if (xdp_xmit & VIRTIO_XDP_REDIR)
+ xdp_do_flush_map();
+
+ if (xdp_xmit & VIRTIO_XDP_TX) {
qp = vi->curr_queue_pairs - vi->xdp_queue_pairs +
smp_processor_id();
sq = &vi->sq[qp];
virtqueue_kick(sq->vq);
- xdp_do_flush_map();
}
return received;
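
The virtio_net change above replaces the single bool xdp_xmit with a bitmask so the NAPI poll loop can tell "packets queued for local XDP_TX" apart from "packets redirected elsewhere" and only perform the map flush or virtqueue kick that is actually required. A small standalone illustration of accumulating and acting on such flags (userspace demo, not the driver code):

#include <stdio.h>

/* Distinct reasons to act after the RX loop, kept in one bitmask. */
#define XDP_DID_TX	(1u << 0)
#define XDP_DID_REDIR	(1u << 1)

/* Each received packet may set one of the flags. */
static void handle_packet(int verdict, unsigned int *xdp_xmit)
{
	if (verdict == 1)
		*xdp_xmit |= XDP_DID_TX;
	else if (verdict == 2)
		*xdp_xmit |= XDP_DID_REDIR;
}

int main(void)
{
	unsigned int xdp_xmit = 0;
	int verdicts[] = { 0, 2, 1, 2 };
	size_t i;

	for (i = 0; i < sizeof(verdicts) / sizeof(verdicts[0]); i++)
		handle_packet(verdicts[i], &xdp_xmit);

	/* After the loop, do each piece of deferred work at most once. */
	if (xdp_xmit & XDP_DID_REDIR)
		printf("flush redirect maps once\n");
	if (xdp_xmit & XDP_DID_TX)
		printf("kick the TX virtqueue once\n");
	return 0;
}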
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index aee0e60471f1..e857cb3335f6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -623,9 +623,7 @@ static struct sk_buff **vxlan_gro_receive(struct sock *sk,
flush = 0;
out:
- skb_gro_remcsum_cleanup(skb, &grc);
- skb->remcsum_offload = 0;
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
}
@@ -638,9 +636,62 @@ static int vxlan_gro_complete(struct sock *sk, struct sk_buff *skb, int nhoff)
return eth_gro_complete(skb, nhoff + sizeof(struct vxlanhdr));
}
-/* Add new entry to forwarding table -- assumes lock held */
+static struct vxlan_fdb *vxlan_fdb_alloc(struct vxlan_dev *vxlan,
+ const u8 *mac, __u16 state,
+ __be32 src_vni, __u8 ndm_flags)
+{
+ struct vxlan_fdb *f;
+
+ f = kmalloc(sizeof(*f), GFP_ATOMIC);
+ if (!f)
+ return NULL;
+ f->state = state;
+ f->flags = ndm_flags;
+ f->updated = f->used = jiffies;
+ f->vni = src_vni;
+ INIT_LIST_HEAD(&f->remotes);
+ memcpy(f->eth_addr, mac, ETH_ALEN);
+
+ return f;
+}
+
static int vxlan_fdb_create(struct vxlan_dev *vxlan,
const u8 *mac, union vxlan_addr *ip,
+ __u16 state, __be16 port, __be32 src_vni,
+ __be32 vni, __u32 ifindex, __u8 ndm_flags,
+ struct vxlan_fdb **fdb)
+{
+ struct vxlan_rdst *rd = NULL;
+ struct vxlan_fdb *f;
+ int rc;
+
+ if (vxlan->cfg.addrmax &&
+ vxlan->addrcnt >= vxlan->cfg.addrmax)
+ return -ENOSPC;
+
+ netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
+ f = vxlan_fdb_alloc(vxlan, mac, state, src_vni, ndm_flags);
+ if (!f)
+ return -ENOMEM;
+
+ rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
+ if (rc < 0) {
+ kfree(f);
+ return rc;
+ }
+
+ ++vxlan->addrcnt;
+ hlist_add_head_rcu(&f->hlist,
+ vxlan_fdb_head(vxlan, mac, src_vni));
+
+ *fdb = f;
+
+ return 0;
+}
+
+/* Add new entry to forwarding table -- assumes lock held */
+static int vxlan_fdb_update(struct vxlan_dev *vxlan,
+ const u8 *mac, union vxlan_addr *ip,
__u16 state, __u16 flags,
__be16 port, __be32 src_vni, __be32 vni,
__u32 ifindex, __u8 ndm_flags)
@@ -689,37 +740,17 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
if (!(flags & NLM_F_CREATE))
return -ENOENT;
- if (vxlan->cfg.addrmax &&
- vxlan->addrcnt >= vxlan->cfg.addrmax)
- return -ENOSPC;
-
/* Disallow replace to add a multicast entry */
if ((flags & NLM_F_REPLACE) &&
(is_multicast_ether_addr(mac) || is_zero_ether_addr(mac)))
return -EOPNOTSUPP;
netdev_dbg(vxlan->dev, "add %pM -> %pIS\n", mac, ip);
- f = kmalloc(sizeof(*f), GFP_ATOMIC);
- if (!f)
- return -ENOMEM;
-
- notify = 1;
- f->state = state;
- f->flags = ndm_flags;
- f->updated = f->used = jiffies;
- f->vni = src_vni;
- INIT_LIST_HEAD(&f->remotes);
- memcpy(f->eth_addr, mac, ETH_ALEN);
-
- rc = vxlan_fdb_append(f, ip, port, vni, ifindex, &rd);
- if (rc < 0) {
- kfree(f);
+ rc = vxlan_fdb_create(vxlan, mac, ip, state, port, src_vni,
+ vni, ifindex, ndm_flags, &f);
+ if (rc < 0)
return rc;
- }
-
- ++vxlan->addrcnt;
- hlist_add_head_rcu(&f->hlist,
- vxlan_fdb_head(vxlan, mac, src_vni));
+ notify = 1;
}
if (notify) {
@@ -743,13 +774,15 @@ static void vxlan_fdb_free(struct rcu_head *head)
kfree(f);
}
-static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f)
+static void vxlan_fdb_destroy(struct vxlan_dev *vxlan, struct vxlan_fdb *f,
+ bool do_notify)
{
netdev_dbg(vxlan->dev,
"delete %pM\n", f->eth_addr);
--vxlan->addrcnt;
- vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
+ if (do_notify)
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_DELNEIGH);
hlist_del_rcu(&f->hlist);
call_rcu(&f->rcu, vxlan_fdb_free);
@@ -865,7 +898,7 @@ static int vxlan_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
return -EAFNOSUPPORT;
spin_lock_bh(&vxlan->hash_lock);
- err = vxlan_fdb_create(vxlan, addr, &ip, ndm->ndm_state, flags,
+ err = vxlan_fdb_update(vxlan, addr, &ip, ndm->ndm_state, flags,
port, src_vni, vni, ifindex, ndm->ndm_flags);
spin_unlock_bh(&vxlan->hash_lock);
@@ -899,7 +932,7 @@ static int __vxlan_fdb_delete(struct vxlan_dev *vxlan,
goto out;
}
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
out:
return 0;
@@ -1008,7 +1041,7 @@ static bool vxlan_snoop(struct net_device *dev,
/* close off race between vxlan_flush and incoming packets */
if (netif_running(dev))
- vxlan_fdb_create(vxlan, src_mac, src_ip,
+ vxlan_fdb_update(vxlan, src_mac, src_ip,
NUD_REACHABLE,
NLM_F_EXCL|NLM_F_CREATE,
vxlan->cfg.dst_port,
@@ -2366,7 +2399,7 @@ static void vxlan_cleanup(struct timer_list *t)
"garbage collect %pM\n",
f->eth_addr);
f->state = NUD_STALE;
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
} else if (time_before(timeout, next_timer))
next_timer = timeout;
}
@@ -2417,7 +2450,7 @@ static void vxlan_fdb_delete_default(struct vxlan_dev *vxlan, __be32 vni)
spin_lock_bh(&vxlan->hash_lock);
f = __vxlan_find_mac(vxlan, all_zeros_mac, vni);
if (f)
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
spin_unlock_bh(&vxlan->hash_lock);
}
@@ -2471,7 +2504,7 @@ static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
- vxlan_fdb_destroy(vxlan, f);
+ vxlan_fdb_destroy(vxlan, f, true);
}
}
spin_unlock_bh(&vxlan->hash_lock);
@@ -3162,6 +3195,7 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
{
struct vxlan_net *vn = net_generic(net, vxlan_net_id);
struct vxlan_dev *vxlan = netdev_priv(dev);
+ struct vxlan_fdb *f = NULL;
int err;
err = vxlan_dev_configure(net, dev, conf, false, extack);
@@ -3175,24 +3209,35 @@ static int __vxlan_dev_create(struct net *net, struct net_device *dev,
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&vxlan->default_dst.remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_EXCL | NLM_F_CREATE,
vxlan->cfg.dst_port,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_vni,
vxlan->default_dst.remote_ifindex,
- NTF_SELF);
+ NTF_SELF, &f);
if (err)
return err;
}
err = register_netdevice(dev);
+ if (err)
+ goto errout;
+
+ err = rtnl_configure_link(dev, NULL);
if (err) {
- vxlan_fdb_delete_default(vxlan, vxlan->default_dst.remote_vni);
- return err;
+ unregister_netdevice(dev);
+ goto errout;
}
+ /* notify default fdb entry */
+ if (f)
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
+
list_add(&vxlan->next, &vn->vxlan_list);
return 0;
+errout:
+ if (f)
+ vxlan_fdb_destroy(vxlan, f, false);
+ return err;
}
static int vxlan_nl2conf(struct nlattr *tb[], struct nlattr *data[],
@@ -3427,6 +3472,7 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
struct vxlan_rdst *dst = &vxlan->default_dst;
struct vxlan_rdst old_dst;
struct vxlan_config conf;
+ struct vxlan_fdb *f = NULL;
int err;
err = vxlan_nl2conf(tb, data,
@@ -3455,16 +3501,16 @@ static int vxlan_changelink(struct net_device *dev, struct nlattr *tb[],
err = vxlan_fdb_create(vxlan, all_zeros_mac,
&dst->remote_ip,
NUD_REACHABLE | NUD_PERMANENT,
- NLM_F_CREATE | NLM_F_APPEND,
vxlan->cfg.dst_port,
dst->remote_vni,
dst->remote_vni,
dst->remote_ifindex,
- NTF_SELF);
+ NTF_SELF, &f);
if (err) {
spin_unlock_bh(&vxlan->hash_lock);
return err;
}
+ vxlan_fdb_notify(vxlan, f, first_remote_rtnl(f), RTM_NEWNEIGH);
}
spin_unlock_bh(&vxlan->hash_lock);
}
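
The vxlan rework above splits the old entry creation into a silent vxlan_fdb_create() (allocate and hash-insert, no netlink notification) and vxlan_fdb_update() (the old notifying path), so __vxlan_dev_create() can build the default all-zeros entry first, announce RTM_NEWNEIGH only after register_netdevice() and rtnl_configure_link() succeed, and tear the entry down without a spurious RTM_DELNEIGH if they fail. A tiny standalone sketch of that "create silently, notify on success, destroy quietly on error" shape, with illustrative names:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct entry { int id; };

/* Create without telling anyone (mirrors the now-silent create step). */
static int entry_create(struct entry **out)
{
	*out = calloc(1, sizeof(**out));
	return *out ? 0 : -1;
}

static void entry_notify(struct entry *e)
{
	printf("announce entry %d\n", e->id);	/* RTM_NEWNEIGH-like event */
}

static void entry_destroy(struct entry *e, bool notify)
{
	if (notify)
		printf("announce removal of %d\n", e->id);
	free(e);
}

static int register_device(void) { return 0; }	/* pretend success */

int main(void)
{
	struct entry *e = NULL;

	if (entry_create(&e))
		return 1;

	if (register_device()) {
		entry_destroy(e, false);	/* never advertised: no event */
		return 1;
	}

	entry_notify(e);			/* advertise only once it is real */
	entry_destroy(e, true);
	return 0;
}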
diff --git a/drivers/net/wireless/ath/ath10k/mac.c b/drivers/net/wireless/ath/ath10k/mac.c
index e9c2fb318c03..836e0a47b94a 100644
--- a/drivers/net/wireless/ath/ath10k/mac.c
+++ b/drivers/net/wireless/ath/ath10k/mac.c
@@ -6058,8 +6058,19 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
ath10k_mac_max_vht_nss(vht_mcs_mask)));
if (changed & IEEE80211_RC_BW_CHANGED) {
- ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
- sta->addr, bw);
+ enum wmi_phy_mode mode;
+
+ mode = chan_to_phymode(&def);
+ ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d phymode %d\n",
+ sta->addr, bw, mode);
+
+ err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
+ WMI_PEER_PHYMODE, mode);
+ if (err) {
+ ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
+ sta->addr, mode, err);
+ goto exit;
+ }
err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
WMI_PEER_CHAN_WIDTH, bw);
@@ -6100,6 +6111,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
sta->addr);
}
+exit:
mutex_unlock(&ar->conf_mutex);
}
diff --git a/drivers/net/wireless/ath/ath10k/wmi.h b/drivers/net/wireless/ath/ath10k/wmi.h
index b48db54e9865..d68afb65402a 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.h
+++ b/drivers/net/wireless/ath/ath10k/wmi.h
@@ -6144,6 +6144,7 @@ enum wmi_peer_param {
WMI_PEER_NSS = 0x5,
WMI_PEER_USE_4ADDR = 0x6,
WMI_PEER_DEBUG = 0xa,
+ WMI_PEER_PHYMODE = 0xd,
WMI_PEER_DUMMY_VAR = 0xff, /* dummy parameter for STA PS workaround */
};
diff --git a/drivers/net/wireless/ath/wcn36xx/testmode.c b/drivers/net/wireless/ath/wcn36xx/testmode.c
index 1279064a3b71..51a038022c8b 100644
--- a/drivers/net/wireless/ath/wcn36xx/testmode.c
+++ b/drivers/net/wireless/ath/wcn36xx/testmode.c
@@ -1,4 +1,4 @@
-/*
+/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
diff --git a/drivers/net/wireless/broadcom/brcm80211/Kconfig b/drivers/net/wireless/broadcom/brcm80211/Kconfig
index 9d99eb42d917..6acba67bca07 100644
--- a/drivers/net/wireless/broadcom/brcm80211/Kconfig
+++ b/drivers/net/wireless/broadcom/brcm80211/Kconfig
@@ -60,7 +60,6 @@ config BRCMFMAC_PCIE
bool "PCIE bus interface support for FullMAC driver"
depends on BRCMFMAC
depends on PCI
- depends on HAS_DMA
select BRCMFMAC_PROTO_MSGBUF
select FW_LOADER
---help---
diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
index c99a191e8d69..a907d7b065fa 100644
--- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
+++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c
@@ -4296,6 +4296,13 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
brcmf_dbg(TRACE, "Enter\n");
if (bus) {
+ /* Stop watchdog task */
+ if (bus->watchdog_tsk) {
+ send_sig(SIGTERM, bus->watchdog_tsk, 1);
+ kthread_stop(bus->watchdog_tsk);
+ bus->watchdog_tsk = NULL;
+ }
+
/* De-register interrupt handler */
brcmf_sdiod_intr_unregister(bus->sdiodev);
diff --git a/drivers/net/wireless/marvell/mwifiex/usb.c b/drivers/net/wireless/marvell/mwifiex/usb.c
index 6e3cf9817730..88f4c89f89ba 100644
--- a/drivers/net/wireless/marvell/mwifiex/usb.c
+++ b/drivers/net/wireless/marvell/mwifiex/usb.c
@@ -644,11 +644,6 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
MWIFIEX_FUNC_SHUTDOWN);
}
- if (adapter->workqueue)
- flush_workqueue(adapter->workqueue);
-
- mwifiex_usb_free(card);
-
mwifiex_dbg(adapter, FATAL,
"%s: removing card\n", __func__);
mwifiex_remove_card(adapter);
@@ -1356,6 +1351,8 @@ static void mwifiex_unregister_dev(struct mwifiex_adapter *adapter)
{
struct usb_card_rec *card = (struct usb_card_rec *)adapter->card;
+ mwifiex_usb_free(card);
+
mwifiex_usb_cleanup_tx_aggr(adapter);
card->adapter = NULL;
diff --git a/drivers/net/wireless/mediatek/mt7601u/phy.c b/drivers/net/wireless/mediatek/mt7601u/phy.c
index 9d2f9a776ef1..b804abd464ae 100644
--- a/drivers/net/wireless/mediatek/mt7601u/phy.c
+++ b/drivers/net/wireless/mediatek/mt7601u/phy.c
@@ -986,13 +986,15 @@ static void mt7601u_agc_tune(struct mt7601u_dev *dev)
*/
spin_lock_bh(&dev->con_mon_lock);
avg_rssi = ewma_rssi_read(&dev->avg_rssi);
- WARN_ON_ONCE(avg_rssi == 0);
+ spin_unlock_bh(&dev->con_mon_lock);
+ if (avg_rssi == 0)
+ return;
+
avg_rssi = -avg_rssi;
if (avg_rssi <= -70)
val -= 0x20;
else if (avg_rssi <= -60)
val -= 0x10;
- spin_unlock_bh(&dev->con_mon_lock);
if (val != mt7601u_bbp_rr(dev, 66))
mt7601u_bbp_wr(dev, 66, val);
diff --git a/drivers/net/wireless/quantenna/qtnfmac/Kconfig b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
index 025fa6018550..8d1492a90bd1 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/Kconfig
+++ b/drivers/net/wireless/quantenna/qtnfmac/Kconfig
@@ -7,7 +7,7 @@ config QTNFMAC
config QTNFMAC_PEARL_PCIE
tristate "Quantenna QSR10g PCIe support"
default n
- depends on HAS_DMA && PCI && CFG80211
+ depends on PCI && CFG80211
select QTNFMAC
select FW_LOADER
select CRC32
diff --git a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
index 220e2b710208..ae0ca8006849 100644
--- a/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
+++ b/drivers/net/wireless/quantenna/qtnfmac/cfg80211.c
@@ -654,8 +654,7 @@ qtnf_disconnect(struct wiphy *wiphy, struct net_device *dev,
vif = qtnf_mac_get_base_vif(mac);
if (!vif) {
pr_err("MAC%u: primary VIF is not configured\n", mac->macid);
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
if (vif->wdev.iftype != NL80211_IFTYPE_STATION) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.c b/drivers/net/wireless/realtek/rtlwifi/base.c
index 39c817eddd78..54c9f6ab0c8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.c
+++ b/drivers/net/wireless/realtek/rtlwifi/base.c
@@ -484,18 +484,21 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
}
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
del_timer_sync(&rtlpriv->works.watchdog_timer);
- cancel_delayed_work(&rtlpriv->works.watchdog_wq);
- cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
- cancel_delayed_work(&rtlpriv->works.ps_work);
- cancel_delayed_work(&rtlpriv->works.ps_rfon_wq);
- cancel_delayed_work(&rtlpriv->works.fwevt_wq);
- cancel_delayed_work(&rtlpriv->works.c2hcmd_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.watchdog_wq);
+ if (ips_wq)
+ cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+ else
+ cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.ps_work);
+ cancel_delayed_work_sync(&rtlpriv->works.ps_rfon_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.fwevt_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.c2hcmd_wq);
}
EXPORT_SYMBOL_GPL(rtl_deinit_deferred_work);
diff --git a/drivers/net/wireless/realtek/rtlwifi/base.h b/drivers/net/wireless/realtek/rtlwifi/base.h
index 912f205779c3..a7ae40eaa3cd 100644
--- a/drivers/net/wireless/realtek/rtlwifi/base.h
+++ b/drivers/net/wireless/realtek/rtlwifi/base.h
@@ -121,7 +121,7 @@ void rtl_init_rfkill(struct ieee80211_hw *hw);
void rtl_deinit_rfkill(struct ieee80211_hw *hw);
void rtl_watch_dog_timer_callback(struct timer_list *t);
-void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
+void rtl_deinit_deferred_work(struct ieee80211_hw *hw, bool ips_wq);
bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
int rtlwifi_rate_mapping(struct ieee80211_hw *hw, bool isht,
diff --git a/drivers/net/wireless/realtek/rtlwifi/core.c b/drivers/net/wireless/realtek/rtlwifi/core.c
index cfea57efa7f4..4bf7967590ca 100644
--- a/drivers/net/wireless/realtek/rtlwifi/core.c
+++ b/drivers/net/wireless/realtek/rtlwifi/core.c
@@ -130,7 +130,6 @@ found_alt:
firmware->size);
rtlpriv->rtlhal.wowlan_fwsize = firmware->size;
}
- rtlpriv->rtlhal.fwsize = firmware->size;
release_firmware(firmware);
}
@@ -196,7 +195,7 @@ static void rtl_op_stop(struct ieee80211_hw *hw)
/* reset sec info */
rtl_cam_reset_sec_info(hw);
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, false);
}
rtlpriv->intf_ops->adapter_stop(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/pci.c b/drivers/net/wireless/realtek/rtlwifi/pci.c
index ae13bcfb3bf0..5d1fda16fc8c 100644
--- a/drivers/net/wireless/realtek/rtlwifi/pci.c
+++ b/drivers/net/wireless/realtek/rtlwifi/pci.c
@@ -2377,7 +2377,7 @@ void rtl_pci_disconnect(struct pci_dev *pdev)
ieee80211_unregister_hw(hw);
rtlmac->mac80211_registered = 0;
} else {
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, false);
rtlpriv->intf_ops->adapter_stop(hw);
}
rtlpriv->cfg->ops->disable_interrupt(hw);
diff --git a/drivers/net/wireless/realtek/rtlwifi/ps.c b/drivers/net/wireless/realtek/rtlwifi/ps.c
index 71af24e2e051..479a4cfc245d 100644
--- a/drivers/net/wireless/realtek/rtlwifi/ps.c
+++ b/drivers/net/wireless/realtek/rtlwifi/ps.c
@@ -71,7 +71,7 @@ bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
struct rtl_priv *rtlpriv = rtl_priv(hw);
/*<1> Stop all timer */
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, true);
/*<2> Disable Interrupt */
rtlpriv->cfg->ops->disable_interrupt(hw);
@@ -292,7 +292,7 @@ void rtl_ips_nic_on(struct ieee80211_hw *hw)
struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
enum rf_pwrstate rtstate;
- cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
+ cancel_delayed_work_sync(&rtlpriv->works.ips_nic_off_wq);
mutex_lock(&rtlpriv->locks.ips_mutex);
if (ppsc->inactiveps) {
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index f9faffc498bc..2ac5004d7a40 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1132,7 +1132,7 @@ void rtl_usb_disconnect(struct usb_interface *intf)
ieee80211_unregister_hw(hw);
rtlmac->mac80211_registered = 0;
} else {
- rtl_deinit_deferred_work(hw);
+ rtl_deinit_deferred_work(hw, false);
rtlpriv->intf_ops->adapter_stop(hw);
}
/*deinit rfkill */
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 922ce0abf5cf..a57daecf1d57 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1810,7 +1810,7 @@ static int talk_to_netback(struct xenbus_device *dev,
err = xen_net_read_mac(dev, info->netdev->dev_addr);
if (err) {
xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
- goto out;
+ goto out_unlocked;
}
rtnl_lock();
@@ -1925,6 +1925,7 @@ abort_transaction_no_dev_fatal:
xennet_destroy_queues(info);
out:
rtnl_unlock();
+out_unlocked:
device_unregister(&dev->dev);
return err;
}
@@ -1950,10 +1951,6 @@ static int xennet_connect(struct net_device *dev)
/* talk_to_netback() sets the correct number of queues */
num_queues = dev->real_num_tx_queues;
- rtnl_lock();
- netdev_update_features(dev);
- rtnl_unlock();
-
if (dev->reg_state == NETREG_UNINITIALIZED) {
err = register_netdev(dev);
if (err) {
@@ -1963,6 +1960,10 @@ static int xennet_connect(struct net_device *dev)
}
}
+ rtnl_lock();
+ netdev_update_features(dev);
+ rtnl_unlock();
+
/*
* All public and private state should now be sane. Get
* ready to start sending and receiving packets and give the driver
diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c
index d5553c47014f..5d823e965883 100644
--- a/drivers/nfc/pn533/usb.c
+++ b/drivers/nfc/pn533/usb.c
@@ -74,7 +74,7 @@ static void pn533_recv_response(struct urb *urb)
struct sk_buff *skb = NULL;
if (!urb->status) {
- skb = alloc_skb(urb->actual_length, GFP_KERNEL);
+ skb = alloc_skb(urb->actual_length, GFP_ATOMIC);
if (!skb) {
nfc_err(&phy->udev->dev, "failed to alloc memory\n");
} else {
@@ -186,7 +186,7 @@ static int pn533_usb_send_frame(struct pn533 *dev,
if (dev->protocol_type == PN533_PROTO_REQ_RESP) {
/* request for response for sent packet directly */
- rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC);
+ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL);
if (rc)
goto error;
} else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) {
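
Editor's note: the two hunks above swap the allocation flags to match the calling context — the URB completion callback runs in atomic (softirq) context and must not sleep, while the frame-send path runs in process context and may. A standalone sketch of that rule (hypothetical driver, not part of the patch):

#include <linux/skbuff.h>
#include <linux/usb.h>

/* Completion callbacks run in atomic context: only GFP_ATOMIC is safe. */
static void demo_recv_complete(struct urb *urb)
{
	struct sk_buff *skb = alloc_skb(urb->actual_length, GFP_ATOMIC);

	if (!skb)
		return;
	/* a real driver would hand the skb to the upper layer here */
	dev_kfree_skb_any(skb);
}

/* The send path is ordinary process context and may sleep. */
static int demo_send_frame(struct urb *urb)
{
	return usb_submit_urb(urb, GFP_KERNEL);
}
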
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2e96b34bc936..fb667bf469c7 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -278,6 +278,7 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
return -EIO;
if (memcpy_mcsafe(buf, nsio->addr + offset, size) != 0)
return -EIO;
+ return 0;
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 68940356cad3..8b1fd7f1a224 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -414,7 +414,8 @@ static int pmem_attach_disk(struct device *dev,
blk_queue_logical_block_size(q, pmem_sector_size(ndns));
blk_queue_max_hw_sectors(q, UINT_MAX);
blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- blk_queue_flag_set(QUEUE_FLAG_DAX, q);
+ if (pmem->pfn_flags & PFN_MAP)
+ blk_queue_flag_set(QUEUE_FLAG_DAX, q);
q->queuedata = pmem;
disk = alloc_disk_node(0, nid);
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 21710a7460c8..bf65501e6ed6 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -100,6 +100,22 @@ static struct class *nvme_subsys_class;
static void nvme_ns_remove(struct nvme_ns *ns);
static int nvme_revalidate_disk(struct gendisk *disk);
static void nvme_put_subsystem(struct nvme_subsystem *subsys);
+static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
+ unsigned nsid);
+
+static void nvme_set_queue_dying(struct nvme_ns *ns)
+{
+ /*
+ * Revalidating a dead namespace sets capacity to 0. This will end
+ * buffered writers dirtying pages that can't be synced.
+ */
+ if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
+ return;
+ revalidate_disk(ns->disk);
+ blk_set_queue_dying(ns->queue);
+ /* Forcibly unquiesce queues to avoid blocking dispatch */
+ blk_mq_unquiesce_queue(ns->queue);
+}
static void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
@@ -1044,14 +1060,17 @@ EXPORT_SYMBOL_GPL(nvme_set_queue_count);
static void nvme_enable_aen(struct nvme_ctrl *ctrl)
{
- u32 result;
+ u32 result, supported_aens = ctrl->oaes & NVME_AEN_SUPPORTED;
int status;
- status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT,
- ctrl->oaes & NVME_AEN_SUPPORTED, NULL, 0, &result);
+ if (!supported_aens)
+ return;
+
+ status = nvme_set_features(ctrl, NVME_FEAT_ASYNC_EVENT, supported_aens,
+ NULL, 0, &result);
if (status)
dev_warn(ctrl->device, "Failed to configure AEN (cfg %x)\n",
- ctrl->oaes & NVME_AEN_SUPPORTED);
+ supported_aens);
}
static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
@@ -1151,19 +1170,15 @@ static u32 nvme_passthru_start(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
static void nvme_update_formats(struct nvme_ctrl *ctrl)
{
- struct nvme_ns *ns, *next;
- LIST_HEAD(rm_list);
+ struct nvme_ns *ns;
- down_write(&ctrl->namespaces_rwsem);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
- if (ns->disk && nvme_revalidate_disk(ns->disk)) {
- list_move_tail(&ns->list, &rm_list);
- }
- }
- up_write(&ctrl->namespaces_rwsem);
+ down_read(&ctrl->namespaces_rwsem);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ if (ns->disk && nvme_revalidate_disk(ns->disk))
+ nvme_set_queue_dying(ns);
+ up_read(&ctrl->namespaces_rwsem);
- list_for_each_entry_safe(ns, next, &rm_list, list)
- nvme_ns_remove(ns);
+ nvme_remove_invalid_namespaces(ctrl, NVME_NSID_ALL);
}
static void nvme_passthru_end(struct nvme_ctrl *ctrl, u32 effects)
@@ -1218,7 +1233,7 @@ static int nvme_user_cmd(struct nvme_ctrl *ctrl, struct nvme_ns *ns,
effects = nvme_passthru_start(ctrl, ns, cmd.opcode);
status = nvme_submit_user_cmd(ns ? ns->queue : ctrl->admin_q, &c,
(void __user *)(uintptr_t)cmd.addr, cmd.data_len,
- (void __user *)(uintptr_t)cmd.metadata, cmd.metadata,
+ (void __user *)(uintptr_t)cmd.metadata, cmd.metadata_len,
0, &cmd.result, timeout);
nvme_passthru_end(ctrl, effects);
@@ -1808,6 +1823,7 @@ static void nvme_set_queue_limits(struct nvme_ctrl *ctrl,
u32 max_segments =
(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+ max_segments = min_not_zero(max_segments, ctrl->max_segments);
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
@@ -3137,7 +3153,7 @@ static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
down_write(&ctrl->namespaces_rwsem);
list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
- if (ns->head->ns_id > nsid)
+ if (ns->head->ns_id > nsid || test_bit(NVME_NS_DEAD, &ns->flags))
list_move_tail(&ns->list, &rm_list);
}
up_write(&ctrl->namespaces_rwsem);
@@ -3541,19 +3557,9 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl)
if (ctrl->admin_q)
blk_mq_unquiesce_queue(ctrl->admin_q);
- list_for_each_entry(ns, &ctrl->namespaces, list) {
- /*
- * Revalidating a dead namespace sets capacity to 0. This will
- * end buffered writers dirtying pages that can't be synced.
- */
- if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
- continue;
- revalidate_disk(ns->disk);
- blk_set_queue_dying(ns->queue);
+ list_for_each_entry(ns, &ctrl->namespaces, list)
+ nvme_set_queue_dying(ns);
- /* Forcibly unquiesce queues to avoid blocking dispatch */
- blk_mq_unquiesce_queue(ns->queue);
- }
up_read(&ctrl->namespaces_rwsem);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c
index 903eb4545e26..f7efe5a58cc7 100644
--- a/drivers/nvme/host/fabrics.c
+++ b/drivers/nvme/host/fabrics.c
@@ -539,14 +539,18 @@ static struct nvmf_transport_ops *nvmf_lookup_transport(
/*
* For something we're not in a state to send to the device the default action
* is to busy it and retry it after the controller state is recovered. However,
- * anything marked for failfast or nvme multipath is immediately failed.
+ * if the controller is deleting or if anything is marked for failfast or
+ * nvme multipath it is immediately failed.
*
* Note: commands used to initialize the controller will be marked for failfast.
* Note: nvme cli/ioctl commands are marked for failfast.
*/
-blk_status_t nvmf_fail_nonready_command(struct request *rq)
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq)
{
- if (!blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
+ if (ctrl->state != NVME_CTRL_DELETING &&
+ ctrl->state != NVME_CTRL_DEAD &&
+ !blk_noretry_request(rq) && !(rq->cmd_flags & REQ_NVME_MPATH))
return BLK_STS_RESOURCE;
nvme_req(rq)->status = NVME_SC_ABORT_REQ;
return BLK_STS_IOERR;
diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h
index e1818a27aa2d..aa2fdb2a2e8f 100644
--- a/drivers/nvme/host/fabrics.h
+++ b/drivers/nvme/host/fabrics.h
@@ -162,7 +162,8 @@ void nvmf_unregister_transport(struct nvmf_transport_ops *ops);
void nvmf_free_options(struct nvmf_ctrl_options *opts);
int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size);
bool nvmf_should_reconnect(struct nvme_ctrl *ctrl);
-blk_status_t nvmf_fail_nonready_command(struct request *rq);
+blk_status_t nvmf_fail_nonready_command(struct nvme_ctrl *ctrl,
+ struct request *rq);
bool __nvmf_check_ready(struct nvme_ctrl *ctrl, struct request *rq,
bool queue_live);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b528a2f5826c..9bac912173ba 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2272,7 +2272,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ctrl->rport->remoteport.port_state != FC_OBJSTATE_ONLINE ||
!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(rq);
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
ret = nvme_setup_cmd(ns, rq, sqe);
if (ret)
@@ -2790,6 +2790,9 @@ nvme_fc_delete_association(struct nvme_fc_ctrl *ctrl)
/* re-enable the admin_q so anything new can fast fail */
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ /* resume the io queues so that things will fast fail */
+ nvme_start_queues(&ctrl->ctrl);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2804,9 +2807,6 @@ nvme_fc_delete_ctrl(struct nvme_ctrl *nctrl)
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
-
- /* resume the io queues so that things will fast fail */
- nvme_start_queues(nctrl);
}
static void
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 231807cbc849..0c4a33df3b2f 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -170,6 +170,7 @@ struct nvme_ctrl {
u64 cap;
u32 page_size;
u32 max_hw_sectors;
+ u32 max_segments;
u16 oncs;
u16 oacs;
u16 nssa;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fc33804662e7..ddd441b1516a 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -38,6 +38,13 @@
#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS 127
+
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -100,6 +107,8 @@ struct nvme_dev {
struct nvme_ctrl ctrl;
struct completion ioq_wait;
+ mempool_t *iod_mempool;
+
/* shadow doorbell buffer support: */
u32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@ static blk_status_t nvme_init_iod(struct request *rq, struct nvme_dev *dev)
iod->use_sgl = nvme_pci_use_sgls(dev, rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
- iod->use_sgl);
-
- iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+ iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
if (!iod->sg)
return BLK_STS_RESOURCE;
} else {
@@ -526,7 +532,7 @@ static void nvme_free_iod(struct nvme_dev *dev, struct request *req)
}
if (iod->sg != iod->inline_sg)
- kfree(iod->sg);
+ mempool_free(iod->sg, dev->iod_mempool);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@ static void nvme_pci_free_ctrl(struct nvme_ctrl *ctrl)
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
free_opal_dev(dev->ctrl.opal_dev);
+ mempool_destroy(dev->iod_mempool);
kfree(dev);
}
@@ -2289,6 +2296,7 @@ static void nvme_remove_dead_ctrl(struct nvme_dev *dev, int status)
nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
+ nvme_kill_queues(&dev->ctrl);
if (!queue_work(nvme_wq, &dev->remove_work))
nvme_put_ctrl(&dev->ctrl);
}
@@ -2333,6 +2341,13 @@ static void nvme_reset_work(struct work_struct *work)
if (result)
goto out;
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
+
result = nvme_init_identify(&dev->ctrl);
if (result)
goto out;
@@ -2405,7 +2420,6 @@ static void nvme_remove_dead_ctrl_work(struct work_struct *work)
struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
device_release_driver(&pdev->dev);
nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
int node, result = -ENOMEM;
struct nvme_dev *dev;
unsigned long quirks = id->driver_data;
+ size_t alloc_size;
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
@@ -2541,10 +2556,27 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
quirks |= check_vendor_combination_bug(pdev);
+ /*
+ * Double check that our mempool alloc size will cover the biggest
+ * command we support.
+ */
+ alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+ NVME_MAX_SEGS, true);
+ WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+ dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+ mempool_kfree,
+ (void *) alloc_size,
+ GFP_KERNEL, node);
+ if (!dev->iod_mempool) {
+ result = -ENOMEM;
+ goto release_pools;
+ }
+
result = nvme_init_ctrl(&dev->ctrl, &pdev->dev, &nvme_pci_ctrl_ops,
quirks);
if (result)
- goto release_pools;
+ goto release_mempool;
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
@@ -2553,6 +2585,8 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return 0;
+ release_mempool:
+ mempool_destroy(dev->iod_mempool);
release_pools:
nvme_release_prp_pools(dev);
unmap:
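
Editor's note: the probe-time mempool above guarantees that the atomic scatterlist allocation in the I/O path can always fall back to one reserved, worst-case-sized element. A standalone sketch of that reserve pattern (hypothetical helper names, not part of the patch):

#include <linux/mempool.h>
#include <linux/slab.h>

/* mempool_kmalloc()/mempool_kfree() interpret pool_data as the element
 * size, so one reserved element of worst_case bytes is kept on hand. */
static mempool_t *demo_create_iod_pool(size_t worst_case, int nid)
{
	return mempool_create_node(1, mempool_kmalloc, mempool_kfree,
				   (void *)worst_case, GFP_KERNEL, nid);
}

/* In the I/O path a plain kmalloc(GFP_ATOMIC) may fail under memory
 * pressure, but mempool_alloc() can dip into the reserved element. */
static void *demo_alloc_sg(mempool_t *pool)
{
	return mempool_alloc(pool, GFP_ATOMIC);
}
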
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c9424da0d23e..66ec5985c9f3 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -560,12 +560,6 @@ static void nvme_rdma_free_queue(struct nvme_rdma_queue *queue)
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- if (nvme_rdma_queue_idx(queue) == 0) {
- nvme_rdma_free_qe(queue->device->dev,
- &queue->ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
- }
-
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@@ -698,7 +692,7 @@ static struct blk_mq_tag_set *nvme_rdma_alloc_tagset(struct nvme_ctrl *nctrl,
set = &ctrl->tag_set;
memset(set, 0, sizeof(*set));
set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->opts->queue_size;
+ set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
set->numa_node = NUMA_NO_NODE;
set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,15 @@ out:
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
+ if (ctrl->async_event_sqe.data) {
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ ctrl->async_event_sqe.data = NULL;
+ }
nvme_rdma_free_queue(&ctrl->queues[0]);
}
@@ -755,11 +753,16 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ if (error)
+ goto out_free_queue;
+
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
if (IS_ERR(ctrl->ctrl.admin_tagset)) {
error = PTR_ERR(ctrl->ctrl.admin_tagset);
- goto out_free_queue;
+ goto out_free_async_qe;
}
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +798,6 @@ static int nvme_rdma_configure_admin_queue(struct nvme_rdma_ctrl *ctrl,
if (error)
goto out_stop_queue;
- error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
- &ctrl->async_event_sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- if (error)
- goto out_stop_queue;
-
return 0;
out_stop_queue:
@@ -811,6 +808,9 @@ out_cleanup_queue:
out_free_tagset:
if (new)
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
@@ -819,7 +819,6 @@ out_free_queue:
static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_stop_io_queues(ctrl);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +887,9 @@ static void nvme_rdma_free_ctrl(struct nvme_ctrl *nctrl)
list_del(&ctrl->list);
mutex_unlock(&nvme_rdma_ctrl_mutex);
- kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
+ kfree(ctrl->queues);
kfree(ctrl);
}
@@ -949,6 +948,7 @@ static void nvme_rdma_reconnect_ctrl_work(struct work_struct *work)
return;
destroy_admin:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +965,14 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work)
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, false);
}
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1637,7 +1639,7 @@ static blk_status_t nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
WARN_ON_ONCE(rq->tag < 0);
if (!nvmf_check_ready(&queue->ctrl->ctrl, rq, queue_ready))
- return nvmf_fail_nonready_command(rq);
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, rq);
dev = queue->device->dev;
ib_dma_sync_single_for_cpu(dev, sqe->dma,
@@ -1736,6 +1738,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1750,7 @@ static void nvme_rdma_shutdown_ctrl(struct nvme_rdma_ctrl *ctrl, bool shutdown)
nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1936,6 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_free_ctrl;
}
- ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
- 0 /* no quirks, we're perfect! */);
- if (ret)
- goto out_free_ctrl;
-
INIT_DELAYED_WORK(&ctrl->reconnect_work,
nvme_rdma_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1949,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
GFP_KERNEL);
if (!ctrl->queues)
- goto out_uninit_ctrl;
+ goto out_free_ctrl;
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_kfree_queues;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
WARN_ON_ONCE(!changed);
ret = nvme_rdma_configure_admin_queue(ctrl, true);
if (ret)
- goto out_kfree_queues;
+ goto out_uninit_ctrl;
/* sanity check icdoff */
if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1978,19 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
goto out_remove_admin_queue;
}
- if (opts->queue_size > ctrl->ctrl.maxcmd) {
- /* warn if maxcmd is lower than queue_size */
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl maxcmd %u, clamping down\n",
- opts->queue_size, ctrl->ctrl.maxcmd);
- opts->queue_size = ctrl->ctrl.maxcmd;
- }
-
+ /* only warn if argument is too large here, will clamp later */
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- /* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
- opts->queue_size = ctrl->ctrl.sqsize + 1;
+ }
+
+ /* warn if maxcmd is lower than sqsize+1 */
+ if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+ dev_warn(ctrl->ctrl.device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+ ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
}
if (opts->nr_io_queues) {
@@ -2013,15 +2016,16 @@ static struct nvme_ctrl *nvme_rdma_create_ctrl(struct device *dev,
return &ctrl->ctrl;
out_remove_admin_queue:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
- kfree(ctrl->queues);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
+out_kfree_queues:
+ kfree(ctrl->queues);
out_free_ctrl:
kfree(ctrl);
return ERR_PTR(ret);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index d3f3b3ec4d1a..ebea1373d1b7 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -282,6 +282,7 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
{
struct nvmet_ns *ns = to_nvmet_ns(item);
struct nvmet_subsys *subsys = ns->subsys;
+ size_t len;
int ret;
mutex_lock(&subsys->lock);
@@ -289,10 +290,14 @@ static ssize_t nvmet_ns_device_path_store(struct config_item *item,
if (ns->enabled)
goto out_unlock;
- kfree(ns->device_path);
+ ret = -EINVAL;
+ len = strcspn(page, "\n");
+ if (!len)
+ goto out_unlock;
+ kfree(ns->device_path);
ret = -ENOMEM;
- ns->device_path = kstrndup(page, strcspn(page, "\n"), GFP_KERNEL);
+ ns->device_path = kstrndup(page, len, GFP_KERNEL);
if (!ns->device_path)
goto out_unlock;
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a03da764ecae..9838103f2d62 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -339,7 +339,7 @@ int nvmet_ns_enable(struct nvmet_ns *ns)
goto out_unlock;
ret = nvmet_bdev_ns_enable(ns);
- if (ret)
+ if (ret == -ENOTBLK)
ret = nvmet_file_ns_enable(ns);
if (ret)
goto out_unlock;
@@ -686,6 +686,14 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
}
ctrl->csts = NVME_CSTS_RDY;
+
+ /*
+ * Controllers that are not yet enabled should not really enforce the
+ * keep alive timeout, but we still want to track a timeout and cleanup
+ * in case a host died before it enabled the controller. Hence, simply
+ * reset the keep alive timer when the controller is enabled.
+ */
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
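
Editor's note: the mod_delayed_work() call added above re-arms the keep-alive timer when the host finally enables the controller. A standalone sketch of that re-arm idiom (hypothetical struct, not part of the patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct demo_ctrl {
	struct delayed_work ka_work;
	unsigned int kato;	/* keep-alive timeout in seconds */
};

/* mod_delayed_work() (re)schedules the work whether or not it is already
 * pending, which is exactly what "reset the keep-alive timeout" needs. */
static void demo_reset_keep_alive(struct demo_ctrl *ctrl)
{
	mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
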
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 408279cb6f2c..29b4b236afd8 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -58,8 +58,8 @@ struct nvmet_fc_ls_iod {
struct work_struct work;
} __aligned(sizeof(unsigned long long));
+/* desired maximum for a single sequence - if sg list allows it */
#define NVMET_FC_MAX_SEQ_LENGTH (256 * 1024)
-#define NVMET_FC_MAX_XFR_SGENTS (NVMET_FC_MAX_SEQ_LENGTH / PAGE_SIZE)
enum nvmet_fcp_datadir {
NVMET_FCP_NODATA,
@@ -74,6 +74,7 @@ struct nvmet_fc_fcp_iod {
struct nvme_fc_cmd_iu cmdiubuf;
struct nvme_fc_ersp_iu rspiubuf;
dma_addr_t rspdma;
+ struct scatterlist *next_sg;
struct scatterlist *data_sg;
int data_sg_cnt;
u32 offset;
@@ -1025,8 +1026,7 @@ nvmet_fc_register_targetport(struct nvmet_fc_port_info *pinfo,
INIT_LIST_HEAD(&newrec->assoc_list);
kref_init(&newrec->ref);
ida_init(&newrec->assoc_cnt);
- newrec->max_sg_cnt = min_t(u32, NVMET_FC_MAX_XFR_SGENTS,
- template->max_sgl_segments);
+ newrec->max_sg_cnt = template->max_sgl_segments;
ret = nvmet_fc_alloc_ls_iodlist(newrec);
if (ret) {
@@ -1722,6 +1722,7 @@ nvmet_fc_alloc_tgt_pgs(struct nvmet_fc_fcp_iod *fod)
((fod->io_dir == NVMET_FCP_WRITE) ?
DMA_FROM_DEVICE : DMA_TO_DEVICE));
/* note: write from initiator perspective */
+ fod->next_sg = fod->data_sg;
return 0;
@@ -1866,24 +1867,49 @@ nvmet_fc_transfer_fcp_data(struct nvmet_fc_tgtport *tgtport,
struct nvmet_fc_fcp_iod *fod, u8 op)
{
struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq;
+ struct scatterlist *sg = fod->next_sg;
unsigned long flags;
- u32 tlen;
+ u32 remaininglen = fod->req.transfer_len - fod->offset;
+ u32 tlen = 0;
int ret;
fcpreq->op = op;
fcpreq->offset = fod->offset;
fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC;
- tlen = min_t(u32, tgtport->max_sg_cnt * PAGE_SIZE,
- (fod->req.transfer_len - fod->offset));
+ /*
+ * for next sequence:
+ * break at a sg element boundary
+ * attempt to keep sequence length capped at
+ * NVMET_FC_MAX_SEQ_LENGTH but allow sequence to
+ * be longer if a single sg element is larger
+ * than that amount. This is done to avoid creating
+ * a new sg list to use for the tgtport api.
+ */
+ fcpreq->sg = sg;
+ fcpreq->sg_cnt = 0;
+ while (tlen < remaininglen &&
+ fcpreq->sg_cnt < tgtport->max_sg_cnt &&
+ tlen + sg_dma_len(sg) < NVMET_FC_MAX_SEQ_LENGTH) {
+ fcpreq->sg_cnt++;
+ tlen += sg_dma_len(sg);
+ sg = sg_next(sg);
+ }
+ if (tlen < remaininglen && fcpreq->sg_cnt == 0) {
+ fcpreq->sg_cnt++;
+ tlen += min_t(u32, sg_dma_len(sg), remaininglen);
+ sg = sg_next(sg);
+ }
+ if (tlen < remaininglen)
+ fod->next_sg = sg;
+ else
+ fod->next_sg = NULL;
+
fcpreq->transfer_length = tlen;
fcpreq->transferred_length = 0;
fcpreq->fcp_error = 0;
fcpreq->rsplen = 0;
- fcpreq->sg = &fod->data_sg[fod->offset / PAGE_SIZE];
- fcpreq->sg_cnt = DIV_ROUND_UP(tlen, PAGE_SIZE);
-
/*
* If the last READDATA request: check if LLDD supports
* combined xfr with response.
diff --git a/drivers/nvme/target/loop.c b/drivers/nvme/target/loop.c
index d8d91f04bd7e..ae7586b8be07 100644
--- a/drivers/nvme/target/loop.c
+++ b/drivers/nvme/target/loop.c
@@ -162,7 +162,7 @@ static blk_status_t nvme_loop_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_status_t ret;
if (!nvmf_check_ready(&queue->ctrl->ctrl, req, queue_ready))
- return nvmf_fail_nonready_command(req);
+ return nvmf_fail_nonready_command(&queue->ctrl->ctrl, req);
ret = nvme_setup_cmd(ns, req, &iod->cmd);
if (ret)
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index b5b0cdc21d01..514d1dfc5630 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -936,6 +936,10 @@ struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
return cell;
}
+ /* NULL cell_id only allowed for device tree; invalid otherwise */
+ if (!cell_id)
+ return ERR_PTR(-EINVAL);
+
return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 848f549164cd..466e3c8582f0 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -102,7 +102,7 @@ static u32 phandle_cache_mask;
* - the phandle lookup overhead reduction provided by the cache
* will likely be less
*/
-static void of_populate_phandle_cache(void)
+void of_populate_phandle_cache(void)
{
unsigned long flags;
u32 cache_entries;
@@ -134,8 +134,7 @@ out:
raw_spin_unlock_irqrestore(&devtree_lock, flags);
}
-#ifndef CONFIG_MODULES
-static int __init of_free_phandle_cache(void)
+int of_free_phandle_cache(void)
{
unsigned long flags;
@@ -148,6 +147,7 @@ static int __init of_free_phandle_cache(void)
return 0;
}
+#if !defined(CONFIG_MODULES)
late_initcall_sync(of_free_phandle_cache);
#endif
diff --git a/drivers/of/of_private.h b/drivers/of/of_private.h
index 891d780c076a..216175d11d3d 100644
--- a/drivers/of/of_private.h
+++ b/drivers/of/of_private.h
@@ -79,6 +79,8 @@ int of_resolve_phandles(struct device_node *tree);
#if defined(CONFIG_OF_OVERLAY)
void of_overlay_mutex_lock(void);
void of_overlay_mutex_unlock(void);
+int of_free_phandle_cache(void);
+void of_populate_phandle_cache(void);
#else
static inline void of_overlay_mutex_lock(void) {};
static inline void of_overlay_mutex_unlock(void) {};
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index 7baa53e5b1d7..eda57ef12fd0 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -804,6 +804,8 @@ static int of_overlay_apply(const void *fdt, struct device_node *tree,
goto err_free_overlay_changeset;
}
+ of_populate_phandle_cache();
+
ret = __of_changeset_apply_notify(&ovcs->cset);
if (ret)
pr_err("overlay changeset entry notify error %d\n", ret);
@@ -1046,8 +1048,17 @@ int of_overlay_remove(int *ovcs_id)
list_del(&ovcs->ovcs_list);
+ /*
+ * Disable phandle cache. Avoids race condition that would arise
+ * from removing cache entry when the associated node is deleted.
+ */
+ of_free_phandle_cache();
+
ret_apply = 0;
ret = __of_changeset_revert_entries(&ovcs->cset, &ret_apply);
+
+ of_populate_phandle_cache();
+
if (ret) {
if (ret_apply)
devicetree_state_flags |= DTSF_REVERT_FAIL;
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index ab2f3fead6b1..31ff03dbeb83 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -598,7 +598,7 @@ static int _generic_set_opp_regulator(const struct opp_table *opp_table,
}
/* Scaling up? Scale voltage before frequency */
- if (freq > old_freq) {
+ if (freq >= old_freq) {
ret = _set_opp_voltage(dev, reg, new_supply);
if (ret)
goto restore_voltage;
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 535201984b8b..1b2cfe51e8d7 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -28,10 +28,10 @@ obj-$(CONFIG_PCI_PF_STUB) += pci-pf-stub.o
obj-$(CONFIG_PCI_ECAM) += ecam.o
obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += xen-pcifront.o
-obj-y += controller/
-obj-y += switch/
-
# Endpoint library must be initialized before its users
obj-$(CONFIG_PCI_ENDPOINT) += endpoint/
+obj-y += controller/
+obj-y += switch/
+
ccflags-$(CONFIG_PCI_DEBUG) := -DDEBUG
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index 18fa09b3ac8f..cc9fa02d32a0 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -96,7 +96,6 @@ config PCI_HOST_GENERIC
depends on OF
select PCI_HOST_COMMON
select IRQ_DOMAIN
- select PCI_DOMAINS
help
Say Y here if you want to support a simple generic PCI host
controller, such as the one emulated by kvmtool.
@@ -138,7 +137,6 @@ config PCI_VERSATILE
config PCIE_IPROC
tristate
- select PCI_DOMAINS
help
This enables the iProc PCIe core controller support for Broadcom's
iProc family of SoCs. An appropriate bus interface driver needs
@@ -176,7 +174,6 @@ config PCIE_IPROC_MSI
config PCIE_ALTERA
bool "Altera PCIe controller"
depends on ARM || NIOS2 || COMPILE_TEST
- select PCI_DOMAINS
help
Say Y here if you want to enable PCIe controller support on Altera
FPGA.
diff --git a/drivers/pci/controller/dwc/Kconfig b/drivers/pci/controller/dwc/Kconfig
index 16f52c626b4b..91b0194240a5 100644
--- a/drivers/pci/controller/dwc/Kconfig
+++ b/drivers/pci/controller/dwc/Kconfig
@@ -58,7 +58,6 @@ config PCIE_DW_PLAT_HOST
depends on PCI && PCI_MSI_IRQ_DOMAIN
select PCIE_DW_HOST
select PCIE_DW_PLAT
- default y
help
Enables support for the PCIe controller in the Designware IP to
work in host mode. There are two instances of PCIe controller in
diff --git a/drivers/pci/controller/dwc/pcie-designware-host.c b/drivers/pci/controller/dwc/pcie-designware-host.c
index 781aa03aeede..29a05759a294 100644
--- a/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -363,7 +363,8 @@ int dw_pcie_host_init(struct pcie_port *pp)
resource_list_for_each_entry_safe(win, tmp, &bridge->windows) {
switch (resource_type(win->res)) {
case IORESOURCE_IO:
- ret = pci_remap_iospace(win->res, pp->io_base);
+ ret = devm_pci_remap_iospace(dev, win->res,
+ pp->io_base);
if (ret) {
dev_warn(dev, "Error %d: failed to map resource %pR\n",
ret, win->res);
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index d3172d5d3d35..0fae816fba39 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -849,7 +849,7 @@ static int advk_pcie_parse_request_of_pci_ranges(struct advk_pcie *pcie)
0, 0xF8000000, 0,
lower_32_bits(res->start),
OB_PCIE_IO);
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
diff --git a/drivers/pci/controller/pci-ftpci100.c b/drivers/pci/controller/pci-ftpci100.c
index a1ebe9ed441f..bf5ece5d9291 100644
--- a/drivers/pci/controller/pci-ftpci100.c
+++ b/drivers/pci/controller/pci-ftpci100.c
@@ -355,11 +355,13 @@ static int faraday_pci_setup_cascaded_irq(struct faraday_pci *p)
irq = of_irq_get(intc, 0);
if (irq <= 0) {
dev_err(p->dev, "failed to get parent IRQ\n");
+ of_node_put(intc);
return irq ?: -EINVAL;
}
p->irqdomain = irq_domain_add_linear(intc, PCI_NUM_INTX,
&faraday_pci_irqdomain_ops, p);
+ of_node_put(intc);
if (!p->irqdomain) {
dev_err(p->dev, "failed to create Gemini PCI IRQ domain\n");
return -EINVAL;
@@ -501,7 +503,7 @@ static int faraday_pci_probe(struct platform_device *pdev)
dev_err(dev, "illegal IO mem size\n");
return -EINVAL;
}
- ret = pci_remap_iospace(io, io_base);
+ ret = devm_pci_remap_iospace(dev, io, io_base);
if (ret) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
ret, io);
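
Editor's note: the hunks above drop the reference held on the child interrupt-controller node on every exit path, not just one. A standalone sketch of the of_node_put() rule (hypothetical function, not part of the patch):

#include <linux/of.h>
#include <linux/of_irq.h>

/* Every node returned by an of_get_*/of_find_* lookup carries a reference
 * that must be dropped with of_node_put() once the node is no longer
 * needed - on error paths as well as on success. */
static int demo_parse_parent_irq(struct device_node *np)
{
	struct device_node *intc;
	int irq;

	intc = of_get_child_by_name(np, "interrupt-controller");
	if (!intc)
		return -ENODEV;

	irq = of_irq_get(intc, 0);
	of_node_put(intc);
	if (irq <= 0)
		return irq ? irq : -EINVAL;

	return irq;
}
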
diff --git a/drivers/pci/controller/pci-hyperv.c b/drivers/pci/controller/pci-hyperv.c
index fadc305533d9..d4d4a55f09f8 100644
--- a/drivers/pci/controller/pci-hyperv.c
+++ b/drivers/pci/controller/pci-hyperv.c
@@ -1074,6 +1074,7 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
struct pci_bus *pbus;
struct pci_dev *pdev;
struct cpumask *dest;
+ unsigned long flags;
struct compose_comp_ctxt comp;
struct tran_int_desc *int_desc;
struct {
@@ -1165,14 +1166,15 @@ static void hv_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
* the channel callback directly when channel->target_cpu is
* the current CPU. When the higher level interrupt code
* calls us with interrupt enabled, let's add the
- * local_bh_disable()/enable() to avoid race.
+ * local_irq_save()/restore() to avoid race:
+ * hv_pci_onchannelcallback() can also run in tasklet.
*/
- local_bh_disable();
+ local_irq_save(flags);
if (hbus->hdev->channel->target_cpu == smp_processor_id())
hv_pci_onchannelcallback(hbus);
- local_bh_enable();
+ local_irq_restore(flags);
if (hpdev->state == hv_pcichild_ejecting) {
dev_err_once(&hbus->hdev->device,
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index 68b8bfbdb867..d219404bad92 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -537,7 +537,7 @@ static int v3_pci_setup_resource(struct v3_pci *v3,
v3->io_bus_addr = io->start - win->offset;
dev_dbg(dev, "I/O window %pR, bus addr %pap\n",
io, &v3->io_bus_addr);
- ret = pci_remap_iospace(io, io_base);
+ ret = devm_pci_remap_iospace(dev, io, io_base);
if (ret) {
dev_warn(dev,
"error %d: failed to map resource %pR\n",
diff --git a/drivers/pci/controller/pci-versatile.c b/drivers/pci/controller/pci-versatile.c
index 994f32061b32..f59ad2728c0b 100644
--- a/drivers/pci/controller/pci-versatile.c
+++ b/drivers/pci/controller/pci-versatile.c
@@ -82,7 +82,7 @@ static int versatile_pci_parse_request_of_pci_ranges(struct device *dev,
switch (resource_type(res)) {
case IORESOURCE_IO:
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
diff --git a/drivers/pci/controller/pci-xgene.c b/drivers/pci/controller/pci-xgene.c
index d854d67e873c..ffda3e8b4742 100644
--- a/drivers/pci/controller/pci-xgene.c
+++ b/drivers/pci/controller/pci-xgene.c
@@ -423,7 +423,7 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port,
case IORESOURCE_IO:
xgene_pcie_setup_ob_reg(port, res, OMR3BARL, io_base,
res->start - window->offset);
- ret = pci_remap_iospace(res, io_base);
+ ret = devm_pci_remap_iospace(dev, res, io_base);
if (ret < 0)
return ret;
break;
diff --git a/drivers/pci/controller/pcie-mediatek.c b/drivers/pci/controller/pcie-mediatek.c
index 0baabe30858f..861dda69f366 100644
--- a/drivers/pci/controller/pcie-mediatek.c
+++ b/drivers/pci/controller/pcie-mediatek.c
@@ -1109,7 +1109,7 @@ static int mtk_pcie_request_resources(struct mtk_pcie *pcie)
if (err < 0)
return err;
- pci_remap_iospace(&pcie->pio, pcie->io.start);
+ devm_pci_remap_iospace(dev, &pcie->pio, pcie->io.start);
return 0;
}
diff --git a/drivers/pci/controller/pcie-rcar.c b/drivers/pci/controller/pcie-rcar.c
index 874d75c9ee4a..c8febb009454 100644
--- a/drivers/pci/controller/pcie-rcar.c
+++ b/drivers/pci/controller/pcie-rcar.c
@@ -680,7 +680,11 @@ static int rcar_pcie_phy_init_gen3(struct rcar_pcie *pcie)
if (err)
return err;
- return phy_power_on(pcie->phy);
+ err = phy_power_on(pcie->phy);
+ if (err)
+ phy_exit(pcie->phy);
+
+ return err;
}
static int rcar_msi_alloc(struct rcar_msi *chip)
@@ -1165,7 +1169,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
if (rcar_pcie_hw_init(pcie)) {
dev_info(dev, "PCIe link down\n");
err = -ENODEV;
- goto err_clk_disable;
+ goto err_phy_shutdown;
}
data = rcar_pci_read_reg(pcie, MACSR);
@@ -1177,7 +1181,7 @@ static int rcar_pcie_probe(struct platform_device *pdev)
dev_err(dev,
"failed to enable MSI support: %d\n",
err);
- goto err_clk_disable;
+ goto err_phy_shutdown;
}
}
@@ -1191,6 +1195,12 @@ err_msi_teardown:
if (IS_ENABLED(CONFIG_PCI_MSI))
rcar_pcie_teardown_msi(pcie);
+err_phy_shutdown:
+ if (pcie->phy) {
+ phy_power_off(pcie->phy);
+ phy_exit(pcie->phy);
+ }
+
err_clk_disable:
clk_disable_unprepare(pcie->bus_clk);
diff --git a/drivers/pci/controller/pcie-xilinx-nwl.c b/drivers/pci/controller/pcie-xilinx-nwl.c
index 6a4bbb5b3de0..fb32840ce8e6 100644
--- a/drivers/pci/controller/pcie-xilinx-nwl.c
+++ b/drivers/pci/controller/pcie-xilinx-nwl.c
@@ -559,7 +559,7 @@ static int nwl_pcie_init_irq_domain(struct nwl_pcie *pcie)
PCI_NUM_INTX,
&legacy_domain_ops,
pcie);
-
+ of_node_put(legacy_intc_node);
if (!pcie->legacy_irq_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
diff --git a/drivers/pci/controller/pcie-xilinx.c b/drivers/pci/controller/pcie-xilinx.c
index b110a3a814e3..7b1389d8e2a5 100644
--- a/drivers/pci/controller/pcie-xilinx.c
+++ b/drivers/pci/controller/pcie-xilinx.c
@@ -509,6 +509,7 @@ static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
port->leg_domain = irq_domain_add_linear(pcie_intc_node, PCI_NUM_INTX,
&intx_domain_ops,
port);
+ of_node_put(pcie_intc_node);
if (!port->leg_domain) {
dev_err(dev, "Failed to get a INTx IRQ domain\n");
return -ENODEV;
diff --git a/drivers/pci/endpoint/pci-epf-core.c b/drivers/pci/endpoint/pci-epf-core.c
index 523a8cab3bfb..825fa24427a3 100644
--- a/drivers/pci/endpoint/pci-epf-core.c
+++ b/drivers/pci/endpoint/pci-epf-core.c
@@ -137,6 +137,20 @@ void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar)
}
EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
+static void pci_epf_remove_cfs(struct pci_epf_driver *driver)
+{
+ struct config_group *group, *tmp;
+
+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+ return;
+
+ mutex_lock(&pci_epf_mutex);
+ list_for_each_entry_safe(group, tmp, &driver->epf_group, group_entry)
+ pci_ep_cfs_remove_epf_group(group);
+ list_del(&driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+}
+
/**
* pci_epf_unregister_driver() - unregister the PCI EPF driver
* @driver: the PCI EPF driver that has to be unregistered
@@ -145,17 +159,38 @@ EXPORT_SYMBOL_GPL(pci_epf_alloc_space);
*/
void pci_epf_unregister_driver(struct pci_epf_driver *driver)
{
- struct config_group *group;
-
- mutex_lock(&pci_epf_mutex);
- list_for_each_entry(group, &driver->epf_group, group_entry)
- pci_ep_cfs_remove_epf_group(group);
- list_del(&driver->epf_group);
- mutex_unlock(&pci_epf_mutex);
+ pci_epf_remove_cfs(driver);
driver_unregister(&driver->driver);
}
EXPORT_SYMBOL_GPL(pci_epf_unregister_driver);
+static int pci_epf_add_cfs(struct pci_epf_driver *driver)
+{
+ struct config_group *group;
+ const struct pci_epf_device_id *id;
+
+ if (!IS_ENABLED(CONFIG_PCI_ENDPOINT_CONFIGFS))
+ return 0;
+
+ INIT_LIST_HEAD(&driver->epf_group);
+
+ id = driver->id_table;
+ while (id->name[0]) {
+ group = pci_ep_cfs_add_epf_group(id->name);
+ if (IS_ERR(group)) {
+ pci_epf_remove_cfs(driver);
+ return PTR_ERR(group);
+ }
+
+ mutex_lock(&pci_epf_mutex);
+ list_add_tail(&group->group_entry, &driver->epf_group);
+ mutex_unlock(&pci_epf_mutex);
+ id++;
+ }
+
+ return 0;
+}
+
/**
* __pci_epf_register_driver() - register a new PCI EPF driver
* @driver: structure representing PCI EPF driver
@@ -167,8 +202,6 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
struct module *owner)
{
int ret;
- struct config_group *group;
- const struct pci_epf_device_id *id;
if (!driver->ops)
return -EINVAL;
@@ -183,16 +216,7 @@ int __pci_epf_register_driver(struct pci_epf_driver *driver,
if (ret)
return ret;
- INIT_LIST_HEAD(&driver->epf_group);
-
- id = driver->id_table;
- while (id->name[0]) {
- group = pci_ep_cfs_add_epf_group(id->name);
- mutex_lock(&pci_epf_mutex);
- list_add_tail(&group->group_entry, &driver->epf_group);
- mutex_unlock(&pci_epf_mutex);
- id++;
- }
+ pci_epf_add_cfs(driver);
return 0;
}
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index 3979f89b250a..5bd6c1573295 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -7,7 +7,6 @@
* All rights reserved.
*
* Send feedback to <kristen.c.accardi@intel.com>
- *
*/
#include <linux/module.h>
@@ -87,8 +86,17 @@ int acpi_get_hp_hw_control_from_firmware(struct pci_dev *pdev)
return 0;
/* If _OSC exists, we should not evaluate OSHP */
+
+ /*
+ * If there's no ACPI host bridge (i.e., ACPI support is compiled
+ * into the kernel but the hardware platform doesn't support ACPI),
+ * there's nothing to do here.
+ */
host = pci_find_host_bridge(pdev->bus);
root = acpi_pci_find_root(ACPI_HANDLE(&host->dev));
+ if (!root)
+ return 0;
+
if (root->osc_support_set)
goto no_control;
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index d0d73dbbd5ca..0f04ae648cf1 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -575,6 +575,22 @@ void pci_iov_release(struct pci_dev *dev)
}
/**
+ * pci_iov_remove - clean up SR-IOV state after PF driver is detached
+ * @dev: the PCI device
+ */
+void pci_iov_remove(struct pci_dev *dev)
+{
+ struct pci_sriov *iov = dev->sriov;
+
+ if (!dev->is_physfn)
+ return;
+
+ iov->driver_max_VFs = iov->total_VFs;
+ if (iov->num_VFs)
+ pci_warn(dev, "driver left SR-IOV enabled after remove\n");
+}
+
+/**
* pci_iov_update_resource - update a VF BAR
* @dev: the PCI device
* @resno: the resource number
diff --git a/drivers/pci/of.c b/drivers/pci/of.c
index d088c9147f10..69a60d6ebd73 100644
--- a/drivers/pci/of.c
+++ b/drivers/pci/of.c
@@ -612,7 +612,7 @@ int pci_parse_request_of_pci_ranges(struct device *dev,
switch (resource_type(res)) {
case IORESOURCE_IO:
- err = pci_remap_iospace(res, iobase);
+ err = devm_pci_remap_iospace(dev, res, iobase);
if (err) {
dev_warn(dev, "error %d: failed to map resource %pR\n",
err, res);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 65113b6eed14..89ee6a2b6eb8 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -629,6 +629,18 @@ static bool acpi_pci_need_resume(struct pci_dev *dev)
{
struct acpi_device *adev = ACPI_COMPANION(&dev->dev);
+ /*
+ * In some cases (eg. Samsung 305V4A) leaving a bridge in suspend over
+ * system-wide suspend/resume confuses the platform firmware, so avoid
+ * doing that, unless the bridge has a driver that should take care of
+ * the PM handling. According to Section 16.1.6 of ACPI 6.2, endpoint
+ * devices are expected to be in D3 before invoking the S3 entry path
+ * from the firmware, so they should not be affected by this issue.
+ */
+ if (pci_is_bridge(dev) && !dev->driver &&
+ acpi_target_system_state() != ACPI_STATE_S0)
+ return true;
+
if (!adev || !acpi_device_power_manageable(adev))
return false;
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index c125d53033c6..6792292b5fc7 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -445,6 +445,7 @@ static int pci_device_remove(struct device *dev)
}
pcibios_free_irq(pci_dev);
pci_dev->driver = NULL;
+ pci_iov_remove(pci_dev);
}
/* Undo the runtime PM settings in local_pci_probe() */
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 97acba712e4e..316496e99da9 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3579,6 +3579,44 @@ void pci_unmap_iospace(struct resource *res)
}
EXPORT_SYMBOL(pci_unmap_iospace);
+static void devm_pci_unmap_iospace(struct device *dev, void *ptr)
+{
+ struct resource **res = ptr;
+
+ pci_unmap_iospace(*res);
+}
+
+/**
+ * devm_pci_remap_iospace - Managed pci_remap_iospace()
+ * @dev: Generic device to remap IO address for
+ * @res: Resource describing the I/O space
+ * @phys_addr: physical address of range to be mapped
+ *
+ * Managed pci_remap_iospace(). Map is automatically unmapped on driver
+ * detach.
+ */
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr)
+{
+ const struct resource **ptr;
+ int error;
+
+ ptr = devres_alloc(devm_pci_unmap_iospace, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ error = pci_remap_iospace(res, phys_addr);
+ if (error) {
+ devres_free(ptr);
+ } else {
+ *ptr = res;
+ devres_add(dev, ptr);
+ }
+
+ return error;
+}
+EXPORT_SYMBOL(devm_pci_remap_iospace);
+
/**
* devm_pci_remap_cfgspace - Managed pci_remap_cfgspace()
* @dev: Generic device to remap IO address for
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index c358e7a07f3f..882f1f9596df 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -311,6 +311,7 @@ static inline void pci_restore_ats_state(struct pci_dev *dev)
#ifdef CONFIG_PCI_IOV
int pci_iov_init(struct pci_dev *dev);
void pci_iov_release(struct pci_dev *dev);
+void pci_iov_remove(struct pci_dev *dev);
void pci_iov_update_resource(struct pci_dev *dev, int resno);
resource_size_t pci_sriov_resource_alignment(struct pci_dev *dev, int resno);
void pci_restore_iov_state(struct pci_dev *dev);
@@ -325,6 +326,9 @@ static inline void pci_iov_release(struct pci_dev *dev)
{
}
+static inline void pci_iov_remove(struct pci_dev *dev)
+{
+}
static inline void pci_restore_iov_state(struct pci_dev *dev)
{
}
diff --git a/drivers/pci/pcie/err.c b/drivers/pci/pcie/err.c
index f7ce0cb0b0b7..f02e334beb45 100644
--- a/drivers/pci/pcie/err.c
+++ b/drivers/pci/pcie/err.c
@@ -295,6 +295,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
parent = udev->subordinate;
pci_lock_rescan_remove();
+ pci_dev_get(dev);
list_for_each_entry_safe_reverse(pdev, temp, &parent->devices,
bus_list) {
pci_dev_get(pdev);
@@ -328,6 +329,7 @@ void pcie_do_fatal_recovery(struct pci_dev *dev, u32 service)
pci_info(dev, "Device recovery from fatal error failed\n");
}
+ pci_dev_put(dev);
pci_unlock_rescan_remove();
}
diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c
index 6bdb1dad805f..0e31f1392a53 100644
--- a/drivers/perf/xgene_pmu.c
+++ b/drivers/perf/xgene_pmu.c
@@ -1463,7 +1463,7 @@ static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id)
case PMU_TYPE_IOB:
return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id);
case PMU_TYPE_IOB_SLOW:
- return devm_kasprintf(dev, GFP_KERNEL, "iob-slow%d", id);
+ return devm_kasprintf(dev, GFP_KERNEL, "iob_slow%d", id);
case PMU_TYPE_MCB:
return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id);
case PMU_TYPE_MC:
diff --git a/drivers/phy/broadcom/phy-brcm-usb-init.c b/drivers/phy/broadcom/phy-brcm-usb-init.c
index 1b7febc43da9..29d2c3b1913a 100644
--- a/drivers/phy/broadcom/phy-brcm-usb-init.c
+++ b/drivers/phy/broadcom/phy-brcm-usb-init.c
@@ -962,6 +962,10 @@ void brcm_usb_init_xhci(struct brcm_usb_init_params *params)
{
void __iomem *ctrl = params->ctrl_regs;
+ USB_CTRL_UNSET(ctrl, USB30_PCTL, PHY3_IDDQ_OVERRIDE);
+ /* 1 millisecond - for USB clocks to settle down */
+ usleep_range(1000, 2000);
+
if (BRCM_ID(params->family_id) == 0x7366) {
/*
* The PHY3_SOFT_RESETB bits default to the wrong state.
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index 23705e1a0023..0075fb0bef8c 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -182,13 +182,13 @@ static void phy_mdm6600_status(struct work_struct *work)
ddata = container_of(work, struct phy_mdm6600, status_work.work);
dev = ddata->dev;
- error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_CMD_LINES,
+ error = gpiod_get_array_value_cansleep(PHY_MDM6600_NR_STATUS_LINES,
ddata->status_gpios->desc,
values);
if (error)
return;
- for (i = 0; i < PHY_MDM6600_NR_CMD_LINES; i++) {
+ for (i = 0; i < PHY_MDM6600_NR_STATUS_LINES; i++) {
val |= values[i] << i;
dev_dbg(ddata->dev, "XXX %s: i: %i values[i]: %i val: %i\n",
__func__, i, values[i], val);
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 76243caa08c6..b5c880b50bb3 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -333,7 +333,7 @@ static int owl_pin_config_set(struct pinctrl_dev *pctrldev,
unsigned long flags;
unsigned int param;
u32 reg, bit, width, arg;
- int ret, i;
+ int ret = 0, i;
info = &pctrl->soc->padinfo[pin];
diff --git a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
index 35c17653c694..87618a4e90e4 100644
--- a/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
+++ b/drivers/pinctrl/bcm/pinctrl-nsp-mux.c
@@ -460,8 +460,8 @@ static int nsp_pinmux_enable(struct pinctrl_dev *pctrl_dev,
const struct nsp_pin_function *func;
const struct nsp_pin_group *grp;
- if (grp_select > pinctrl->num_groups ||
- func_select > pinctrl->num_functions)
+ if (grp_select >= pinctrl->num_groups ||
+ func_select >= pinctrl->num_functions)
return -EINVAL;
func = &pinctrl->functions[func_select];
@@ -577,6 +577,8 @@ static int nsp_pinmux_probe(struct platform_device *pdev)
return PTR_ERR(pinctrl->base0);
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ return -EINVAL;
pinctrl->base1 = devm_ioremap_nocache(&pdev->dev, res->start,
resource_size(res));
if (!pinctrl->base1) {
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index b601039d6c69..c4aa411f5935 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -101,10 +101,11 @@ struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
}
static int dt_to_map_one_config(struct pinctrl *p,
- struct pinctrl_dev *pctldev,
+ struct pinctrl_dev *hog_pctldev,
const char *statename,
struct device_node *np_config)
{
+ struct pinctrl_dev *pctldev = NULL;
struct device_node *np_pctldev;
const struct pinctrl_ops *ops;
int ret;
@@ -123,8 +124,10 @@ static int dt_to_map_one_config(struct pinctrl *p,
return -EPROBE_DEFER;
}
/* If we're creating a hog we can use the passed pctldev */
- if (pctldev && (np_pctldev == p->dev->of_node))
+ if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
+ pctldev = hog_pctldev;
break;
+ }
pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
if (pctldev)
break;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index ad6da1184c9f..4c4740ffeb9c 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1424,7 +1424,7 @@ static struct pinctrl_desc mtk_desc = {
static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
{
- struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
int value, err;
err = mtk_hw_get_value(hw, gpio, PINCTRL_PIN_REG_DI, &value);
@@ -1436,7 +1436,7 @@ static int mtk_gpio_get(struct gpio_chip *chip, unsigned int gpio)
static void mtk_gpio_set(struct gpio_chip *chip, unsigned int gpio, int value)
{
- struct mtk_pinctrl *hw = dev_get_drvdata(chip->parent);
+ struct mtk_pinctrl *hw = gpiochip_get_data(chip);
mtk_hw_set_value(hw, gpio, PINCTRL_PIN_REG_DO, !!value);
}
@@ -1459,6 +1459,9 @@ static int mtk_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
unsigned long eint_n;
+ if (!hw->eint)
+ return -ENOTSUPP;
+
eint_n = offset;
return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@ static int mtk_gpio_set_config(struct gpio_chip *chip, unsigned int offset,
unsigned long eint_n;
u32 debounce;
- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ if (!hw->eint ||
+ pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
debounce = pinconf_to_config_argument(config);
@@ -1504,11 +1508,20 @@ static int mtk_build_gpiochip(struct mtk_pinctrl *hw, struct device_node *np)
if (ret < 0)
return ret;
- ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
- chip->ngpio);
- if (ret < 0) {
- gpiochip_remove(chip);
- return ret;
+ /* Just for backward compatible for these old pinctrl nodes without
+ * "gpio-ranges" property. Otherwise, called directly from a
+ * DeviceTree-supported pinctrl driver is DEPRECATED.
+ * Please see Section 2.1 of
+ * Documentation/devicetree/bindings/gpio/gpio.txt on how to
+ * bind pinctrl and gpio drivers via the "gpio-ranges" property.
+ */
+ if (!of_find_property(np, "gpio-ranges", NULL)) {
+ ret = gpiochip_add_pin_range(chip, dev_name(hw->dev), 0, 0,
+ chip->ngpio);
+ if (ret < 0) {
+ gpiochip_remove(chip);
+ return ret;
+ }
}
return 0;
@@ -1691,15 +1704,16 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
mtk_desc.custom_conf_items = mtk_conf_items;
#endif
- hw->pctrl = devm_pinctrl_register(&pdev->dev, &mtk_desc, hw);
- if (IS_ERR(hw->pctrl))
- return PTR_ERR(hw->pctrl);
+ err = devm_pinctrl_register_and_init(&pdev->dev, &mtk_desc, hw,
+ &hw->pctrl);
+ if (err)
+ return err;
/* Setup groups descriptions per SoC types */
err = mtk_build_groups(hw);
if (err) {
dev_err(&pdev->dev, "Failed to build groups\n");
- return 0;
+ return err;
}
/* Setup functions descriptions per SoC types */
@@ -1709,17 +1723,25 @@ static int mtk_pinctrl_probe(struct platform_device *pdev)
return err;
}
- err = mtk_build_gpiochip(hw, pdev->dev.of_node);
- if (err) {
- dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+ /* To allow pinctrl_claim_hogs() to work, we must not enable pinctrl
+ * until all groups and functions have been added.
+ */
+ err = pinctrl_enable(hw->pctrl);
+ if (err)
return err;
- }
err = mtk_build_eint(hw, pdev);
if (err)
dev_warn(&pdev->dev,
"Failed to add EINT, but pinctrl still can work\n");
+ /* Build gpiochip should be after pinctrl_enable is done */
+ err = mtk_build_gpiochip(hw, pdev->dev.of_node);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to add gpio_chip\n");
+ return err;
+ }
+
platform_set_drvdata(pdev, hw);
return 0;
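The switch from dev_get_drvdata(chip->parent) to gpiochip_get_data(chip) only works because the driver registers its private pointer with (devm_)gpiochip_add_data(). A hedged sketch of that pairing, using made-up example_* names:

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

struct example_priv {
	struct gpio_chip chip;
	void __iomem *base;
};

static int example_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	/* Returns whatever was passed as the data argument below. */
	struct example_priv *priv = gpiochip_get_data(chip);

	return !!(ioread32(priv->base) & BIT(offset));
}

static int example_register(struct device *dev, struct example_priv *priv)
{
	priv->chip.get = example_gpio_get;
	/* ... label, parent, ngpio, etc. ... */
	return devm_gpiochip_add_data(dev, &priv->chip, priv);
}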
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index b3799695d8db..16ff56f93501 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1000,11 +1000,6 @@ static int mtk_eint_init(struct mtk_pinctrl *pctl, struct platform_device *pdev)
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get eint resource\n");
- return -ENODEV;
- }
-
pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pctl->eint->base))
return PTR_ERR(pctl->eint->base);
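Dropping the explicit !res check is safe here because devm_ioremap_resource() validates its resource argument itself and returns an ERR_PTR (with an error message) for a NULL or unsuitable resource, which the existing IS_ERR() check already handles. A sketch of that idiom, illustrative names only:

#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *example_map(struct platform_device *pdev, int index)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, index);
	/* devm_ioremap_resource() copes with res == NULL and reports it. */
	return devm_ioremap_resource(&pdev->dev, res);
}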
diff --git a/drivers/pinctrl/pinctrl-ingenic.c b/drivers/pinctrl/pinctrl-ingenic.c
index a1d7156d0a43..6a1b6058b991 100644
--- a/drivers/pinctrl/pinctrl-ingenic.c
+++ b/drivers/pinctrl/pinctrl-ingenic.c
@@ -536,7 +536,7 @@ static int ingenic_pinmux_gpio_set_direction(struct pinctrl_dev *pctldev,
ingenic_config_pin(jzpc, pin, JZ4770_GPIO_PAT1, input);
} else {
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_SELECT, false);
- ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, input);
+ ingenic_config_pin(jzpc, pin, JZ4740_GPIO_DIR, !input);
ingenic_config_pin(jzpc, pin, JZ4740_GPIO_FUNC, false);
}
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b3153c095199..e5647dac0818 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1590,8 +1590,11 @@ static int pcs_save_context(struct pcs_device *pcs)
mux_bytes = pcs->width / BITS_PER_BYTE;
- if (!pcs->saved_vals)
+ if (!pcs->saved_vals) {
pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+ if (!pcs->saved_vals)
+ return -ENOMEM;
+ }
switch (pcs->width) {
case 64:
@@ -1651,8 +1654,13 @@ static int pinctrl_single_suspend(struct platform_device *pdev,
if (!pcs)
return -EINVAL;
- if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
- pcs_save_context(pcs);
+ if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+ int ret;
+
+ ret = pcs_save_context(pcs);
+ if (ret < 0)
+ return ret;
+ }
return pinctrl_force_sleep(pcs->pctl);
}
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
index b02caf316711..eeb58b3bbc9a 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a77970.c
@@ -21,15 +21,13 @@
#include "core.h"
#include "sh_pfc.h"
-#define CFG_FLAGS SH_PFC_PIN_CFG_DRIVE_STRENGTH
-
#define CPU_ALL_PORT(fn, sfx) \
- PORT_GP_CFG_22(0, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_28(1, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_17(2, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_17(3, fn, sfx, CFG_FLAGS | SH_PFC_PIN_CFG_IO_VOLTAGE), \
- PORT_GP_CFG_6(4, fn, sfx, CFG_FLAGS), \
- PORT_GP_CFG_15(5, fn, sfx, CFG_FLAGS)
+ PORT_GP_CFG_22(0, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_28(1, fn, sfx), \
+ PORT_GP_CFG_17(2, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_CFG_17(3, fn, sfx, SH_PFC_PIN_CFG_IO_VOLTAGE), \
+ PORT_GP_6(4, fn, sfx), \
+ PORT_GP_15(5, fn, sfx)
/*
* F_() : just information
* FM() : macro for FN_xxx / xxx_MARK
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index f1fa8612db40..06978c14c83b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2185,7 +2185,7 @@ static int __init dell_init(void)
dell_fill_request(&buffer, token->location, 0, 0, 0);
ret = dell_send_request(&buffer,
CLASS_TOKEN_READ, SELECT_TOKEN_AC);
- if (ret)
+ if (ret == 0)
max_intensity = buffer.output[3];
}
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 767c485af59b..01b0e2bb3319 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -89,6 +89,7 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
case PTP_PF_PHYSYNC:
if (chan != 0)
return -EINVAL;
+ break;
default:
return -EINVAL;
}
@@ -221,7 +222,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
}
pct = &sysoff->ts[0];
for (i = 0; i < sysoff->n_samples; i++) {
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
pct++;
@@ -230,7 +231,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
pct->nsec = ts.tv_nsec;
pct++;
}
- getnstimeofday64(&ts);
+ ktime_get_real_ts64(&ts);
pct->sec = ts.tv_sec;
pct->nsec = ts.tv_nsec;
if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
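The PTP_SYS_OFFSET handler above fills sysoff->ts[] with 2 * n_samples + 1 readings that alternate between system time and PHC time, with the system time now taken via the y2038-safe ktime_get_real_ts64(). A small userspace sketch that consumes this ioctl, assuming a PHC is exposed as /dev/ptp0:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_sys_offset sysoff;
	unsigned int i;
	int fd = open("/dev/ptp0", O_RDONLY);

	if (fd < 0)
		return 1;

	memset(&sysoff, 0, sizeof(sysoff));
	sysoff.n_samples = 5;
	if (ioctl(fd, PTP_SYS_OFFSET, &sysoff)) {
		close(fd);
		return 1;
	}

	/* Samples alternate: sys, phc, sys, phc, ..., sys */
	for (i = 0; i < 2 * sysoff.n_samples + 1; i++)
		printf("%lld.%09u\n",
		       (long long)sysoff.ts[i].sec, sysoff.ts[i].nsec);

	close(fd);
	return 0;
}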
diff --git a/drivers/ptp/ptp_qoriq.c b/drivers/ptp/ptp_qoriq.c
index 1468a1642b49..e8652c148c52 100644
--- a/drivers/ptp/ptp_qoriq.c
+++ b/drivers/ptp/ptp_qoriq.c
@@ -374,7 +374,7 @@ static int qoriq_ptp_probe(struct platform_device *dev)
pr_err("ioremap ptp registers failed\n");
goto no_ioremap;
}
- getnstimeofday64(&now);
+ ktime_get_real_ts64(&now);
ptp_qoriq_settime(&qoriq_ptp->caps, &now);
tmr_ctrl =
diff --git a/drivers/rtc/interface.c b/drivers/rtc/interface.c
index 6d4012dd6922..bac1eeb3d312 100644
--- a/drivers/rtc/interface.c
+++ b/drivers/rtc/interface.c
@@ -265,8 +265,10 @@ int __rtc_read_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
return err;
/* full-function RTCs won't have such missing fields */
- if (rtc_valid_tm(&alarm->time) == 0)
+ if (rtc_valid_tm(&alarm->time) == 0) {
+ rtc_add_offset(rtc, &alarm->time);
return 0;
+ }
/* get the "after" timestamp, to detect wrapped fields */
err = rtc_read_time(rtc, &now);
@@ -409,7 +411,6 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
if (err)
return err;
- rtc_subtract_offset(rtc, &alarm->time);
scheduled = rtc_tm_to_time64(&alarm->time);
/* Make sure we're not setting alarms in the past */
@@ -426,6 +427,8 @@ static int __rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
* over right here, before we set the alarm.
*/
+ rtc_subtract_offset(rtc, &alarm->time);
+
if (!rtc->ops)
err = -ENODEV;
else if (!rtc->ops->set_alarm)
@@ -467,7 +470,6 @@ int rtc_set_alarm(struct rtc_device *rtc, struct rtc_wkalrm *alarm)
mutex_unlock(&rtc->ops_lock);
- rtc_add_offset(rtc, &alarm->time);
return err;
}
EXPORT_SYMBOL_GPL(rtc_set_alarm);
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c
index 097a4d4e2aba..1925aaf09093 100644
--- a/drivers/rtc/rtc-mrst.c
+++ b/drivers/rtc/rtc-mrst.c
@@ -367,10 +367,8 @@ static int vrtc_mrst_do_probe(struct device *dev, struct resource *iomem,
}
retval = rtc_register_device(mrst_rtc.rtc);
- if (retval) {
- retval = PTR_ERR(mrst_rtc.rtc);
+ if (retval)
goto cleanup0;
- }
dev_dbg(dev, "initialised\n");
return 0;
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 73cce3ecb97f..a9f60d0ee02e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -41,6 +41,15 @@
#define DASD_DIAG_MOD "dasd_diag_mod"
+static unsigned int queue_depth = 32;
+static unsigned int nr_hw_queues = 4;
+
+module_param(queue_depth, uint, 0444);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new DASD devices");
+
+module_param(nr_hw_queues, uint, 0444);
+MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD devices");
+
/*
* SECTION: exported variables of dasd.c
*/
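queue_depth and nr_hw_queues are plain read-only module parameters: permissions 0444 expose them under /sys/module/.../parameters/ but they can only be set at load time (for the DASD core that would be along the lines of "modprobe dasd_mod queue_depth=64"). A generic sketch of the pattern with an illustrative name:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* Read-only tunable: visible in sysfs, settable only on the modprobe line. */
static unsigned int example_depth = 32;
module_param(example_depth, uint, 0444);
MODULE_PARM_DESC(example_depth, "Example queue depth for newly created devices");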
@@ -1222,80 +1231,37 @@ static void dasd_hosts_init(struct dentry *base_dentry,
device->hosts_dentry = pde;
}
-/*
- * Allocate memory for a channel program with 'cplength' channel
- * command words and 'datasize' additional space. There are two
- * variantes: 1) dasd_kmalloc_request uses kmalloc to get the needed
- * memory and 2) dasd_smalloc_request uses the static ccw memory
- * that gets allocated for each device.
- */
-struct dasd_ccw_req *dasd_kmalloc_request(int magic, int cplength,
- int datasize,
- struct dasd_device *device)
-{
- struct dasd_ccw_req *cqr;
-
- /* Sanity checks */
- BUG_ON(datasize > PAGE_SIZE ||
- (cplength*sizeof(struct ccw1)) > PAGE_SIZE);
-
- cqr = kzalloc(sizeof(struct dasd_ccw_req), GFP_ATOMIC);
- if (cqr == NULL)
- return ERR_PTR(-ENOMEM);
- cqr->cpaddr = NULL;
- if (cplength > 0) {
- cqr->cpaddr = kcalloc(cplength, sizeof(struct ccw1),
- GFP_ATOMIC | GFP_DMA);
- if (cqr->cpaddr == NULL) {
- kfree(cqr);
- return ERR_PTR(-ENOMEM);
- }
- }
- cqr->data = NULL;
- if (datasize > 0) {
- cqr->data = kzalloc(datasize, GFP_ATOMIC | GFP_DMA);
- if (cqr->data == NULL) {
- kfree(cqr->cpaddr);
- kfree(cqr);
- return ERR_PTR(-ENOMEM);
- }
- }
- cqr->magic = magic;
- set_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
- dasd_get_device(device);
- return cqr;
-}
-EXPORT_SYMBOL(dasd_kmalloc_request);
-
-struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
- int datasize,
- struct dasd_device *device)
+struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength, int datasize,
+ struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
{
unsigned long flags;
- struct dasd_ccw_req *cqr;
- char *data;
- int size;
+ char *data, *chunk;
+ int size = 0;
- size = (sizeof(struct dasd_ccw_req) + 7L) & -8L;
if (cplength > 0)
size += cplength * sizeof(struct ccw1);
if (datasize > 0)
size += datasize;
+ if (!cqr)
+ size += (sizeof(*cqr) + 7L) & -8L;
+
spin_lock_irqsave(&device->mem_lock, flags);
- cqr = (struct dasd_ccw_req *)
- dasd_alloc_chunk(&device->ccw_chunks, size);
+ data = chunk = dasd_alloc_chunk(&device->ccw_chunks, size);
spin_unlock_irqrestore(&device->mem_lock, flags);
- if (cqr == NULL)
+ if (!chunk)
return ERR_PTR(-ENOMEM);
- memset(cqr, 0, sizeof(struct dasd_ccw_req));
- data = (char *) cqr + ((sizeof(struct dasd_ccw_req) + 7L) & -8L);
- cqr->cpaddr = NULL;
+ if (!cqr) {
+ cqr = (void *) data;
+ data += (sizeof(*cqr) + 7L) & -8L;
+ }
+ memset(cqr, 0, sizeof(*cqr));
+ cqr->mem_chunk = chunk;
if (cplength > 0) {
- cqr->cpaddr = (struct ccw1 *) data;
- data += cplength*sizeof(struct ccw1);
- memset(cqr->cpaddr, 0, cplength*sizeof(struct ccw1));
+ cqr->cpaddr = data;
+ data += cplength * sizeof(struct ccw1);
+ memset(cqr->cpaddr, 0, cplength * sizeof(struct ccw1));
}
- cqr->data = NULL;
if (datasize > 0) {
cqr->data = data;
memset(cqr->data, 0, datasize);
@@ -1307,33 +1273,12 @@ struct dasd_ccw_req *dasd_smalloc_request(int magic, int cplength,
}
EXPORT_SYMBOL(dasd_smalloc_request);
-/*
- * Free memory of a channel program. This function needs to free all the
- * idal lists that might have been created by dasd_set_cda and the
- * struct dasd_ccw_req itself.
- */
-void dasd_kfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
-{
- struct ccw1 *ccw;
-
- /* Clear any idals used for the request. */
- ccw = cqr->cpaddr;
- do {
- clear_normalized_cda(ccw);
- } while (ccw++->flags & (CCW_FLAG_CC | CCW_FLAG_DC));
- kfree(cqr->cpaddr);
- kfree(cqr->data);
- kfree(cqr);
- dasd_put_device(device);
-}
-EXPORT_SYMBOL(dasd_kfree_request);
-
void dasd_sfree_request(struct dasd_ccw_req *cqr, struct dasd_device *device)
{
unsigned long flags;
spin_lock_irqsave(&device->mem_lock, flags);
- dasd_free_chunk(&device->ccw_chunks, cqr);
+ dasd_free_chunk(&device->ccw_chunks, cqr->mem_chunk);
spin_unlock_irqrestore(&device->mem_lock, flags);
dasd_put_device(device);
}
@@ -1885,6 +1830,33 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
}
}
+static void __dasd_process_cqr(struct dasd_device *device,
+ struct dasd_ccw_req *cqr)
+{
+ char errorstring[ERRORLENGTH];
+
+ switch (cqr->status) {
+ case DASD_CQR_SUCCESS:
+ cqr->status = DASD_CQR_DONE;
+ break;
+ case DASD_CQR_ERROR:
+ cqr->status = DASD_CQR_NEED_ERP;
+ break;
+ case DASD_CQR_CLEARED:
+ cqr->status = DASD_CQR_TERMINATED;
+ break;
+ default:
+ /* internal error 12 - wrong cqr status*/
+ snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
+ dev_err(&device->cdev->dev,
+ "An error occurred in the DASD device driver, "
+ "reason=%s\n", errorstring);
+ BUG();
+ }
+ if (cqr->callback)
+ cqr->callback(cqr, cqr->callback_data);
+}
+
/*
* the cqrs from the final queue are returned to the upper layer
* by setting a dasd_block state and calling the callback function
@@ -1895,40 +1867,18 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
struct list_head *l, *n;
struct dasd_ccw_req *cqr;
struct dasd_block *block;
- void (*callback)(struct dasd_ccw_req *, void *data);
- void *callback_data;
- char errorstring[ERRORLENGTH];
list_for_each_safe(l, n, final_queue) {
cqr = list_entry(l, struct dasd_ccw_req, devlist);
list_del_init(&cqr->devlist);
block = cqr->block;
- callback = cqr->callback;
- callback_data = cqr->callback_data;
- if (block)
+ if (!block) {
+ __dasd_process_cqr(device, cqr);
+ } else {
spin_lock_bh(&block->queue_lock);
- switch (cqr->status) {
- case DASD_CQR_SUCCESS:
- cqr->status = DASD_CQR_DONE;
- break;
- case DASD_CQR_ERROR:
- cqr->status = DASD_CQR_NEED_ERP;
- break;
- case DASD_CQR_CLEARED:
- cqr->status = DASD_CQR_TERMINATED;
- break;
- default:
- /* internal error 12 - wrong cqr status*/
- snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
- BUG();
- }
- if (cqr->callback != NULL)
- (callback)(cqr, callback_data);
- if (block)
+ __dasd_process_cqr(device, cqr);
spin_unlock_bh(&block->queue_lock);
+ }
}
}
@@ -3041,7 +2991,6 @@ static blk_status_t do_dasd_request(struct blk_mq_hw_ctx *hctx,
cqr->callback_data = req;
cqr->status = DASD_CQR_FILLED;
cqr->dq = dq;
- *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req)) = cqr;
blk_mq_start_request(req);
spin_lock(&block->queue_lock);
@@ -3072,7 +3021,7 @@ enum blk_eh_timer_return dasd_times_out(struct request *req, bool reserved)
unsigned long flags;
int rc = 0;
- cqr = *((struct dasd_ccw_req **) blk_mq_rq_to_pdu(req));
+ cqr = blk_mq_rq_to_pdu(req);
if (!cqr)
return BLK_EH_DONE;
@@ -3174,9 +3123,9 @@ static int dasd_alloc_queue(struct dasd_block *block)
int rc;
block->tag_set.ops = &dasd_mq_ops;
- block->tag_set.cmd_size = sizeof(struct dasd_ccw_req *);
- block->tag_set.nr_hw_queues = DASD_NR_HW_QUEUES;
- block->tag_set.queue_depth = DASD_MAX_LCU_DEV * DASD_REQ_PER_DEV;
+ block->tag_set.cmd_size = sizeof(struct dasd_ccw_req);
+ block->tag_set.nr_hw_queues = nr_hw_queues;
+ block->tag_set.queue_depth = queue_depth;
block->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
rc = blk_mq_alloc_tag_set(&block->tag_set);
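With cmd_size = sizeof(struct dasd_ccw_req) the request structure itself lives in the blk-mq per-request PDU, so blk_mq_rq_to_pdu() hands it back directly (see the dasd_times_out() hunk above) instead of dereferencing a stored pointer. A generic, hedged sketch of that embedding, using example_* placeholders:

#include <linux/blk-mq.h>

struct example_cmd {
	int status;
};

static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	/* The per-request struct is allocated by blk-mq via cmd_size. */
	struct example_cmd *cmd = blk_mq_rq_to_pdu(bd->rq);

	cmd->status = 0;
	blk_mq_start_request(bd->rq);
	/* ... submit the hardware request ... */
	return BLK_STS_OK;
}

static int example_init_tags(struct blk_mq_tag_set *set,
			     const struct blk_mq_ops *ops)
{
	set->ops = ops;
	set->nr_hw_queues = 4;
	set->queue_depth = 32;
	set->cmd_size = sizeof(struct example_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	return blk_mq_alloc_tag_set(set);
}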
@@ -4038,7 +3987,8 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
struct ccw1 *ccw;
unsigned long *idaw;
- cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device);
+ cqr = dasd_smalloc_request(magic, 1 /* RDC */, rdc_buffer_size, device,
+ NULL);
if (IS_ERR(cqr)) {
/* internal error 13 - Allocating the RDC request failed*/
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 5e963fe0e38d..e36a114354fc 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -407,9 +407,9 @@ static int read_unit_address_configuration(struct dasd_device *device,
int rc;
unsigned long flags;
- cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data)),
- device);
+ device, NULL);
if (IS_ERR(cqr))
return PTR_ERR(cqr);
cqr->startdev = device;
@@ -457,7 +457,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
lcu->flags |= NEED_UAC_UPDATE;
spin_unlock_irqrestore(&lcu->lock, flags);
}
- dasd_kfree_request(cqr, cqr->memdev);
+ dasd_sfree_request(cqr, cqr->memdev);
return rc;
}
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 131f1989f6f3..e1fe02477ea8 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -536,7 +536,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
/* Build the request */
datasize = sizeof(struct dasd_diag_req) +
count*sizeof(struct dasd_diag_bio);
- cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_DIAG_MAGIC, 0, datasize, memdev,
+ blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index be208e7adcb4..bbf95b78ef5d 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -886,7 +886,7 @@ static int dasd_eckd_read_conf_lpm(struct dasd_device *device,
}
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* RCD */,
0, /* use rcd_buf as data ara */
- device);
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate RCD request");
@@ -1442,7 +1442,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_features)),
- device);
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s", "Could not "
"allocate initialization request");
@@ -1504,7 +1504,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
sizeof(struct dasd_psf_ssc_data),
- device);
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
@@ -1815,7 +1815,8 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
cplength = 8;
datasize = sizeof(struct DE_eckd_data) + 2*sizeof(struct LO_eckd_data);
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device,
+ NULL);
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -2092,7 +2093,8 @@ dasd_eckd_build_check_tcw(struct dasd_device *base, struct format_data_t *fdata,
*/
itcw_size = itcw_calc_size(0, count, 0);
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+ NULL);
if (IS_ERR(cqr))
return cqr;
@@ -2186,7 +2188,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
cplength += count;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
- startdev);
+ startdev, NULL);
if (IS_ERR(cqr))
return cqr;
@@ -2332,7 +2334,7 @@ dasd_eckd_build_format(struct dasd_device *base,
}
/* Allocate the format ccw request. */
fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
- datasize, startdev);
+ datasize, startdev, NULL);
if (IS_ERR(fcp))
return fcp;
@@ -3103,7 +3105,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
}
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
- startdev);
+ startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -3262,7 +3264,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize,
- startdev);
+ startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
@@ -3595,7 +3597,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
/* Allocate the ccw request. */
itcw_size = itcw_calc_size(0, ctidaw, 0);
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 0, itcw_size, startdev,
+ blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
@@ -3862,7 +3865,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
/* Allocate the ccw request. */
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength,
- datasize, startdev);
+ datasize, startdev, blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
@@ -4102,7 +4105,7 @@ dasd_eckd_release(struct dasd_device *device)
return -EACCES;
useglobal = 0;
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
@@ -4157,7 +4160,7 @@ dasd_eckd_reserve(struct dasd_device *device)
return -EACCES;
useglobal = 0;
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
@@ -4211,7 +4214,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
return -EACCES;
useglobal = 0;
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1, 32, device, NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
@@ -4271,7 +4274,8 @@ static int dasd_eckd_snid(struct dasd_device *device,
useglobal = 0;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1,
- sizeof(struct dasd_snid_data), device);
+ sizeof(struct dasd_snid_data), device,
+ NULL);
if (IS_ERR(cqr)) {
mutex_lock(&dasd_reserve_mutex);
useglobal = 1;
@@ -4331,7 +4335,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_perf_stats_t)),
- device);
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -4477,7 +4481,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
psf1 = psf_data[1];
/* setup CCWs for PSF + RSSD */
- cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2 , 0, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 2, 0, device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
"Could not allocate initialization request");
@@ -5037,7 +5041,7 @@ static int dasd_eckd_read_message_buffer(struct dasd_device *device,
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
(sizeof(struct dasd_psf_prssd_data) +
sizeof(struct dasd_rssd_messages)),
- device);
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
@@ -5126,7 +5130,7 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
sizeof(struct dasd_psf_prssd_data) + 1,
- device);
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
"Could not allocate read message buffer request");
@@ -5284,8 +5288,8 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
int rc;
cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ ,
- sizeof(struct dasd_psf_cuir_response),
- device);
+ sizeof(struct dasd_psf_cuir_response),
+ device, NULL);
if (IS_ERR(cqr)) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index 0af8c5295b65..6ef8714dc693 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -447,7 +447,7 @@ static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
* is a new ccw in device->eer_cqr. Free the "old"
* snss request now.
*/
- dasd_kfree_request(cqr, device);
+ dasd_sfree_request(cqr, device);
}
/*
@@ -472,8 +472,8 @@ int dasd_eer_enable(struct dasd_device *device)
if (rc)
goto out;
- cqr = dasd_kmalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
- SNSS_DATA_SIZE, device);
+ cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
+ SNSS_DATA_SIZE, device, NULL);
if (IS_ERR(cqr)) {
rc = -ENOMEM;
cqr = NULL;
@@ -505,7 +505,7 @@ out:
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr)
- dasd_kfree_request(cqr, device);
+ dasd_sfree_request(cqr, device);
return rc;
}
@@ -528,7 +528,7 @@ void dasd_eer_disable(struct dasd_device *device)
in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
if (cqr && !in_use)
- dasd_kfree_request(cqr, device);
+ dasd_sfree_request(cqr, device);
}
/*
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index a6b132f7e869..56007a3e7f11 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -356,7 +356,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_discard(
datasize = sizeof(struct DE_fba_data) +
nr_ccws * (sizeof(struct LO_fba_data) + sizeof(struct ccw1));
- cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+ blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
@@ -490,7 +491,8 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
datasize += (count - 1)*sizeof(struct LO_fba_data);
}
/* Allocate the ccw request. */
- cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev);
+ cqr = dasd_smalloc_request(DASD_FBA_MAGIC, cplength, datasize, memdev,
+ blk_mq_rq_to_pdu(req));
if (IS_ERR(cqr))
return cqr;
ccw = cqr->cpaddr;
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 96709b1a7bf8..de6b96036aa4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -158,40 +158,33 @@ do { \
struct dasd_ccw_req {
unsigned int magic; /* Eye catcher */
+ int intrc; /* internal error, e.g. from start_IO */
struct list_head devlist; /* for dasd_device request queue */
struct list_head blocklist; /* for dasd_block request queue */
-
- /* Where to execute what... */
struct dasd_block *block; /* the originating block device */
struct dasd_device *memdev; /* the device used to allocate this */
struct dasd_device *startdev; /* device the request is started on */
struct dasd_device *basedev; /* base device if no block->base */
void *cpaddr; /* address of ccw or tcw */
+ short retries; /* A retry counter */
unsigned char cpmode; /* 0 = cmd mode, 1 = itcw */
char status; /* status of this request */
- short retries; /* A retry counter */
+ char lpm; /* logical path mask */
unsigned long flags; /* flags of this request */
struct dasd_queue *dq;
-
- /* ... and how */
unsigned long starttime; /* jiffies time of request start */
unsigned long expires; /* expiration period in jiffies */
- char lpm; /* logical path mask */
void *data; /* pointer to data area */
-
- /* these are important for recovering erroneous requests */
- int intrc; /* internal error, e.g. from start_IO */
struct irb irb; /* device status in case of an error */
struct dasd_ccw_req *refers; /* ERP-chain queueing. */
void *function; /* originating ERP action */
+ void *mem_chunk;
- /* these are for statistics only */
unsigned long buildclk; /* TOD-clock of request generation */
unsigned long startclk; /* TOD-clock of request start */
unsigned long stopclk; /* TOD-clock of request interrupt */
unsigned long endclk; /* TOD-clock of request termination */
- /* Callback that is called after reaching final status. */
void (*callback)(struct dasd_ccw_req *, void *data);
void *callback_data;
};
@@ -235,14 +228,6 @@ struct dasd_ccw_req {
#define DASD_CQR_SUPPRESS_IL 6 /* Suppress 'Incorrect Length' error */
#define DASD_CQR_SUPPRESS_CR 7 /* Suppress 'Command Reject' error */
-/*
- * There is no reliable way to determine the number of available CPUs on
- * LPAR but there is no big performance difference between 1 and the
- * maximum CPU number.
- * 64 is a good trade off performance wise.
- */
-#define DASD_NR_HW_QUEUES 64
-#define DASD_MAX_LCU_DEV 256
#define DASD_REQ_PER_DEV 4
/* Signature for error recovery functions. */
@@ -714,19 +699,10 @@ extern const struct block_device_operations dasd_device_operations;
extern struct kmem_cache *dasd_page_cache;
struct dasd_ccw_req *
-dasd_kmalloc_request(int , int, int, struct dasd_device *);
-struct dasd_ccw_req *
-dasd_smalloc_request(int , int, int, struct dasd_device *);
-void dasd_kfree_request(struct dasd_ccw_req *, struct dasd_device *);
+dasd_smalloc_request(int, int, int, struct dasd_device *, struct dasd_ccw_req *);
void dasd_sfree_request(struct dasd_ccw_req *, struct dasd_device *);
void dasd_wakeup_cb(struct dasd_ccw_req *, void *);
-static inline int
-dasd_kmalloc_set_cda(struct ccw1 *ccw, void *cda, struct dasd_device *device)
-{
- return set_normalized_cda(ccw, cda);
-}
-
struct dasd_device *dasd_alloc_device(void);
void dasd_free_device(struct dasd_device *);
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile
index a070ef0efe65..f230516abb96 100644
--- a/drivers/s390/cio/Makefile
+++ b/drivers/s390/cio/Makefile
@@ -5,6 +5,7 @@
# The following is required for define_trace.h to find ./trace.h
CFLAGS_trace.o := -I$(src)
+CFLAGS_vfio_ccw_fsm.o := -I$(src)
obj-y += airq.o blacklist.o chsc.o cio.o css.o chp.o idset.o isc.o \
fcx.o itcw.o crw.o ccwreq.o trace.o ioasm.o
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index dce92b2a895d..dbe7c7ac9ac8 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -23,9 +23,13 @@
#define CCWCHAIN_LEN_MAX 256
struct pfn_array {
+ /* Starting guest physical I/O address. */
unsigned long pa_iova;
+ /* Array that stores PFNs of the pages that need to be pinned. */
unsigned long *pa_iova_pfn;
+ /* Array that receives PFNs of the pages pinned. */
unsigned long *pa_pfn;
+ /* Number of pages pinned from @pa_iova. */
int pa_nr;
};
@@ -46,70 +50,33 @@ struct ccwchain {
};
/*
- * pfn_array_pin() - pin user pages in memory
+ * pfn_array_alloc_pin() - alloc memory for PFNs, then pin user pages in memory
* @pa: pfn_array on which to perform the operation
* @mdev: the mediated device to perform pin/unpin operations
+ * @iova: target guest physical address
+ * @len: number of bytes that should be pinned from @iova
*
- * Attempt to pin user pages in memory.
+ * Attempt to allocate memory for PFNs, and pin user pages in memory.
*
* Usage of pfn_array:
- * @pa->pa_iova starting guest physical I/O address. Assigned by caller.
- * @pa->pa_iova_pfn array that stores PFNs of the pages need to pin. Allocated
- * by caller.
- * @pa->pa_pfn array that receives PFNs of the pages pinned. Allocated by
- * caller.
- * @pa->pa_nr number of pages from @pa->pa_iova to pin. Assigned by
- * caller.
- * number of pages pinned. Assigned by callee.
+ * We expect (pa_nr == 0) and (pa_iova_pfn == NULL); every field in
+ * this structure will be filled in by this function.
*
* Returns:
* Number of pages pinned on success.
- * If @pa->pa_nr is 0 or negative, returns 0.
+ * If @pa->pa_nr is not 0, or @pa->pa_iova_pfn is not NULL initially,
+ * returns -EINVAL.
* If no pages were pinned, returns -errno.
*/
-static int pfn_array_pin(struct pfn_array *pa, struct device *mdev)
-{
- int i, ret;
-
- if (pa->pa_nr <= 0) {
- pa->pa_nr = 0;
- return 0;
- }
-
- pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
- for (i = 1; i < pa->pa_nr; i++)
- pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
-
- ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
- IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
-
- if (ret > 0 && ret != pa->pa_nr) {
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
- pa->pa_nr = 0;
- return 0;
- }
-
- return ret;
-}
-
-/* Unpin the pages before releasing the memory. */
-static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
-{
- vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
- pa->pa_nr = 0;
- kfree(pa->pa_iova_pfn);
-}
-
-/* Alloc memory for PFNs, then pin pages with them. */
static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
u64 iova, unsigned int len)
{
- int ret = 0;
+ int i, ret = 0;
if (!len)
return 0;
- if (pa->pa_nr)
+ if (pa->pa_nr || pa->pa_iova_pfn)
return -EINVAL;
pa->pa_iova = iova;
@@ -126,18 +93,39 @@ static int pfn_array_alloc_pin(struct pfn_array *pa, struct device *mdev,
return -ENOMEM;
pa->pa_pfn = pa->pa_iova_pfn + pa->pa_nr;
- ret = pfn_array_pin(pa, mdev);
+ pa->pa_iova_pfn[0] = pa->pa_iova >> PAGE_SHIFT;
+ for (i = 1; i < pa->pa_nr; i++)
+ pa->pa_iova_pfn[i] = pa->pa_iova_pfn[i - 1] + 1;
- if (ret > 0)
- return ret;
- else if (!ret)
+ ret = vfio_pin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr,
+ IOMMU_READ | IOMMU_WRITE, pa->pa_pfn);
+
+ if (ret < 0) {
+ goto err_out;
+ } else if (ret > 0 && ret != pa->pa_nr) {
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, ret);
ret = -EINVAL;
+ goto err_out;
+ }
+ return ret;
+
+err_out:
+ pa->pa_nr = 0;
kfree(pa->pa_iova_pfn);
+ pa->pa_iova_pfn = NULL;
return ret;
}
+/* Unpin the pages before releasing the memory. */
+static void pfn_array_unpin_free(struct pfn_array *pa, struct device *mdev)
+{
+ vfio_unpin_pages(mdev, pa->pa_iova_pfn, pa->pa_nr);
+ pa->pa_nr = 0;
+ kfree(pa->pa_iova_pfn);
+}
+
static int pfn_array_table_init(struct pfn_array_table *pat, int nr)
{
pat->pat_pa = kcalloc(nr, sizeof(*pat->pat_pa), GFP_KERNEL);
@@ -365,6 +353,9 @@ static void cp_unpin_free(struct channel_program *cp)
* This is the chain length not considering any TICs.
* You need to do a new round for each TIC target.
*
+ * The program is also validated for absence of not yet supported
+ * indirect data addressing scenarios.
+ *
* Returns: the length of the ccw chain or -errno.
*/
static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
@@ -391,6 +382,14 @@ static int ccwchain_calc_length(u64 iova, struct channel_program *cp)
do {
cnt++;
+ /*
+ * As we don't want to fail direct addressing even if the
+ * orb specified one of the unsupported formats, we defer
+ * checking for IDAWs in unsupported formats to here.
+ */
+ if ((!cp->orb.cmd.c64 || cp->orb.cmd.i2k) && ccw_is_idal(ccw))
+ return -EOPNOTSUPP;
+
if ((!ccw_is_chain(ccw)) && (!ccw_is_tic(ccw)))
break;
@@ -503,7 +502,7 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
struct ccw1 *ccw;
struct pfn_array_table *pat;
unsigned long *idaws;
- int idaw_nr;
+ int ret;
ccw = chain->ch_ccw + idx;
@@ -523,18 +522,19 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
* needed when translating a direct ccw to a idal ccw.
*/
pat = chain->ch_pat + idx;
- if (pfn_array_table_init(pat, 1))
- return -ENOMEM;
- idaw_nr = pfn_array_alloc_pin(pat->pat_pa, cp->mdev,
- ccw->cda, ccw->count);
- if (idaw_nr < 0)
- return idaw_nr;
+ ret = pfn_array_table_init(pat, 1);
+ if (ret)
+ goto out_init;
+
+ ret = pfn_array_alloc_pin(pat->pat_pa, cp->mdev, ccw->cda, ccw->count);
+ if (ret < 0)
+ goto out_init;
/* Translate this direct ccw to a idal ccw. */
- idaws = kcalloc(idaw_nr, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
+ idaws = kcalloc(ret, sizeof(*idaws), GFP_DMA | GFP_KERNEL);
if (!idaws) {
- pfn_array_table_unpin_free(pat, cp->mdev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto out_unpin;
}
ccw->cda = (__u32) virt_to_phys(idaws);
ccw->flags |= CCW_FLAG_IDA;
@@ -542,6 +542,12 @@ static int ccwchain_fetch_direct(struct ccwchain *chain,
pfn_array_table_idal_create_words(pat, idaws);
return 0;
+
+out_unpin:
+ pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+ ccw->cda = 0;
+ return ret;
}
static int ccwchain_fetch_idal(struct ccwchain *chain,
@@ -571,7 +577,7 @@ static int ccwchain_fetch_idal(struct ccwchain *chain,
pat = chain->ch_pat + idx;
ret = pfn_array_table_init(pat, idaw_nr);
if (ret)
- return ret;
+ goto out_init;
/* Translate idal ccw to use new allocated idaws. */
idaws = kzalloc(idaw_len, GFP_DMA | GFP_KERNEL);
@@ -603,6 +609,8 @@ out_free_idaws:
kfree(idaws);
out_unpin:
pfn_array_table_unpin_free(pat, cp->mdev);
+out_init:
+ ccw->cda = 0;
return ret;
}
@@ -656,10 +664,8 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
/*
* XXX:
* Only support prefetch enable mode now.
- * Only support 64bit addressing idal.
- * Only support 4k IDAW.
*/
- if (!orb->cmd.pfch || !orb->cmd.c64 || orb->cmd.i2k)
+ if (!orb->cmd.pfch)
return -EOPNOTSUPP;
INIT_LIST_HEAD(&cp->ccwchain_list);
@@ -688,6 +694,10 @@ int cp_init(struct channel_program *cp, struct device *mdev, union orb *orb)
ret = ccwchain_loop_tic(chain, cp);
if (ret)
cp_unpin_free(cp);
+ /* It is safe to force this bit: if it is not set but IDALs are used,
+ * ccwchain_calc_length() returns an error.
+ */
+ cp->orb.cmd.c64 = 1;
return ret;
}
diff --git a/drivers/s390/cio/vfio_ccw_drv.c b/drivers/s390/cio/vfio_ccw_drv.c
index ea6a2d0b2894..770fa9cfc310 100644
--- a/drivers/s390/cio/vfio_ccw_drv.c
+++ b/drivers/s390/cio/vfio_ccw_drv.c
@@ -177,6 +177,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
{
struct vfio_ccw_private *private = dev_get_drvdata(&sch->dev);
unsigned long flags;
+ int rc = -EAGAIN;
spin_lock_irqsave(sch->lock, flags);
if (!device_is_registered(&sch->dev))
@@ -187,6 +188,7 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
if (cio_update_schib(sch)) {
vfio_ccw_fsm_event(private, VFIO_CCW_EVENT_NOT_OPER);
+ rc = 0;
goto out_unlock;
}
@@ -195,11 +197,12 @@ static int vfio_ccw_sch_event(struct subchannel *sch, int process)
private->state = private->mdev ? VFIO_CCW_STATE_IDLE :
VFIO_CCW_STATE_STANDBY;
}
+ rc = 0;
out_unlock:
spin_unlock_irqrestore(sch->lock, flags);
- return 0;
+ return rc;
}
static struct css_device_id vfio_ccw_sch_ids[] = {
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 3c800642134e..797a82731159 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -13,6 +13,9 @@
#include "ioasm.h"
#include "vfio_ccw_private.h"
+#define CREATE_TRACE_POINTS
+#include "vfio_ccw_trace.h"
+
static int fsm_io_helper(struct vfio_ccw_private *private)
{
struct subchannel *sch;
@@ -110,6 +113,10 @@ static void fsm_disabled_irq(struct vfio_ccw_private *private,
*/
cio_disable_subchannel(sch);
}
+inline struct subchannel_id get_schid(struct vfio_ccw_private *p)
+{
+ return p->sch->schid;
+}
/*
* Deal with the ccw command request from the userspace.
@@ -121,6 +128,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
union scsw *scsw = &private->scsw;
struct ccw_io_region *io_region = &private->io_region;
struct mdev_device *mdev = private->mdev;
+ char *errstr = "request";
private->state = VFIO_CCW_STATE_BOXED;
@@ -132,15 +140,19 @@ static void fsm_io_request(struct vfio_ccw_private *private,
/* Don't try to build a cp if transport mode is specified. */
if (orb->tm.b) {
io_region->ret_code = -EOPNOTSUPP;
+ errstr = "transport mode";
goto err_out;
}
io_region->ret_code = cp_init(&private->cp, mdev_dev(mdev),
orb);
- if (io_region->ret_code)
+ if (io_region->ret_code) {
+ errstr = "cp init";
goto err_out;
+ }
io_region->ret_code = cp_prefetch(&private->cp);
if (io_region->ret_code) {
+ errstr = "cp prefetch";
cp_free(&private->cp);
goto err_out;
}
@@ -148,6 +160,7 @@ static void fsm_io_request(struct vfio_ccw_private *private,
/* Start channel program and wait for I/O interrupt. */
io_region->ret_code = fsm_io_helper(private);
if (io_region->ret_code) {
+ errstr = "cp fsm_io_helper";
cp_free(&private->cp);
goto err_out;
}
@@ -164,6 +177,8 @@ static void fsm_io_request(struct vfio_ccw_private *private,
err_out:
private->state = VFIO_CCW_STATE_IDLE;
+ trace_vfio_ccw_io_fctl(scsw->cmd.fctl, get_schid(private),
+ io_region->ret_code, errstr);
}
/*
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
new file mode 100644
index 000000000000..b1da53ddec1f
--- /dev/null
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0
+ * Tracepoints for vfio_ccw driver
+ *
+ * Copyright IBM Corp. 2018
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ * Halil Pasic <pasic@linux.vnet.ibm.com>
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vfio_ccw
+
+#if !defined(_VFIO_CCW_TRACE_) || defined(TRACE_HEADER_MULTI_READ)
+#define _VFIO_CCW_TRACE_
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(vfio_ccw_io_fctl,
+ TP_PROTO(int fctl, struct subchannel_id schid, int errno, char *errstr),
+ TP_ARGS(fctl, schid, errno, errstr),
+
+ TP_STRUCT__entry(
+ __field(int, fctl)
+ __field_struct(struct subchannel_id, schid)
+ __field(int, errno)
+ __field(char*, errstr)
+ ),
+
+ TP_fast_assign(
+ __entry->fctl = fctl;
+ __entry->schid = schid;
+ __entry->errno = errno;
+ __entry->errstr = errstr;
+ ),
+
+ TP_printk("schid=%x.%x.%04x fctl=%x errno=%d info=%s",
+ __entry->schid.cssid,
+ __entry->schid.ssid,
+ __entry->schid.sch_no,
+ __entry->fctl,
+ __entry->errno,
+ __entry->errstr)
+);
+
+#endif /* _VFIO_CCW_TRACE_ */
+
+/* This part must be outside protection */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE vfio_ccw_trace
+
+#include <trace/define_trace.h>
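The new header follows the usual tracepoint plumbing: exactly one compilation unit defines CREATE_TRACE_POINTS before including it (here vfio_ccw_fsm.c, which is why the Makefile hunk adds -I$(src) for that object), and callers then simply invoke trace_vfio_ccw_io_fctl(). A sketch of the emitting side, mirroring the fsm_io_request() call above; get_schid() is the helper introduced in the fsm hunk:

/* In exactly one .c file of the driver (as done in vfio_ccw_fsm.c): */
#define CREATE_TRACE_POINTS
#include "vfio_ccw_trace.h"

#include "vfio_ccw_private.h"

static void example_trace_io(struct vfio_ccw_private *private, int rc)
{
	trace_vfio_ccw_io_fctl(private->scsw.cmd.fctl, get_schid(private),
			       rc, "example");
}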
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 2a5fec55bf60..a246a618f9a4 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -829,6 +829,17 @@ struct qeth_trap_id {
/*some helper functions*/
#define QETH_CARD_IFNAME(card) (((card)->dev)? (card)->dev->name : "")
+static inline void qeth_scrub_qdio_buffer(struct qdio_buffer *buf,
+ unsigned int elements)
+{
+ unsigned int i;
+
+ for (i = 0; i < elements; i++)
+ memset(&buf->element[i], 0, sizeof(struct qdio_buffer_element));
+ buf->element[14].sflags = 0;
+ buf->element[15].sflags = 0;
+}
+
/**
* qeth_get_elements_for_range() - find number of SBALEs to cover range.
* @start: Start of the address range.
@@ -1029,7 +1040,7 @@ struct qeth_cmd_buffer *qeth_get_setassparms_cmd(struct qeth_card *,
__u16, __u16,
enum qeth_prot_versions);
int qeth_set_features(struct net_device *, netdev_features_t);
-void qeth_recover_features(struct net_device *dev);
+void qeth_enable_hw_features(struct net_device *dev);
netdev_features_t qeth_fix_features(struct net_device *, netdev_features_t);
netdev_features_t qeth_features_check(struct sk_buff *skb,
struct net_device *dev,
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 8e1474f1ffac..d01ac29fd986 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -73,9 +73,6 @@ static void qeth_notify_skbs(struct qeth_qdio_out_q *queue,
struct qeth_qdio_out_buffer *buf,
enum iucv_tx_notify notification);
static void qeth_release_skbs(struct qeth_qdio_out_buffer *buf);
-static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
- struct qeth_qdio_out_buffer *buf,
- enum qeth_qdio_buffer_states newbufstate);
static int qeth_init_qdio_out_buf(struct qeth_qdio_out_q *, int);
struct workqueue_struct *qeth_wq;
@@ -489,6 +486,7 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
struct qaob *aob;
struct qeth_qdio_out_buffer *buffer;
enum iucv_tx_notify notification;
+ unsigned int i;
aob = (struct qaob *) phys_to_virt(phys_aob_addr);
QETH_CARD_TEXT(card, 5, "haob");
@@ -513,10 +511,18 @@ static void qeth_qdio_handle_aob(struct qeth_card *card,
qeth_notify_skbs(buffer->q, buffer, notification);
buffer->aob = NULL;
- qeth_clear_output_buffer(buffer->q, buffer,
- QETH_QDIO_BUF_HANDLED_DELAYED);
+ /* Free dangling allocations. The attached skbs are handled by
+ * qeth_cleanup_handled_pending().
+ */
+ for (i = 0;
+ i < aob->sb_count && i < QETH_MAX_BUFFER_ELEMENTS(card);
+ i++) {
+ if (aob->sba[i] && buffer->is_header[i])
+ kmem_cache_free(qeth_core_header_cache,
+ (void *) aob->sba[i]);
+ }
+ atomic_set(&buffer->state, QETH_QDIO_BUF_HANDLED_DELAYED);
- /* from here on: do not touch buffer anymore */
qdio_release_aob(aob);
}
@@ -3759,6 +3765,10 @@ static void qeth_qdio_output_handler(struct ccw_device *ccwdev,
QETH_CARD_TEXT(queue->card, 5, "aob");
QETH_CARD_TEXT_(queue->card, 5, "%lx",
virt_to_phys(buffer->aob));
+
+ /* prepare the queue slot for re-use: */
+ qeth_scrub_qdio_buffer(buffer->buffer,
+ QETH_MAX_BUFFER_ELEMENTS(card));
if (qeth_init_qdio_out_buf(queue, bidx)) {
QETH_CARD_TEXT(card, 2, "outofbuf");
qeth_schedule_recovery(card);
@@ -4834,7 +4844,7 @@ int qeth_vm_request_mac(struct qeth_card *card)
goto out;
}
- ccw_device_get_id(CARD_RDEV(card), &id);
+ ccw_device_get_id(CARD_DDEV(card), &id);
request->resp_buf_len = sizeof(*response);
request->resp_version = DIAG26C_VERSION2;
request->op_code = DIAG26C_GET_MAC;
@@ -6459,28 +6469,27 @@ static int qeth_set_ipa_rx_csum(struct qeth_card *card, bool on)
#define QETH_HW_FEATURES (NETIF_F_RXCSUM | NETIF_F_IP_CSUM | NETIF_F_TSO | \
NETIF_F_IPV6_CSUM)
/**
- * qeth_recover_features() - Restore device features after recovery
- * @dev: the recovering net_device
- *
- * Caller must hold rtnl lock.
+ * qeth_enable_hw_features() - (Re-)Enable HW functions for device features
+ * @dev: a net_device
*/
-void qeth_recover_features(struct net_device *dev)
+void qeth_enable_hw_features(struct net_device *dev)
{
- netdev_features_t features = dev->features;
struct qeth_card *card = dev->ml_priv;
+ netdev_features_t features;
+ rtnl_lock();
+ features = dev->features;
/* force-off any feature that needs an IPA sequence.
* netdev_update_features() will restart them.
*/
dev->features &= ~QETH_HW_FEATURES;
netdev_update_features(dev);
-
- if (features == dev->features)
- return;
- dev_warn(&card->gdev->dev,
- "Device recovery failed to restore all offload features\n");
+ if (features != dev->features)
+ dev_warn(&card->gdev->dev,
+ "Device recovery failed to restore all offload features\n");
+ rtnl_unlock();
}
-EXPORT_SYMBOL_GPL(qeth_recover_features);
+EXPORT_SYMBOL_GPL(qeth_enable_hw_features);
int qeth_set_features(struct net_device *dev, netdev_features_t features)
{
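qeth_enable_hw_features() now takes the rtnl lock itself rather than requiring the caller to hold it; forcing the offload bits off and calling netdev_update_features() lets the networking core re-negotiate them through the driver's fix_features/ndo_set_features hooks. A stripped-down sketch of that locking pattern (generic names, not the qeth code):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

static void example_reprogram_features(struct net_device *dev,
				       netdev_features_t mask)
{
	rtnl_lock();
	dev->features &= ~mask;		/* force the features off ... */
	netdev_update_features(dev);	/* ... and let the core re-enable them */
	rtnl_unlock();
}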
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index a7cb37da6a21..2487f0aeb165 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -140,7 +140,7 @@ static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
{
- enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
IPA_CMD_SETGMAC : IPA_CMD_SETVMAC;
int rc;
@@ -157,7 +157,7 @@ static int qeth_l2_write_mac(struct qeth_card *card, u8 *mac)
static int qeth_l2_remove_mac(struct qeth_card *card, u8 *mac)
{
- enum qeth_ipa_cmds cmd = is_multicast_ether_addr_64bits(mac) ?
+ enum qeth_ipa_cmds cmd = is_multicast_ether_addr(mac) ?
IPA_CMD_DELGMAC : IPA_CMD_DELVMAC;
int rc;
@@ -501,27 +501,34 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
+ /* avoid racing against concurrent state change: */
+ if (!mutex_trylock(&card->conf_mutex))
+ return -EAGAIN;
+
if (!qeth_card_hw_is_reachable(card)) {
ether_addr_copy(dev->dev_addr, addr->sa_data);
- return 0;
+ goto out_unlock;
}
/* don't register the same address twice */
if (ether_addr_equal_64bits(dev->dev_addr, addr->sa_data) &&
(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
+ goto out_unlock;
/* add the new address, switch over, drop the old */
rc = qeth_l2_send_setmac(card, addr->sa_data);
if (rc)
- return rc;
+ goto out_unlock;
ether_addr_copy(old_addr, dev->dev_addr);
ether_addr_copy(dev->dev_addr, addr->sa_data);
if (card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED)
qeth_l2_remove_mac(card, old_addr);
card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- return 0;
+
+out_unlock:
+ mutex_unlock(&card->conf_mutex);
+ return rc;
}
static void qeth_promisc_to_bridge(struct qeth_card *card)
@@ -1112,6 +1119,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
netif_carrier_off(card->dev);
qeth_set_allowed_threads(card, 0xffffffff, 0);
+
+ qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
if (recovery_mode &&
card->info.type != QETH_CARD_TYPE_OSN) {
@@ -1123,9 +1132,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
}
/* this also sets saved unicast addresses */
qeth_l2_set_rx_mode(card->dev);
- rtnl_lock();
- qeth_recover_features(card->dev);
- rtnl_unlock();
}
/* let user_space know that device is online */
kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index e7fa479adf47..5905dc63e256 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2662,6 +2662,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
netif_carrier_on(card->dev);
else
netif_carrier_off(card->dev);
+
+ qeth_enable_hw_features(card->dev);
if (recover_flag == CARD_STATE_RECOVER) {
rtnl_lock();
if (recovery_mode)
@@ -2669,7 +2671,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
else
dev_open(card->dev);
qeth_l3_set_rx_mode(card->dev);
- qeth_recover_features(card->dev);
rtnl_unlock();
}
qeth_trace_features(card);
diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
index a9831bd37a73..a57f3a7d4748 100644
--- a/drivers/scsi/aacraid/aachba.c
+++ b/drivers/scsi/aacraid/aachba.c
@@ -1974,7 +1974,6 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
u32 lun_count, nexus;
u32 i, bus, target;
u8 expose_flag, attribs;
- u8 devtype;
lun_count = aac_get_safw_phys_lun_count(dev);
@@ -1992,23 +1991,23 @@ static void aac_set_safw_attr_all_targets(struct aac_dev *dev)
continue;
if (expose_flag != 0) {
- devtype = AAC_DEVTYPE_RAID_MEMBER;
- goto update_devtype;
+ dev->hba_map[bus][target].devtype =
+ AAC_DEVTYPE_RAID_MEMBER;
+ continue;
}
if (nexus != 0 && (attribs & 8)) {
- devtype = AAC_DEVTYPE_NATIVE_RAW;
+ dev->hba_map[bus][target].devtype =
+ AAC_DEVTYPE_NATIVE_RAW;
dev->hba_map[bus][target].rmw_nexus =
nexus;
} else
- devtype = AAC_DEVTYPE_ARC_RAW;
+ dev->hba_map[bus][target].devtype =
+ AAC_DEVTYPE_ARC_RAW;
dev->hba_map[bus][target].scan_counter = dev->scan_counter;
aac_set_safw_target_qd(dev, bus, target);
-
-update_devtype:
- dev->hba_map[bus][target].devtype = devtype;
}
}
diff --git a/drivers/scsi/cxlflash/main.h b/drivers/scsi/cxlflash/main.h
index 2a3977823812..a39be94d110c 100644
--- a/drivers/scsi/cxlflash/main.h
+++ b/drivers/scsi/cxlflash/main.h
@@ -107,12 +107,12 @@ cxlflash_assign_ops(struct dev_dependent_vals *ddv)
{
const struct cxlflash_backend_ops *ops = NULL;
-#ifdef CONFIG_OCXL
+#ifdef CONFIG_OCXL_BASE
if (ddv->flags & CXLFLASH_OCXL_DEV)
ops = &cxlflash_ocxl_ops;
#endif
-#ifdef CONFIG_CXL
+#ifdef CONFIG_CXL_BASE
if (!(ddv->flags & CXLFLASH_OCXL_DEV))
ops = &cxlflash_cxl_ops;
#endif
diff --git a/drivers/scsi/cxlflash/ocxl_hw.c b/drivers/scsi/cxlflash/ocxl_hw.c
index 0a95b5f25380..497a68389461 100644
--- a/drivers/scsi/cxlflash/ocxl_hw.c
+++ b/drivers/scsi/cxlflash/ocxl_hw.c
@@ -134,15 +134,14 @@ static struct file *ocxlflash_getfile(struct device *dev, const char *name,
rc = PTR_ERR(file);
dev_err(dev, "%s: alloc_file failed rc=%d\n",
__func__, rc);
- goto err5;
+ path_put(&path);
+ goto err3;
}
file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
file->private_data = priv;
out:
return file;
-err5:
- path_put(&path);
err4:
iput(inode);
err3:
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 15c7f3b6f35e..58bb70b886d7 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -3440,11 +3440,11 @@ static void hpsa_get_enclosure_info(struct ctlr_info *h,
struct ext_report_lun_entry *rle = &rlep->LUN[rle_index];
u16 bmic_device_index = 0;
- bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
-
- encl_dev->sas_address =
+ encl_dev->eli =
hpsa_get_enclosure_logical_identifier(h, scsi3addr);
+ bmic_device_index = GET_BMIC_DRIVE_NUMBER(&rle->lunid[0]);
+
if (encl_dev->target == -1 || encl_dev->lun == -1) {
rc = IO_OK;
goto out;
@@ -9697,7 +9697,24 @@ hpsa_sas_get_linkerrors(struct sas_phy *phy)
static int
hpsa_sas_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
{
- *identifier = rphy->identify.sas_address;
+ struct Scsi_Host *shost = phy_to_shost(rphy);
+ struct ctlr_info *h;
+ struct hpsa_scsi_dev_t *sd;
+
+ if (!shost)
+ return -ENXIO;
+
+ h = shost_to_hba(shost);
+
+ if (!h)
+ return -ENXIO;
+
+ sd = hpsa_find_device_by_sas_rphy(h, rphy);
+ if (!sd)
+ return -ENXIO;
+
+ *identifier = sd->eli;
+
return 0;
}
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index fb9f5e7f8209..59e023696fff 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -68,6 +68,7 @@ struct hpsa_scsi_dev_t {
#define RAID_CTLR_LUNID "\0\0\0\0\0\0\0\0"
unsigned char device_id[16]; /* from inquiry pg. 0x83 */
u64 sas_address;
+ u64 eli; /* from report diags. */
unsigned char vendor[8]; /* bytes 8-15 of inquiry data */
unsigned char model[16]; /* bytes 16-31 of inquiry data */
unsigned char rev; /* byte 2 of inquiry data */
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 0a9b8b387bd2..02d65dce74e5 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -760,7 +760,6 @@ static void ipr_mask_and_clear_interrupts(struct ipr_ioa_cfg *ioa_cfg,
ioa_cfg->hrrq[i].allow_interrupts = 0;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
- wmb();
/* Set interrupt mask to stop all new interrupts */
if (ioa_cfg->sis64)
@@ -8403,7 +8402,6 @@ static int ipr_reset_enable_ioa(struct ipr_cmnd *ipr_cmd)
ioa_cfg->hrrq[i].allow_interrupts = 1;
spin_unlock(&ioa_cfg->hrrq[i]._lock);
}
- wmb();
if (ioa_cfg->sis64) {
/* Set the adapter to the correct endian mode. */
writel(IPR_ENDIAN_SWAP_KEY, ioa_cfg->regs.endian_swap_reg);
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 90394cef0f41..0a5dd5595dd3 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -3295,6 +3295,11 @@ static int __qedf_probe(struct pci_dev *pdev, int mode)
init_completion(&qedf->flogi_compl);
+ status = qed_ops->common->update_drv_state(qedf->cdev, true);
+ if (status)
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to send drv state to MFW.\n");
+
memset(&link_params, 0, sizeof(struct qed_link_params));
link_params.link_up = true;
status = qed_ops->common->set_link(qedf->cdev, &link_params);
@@ -3343,6 +3348,7 @@ static int qedf_probe(struct pci_dev *pdev, const struct pci_device_id *id)
static void __qedf_remove(struct pci_dev *pdev, int mode)
{
struct qedf_ctx *qedf;
+ int rc;
if (!pdev) {
QEDF_ERR(NULL, "pdev is NULL.\n");
@@ -3437,6 +3443,12 @@ static void __qedf_remove(struct pci_dev *pdev, int mode)
qed_ops->common->set_power_state(qedf->cdev, PCI_D0);
pci_set_drvdata(pdev, NULL);
}
+
+ rc = qed_ops->common->update_drv_state(qedf->cdev, false);
+ if (rc)
+ QEDF_ERR(&(qedf->dbg_ctx),
+ "Failed to send drv state to MFW.\n");
+
qed_ops->common->slowpath_stop(qedf->cdev);
qed_ops->common->remove(qedf->cdev);
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index cf274a79e77a..091ec1207bea 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -2273,6 +2273,7 @@ kset_free:
static void __qedi_remove(struct pci_dev *pdev, int mode)
{
struct qedi_ctx *qedi = pci_get_drvdata(pdev);
+ int rval;
if (qedi->tmf_thread) {
flush_workqueue(qedi->tmf_thread);
@@ -2302,6 +2303,10 @@ static void __qedi_remove(struct pci_dev *pdev, int mode)
if (mode == QEDI_MODE_NORMAL)
qedi_free_iscsi_pf_param(qedi);
+ rval = qedi_ops->common->update_drv_state(qedi->cdev, false);
+ if (rval)
+ QEDI_ERR(&qedi->dbg_ctx, "Failed to send drv state to MFW\n");
+
if (!test_bit(QEDI_IN_OFFLINE, &qedi->flags)) {
qedi_ops->common->slowpath_stop(qedi->cdev);
qedi_ops->common->remove(qedi->cdev);
@@ -2576,6 +2581,12 @@ static int __qedi_probe(struct pci_dev *pdev, int mode)
if (qedi_setup_boot_info(qedi))
QEDI_ERR(&qedi->dbg_ctx,
"No iSCSI boot target configured\n");
+
+ rc = qedi_ops->common->update_drv_state(qedi->cdev, true);
+ if (rc)
+ QEDI_ERR(&qedi->dbg_ctx,
+ "Failed to send drv state to MFW\n");
+
}
return 0;
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 9442e18aef6f..0f94b1d62d3f 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -361,6 +361,8 @@ struct ct_arg {
dma_addr_t rsp_dma;
u32 req_size;
u32 rsp_size;
+ u32 req_allocated_size;
+ u32 rsp_allocated_size;
void *req;
void *rsp;
port_id_t id;
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 4bc2b66b299f..2c35b0b2baa0 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -556,7 +556,7 @@ err2:
/* please ignore kernel warning. otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
@@ -564,7 +564,7 @@ err2:
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -617,6 +617,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -627,6 +628,7 @@ static int qla_async_rftid(scsi_qla_host_t *vha, port_id_t *d_id)
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -712,6 +714,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -722,6 +725,7 @@ static int qla_async_rffid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -802,6 +806,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -812,6 +817,7 @@ static int qla_async_rnnid(scsi_qla_host_t *vha, port_id_t *d_id,
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -909,6 +915,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"%s: Failed to allocate ct_sns request.\n",
@@ -919,6 +926,7 @@ static int qla_async_rsnn_nn(scsi_qla_host_t *vha)
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"%s: Failed to allocate ct_sns request.\n",
@@ -3388,14 +3396,14 @@ void qla24xx_sp_unmap(scsi_qla_host_t *vha, srb_t *sp)
{
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3596,14 +3604,14 @@ static void qla2x00_async_gpnid_sp_done(void *s, int res)
/* please ignore kernel warning. otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -3654,6 +3662,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.u.ctarg.req = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.req_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xd041,
"Failed to allocate ct_sns request.\n");
@@ -3663,6 +3672,7 @@ int qla24xx_async_gpnid(scsi_qla_host_t *vha, port_id_t *id)
sp->u.iocb_cmd.u.ctarg.rsp = dma_alloc_coherent(&vha->hw->pdev->dev,
sizeof(struct ct_sns_pkt), &sp->u.iocb_cmd.u.ctarg.rsp_dma,
GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xd042,
"Failed to allocate ct_sns request.\n");
@@ -4142,14 +4152,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
*/
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4179,14 +4189,14 @@ static void qla2x00_async_gpnft_gnnft_sp_done(void *s, int res)
/* please ignore kernel warning. Otherwise, we have mem leak. */
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4281,14 +4291,14 @@ static int qla24xx_async_gnnft(scsi_qla_host_t *vha, struct srb *sp,
done_free_sp:
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
@@ -4349,6 +4359,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.req = dma_zalloc_coherent(
&vha->hw->pdev->dev, sizeof(struct ct_sns_pkt),
&sp->u.iocb_cmd.u.ctarg.req_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.req) {
ql_log(ql_log_warn, vha, 0xffff,
"Failed to allocate ct_sns request.\n");
@@ -4366,6 +4377,7 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
sp->u.iocb_cmd.u.ctarg.rsp = dma_zalloc_coherent(
&vha->hw->pdev->dev, rspsz,
&sp->u.iocb_cmd.u.ctarg.rsp_dma, GFP_KERNEL);
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size = sizeof(struct ct_sns_pkt);
if (!sp->u.iocb_cmd.u.ctarg.rsp) {
ql_log(ql_log_warn, vha, 0xffff,
"Failed to allocate ct_sns request.\n");
@@ -4425,14 +4437,14 @@ int qla24xx_async_gpnft(scsi_qla_host_t *vha, u8 fc4_type, srb_t *sp)
done_free_sp:
if (sp->u.iocb_cmd.u.ctarg.req) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.req_allocated_size,
sp->u.iocb_cmd.u.ctarg.req,
sp->u.iocb_cmd.u.ctarg.req_dma);
sp->u.iocb_cmd.u.ctarg.req = NULL;
}
if (sp->u.iocb_cmd.u.ctarg.rsp) {
dma_free_coherent(&vha->hw->pdev->dev,
- sizeof(struct ct_sns_pkt),
+ sp->u.iocb_cmd.u.ctarg.rsp_allocated_size,
sp->u.iocb_cmd.u.ctarg.rsp,
sp->u.iocb_cmd.u.ctarg.rsp_dma);
sp->u.iocb_cmd.u.ctarg.rsp = NULL;
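The qla2xxx hunks above pair every dma_alloc_coherent() of a CT request/response with a recorded req_allocated_size/rsp_allocated_size, and every dma_free_coherent() now frees with that recorded size instead of a hard-coded sizeof(struct ct_sns_pkt); this matters because some paths (for example the GPN_FT response) allocate a size other than the fixed packet. A minimal sketch of the record-the-size-you-allocated pattern, using the real DMA API but hypothetical structure fields:

	/* Sketch: 'struct ct_buf' and its fields are placeholders for the
	 * qla2xxx ctarg bookkeeping.
	 */
	struct ct_buf {
		void		*req;
		dma_addr_t	req_dma;
		u32		req_allocated_size;
	};

	static int ct_buf_alloc(struct device *dev, struct ct_buf *ct, size_t len)
	{
		ct->req = dma_alloc_coherent(dev, len, &ct->req_dma, GFP_KERNEL);
		if (!ct->req)
			return -ENOMEM;
		ct->req_allocated_size = len;	/* remember exactly what was allocated */
		return 0;
	}

	static void ct_buf_free(struct device *dev, struct ct_buf *ct)
	{
		if (ct->req) {
			/* free with the recorded size, never a guessed sizeof() */
			dma_free_coherent(dev, ct->req_allocated_size,
					  ct->req, ct->req_dma);
			ct->req = NULL;
		}
	}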
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 7b675243bd16..db0e3279e07a 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -591,12 +591,14 @@ static void qla24xx_handle_gnl_done_event(scsi_qla_host_t *vha,
conflict_fcport =
qla2x00_find_fcport_by_wwpn(vha,
e->port_name, 0);
- ql_dbg(ql_dbg_disc, vha, 0x20e6,
- "%s %d %8phC post del sess\n",
- __func__, __LINE__,
- conflict_fcport->port_name);
- qlt_schedule_sess_for_deletion
- (conflict_fcport);
+ if (conflict_fcport) {
+ qlt_schedule_sess_for_deletion
+ (conflict_fcport);
+ ql_dbg(ql_dbg_disc, vha, 0x20e6,
+ "%s %d %8phC post del sess\n",
+ __func__, __LINE__,
+ conflict_fcport->port_name);
+ }
}
/* FW already picked this loop id for another fcport */
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index e881fce7477a..9f309e572be4 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -3180,6 +3180,8 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
"req->req_q_in=%p req->req_q_out=%p rsp->rsp_q_in=%p rsp->rsp_q_out=%p.\n",
req->req_q_in, req->req_q_out, rsp->rsp_q_in, rsp->rsp_q_out);
+ ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
+
if (ha->isp_ops->initialize_adapter(base_vha)) {
ql_log(ql_log_fatal, base_vha, 0x00d6,
"Failed to initialize adapter - Adapter flags %x.\n",
@@ -3216,8 +3218,6 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
host->can_queue, base_vha->req,
base_vha->mgmt_svr_loop_id, host->sg_tablesize);
- ha->wq = alloc_workqueue("qla2xxx_wq", 0, 0);
-
if (ha->mqenable) {
bool mq = false;
bool startit = false;
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 0fea2e2326be..1027b0cb7fa3 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1224,7 +1224,6 @@ static void qla24xx_chk_fcp_state(struct fc_port *sess)
void qlt_schedule_sess_for_deletion(struct fc_port *sess)
{
struct qla_tgt *tgt = sess->tgt;
- struct qla_hw_data *ha = sess->vha->hw;
unsigned long flags;
if (sess->disc_state == DSC_DELETE_PEND)
@@ -1241,16 +1240,16 @@ void qlt_schedule_sess_for_deletion(struct fc_port *sess)
return;
}
- spin_lock_irqsave(&ha->tgt.sess_lock, flags);
if (sess->deleted == QLA_SESS_DELETED)
sess->logout_on_delete = 0;
+ spin_lock_irqsave(&sess->vha->work_lock, flags);
if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
return;
}
sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
- spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
+ spin_unlock_irqrestore(&sess->vha->work_lock, flags);
sess->disc_state = DSC_DELETE_PEND;
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 24d7496cd9e2..364e71861bfd 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -5507,9 +5507,9 @@ static void __exit scsi_debug_exit(void)
int k = sdebug_add_host;
stop_all_queued();
- free_all_queued();
for (; k; k--)
sdebug_remove_adapter();
+ free_all_queued();
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 8932ae81a15a..2715cdaa669c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -296,6 +296,20 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
rtn = host->hostt->eh_timed_out(scmd);
if (rtn == BLK_EH_DONE) {
+ /*
+ * For blk-mq, we must set the request state to complete now
+ * before sending the request to the scsi error handler. This
+ * will prevent a use-after-free in the event the LLD manages
+ * to complete the request before the error handler finishes
+ * processing this timed out request.
+ *
+ * If the request was already completed, then the LLD beat the
+ * time out handler from transferring the request to the scsi
+ * error handler. In that case we can return immediately as no
+ * further action is required.
+ */
+ if (req->q->mq_ops && !blk_mq_mark_complete(req))
+ return rtn;
if (scsi_abort_command(scmd) != SUCCESS) {
set_host_byte(scmd, DID_TIME_OUT);
scsi_eh_scmd_add(scmd);
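The comment added above describes the race: after BLK_EH_DONE the command is handed to the SCSI error handler, but with blk-mq the LLD may still complete the request concurrently and free it underneath the error handler. blk_mq_mark_complete() lets the timeout path atomically claim completion ownership, and per the added check, a false return means the normal completion already won and the timeout handler backs off. A hedged, generic sketch of the same "first claimant wins" idea, using an atomic compare-and-swap on a hypothetical state field (not the blk-mq implementation itself):

	/* Illustration only -- 'rq_state' and the enum values are placeholders;
	 * atomic_cmpxchg() is the real kernel primitive.
	 */
	enum { RQ_IN_FLIGHT = 1, RQ_COMPLETE = 2 };

	static bool claim_completion(atomic_t *rq_state)
	{
		/* Succeeds for exactly one caller: timeout path or normal completion. */
		return atomic_cmpxchg(rq_state, RQ_IN_FLIGHT, RQ_COMPLETE)
				== RQ_IN_FLIGHT;
	}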
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1da3d71e9f61..13948102ca29 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3592,7 +3592,7 @@ fc_bsg_job_timeout(struct request *req)
/* the blk_end_sync_io() doesn't check the error */
if (inflight)
- blk_mq_complete_request(req);
+ __blk_complete_request(req);
return BLK_EH_DONE;
}
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index a14fef11776e..2bf3bf73886e 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -391,7 +391,8 @@ static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
* Check that all zones of the device are equal. The last zone can however
* be smaller. The zone size must also be a power of two number of LBAs.
*
- * Returns the zone size in bytes upon success or an error code upon failure.
+ * Returns the zone size in number of blocks upon success or an error code
+ * upon failure.
*/
static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
{
@@ -401,7 +402,7 @@ static s64 sd_zbc_check_zone_size(struct scsi_disk *sdkp)
unsigned char *rec;
unsigned int buf_len;
unsigned int list_length;
- int ret;
+ s64 ret;
u8 same;
/* Get a buffer */
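The widened return type matters because sd_zbc_check_zone_size() now returns either a negative errno or the zone size in logical blocks, and an int could truncate large zone sizes. A minimal caller-side sketch of the convention (hypothetical names):

	/* Sketch only: 'zbc_zone_blocks()' and 'disk' are placeholders. */
	s64 zone_blocks = zbc_zone_blocks(disk);

	if (zone_blocks < 0)
		return zone_blocks;		/* negative errno passed through */
	pr_info("zone size: %lld blocks\n", (long long)zone_blocks);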
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 53ae52dbff84..cd2fdac000c9 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -51,6 +51,7 @@ static int sg_version_num = 30536; /* 2 digits for each component */
#include <linux/atomic.h>
#include <linux/ratelimit.h>
#include <linux/uio.h>
+#include <linux/cred.h> /* for sg_check_file_access() */
#include "scsi.h"
#include <scsi/scsi_dbg.h>
@@ -209,6 +210,33 @@ static void sg_device_destroy(struct kref *kref);
sdev_prefix_printk(prefix, (sdp)->device, \
(sdp)->disk->disk_name, fmt, ##a)
+/*
+ * The SCSI interfaces that use read() and write() as an asynchronous variant of
+ * ioctl(..., SG_IO, ...) are fundamentally unsafe, since there are lots of ways
+ * to trigger read() and write() calls from various contexts with elevated
+ * privileges. This can lead to kernel memory corruption (e.g. if these
+ * interfaces are called through splice()) and privilege escalation inside
+ * userspace (e.g. if a process with access to such a device passes a file
+ * descriptor to a SUID binary as stdin/stdout/stderr).
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static int sg_check_file_access(struct file *filp, const char *caller)
+{
+ if (filp->f_cred != current_real_cred()) {
+ pr_err_once("%s: process %d (%s) changed security contexts after opening file descriptor, this is not allowed.\n",
+ caller, task_tgid_vnr(current), current->comm);
+ return -EPERM;
+ }
+ if (uaccess_kernel()) {
+ pr_err_once("%s: process %d (%s) called from kernel context, this is not allowed.\n",
+ caller, task_tgid_vnr(current), current->comm);
+ return -EACCES;
+ }
+ return 0;
+}
+
static int sg_allow_access(struct file *filp, unsigned char *cmd)
{
struct sg_fd *sfp = filp->private_data;
@@ -393,6 +421,14 @@ sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos)
struct sg_header *old_hdr = NULL;
int retval = 0;
+ /*
+ * This could cause a response to be stranded. Close the associated
+ * file descriptor to free up any resources being held.
+ */
+ retval = sg_check_file_access(filp, __func__);
+ if (retval)
+ return retval;
+
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
SCSI_LOG_TIMEOUT(3, sg_printk(KERN_INFO, sdp,
@@ -580,9 +616,11 @@ sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos)
struct sg_header old_hdr;
sg_io_hdr_t *hp;
unsigned char cmnd[SG_MAX_CDB_SIZE];
+ int retval;
- if (unlikely(uaccess_kernel()))
- return -EINVAL;
+ retval = sg_check_file_access(filp, __func__);
+ if (retval)
+ return retval;
if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
return -ENXIO;
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 36f59a1be7e9..61389bdc7926 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -654,10 +654,17 @@ static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
static int scsifront_sdev_configure(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
+ if (err) {
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ return err;
+ }
+ }
return 0;
}
@@ -665,10 +672,15 @@ static int scsifront_sdev_configure(struct scsi_device *sdev)
static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
}
static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
if (scsi_add_device(info->host, chn, tgt, lun)) {
dev_err(&dev->dev, "scsi_add_device\n");
- xenbus_printf(XBT_NIL, dev->nodename,
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
}
break;
case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
}
break;
case VSCSIFRONT_OP_READD_LUN:
- if (device_state == XenbusStateConnected)
- xenbus_printf(XBT_NIL, dev->nodename,
+ if (device_state == XenbusStateConnected) {
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateConnected);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
break;
default:
break;
diff --git a/drivers/soc/imx/gpc.c b/drivers/soc/imx/gpc.c
index 32f0748fd067..0097a939487f 100644
--- a/drivers/soc/imx/gpc.c
+++ b/drivers/soc/imx/gpc.c
@@ -27,9 +27,16 @@
#define GPC_PGC_SW2ISO_SHIFT 0x8
#define GPC_PGC_SW_SHIFT 0x0
+#define GPC_PGC_PCI_PDN 0x200
+#define GPC_PGC_PCI_SR 0x20c
+
#define GPC_PGC_GPU_PDN 0x260
#define GPC_PGC_GPU_PUPSCR 0x264
#define GPC_PGC_GPU_PDNSCR 0x268
+#define GPC_PGC_GPU_SR 0x26c
+
+#define GPC_PGC_DISP_PDN 0x240
+#define GPC_PGC_DISP_SR 0x24c
#define GPU_VPU_PUP_REQ BIT(1)
#define GPU_VPU_PDN_REQ BIT(0)
@@ -318,10 +325,24 @@ static const struct of_device_id imx_gpc_dt_ids[] = {
{ }
};
+static const struct regmap_range yes_ranges[] = {
+ regmap_reg_range(GPC_CNTR, GPC_CNTR),
+ regmap_reg_range(GPC_PGC_PCI_PDN, GPC_PGC_PCI_SR),
+ regmap_reg_range(GPC_PGC_GPU_PDN, GPC_PGC_GPU_SR),
+ regmap_reg_range(GPC_PGC_DISP_PDN, GPC_PGC_DISP_SR),
+};
+
+static const struct regmap_access_table access_table = {
+ .yes_ranges = yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(yes_ranges),
+};
+
static const struct regmap_config imx_gpc_regmap_config = {
.reg_bits = 32,
.val_bits = 32,
.reg_stride = 4,
+ .rd_table = &access_table,
+ .wr_table = &access_table,
.max_register = 0x2ac,
};
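The gpc.c hunk restricts the syscon regmap to a whitelist of power-gating registers by attaching the same regmap_access_table as both rd_table and wr_table, so accesses outside those ranges are rejected instead of reaching reserved GPC registers. A small, hedged sketch of the same regmap configuration with placeholder register offsets:

	/* Sketch with made-up offsets; the regmap structures and helpers are real. */
	#define EX_REG_CTRL	0x000
	#define EX_REG_STAT	0x004
	#define EX_REG_PDN	0x200
	#define EX_REG_SR	0x20c

	static const struct regmap_range ex_yes_ranges[] = {
		regmap_reg_range(EX_REG_CTRL, EX_REG_STAT),
		regmap_reg_range(EX_REG_PDN, EX_REG_SR),
	};

	static const struct regmap_access_table ex_access_table = {
		.yes_ranges	= ex_yes_ranges,
		.n_yes_ranges	= ARRAY_SIZE(ex_yes_ranges),
	};

	static const struct regmap_config ex_regmap_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.rd_table	= &ex_access_table,	/* allowed read ranges */
		.wr_table	= &ex_access_table,	/* allowed write ranges */
		.max_register	= EX_REG_SR,
	};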
diff --git a/drivers/soc/imx/gpcv2.c b/drivers/soc/imx/gpcv2.c
index f4e3bd40c72e..6ef18cf8f243 100644
--- a/drivers/soc/imx/gpcv2.c
+++ b/drivers/soc/imx/gpcv2.c
@@ -39,10 +39,15 @@
#define GPC_M4_PU_PDN_FLG 0x1bc
-
-#define PGC_MIPI 4
-#define PGC_PCIE 5
-#define PGC_USB_HSIC 8
+/*

+ * The PGC offset values in the GPC_PGC memory map of the
+ * Reference Manual's GPC chapter (Rev. 1, 01/2018 and older)
+ * are incorrect; the offset values below are taken from the
+ * design RTL.
+ */
+#define PGC_MIPI 16
+#define PGC_PCIE 17
+#define PGC_USB_HSIC 20
#define GPC_PGC_CTRL(n) (0x800 + (n) * 0x40)
#define GPC_PGC_SR(n) (GPC_PGC_CTRL(n) + 0xc)
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index 9dc02f390ba3..5856e792d09c 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -5,7 +5,8 @@ menu "Qualcomm SoC drivers"
config QCOM_COMMAND_DB
bool "Qualcomm Command DB"
- depends on (ARCH_QCOM && OF) || COMPILE_TEST
+ depends on ARCH_QCOM || COMPILE_TEST
+ depends on OF_RESERVED_MEM
help
Command DB queries shared memory by key string for shared system
resources. Platform drivers that require to set state of a shared
diff --git a/drivers/soc/renesas/rcar-sysc.c b/drivers/soc/renesas/rcar-sysc.c
index 95120acc4d80..50d03d8b4f9a 100644
--- a/drivers/soc/renesas/rcar-sysc.c
+++ b/drivers/soc/renesas/rcar-sysc.c
@@ -194,11 +194,12 @@ static int rcar_sysc_pd_power_on(struct generic_pm_domain *genpd)
static bool has_cpg_mstp;
-static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
+static int __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
{
struct generic_pm_domain *genpd = &pd->genpd;
const char *name = pd->genpd.name;
struct dev_power_governor *gov = &simple_qos_governor;
+ int error;
if (pd->flags & PD_CPU) {
/*
@@ -251,7 +252,11 @@ static void __init rcar_sysc_pd_setup(struct rcar_sysc_pd *pd)
rcar_sysc_power_up(&pd->ch);
finalize:
- pm_genpd_init(genpd, gov, false);
+ error = pm_genpd_init(genpd, gov, false);
+ if (error)
+ pr_err("Failed to init PM domain %s: %d\n", name, error);
+
+ return error;
}
static const struct of_device_id rcar_sysc_matches[] __initconst = {
@@ -375,6 +380,9 @@ static int __init rcar_sysc_pd_init(void)
pr_debug("%pOF: syscier = 0x%08x\n", np, syscier);
iowrite32(syscier, base + SYSCIER);
+ /*
+ * First, create all PM domains
+ */
for (i = 0; i < info->num_areas; i++) {
const struct rcar_sysc_area *area = &info->areas[i];
struct rcar_sysc_pd *pd;
@@ -397,14 +405,29 @@ static int __init rcar_sysc_pd_init(void)
pd->ch.isr_bit = area->isr_bit;
pd->flags = area->flags;
- rcar_sysc_pd_setup(pd);
- if (area->parent >= 0)
- pm_genpd_add_subdomain(domains->domains[area->parent],
- &pd->genpd);
+ error = rcar_sysc_pd_setup(pd);
+ if (error)
+ goto out_put;
domains->domains[area->isr_bit] = &pd->genpd;
}
+ /*
+ * Second, link all PM domains to their parents
+ */
+ for (i = 0; i < info->num_areas; i++) {
+ const struct rcar_sysc_area *area = &info->areas[i];
+
+ if (!area->name || area->parent < 0)
+ continue;
+
+ error = pm_genpd_add_subdomain(domains->domains[area->parent],
+ domains->domains[area->isr_bit]);
+ if (error)
+ pr_warn("Failed to add PM subdomain %s to parent %u\n",
+ area->name, area->parent);
+ }
+
error = of_genpd_add_provider_onecell(np, &domains->onecell_data);
out_put:
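The rcar-sysc change splits initialization into two passes: first every PM domain is created with pm_genpd_init() (whose return value is now checked), and only afterwards are parent/child links made with pm_genpd_add_subdomain(). That ordering guarantees a parent domain exists by the time a child refers to it, regardless of how the areas table is ordered. A compact sketch of the two-pass shape, assuming a simplified area table and domain array:

	/* Sketch: 'area', 'pd' and 'domains' mirror the driver but are simplified;
	 * pm_genpd_init() and pm_genpd_add_subdomain() are the real APIs.
	 */
	for (i = 0; i < num_areas; i++) {		/* pass 1: create */
		error = pm_genpd_init(&pd[i]->genpd, &simple_qos_governor, false);
		if (error)
			goto out_put;
		domains[area[i].isr_bit] = &pd[i]->genpd;
	}

	for (i = 0; i < num_areas; i++) {		/* pass 2: link */
		if (area[i].parent < 0)
			continue;
		error = pm_genpd_add_subdomain(domains[area[i].parent],
					       domains[area[i].isr_bit]);
		if (error)
			pr_warn("failed to link %s to parent %d\n",
				area[i].name, area[i].parent);
	}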
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index e8c440329708..31db510018a9 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -30,7 +30,7 @@ void *ion_heap_map_kernel(struct ion_heap *heap,
struct page **tmp = pages;
if (!pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (buffer->flags & ION_FLAG_CACHED)
pgprot = PAGE_KERNEL;
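Returning ERR_PTR(-ENOMEM) instead of NULL lets callers of ion_heap_map_kernel() propagate a real errno rather than inventing one. The usual pattern on both sides, using the real <linux/err.h> helpers around a hypothetical mapping function:

	#include <linux/err.h>

	/* Sketch: example_map()/example_vmap() are placeholders. */
	static void *example_map(size_t npages)
	{
		void *vaddr = example_vmap(npages);	/* hypothetical allocator */

		if (!vaddr)
			return ERR_PTR(-ENOMEM);	/* encode the errno in the pointer */
		return vaddr;
	}

	/* Caller side: */
	void *vaddr = example_map(16);

	if (IS_ERR(vaddr))
		return PTR_ERR(vaddr);			/* recover the errno */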
diff --git a/drivers/staging/comedi/drivers/quatech_daqp_cs.c b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
index ea194aa01a64..257b0daff01f 100644
--- a/drivers/staging/comedi/drivers/quatech_daqp_cs.c
+++ b/drivers/staging/comedi/drivers/quatech_daqp_cs.c
@@ -642,7 +642,7 @@ static int daqp_ao_insn_write(struct comedi_device *dev,
/* Make sure D/A update mode is direct update */
outb(0, dev->iobase + DAQP_AUX_REG);
- for (i = 0; i > insn->n; i++) {
+ for (i = 0; i < insn->n; i++) {
unsigned int val = data[i];
int ret;
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 0ecffab52ec2..abdaf7cf8162 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -1842,15 +1842,15 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
memset(set_address, 0, NIC_MAX_MCAST_LIST * ETH_ALEN);
if (dev->flags & IFF_PROMISC) {
- hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_PROMISC);
+ hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_PROMISC);
goto spin_unlock;
}
if ((netdev_mc_count(dev) > NIC_MAX_MCAST_LIST) ||
(dev->flags & IFF_ALLMULTI)) {
- hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCASTALL);
+ hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_MCASTALL);
goto spin_unlock;
}
@@ -1866,8 +1866,8 @@ void hostif_sme_multicast_set(struct ks_wlan_private *priv)
ETH_ALEN * mc_count);
} else {
priv->sme_i.sme_flag |= SME_MULTICAST;
- hostif_mib_set_request_bool(priv, LOCAL_MULTICAST_FILTER,
- MCAST_FILTER_MCAST);
+ hostif_mib_set_request_int(priv, LOCAL_MULTICAST_FILTER,
+ MCAST_FILTER_MCAST);
}
spin_unlock:
diff --git a/drivers/staging/media/omap4iss/iss_video.c b/drivers/staging/media/omap4iss/iss_video.c
index a3a83424a926..16478fe9e3f8 100644
--- a/drivers/staging/media/omap4iss/iss_video.c
+++ b/drivers/staging/media/omap4iss/iss_video.c
@@ -11,7 +11,6 @@
* (at your option) any later version.
*/
-#include <asm/cacheflush.h>
#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
@@ -24,6 +23,8 @@
#include <media/v4l2-ioctl.h>
#include <media/v4l2-mc.h>
+#include <asm/cacheflush.h>
+
#include "iss_video.h"
#include "iss.h"
diff --git a/drivers/staging/rtl8188eu/Kconfig b/drivers/staging/rtl8188eu/Kconfig
index 673fdce25530..ff7832798a77 100644
--- a/drivers/staging/rtl8188eu/Kconfig
+++ b/drivers/staging/rtl8188eu/Kconfig
@@ -7,7 +7,6 @@ config R8188EU
select LIB80211
select LIB80211_CRYPT_WEP
select LIB80211_CRYPT_CCMP
- select LIB80211_CRYPT_TKIP
---help---
This option adds the Realtek RTL8188EU USB device such as TP-Link TL-WN725N.
If built as a module, it will be called r8188eu.
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 05936a45eb93..c6857a5be12a 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -23,7 +23,6 @@
#include <mon.h>
#include <wifi.h>
#include <linux/vmalloc.h>
-#include <net/lib80211.h>
#define ETHERNET_HEADER_SIZE 14 /* Ethernet Header Length */
#define LLC_HEADER_SIZE 6 /* LLC Header Length */
@@ -221,20 +220,31 @@ u32 rtw_free_uc_swdec_pending_queue(struct adapter *adapter)
static int recvframe_chkmic(struct adapter *adapter,
struct recv_frame *precvframe)
{
- int res = _SUCCESS;
- struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
- struct sta_info *stainfo = rtw_get_stainfo(&adapter->stapriv, prxattrib->ta);
+ int i, res = _SUCCESS;
+ u32 datalen;
+ u8 miccode[8];
+ u8 bmic_err = false, brpt_micerror = true;
+ u8 *pframe, *payload, *pframemic;
+ u8 *mickey;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &precvframe->attrib;
+ struct security_priv *psecuritypriv = &adapter->securitypriv;
+
+ struct mlme_ext_priv *pmlmeext = &adapter->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+
+ stainfo = rtw_get_stainfo(&adapter->stapriv, &prxattrib->ta[0]);
if (prxattrib->encrypt == _TKIP_) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: prxattrib->encrypt==_TKIP_\n", __func__));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: da=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ __func__, prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5]));
+
+ /* calculate mic code */
if (stainfo) {
- int key_idx;
- const int iv_len = 8, icv_len = 4, key_length = 32;
- struct sk_buff *skb = precvframe->pkt;
- u8 key[32], iv[8], icv[4], *pframe = skb->data;
- void *crypto_private = NULL;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
- struct security_priv *psecuritypriv = &adapter->securitypriv;
-
if (IS_MCAST(prxattrib->ra)) {
if (!psecuritypriv) {
res = _FAIL;
@@ -243,58 +253,115 @@ static int recvframe_chkmic(struct adapter *adapter,
DBG_88E("\n %s: didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
- key_idx = prxattrib->key_index;
- memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
- memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+ mickey = &psecuritypriv->dot118021XGrprxmickey[prxattrib->key_index].skey[0];
+
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_,
+ ("\n %s: bcmc key\n", __func__));
} else {
- key_idx = 0;
- memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
- memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+ mickey = &stainfo->dot11tkiprxmickey.skey[0];
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n %s: unicast key\n", __func__));
}
- if (!crypto_ops) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
+ /* icv_len included the mic code */
+ datalen = precvframe->pkt->len-prxattrib->hdrlen -
+ prxattrib->iv_len-prxattrib->icv_len-8;
+ pframe = precvframe->pkt->data;
+ payload = pframe+prxattrib->hdrlen+prxattrib->iv_len;
- memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
- memcpy(icv, pframe + skb->len - icv_len, icv_len);
- memmove(pframe + iv_len, pframe, prxattrib->hdrlen);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_info_, ("\n prxattrib->iv_len=%d prxattrib->icv_len=%d\n", prxattrib->iv_len, prxattrib->icv_len));
+ rtw_seccalctkipmic(mickey, pframe, payload, datalen, &miccode[0],
+ (unsigned char)prxattrib->priority); /* care the length of the data */
- skb_pull(skb, iv_len);
- skb_trim(skb, skb->len - icv_len);
+ pframemic = payload+datalen;
- crypto_private = crypto_ops->init(key_idx);
- if (!crypto_private) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->decrypt_msdu(skb, key_idx, prxattrib->hdrlen, crypto_private)) {
- res = _FAIL;
- goto exit_lib80211_tkip;
+ bmic_err = false;
+
+ for (i = 0; i < 8; i++) {
+ if (miccode[i] != *(pframemic+i)) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("%s: miccode[%d](%02x)!=*(pframemic+%d)(%02x) ",
+ __func__, i, miccode[i], i, *(pframemic + i)));
+ bmic_err = true;
+ }
}
- memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
- skb_push(skb, iv_len);
- skb_put(skb, icv_len);
+ if (bmic_err) {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-8)-*(pframemic-1)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-8), *(pframemic-7), *(pframemic-6),
+ *(pframemic-5), *(pframemic-4), *(pframemic-3),
+ *(pframemic-2), *(pframemic-1)));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n *(pframemic-16)-*(pframemic-9)=0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
+ *(pframemic-16), *(pframemic-15), *(pframemic-14),
+ *(pframemic-13), *(pframemic-12), *(pframemic-11),
+ *(pframemic-10), *(pframemic-9)));
+ {
+ uint i;
- memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
- memcpy(pframe + skb->len - icv_len, icv, icv_len);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("\n ======demp packet (len=%d)======\n",
+ precvframe->pkt->len));
+ for (i = 0; i < precvframe->pkt->len; i += 8) {
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x",
+ *(precvframe->pkt->data+i),
+ *(precvframe->pkt->data+i+1),
+ *(precvframe->pkt->data+i+2),
+ *(precvframe->pkt->data+i+3),
+ *(precvframe->pkt->data+i+4),
+ *(precvframe->pkt->data+i+5),
+ *(precvframe->pkt->data+i+6),
+ *(precvframe->pkt->data+i+7)));
+ }
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n ====== demp packet end [len=%d]======\n",
+ precvframe->pkt->len));
+ RT_TRACE(_module_rtl871x_recv_c_,
+ _drv_err_,
+ ("\n hrdlen=%d,\n",
+ prxattrib->hdrlen));
+ }
-exit_lib80211_tkip:
- if (crypto_ops && crypto_private)
- crypto_ops->deinit(crypto_private);
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
+ ("ra=0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x 0x%.2x psecuritypriv->binstallGrpkey=%d ",
+ prxattrib->ra[0], prxattrib->ra[1], prxattrib->ra[2],
+ prxattrib->ra[3], prxattrib->ra[4], prxattrib->ra[5], psecuritypriv->binstallGrpkey));
+
+ /* double check key_index for some timing issue , */
+ /* cannot compare with psecuritypriv->dot118021XGrpKeyid also cause timing issue */
+ if ((IS_MCAST(prxattrib->ra) == true) && (prxattrib->key_index != pmlmeinfo->key_index))
+ brpt_micerror = false;
+
+ if ((prxattrib->bdecrypted) && (brpt_micerror)) {
+ rtw_handle_tkip_mic_err(adapter, (u8)IS_MCAST(prxattrib->ra));
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ } else {
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, (" mic error :prxattrib->bdecrypted=%d ", prxattrib->bdecrypted));
+ DBG_88E(" mic error :prxattrib->bdecrypted=%d\n", prxattrib->bdecrypted);
+ }
+ res = _FAIL;
+ } else {
+ /* mic checked ok */
+ if ((!psecuritypriv->bcheck_grpkey) && (IS_MCAST(prxattrib->ra))) {
+ psecuritypriv->bcheck_grpkey = true;
+ RT_TRACE(_module_rtl871x_recv_c_, _drv_err_, ("psecuritypriv->bcheck_grpkey = true"));
+ }
+ }
} else {
RT_TRACE(_module_rtl871x_recv_c_, _drv_err_,
("%s: rtw_get_stainfo==NULL!!!\n", __func__));
}
+
+ skb_trim(precvframe->pkt, precvframe->pkt->len - 8);
}
exit:
+
return res;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index bfe0b217e679..67a2490f055e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -650,71 +650,71 @@ u32 rtw_tkip_encrypt(struct adapter *padapter, u8 *pxmitframe)
return res;
}
+/* The hlen isn't include the IV */
u32 rtw_tkip_decrypt(struct adapter *padapter, u8 *precvframe)
-{
- struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
- u32 res = _SUCCESS;
+{ /* exclude ICV */
+ u16 pnl;
+ u32 pnh;
+ u8 rc4key[16];
+ u8 ttkey[16];
+ u8 crc[4];
+ struct arc4context mycontext;
+ int length;
+
+ u8 *pframe, *payload, *iv, *prwskey;
+ union pn48 dot11txpn;
+ struct sta_info *stainfo;
+ struct rx_pkt_attrib *prxattrib = &((struct recv_frame *)precvframe)->attrib;
+ struct security_priv *psecuritypriv = &padapter->securitypriv;
+ u32 res = _SUCCESS;
+
+
+ pframe = (unsigned char *)((struct recv_frame *)precvframe)->pkt->data;
/* 4 start to decrypt recvframe */
if (prxattrib->encrypt == _TKIP_) {
- struct sta_info *stainfo = rtw_get_stainfo(&padapter->stapriv, prxattrib->ta);
-
+ stainfo = rtw_get_stainfo(&padapter->stapriv, &prxattrib->ta[0]);
if (stainfo) {
- int key_idx;
- const int iv_len = 8, icv_len = 4, key_length = 32;
- void *crypto_private = NULL;
- struct sk_buff *skb = ((struct recv_frame *)precvframe)->pkt;
- u8 key[32], iv[8], icv[4], *pframe = skb->data;
- struct lib80211_crypto_ops *crypto_ops = try_then_request_module(lib80211_get_crypto_ops("TKIP"), "lib80211_crypt_tkip");
- struct security_priv *psecuritypriv = &padapter->securitypriv;
-
if (IS_MCAST(prxattrib->ra)) {
if (!psecuritypriv->binstallGrpkey) {
res = _FAIL;
DBG_88E("%s:rx bc/mc packets, but didn't install group key!!!!!!!!!!\n", __func__);
goto exit;
}
- key_idx = prxattrib->key_index;
- memcpy(key, psecuritypriv->dot118021XGrpKey[key_idx].skey, 16);
- memcpy(key + 16, psecuritypriv->dot118021XGrprxmickey[key_idx].skey, 16);
+ prwskey = psecuritypriv->dot118021XGrpKey[prxattrib->key_index].skey;
} else {
- key_idx = 0;
- memcpy(key, stainfo->dot118021x_UncstKey.skey, 16);
- memcpy(key + 16, stainfo->dot11tkiprxmickey.skey, 16);
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("%s: stainfo!= NULL!!!\n", __func__));
+ prwskey = &stainfo->dot118021x_UncstKey.skey[0];
}
- if (!crypto_ops) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
+ iv = pframe+prxattrib->hdrlen;
+ payload = pframe+prxattrib->iv_len+prxattrib->hdrlen;
+ length = ((struct recv_frame *)precvframe)->pkt->len-prxattrib->hdrlen-prxattrib->iv_len;
- memcpy(iv, pframe + prxattrib->hdrlen, iv_len);
- memcpy(icv, pframe + skb->len - icv_len, icv_len);
+ GET_TKIP_PN(iv, dot11txpn);
- crypto_private = crypto_ops->init(key_idx);
- if (!crypto_private) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->set_key(key, key_length, NULL, crypto_private) < 0) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
- if (crypto_ops->decrypt_mpdu(skb, prxattrib->hdrlen, crypto_private)) {
- res = _FAIL;
- goto exit_lib80211_tkip;
- }
+ pnl = (u16)(dot11txpn.val);
+ pnh = (u32)(dot11txpn.val>>16);
- memmove(pframe, pframe + iv_len, prxattrib->hdrlen);
- skb_push(skb, iv_len);
- skb_put(skb, icv_len);
+ phase1((u16 *)&ttkey[0], prwskey, &prxattrib->ta[0], pnh);
+ phase2(&rc4key[0], prwskey, (unsigned short *)&ttkey[0], pnl);
- memcpy(pframe + prxattrib->hdrlen, iv, iv_len);
- memcpy(pframe + skb->len - icv_len, icv, icv_len);
+ /* 4 decrypt payload include icv */
-exit_lib80211_tkip:
- if (crypto_ops && crypto_private)
- crypto_ops->deinit(crypto_private);
+ arcfour_init(&mycontext, rc4key, 16);
+ arcfour_encrypt(&mycontext, payload, payload, length);
+
+ *((__le32 *)crc) = getcrc32(payload, length-4);
+
+ if (crc[3] != payload[length-1] ||
+ crc[2] != payload[length-2] ||
+ crc[1] != payload[length-3] ||
+ crc[0] != payload[length-4]) {
+ RT_TRACE(_module_rtl871x_security_c_, _drv_err_,
+ ("rtw_wep_decrypt:icv error crc (%4ph)!=payload (%4ph)\n",
+ &crc, &payload[length-4]));
+ res = _FAIL;
+ }
} else {
RT_TRACE(_module_rtl871x_security_c_, _drv_err_, ("rtw_tkip_decrypt: stainfo==NULL!!!\n"));
res = _FAIL;
diff --git a/drivers/staging/rtl8723bs/core/rtw_ap.c b/drivers/staging/rtl8723bs/core/rtw_ap.c
index 45c05527a57a..faf4b4158cfa 100644
--- a/drivers/staging/rtl8723bs/core/rtw_ap.c
+++ b/drivers/staging/rtl8723bs/core/rtw_ap.c
@@ -1051,7 +1051,7 @@ int rtw_check_beacon_data(struct adapter *padapter, u8 *pbuf, int len)
return _FAIL;
- if (len > MAX_IE_SZ)
+ if (len < 0 || len > MAX_IE_SZ)
return _FAIL;
pbss_network->IELength = len;
diff --git a/drivers/staging/rtlwifi/rtl8822be/hw.c b/drivers/staging/rtlwifi/rtl8822be/hw.c
index 7947edb239a1..88ba5b2fea6a 100644
--- a/drivers/staging/rtlwifi/rtl8822be/hw.c
+++ b/drivers/staging/rtlwifi/rtl8822be/hw.c
@@ -803,7 +803,7 @@ static void _rtl8822be_enable_aspm_back_door(struct ieee80211_hw *hw)
return;
pci_read_config_byte(rtlpci->pdev, 0x70f, &tmp);
- pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | BIT(7));
+ pci_write_config_byte(rtlpci->pdev, 0x70f, tmp | ASPM_L1_LATENCY << 3);
pci_read_config_byte(rtlpci->pdev, 0x719, &tmp);
pci_write_config_byte(rtlpci->pdev, 0x719, tmp | BIT(3) | BIT(4));
diff --git a/drivers/staging/rtlwifi/wifi.h b/drivers/staging/rtlwifi/wifi.h
index 012fb618840b..a45f0eb69d3f 100644
--- a/drivers/staging/rtlwifi/wifi.h
+++ b/drivers/staging/rtlwifi/wifi.h
@@ -88,6 +88,7 @@
#define RTL_USB_MAX_RX_COUNT 100
#define QBSS_LOAD_SIZE 5
#define MAX_WMMELE_LENGTH 64
+#define ASPM_L1_LATENCY 7
#define TOTAL_CAM_ENTRY 32
diff --git a/drivers/staging/speakup/speakup_soft.c b/drivers/staging/speakup/speakup_soft.c
index a61bc41b82d7..947c79532e10 100644
--- a/drivers/staging/speakup/speakup_soft.c
+++ b/drivers/staging/speakup/speakup_soft.c
@@ -198,11 +198,15 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
int chars_sent = 0;
char __user *cp;
char *init;
+ size_t bytes_per_ch = unicode ? 3 : 1;
u16 ch;
int empty;
unsigned long flags;
DEFINE_WAIT(wait);
+ if (count < bytes_per_ch)
+ return -EINVAL;
+
spin_lock_irqsave(&speakup_info.spinlock, flags);
while (1) {
prepare_to_wait(&speakup_event, &wait, TASK_INTERRUPTIBLE);
@@ -228,7 +232,7 @@ static ssize_t softsynthx_read(struct file *fp, char __user *buf, size_t count,
init = get_initstring();
/* Keep 3 bytes available for a 16bit UTF-8-encoded character */
- while (chars_sent <= count - 3) {
+ while (chars_sent <= count - bytes_per_ch) {
if (speakup_info.flushing) {
speakup_info.flushing = 0;
ch = '\x18';
diff --git a/drivers/staging/typec/Kconfig b/drivers/staging/typec/Kconfig
index 3aa981fbc8f5..e45ed08a5166 100644
--- a/drivers/staging/typec/Kconfig
+++ b/drivers/staging/typec/Kconfig
@@ -11,6 +11,7 @@ config TYPEC_TCPCI
config TYPEC_RT1711H
tristate "Richtek RT1711H Type-C chip driver"
+ depends on I2C
select TYPEC_TCPCI
help
Richtek RT1711H Type-C chip driver that works with
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 01ac306131c1..10db5656fd5d 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -3727,11 +3727,16 @@ core_scsi3_pri_read_keys(struct se_cmd *cmd)
* Check for overflow of 8byte PRI READ_KEYS payload and
* next reservation key list descriptor.
*/
- if ((add_len + 8) > (cmd->data_length - 8))
- break;
-
- put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
- off += 8;
+ if (off + 8 <= cmd->data_length) {
+ put_unaligned_be64(pr_reg->pr_res_key, &buf[off]);
+ off += 8;
+ }
+ /*
+ * SPC5r17: 6.16.2 READ KEYS service action
+ * The ADDITIONAL LENGTH field indicates the number of bytes in
+ * the Reservation key list. The contents of the ADDITIONAL
+ * LENGTH field are not altered based on the allocation length
+ */
add_len += 8;
}
spin_unlock(&dev->t10_pr.registration_lock);
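Per the added SPC-5 comment, ADDITIONAL LENGTH in the PRI READ KEYS response must reflect every registered key, while only the keys that fit in the initiator's allocation length are actually copied. The rewritten loop therefore always advances add_len by 8 but copies a key only when it fits. A stripped-down sketch of that rule with a hypothetical key array:

	/* Sketch: 'keys', 'nkeys', 'buf' and 'data_length' are placeholders. */
	u32 off = 8, add_len = 0;
	int i;

	for (i = 0; i < nkeys; i++) {
		if (off + 8 <= data_length) {		/* copy only what fits... */
			put_unaligned_be64(keys[i], &buf[off]);
			off += 8;
		}
		add_len += 8;				/* ...but count every key */
	}
	put_unaligned_be32(add_len, &buf[4]);		/* ADDITIONAL LENGTH field */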
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 7f96dfa32b9c..d8dc3d22051f 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -656,7 +656,7 @@ static void scatter_data_area(struct tcmu_dev *udev,
}
static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
- bool bidi)
+ bool bidi, uint32_t read_len)
{
struct se_cmd *se_cmd = cmd->se_cmd;
int i, dbi;
@@ -689,7 +689,7 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
for_each_sg(data_sg, sg, data_nents, i) {
int sg_remaining = sg->length;
to = kmap_atomic(sg_page(sg)) + sg->offset;
- while (sg_remaining > 0) {
+ while (sg_remaining > 0 && read_len > 0) {
if (block_remaining == 0) {
if (from)
kunmap_atomic(from);
@@ -701,6 +701,8 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
}
copy_bytes = min_t(size_t, sg_remaining,
block_remaining);
+ if (read_len < copy_bytes)
+ copy_bytes = read_len;
offset = DATA_BLOCK_SIZE - block_remaining;
tcmu_flush_dcache_range(from, copy_bytes);
memcpy(to + sg->length - sg_remaining, from + offset,
@@ -708,8 +710,11 @@ static void gather_data_area(struct tcmu_dev *udev, struct tcmu_cmd *cmd,
sg_remaining -= copy_bytes;
block_remaining -= copy_bytes;
+ read_len -= copy_bytes;
}
kunmap_atomic(to - sg->offset);
+ if (read_len == 0)
+ break;
}
if (from)
kunmap_atomic(from);
@@ -1042,6 +1047,8 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
{
struct se_cmd *se_cmd = cmd->se_cmd;
struct tcmu_dev *udev = cmd->tcmu_dev;
+ bool read_len_valid = false;
+ uint32_t read_len = se_cmd->data_length;
/*
* cmd has been completed already from timeout, just reclaim
@@ -1056,13 +1063,28 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
pr_warn("TCMU: Userspace set UNKNOWN_OP flag on se_cmd %p\n",
cmd->se_cmd);
entry->rsp.scsi_status = SAM_STAT_CHECK_CONDITION;
- } else if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
+ goto done;
+ }
+
+ if (se_cmd->data_direction == DMA_FROM_DEVICE &&
+ (entry->hdr.uflags & TCMU_UFLAG_READ_LEN) && entry->rsp.read_len) {
+ read_len_valid = true;
+ if (entry->rsp.read_len < read_len)
+ read_len = entry->rsp.read_len;
+ }
+
+ if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
transport_copy_sense_to_cmd(se_cmd, entry->rsp.sense_buffer);
- } else if (se_cmd->se_cmd_flags & SCF_BIDI) {
+ if (!read_len_valid )
+ goto done;
+ else
+ se_cmd->se_cmd_flags |= SCF_TREAT_READ_AS_NORMAL;
+ }
+ if (se_cmd->se_cmd_flags & SCF_BIDI) {
/* Get Data-In buffer before clean up */
- gather_data_area(udev, cmd, true);
+ gather_data_area(udev, cmd, true, read_len);
} else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
- gather_data_area(udev, cmd, false);
+ gather_data_area(udev, cmd, false, read_len);
} else if (se_cmd->data_direction == DMA_TO_DEVICE) {
/* TODO: */
} else if (se_cmd->data_direction != DMA_NONE) {
@@ -1070,7 +1092,13 @@ static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *
se_cmd->data_direction);
}
- target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
+done:
+ if (read_len_valid) {
+ pr_debug("read_len = %d\n", read_len);
+ target_complete_cmd_with_length(cmd->se_cmd,
+ entry->rsp.scsi_status, read_len);
+ } else
+ target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
out:
cmd->se_cmd = NULL;
@@ -1740,7 +1768,7 @@ static int tcmu_configure_device(struct se_device *dev)
/* Initialise the mailbox of the ring buffer */
mb = udev->mb_addr;
mb->version = TCMU_MAILBOX_VERSION;
- mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC;
+ mb->flags = TCMU_MAILBOX_FLAG_CAP_OOOC | TCMU_MAILBOX_FLAG_CAP_READ_LEN;
mb->cmdr_off = CMDR_OFF;
mb->cmdr_size = udev->cmdr_size;
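The tcmu change advertises TCMU_MAILBOX_FLAG_CAP_READ_LEN and, when userspace reports a short read, clamps how much of the data area is gathered back into the scatterlist before completing with target_complete_cmd_with_length(). The heart of it is a copy loop that never moves more than the remaining read length; a self-contained sketch of that loop shape in plain C:

	/* Sketch of the clamped gather: copy per block, but never more than the
	 * read length userspace reported.  BLOCK_SIZE and the flat buffers are
	 * placeholders for tcmu's data-area blocks and scatterlist pages.
	 */
	#include <string.h>

	#define BLOCK_SIZE 4096u

	static size_t gather_clamped(char *dst, size_t dst_len,
				     const char *blocks, size_t read_len)
	{
		size_t done = 0;

		while (done < dst_len && read_len > 0) {
			size_t chunk = dst_len - done;

			if (chunk > BLOCK_SIZE)
				chunk = BLOCK_SIZE;
			if (chunk > read_len)
				chunk = read_len;	/* clamp to the valid bytes */
			memcpy(dst + done, blocks + done, chunk);
			done += chunk;
			read_len -= chunk;
		}
		return done;	/* bytes actually gathered */
	}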
diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c
index 6281266b8ec0..a923ebdeb73c 100644
--- a/drivers/thunderbolt/domain.c
+++ b/drivers/thunderbolt/domain.c
@@ -213,6 +213,10 @@ static ssize_t boot_acl_store(struct device *dev, struct device_attribute *attr,
goto err_free_acl;
}
ret = tb->cm_ops->set_boot_acl(tb, acl, tb->nboot_acl);
+ if (!ret) {
+ /* Notify userspace about the change */
+ kobject_uevent(&tb->dev.kobj, KOBJ_CHANGE);
+ }
mutex_unlock(&tb->lock);
err_free_acl:
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cbe98bc2b998..431742201709 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -124,6 +124,8 @@ struct n_tty_data {
struct mutex output_lock;
};
+#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+
static inline size_t read_cnt(struct n_tty_data *ldata)
{
return ldata->read_head - ldata->read_tail;
@@ -141,6 +143,7 @@ static inline unsigned char *read_buf_addr(struct n_tty_data *ldata, size_t i)
static inline unsigned char echo_buf(struct n_tty_data *ldata, size_t i)
{
+ smp_rmb(); /* Matches smp_wmb() in add_echo_byte(). */
return ldata->echo_buf[i & (N_TTY_BUF_SIZE - 1)];
}
@@ -316,9 +319,7 @@ static inline void put_tty_queue(unsigned char c, struct n_tty_data *ldata)
static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
- ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
ldata->commit_head = 0;
- ldata->echo_mark = 0;
ldata->line_start = 0;
ldata->erasing = 0;
@@ -617,13 +618,20 @@ static size_t __process_echoes(struct tty_struct *tty)
old_space = space = tty_write_room(tty);
tail = ldata->echo_tail;
- while (ldata->echo_commit != tail) {
+ while (MASK(ldata->echo_commit) != MASK(tail)) {
c = echo_buf(ldata, tail);
if (c == ECHO_OP_START) {
unsigned char op;
int no_space_left = 0;
/*
+ * Since add_echo_byte() is called without holding
+ * output_lock, we might see only a portion of a multi-byte
+ * operation.
+ */
+ if (MASK(ldata->echo_commit) == MASK(tail + 1))
+ goto not_yet_stored;
+ /*
* If the buffer byte is the start of a multi-byte
* operation, get the next byte, which is either the
* op code or a control character value.
@@ -634,6 +642,8 @@ static size_t __process_echoes(struct tty_struct *tty)
unsigned int num_chars, num_bs;
case ECHO_OP_ERASE_TAB:
+ if (MASK(ldata->echo_commit) == MASK(tail + 2))
+ goto not_yet_stored;
num_chars = echo_buf(ldata, tail + 2);
/*
@@ -728,7 +738,8 @@ static size_t __process_echoes(struct tty_struct *tty)
/* If the echo buffer is nearly full (so that the possibility exists
* of echo overrun before the next commit), then discard enough
* data at the tail to prevent a subsequent overrun */
- while (ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
+ while (ldata->echo_commit > tail &&
+ ldata->echo_commit - tail >= ECHO_DISCARD_WATERMARK) {
if (echo_buf(ldata, tail) == ECHO_OP_START) {
if (echo_buf(ldata, tail + 1) == ECHO_OP_ERASE_TAB)
tail += 3;
@@ -738,6 +749,7 @@ static size_t __process_echoes(struct tty_struct *tty)
tail++;
}
+ not_yet_stored:
ldata->echo_tail = tail;
return old_space - space;
}
@@ -748,6 +760,7 @@ static void commit_echoes(struct tty_struct *tty)
size_t nr, old, echoed;
size_t head;
+ mutex_lock(&ldata->output_lock);
head = ldata->echo_head;
ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
@@ -756,10 +769,12 @@ static void commit_echoes(struct tty_struct *tty)
* is over the threshold (and try again each time another
* block is accumulated) */
nr = head - ldata->echo_tail;
- if (nr < ECHO_COMMIT_WATERMARK || (nr % ECHO_BLOCK > old % ECHO_BLOCK))
+ if (nr < ECHO_COMMIT_WATERMARK ||
+ (nr % ECHO_BLOCK > old % ECHO_BLOCK)) {
+ mutex_unlock(&ldata->output_lock);
return;
+ }
- mutex_lock(&ldata->output_lock);
ldata->echo_commit = head;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
@@ -810,7 +825,9 @@ static void flush_echoes(struct tty_struct *tty)
static inline void add_echo_byte(unsigned char c, struct n_tty_data *ldata)
{
- *echo_buf_addr(ldata, ldata->echo_head++) = c;
+ *echo_buf_addr(ldata, ldata->echo_head) = c;
+ smp_wmb(); /* Matches smp_rmb() in echo_buf(). */
+ ldata->echo_head++;
}
/**
@@ -978,14 +995,15 @@ static void eraser(unsigned char c, struct tty_struct *tty)
}
seen_alnums = 0;
- while (ldata->read_head != ldata->canon_head) {
+ while (MASK(ldata->read_head) != MASK(ldata->canon_head)) {
head = ldata->read_head;
/* erase a single possibly multibyte character */
do {
head--;
c = read_buf(ldata, head);
- } while (is_continuation(c, tty) && head != ldata->canon_head);
+ } while (is_continuation(c, tty) &&
+ MASK(head) != MASK(ldata->canon_head));
/* do not partially erase */
if (is_continuation(c, tty))
@@ -1027,7 +1045,7 @@ static void eraser(unsigned char c, struct tty_struct *tty)
* This info is used to go back the correct
* number of columns.
*/
- while (tail != ldata->canon_head) {
+ while (MASK(tail) != MASK(ldata->canon_head)) {
tail--;
c = read_buf(ldata, tail);
if (c == '\t') {
@@ -1302,7 +1320,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
finish_erasing(ldata);
echo_char(c, tty);
echo_char_raw('\n', ldata);
- while (tail != ldata->read_head) {
+ while (MASK(tail) != MASK(ldata->read_head)) {
echo_char(read_buf(ldata, tail), tty);
tail++;
}
@@ -1878,30 +1896,21 @@ static int n_tty_open(struct tty_struct *tty)
struct n_tty_data *ldata;
/* Currently a malloc failure here can panic */
- ldata = vmalloc(sizeof(*ldata));
+ ldata = vzalloc(sizeof(*ldata));
if (!ldata)
- goto err;
+ return -ENOMEM;
ldata->overrun_time = jiffies;
mutex_init(&ldata->atomic_read_lock);
mutex_init(&ldata->output_lock);
tty->disc_data = ldata;
- reset_buffer_flags(tty->disc_data);
- ldata->column = 0;
- ldata->canon_column = 0;
- ldata->num_overrun = 0;
- ldata->no_room = 0;
- ldata->lnext = 0;
tty->closing = 0;
/* indicate buffer work may resume */
clear_bit(TTY_LDISC_HALTED, &tty->flags);
n_tty_set_termios(tty, NULL);
tty_unthrottle(tty);
-
return 0;
-err:
- return -ENOMEM;
}
static inline int input_available_p(struct tty_struct *tty, int poll)
@@ -2411,7 +2420,7 @@ static unsigned long inq_canon(struct n_tty_data *ldata)
tail = ldata->read_tail;
nr = head - tail;
/* Skip EOF-chars.. */
- while (head != tail) {
+ while (MASK(head) != MASK(tail)) {
if (test_bit(tail & (N_TTY_BUF_SIZE - 1), ldata->read_flags) &&
read_buf(ldata, tail) == __DISABLED_CHAR)
nr--;
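Two ideas recur through the n_tty hunks. First, head/tail indices are free-running and only reduced modulo the buffer size at access time, so comparisons now go through MASK() to stay correct after a wrap or after reset_buffer_flags() stops zeroing the echo counters. Second, add_echo_byte() publishes the byte before advancing echo_head, with an smp_wmb()/smp_rmb() pair so a concurrent reader never observes the new index before the data. A condensed sketch of that producer/consumer pairing with a hypothetical buffer:

	/* Sketch: 'ebuf', 'ehead', 'etail' are placeholders; the barriers are the
	 * real kernel primitives.  Indices run free; MASK() wraps on access.
	 */
	#define BUF_SIZE 4096			/* power of two */
	#define MASK(x)  ((x) & (BUF_SIZE - 1))

	static unsigned char ebuf[BUF_SIZE];
	static size_t ehead, etail;

	static void producer_add(unsigned char c)
	{
		ebuf[MASK(ehead)] = c;		/* 1: store the data          */
		smp_wmb();			/* 2: order data before index */
		ehead++;			/* 3: publish the new head    */
	}

	static int consumer_get(unsigned char *c)
	{
		if (MASK(etail) == MASK(ehead))
			return 0;		/* nothing published yet */
		smp_rmb();			/* pairs with smp_wmb() above */
		*c = ebuf[MASK(etail++)];
		return 1;
	}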
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index df93b727e984..9e59f4788589 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -617,6 +617,7 @@ EXPORT_SYMBOL_GPL(__serdev_device_driver_register);
static void __exit serdev_exit(void)
{
bus_unregister(&serdev_bus_type);
+ ida_destroy(&ctrl_ida);
}
module_exit(serdev_exit);
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 3296a05cda2d..f80a300b5d68 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -3339,9 +3339,7 @@ static const struct pci_device_id blacklist[] = {
/* multi-io cards handled by parport_serial */
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
- { PCI_DEVICE(0x4348, 0x7173), }, /* WCH CH355 4S */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
- { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
/* Moxa Smartio MUE boards handled by 8250_moxa */
{ PCI_VDEVICE(MOXA, 0x1024), },
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 1eb1a376a041..15eb6c829d39 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -784,7 +784,7 @@ int vc_allocate(unsigned int currcons) /* return 0 on success */
if (!*vc->vc_uni_pagedir_loc)
con_set_default_unimap(vc);
- vc->vc_screenbuf = kmalloc(vc->vc_screenbuf_size, GFP_KERNEL);
+ vc->vc_screenbuf = kzalloc(vc->vc_screenbuf_size, GFP_KERNEL);
if (!vc->vc_screenbuf)
goto err_free;
@@ -871,7 +871,7 @@ static int vc_do_resize(struct tty_struct *tty, struct vc_data *vc,
if (new_screen_size > (4 << 20))
return -EINVAL;
- newscreen = kmalloc(new_screen_size, GFP_USER);
+ newscreen = kzalloc(new_screen_size, GFP_USER);
if (!newscreen)
return -ENOMEM;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index e8f4ac9400ea..5d421d7e8904 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -215,7 +215,20 @@ static ssize_t name_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", idev->info->name);
+ int ret;
+
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = -EINVAL;
+ dev_err(dev, "the device has been unregistered\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", idev->info->name);
+
+out:
+ mutex_unlock(&idev->info_lock);
+ return ret;
}
static DEVICE_ATTR_RO(name);
@@ -223,7 +236,20 @@ static ssize_t version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct uio_device *idev = dev_get_drvdata(dev);
- return sprintf(buf, "%s\n", idev->info->version);
+ int ret;
+
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = -EINVAL;
+ dev_err(dev, "the device has been unregistered\n");
+ goto out;
+ }
+
+ ret = sprintf(buf, "%s\n", idev->info->version);
+
+out:
+ mutex_unlock(&idev->info_lock);
+ return ret;
}
static DEVICE_ATTR_RO(version);
@@ -415,11 +441,15 @@ EXPORT_SYMBOL_GPL(uio_event_notify);
static irqreturn_t uio_interrupt(int irq, void *dev_id)
{
struct uio_device *idev = (struct uio_device *)dev_id;
- irqreturn_t ret = idev->info->handler(irq, idev->info);
+ irqreturn_t ret;
+ mutex_lock(&idev->info_lock);
+
+ ret = idev->info->handler(irq, idev->info);
if (ret == IRQ_HANDLED)
uio_event_notify(idev->info);
+ mutex_unlock(&idev->info_lock);
return ret;
}
@@ -433,7 +463,6 @@ static int uio_open(struct inode *inode, struct file *filep)
struct uio_device *idev;
struct uio_listener *listener;
int ret = 0;
- unsigned long flags;
mutex_lock(&minor_lock);
idev = idr_find(&uio_idr, iminor(inode));
@@ -460,10 +489,16 @@ static int uio_open(struct inode *inode, struct file *filep)
listener->event_count = atomic_read(&idev->event);
filep->private_data = listener;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ mutex_unlock(&idev->info_lock);
+ ret = -EINVAL;
+ goto err_alloc_listener;
+ }
+
if (idev->info && idev->info->open)
ret = idev->info->open(idev->info, inode);
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
if (ret)
goto err_infoopen;
@@ -495,12 +530,11 @@ static int uio_release(struct inode *inode, struct file *filep)
int ret = 0;
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
if (idev->info && idev->info->release)
ret = idev->info->release(idev->info, inode);
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
module_put(idev->owner);
kfree(listener);
@@ -513,12 +547,11 @@ static __poll_t uio_poll(struct file *filep, poll_table *wait)
struct uio_listener *listener = filep->private_data;
struct uio_device *idev = listener->dev;
__poll_t ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
ret = -EIO;
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
if (ret)
return ret;
@@ -537,12 +570,11 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
DECLARE_WAITQUEUE(wait, current);
ssize_t retval = 0;
s32 event_count;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
if (!idev->info || !idev->info->irq)
retval = -EIO;
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
if (retval)
return retval;
@@ -592,9 +624,13 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
struct uio_device *idev = listener->dev;
ssize_t retval;
s32 irq_on;
- unsigned long flags;
- spin_lock_irqsave(&idev->info_lock, flags);
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ retval = -EINVAL;
+ goto out;
+ }
+
if (!idev->info || !idev->info->irq) {
retval = -EIO;
goto out;
@@ -618,7 +654,7 @@ static ssize_t uio_write(struct file *filep, const char __user *buf,
retval = idev->info->irqcontrol(idev->info, irq_on);
out:
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
return retval ? retval : sizeof(s32);
}
@@ -640,10 +676,20 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
struct page *page;
unsigned long offset;
void *addr;
+ int ret = 0;
+ int mi;
- int mi = uio_find_mem_index(vmf->vma);
- if (mi < 0)
- return VM_FAULT_SIGBUS;
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
+
+ mi = uio_find_mem_index(vmf->vma);
+ if (mi < 0) {
+ ret = VM_FAULT_SIGBUS;
+ goto out;
+ }
/*
* We need to subtract mi because userspace uses offset = N*PAGE_SIZE
@@ -658,7 +704,11 @@ static vm_fault_t uio_vma_fault(struct vm_fault *vmf)
page = vmalloc_to_page(addr);
get_page(page);
vmf->page = page;
- return 0;
+
+out:
+ mutex_unlock(&idev->info_lock);
+
+ return ret;
}
static const struct vm_operations_struct uio_logical_vm_ops = {
@@ -683,6 +733,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
struct uio_device *idev = vma->vm_private_data;
int mi = uio_find_mem_index(vma);
struct uio_mem *mem;
+
if (mi < 0)
return -EINVAL;
mem = idev->info->mem + mi;
@@ -724,30 +775,46 @@ static int uio_mmap(struct file *filep, struct vm_area_struct *vma)
vma->vm_private_data = idev;
+ mutex_lock(&idev->info_lock);
+ if (!idev->info) {
+ ret = -EINVAL;
+ goto out;
+ }
+
mi = uio_find_mem_index(vma);
- if (mi < 0)
- return -EINVAL;
+ if (mi < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
requested_pages = vma_pages(vma);
actual_pages = ((idev->info->mem[mi].addr & ~PAGE_MASK)
+ idev->info->mem[mi].size + PAGE_SIZE -1) >> PAGE_SHIFT;
- if (requested_pages > actual_pages)
- return -EINVAL;
+ if (requested_pages > actual_pages) {
+ ret = -EINVAL;
+ goto out;
+ }
if (idev->info->mmap) {
ret = idev->info->mmap(idev->info, vma);
- return ret;
+ goto out;
}
switch (idev->info->mem[mi].memtype) {
case UIO_MEM_PHYS:
- return uio_mmap_physical(vma);
+ ret = uio_mmap_physical(vma);
+ break;
case UIO_MEM_LOGICAL:
case UIO_MEM_VIRTUAL:
- return uio_mmap_logical(vma);
+ ret = uio_mmap_logical(vma);
+ break;
default:
- return -EINVAL;
+ ret = -EINVAL;
}
+
+out:
+ mutex_unlock(&idev->info_lock);
+ return ret;
}
static const struct file_operations uio_fops = {
@@ -865,7 +932,7 @@ int __uio_register_device(struct module *owner,
idev->owner = owner;
idev->info = info;
- spin_lock_init(&idev->info_lock);
+ mutex_init(&idev->info_lock);
init_waitqueue_head(&idev->wait);
atomic_set(&idev->event, 0);
@@ -902,8 +969,9 @@ int __uio_register_device(struct module *owner,
* FDs at the time of unregister and therefore may not be
* freed until they are released.
*/
- ret = request_irq(info->irq, uio_interrupt,
- info->irq_flags, info->name, idev);
+ ret = request_threaded_irq(info->irq, NULL, uio_interrupt,
+ info->irq_flags, info->name, idev);
+
if (ret)
goto err_request_irq;
}
@@ -928,7 +996,6 @@ EXPORT_SYMBOL_GPL(__uio_register_device);
void uio_unregister_device(struct uio_info *info)
{
struct uio_device *idev;
- unsigned long flags;
if (!info || !info->uio_dev)
return;
@@ -937,14 +1004,14 @@ void uio_unregister_device(struct uio_info *info)
uio_free_minor(idev);
+ mutex_lock(&idev->info_lock);
uio_dev_del_attributes(idev);
if (info->irq && info->irq != UIO_IRQ_CUSTOM)
free_irq(info->irq, idev);
- spin_lock_irqsave(&idev->info_lock, flags);
idev->info = NULL;
- spin_unlock_irqrestore(&idev->info_lock, flags);
+ mutex_unlock(&idev->info_lock);
device_unregister(&idev->dev);
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 785f0ed037f7..ee34e9046f7e 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -3,6 +3,7 @@ config USB_CHIPIDEA
depends on ((USB_EHCI_HCD && USB_GADGET) || (USB_EHCI_HCD && !USB_GADGET) || (!USB_EHCI_HCD && USB_GADGET)) && HAS_DMA
select EXTCON
select RESET_CONTROLLER
+ select USB_ULPI_BUS
help
Say Y here if your system has a dual role high speed USB
controller based on ChipIdea silicon IP. It supports:
@@ -38,12 +39,4 @@ config USB_CHIPIDEA_HOST
help
Say Y here to enable host controller functionality of the
ChipIdea driver.
-
-config USB_CHIPIDEA_ULPI
- bool "ChipIdea ULPI PHY support"
- depends on USB_ULPI_BUS=y || USB_ULPI_BUS=USB_CHIPIDEA
- help
- Say Y here if you have a ULPI PHY attached to your ChipIdea
- controller.
-
endif
diff --git a/drivers/usb/chipidea/Makefile b/drivers/usb/chipidea/Makefile
index e3d5e728fa53..12df94f78f72 100644
--- a/drivers/usb/chipidea/Makefile
+++ b/drivers/usb/chipidea/Makefile
@@ -1,11 +1,10 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_USB_CHIPIDEA) += ci_hdrc.o
-ci_hdrc-y := core.o otg.o debug.o
+ci_hdrc-y := core.o otg.o debug.o ulpi.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_UDC) += udc.o
ci_hdrc-$(CONFIG_USB_CHIPIDEA_HOST) += host.o
ci_hdrc-$(CONFIG_USB_OTG_FSM) += otg_fsm.o
-ci_hdrc-$(CONFIG_USB_CHIPIDEA_ULPI) += ulpi.o
# Glue/Bridge layers go here
diff --git a/drivers/usb/chipidea/ci.h b/drivers/usb/chipidea/ci.h
index 0bf244d50544..6a2cc5cd0281 100644
--- a/drivers/usb/chipidea/ci.h
+++ b/drivers/usb/chipidea/ci.h
@@ -240,10 +240,8 @@ struct ci_hdrc {
struct ci_hdrc_platform_data *platdata;
int vbus_active;
-#ifdef CONFIG_USB_CHIPIDEA_ULPI
struct ulpi *ulpi;
struct ulpi_ops ulpi_ops;
-#endif
struct phy *phy;
/* old usb_phy interface */
struct usb_phy *usb_phy;
@@ -426,15 +424,9 @@ static inline bool ci_otg_is_fsm_mode(struct ci_hdrc *ci)
#endif
}
-#if IS_ENABLED(CONFIG_USB_CHIPIDEA_ULPI)
int ci_ulpi_init(struct ci_hdrc *ci);
void ci_ulpi_exit(struct ci_hdrc *ci);
int ci_ulpi_resume(struct ci_hdrc *ci);
-#else
-static inline int ci_ulpi_init(struct ci_hdrc *ci) { return 0; }
-static inline void ci_ulpi_exit(struct ci_hdrc *ci) { }
-static inline int ci_ulpi_resume(struct ci_hdrc *ci) { return 0; }
-#endif
u32 hw_read_intr_enable(struct ci_hdrc *ci);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index af45aa3222b5..4638d9b066be 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -124,8 +124,11 @@ static int host_start(struct ci_hdrc *ci)
hcd->power_budget = ci->platdata->power_budget;
hcd->tpl_support = ci->platdata->tpl_support;
- if (ci->phy || ci->usb_phy)
+ if (ci->phy || ci->usb_phy) {
hcd->skip_phy_initialization = 1;
+ if (ci->usb_phy)
+ hcd->usb_phy = ci->usb_phy;
+ }
ehci = hcd_to_ehci(hcd);
ehci->caps = ci->hw_bank.cap;
diff --git a/drivers/usb/chipidea/ulpi.c b/drivers/usb/chipidea/ulpi.c
index 6da42dcd2888..dfec07e8ae1d 100644
--- a/drivers/usb/chipidea/ulpi.c
+++ b/drivers/usb/chipidea/ulpi.c
@@ -95,6 +95,9 @@ int ci_ulpi_resume(struct ci_hdrc *ci)
{
int cnt = 100000;
+ if (ci->platdata->phy_mode != USBPHY_INTERFACE_MODE_ULPI)
+ return 0;
+
while (cnt-- > 0) {
if (hw_read(ci, OP_ULPI_VIEWPORT, ULPI_SYNC_STATE))
return 0;
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 7b366a6c0b49..75c4623ad779 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1758,6 +1758,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x11ca, 0x0201), /* VeriFone Mx870 Gadget Serial */
.driver_info = SINGLE_RX_URB,
},
+ { USB_DEVICE(0x1965, 0x0018), /* Uniden UBC125XLT */
+ .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
+ },
{ USB_DEVICE(0x22b8, 0x7000), /* Motorola Q Phone */
.driver_info = NO_UNION_NORMAL, /* has no union descriptor */
},
@@ -1828,6 +1831,9 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x09d8, 0x0320), /* Elatec GmbH TWN3 */
.driver_info = NO_UNION_NORMAL, /* has misplaced union descriptor */
},
+ { USB_DEVICE(0x0ca6, 0xa050), /* Castles VEGA3000 */
+ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */
+ },
{ USB_DEVICE(0x2912, 0x0001), /* ATOL FPrint */
.driver_info = CLEAR_HALT_CONDITIONS,
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index fcae521df29b..1fb266809966 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -1142,10 +1142,14 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
if (!udev || udev->state == USB_STATE_NOTATTACHED) {
/* Tell hub_wq to disconnect the device or
- * check for a new connection
+ * check for a new connection or over current condition.
+ * Based on USB2.0 Spec Section 11.12.5,
+ * C_PORT_OVER_CURRENT could be set while
+ * PORT_OVER_CURRENT is not. So check for any of them.
*/
if (udev || (portstatus & USB_PORT_STAT_CONNECTION) ||
- (portstatus & USB_PORT_STAT_OVERCURRENT))
+ (portstatus & USB_PORT_STAT_OVERCURRENT) ||
+ (portchange & USB_PORT_STAT_C_OVERCURRENT))
set_bit(port1, hub->change_bits);
} else if (portstatus & USB_PORT_STAT_ENABLE) {
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index c55def2f1320..097057d2eacf 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -378,6 +378,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Corsair K70 RGB */
{ USB_DEVICE(0x1b1c, 0x1b13), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Corsair Strafe */
+ { USB_DEVICE(0x1b1c, 0x1b15), .driver_info = USB_QUIRK_DELAY_INIT |
+ USB_QUIRK_DELAY_CTRL_MSG },
+
/* Corsair Strafe RGB */
{ USB_DEVICE(0x1b1c, 0x1b20), .driver_info = USB_QUIRK_DELAY_INIT |
USB_QUIRK_DELAY_CTRL_MSG },
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 4a56ac772a3c..71b3b08ad516 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -1004,6 +1004,7 @@ struct dwc2_hregs_backup {
* @frame_list_sz: Frame list size
* @desc_gen_cache: Kmem cache for generic descriptors
* @desc_hsisoc_cache: Kmem cache for hs isochronous descriptors
+ * @unaligned_cache: Kmem cache for DMA mode to handle non-aligned buf
*
* These are for peripheral mode:
*
@@ -1177,6 +1178,8 @@ struct dwc2_hsotg {
u32 frame_list_sz;
struct kmem_cache *desc_gen_cache;
struct kmem_cache *desc_hsisoc_cache;
+ struct kmem_cache *unaligned_cache;
+#define DWC2_KMEM_UNALIGNED_BUF_SIZE 1024
#endif /* CONFIG_USB_DWC2_HOST || CONFIG_USB_DWC2_DUAL_ROLE */
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index f0d9ccf1d665..cefc99ae69b2 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -812,6 +812,7 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
u32 index;
u32 maxsize = 0;
u32 mask = 0;
+ u8 pid = 0;
maxsize = dwc2_gadget_get_desc_params(hs_ep, &mask);
@@ -840,7 +841,11 @@ static int dwc2_gadget_fill_isoc_desc(struct dwc2_hsotg_ep *hs_ep,
((len << DEV_DMA_NBYTES_SHIFT) & mask));
if (hs_ep->dir_in) {
- desc->status |= ((hs_ep->mc << DEV_DMA_ISOC_PID_SHIFT) &
+ if (len)
+ pid = DIV_ROUND_UP(len, hs_ep->ep.maxpacket);
+ else
+ pid = 1;
+ desc->status |= ((pid << DEV_DMA_ISOC_PID_SHIFT) &
DEV_DMA_ISOC_PID_MASK) |
((len % hs_ep->ep.maxpacket) ?
DEV_DMA_SHORT : 0) |
@@ -884,6 +889,7 @@ static void dwc2_gadget_start_isoc_ddma(struct dwc2_hsotg_ep *hs_ep)
struct dwc2_dma_desc *desc;
if (list_empty(&hs_ep->queue)) {
+ hs_ep->target_frame = TARGET_FRAME_INITIAL;
dev_dbg(hsotg->dev, "%s: No requests in queue\n", __func__);
return;
}
@@ -2755,8 +2761,6 @@ static void dwc2_gadget_handle_out_token_ep_disabled(struct dwc2_hsotg_ep *ep)
*/
tmp = dwc2_hsotg_read_frameno(hsotg);
- dwc2_hsotg_complete_request(hsotg, ep, get_ep_head(ep), 0);
-
if (using_desc_dma(hsotg)) {
if (ep->target_frame == TARGET_FRAME_INITIAL) {
/* Start first ISO Out */
@@ -2817,9 +2821,6 @@ static void dwc2_gadget_handle_nak(struct dwc2_hsotg_ep *hs_ep)
tmp = dwc2_hsotg_read_frameno(hsotg);
if (using_desc_dma(hsotg)) {
- dwc2_hsotg_complete_request(hsotg, hs_ep,
- get_ep_head(hs_ep), 0);
-
hs_ep->target_frame = tmp;
dwc2_gadget_incr_frame_num(hs_ep);
dwc2_gadget_start_isoc_ddma(hs_ep);
@@ -3429,7 +3430,7 @@ static void dwc2_gadget_handle_incomplete_isoc_in(struct dwc2_hsotg *hsotg)
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_in[idx];
/* Proceed only unmasked ISOC EPs */
- if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
epctrl = dwc2_readl(hsotg->regs + DIEPCTL(idx));
@@ -3475,7 +3476,7 @@ static void dwc2_gadget_handle_incomplete_isoc_out(struct dwc2_hsotg *hsotg)
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only unmasked ISOC EPs */
- if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -3649,7 +3650,7 @@ irq_retry:
for (idx = 1; idx < hsotg->num_of_eps; idx++) {
hs_ep = hsotg->eps_out[idx];
/* Proceed only unmasked ISOC EPs */
- if (!hs_ep->isochronous || (BIT(idx) & ~daintmsk))
+ if ((BIT(idx) & ~daintmsk) || !hs_ep->isochronous)
continue;
epctrl = dwc2_readl(hsotg->regs + DOEPCTL(idx));
@@ -4739,9 +4740,11 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
}
ret = usb_add_gadget_udc(dev, &hsotg->gadget);
- if (ret)
+ if (ret) {
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep,
+ hsotg->ctrl_req);
return ret;
-
+ }
dwc2_hsotg_dump(hsotg);
return 0;
@@ -4755,6 +4758,7 @@ int dwc2_gadget_init(struct dwc2_hsotg *hsotg)
int dwc2_hsotg_remove(struct dwc2_hsotg *hsotg)
{
usb_del_gadget_udc(&hsotg->gadget);
+ dwc2_hsotg_ep_free_request(&hsotg->eps_out[0]->ep, hsotg->ctrl_req);
return 0;
}
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index edaf0b6af4f0..6e2cdd7b93d4 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1567,11 +1567,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
}
if (hsotg->params.host_dma) {
- dwc2_writel((u32)chan->xfer_dma,
- hsotg->regs + HCDMA(chan->hc_num));
+ dma_addr_t dma_addr;
+
+ if (chan->align_buf) {
+ if (dbg_hc(chan))
+ dev_vdbg(hsotg->dev, "align_buf\n");
+ dma_addr = chan->align_buf;
+ } else {
+ dma_addr = chan->xfer_dma;
+ }
+ dwc2_writel((u32)dma_addr, hsotg->regs + HCDMA(chan->hc_num));
+
if (dbg_hc(chan))
dev_vdbg(hsotg->dev, "Wrote %08lx to HCDMA(%d)\n",
- (unsigned long)chan->xfer_dma, chan->hc_num);
+ (unsigned long)dma_addr, chan->hc_num);
}
/* Start the split */
@@ -2625,36 +2634,66 @@ static void dwc2_hc_init_xfer(struct dwc2_hsotg *hsotg,
}
}
-#define DWC2_USB_DMA_ALIGN 4
+static int dwc2_alloc_split_dma_aligned_buf(struct dwc2_hsotg *hsotg,
+ struct dwc2_qh *qh,
+ struct dwc2_host_chan *chan)
+{
+ if (!hsotg->unaligned_cache ||
+ chan->max_packet > DWC2_KMEM_UNALIGNED_BUF_SIZE)
+ return -ENOMEM;
-struct dma_aligned_buffer {
- void *kmalloc_ptr;
- void *old_xfer_buffer;
- u8 data[0];
-};
+ if (!qh->dw_align_buf) {
+ qh->dw_align_buf = kmem_cache_alloc(hsotg->unaligned_cache,
+ GFP_ATOMIC | GFP_DMA);
+ if (!qh->dw_align_buf)
+ return -ENOMEM;
+ }
+
+ qh->dw_align_buf_dma = dma_map_single(hsotg->dev, qh->dw_align_buf,
+ DWC2_KMEM_UNALIGNED_BUF_SIZE,
+ DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(hsotg->dev, qh->dw_align_buf_dma)) {
+ dev_err(hsotg->dev, "can't map align_buf\n");
+ chan->align_buf = 0;
+ return -EINVAL;
+ }
+
+ chan->align_buf = qh->dw_align_buf_dma;
+ return 0;
+}
+
+#define DWC2_USB_DMA_ALIGN 4
static void dwc2_free_dma_aligned_buffer(struct urb *urb)
{
- struct dma_aligned_buffer *temp;
+ void *stored_xfer_buffer;
+ size_t length;
if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
return;
- temp = container_of(urb->transfer_buffer,
- struct dma_aligned_buffer, data);
+ /* Restore urb->transfer_buffer from the end of the allocated area */
+ memcpy(&stored_xfer_buffer, urb->transfer_buffer +
+ urb->transfer_buffer_length, sizeof(urb->transfer_buffer));
- if (usb_urb_dir_in(urb))
- memcpy(temp->old_xfer_buffer, temp->data,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->old_xfer_buffer;
- kfree(temp->kmalloc_ptr);
+ if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(stored_xfer_buffer, urb->transfer_buffer, length);
+ }
+ kfree(urb->transfer_buffer);
+ urb->transfer_buffer = stored_xfer_buffer;
urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
}
static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
{
- struct dma_aligned_buffer *temp, *kmalloc_ptr;
+ void *kmalloc_ptr;
size_t kmalloc_size;
if (urb->num_sgs || urb->sg ||
@@ -2662,22 +2701,29 @@ static int dwc2_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
!((uintptr_t)urb->transfer_buffer & (DWC2_USB_DMA_ALIGN - 1)))
return 0;
- /* Allocate a buffer with enough padding for alignment */
+ /*
+ * Allocate a buffer with enough padding for the original transfer_buffer
+ * pointer. This allocation is guaranteed to be aligned properly for
+ * DMA.
+ */
kmalloc_size = urb->transfer_buffer_length +
- sizeof(struct dma_aligned_buffer) + DWC2_USB_DMA_ALIGN - 1;
+ sizeof(urb->transfer_buffer);
kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
if (!kmalloc_ptr)
return -ENOMEM;
- /* Position our struct dma_aligned_buffer such that data is aligned */
- temp = PTR_ALIGN(kmalloc_ptr + 1, DWC2_USB_DMA_ALIGN) - 1;
- temp->kmalloc_ptr = kmalloc_ptr;
- temp->old_xfer_buffer = urb->transfer_buffer;
+ /*
+ * Store the original urb->transfer_buffer pointer at the end of the
+ * allocation so it can be restored later
+ */
+ memcpy(kmalloc_ptr + urb->transfer_buffer_length,
+ &urb->transfer_buffer, sizeof(urb->transfer_buffer));
+
if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
+ memcpy(kmalloc_ptr, urb->transfer_buffer,
urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
+ urb->transfer_buffer = kmalloc_ptr;
urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
@@ -2802,6 +2848,32 @@ static int dwc2_assign_and_init_hc(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
/* Set the transfer attributes */
dwc2_hc_init_xfer(hsotg, chan, qtd);
+ /* For non-dword aligned buffers */
+ if (hsotg->params.host_dma && qh->do_split &&
+ chan->ep_is_in && (chan->xfer_dma & 0x3)) {
+ dev_vdbg(hsotg->dev, "Non-aligned buffer\n");
+ if (dwc2_alloc_split_dma_aligned_buf(hsotg, qh, chan)) {
+ dev_err(hsotg->dev,
+ "Failed to allocate memory to handle non-aligned buffer\n");
+ /* Add channel back to free list */
+ chan->align_buf = 0;
+ chan->multi_count = 0;
+ list_add_tail(&chan->hc_list_entry,
+ &hsotg->free_hc_list);
+ qtd->in_process = 0;
+ qh->channel = NULL;
+ return -ENOMEM;
+ }
+ } else {
+ /*
+ * We assume that DMA is always aligned in non-split
+ * case or split out case. Warn if not.
+ */
+ WARN_ON_ONCE(hsotg->params.host_dma &&
+ (chan->xfer_dma & 0x3));
+ chan->align_buf = 0;
+ }
+
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
@@ -5246,6 +5318,19 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
}
}
+ if (hsotg->params.host_dma) {
+ /*
+ * Create a kmem cache to handle non-aligned buffers
+ * in Buffer DMA mode.
+ */
+ hsotg->unaligned_cache = kmem_cache_create("dwc2-unaligned-dma",
+ DWC2_KMEM_UNALIGNED_BUF_SIZE, 4,
+ SLAB_CACHE_DMA, NULL);
+ if (!hsotg->unaligned_cache)
+ dev_err(hsotg->dev,
+ "unable to create dwc2 unaligned cache\n");
+ }
+
hsotg->otg_port = 1;
hsotg->frame_list = NULL;
hsotg->frame_list_dma = 0;
@@ -5280,8 +5365,9 @@ int dwc2_hcd_init(struct dwc2_hsotg *hsotg)
return 0;
error4:
- kmem_cache_destroy(hsotg->desc_gen_cache);
+ kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+ kmem_cache_destroy(hsotg->desc_gen_cache);
error3:
dwc2_hcd_release(hsotg);
error2:
@@ -5322,8 +5408,9 @@ void dwc2_hcd_remove(struct dwc2_hsotg *hsotg)
usb_remove_hcd(hcd);
hsotg->priv = NULL;
- kmem_cache_destroy(hsotg->desc_gen_cache);
+ kmem_cache_destroy(hsotg->unaligned_cache);
kmem_cache_destroy(hsotg->desc_hsisoc_cache);
+ kmem_cache_destroy(hsotg->desc_gen_cache);
dwc2_hcd_release(hsotg);
usb_put_hcd(hcd);
@@ -5435,7 +5522,7 @@ int dwc2_host_enter_hibernation(struct dwc2_hsotg *hsotg)
dwc2_writel(hprt0, hsotg->regs + HPRT0);
/* Wait for the HPRT0.PrtSusp register field to be set */
- if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 300))
+ if (dwc2_hsotg_wait_bit_set(hsotg, HPRT0, HPRT0_SUSP, 3000))
dev_warn(hsotg->dev, "Suspend wasn't generated\n");
/*
@@ -5616,6 +5703,8 @@ int dwc2_host_exit_hibernation(struct dwc2_hsotg *hsotg, int rem_wakeup,
return ret;
}
+ dwc2_hcd_rem_wakeup(hsotg);
+
hsotg->hibernated = 0;
hsotg->bus_suspended = 0;
hsotg->lx_state = DWC2_L0;
diff --git a/drivers/usb/dwc2/hcd.h b/drivers/usb/dwc2/hcd.h
index 7db1ee7e7a77..5502a501f516 100644
--- a/drivers/usb/dwc2/hcd.h
+++ b/drivers/usb/dwc2/hcd.h
@@ -76,6 +76,8 @@ struct dwc2_qh;
* (micro)frame
* @xfer_buf: Pointer to current transfer buffer position
* @xfer_dma: DMA address of xfer_buf
+ * @align_buf: In Buffer DMA mode this will be used if xfer_buf is not
+ * DWORD aligned
* @xfer_len: Total number of bytes to transfer
* @xfer_count: Number of bytes transferred so far
* @start_pkt_count: Packet count at start of transfer
@@ -133,6 +135,7 @@ struct dwc2_host_chan {
u8 *xfer_buf;
dma_addr_t xfer_dma;
+ dma_addr_t align_buf;
u32 xfer_len;
u32 xfer_count;
u16 start_pkt_count;
@@ -302,6 +305,9 @@ struct dwc2_hs_transfer_time {
* speed. Note that this is in "schedule slice" which
* is tightly packed.
* @ntd: Actual number of transfer descriptors in a list
+ * @dw_align_buf: Used instead of original buffer if its physical address
+ * is not dword-aligned
+ * @dw_align_buf_dma: DMA address for dw_align_buf
* @qtd_list: List of QTDs for this QH
* @channel: Host channel currently processing transfers for this QH
* @qh_list_entry: Entry for QH in either the periodic or non-periodic
@@ -350,6 +356,8 @@ struct dwc2_qh {
struct dwc2_hs_transfer_time hs_transfers[DWC2_HS_SCHEDULE_UFRAMES];
u32 ls_start_schedule_slice;
u16 ntd;
+ u8 *dw_align_buf;
+ dma_addr_t dw_align_buf_dma;
struct list_head qtd_list;
struct dwc2_host_chan *channel;
struct list_head qh_list_entry;
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index fbea5e3fb947..8ce10caf3e19 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -942,14 +942,21 @@ static int dwc2_xfercomp_isoc_split_in(struct dwc2_hsotg *hsotg,
frame_desc = &qtd->urb->iso_descs[qtd->isoc_frame_index];
len = dwc2_get_actual_xfer_length(hsotg, chan, chnum, qtd,
DWC2_HC_XFER_COMPLETE, NULL);
- if (!len) {
+ if (!len && !qtd->isoc_split_offset) {
qtd->complete_split = 0;
- qtd->isoc_split_offset = 0;
return 0;
}
frame_desc->actual_length += len;
+ if (chan->align_buf) {
+ dev_vdbg(hsotg->dev, "non-aligned buffer\n");
+ dma_unmap_single(hsotg->dev, chan->qh->dw_align_buf_dma,
+ DWC2_KMEM_UNALIGNED_BUF_SIZE, DMA_FROM_DEVICE);
+ memcpy(qtd->urb->buf + (chan->xfer_dma - qtd->urb->dma),
+ chan->qh->dw_align_buf, len);
+ }
+
qtd->isoc_split_offset += len;
hctsiz = dwc2_readl(hsotg->regs + HCTSIZ(chnum));
@@ -1224,7 +1231,10 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
* avoid interrupt storms we'll wait before retrying if we've got
* several NAKs. If we didn't do this we'd retry directly from the
* interrupt handler and could end up quickly getting another
- * interrupt (another NAK), which we'd retry.
+ * interrupt (another NAK), which we'd retry. Note that we do not
+ * delay retries for IN parts of control requests, as those are expected
+ * to complete fairly quickly, and if we delay them we risk confusing
+ * the device and causing it to issue a STALL.
*
* Note that in DMA mode software only gets involved to re-send NAKed
* transfers for split transactions, so we only need to apply this
@@ -1237,7 +1247,9 @@ static void dwc2_hc_nak_intr(struct dwc2_hsotg *hsotg,
qtd->error_count = 0;
qtd->complete_split = 0;
qtd->num_naks++;
- qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY;
+ qtd->qh->want_wait = qtd->num_naks >= DWC2_NAKS_BEFORE_DELAY &&
+ !(chan->ep_type == USB_ENDPOINT_XFER_CONTROL &&
+ chan->ep_is_in);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_NAK);
goto handle_nak_done;
}
diff --git a/drivers/usb/dwc2/hcd_queue.c b/drivers/usb/dwc2/hcd_queue.c
index d7c3d6c776d8..301ced1618f8 100644
--- a/drivers/usb/dwc2/hcd_queue.c
+++ b/drivers/usb/dwc2/hcd_queue.c
@@ -383,7 +383,7 @@ static unsigned long *dwc2_get_ls_map(struct dwc2_hsotg *hsotg,
/* Get the map and adjust if this is a multi_tt hub */
map = qh->dwc_tt->periodic_bitmaps;
if (qh->dwc_tt->usb_tt->multi)
- map += DWC2_ELEMENTS_PER_LS_BITMAP * qh->ttport;
+ map += DWC2_ELEMENTS_PER_LS_BITMAP * (qh->ttport - 1);
return map;
}
@@ -1696,6 +1696,9 @@ void dwc2_hcd_qh_free(struct dwc2_hsotg *hsotg, struct dwc2_qh *qh)
if (qh->desc_list)
dwc2_hcd_qh_free_ddma(hsotg, qh);
+ else if (hsotg->unaligned_cache && qh->dw_align_buf)
+ kmem_cache_free(hsotg->unaligned_cache, qh->dw_align_buf);
+
kfree(qh);
}
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index ea91310113b9..103807587dc6 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1272,7 +1272,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (!dwc->clks)
return -ENOMEM;
- dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
dwc->dev = dev;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1307,15 +1306,19 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
- ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
- if (ret == -EPROBE_DEFER)
- return ret;
- /*
- * Clocks are optional, but new DT platforms should support all clocks
- * as required by the DT-binding.
- */
- if (ret)
- dwc->num_clks = 0;
+ if (dev->of_node) {
+ dwc->num_clks = ARRAY_SIZE(dwc3_core_clks);
+
+ ret = clk_bulk_get(dev, dwc->num_clks, dwc->clks);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ /*
+ * Clocks are optional, but new DT platforms should support all
+ * clocks as required by the DT-binding.
+ */
+ if (ret)
+ dwc->num_clks = 0;
+ }
ret = reset_control_deassert(dwc->reset);
if (ret)
diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c
index 6b3ccd542bd7..dbeff5e6ad14 100644
--- a/drivers/usb/dwc3/dwc3-of-simple.c
+++ b/drivers/usb/dwc3/dwc3-of-simple.c
@@ -165,8 +165,9 @@ static int dwc3_of_simple_remove(struct platform_device *pdev)
reset_control_put(simple->resets);
- pm_runtime_put_sync(dev);
pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ pm_runtime_set_suspended(dev);
return 0;
}
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index c961a94d136b..f57e7c94b8e5 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -34,6 +34,7 @@
#define PCI_DEVICE_ID_INTEL_GLK 0x31aa
#define PCI_DEVICE_ID_INTEL_CNPLP 0x9dee
#define PCI_DEVICE_ID_INTEL_CNPH 0xa36e
+#define PCI_DEVICE_ID_INTEL_ICLLP 0x34ee
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -289,6 +290,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_GLK), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPLP), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CNPH), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICLLP), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index b0e67ab2f98c..a6d0203e40b6 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -490,6 +490,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->dwc3 = of_find_device_by_node(dwc3_np);
if (!qcom->dwc3) {
dev_err(&pdev->dev, "failed to get dwc3 platform device\n");
+ ret = -ENODEV;
goto depopulate;
}
@@ -547,8 +548,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int dwc3_qcom_pm_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
int ret = 0;
@@ -560,7 +560,7 @@ static int dwc3_qcom_pm_suspend(struct device *dev)
return ret;
}
-static int dwc3_qcom_pm_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_pm_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
int ret;
@@ -571,23 +571,20 @@ static int dwc3_qcom_pm_resume(struct device *dev)
return ret;
}
-#endif
-#ifdef CONFIG_PM
-static int dwc3_qcom_runtime_suspend(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_suspend(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
return dwc3_qcom_suspend(qcom);
}
-static int dwc3_qcom_runtime_resume(struct device *dev)
+static int __maybe_unused dwc3_qcom_runtime_resume(struct device *dev)
{
struct dwc3_qcom *qcom = dev_get_drvdata(dev);
return dwc3_qcom_resume(qcom);
}
-#endif
static const struct dev_pm_ops dwc3_qcom_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(dwc3_qcom_pm_suspend, dwc3_qcom_pm_resume)
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index c77ff50a88a2..8efde178eef4 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -973,15 +973,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc,
ret = dwc3_ep0_start_trans(dep);
} else if (IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) &&
req->request.length && req->request.zero) {
- u32 maxpacket;
ret = usb_gadget_map_request_by_dev(dwc->sysdev,
&req->request, dep->number);
if (ret)
return;
- maxpacket = dep->endpoint.maxpacket;
-
/* prepare normal TRB */
dwc3_ep0_prepare_one_trb(dep, req->request.dma,
req->request.length,
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index f242c2bcea81..b8a15840b4ff 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -1719,6 +1719,8 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
*/
if (w_value && !f->get_alt)
break;
+
+ spin_lock(&cdev->lock);
value = f->set_alt(f, w_index, w_value);
if (value == USB_GADGET_DELAYED_STATUS) {
DBG(cdev,
@@ -1728,6 +1730,7 @@ composite_setup(struct usb_gadget *gadget, const struct usb_ctrlrequest *ctrl)
DBG(cdev, "delayed_status count %d\n",
cdev->delayed_status);
}
+ spin_unlock(&cdev->lock);
break;
case USB_REQ_GET_INTERFACE:
if (ctrl->bRequestType != (USB_DIR_IN|USB_RECIP_INTERFACE))
@@ -1816,7 +1819,6 @@ unknown:
if (cdev->use_os_string && cdev->os_desc_config &&
(ctrl->bRequestType & USB_TYPE_VENDOR) &&
ctrl->bRequest == cdev->b_vendor_code) {
- struct usb_request *req;
struct usb_configuration *os_desc_cfg;
u8 *buf;
int interface;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index dce9d12c7981..3ada83d81bda 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -215,6 +215,7 @@ struct ffs_io_data {
struct mm_struct *mm;
struct work_struct work;
+ struct work_struct cancellation_work;
struct usb_ep *ep;
struct usb_request *req;
@@ -1072,22 +1073,31 @@ ffs_epfile_open(struct inode *inode, struct file *file)
return 0;
}
+static void ffs_aio_cancel_worker(struct work_struct *work)
+{
+ struct ffs_io_data *io_data = container_of(work, struct ffs_io_data,
+ cancellation_work);
+
+ ENTER();
+
+ usb_ep_dequeue(io_data->ep, io_data->req);
+}
+
static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
- struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ struct ffs_data *ffs = io_data->ffs;
int value;
ENTER();
- spin_lock_irq(&epfile->ffs->eps_lock);
-
- if (likely(io_data && io_data->ep && io_data->req))
- value = usb_ep_dequeue(io_data->ep, io_data->req);
- else
+ if (likely(io_data && io_data->ep && io_data->req)) {
+ INIT_WORK(&io_data->cancellation_work, ffs_aio_cancel_worker);
+ queue_work(ffs->io_completion_wq, &io_data->cancellation_work);
+ value = -EINPROGRESS;
+ } else {
value = -EINVAL;
-
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ }
return value;
}
@@ -3253,7 +3263,7 @@ static int ffs_func_setup(struct usb_function *f,
__ffs_event_add(ffs, FUNCTIONFS_SETUP);
spin_unlock_irqrestore(&ffs->ev.waitq.lock, flags);
- return USB_GADGET_DELAYED_STATUS;
+ return creq->wLength == 0 ? USB_GADGET_DELAYED_STATUS : 0;
}
static bool ffs_func_req_match(struct usb_function *f,
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index d2dc1f00180b..d582921f7257 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -438,14 +438,14 @@ static struct usb_descriptor_header *hs_audio_desc[] = {
};
struct cntrl_cur_lay3 {
- __u32 dCUR;
+ __le32 dCUR;
};
struct cntrl_range_lay3 {
- __u16 wNumSubRanges;
- __u32 dMIN;
- __u32 dMAX;
- __u32 dRES;
+ __le16 wNumSubRanges;
+ __le32 dMIN;
+ __le32 dMAX;
+ __le32 dRES;
} __packed;
static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
@@ -559,13 +559,13 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
if (!agdev->out_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ return -ENODEV;
}
agdev->in_ep = usb_ep_autoconfig(gadget, &fs_epin_desc);
if (!agdev->in_ep) {
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
- return ret;
+ return -ENODEV;
}
agdev->in_ep_maxpsize = max_t(u16,
@@ -703,9 +703,9 @@ in_rq_cur(struct usb_function *fn, const struct usb_ctrlrequest *cr)
memset(&c, 0, sizeof(struct cntrl_cur_lay3));
if (entity_id == USB_IN_CLK_ID)
- c.dCUR = p_srate;
+ c.dCUR = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- c.dCUR = c_srate;
+ c.dCUR = cpu_to_le32(c_srate);
value = min_t(unsigned, w_length, sizeof c);
memcpy(req->buf, &c, value);
@@ -742,15 +742,15 @@ in_rq_range(struct usb_function *fn, const struct usb_ctrlrequest *cr)
if (control_selector == UAC2_CS_CONTROL_SAM_FREQ) {
if (entity_id == USB_IN_CLK_ID)
- r.dMIN = p_srate;
+ r.dMIN = cpu_to_le32(p_srate);
else if (entity_id == USB_OUT_CLK_ID)
- r.dMIN = c_srate;
+ r.dMIN = cpu_to_le32(c_srate);
else
return -EOPNOTSUPP;
r.dMAX = r.dMIN;
r.dRES = 0;
- r.wNumSubRanges = 1;
+ r.wNumSubRanges = cpu_to_le16(1);
value = min_t(unsigned, w_length, sizeof r);
memcpy(req->buf, &r, value);
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index a72295c953bb..fb5ed97572e5 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -32,9 +32,6 @@ struct uac_req {
struct uac_rtd_params {
struct snd_uac_chip *uac; /* parent chip */
bool ep_enabled; /* if the ep is enabled */
- /* Size of the ring buffer */
- size_t dma_bytes;
- unsigned char *dma_area;
struct snd_pcm_substream *ss;
@@ -43,8 +40,6 @@ struct uac_rtd_params {
void *rbuf;
- size_t period_size;
-
unsigned max_psize; /* MaxPacketSize of endpoint */
struct uac_req *ureq;
@@ -84,12 +79,12 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
unsigned pending;
- unsigned long flags;
+ unsigned long flags, flags2;
unsigned int hw_ptr;
- bool update_alsa = false;
int status = req->status;
struct uac_req *ur = req->context;
struct snd_pcm_substream *substream;
+ struct snd_pcm_runtime *runtime;
struct uac_rtd_params *prm = ur->pp;
struct snd_uac_chip *uac = prm->uac;
@@ -111,6 +106,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
if (!substream)
goto exit;
+ snd_pcm_stream_lock_irqsave(substream, flags2);
+
+ runtime = substream->runtime;
+ if (!runtime || !snd_pcm_running(substream)) {
+ snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ goto exit;
+ }
+
spin_lock_irqsave(&prm->lock, flags);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
@@ -137,43 +140,46 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
req->actual = req->length;
}
- pending = prm->hw_ptr % prm->period_size;
- pending += req->actual;
- if (pending >= prm->period_size)
- update_alsa = true;
-
hw_ptr = prm->hw_ptr;
- prm->hw_ptr = (prm->hw_ptr + req->actual) % prm->dma_bytes;
spin_unlock_irqrestore(&prm->lock, flags);
/* Pack USB load in ALSA ring buffer */
- pending = prm->dma_bytes - hw_ptr;
+ pending = runtime->dma_bytes - hw_ptr;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
if (unlikely(pending < req->actual)) {
- memcpy(req->buf, prm->dma_area + hw_ptr, pending);
- memcpy(req->buf + pending, prm->dma_area,
+ memcpy(req->buf, runtime->dma_area + hw_ptr, pending);
+ memcpy(req->buf + pending, runtime->dma_area,
req->actual - pending);
} else {
- memcpy(req->buf, prm->dma_area + hw_ptr, req->actual);
+ memcpy(req->buf, runtime->dma_area + hw_ptr,
+ req->actual);
}
} else {
if (unlikely(pending < req->actual)) {
- memcpy(prm->dma_area + hw_ptr, req->buf, pending);
- memcpy(prm->dma_area, req->buf + pending,
+ memcpy(runtime->dma_area + hw_ptr, req->buf, pending);
+ memcpy(runtime->dma_area, req->buf + pending,
req->actual - pending);
} else {
- memcpy(prm->dma_area + hw_ptr, req->buf, req->actual);
+ memcpy(runtime->dma_area + hw_ptr, req->buf,
+ req->actual);
}
}
+ spin_lock_irqsave(&prm->lock, flags);
+ /* update hw_ptr after data is copied to memory */
+ prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
+ hw_ptr = prm->hw_ptr;
+ spin_unlock_irqrestore(&prm->lock, flags);
+ snd_pcm_stream_unlock_irqrestore(substream, flags2);
+
+ if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
+ snd_pcm_period_elapsed(substream);
+
exit:
if (usb_ep_queue(ep, req, GFP_ATOMIC))
dev_err(uac->card->dev, "%d Error!\n", __LINE__);
-
- if (update_alsa)
- snd_pcm_period_elapsed(substream);
}
static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
@@ -236,40 +242,12 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
static int uac_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *hw_params)
{
- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
- struct uac_rtd_params *prm;
- int err;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac->p_prm;
- else
- prm = &uac->c_prm;
-
- err = snd_pcm_lib_malloc_pages(substream,
+ return snd_pcm_lib_malloc_pages(substream,
params_buffer_bytes(hw_params));
- if (err >= 0) {
- prm->dma_bytes = substream->runtime->dma_bytes;
- prm->dma_area = substream->runtime->dma_area;
- prm->period_size = params_period_bytes(hw_params);
- }
-
- return err;
}
static int uac_pcm_hw_free(struct snd_pcm_substream *substream)
{
- struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
- struct uac_rtd_params *prm;
-
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- prm = &uac->p_prm;
- else
- prm = &uac->c_prm;
-
- prm->dma_area = NULL;
- prm->dma_bytes = 0;
- prm->period_size = 0;
-
return snd_pcm_lib_free_pages(substream);
}
@@ -595,15 +573,15 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
if (err < 0)
goto snd_fail;
- strcpy(pcm->name, pcm_name);
+ strlcpy(pcm->name, pcm_name, sizeof(pcm->name));
pcm->private_data = uac;
uac->pcm = pcm;
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &uac_pcm_ops);
snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &uac_pcm_ops);
- strcpy(card->driver, card_name);
- strcpy(card->shortname, card_name);
+ strlcpy(card->driver, card_name, sizeof(card->driver));
+ strlcpy(card->shortname, card_name, sizeof(card->shortname));
sprintf(card->longname, "%s %i", card_name, card->dev->id);
snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
index f0cdf89b8503..83ba8a2eb6af 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
+++ b/drivers/usb/gadget/udc/aspeed-vhub/Kconfig
@@ -2,6 +2,7 @@
config USB_ASPEED_VHUB
tristate "Aspeed vHub UDC driver"
depends on ARCH_ASPEED || COMPILE_TEST
+ depends on USB_LIBCOMPOSITE
help
USB peripheral controller for the Aspeed AST2500 family
SoCs supporting the "vHub" functionality and USB2.0
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
index 20ffb03ff6ac..e2927fb083cf 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/ep0.c
@@ -108,6 +108,13 @@ void ast_vhub_ep0_handle_setup(struct ast_vhub_ep *ep)
/* Check our state, cancel pending requests if needed */
if (ep->ep0.state != ep0_state_token) {
EPDBG(ep, "wrong state\n");
+ ast_vhub_nuke(ep, -EIO);
+
+ /*
+ * Accept the packet regardless; this seems to happen
+ * when stalling a SETUP packet that has an OUT data
+ * phase.
+ */
ast_vhub_nuke(ep, 0);
goto stall;
}
@@ -212,6 +219,8 @@ static void ast_vhub_ep0_do_send(struct ast_vhub_ep *ep,
if (chunk && req->req.buf)
memcpy(ep->buf, req->req.buf + req->req.actual, chunk);
+ vhub_dma_workaround(ep->buf);
+
/* Remember chunk size and trigger send */
reg = VHUB_EP0_SET_TX_LEN(chunk);
writel(reg, ep->ep0.ctlstat);
@@ -224,7 +233,7 @@ static void ast_vhub_ep0_rx_prime(struct ast_vhub_ep *ep)
EPVDBG(ep, "rx prime\n");
/* Prime endpoint for receiving data */
- writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat + AST_VHUB_EP0_CTRL);
+ writel(VHUB_EP0_RX_BUFF_RDY, ep->ep0.ctlstat);
}
static void ast_vhub_ep0_do_receive(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 80c9feac5147..5939eb1e97f2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -66,11 +66,16 @@ static void ast_vhub_epn_kick(struct ast_vhub_ep *ep, struct ast_vhub_req *req)
if (!req->req.dma) {
/* For IN transfers, copy data over first */
- if (ep->epn.is_in)
+ if (ep->epn.is_in) {
memcpy(ep->buf, req->req.buf + act, chunk);
+ vhub_dma_workaround(ep->buf);
+ }
writel(ep->buf_dma, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
- } else
+ } else {
+ if (ep->epn.is_in)
+ vhub_dma_workaround(req->req.buf);
writel(req->req.dma + act, ep->epn.regs + AST_VHUB_EP_DESC_BASE);
+ }
/* Start DMA */
req->active = true;
@@ -161,6 +166,7 @@ static inline unsigned int ast_vhub_count_free_descs(struct ast_vhub_ep *ep)
static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
struct ast_vhub_req *req)
{
+ struct ast_vhub_desc *desc = NULL;
unsigned int act = req->act_count;
unsigned int len = req->req.length;
unsigned int chunk;
@@ -177,7 +183,6 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
/* While we can create descriptors */
while (ast_vhub_count_free_descs(ep) && req->last_desc < 0) {
- struct ast_vhub_desc *desc;
unsigned int d_num;
/* Grab next free descriptor */
@@ -227,6 +232,9 @@ static void ast_vhub_epn_kick_desc(struct ast_vhub_ep *ep,
req->act_count = act = act + chunk;
}
+ if (likely(desc))
+ vhub_dma_workaround(desc);
+
/* Tell HW about new descriptors */
writel(VHUB_EP_DMA_SET_CPU_WPTR(ep->epn.d_next),
ep->epn.regs + AST_VHUB_EP_DESC_STATUS);
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
index 2b040257bc1f..4ed03d33a5a9 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
+++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h
@@ -462,6 +462,39 @@ enum std_req_rc {
#define DDBG(d, fmt, ...) do { } while(0)
#endif
+static inline void vhub_dma_workaround(void *addr)
+{
+ /*
+ * This works around a confirmed HW issue with the Aspeed chip.
+ *
+ * The core uses a different bus to memory than the AHB going to
+ * the USB device controller. Due to the latter having a higher
+ * priority than the core for arbitration on that bus, it's
+ * possible for an MMIO to the device, followed by a DMA by the
+ * device from memory to all be performed and serviced before
+ * a previous store to memory gets completed.
+ *
+ * Thus the following scenario can happen:
+ *
+ * - Driver writes to a DMA descriptor (Mbus)
+ * - Driver writes to the MMIO register to start the DMA (AHB)
+ * - The gadget sees the second write and sends a read of the
+ * descriptor to the memory controller (Mbus)
+ * - The gadget hits memory before the descriptor write
+ * causing it to read an obsolete value.
+ *
+ * Thankfully the problem is limited to the USB gadget device, other
+ * masters in the SoC all have a lower priority than the core, thus
+ * ensuring that the store by the core arrives first.
+ *
+ * The workaround consists of using a dummy read of the memory before
+ * doing the MMIO writes. This will ensure that the previous writes
+ * have been "pushed out".
+ */
+ mb();
+ (void)__raw_readl((void __iomem *)addr);
+}
+
/* core.c */
void ast_vhub_done(struct ast_vhub_ep *ep, struct ast_vhub_req *req,
int status);
diff --git a/drivers/usb/gadget/udc/r8a66597-udc.c b/drivers/usb/gadget/udc/r8a66597-udc.c
index a3ecce62662b..11e25a3f4f1f 100644
--- a/drivers/usb/gadget/udc/r8a66597-udc.c
+++ b/drivers/usb/gadget/udc/r8a66597-udc.c
@@ -832,11 +832,11 @@ static void init_controller(struct r8a66597 *r8a66597)
r8a66597_bset(r8a66597, XCKE, SYSCFG0);
- msleep(3);
+ mdelay(3);
r8a66597_bset(r8a66597, PLLC, SYSCFG0);
- msleep(1);
+ mdelay(1);
r8a66597_bset(r8a66597, SCKE, SYSCFG0);
@@ -1190,7 +1190,7 @@ __acquires(r8a66597->lock)
r8a66597->ep0_req->length = 2;
/* AV: what happens if we get called again before that gets through? */
spin_unlock(&r8a66597->lock);
- r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_KERNEL);
+ r8a66597_queue(r8a66597->gadget.ep0, r8a66597->ep0_req, GFP_ATOMIC);
spin_lock(&r8a66597->lock);
}
diff --git a/drivers/usb/host/xhci-dbgcap.c b/drivers/usb/host/xhci-dbgcap.c
index 1fbfd89d0a0f..387f124a8334 100644
--- a/drivers/usb/host/xhci-dbgcap.c
+++ b/drivers/usb/host/xhci-dbgcap.c
@@ -508,16 +508,18 @@ static int xhci_do_dbc_start(struct xhci_hcd *xhci)
return 0;
}
-static void xhci_do_dbc_stop(struct xhci_hcd *xhci)
+static int xhci_do_dbc_stop(struct xhci_hcd *xhci)
{
struct xhci_dbc *dbc = xhci->dbc;
if (dbc->state == DS_DISABLED)
- return;
+ return -1;
writel(0, &dbc->regs->control);
xhci_dbc_mem_cleanup(xhci);
dbc->state = DS_DISABLED;
+
+ return 0;
}
static int xhci_dbc_start(struct xhci_hcd *xhci)
@@ -544,6 +546,7 @@ static int xhci_dbc_start(struct xhci_hcd *xhci)
static void xhci_dbc_stop(struct xhci_hcd *xhci)
{
+ int ret;
unsigned long flags;
struct xhci_dbc *dbc = xhci->dbc;
struct dbc_port *port = &dbc->port;
@@ -556,10 +559,11 @@ static void xhci_dbc_stop(struct xhci_hcd *xhci)
xhci_dbc_tty_unregister_device(xhci);
spin_lock_irqsave(&dbc->lock, flags);
- xhci_do_dbc_stop(xhci);
+ ret = xhci_do_dbc_stop(xhci);
spin_unlock_irqrestore(&dbc->lock, flags);
- pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
+ if (!ret)
+ pm_runtime_put_sync(xhci_to_hcd(xhci)->self.controller);
}
static void
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index acbd3d7b8828..ef350c33dc4a 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -595,7 +595,7 @@ struct xhci_ring *xhci_stream_id_to_ring(
if (!ep->stream_info)
return NULL;
- if (stream_id > ep->stream_info->num_streams)
+ if (stream_id >= ep->stream_info->num_streams)
return NULL;
return ep->stream_info->stream_rings[stream_id];
}
@@ -886,12 +886,12 @@ void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id)
dev = xhci->devs[slot_id];
- trace_xhci_free_virt_device(dev);
-
xhci->dcbaa->dev_context_ptrs[slot_id] = 0;
if (!dev)
return;
+ trace_xhci_free_virt_device(dev);
+
if (dev->tt_info)
old_active_eps = dev->tt_info->active_eps;
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index a8c1d073cba0..4b463e5202a4 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -481,7 +481,7 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
unsigned long mask;
unsigned int port;
bool idle, enable;
- int err;
+ int err = 0;
memset(&rsp, 0, sizeof(rsp));
@@ -1223,10 +1223,10 @@ disable_rpm:
pm_runtime_disable(&pdev->dev);
usb_put_hcd(tegra->hcd);
disable_xusbc:
- if (!&pdev->dev.pm_domain)
+ if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBC);
disable_xusba:
- if (!&pdev->dev.pm_domain)
+ if (!pdev->dev.pm_domain)
tegra_powergate_power_off(TEGRA_POWERGATE_XUSBA);
put_padctl:
tegra_xusb_padctl_put(tegra->padctl);
diff --git a/drivers/usb/host/xhci-trace.h b/drivers/usb/host/xhci-trace.h
index 410544ffe78f..88b427434bd8 100644
--- a/drivers/usb/host/xhci-trace.h
+++ b/drivers/usb/host/xhci-trace.h
@@ -171,6 +171,37 @@ DEFINE_EVENT(xhci_log_trb, xhci_dbc_gadget_ep_queue,
TP_ARGS(ring, trb)
);
+DECLARE_EVENT_CLASS(xhci_log_free_virt_dev,
+ TP_PROTO(struct xhci_virt_device *vdev),
+ TP_ARGS(vdev),
+ TP_STRUCT__entry(
+ __field(void *, vdev)
+ __field(unsigned long long, out_ctx)
+ __field(unsigned long long, in_ctx)
+ __field(u8, fake_port)
+ __field(u8, real_port)
+ __field(u16, current_mel)
+
+ ),
+ TP_fast_assign(
+ __entry->vdev = vdev;
+ __entry->in_ctx = (unsigned long long) vdev->in_ctx->dma;
+ __entry->out_ctx = (unsigned long long) vdev->out_ctx->dma;
+ __entry->fake_port = (u8) vdev->fake_port;
+ __entry->real_port = (u8) vdev->real_port;
+ __entry->current_mel = (u16) vdev->current_mel;
+ ),
+ TP_printk("vdev %p ctx %llx | %llx fake_port %d real_port %d current_mel %d",
+ __entry->vdev, __entry->in_ctx, __entry->out_ctx,
+ __entry->fake_port, __entry->real_port, __entry->current_mel
+ )
+);
+
+DEFINE_EVENT(xhci_log_free_virt_dev, xhci_free_virt_device,
+ TP_PROTO(struct xhci_virt_device *vdev),
+ TP_ARGS(vdev)
+);
+
DECLARE_EVENT_CLASS(xhci_log_virt_dev,
TP_PROTO(struct xhci_virt_device *vdev),
TP_ARGS(vdev),
@@ -208,11 +239,6 @@ DEFINE_EVENT(xhci_log_virt_dev, xhci_alloc_virt_device,
TP_ARGS(vdev)
);
-DEFINE_EVENT(xhci_log_virt_dev, xhci_free_virt_device,
- TP_PROTO(struct xhci_virt_device *vdev),
- TP_ARGS(vdev)
-);
-
DEFINE_EVENT(xhci_log_virt_dev, xhci_setup_device,
TP_PROTO(struct xhci_virt_device *vdev),
TP_ARGS(vdev)
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 8c8da2d657fa..68e6132aa8b2 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -908,6 +908,41 @@ static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
spin_unlock_irqrestore(&xhci->lock, flags);
}
+static bool xhci_pending_portevent(struct xhci_hcd *xhci)
+{
+ struct xhci_port **ports;
+ int port_index;
+ u32 status;
+ u32 portsc;
+
+ status = readl(&xhci->op_regs->status);
+ if (status & STS_EINT)
+ return true;
+ /*
+ * Checking STS_EINT is not enough as there is a lag between a change
+ * bit being set and the Port Status Change Event that it generated
+ * being written to the Event Ring. See note in xhci 1.1 section 4.19.2.
+ */
+
+ port_index = xhci->usb2_rhub.num_ports;
+ ports = xhci->usb2_rhub.ports;
+ while (port_index--) {
+ portsc = readl(ports[port_index]->addr);
+ if (portsc & PORT_CHANGE_MASK ||
+ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ return true;
+ }
+ port_index = xhci->usb3_rhub.num_ports;
+ ports = xhci->usb3_rhub.ports;
+ while (port_index--) {
+ portsc = readl(ports[port_index]->addr);
+ if (portsc & PORT_CHANGE_MASK ||
+ (portsc & PORT_PLS_MASK) == XDEV_RESUME)
+ return true;
+ }
+ return false;
+}
+
/*
* Stop HC (not bus-specific)
*
@@ -1009,7 +1044,7 @@ EXPORT_SYMBOL_GPL(xhci_suspend);
*/
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
- u32 command, temp = 0, status;
+ u32 command, temp = 0;
struct usb_hcd *hcd = xhci_to_hcd(xhci);
struct usb_hcd *secondary_hcd;
int retval = 0;
@@ -1043,8 +1078,13 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
command = readl(&xhci->op_regs->command);
command |= CMD_CRS;
writel(command, &xhci->op_regs->command);
+ /*
+ * Some controllers take up to 55+ ms to complete the controller
+ * restore, so set the timeout to 100 ms. The xHCI specification
+ * doesn't mention any timeout value.
+ */
if (xhci_handshake(&xhci->op_regs->status,
- STS_RESTORE, 0, 10 * 1000)) {
+ STS_RESTORE, 0, 100 * 1000)) {
xhci_warn(xhci, "WARN: xHC restore state timeout\n");
spin_unlock_irq(&xhci->lock);
return -ETIMEDOUT;
@@ -1134,8 +1174,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
done:
if (retval == 0) {
/* Resume root hubs only when have pending events. */
- status = readl(&xhci->op_regs->status);
- if (status & STS_EINT) {
+ if (xhci_pending_portevent(xhci)) {
usb_hcd_resume_root_hub(xhci->shared_hcd);
usb_hcd_resume_root_hub(hcd);
}
@@ -3012,6 +3051,7 @@ static void xhci_endpoint_reset(struct usb_hcd *hcd,
if (!list_empty(&ep->ring->td_list)) {
dev_err(&udev->dev, "EP not empty, refuse reset\n");
spin_unlock_irqrestore(&xhci->lock, flags);
+ xhci_free_command(xhci, cfg_cmd);
goto cleanup;
}
xhci_queue_stop_endpoint(xhci, stop_cmd, udev->slot_id, ep_index, 0);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 939e2f86b595..841e89ffe2e9 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -382,6 +382,10 @@ struct xhci_op_regs {
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
+#define PORT_CHANGE_MASK (PORT_CSC | PORT_PEC | PORT_WRC | PORT_OCC | \
+ PORT_RC | PORT_PLC | PORT_CEC)
+
+
/* Cold Attach Status - xHC can set this bit to report device attached during
* Sx state. Warm port reset should be perfomed to clear this bit and move port
* to connected state.
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 8abb6cbbd98a..3be40eaa1ac9 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -396,8 +396,7 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
loff_t *ppos)
{
struct usb_yurex *dev;
- int retval = 0;
- int bytes_read = 0;
+ int len = 0;
char in_buffer[20];
unsigned long flags;
@@ -405,26 +404,16 @@ static ssize_t yurex_read(struct file *file, char __user *buffer, size_t count,
mutex_lock(&dev->io_mutex);
if (!dev->interface) { /* already disconnected */
- retval = -ENODEV;
- goto exit;
+ mutex_unlock(&dev->io_mutex);
+ return -ENODEV;
}
spin_lock_irqsave(&dev->lock, flags);
- bytes_read = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
+ len = snprintf(in_buffer, 20, "%lld\n", dev->bbu);
spin_unlock_irqrestore(&dev->lock, flags);
-
- if (*ppos < bytes_read) {
- if (copy_to_user(buffer, in_buffer + *ppos, bytes_read - *ppos))
- retval = -EFAULT;
- else {
- retval = bytes_read - *ppos;
- *ppos += bytes_read;
- }
- }
-
-exit:
mutex_unlock(&dev->io_mutex);
- return retval;
+
+ return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
diff --git a/drivers/usb/phy/phy-fsl-usb.c b/drivers/usb/phy/phy-fsl-usb.c
index 900875f326d7..f7c96d209eda 100644
--- a/drivers/usb/phy/phy-fsl-usb.c
+++ b/drivers/usb/phy/phy-fsl-usb.c
@@ -861,6 +861,7 @@ int usb_otg_start(struct platform_device *pdev)
if (pdata->init && pdata->init(pdev) != 0)
return -EINVAL;
+#ifdef CONFIG_PPC32
if (pdata->big_endian_mmio) {
_fsl_readl = _fsl_readl_be;
_fsl_writel = _fsl_writel_be;
@@ -868,6 +869,7 @@ int usb_otg_start(struct platform_device *pdev)
_fsl_readl = _fsl_readl_le;
_fsl_writel = _fsl_writel_le;
}
+#endif
/* request irq */
p_otg->irq = platform_get_irq(pdev, 0);
@@ -958,7 +960,7 @@ int usb_otg_start(struct platform_device *pdev)
/*
* state file in sysfs
*/
-static int show_fsl_usb2_otg_state(struct device *dev,
+static ssize_t show_fsl_usb2_otg_state(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct otg_fsm *fsm = &fsl_otg_dev->fsm;
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index bdd7a5ad3bf1..3bb1fff02bed 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -128,7 +128,7 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- if (r < bufsize) {
+ if (r < (int)bufsize) {
if (r >= 0) {
dev_err(&dev->dev,
"short control message received (%d < %u)\n",
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eb6c26cbe579..626a29d9aa58 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -95,6 +95,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */
{ USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */
{ USB_DEVICE(0x10C4, 0x815F) }, /* Timewave HamLinkUSB */
+ { USB_DEVICE(0x10C4, 0x817C) }, /* CESINEL MEDCAL N Power Quality Monitor */
+ { USB_DEVICE(0x10C4, 0x817D) }, /* CESINEL MEDCAL NT Power Quality Monitor */
+ { USB_DEVICE(0x10C4, 0x817E) }, /* CESINEL MEDCAL S Power Quality Monitor */
{ USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */
{ USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */
{ USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */
@@ -112,6 +115,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */
{ USB_DEVICE(0x10C4, 0x8281) }, /* Nanotec Plug & Drive */
{ USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */
+ { USB_DEVICE(0x10C4, 0x82EF) }, /* CESINEL FALCO 6105 AC Power Supply */
+ { USB_DEVICE(0x10C4, 0x82F1) }, /* CESINEL MEDCAL EFD Earth Fault Detector */
+ { USB_DEVICE(0x10C4, 0x82F2) }, /* CESINEL MEDCAL ST Network Analyzer */
{ USB_DEVICE(0x10C4, 0x82F4) }, /* Starizona MicroTouch */
{ USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */
{ USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */
@@ -124,7 +130,9 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8470) }, /* Juniper Networks BX Series System Console */
{ USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */
{ USB_DEVICE(0x10C4, 0x84B6) }, /* Starizona Hyperion */
+ { USB_DEVICE(0x10C4, 0x851E) }, /* CESINEL MEDCAL PT Network Analyzer */
{ USB_DEVICE(0x10C4, 0x85A7) }, /* LifeScan OneTouch Verio IQ */
+ { USB_DEVICE(0x10C4, 0x85B8) }, /* CESINEL ReCon T Energy Logger */
{ USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */
{ USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
@@ -134,17 +142,24 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
+ { USB_DEVICE(0x10C4, 0x88FB) }, /* CESINEL MEDCAL STII Network Analyzer */
+ { USB_DEVICE(0x10C4, 0x8938) }, /* CESINEL MEDCAL S II Network Analyzer */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
{ USB_DEVICE(0x10C4, 0x8962) }, /* Brim Brothers charging dock */
{ USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+ { USB_DEVICE(0x10C4, 0x89A4) }, /* CESINEL FTBC Flexible Thyristor Bridge Controller */
+ { USB_DEVICE(0x10C4, 0x89FB) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
{ USB_DEVICE(0x10C4, 0x8A5E) }, /* CEL EM3588 ZigBee USB Stick Long Range */
{ USB_DEVICE(0x10C4, 0x8B34) }, /* Qivicon ZigBee USB Radio Stick */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
+ { USB_DEVICE(0x10C4, 0xEA63) }, /* Silicon Labs Windows Update (CP2101-4/CP2102N) */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */
+ { USB_DEVICE(0x10C4, 0xEA7A) }, /* Silicon Labs Windows Update (CP2105) */
+ { USB_DEVICE(0x10C4, 0xEA7B) }, /* Silicon Labs Windows Update (CP2108) */
{ USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */
{ USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */
{ USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */
diff --git a/drivers/usb/serial/keyspan_pda.c b/drivers/usb/serial/keyspan_pda.c
index 5169624d8b11..38d43c4b7ce5 100644
--- a/drivers/usb/serial/keyspan_pda.c
+++ b/drivers/usb/serial/keyspan_pda.c
@@ -369,8 +369,10 @@ static int keyspan_pda_get_modem_info(struct usb_serial *serial,
3, /* get pins */
USB_TYPE_VENDOR|USB_RECIP_INTERFACE|USB_DIR_IN,
0, 0, data, 1, 2000);
- if (rc >= 0)
+ if (rc == 1)
*value = *data;
+ else if (rc >= 0)
+ rc = -EIO;
kfree(data);
return rc;
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index fdceb46d9fc6..b580b4c7fa48 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -468,6 +468,9 @@ static void mos7840_control_callback(struct urb *urb)
}
dev_dbg(dev, "%s urb buffer size is %d\n", __func__, urb->actual_length);
+ if (urb->actual_length < 1)
+ goto out;
+
dev_dbg(dev, "%s mos7840_port->MsrLsr is %d port %d\n", __func__,
mos7840_port->MsrLsr, mos7840_port->port_num);
data = urb->transfer_buffer;
diff --git a/drivers/usb/typec/tcpm.c b/drivers/usb/typec/tcpm.c
index 8a201dd53d36..d1d20252bad8 100644
--- a/drivers/usb/typec/tcpm.c
+++ b/drivers/usb/typec/tcpm.c
@@ -418,17 +418,18 @@ static void _tcpm_log(struct tcpm_port *port, const char *fmt, va_list args)
u64 ts_nsec = local_clock();
unsigned long rem_nsec;
+ mutex_lock(&port->logbuffer_lock);
if (!port->logbuffer[port->logbuffer_head]) {
port->logbuffer[port->logbuffer_head] =
kzalloc(LOG_BUFFER_ENTRY_SIZE, GFP_KERNEL);
- if (!port->logbuffer[port->logbuffer_head])
+ if (!port->logbuffer[port->logbuffer_head]) {
+ mutex_unlock(&port->logbuffer_lock);
return;
+ }
}
vsnprintf(tmpbuffer, sizeof(tmpbuffer), fmt, args);
- mutex_lock(&port->logbuffer_lock);
-
if (tcpm_log_full(port)) {
port->logbuffer_head = max(port->logbuffer_head - 1, 0);
strcpy(tmpbuffer, "overflow");
@@ -724,6 +725,9 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
tcpm_log(port, "Setting voltage/current limit %u mV %u mA", mv, max_ma);
+ port->supply_voltage = mv;
+ port->current_limit = max_ma;
+
if (port->tcpc->set_current_limit)
ret = port->tcpc->set_current_limit(port->tcpc, max_ma, mv);
@@ -2136,7 +2140,7 @@ static unsigned int tcpm_pd_select_pps_apdo(struct tcpm_port *port)
* PPS APDO. Again skip the first sink PDO as this will
* always be 5V 3A.
*/
- for (j = i; j < port->nr_snk_pdo; j++) {
+ for (j = 1; j < port->nr_snk_pdo; j++) {
pdo = port->snk_pdo[j];
switch (pdo_type(pdo)) {
@@ -2594,8 +2598,6 @@ static void tcpm_reset_port(struct tcpm_port *port)
tcpm_set_attached_state(port, false);
port->try_src_count = 0;
port->try_snk_count = 0;
- port->supply_voltage = 0;
- port->current_limit = 0;
port->usb_type = POWER_SUPPLY_USB_TYPE_C;
power_supply_changed(port->psy);
@@ -3043,7 +3045,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_port_is_sink(port) &&
time_is_after_jiffies(port->delayed_runtime)) {
tcpm_set_state(port, SNK_DISCOVERY,
- port->delayed_runtime - jiffies);
+ jiffies_to_msecs(port->delayed_runtime -
+ jiffies));
break;
}
tcpm_set_state(port, unattached_state(port), 0);
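The last tcpm hunk matters because port->delayed_runtime is kept in jiffies while tcpm_set_state() expects its delay argument in milliseconds, so the remaining interval must go through jiffies_to_msecs() first. A toy illustration of that scaling, assuming HZ=250 and that HZ divides 1000 evenly (the in-kernel helper copes with arbitrary HZ values):

#include <stdio.h>

#define HZ 250U	/* assumed tick rate for this illustration only */

/* Simplified conversion: valid only when 1000 is a multiple of HZ. */
static unsigned int jiffies_to_msecs_demo(unsigned long j)
{
	return (unsigned int)(j * (1000U / HZ));
}

int main(void)
{
	unsigned long remaining = 50;	/* hypothetical jiffies left until delayed_runtime */

	/* 50 jiffies at HZ=250 is 200 ms, the unit tcpm_set_state() expects. */
	printf("%u ms\n", jiffies_to_msecs_demo(remaining));
	return 0;
}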
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index bd5cca5632b3..8d0a6fe748bd 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -350,6 +350,19 @@ static void ucsi_connector_change(struct work_struct *work)
}
if (con->status.change & UCSI_CONSTAT_CONNECT_CHANGE) {
+ typec_set_pwr_role(con->port, con->status.pwr_dir);
+
+ switch (con->status.partner_type) {
+ case UCSI_CONSTAT_PARTNER_TYPE_UFP:
+ typec_set_data_role(con->port, TYPEC_HOST);
+ break;
+ case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ typec_set_data_role(con->port, TYPEC_DEVICE);
+ break;
+ default:
+ break;
+ }
+
if (con->status.connected)
ucsi_register_partner(con);
else
diff --git a/drivers/usb/typec/ucsi/ucsi_acpi.c b/drivers/usb/typec/ucsi/ucsi_acpi.c
index 44eb4e1ea817..a18112a83fae 100644
--- a/drivers/usb/typec/ucsi/ucsi_acpi.c
+++ b/drivers/usb/typec/ucsi/ucsi_acpi.c
@@ -79,6 +79,11 @@ static int ucsi_acpi_probe(struct platform_device *pdev)
return -ENODEV;
}
+ /* This will make sure we can use ioremap_nocache() */
+ status = acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
+ if (ACPI_FAILURE(status))
+ return -ENOMEM;
+
/*
* NOTE: The memory region for the data structures is used also in an
* operation region, which means ACPI has already reserved it. Therefore
diff --git a/drivers/vfio/pci/Kconfig b/drivers/vfio/pci/Kconfig
index 24ee2605b9f0..42dc1d3d71cf 100644
--- a/drivers/vfio/pci/Kconfig
+++ b/drivers/vfio/pci/Kconfig
@@ -28,5 +28,13 @@ config VFIO_PCI_INTX
def_bool y if !S390
config VFIO_PCI_IGD
- depends on VFIO_PCI
- def_bool y if X86
+ bool "VFIO PCI extensions for Intel graphics (GVT-d)"
+ depends on VFIO_PCI && X86
+ default y
+ help
+ Support for Intel IGD specific extensions to enable direct
+ assignment to virtual machines. This includes exposing an IGD
+ specific firmware table and read-only copies of the host bridge
+ and LPC bridge config space.
+
+ To enable Intel IGD assignment through vfio-pci, say Y.
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index b423a309a6e0..125b58eff936 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -28,6 +28,7 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/vgaarb.h>
+#include <linux/nospec.h>
#include "vfio_pci_private.h"
@@ -727,6 +728,9 @@ static long vfio_pci_ioctl(void *device_data,
if (info.index >=
VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
+ info.index = array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vdev->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
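array_index_nospec() re-clamps the user-supplied region index after the bounds check so that a speculatively mispredicted branch cannot be steered past the end of the region array. The userspace sketch below shows the masking idea only; the real helper builds its mask with an architecture-specific, compiler-opaque construction rather than this plain comparison.

#include <stddef.h>
#include <stdio.h>

/* Produce an all-ones mask when index < size and zero otherwise, then
 * AND it into the index so an out-of-range value collapses to 0.
 */
static size_t index_nospec(size_t index, size_t size)
{
	size_t mask = (size_t)0 - (size_t)(index < size);

	return index & mask;
}

int main(void)
{
	unsigned int regions[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	size_t good = 3, bad = 12;

	/* In range: the mask is all-ones, the index passes through unchanged. */
	printf("%u\n", regions[index_nospec(good, 8)]);
	/* Out of range: the mask is zero, so even a mispredicted path sees index 0. */
	printf("%u\n", regions[index_nospec(bad, 8)]);
	return 0;
}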
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index 759a5bdd40e1..7cd63b0c1a46 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -457,17 +457,17 @@ static void tce_iommu_unuse_page(struct tce_container *container,
}
static int tce_iommu_prereg_ua_to_hpa(struct tce_container *container,
- unsigned long tce, unsigned long size,
+ unsigned long tce, unsigned long shift,
unsigned long *phpa, struct mm_iommu_table_group_mem_t **pmem)
{
long ret = 0;
struct mm_iommu_table_group_mem_t *mem;
- mem = mm_iommu_lookup(container->mm, tce, size);
+ mem = mm_iommu_lookup(container->mm, tce, 1ULL << shift);
if (!mem)
return -EINVAL;
- ret = mm_iommu_ua_to_hpa(mem, tce, phpa);
+ ret = mm_iommu_ua_to_hpa(mem, tce, shift, phpa);
if (ret)
return -EINVAL;
@@ -487,7 +487,7 @@ static void tce_iommu_unuse_page_v2(struct tce_container *container,
if (!pua)
return;
- ret = tce_iommu_prereg_ua_to_hpa(container, *pua, IOMMU_PAGE_SIZE(tbl),
+ ret = tce_iommu_prereg_ua_to_hpa(container, *pua, tbl->it_page_shift,
&hpa, &mem);
if (ret)
pr_debug("%s: tce %lx at #%lx was not cached, ret=%d\n",
@@ -611,7 +611,7 @@ static long tce_iommu_build_v2(struct tce_container *container,
entry + i);
ret = tce_iommu_prereg_ua_to_hpa(container,
- tce, IOMMU_PAGE_SIZE(tbl), &hpa, &mem);
+ tce, tbl->it_page_shift, &hpa, &mem);
if (ret)
break;
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 2c75b33db4ac..3e5b17710a4f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -343,18 +343,16 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
struct page *page[1];
struct vm_area_struct *vma;
struct vm_area_struct *vmas[1];
+ unsigned int flags = 0;
int ret;
+ if (prot & IOMMU_WRITE)
+ flags |= FOLL_WRITE;
+
+ down_read(&mm->mmap_sem);
if (mm == current->mm) {
- ret = get_user_pages_longterm(vaddr, 1, !!(prot & IOMMU_WRITE),
- page, vmas);
+ ret = get_user_pages_longterm(vaddr, 1, flags, page, vmas);
} else {
- unsigned int flags = 0;
-
- if (prot & IOMMU_WRITE)
- flags |= FOLL_WRITE;
-
- down_read(&mm->mmap_sem);
ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
vmas, NULL);
/*
@@ -368,8 +366,8 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
ret = -EOPNOTSUPP;
put_page(page[0]);
}
- up_read(&mm->mmap_sem);
}
+ up_read(&mm->mmap_sem);
if (ret == 1) {
*pfn = page_to_pfn(page[0]);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 686dc670fd29..29756d88799b 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1226,7 +1226,8 @@ err_used:
if (ubufs)
vhost_net_ubuf_put_wait_and_free(ubufs);
err_ubufs:
- sockfd_put(sock);
+ if (sock)
+ sockfd_put(sock);
err_vq:
mutex_unlock(&vq->mutex);
err:
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 451e833f5931..48b154276179 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -41,4 +41,4 @@ obj-$(CONFIG_XEN_PVCALLS_FRONTEND) += pvcalls-front.o
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
-xen-privcmd-y := privcmd.o
+xen-privcmd-y := privcmd.o privcmd-buf.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 762378f1811c..08e4af04d6f2 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -628,8 +628,6 @@ static void __unbind_from_irq(unsigned int irq)
xen_irq_info_cleanup(info);
}
- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
xen_free_irq(irq);
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 2473b0a9e6e4..ba9f3eec2bd0 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -799,7 +799,7 @@ int gnttab_alloc_pages(int nr_pages, struct page **pages)
return 0;
}
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
/**
* gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@ void gnttab_free_pages(int nr_pages, struct page **pages)
}
free_xenballooned_pages(nr_pages, pages);
}
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 8835065029d3..c93d8ef8df34 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -289,8 +289,15 @@ static void sysrq_handler(struct xenbus_watch *watch, const char *path,
return;
}
- if (sysrq_key != '\0')
- xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+ if (sysrq_key != '\0') {
+ err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+ if (err) {
+ pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+ __func__, err);
+ xenbus_transaction_end(xbt, 1);
+ return;
+ }
+ }
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN)
@@ -342,7 +349,12 @@ static int setup_shutdown_watcher(void)
continue;
snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
shutdown_handlers[idx].command);
- xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ if (err) {
+ pr_err("%s: Error %d writing %s\n", __func__,
+ err, node);
+ return err;
+ }
}
return 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644
index 000000000000..df1ed37c3269
--- /dev/null
+++ b/drivers/xen/privcmd-buf.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+ "the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+ struct mutex lock;
+ struct list_head list;
+ unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+ struct privcmd_buf_private *file_priv;
+ struct list_head list;
+ unsigned int users;
+ unsigned int n_pages;
+ struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+ struct privcmd_buf_private *file_priv;
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ mutex_init(&file_priv->lock);
+ INIT_LIST_HEAD(&file_priv->list);
+
+ file->private_data = file_priv;
+
+ return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+ unsigned int i;
+
+ vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+ list_del(&vma_priv->list);
+
+ for (i = 0; i < vma_priv->n_pages; i++)
+ if (vma_priv->pages[i])
+ __free_page(vma_priv->pages[i]);
+
+ kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+ struct privcmd_buf_private *file_priv = file->private_data;
+ struct privcmd_buf_vma_private *vma_priv;
+
+ mutex_lock(&file_priv->lock);
+
+ while (!list_empty(&file_priv->list)) {
+ vma_priv = list_first_entry(&file_priv->list,
+ struct privcmd_buf_vma_private,
+ list);
+ privcmd_buf_vmapriv_free(vma_priv);
+ }
+
+ mutex_unlock(&file_priv->lock);
+
+ kfree(file_priv);
+
+ return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+ struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+ if (!vma_priv)
+ return;
+
+ mutex_lock(&vma_priv->file_priv->lock);
+ vma_priv->users++;
+ mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+ struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+ struct privcmd_buf_private *file_priv;
+
+ if (!vma_priv)
+ return;
+
+ file_priv = vma_priv->file_priv;
+
+ mutex_lock(&file_priv->lock);
+
+ vma_priv->users--;
+ if (!vma_priv->users)
+ privcmd_buf_vmapriv_free(vma_priv);
+
+ mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+ pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+ vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+ vmf->pgoff, (void *)vmf->address);
+
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+ .open = privcmd_buf_vma_open,
+ .close = privcmd_buf_vma_close,
+ .fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct privcmd_buf_private *file_priv = file->private_data;
+ struct privcmd_buf_vma_private *vma_priv;
+ unsigned long count = vma_pages(vma);
+ unsigned int i;
+ int ret = 0;
+
+ if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+ file_priv->allocated + count > limit)
+ return -EINVAL;
+
+ vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+ GFP_KERNEL);
+ if (!vma_priv)
+ return -ENOMEM;
+
+ vma_priv->n_pages = count;
+ count = 0;
+ for (i = 0; i < vma_priv->n_pages; i++) {
+ vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vma_priv->pages[i])
+ break;
+ count++;
+ }
+
+ mutex_lock(&file_priv->lock);
+
+ file_priv->allocated += count;
+
+ vma_priv->file_priv = file_priv;
+ vma_priv->users = 1;
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ vma->vm_ops = &privcmd_buf_vm_ops;
+ vma->vm_private_data = vma_priv;
+
+ list_add(&vma_priv->list, &file_priv->list);
+
+ if (vma_priv->n_pages != count)
+ ret = -ENOMEM;
+ else
+ for (i = 0; i < vma_priv->n_pages; i++) {
+ ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ vma_priv->pages[i]);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ privcmd_buf_vmapriv_free(vma_priv);
+
+ mutex_unlock(&file_priv->lock);
+
+ return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+ .owner = THIS_MODULE,
+ .open = privcmd_buf_open,
+ .release = privcmd_buf_release,
+ .mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/hypercall",
+ .fops = &xen_privcmdbuf_fops,
+};
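The new misc device registers under the name "xen/hypercall", so with devtmpfs it appears as /dev/xen/hypercall and supports nothing but mmap(). A hypothetical userspace sketch of allocating one zeroed hypercall buffer through it follows; the device path and the 4 KiB page size are assumptions made for the example.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t len = 4096;	/* assumed page size for the example */
	int fd = open("/dev/xen/hypercall", O_RDWR);
	void *buf;

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* MAP_SHARED is required: privcmd_buf_mmap() rejects private mappings. */
	buf = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	memset(buf, 0, len);	/* hypercall argument data would be built here */

	/* Unmapping drops the last reference and frees the backing pages. */
	munmap(buf, len);
	close(fd);
	return 0;
}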
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8ae0349d9f0a..7e6e682104dc 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1007,12 +1007,21 @@ static int __init privcmd_init(void)
pr_err("Could not register Xen privcmd device\n");
return err;
}
+
+ err = misc_register(&xen_privcmdbuf_dev);
+ if (err != 0) {
+ pr_err("Could not register Xen hypercall-buf device\n");
+ misc_deregister(&privcmd_dev);
+ return err;
+ }
+
return 0;
}
static void __exit privcmd_exit(void)
{
misc_deregister(&privcmd_dev);
+ misc_deregister(&xen_privcmdbuf_dev);
}
module_init(privcmd_init);
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
index 14facaeed36f..0dd9f8f67ee3 100644
--- a/drivers/xen/privcmd.h
+++ b/drivers/xen/privcmd.h
@@ -1,3 +1,6 @@
#include <linux/fs.h>
extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7bc88fd43cfc..e2f3e8b0fba9 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1012,6 +1012,7 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
{
struct v2p_entry *entry;
unsigned long flags;
+ int err;
if (try) {
spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@ static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
scsiback_del_translation_entry(info, vir);
}
} else if (!try) {
- xenbus_printf(XBT_NIL, info->dev->nodename, state,
+ err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing %s", __func__, state);
}
}
@@ -1067,8 +1071,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
if (IS_ERR(val)) {
- xenbus_printf(XBT_NIL, dev->nodename, state,
+ err = xenbus_printf(XBT_NIL, dev->nodename, state,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing %s", __func__, state);
return;
}
strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@ static void scsiback_do_1lun_hotplug(struct vscsibk_info *info, int op,
err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
&vir.hst, &vir.chn, &vir.tgt, &vir.lun);
if (XENBUS_EXIST_ERR(err)) {
- xenbus_printf(XBT_NIL, dev->nodename, state,
+ err = xenbus_printf(XBT_NIL, dev->nodename, state,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing %s", __func__, state);
return;
}
diff --git a/fs/aio.c b/fs/aio.c
index e1d20124ec0e..27454594e37a 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -5,7 +5,6 @@
* Implements an efficient asynchronous io interface.
*
* Copyright 2000, 2001, 2002 Red Hat, Inc. All Rights Reserved.
- * Copyright 2018 Christoph Hellwig.
*
* See ../COPYING for licensing terms.
*/
@@ -165,22 +164,10 @@ struct fsync_iocb {
bool datasync;
};
-struct poll_iocb {
- struct file *file;
- __poll_t events;
- struct wait_queue_head *head;
-
- union {
- struct wait_queue_entry wait;
- struct work_struct work;
- };
-};
-
struct aio_kiocb {
union {
struct kiocb rw;
struct fsync_iocb fsync;
- struct poll_iocb poll;
};
struct kioctx *ki_ctx;
@@ -1590,6 +1577,7 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
if (unlikely(iocb->aio_buf || iocb->aio_offset || iocb->aio_nbytes ||
iocb->aio_rw_flags))
return -EINVAL;
+
req->file = fget(iocb->aio_fildes);
if (unlikely(!req->file))
return -EBADF;
@@ -1604,137 +1592,6 @@ static int aio_fsync(struct fsync_iocb *req, struct iocb *iocb, bool datasync)
return 0;
}
-/* need to use list_del_init so we can check if item was present */
-static inline bool __aio_poll_remove(struct poll_iocb *req)
-{
- if (list_empty(&req->wait.entry))
- return false;
- list_del_init(&req->wait.entry);
- return true;
-}
-
-static inline void __aio_poll_complete(struct aio_kiocb *iocb, __poll_t mask)
-{
- fput(iocb->poll.file);
- aio_complete(iocb, mangle_poll(mask), 0);
-}
-
-static void aio_poll_work(struct work_struct *work)
-{
- struct aio_kiocb *iocb = container_of(work, struct aio_kiocb, poll.work);
-
- if (!list_empty_careful(&iocb->ki_list))
- aio_remove_iocb(iocb);
- __aio_poll_complete(iocb, iocb->poll.events);
-}
-
-static int aio_poll_cancel(struct kiocb *iocb)
-{
- struct aio_kiocb *aiocb = container_of(iocb, struct aio_kiocb, rw);
- struct poll_iocb *req = &aiocb->poll;
- struct wait_queue_head *head = req->head;
- bool found = false;
-
- spin_lock(&head->lock);
- found = __aio_poll_remove(req);
- spin_unlock(&head->lock);
-
- if (found) {
- req->events = 0;
- INIT_WORK(&req->work, aio_poll_work);
- schedule_work(&req->work);
- }
- return 0;
-}
-
-static int aio_poll_wake(struct wait_queue_entry *wait, unsigned mode, int sync,
- void *key)
-{
- struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
- struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
- struct file *file = req->file;
- __poll_t mask = key_to_poll(key);
-
- assert_spin_locked(&req->head->lock);
-
- /* for instances that support it check for an event match first: */
- if (mask && !(mask & req->events))
- return 0;
-
- mask = file->f_op->poll_mask(file, req->events) & req->events;
- if (!mask)
- return 0;
-
- __aio_poll_remove(req);
-
- /*
- * Try completing without a context switch if we can acquire ctx_lock
- * without spinning. Otherwise we need to defer to a workqueue to
- * avoid a deadlock due to the lock order.
- */
- if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
- list_del_init(&iocb->ki_list);
- spin_unlock(&iocb->ki_ctx->ctx_lock);
-
- __aio_poll_complete(iocb, mask);
- } else {
- req->events = mask;
- INIT_WORK(&req->work, aio_poll_work);
- schedule_work(&req->work);
- }
-
- return 1;
-}
-
-static ssize_t aio_poll(struct aio_kiocb *aiocb, struct iocb *iocb)
-{
- struct kioctx *ctx = aiocb->ki_ctx;
- struct poll_iocb *req = &aiocb->poll;
- __poll_t mask;
-
- /* reject any unknown events outside the normal event mask. */
- if ((u16)iocb->aio_buf != iocb->aio_buf)
- return -EINVAL;
- /* reject fields that are not defined for poll */
- if (iocb->aio_offset || iocb->aio_nbytes || iocb->aio_rw_flags)
- return -EINVAL;
-
- req->events = demangle_poll(iocb->aio_buf) | EPOLLERR | EPOLLHUP;
- req->file = fget(iocb->aio_fildes);
- if (unlikely(!req->file))
- return -EBADF;
- if (!file_has_poll_mask(req->file))
- goto out_fail;
-
- req->head = req->file->f_op->get_poll_head(req->file, req->events);
- if (!req->head)
- goto out_fail;
- if (IS_ERR(req->head)) {
- mask = EPOLLERR;
- goto done;
- }
-
- init_waitqueue_func_entry(&req->wait, aio_poll_wake);
- aiocb->ki_cancel = aio_poll_cancel;
-
- spin_lock_irq(&ctx->ctx_lock);
- spin_lock(&req->head->lock);
- mask = req->file->f_op->poll_mask(req->file, req->events) & req->events;
- if (!mask) {
- __add_wait_queue(req->head, &req->wait);
- list_add_tail(&aiocb->ki_list, &ctx->active_reqs);
- }
- spin_unlock(&req->head->lock);
- spin_unlock_irq(&ctx->ctx_lock);
-done:
- if (mask)
- __aio_poll_complete(aiocb, mask);
- return 0;
-out_fail:
- fput(req->file);
- return -EINVAL; /* same as no support for IOCB_CMD_POLL */
-}
-
static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
bool compat)
{
@@ -1808,9 +1665,6 @@ static int io_submit_one(struct kioctx *ctx, struct iocb __user *user_iocb,
case IOCB_CMD_FDSYNC:
ret = aio_fsync(&req->fsync, &iocb, true);
break;
- case IOCB_CMD_POLL:
- ret = aio_poll(req, &iocb);
- break;
default:
pr_debug("invalid aio operation %d\n", iocb.aio_lio_opcode);
ret = -EINVAL;
@@ -2042,6 +1896,11 @@ SYSCALL_DEFINE5(io_getevents, aio_context_t, ctx_id,
return ret;
}
+struct __aio_sigset {
+ const sigset_t __user *sigmask;
+ size_t sigsetsize;
+};
+
SYSCALL_DEFINE6(io_pgetevents,
aio_context_t, ctx_id,
long, min_nr,
diff --git a/fs/autofs/Makefile b/fs/autofs/Makefile
index 43fedde15c26..1f85d35ec8b7 100644
--- a/fs/autofs/Makefile
+++ b/fs/autofs/Makefile
@@ -2,6 +2,6 @@
# Makefile for the linux autofs-filesystem routines.
#
-obj-$(CONFIG_AUTOFS_FS) += autofs.o
+obj-$(CONFIG_AUTOFS_FS) += autofs4.o
-autofs-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
+autofs4-objs := init.o inode.o root.o symlink.o waitq.o expire.o dev-ioctl.o
diff --git a/fs/autofs/dev-ioctl.c b/fs/autofs/dev-ioctl.c
index ea4ca1445ab7..86eafda4a652 100644
--- a/fs/autofs/dev-ioctl.c
+++ b/fs/autofs/dev-ioctl.c
@@ -135,6 +135,15 @@ static int validate_dev_ioctl(int cmd, struct autofs_dev_ioctl *param)
cmd);
goto out;
}
+ } else {
+ unsigned int inr = _IOC_NR(cmd);
+
+ if (inr == AUTOFS_DEV_IOCTL_OPENMOUNT_CMD ||
+ inr == AUTOFS_DEV_IOCTL_REQUESTER_CMD ||
+ inr == AUTOFS_DEV_IOCTL_ISMOUNTPOINT_CMD) {
+ err = -EINVAL;
+ goto out;
+ }
}
err = 0;
@@ -271,7 +280,8 @@ static int autofs_dev_ioctl_openmount(struct file *fp,
dev_t devid;
int err, fd;
- /* param->path has already been checked */
+ /* param->path has been checked in validate_dev_ioctl() */
+
if (!param->openmount.devid)
return -EINVAL;
@@ -433,10 +443,7 @@ static int autofs_dev_ioctl_requester(struct file *fp,
dev_t devid;
int err = -ENOENT;
- if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
- err = -EINVAL;
- goto out;
- }
+ /* param->path has been checked in validate_dev_ioctl() */
devid = sbi->sb->s_dev;
@@ -521,10 +528,7 @@ static int autofs_dev_ioctl_ismountpoint(struct file *fp,
unsigned int devid, magic;
int err = -ENOENT;
- if (param->size <= AUTOFS_DEV_IOCTL_SIZE) {
- err = -EINVAL;
- goto out;
- }
+ /* param->path has been checked in validate_dev_ioctl() */
name = param->path;
type = param->ismountpoint.in.type;
diff --git a/fs/autofs/init.c b/fs/autofs/init.c
index cc9447e1903f..79ae07d9592f 100644
--- a/fs/autofs/init.c
+++ b/fs/autofs/init.c
@@ -23,7 +23,7 @@ static struct file_system_type autofs_fs_type = {
.kill_sb = autofs_kill_sb,
};
MODULE_ALIAS_FS("autofs");
-MODULE_ALIAS("autofs4");
+MODULE_ALIAS("autofs");
static int __init init_autofs_fs(void)
{
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 0ac456b52bdd..816cc921cf36 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -1259,9 +1259,8 @@ static int load_elf_library(struct file *file)
goto out_free_ph;
}
- len = ELF_PAGESTART(eppnt->p_filesz + eppnt->p_vaddr +
- ELF_MIN_ALIGN - 1);
- bss = eppnt->p_memsz + eppnt->p_vaddr;
+ len = ELF_PAGEALIGN(eppnt->p_filesz + eppnt->p_vaddr);
+ bss = ELF_PAGEALIGN(eppnt->p_memsz + eppnt->p_vaddr);
if (bss > len) {
error = vm_brk(len, bss - len);
if (error)
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 0dd87aaeb39a..aba25414231a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -221,7 +221,7 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
ret = bio_iov_iter_get_pages(&bio, iter);
if (unlikely(ret))
- return ret;
+ goto out;
ret = bio.bi_iter.bi_size;
if (iov_iter_rw(iter) == READ) {
@@ -250,12 +250,13 @@ __blkdev_direct_IO_simple(struct kiocb *iocb, struct iov_iter *iter,
put_page(bvec->bv_page);
}
- if (vecs != inline_vecs)
- kfree(vecs);
-
if (unlikely(bio.bi_status))
ret = blk_status_to_errno(bio.bi_status);
+out:
+ if (vecs != inline_vecs)
+ kfree(vecs);
+
bio_uninit(&bio);
return ret;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index cce6087d6880..b3e45714d28f 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4238,8 +4238,9 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
struct extent_map *em;
u64 start = page_offset(page);
u64 end = start + PAGE_SIZE - 1;
- struct extent_io_tree *tree = &BTRFS_I(page->mapping->host)->io_tree;
- struct extent_map_tree *map = &BTRFS_I(page->mapping->host)->extent_tree;
+ struct btrfs_inode *btrfs_inode = BTRFS_I(page->mapping->host);
+ struct extent_io_tree *tree = &btrfs_inode->io_tree;
+ struct extent_map_tree *map = &btrfs_inode->extent_tree;
if (gfpflags_allow_blocking(mask) &&
page->mapping->host->i_size > SZ_16M) {
@@ -4262,6 +4263,8 @@ int try_release_extent_mapping(struct page *page, gfp_t mask)
extent_map_end(em) - 1,
EXTENT_LOCKED | EXTENT_WRITEBACK,
0, NULL)) {
+ set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
+ &btrfs_inode->runtime_flags);
remove_extent_mapping(map, em);
/* once for the rb tree */
free_extent_map(em);
@@ -4542,8 +4545,11 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
offset_in_extent = em_start - em->start;
em_end = extent_map_end(em);
em_len = em_end - em_start;
- disko = em->block_start + offset_in_extent;
flags = 0;
+ if (em->block_start < EXTENT_MAP_LAST_BYTE)
+ disko = em->block_start + offset_in_extent;
+ else
+ disko = 0;
/*
* bump off for our next call to get_extent
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e9482f0db9d0..eba61bcb9bb3 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -9005,13 +9005,14 @@ again:
unlock_extent_cached(io_tree, page_start, page_end, &cached_state);
-out_unlock:
if (!ret2) {
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, true);
sb_end_pagefault(inode->i_sb);
extent_changeset_free(data_reserved);
return VM_FAULT_LOCKED;
}
+
+out_unlock:
unlock_page(page);
out:
btrfs_delalloc_release_extents(BTRFS_I(inode), PAGE_SIZE, (ret != 0));
@@ -9443,6 +9444,7 @@ static int btrfs_rename_exchange(struct inode *old_dir,
u64 new_idx = 0;
u64 root_objectid;
int ret;
+ int ret2;
bool root_log_pinned = false;
bool dest_log_pinned = false;
@@ -9639,7 +9641,8 @@ out_fail:
dest_log_pinned = false;
}
}
- ret = btrfs_end_transaction(trans);
+ ret2 = btrfs_end_transaction(trans);
+ ret = ret ? ret : ret2;
out_notrans:
if (new_ino == BTRFS_FIRST_FREE_OBJECTID)
up_read(&fs_info->subvol_sem);
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index c2837a32d689..b077544b5232 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3327,11 +3327,13 @@ static void btrfs_cmp_data_free(struct cmp_pages *cmp)
if (pg) {
unlock_page(pg);
put_page(pg);
+ cmp->src_pages[i] = NULL;
}
pg = cmp->dst_pages[i];
if (pg) {
unlock_page(pg);
put_page(pg);
+ cmp->dst_pages[i] = NULL;
}
}
}
@@ -3577,7 +3579,7 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
ret = btrfs_extent_same_range(src, loff, BTRFS_MAX_DEDUPE_LEN,
dst, dst_loff, &cmp);
if (ret)
- goto out_unlock;
+ goto out_free;
loff += BTRFS_MAX_DEDUPE_LEN;
dst_loff += BTRFS_MAX_DEDUPE_LEN;
@@ -3587,16 +3589,16 @@ static int btrfs_extent_same(struct inode *src, u64 loff, u64 olen,
ret = btrfs_extent_same_range(src, loff, tail_len, dst,
dst_loff, &cmp);
+out_free:
+ kvfree(cmp.src_pages);
+ kvfree(cmp.dst_pages);
+
out_unlock:
if (same_inode)
inode_unlock(src);
else
btrfs_double_inode_unlock(src, dst);
-out_free:
- kvfree(cmp.src_pages);
- kvfree(cmp.dst_pages);
-
return ret;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 1874a6d2e6f5..c25dc47210a3 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -2680,8 +2680,10 @@ out:
free_extent_buffer(scratch_leaf);
}
- if (done && !ret)
+ if (done && !ret) {
ret = 1;
+ fs_info->qgroup_rescan_progress.objectid = (u64)-1;
+ }
return ret;
}
@@ -2784,13 +2786,20 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid,
if (!init_flags) {
/* we're resuming qgroup rescan at mount time */
- if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_RESCAN))
+ if (!(fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAG_RESCAN)) {
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup is not enabled");
- else if (!(fs_info->qgroup_flags & BTRFS_QGROUP_STATUS_FLAG_ON))
+ ret = -EINVAL;
+ } else if (!(fs_info->qgroup_flags &
+ BTRFS_QGROUP_STATUS_FLAG_ON)) {
btrfs_warn(fs_info,
"qgroup rescan init failed, qgroup rescan is not queued");
- return -EINVAL;
+ ret = -EINVAL;
+ }
+
+ if (ret)
+ return ret;
}
mutex_lock(&fs_info->qgroup_rescan_lock);
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 572306036477..6702896cdb8f 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1151,11 +1151,6 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
return ret;
}
- if (sctx->is_dev_replace && !is_metadata && !have_csum) {
- sblocks_for_recheck = NULL;
- goto nodatasum_case;
- }
-
/*
* read all mirrors one after the other. This includes to
* re-read the extent or metadata block that failed (that was
@@ -1268,13 +1263,19 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
goto out;
}
- if (!is_metadata && !have_csum) {
+ /*
+ * NOTE: Even for the nodatasum case, it's still possible that it's a
+ * compressed data extent, thus scrub_fixup_nodatasum(), which writes
+ * the inode page cache onto disk, could cause serious data corruption.
+ *
+ * So here we can only read from disk and hope our recovery reaches
+ * disk before the newer write.
+ */
+ if (0 && !is_metadata && !have_csum) {
struct scrub_fixup_nodatasum *fixup_nodatasum;
WARN_ON(sctx->is_dev_replace);
-nodatasum_case:
-
/*
* !is_metadata and !have_csum, this means that the data
* might not be COWed, that it might be modified
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index e034ad9e23b4..1da162928d1a 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1146,6 +1146,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
{
int ret;
+ mutex_lock(&uuid_mutex);
mutex_lock(&fs_devices->device_list_mutex);
if (fs_devices->opened) {
fs_devices->opened++;
@@ -1155,6 +1156,7 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
ret = open_fs_devices(fs_devices, flags, holder);
}
mutex_unlock(&fs_devices->device_list_mutex);
+ mutex_unlock(&uuid_mutex);
return ret;
}
diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
index d9f001078e08..4a717d400807 100644
--- a/fs/cachefiles/bind.c
+++ b/fs/cachefiles/bind.c
@@ -218,7 +218,8 @@ static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
"%s",
fsdef->dentry->d_sb->s_id);
- fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+ fscache_object_init(&fsdef->fscache, &fscache_fsdef_index,
+ &cache->cache);
ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
if (ret < 0)
diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
index ab0bbe93b398..af2b17b21b94 100644
--- a/fs/cachefiles/namei.c
+++ b/fs/cachefiles/namei.c
@@ -186,12 +186,12 @@ try_again:
* need to wait for it to be destroyed */
wait_for_old_object:
trace_cachefiles_wait_active(object, dentry, xobject);
+ clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
if (fscache_object_is_live(&xobject->fscache)) {
pr_err("\n");
pr_err("Error: Unexpected object collision\n");
cachefiles_printk_object(object, xobject);
- BUG();
}
atomic_inc(&xobject->usage);
write_unlock(&cache->active_lock);
@@ -248,7 +248,6 @@ wait_for_old_object:
goto try_again;
requeue:
- clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
cache->cache.ops->put_object(&xobject->fscache, cachefiles_obj_put_wait_timeo);
_leave(" = -ETIMEDOUT");
return -ETIMEDOUT;
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 5082c8a49686..40f7595aad10 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -27,6 +27,7 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
struct cachefiles_one_read *monitor =
container_of(wait, struct cachefiles_one_read, monitor);
struct cachefiles_object *object;
+ struct fscache_retrieval *op = monitor->op;
struct wait_bit_key *key = _key;
struct page *page = wait->private;
@@ -51,16 +52,22 @@ static int cachefiles_read_waiter(wait_queue_entry_t *wait, unsigned mode,
list_del(&wait->entry);
/* move onto the action list and queue for FS-Cache thread pool */
- ASSERT(monitor->op);
+ ASSERT(op);
- object = container_of(monitor->op->op.object,
- struct cachefiles_object, fscache);
+ /* We need to temporarily bump the usage count as we don't own a ref
+ * here otherwise cachefiles_read_copier() may free the op between the
+ * monitor being enqueued on the op->to_do list and the op getting
+ * enqueued on the work queue.
+ */
+ fscache_get_retrieval(op);
+ object = container_of(op->op.object, struct cachefiles_object, fscache);
spin_lock(&object->work_lock);
- list_add_tail(&monitor->op_link, &monitor->op->to_do);
+ list_add_tail(&monitor->op_link, &op->to_do);
spin_unlock(&object->work_lock);
- fscache_enqueue_retrieval(monitor->op);
+ fscache_enqueue_retrieval(op);
+ fscache_put_retrieval(op);
return 0;
}
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index ee764ac352ab..a866be999216 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1135,6 +1135,7 @@ static struct dentry *splice_dentry(struct dentry *dn, struct inode *in)
if (IS_ERR(realdn)) {
pr_err("splice_dentry error %ld %p inode %p ino %llx.%llx\n",
PTR_ERR(realdn), dn, in, ceph_vinop(in));
+ dput(dn);
dn = realdn; /* note realdn contains the error */
goto out;
} else if (realdn) {
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 116146022aa1..bfe999505815 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -126,6 +126,25 @@ static void cifs_debug_tcon(struct seq_file *m, struct cifs_tcon *tcon)
seq_putc(m, '\n');
}
+static void
+cifs_dump_iface(struct seq_file *m, struct cifs_server_iface *iface)
+{
+ struct sockaddr_in *ipv4 = (struct sockaddr_in *)&iface->sockaddr;
+ struct sockaddr_in6 *ipv6 = (struct sockaddr_in6 *)&iface->sockaddr;
+
+ seq_printf(m, "\t\tSpeed: %zu bps\n", iface->speed);
+ seq_puts(m, "\t\tCapabilities: ");
+ if (iface->rdma_capable)
+ seq_puts(m, "rdma ");
+ if (iface->rss_capable)
+ seq_puts(m, "rss ");
+ seq_putc(m, '\n');
+ if (iface->sockaddr.ss_family == AF_INET)
+ seq_printf(m, "\t\tIPv4: %pI4\n", &ipv4->sin_addr);
+ else if (iface->sockaddr.ss_family == AF_INET6)
+ seq_printf(m, "\t\tIPv6: %pI6\n", &ipv6->sin6_addr);
+}
+
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
{
struct list_head *tmp1, *tmp2, *tmp3;
@@ -312,6 +331,16 @@ skip_rdma:
mid_entry->mid);
}
spin_unlock(&GlobalMid_Lock);
+
+ spin_lock(&ses->iface_lock);
+ if (ses->iface_count)
+ seq_printf(m, "\n\tServer interfaces: %zu\n",
+ ses->iface_count);
+ for (j = 0; j < ses->iface_count; j++) {
+ seq_printf(m, "\t%d)\n", j);
+ cifs_dump_iface(m, &ses->iface_list[j]);
+ }
+ spin_unlock(&ses->iface_lock);
}
}
spin_unlock(&cifs_tcp_ses_lock);
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index 937251cc61c0..ee2a8ec70056 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -37,7 +37,6 @@
#include <crypto/aead.h>
int __cifs_calc_signature(struct smb_rqst *rqst,
- int start,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash)
{
@@ -45,16 +44,27 @@ int __cifs_calc_signature(struct smb_rqst *rqst,
int rc;
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
+ int is_smb2 = server->vals->header_preamble_size == 0;
- for (i = start; i < n_vec; i++) {
+ /* iov[0] is actual data and not the rfc1002 length for SMB2+ */
+ if (is_smb2) {
+ if (iov[0].iov_len <= 4)
+ return -EIO;
+ i = 0;
+ } else {
+ if (n_vec < 2 || iov[0].iov_len != 4)
+ return -EIO;
+ i = 1; /* skip rfc1002 length */
+ }
+
+ for (; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
if (iov[i].iov_base == NULL) {
cifs_dbg(VFS, "null iovec entry\n");
return -EIO;
}
- if (i == 1 && iov[1].iov_len <= 4)
- break; /* nothing to sign or corrupt header */
+
rc = crypto_shash_update(shash,
iov[i].iov_base, iov[i].iov_len);
if (rc) {
@@ -118,7 +128,7 @@ static int cifs_calc_signature(struct smb_rqst *rqst,
return rc;
}
- return __cifs_calc_signature(rqst, 1, server, signature,
+ return __cifs_calc_signature(rqst, server, signature,
&server->secmech.sdescmd5->shash);
}
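The cifsencrypt change folds the "where does signing start" decision into __cifs_calc_signature() itself: SMB2+ requests carry payload in iov[0], while SMB1 requests place the 4-byte RFC 1002 length there and signing starts at iov[1]. The standalone sketch below only mirrors that loop shape, with a trivial additive checksum standing in for the crypto_shash digest; it is not the CIFS code.

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Walk an iovec array and fold every buffer into a running checksum,
 * starting at index 1 when the first vector is a 4-byte length header
 * and at index 0 when it is already payload.
 */
static unsigned int digest_iovecs(const struct iovec *iov, int n_vec,
				  int skip_length_header)
{
	unsigned int sum = 0;
	int i = skip_length_header ? 1 : 0;

	for (; i < n_vec; i++) {
		const unsigned char *p = iov[i].iov_base;
		size_t j;

		for (j = 0; j < iov[i].iov_len; j++)
			sum += p[j];
	}
	return sum;
}

int main(void)
{
	unsigned char len_hdr[4] = { 0, 0, 0, 8 };	/* fake RFC 1002 length */
	char payload[] = "SMB data";
	struct iovec iov[2] = {
		{ .iov_base = len_hdr, .iov_len = sizeof(len_hdr) },
		{ .iov_base = payload, .iov_len = strlen(payload) },
	};

	/* SMB1-style request: skip the length vector before digesting. */
	printf("%u\n", digest_iovecs(iov, 2, 1));
	return 0;
}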
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 1efa2e65bc1a..c923c7854027 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -33,6 +33,9 @@
#define CIFS_MAGIC_NUMBER 0xFF534D42 /* the first four bytes of SMB PDUs */
+#define CIFS_PORT 445
+#define RFC1001_PORT 139
+
/*
* The sizes of various internal tables and strings
*/
@@ -312,6 +315,10 @@ struct smb_version_operations {
/* send echo request */
int (*echo)(struct TCP_Server_Info *);
/* create directory */
+ int (*posix_mkdir)(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
+ const char *full_path,
+ struct cifs_sb_info *cifs_sb);
int (*mkdir)(const unsigned int, struct cifs_tcon *, const char *,
struct cifs_sb_info *);
/* set info on created directory */
@@ -416,7 +423,7 @@ struct smb_version_operations {
void (*set_oplock_level)(struct cifsInodeInfo *, __u32, unsigned int,
bool *);
/* create lease context buffer for CREATE request */
- char * (*create_lease_buf)(u8 *, u8);
+ char * (*create_lease_buf)(u8 *lease_key, u8 oplock);
/* parse lease context buffer and return oplock/epoch info */
__u8 (*parse_lease_buf)(void *buf, unsigned int *epoch, char *lkey);
ssize_t (*copychunk_range)(const unsigned int,
@@ -838,6 +845,13 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net)
#endif
+struct cifs_server_iface {
+ size_t speed;
+ unsigned int rdma_capable : 1;
+ unsigned int rss_capable : 1;
+ struct sockaddr_storage sockaddr;
+};
+
/*
* Session structure. One of these for each uid session with a particular host
*/
@@ -875,6 +889,20 @@ struct cifs_ses {
#ifdef CONFIG_CIFS_SMB311
__u8 preauth_sha_hash[SMB2_PREAUTH_HASH_SIZE];
#endif /* 3.1.1 */
+
+ /*
+ * Network interfaces available on the server this session is
+ * connected to.
+ *
+ * Other channels can be opened by connecting and binding this
+ * session to interfaces from this list.
+ *
+ * iface_lock should be taken when accessing any of these fields
+ */
+ spinlock_t iface_lock;
+ struct cifs_server_iface *iface_list;
+ size_t iface_count;
+ unsigned long iface_last_update; /* jiffies */
};
static inline bool
@@ -883,6 +911,14 @@ cap_unix(struct cifs_ses *ses)
return ses->server->vals->cap_unix & ses->capabilities;
}
+struct cached_fid {
+ bool is_valid:1; /* Do we have a useable root fid */
+ struct cifs_fid *fid;
+ struct mutex fid_mutex;
+ struct cifs_tcon *tcon;
+ struct work_struct lease_break;
+};
+
/*
* there is one of these for each connection to a resource on a particular
* session
@@ -987,9 +1023,7 @@ struct cifs_tcon {
struct fscache_cookie *fscache; /* cookie for share */
#endif
struct list_head pending_opens; /* list of incomplete opens */
- bool valid_root_fid:1; /* Do we have a useable root fid */
- struct mutex prfid_mutex; /* prevents reopen race after dead ses*/
- struct cifs_fid *prfid; /* handle to the directory at top of share */
+ struct cached_fid crfid; /* Cached root fid */
/* BB add field for back pointer to sb struct(s)? */
};
@@ -1382,6 +1416,7 @@ typedef int (mid_handle_t)(struct TCP_Server_Info *server,
/* one of these for every pending CIFS request to the server */
struct mid_q_entry {
struct list_head qhead; /* mids waiting on reply from this server */
+ struct kref refcount;
struct TCP_Server_Info *server; /* server corresponding to this mid */
__u64 mid; /* multiplex id */
__u32 pid; /* process id */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 4e0d183c3d10..1890f534c88b 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -82,6 +82,7 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
struct TCP_Server_Info *server);
extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
extern void cifs_delete_mid(struct mid_q_entry *mid);
+extern void cifs_mid_q_entry_release(struct mid_q_entry *midEntry);
extern void cifs_wake_up_task(struct mid_q_entry *mid);
extern int cifs_handle_standard(struct TCP_Server_Info *server,
struct mid_q_entry *mid);
@@ -112,10 +113,6 @@ extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
struct kvec *, int /* nvec to send */,
int * /* type of buf returned */, const int flags,
struct kvec * /* resp vec */);
-extern int smb2_send_recv(const unsigned int xid, struct cifs_ses *pses,
- struct kvec *pkvec, int nvec_to_send,
- int *pbuftype, const int flags,
- struct kvec *presp);
extern int SendReceiveBlockingLock(const unsigned int xid,
struct cifs_tcon *ptcon,
struct smb_hdr *in_buf ,
@@ -544,7 +541,7 @@ int cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb,
const unsigned char *path, char *pbuf,
unsigned int *pbytes_written);
-int __cifs_calc_signature(struct smb_rqst *rqst, int start,
+int __cifs_calc_signature(struct smb_rqst *rqst,
struct TCP_Server_Info *server, char *signature,
struct shash_desc *shash);
enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
@@ -552,6 +549,7 @@ enum securityEnum cifs_select_sectype(struct TCP_Server_Info *,
struct cifs_aio_ctx *cifs_aio_ctx_alloc(void);
void cifs_aio_ctx_release(struct kref *refcount);
int setup_aio_ctx_iter(struct cifs_aio_ctx *ctx, struct iov_iter *iter, int rw);
+void smb2_cached_lease_break(struct work_struct *work);
int cifs_alloc_hash(const char *name, struct crypto_shash **shash,
struct sdesc **sdesc);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 42329b25877d..93408eab92e7 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -107,10 +107,10 @@ cifs_mark_open_files_invalid(struct cifs_tcon *tcon)
}
spin_unlock(&tcon->open_file_lock);
- mutex_lock(&tcon->prfid_mutex);
- tcon->valid_root_fid = false;
- memset(tcon->prfid, 0, sizeof(struct cifs_fid));
- mutex_unlock(&tcon->prfid_mutex);
+ mutex_lock(&tcon->crfid.fid_mutex);
+ tcon->crfid.is_valid = false;
+ memset(tcon->crfid.fid, 0, sizeof(struct cifs_fid));
+ mutex_unlock(&tcon->crfid.fid_mutex);
/*
* BB Add call to invalidate_inodes(sb) for all superblocks mounted
@@ -157,8 +157,14 @@ cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command)
* greater than cifs socket timeout which is 7 seconds
*/
while (server->tcpStatus == CifsNeedReconnect) {
- wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+ rc = wait_event_interruptible_timeout(server->response_q,
+ (server->tcpStatus != CifsNeedReconnect),
+ 10 * HZ);
+ if (rc < 0) {
+ cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+ " received by the process\n", __func__);
+ return -ERESTARTSYS;
+ }
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 96645a7d8f27..5df2c0698cda 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -57,9 +57,6 @@
#include "smb2proto.h"
#include "smbdirect.h"
-#define CIFS_PORT 445
-#define RFC1001_PORT 139
-
extern mempool_t *cifs_req_poolp;
extern bool disable_legacy_dialects;
@@ -927,6 +924,7 @@ next_pdu:
server->pdu_size = next_offset;
}
+ mid_entry = NULL;
if (server->ops->is_transform_hdr &&
server->ops->receive_transform &&
server->ops->is_transform_hdr(buf)) {
@@ -941,8 +939,11 @@ next_pdu:
length = mid_entry->receive(server, mid_entry);
}
- if (length < 0)
+ if (length < 0) {
+ if (mid_entry)
+ cifs_mid_q_entry_release(mid_entry);
continue;
+ }
if (server->large_buf)
buf = server->bigbuf;
@@ -959,6 +960,8 @@ next_pdu:
if (!mid_entry->multiRsp || mid_entry->multiEnd)
mid_entry->callback(mid_entry);
+
+ cifs_mid_q_entry_release(mid_entry);
} else if (server->ops->is_oplock_break &&
server->ops->is_oplock_break(buf, server)) {
cifs_dbg(FYI, "Received oplock break\n");
@@ -3029,8 +3032,11 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info)
#ifdef CONFIG_CIFS_SMB311
if ((volume_info->linux_ext) && (ses->server->posix_ext_supported)) {
- if (ses->server->vals->protocol_id == SMB311_PROT_ID)
+ if (ses->server->vals->protocol_id == SMB311_PROT_ID) {
tcon->posix_extensions = true;
+ printk_once(KERN_WARNING
+ "SMB3.11 POSIX Extensions are experimental\n");
+ }
}
#endif /* 311 */
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index f4697f548a39..a2cfb33e85c1 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1575,6 +1575,17 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
goto mkdir_out;
}
+ server = tcon->ses->server;
+
+#ifdef CONFIG_CIFS_SMB311
+ if ((server->ops->posix_mkdir) && (tcon->posix_extensions)) {
+ rc = server->ops->posix_mkdir(xid, inode, mode, tcon, full_path,
+ cifs_sb);
+ d_drop(direntry); /* for time being always refresh inode info */
+ goto mkdir_out;
+ }
+#endif /* SMB311 */
+
if (cap_unix(tcon->ses) && (CIFS_UNIX_POSIX_PATH_OPS_CAP &
le64_to_cpu(tcon->fsUnixInfo.Capability))) {
rc = cifs_posix_mkdir(inode, direntry, mode, full_path, cifs_sb,
@@ -1583,8 +1594,6 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
goto mkdir_out;
}
- server = tcon->ses->server;
-
if (!server->ops->mkdir) {
rc = -ENOSYS;
goto mkdir_out;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index af29ade195c0..53e8362cbc4a 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -82,6 +82,7 @@ sesInfoAlloc(void)
INIT_LIST_HEAD(&ret_buf->smb_ses_list);
INIT_LIST_HEAD(&ret_buf->tcon_list);
mutex_init(&ret_buf->session_mutex);
+ spin_lock_init(&ret_buf->iface_lock);
}
return ret_buf;
}
@@ -102,6 +103,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
kzfree(buf_to_free->auth_key.response);
+ kfree(buf_to_free->iface_list);
kzfree(buf_to_free);
}
@@ -117,8 +119,9 @@ tconInfoAlloc(void)
INIT_LIST_HEAD(&ret_buf->openFileList);
INIT_LIST_HEAD(&ret_buf->tcon_list);
spin_lock_init(&ret_buf->open_file_lock);
- mutex_init(&ret_buf->prfid_mutex);
- ret_buf->prfid = kzalloc(sizeof(struct cifs_fid), GFP_KERNEL);
+ mutex_init(&ret_buf->crfid.fid_mutex);
+ ret_buf->crfid.fid = kzalloc(sizeof(struct cifs_fid),
+ GFP_KERNEL);
#ifdef CONFIG_CIFS_STATS
spin_lock_init(&ret_buf->stat_lock);
#endif
@@ -136,7 +139,7 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
atomic_dec(&tconInfoAllocCount);
kfree(buf_to_free->nativeFileSystem);
kzfree(buf_to_free->password);
- kfree(buf_to_free->prfid);
+ kfree(buf_to_free->crfid.fid);
kfree(buf_to_free);
}
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index aff8ce8ba34d..646dcd149de1 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -107,6 +107,7 @@ cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
if (compare_mid(mid->mid, buf) &&
mid->mid_state == MID_REQUEST_SUBMITTED &&
le16_to_cpu(mid->command) == buf->Command) {
+ kref_get(&mid->refcount);
spin_unlock(&GlobalMid_Lock);
return mid;
}
diff --git a/fs/cifs/smb2file.c b/fs/cifs/smb2file.c
index 788412675723..4ed10dd086e6 100644
--- a/fs/cifs/smb2file.c
+++ b/fs/cifs/smb2file.c
@@ -41,7 +41,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
int rc;
__le16 *smb2_path;
struct smb2_file_all_info *smb2_data = NULL;
- __u8 smb2_oplock[17];
+ __u8 smb2_oplock;
struct cifs_fid *fid = oparms->fid;
struct network_resiliency_req nr_ioctl_req;
@@ -59,12 +59,9 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
}
oparms->desired_access |= FILE_READ_ATTRIBUTES;
- *smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
+ smb2_oplock = SMB2_OPLOCK_LEVEL_BATCH;
- if (oparms->tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LEASING)
- memcpy(smb2_oplock + 1, fid->lease_key, SMB2_LEASE_KEY_SIZE);
-
- rc = SMB2_open(xid, oparms, smb2_path, smb2_oplock, smb2_data, NULL,
+ rc = SMB2_open(xid, oparms, smb2_path, &smb2_oplock, smb2_data, NULL,
NULL);
if (rc)
goto out;
@@ -101,7 +98,7 @@ smb2_open_file(const unsigned int xid, struct cifs_open_parms *oparms,
move_smb2_info_to_cifs(buf, smb2_data);
}
- *oplock = *smb2_oplock;
+ *oplock = smb2_oplock;
out:
kfree(smb2_data);
kfree(smb2_path);
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index e2bec47c6845..3ff7cec2da81 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -454,7 +454,8 @@ cifs_convert_path_to_utf16(const char *from, struct cifs_sb_info *cifs_sb)
#ifdef CONFIG_CIFS_SMB311
/* SMB311 POSIX extensions paths do not include leading slash */
else if (cifs_sb_master_tlink(cifs_sb) &&
- cifs_sb_master_tcon(cifs_sb)->posix_extensions) {
+ cifs_sb_master_tcon(cifs_sb)->posix_extensions &&
+ (from[0] == '/')) {
start_of_path = from + 1;
}
#endif /* 311 */
@@ -492,10 +493,11 @@ cifs_ses_oplock_break(struct work_struct *work)
{
struct smb2_lease_break_work *lw = container_of(work,
struct smb2_lease_break_work, lease_break);
- int rc;
+ int rc = 0;
rc = SMB2_lease_break(0, tlink_tcon(lw->tlink), lw->lease_key,
lw->lease_state);
+
cifs_dbg(FYI, "Lease release rc %d\n", rc);
cifs_put_tlink(lw->tlink);
kfree(lw);
@@ -561,6 +563,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
open->oplock = lease_state;
}
+
return found;
}
@@ -603,6 +606,18 @@ smb2_is_valid_lease_break(char *buffer)
return true;
}
spin_unlock(&tcon->open_file_lock);
+
+ if (tcon->crfid.is_valid &&
+ !memcmp(rsp->LeaseKey,
+ tcon->crfid.fid->lease_key,
+ SMB2_LEASE_KEY_SIZE)) {
+ INIT_WORK(&tcon->crfid.lease_break,
+ smb2_cached_lease_break);
+ queue_work(cifsiod_wq,
+ &tcon->crfid.lease_break);
+ spin_unlock(&cifs_tcp_ses_lock);
+ return true;
+ }
}
}
}
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index b15f5957d645..ea92a38b2f08 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -203,6 +203,7 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == shdr->Command)) {
+ kref_get(&mid->refcount);
spin_unlock(&GlobalMid_Lock);
return mid;
}
@@ -294,34 +295,191 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
return rsize;
}
-#ifdef CONFIG_CIFS_STATS2
+
+static int
+parse_server_interfaces(struct network_interface_info_ioctl_rsp *buf,
+ size_t buf_len,
+ struct cifs_server_iface **iface_list,
+ size_t *iface_count)
+{
+ struct network_interface_info_ioctl_rsp *p;
+ struct sockaddr_in *addr4;
+ struct sockaddr_in6 *addr6;
+ struct iface_info_ipv4 *p4;
+ struct iface_info_ipv6 *p6;
+ struct cifs_server_iface *info;
+ ssize_t bytes_left;
+ size_t next = 0;
+ int nb_iface = 0;
+ int rc = 0;
+
+ *iface_list = NULL;
+ *iface_count = 0;
+
+ /*
+	 * First pass: count and sanity check
+ */
+
+ bytes_left = buf_len;
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ nb_iface++;
+ next = le32_to_cpu(p->Next);
+ if (!next) {
+ bytes_left -= sizeof(*p);
+ break;
+ }
+ p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+ bytes_left -= next;
+ }
+
+ if (!nb_iface) {
+ cifs_dbg(VFS, "%s: malformed interface info\n", __func__);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ if (bytes_left || p->Next)
+ cifs_dbg(VFS, "%s: incomplete interface info\n", __func__);
+
+
+ /*
+ * Second pass: extract info to internal structure
+ */
+
+ *iface_list = kcalloc(nb_iface, sizeof(**iface_list), GFP_KERNEL);
+ if (!*iface_list) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ info = *iface_list;
+ bytes_left = buf_len;
+ p = buf;
+ while (bytes_left >= sizeof(*p)) {
+ info->speed = le64_to_cpu(p->LinkSpeed);
+ info->rdma_capable = le32_to_cpu(p->Capability & RDMA_CAPABLE);
+ info->rss_capable = le32_to_cpu(p->Capability & RSS_CAPABLE);
+
+ cifs_dbg(FYI, "%s: adding iface %zu\n", __func__, *iface_count);
+ cifs_dbg(FYI, "%s: speed %zu bps\n", __func__, info->speed);
+ cifs_dbg(FYI, "%s: capabilities 0x%08x\n", __func__,
+ le32_to_cpu(p->Capability));
+
+ switch (p->Family) {
+ /*
+ * The kernel and wire socket structures have the same
+ * layout and use network byte order but make the
+ * conversion explicit in case either one changes.
+ */
+ case INTERNETWORK:
+ addr4 = (struct sockaddr_in *)&info->sockaddr;
+ p4 = (struct iface_info_ipv4 *)p->Buffer;
+ addr4->sin_family = AF_INET;
+ memcpy(&addr4->sin_addr, &p4->IPv4Address, 4);
+
+ /* [MS-SMB2] 2.2.32.5.1.1 Clients MUST ignore these */
+ addr4->sin_port = cpu_to_be16(CIFS_PORT);
+
+ cifs_dbg(FYI, "%s: ipv4 %pI4\n", __func__,
+ &addr4->sin_addr);
+ break;
+ case INTERNETWORKV6:
+ addr6 = (struct sockaddr_in6 *)&info->sockaddr;
+ p6 = (struct iface_info_ipv6 *)p->Buffer;
+ addr6->sin6_family = AF_INET6;
+ memcpy(&addr6->sin6_addr, &p6->IPv6Address, 16);
+
+ /* [MS-SMB2] 2.2.32.5.1.2 Clients MUST ignore these */
+ addr6->sin6_flowinfo = 0;
+ addr6->sin6_scope_id = 0;
+ addr6->sin6_port = cpu_to_be16(CIFS_PORT);
+
+ cifs_dbg(FYI, "%s: ipv6 %pI6\n", __func__,
+ &addr6->sin6_addr);
+ break;
+ default:
+ cifs_dbg(VFS,
+ "%s: skipping unsupported socket family\n",
+ __func__);
+ goto next_iface;
+ }
+
+ (*iface_count)++;
+ info++;
+next_iface:
+ next = le32_to_cpu(p->Next);
+ if (!next)
+ break;
+ p = (struct network_interface_info_ioctl_rsp *)((u8 *)p+next);
+ bytes_left -= next;
+ }
+
+ if (!*iface_count) {
+ rc = -EINVAL;
+ goto out;
+ }
+
+out:
+ if (rc) {
+ kfree(*iface_list);
+ *iface_count = 0;
+ *iface_list = NULL;
+ }
+ return rc;
+}
+
+
static int
SMB3_request_interfaces(const unsigned int xid, struct cifs_tcon *tcon)
{
int rc;
unsigned int ret_data_len = 0;
- struct network_interface_info_ioctl_rsp *out_buf;
+ struct network_interface_info_ioctl_rsp *out_buf = NULL;
+ struct cifs_server_iface *iface_list;
+ size_t iface_count;
+ struct cifs_ses *ses = tcon->ses;
rc = SMB2_ioctl(xid, tcon, NO_FILE_ID, NO_FILE_ID,
FSCTL_QUERY_NETWORK_INTERFACE_INFO, true /* is_fsctl */,
NULL /* no data input */, 0 /* no data input */,
(char **)&out_buf, &ret_data_len);
- if (rc != 0)
+ if (rc != 0) {
cifs_dbg(VFS, "error %d on ioctl to get interface list\n", rc);
- else if (ret_data_len < sizeof(struct network_interface_info_ioctl_rsp)) {
- cifs_dbg(VFS, "server returned bad net interface info buf\n");
- rc = -EINVAL;
- } else {
- /* Dump info on first interface */
- cifs_dbg(FYI, "Adapter Capability 0x%x\t",
- le32_to_cpu(out_buf->Capability));
- cifs_dbg(FYI, "Link Speed %lld\n",
- le64_to_cpu(out_buf->LinkSpeed));
+ goto out;
}
+
+ rc = parse_server_interfaces(out_buf, ret_data_len,
+ &iface_list, &iface_count);
+ if (rc)
+ goto out;
+
+ spin_lock(&ses->iface_lock);
+ kfree(ses->iface_list);
+ ses->iface_list = iface_list;
+ ses->iface_count = iface_count;
+ ses->iface_last_update = jiffies;
+ spin_unlock(&ses->iface_lock);
+
+out:
kfree(out_buf);
return rc;
}
-#endif /* STATS2 */
+
+void
+smb2_cached_lease_break(struct work_struct *work)
+{
+ struct cached_fid *cfid = container_of(work,
+ struct cached_fid, lease_break);
+ mutex_lock(&cfid->fid_mutex);
+ if (cfid->is_valid) {
+ cifs_dbg(FYI, "clear cached root file handle\n");
+ SMB2_close(0, cfid->tcon, cfid->fid->persistent_fid,
+ cfid->fid->volatile_fid);
+ cfid->is_valid = false;
+ }
+ mutex_unlock(&cfid->fid_mutex);
+}
/*
* Open the directory at the root of a share
@@ -331,13 +489,13 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
struct cifs_open_parms oparams;
int rc;
__le16 srch_path = 0; /* Null - since an open of top of share */
- u8 oplock = SMB2_OPLOCK_LEVEL_NONE;
+ u8 oplock = SMB2_OPLOCK_LEVEL_II;
- mutex_lock(&tcon->prfid_mutex);
- if (tcon->valid_root_fid) {
+ mutex_lock(&tcon->crfid.fid_mutex);
+ if (tcon->crfid.is_valid) {
cifs_dbg(FYI, "found a cached root file handle\n");
- memcpy(pfid, tcon->prfid, sizeof(struct cifs_fid));
- mutex_unlock(&tcon->prfid_mutex);
+ memcpy(pfid, tcon->crfid.fid, sizeof(struct cifs_fid));
+ mutex_unlock(&tcon->crfid.fid_mutex);
return 0;
}
@@ -350,10 +508,11 @@ int open_shroot(unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *pfid)
rc = SMB2_open(xid, &oparams, &srch_path, &oplock, NULL, NULL, NULL);
if (rc == 0) {
- memcpy(tcon->prfid, pfid, sizeof(struct cifs_fid));
- tcon->valid_root_fid = true;
+ memcpy(tcon->crfid.fid, pfid, sizeof(struct cifs_fid));
+ tcon->crfid.tcon = tcon;
+ tcon->crfid.is_valid = true;
}
- mutex_unlock(&tcon->prfid_mutex);
+ mutex_unlock(&tcon->crfid.fid_mutex);
return rc;
}
@@ -383,9 +542,7 @@ smb3_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon)
if (rc)
return;
-#ifdef CONFIG_CIFS_STATS2
SMB3_request_interfaces(xid, tcon);
-#endif /* STATS2 */
SMB2_QFS_attr(xid, tcon, fid.persistent_fid, fid.volatile_fid,
FS_ATTRIBUTE_INFORMATION);
@@ -436,7 +593,7 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_open_parms oparms;
struct cifs_fid fid;
- if ((*full_path == 0) && tcon->valid_root_fid)
+ if ((*full_path == 0) && tcon->crfid.is_valid)
return 0;
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
@@ -699,6 +856,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
rc = SMB2_set_ea(xid, tcon, fid.persistent_fid, fid.volatile_fid, ea,
len);
+ kfree(ea);
+
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
return rc;
@@ -2063,8 +2222,7 @@ smb2_create_lease_buf(u8 *lease_key, u8 oplock)
if (!buf)
return NULL;
- buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
- buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+ memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2090,8 +2248,7 @@ smb3_create_lease_buf(u8 *lease_key, u8 oplock)
if (!buf)
return NULL;
- buf->lcontext.LeaseKeyLow = cpu_to_le64(*((u64 *)lease_key));
- buf->lcontext.LeaseKeyHigh = cpu_to_le64(*((u64 *)(lease_key + 8)));
+ memcpy(&buf->lcontext.LeaseKey, lease_key, SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseState = map_oplock_to_lease(oplock);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -2128,8 +2285,7 @@ smb3_parse_lease_buf(void *buf, unsigned int *epoch, char *lease_key)
if (lc->lcontext.LeaseFlags & SMB2_LEASE_FLAG_BREAK_IN_PROGRESS)
return SMB2_OPLOCK_LEVEL_NOCHANGE;
if (lease_key)
- memcpy(lease_key, &lc->lcontext.LeaseKeyLow,
- SMB2_LEASE_KEY_SIZE);
+ memcpy(lease_key, &lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
return le32_to_cpu(lc->lcontext.LeaseState);
}
@@ -2151,7 +2307,7 @@ fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, unsigned int orig_len,
struct smb_rqst *old_rq)
{
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)old_rq->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)old_rq->rq_iov[0].iov_base;
memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
@@ -2171,14 +2327,13 @@ static inline void smb2_sg_set_buf(struct scatterlist *sg, const void *buf,
}
/* Assumes:
- * rqst->rq_iov[0] is rfc1002 length
- * rqst->rq_iov[1] is tranform header
- * rqst->rq_iov[2+] data to be encrypted/decrypted
+ * rqst->rq_iov[0] is transform header
+ * rqst->rq_iov[1+] data to be encrypted/decrypted
*/
static struct scatterlist *
init_sg(struct smb_rqst *rqst, u8 *sign)
{
- unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages;
+ unsigned int sg_len = rqst->rq_nvec + rqst->rq_npages + 1;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
struct scatterlist *sg;
unsigned int i;
@@ -2189,10 +2344,10 @@ init_sg(struct smb_rqst *rqst, u8 *sign)
return NULL;
sg_init_table(sg, sg_len);
- smb2_sg_set_buf(&sg[0], rqst->rq_iov[1].iov_base + 20, assoc_data_len);
- for (i = 1; i < rqst->rq_nvec - 1; i++)
- smb2_sg_set_buf(&sg[i], rqst->rq_iov[i+1].iov_base,
- rqst->rq_iov[i+1].iov_len);
+ smb2_sg_set_buf(&sg[0], rqst->rq_iov[0].iov_base + 20, assoc_data_len);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ smb2_sg_set_buf(&sg[i], rqst->rq_iov[i].iov_base,
+ rqst->rq_iov[i].iov_len);
for (j = 0; i < sg_len - 1; i++, j++) {
unsigned int len, offset;
@@ -2224,18 +2379,17 @@ smb2_get_enc_key(struct TCP_Server_Info *server, __u64 ses_id, int enc, u8 *key)
return 1;
}
/*
- * Encrypt or decrypt @rqst message. @rqst has the following format:
- * iov[0] - rfc1002 length
- * iov[1] - transform header (associate data),
- * iov[2-N] and pages - data to encrypt.
- * On success return encrypted data in iov[2-N] and pages, leave iov[0-1]
+ * Encrypt or decrypt @rqst message. @rqst[0] has the following format:
+ * iov[0] - transform header (associated data),
+ * iov[1-N] - SMB2 header and pages - data to encrypt.
+ * On success return encrypted data in iov[1-N] and pages, leave iov[0]
* untouched.
*/
static int
crypt_message(struct TCP_Server_Info *server, struct smb_rqst *rqst, int enc)
{
struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_transform_hdr *)rqst->rq_iov[0].iov_base;
unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc = 0;
struct scatterlist *sg;
@@ -2323,10 +2477,6 @@ free_req:
return rc;
}
-/*
- * This is called from smb_send_rqst. At this point we have the rfc1002
- * header as the first element in the vector.
- */
static int
smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
struct smb_rqst *old_rq)
@@ -2335,7 +2485,7 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
struct page **pages;
struct smb2_transform_hdr *tr_hdr;
unsigned int npages = old_rq->rq_npages;
- unsigned int orig_len = get_rfc1002_length(old_rq->rq_iov[0].iov_base);
+ unsigned int orig_len;
int i;
int rc = -ENOMEM;
@@ -2355,18 +2505,14 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
goto err_free_pages;
}
- /* Make space for one extra iov to hold the transform header */
iov = kmalloc_array(old_rq->rq_nvec + 1, sizeof(struct kvec),
GFP_KERNEL);
if (!iov)
goto err_free_pages;
- /* copy all iovs from the old except the 1st one (rfc1002 length) */
- memcpy(&iov[2], &old_rq->rq_iov[1],
- sizeof(struct kvec) * (old_rq->rq_nvec - 1));
- /* copy the rfc1002 iov */
- iov[0].iov_base = old_rq->rq_iov[0].iov_base;
- iov[0].iov_len = old_rq->rq_iov[0].iov_len;
+ /* copy all iovs from the old */
+ memcpy(&iov[1], &old_rq->rq_iov[0],
+ sizeof(struct kvec) * old_rq->rq_nvec);
new_rq->rq_iov = iov;
new_rq->rq_nvec = old_rq->rq_nvec + 1;
@@ -2375,14 +2521,12 @@ smb3_init_transform_rq(struct TCP_Server_Info *server, struct smb_rqst *new_rq,
if (!tr_hdr)
goto err_free_iov;
+ orig_len = smb_rqst_len(server, old_rq);
+
/* fill the 2nd iov with a transform header */
fill_transform_hdr(tr_hdr, orig_len, old_rq);
- new_rq->rq_iov[1].iov_base = tr_hdr;
- new_rq->rq_iov[1].iov_len = sizeof(struct smb2_transform_hdr);
-
- /* Update rfc1002 header */
- inc_rfc1001_len(new_rq->rq_iov[0].iov_base,
- sizeof(struct smb2_transform_hdr));
+ new_rq->rq_iov[0].iov_base = tr_hdr;
+ new_rq->rq_iov[0].iov_len = sizeof(struct smb2_transform_hdr);
/* copy pages form the old */
for (i = 0; i < npages; i++) {
@@ -2426,7 +2570,7 @@ smb3_free_transform_rq(struct smb_rqst *rqst)
put_page(rqst->rq_pages[i]);
kfree(rqst->rq_pages);
/* free transform header */
- kfree(rqst->rq_iov[1].iov_base);
+ kfree(rqst->rq_iov[0].iov_base);
kfree(rqst->rq_iov);
}
@@ -2443,19 +2587,17 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
unsigned int buf_data_size, struct page **pages,
unsigned int npages, unsigned int page_data_size)
{
- struct kvec iov[3];
+ struct kvec iov[2];
struct smb_rqst rqst = {NULL};
int rc;
- iov[0].iov_base = NULL;
- iov[0].iov_len = 0;
- iov[1].iov_base = buf;
- iov[1].iov_len = sizeof(struct smb2_transform_hdr);
- iov[2].iov_base = buf + sizeof(struct smb2_transform_hdr);
- iov[2].iov_len = buf_data_size;
+ iov[0].iov_base = buf;
+ iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+ iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+ iov[1].iov_len = buf_data_size;
rqst.rq_iov = iov;
- rqst.rq_nvec = 3;
+ rqst.rq_nvec = 2;
rqst.rq_pages = pages;
rqst.rq_npages = npages;
rqst.rq_pagesz = PAGE_SIZE;
@@ -2467,7 +2609,7 @@ decrypt_raw_data(struct TCP_Server_Info *server, char *buf,
if (rc)
return rc;
- memmove(buf, iov[2].iov_base, buf_data_size);
+ memmove(buf, iov[1].iov_base, buf_data_size);
server->total_read = buf_data_size + page_data_size;
@@ -3170,6 +3312,7 @@ struct smb_version_operations smb311_operations = {
.set_compression = smb2_set_compression,
.mkdir = smb2_mkdir,
.mkdir_setinfo = smb2_mkdir_setinfo,
+ .posix_mkdir = smb311_posix_mkdir,
.rmdir = smb2_rmdir,
.unlink = smb2_unlink,
.rename = smb2_rename_path,
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index af032e1a3eac..3c92678cb45b 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -155,7 +155,7 @@ out:
static int
smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
{
- int rc = 0;
+ int rc;
struct nls_table *nls_codepage;
struct cifs_ses *ses;
struct TCP_Server_Info *server;
@@ -166,10 +166,10 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
* for those three - in the calling routine.
*/
if (tcon == NULL)
- return rc;
+ return 0;
if (smb2_command == SMB2_TREE_CONNECT)
- return rc;
+ return 0;
if (tcon->tidStatus == CifsExiting) {
/*
@@ -212,8 +212,14 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
return -EAGAIN;
}
- wait_event_interruptible_timeout(server->response_q,
- (server->tcpStatus != CifsNeedReconnect), 10 * HZ);
+ rc = wait_event_interruptible_timeout(server->response_q,
+ (server->tcpStatus != CifsNeedReconnect),
+ 10 * HZ);
+ if (rc < 0) {
+		cifs_dbg(FYI, "%s: aborting reconnect due to a signal"
+			" received by the process\n", __func__);
+ return -ERESTARTSYS;
+ }
/* are we still trying to reconnect? */
if (server->tcpStatus != CifsNeedReconnect)
@@ -231,7 +237,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon)
}
if (!tcon->ses->need_reconnect && !tcon->need_reconnect)
- return rc;
+ return 0;
nls_codepage = load_nls_default();
@@ -340,7 +346,10 @@ smb2_plain_req_init(__le16 smb2_command, struct cifs_tcon *tcon,
return rc;
/* BB eventually switch this to SMB2 specific small buf size */
- *request_buf = cifs_small_buf_get();
+ if (smb2_command == SMB2_SET_INFO)
+ *request_buf = cifs_buf_get();
+ else
+ *request_buf = cifs_small_buf_get();
if (*request_buf == NULL) {
/* BB should we add a retry in here if not a writepage? */
return -ENOMEM;
@@ -602,6 +611,7 @@ static void assemble_neg_contexts(struct smb2_negotiate_req *req,
int
SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
{
+ struct smb_rqst rqst;
struct smb2_negotiate_req *req;
struct smb2_negotiate_rsp *rsp;
struct kvec iov[1];
@@ -673,7 +683,11 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_negotiate_rsp *)rsp_iov.iov_base;
/*
@@ -990,8 +1004,9 @@ SMB2_sess_alloc_buffer(struct SMB2_sess_data *sess_data)
req->PreviousSessionId = sess_data->previous_session;
req->Flags = 0; /* MBZ */
- /* to enable echos and oplocks */
- req->sync_hdr.CreditRequest = cpu_to_le16(3);
+
+ /* enough to enable echos and oplocks and one max size write */
+ req->sync_hdr.CreditRequest = cpu_to_le16(130);
/* only one of SMB2 signing flags may be set in SMB2 request */
if (server->sign)
@@ -1027,6 +1042,7 @@ static int
SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
{
int rc;
+ struct smb_rqst rqst;
struct smb2_sess_setup_req *req = sess_data->iov[0].iov_base;
struct kvec rsp_iov = { NULL, 0 };
@@ -1035,10 +1051,13 @@ SMB2_sess_sendreceive(struct SMB2_sess_data *sess_data)
cpu_to_le16(sizeof(struct smb2_sess_setup_req) - 1 /* pad */);
req->SecurityBufferLength = cpu_to_le16(sess_data->iov[1].iov_len);
- /* BB add code to build os and lm fields */
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = sess_data->iov;
+ rqst.rq_nvec = 2;
- rc = smb2_send_recv(sess_data->xid, sess_data->ses,
- sess_data->iov, 2,
+ /* BB add code to build os and lm fields */
+ rc = cifs_send_recv(sess_data->xid, sess_data->ses,
+ &rqst,
&sess_data->buf0_type,
CIFS_LOG_ERROR | CIFS_NEG_OP, &rsp_iov);
cifs_small_buf_release(sess_data->iov[0].iov_base);
@@ -1376,6 +1395,7 @@ out:
int
SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
{
+ struct smb_rqst rqst;
struct smb2_logoff_req *req; /* response is also trivial struct */
int rc = 0;
struct TCP_Server_Info *server;
@@ -1413,7 +1433,11 @@ SMB2_logoff(const unsigned int xid, struct cifs_ses *ses)
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
/*
* No tcon so can't do
@@ -1443,6 +1467,7 @@ int
SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
struct cifs_tcon *tcon, const struct nls_table *cp)
{
+ struct smb_rqst rqst;
struct smb2_tree_connect_req *req;
struct smb2_tree_connect_rsp *rsp = NULL;
struct kvec iov[2];
@@ -1499,7 +1524,11 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
!smb3_encryption_required(tcon))
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
@@ -1563,6 +1592,7 @@ tcon_error_exit:
int
SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
{
+ struct smb_rqst rqst;
struct smb2_tree_disconnect_req *req; /* response is trivial */
int rc = 0;
struct cifs_ses *ses = tcon->ses;
@@ -1593,7 +1623,11 @@ SMB2_tdis(const unsigned int xid, struct cifs_tcon *tcon)
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc)
cifs_stats_fail_inc(tcon, SMB2_TREE_DISCONNECT_HE);
@@ -1682,12 +1716,12 @@ parse_lease_state(struct TCP_Server_Info *server, struct smb2_create_rsp *rsp,
static int
add_lease_context(struct TCP_Server_Info *server, struct kvec *iov,
- unsigned int *num_iovec, __u8 *oplock)
+ unsigned int *num_iovec, u8 *lease_key, __u8 *oplock)
{
struct smb2_create_req *req = iov[0].iov_base;
unsigned int num = *num_iovec;
- iov[num].iov_base = server->ops->create_lease_buf(oplock+1, *oplock);
+ iov[num].iov_base = server->ops->create_lease_buf(lease_key, *oplock);
if (iov[num].iov_base == NULL)
return -ENOMEM;
iov[num].iov_len = server->vals->create_lease_size;
@@ -1886,11 +1920,165 @@ alloc_path_with_tree_prefix(__le16 **out_path, int *out_size, int *out_len,
return 0;
}
+#ifdef CONFIG_CIFS_SMB311
+int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
+ const char *full_path,
+ struct cifs_sb_info *cifs_sb)
+{
+ struct smb_rqst rqst;
+ struct smb2_create_req *req;
+ struct smb2_create_rsp *rsp;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses = tcon->ses;
+ struct kvec iov[3]; /* make sure at least one for each open context */
+ struct kvec rsp_iov = {NULL, 0};
+ int resp_buftype;
+ int uni_path_len;
+ __le16 *copy_path = NULL;
+ int copy_size;
+ int rc = 0;
+ unsigned int n_iov = 2;
+ __u32 file_attributes = 0;
+ char *pc_buf = NULL;
+ int flags = 0;
+ unsigned int total_len;
+ __le16 *path = cifs_convert_path_to_utf16(full_path, cifs_sb);
+
+ if (!path)
+ return -ENOMEM;
+
+ cifs_dbg(FYI, "mkdir\n");
+
+ if (ses && (ses->server))
+ server = ses->server;
+ else
+ return -EIO;
+
+ rc = smb2_plain_req_init(SMB2_CREATE, tcon, (void **) &req, &total_len);
+
+ if (rc)
+ return rc;
+
+ if (smb3_encryption_required(tcon))
+ flags |= CIFS_TRANSFORM_REQ;
+
+
+ req->ImpersonationLevel = IL_IMPERSONATION;
+ req->DesiredAccess = cpu_to_le32(FILE_WRITE_ATTRIBUTES);
+ /* File attributes ignored on open (used in create though) */
+ req->FileAttributes = cpu_to_le32(file_attributes);
+ req->ShareAccess = FILE_SHARE_ALL_LE;
+ req->CreateDisposition = cpu_to_le32(FILE_CREATE);
+ req->CreateOptions = cpu_to_le32(CREATE_NOT_FILE);
+
+ iov[0].iov_base = (char *)req;
+ /* -1 since last byte is buf[0] which is sent below (path) */
+ iov[0].iov_len = total_len - 1;
+
+ req->NameOffset = cpu_to_le16(sizeof(struct smb2_create_req));
+
+ /* [MS-SMB2] 2.2.13 NameOffset:
+ * If SMB2_FLAGS_DFS_OPERATIONS is set in the Flags field of
+ * the SMB2 header, the file name includes a prefix that will
+ * be processed during DFS name normalization as specified in
+ * section 3.3.5.9. Otherwise, the file name is relative to
+ * the share that is identified by the TreeId in the SMB2
+ * header.
+ */
+ if (tcon->share_flags & SHI1005_FLAGS_DFS) {
+ int name_len;
+
+ req->sync_hdr.Flags |= SMB2_FLAGS_DFS_OPERATIONS;
+ rc = alloc_path_with_tree_prefix(&copy_path, &copy_size,
+ &name_len,
+ tcon->treeName, path);
+ if (rc) {
+ cifs_small_buf_release(req);
+ return rc;
+ }
+ req->NameLength = cpu_to_le16(name_len * 2);
+ uni_path_len = copy_size;
+ path = copy_path;
+ } else {
+ uni_path_len = (2 * UniStrnlen((wchar_t *)path, PATH_MAX)) + 2;
+ /* MUST set path len (NameLength) to 0 opening root of share */
+ req->NameLength = cpu_to_le16(uni_path_len - 2);
+ if (uni_path_len % 8 != 0) {
+ copy_size = roundup(uni_path_len, 8);
+ copy_path = kzalloc(copy_size, GFP_KERNEL);
+ if (!copy_path) {
+ cifs_small_buf_release(req);
+ return -ENOMEM;
+ }
+ memcpy((char *)copy_path, (const char *)path,
+ uni_path_len);
+ uni_path_len = copy_size;
+ path = copy_path;
+ }
+ }
+
+ iov[1].iov_len = uni_path_len;
+ iov[1].iov_base = path;
+ req->RequestedOplockLevel = SMB2_OPLOCK_LEVEL_NONE;
+
+ if (tcon->posix_extensions) {
+ if (n_iov > 2) {
+ struct create_context *ccontext =
+ (struct create_context *)iov[n_iov-1].iov_base;
+ ccontext->Next =
+ cpu_to_le32(iov[n_iov-1].iov_len);
+ }
+
+ rc = add_posix_context(iov, &n_iov, mode);
+ if (rc) {
+ cifs_small_buf_release(req);
+ kfree(copy_path);
+ return rc;
+ }
+ pc_buf = iov[n_iov-1].iov_base;
+ }
+
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
+ &rsp_iov);
+
+ cifs_small_buf_release(req);
+ rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+
+ if (rc != 0) {
+ cifs_stats_fail_inc(tcon, SMB2_CREATE_HE);
+ trace_smb3_posix_mkdir_err(xid, tcon->tid, ses->Suid,
+ CREATE_NOT_FILE, FILE_WRITE_ATTRIBUTES, rc);
+ goto smb311_mkdir_exit;
+ } else
+ trace_smb3_posix_mkdir_done(xid, rsp->PersistentFileId, tcon->tid,
+ ses->Suid, CREATE_NOT_FILE,
+ FILE_WRITE_ATTRIBUTES);
+
+ SMB2_close(xid, tcon, rsp->PersistentFileId, rsp->VolatileFileId);
+
+	/* Eventually save off posix specific response info and timestamps */
+
+smb311_mkdir_exit:
+ kfree(copy_path);
+ kfree(pc_buf);
+ free_rsp_buf(resp_buftype, rsp);
+ return rc;
+
+}
+#endif /* SMB311 */
+
int
SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
__u8 *oplock, struct smb2_file_all_info *buf,
struct kvec *err_iov, int *buftype)
{
+ struct smb_rqst rqst;
struct smb2_create_req *req;
struct smb2_create_rsp *rsp;
struct TCP_Server_Info *server;
@@ -1993,7 +2181,8 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
*oplock == SMB2_OPLOCK_LEVEL_NONE)
req->RequestedOplockLevel = *oplock;
else {
- rc = add_lease_context(server, iov, &n_iov, oplock);
+ rc = add_lease_context(server, iov, &n_iov,
+ oparms->fid->lease_key, oplock);
if (rc) {
cifs_small_buf_release(req);
kfree(copy_path);
@@ -2043,7 +2232,11 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
}
#endif /* SMB311 */
- rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
@@ -2099,6 +2292,7 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
char *in_data, u32 indatalen,
char **out_data, u32 *plen /* returned data len */)
{
+ struct smb_rqst rqst;
struct smb2_ioctl_req *req;
struct smb2_ioctl_rsp *rsp;
struct cifs_ses *ses;
@@ -2189,7 +2383,11 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if (opcode == FSCTL_VALIDATE_NEGOTIATE_INFO)
req->sync_hdr.Flags |= SMB2_FLAGS_SIGNED;
- rc = smb2_send_recv(xid, ses, iov, n_iov, &resp_buftype, flags,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_iov;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_ioctl_rsp *)rsp_iov.iov_base;
@@ -2274,6 +2472,7 @@ int
SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int flags)
{
+ struct smb_rqst rqst;
struct smb2_close_req *req;
struct smb2_close_rsp *rsp;
struct cifs_ses *ses = tcon->ses;
@@ -2301,7 +2500,11 @@ SMB2_close_flags(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_close_rsp *)rsp_iov.iov_base;
@@ -2387,6 +2590,7 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
u32 additional_info, size_t output_len, size_t min_len, void **data,
u32 *dlen)
{
+ struct smb_rqst rqst;
struct smb2_query_info_req *req;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov[2];
@@ -2427,7 +2631,11 @@ query_info(const unsigned int xid, struct cifs_tcon *tcon,
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_info_rsp *)rsp_iov.iov_base;
@@ -2594,11 +2802,10 @@ SMB2_echo(struct TCP_Server_Info *server)
{
struct smb2_echo_req *req;
int rc = 0;
- struct kvec iov[2];
+ struct kvec iov[1];
struct smb_rqst rqst = { .rq_iov = iov,
- .rq_nvec = 2 };
+ .rq_nvec = 1 };
unsigned int total_len;
- __be32 rfc1002_marker;
cifs_dbg(FYI, "In echo request\n");
@@ -2614,11 +2821,8 @@ SMB2_echo(struct TCP_Server_Info *server)
req->sync_hdr.CreditRequest = cpu_to_le16(1);
- iov[0].iov_len = 4;
- rfc1002_marker = cpu_to_be32(total_len);
- iov[0].iov_base = &rfc1002_marker;
- iov[1].iov_len = total_len;
- iov[1].iov_base = (char *)req;
+ iov[0].iov_len = total_len;
+ iov[0].iov_base = (char *)req;
rc = cifs_call_async(server, &rqst, NULL, smb2_echo_callback, NULL,
server, CIFS_ECHO_OP);
@@ -2633,6 +2837,7 @@ int
SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
u64 volatile_fid)
{
+ struct smb_rqst rqst;
struct smb2_flush_req *req;
struct cifs_ses *ses = tcon->ses;
struct kvec iov[1];
@@ -2660,7 +2865,11 @@ SMB2_flush(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc != 0) {
@@ -2848,10 +3057,9 @@ smb2_async_readv(struct cifs_readdata *rdata)
struct smb2_sync_hdr *shdr;
struct cifs_io_parms io_parms;
struct smb_rqst rqst = { .rq_iov = rdata->iov,
- .rq_nvec = 2 };
+ .rq_nvec = 1 };
struct TCP_Server_Info *server;
unsigned int total_len;
- __be32 req_len;
cifs_dbg(FYI, "%s: offset=%llu bytes=%u\n",
__func__, rdata->offset, rdata->bytes);
@@ -2882,12 +3090,8 @@ smb2_async_readv(struct cifs_readdata *rdata)
if (smb3_encryption_required(io_parms.tcon))
flags |= CIFS_TRANSFORM_REQ;
- req_len = cpu_to_be32(total_len);
-
- rdata->iov[0].iov_base = &req_len;
- rdata->iov[0].iov_len = sizeof(__be32);
- rdata->iov[1].iov_base = buf;
- rdata->iov[1].iov_len = total_len;
+ rdata->iov[0].iov_base = buf;
+ rdata->iov[0].iov_len = total_len;
shdr = (struct smb2_sync_hdr *)buf;
@@ -2926,6 +3130,7 @@ int
SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type)
{
+ struct smb_rqst rqst;
int resp_buftype, rc = -EACCES;
struct smb2_read_plain_req *req = NULL;
struct smb2_read_rsp *rsp = NULL;
@@ -2946,7 +3151,11 @@ SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_read_rsp *)rsp_iov.iov_base;
@@ -3062,10 +3271,9 @@ smb2_async_writev(struct cifs_writedata *wdata,
struct smb2_sync_hdr *shdr;
struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink);
struct TCP_Server_Info *server = tcon->ses->server;
- struct kvec iov[2];
+ struct kvec iov[1];
struct smb_rqst rqst = { };
unsigned int total_len;
- __be32 rfc1002_marker;
rc = smb2_plain_req_init(SMB2_WRITE, tcon, (void **) &req, &total_len);
if (rc) {
@@ -3137,15 +3345,11 @@ smb2_async_writev(struct cifs_writedata *wdata,
v1->length = cpu_to_le32(wdata->mr->mr->length);
}
#endif
- /* 4 for rfc1002 length field and 1 for Buffer */
- iov[0].iov_len = 4;
- rfc1002_marker = cpu_to_be32(total_len - 1 + wdata->bytes);
- iov[0].iov_base = &rfc1002_marker;
- iov[1].iov_len = total_len - 1;
- iov[1].iov_base = (char *)req;
+ iov[0].iov_len = total_len - 1;
+ iov[0].iov_base = (char *)req;
rqst.rq_iov = iov;
- rqst.rq_nvec = 2;
+ rqst.rq_nvec = 1;
rqst.rq_pages = wdata->pages;
rqst.rq_offset = wdata->page_offset;
rqst.rq_npages = wdata->nr_pages;
@@ -3153,7 +3357,7 @@ smb2_async_writev(struct cifs_writedata *wdata,
rqst.rq_tailsz = wdata->tailsz;
#ifdef CONFIG_CIFS_SMB_DIRECT
if (wdata->mr) {
- iov[1].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
+ iov[0].iov_len += sizeof(struct smbd_buffer_descriptor_v1);
rqst.rq_npages = 0;
}
#endif
@@ -3210,6 +3414,7 @@ int
SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec)
{
+ struct smb_rqst rqst;
int rc = 0;
struct smb2_write_req *req = NULL;
struct smb2_write_rsp *rsp = NULL;
@@ -3251,7 +3456,11 @@ SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
/* 1 for Buffer */
iov[0].iov_len = total_len - 1;
- rc = smb2_send_recv(xid, io_parms->tcon->ses, iov, n_vec + 1,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = n_vec + 1;
+
+ rc = cifs_send_recv(xid, io_parms->tcon->ses, &rqst,
&resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_write_rsp *)rsp_iov.iov_base;
@@ -3323,6 +3532,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int index,
struct cifs_search_info *srch_inf)
{
+ struct smb_rqst rqst;
struct smb2_query_directory_req *req;
struct smb2_query_directory_rsp *rsp = NULL;
struct kvec iov[2];
@@ -3395,7 +3605,11 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_base = (char *)(req->Buffer);
iov[1].iov_len = len;
- rc = smb2_send_recv(xid, ses, iov, 2, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(req);
rsp = (struct smb2_query_directory_rsp *)rsp_iov.iov_base;
@@ -3454,6 +3668,7 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
u8 info_type, u32 additional_info, unsigned int num,
void **data, unsigned int *size)
{
+ struct smb_rqst rqst;
struct smb2_set_info_req *req;
struct smb2_set_info_rsp *rsp = NULL;
struct kvec *iov;
@@ -3509,9 +3724,13 @@ send_set_info(const unsigned int xid, struct cifs_tcon *tcon,
iov[i].iov_len = size[i];
}
- rc = smb2_send_recv(xid, ses, iov, num, &resp_buftype, flags,
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = num;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags,
&rsp_iov);
- cifs_small_buf_release(req);
+ cifs_buf_release(req);
rsp = (struct smb2_set_info_rsp *)rsp_iov.iov_base;
if (rc != 0) {
@@ -3664,6 +3883,7 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
const u64 persistent_fid, const u64 volatile_fid,
__u8 oplock_level)
{
+ struct smb_rqst rqst;
int rc;
struct smb2_oplock_break *req = NULL;
struct cifs_ses *ses = tcon->ses;
@@ -3692,7 +3912,11 @@ SMB2_oplock_break(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -3755,6 +3979,7 @@ int
SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, struct kstatfs *fsdata)
{
+ struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
@@ -3773,7 +3998,11 @@ SMB2_QFS_info(const unsigned int xid, struct cifs_tcon *tcon,
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3798,6 +4027,7 @@ int
SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
u64 persistent_fid, u64 volatile_fid, int level)
{
+ struct smb_rqst rqst;
struct smb2_query_info_rsp *rsp = NULL;
struct kvec iov;
struct kvec rsp_iov;
@@ -3829,7 +4059,11 @@ SMB2_QFS_attr(const unsigned int xid, struct cifs_tcon *tcon,
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
- rc = smb2_send_recv(xid, ses, &iov, 1, &resp_buftype, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = &iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buftype, flags, &rsp_iov);
cifs_small_buf_release(iov.iov_base);
if (rc) {
cifs_stats_fail_inc(tcon, SMB2_QUERY_INFO_HE);
@@ -3868,6 +4102,7 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
const __u64 persist_fid, const __u64 volatile_fid, const __u32 pid,
const __u32 num_lock, struct smb2_lock_element *buf)
{
+ struct smb_rqst rqst;
int rc = 0;
struct smb2_lock_req *req = NULL;
struct kvec iov[2];
@@ -3900,7 +4135,12 @@ smb2_lockv(const unsigned int xid, struct cifs_tcon *tcon,
iov[1].iov_len = count;
cifs_stats_inc(&tcon->stats.cifs_stats.num_locks);
- rc = smb2_send_recv(xid, tcon->ses, iov, 2, &resp_buf_type, flags,
+
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 2;
+
+ rc = cifs_send_recv(xid, tcon->ses, &rqst, &resp_buf_type, flags,
&rsp_iov);
cifs_small_buf_release(req);
if (rc) {
@@ -3934,6 +4174,7 @@ int
SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
__u8 *lease_key, const __le32 lease_state)
{
+ struct smb_rqst rqst;
int rc;
struct smb2_lease_ack *req = NULL;
struct cifs_ses *ses = tcon->ses;
@@ -3964,7 +4205,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
iov[0].iov_base = (char *)req;
iov[0].iov_len = total_len;
- rc = smb2_send_recv(xid, ses, iov, 1, &resp_buf_type, flags, &rsp_iov);
+ memset(&rqst, 0, sizeof(struct smb_rqst));
+ rqst.rq_iov = iov;
+ rqst.rq_nvec = 1;
+
+ rc = cifs_send_recv(xid, ses, &rqst, &resp_buf_type, flags, &rsp_iov);
cifs_small_buf_release(req);
if (rc) {
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index a345560001ce..a671adcc44a6 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -678,16 +678,14 @@ struct create_context {
#define SMB2_LEASE_KEY_SIZE 16
struct lease_context {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
+ u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
__le32 LeaseState;
__le32 LeaseFlags;
__le64 LeaseDuration;
} __packed;
struct lease_context_v2 {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
+ u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
__le32 LeaseState;
__le32 LeaseFlags;
__le64 LeaseDuration;
@@ -851,8 +849,11 @@ struct validate_negotiate_info_rsp {
__le16 Dialect; /* Dialect in use for the connection */
} __packed;
-#define RSS_CAPABLE 0x00000001
-#define RDMA_CAPABLE 0x00000002
+#define RSS_CAPABLE cpu_to_le32(0x00000001)
+#define RDMA_CAPABLE cpu_to_le32(0x00000002)
+
+#define INTERNETWORK cpu_to_le16(0x0002)
+#define INTERNETWORKV6 cpu_to_le16(0x0017)
struct network_interface_info_ioctl_rsp {
__le32 Next; /* next interface. zero if this is last one */
@@ -860,7 +861,21 @@ struct network_interface_info_ioctl_rsp {
__le32 Capability; /* RSS or RDMA Capable */
__le32 Reserved;
__le64 LinkSpeed;
- char SockAddr_Storage[128];
+ __le16 Family;
+ __u8 Buffer[126];
+} __packed;
+
+struct iface_info_ipv4 {
+ __be16 Port;
+ __be32 IPv4Address;
+ __be64 Reserved;
+} __packed;
+
+struct iface_info_ipv6 {
+ __be16 Port;
+ __be32 FlowInfo;
+ __u8 IPv6Address[16];
+ __be32 ScopeId;
} __packed;
#define NO_FILE_ID 0xFFFFFFFFFFFFFFFFULL /* general ioctls to srv not to file */
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index c84020057bd8..6e6a4f2ec890 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -79,6 +79,10 @@ extern int smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, bool set_alloc);
extern int smb2_set_file_info(struct inode *inode, const char *full_path,
FILE_BASIC_INFO *buf, const unsigned int xid);
+extern int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
+ umode_t mode, struct cifs_tcon *tcon,
+ const char *full_path,
+ struct cifs_sb_info *cifs_sb);
extern int smb2_mkdir(const unsigned int xid, struct cifs_tcon *tcon,
const char *name, struct cifs_sb_info *cifs_sb);
extern void smb2_mkdir_setinfo(struct inode *inode, const char *full_path,
@@ -109,6 +113,8 @@ extern int smb2_unlock_range(struct cifsFileInfo *cfile,
extern int smb2_push_mandatory_locks(struct cifsFileInfo *cfile);
extern void smb2_reconnect_server(struct work_struct *work);
extern int smb3_crypto_aead_allocate(struct TCP_Server_Info *server);
+extern unsigned long smb_rqst_len(struct TCP_Server_Info *server,
+ struct smb_rqst *rqst);
/*
* SMB2 Worker functions - most of protocol specific implementation details
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 349d5ccf854c..719d55e63d88 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -171,10 +171,10 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
unsigned char smb2_signature[SMB2_HMACSHA256_SIZE];
unsigned char *sigptr = smb2_signature;
struct kvec *iov = rqst->rq_iov;
- int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
- struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
+ struct shash_desc *shash = &server->secmech.sdeschmacsha256->shash;
+ struct smb_rqst drqst;
ses = smb2_find_smb_ses(server, shdr->SessionId);
if (!ses) {
@@ -192,21 +192,39 @@ smb2_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
}
rc = crypto_shash_setkey(server->secmech.hmacsha256,
- ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
+ ses->auth_key.response, SMB2_NTLMV2_SESSKEY_SIZE);
if (rc) {
cifs_dbg(VFS, "%s: Could not update with response\n", __func__);
return rc;
}
- rc = crypto_shash_init(&server->secmech.sdeschmacsha256->shash);
+ rc = crypto_shash_init(shash);
if (rc) {
cifs_dbg(VFS, "%s: Could not init sha256", __func__);
return rc;
}
- rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
- &server->secmech.sdeschmacsha256->shash);
+ /*
+ * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+ * data, that is, iov[0] should not contain a rfc1002 length.
+ *
+ * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+ * __cifs_calc_signature().
+ */
+ drqst = *rqst;
+ if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+ rc = crypto_shash_update(shash, iov[0].iov_base,
+ iov[0].iov_len);
+ if (rc) {
+ cifs_dbg(VFS, "%s: Could not update with payload\n",
+ __func__);
+ return rc;
+ }
+ drqst.rq_iov++;
+ drqst.rq_nvec--;
+ }
+ rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
if (!rc)
memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
@@ -410,14 +428,14 @@ generate_smb311signingkey(struct cifs_ses *ses)
int
smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
- int rc = 0;
+ int rc;
unsigned char smb3_signature[SMB2_CMACAES_SIZE];
unsigned char *sigptr = smb3_signature;
struct kvec *iov = rqst->rq_iov;
- int iov_hdr_index = rqst->rq_nvec > 1 ? 1 : 0;
- struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)iov[iov_hdr_index].iov_base;
+ struct smb2_sync_hdr *shdr = (struct smb2_sync_hdr *)iov[0].iov_base;
struct cifs_ses *ses;
+ struct shash_desc *shash = &server->secmech.sdesccmacaes->shash;
+ struct smb_rqst drqst;
ses = smb2_find_smb_ses(server, shdr->SessionId);
if (!ses) {
@@ -429,8 +447,7 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
memset(shdr->Signature, 0x0, SMB2_SIGNATURE_SIZE);
rc = crypto_shash_setkey(server->secmech.cmacaes,
- ses->smb3signingkey, SMB2_CMACAES_SIZE);
-
+ ses->smb3signingkey, SMB2_CMACAES_SIZE);
if (rc) {
cifs_dbg(VFS, "%s: Could not set key for cmac aes\n", __func__);
return rc;
@@ -441,15 +458,33 @@ smb3_calc_signature(struct smb_rqst *rqst, struct TCP_Server_Info *server)
* so unlike smb2 case we do not have to check here if secmech are
* initialized
*/
- rc = crypto_shash_init(&server->secmech.sdesccmacaes->shash);
+ rc = crypto_shash_init(shash);
if (rc) {
cifs_dbg(VFS, "%s: Could not init cmac aes\n", __func__);
return rc;
}
- rc = __cifs_calc_signature(rqst, iov_hdr_index, server, sigptr,
- &server->secmech.sdesccmacaes->shash);
+ /*
+ * For SMB2+, __cifs_calc_signature() expects to sign only the actual
+ * data, that is, iov[0] should not contain a rfc1002 length.
+ *
+ * Sign the rfc1002 length prior to passing the data (iov[1-N]) down to
+ * __cifs_calc_signature().
+ */
+ drqst = *rqst;
+ if (drqst.rq_nvec >= 2 && iov[0].iov_len == 4) {
+ rc = crypto_shash_update(shash, iov[0].iov_base,
+ iov[0].iov_len);
+ if (rc) {
+ cifs_dbg(VFS, "%s: Could not update with payload\n",
+ __func__);
+ return rc;
+ }
+ drqst.rq_iov++;
+ drqst.rq_nvec--;
+ }
+ rc = __cifs_calc_signature(&drqst, server, sigptr, shash);
if (!rc)
memcpy(shdr->Signature, sigptr, SMB2_SIGNATURE_SIZE);
@@ -462,7 +497,7 @@ smb2_sign_rqst(struct smb_rqst *rqst, struct TCP_Server_Info *server)
{
int rc = 0;
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
if (!(shdr->Flags & SMB2_FLAGS_SIGNED) ||
server->tcpStatus == CifsNeedNegotiate)
@@ -552,6 +587,7 @@ smb2_mid_entry_alloc(const struct smb2_sync_hdr *shdr,
temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
memset(temp, 0, sizeof(struct mid_q_entry));
+ kref_init(&temp->refcount);
temp->mid = le64_to_cpu(shdr->MessageId);
temp->pid = current->pid;
temp->command = shdr->Command; /* Always LE */
@@ -635,7 +671,7 @@ smb2_setup_request(struct cifs_ses *ses, struct smb_rqst *rqst)
{
int rc;
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
smb2_seq_num_into_buf(ses->server, shdr);
@@ -656,7 +692,7 @@ smb2_setup_async_request(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
int rc;
struct smb2_sync_hdr *shdr =
- (struct smb2_sync_hdr *)rqst->rq_iov[1].iov_base;
+ (struct smb2_sync_hdr *)rqst->rq_iov[0].iov_base;
struct mid_q_entry *mid;
smb2_seq_num_into_buf(server, shdr);
diff --git a/fs/cifs/smbdirect.c b/fs/cifs/smbdirect.c
index e459c97151b3..c55ea4e6201b 100644
--- a/fs/cifs/smbdirect.c
+++ b/fs/cifs/smbdirect.c
@@ -18,6 +18,7 @@
#include "smbdirect.h"
#include "cifs_debug.h"
#include "cifsproto.h"
+#include "smb2proto.h"
static struct smbd_response *get_empty_queue_buffer(
struct smbd_connection *info);
@@ -2082,12 +2083,13 @@ int smbd_recv(struct smbd_connection *info, struct msghdr *msg)
* rqst: the data to write
* return value: 0 if successfully write, otherwise error code
*/
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
+ struct smbd_connection *info = server->smbd_conn;
struct kvec vec;
int nvecs;
int size;
- unsigned int buflen = 0, remaining_data_length;
+ unsigned int buflen, remaining_data_length;
int start, i, j;
int max_iov_size =
info->max_send_size - sizeof(struct smbd_data_transfer);
@@ -2111,25 +2113,13 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
log_write(ERR, "expected the pdu length in 1st iov, but got %zu\n", rqst->rq_iov[0].iov_len);
return -EINVAL;
}
- iov = &rqst->rq_iov[1];
-
- /* total up iov array first */
- for (i = 0; i < rqst->rq_nvec-1; i++) {
- buflen += iov[i].iov_len;
- }
/*
* Add in the page array if there is one. The caller needs to set
* rq_tailsz to PAGE_SIZE when the buffer has multiple pages and
* ends at page boundary
*/
- if (rqst->rq_npages) {
- if (rqst->rq_npages == 1)
- buflen += rqst->rq_tailsz;
- else
- buflen += rqst->rq_pagesz * (rqst->rq_npages - 1) -
- rqst->rq_offset + rqst->rq_tailsz;
- }
+ buflen = smb_rqst_len(server, rqst);
if (buflen + sizeof(struct smbd_data_transfer) >
info->max_fragmented_send_size) {
@@ -2139,6 +2129,8 @@ int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst)
goto done;
}
+ iov = &rqst->rq_iov[1];
+
cifs_dbg(FYI, "Sending smb (RDMA): smb_len=%u\n", buflen);
for (i = 0; i < rqst->rq_nvec-1; i++)
dump_smb(iov[i].iov_base, iov[i].iov_len);
diff --git a/fs/cifs/smbdirect.h b/fs/cifs/smbdirect.h
index 1e419c21dc60..a11096254f29 100644
--- a/fs/cifs/smbdirect.h
+++ b/fs/cifs/smbdirect.h
@@ -292,7 +292,7 @@ void smbd_destroy(struct smbd_connection *info);
/* Interface for carrying upper layer I/O through send/recv */
int smbd_recv(struct smbd_connection *info, struct msghdr *msg);
-int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst);
+int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst);
enum mr_state {
MR_READY,
@@ -332,7 +332,7 @@ static inline void *smbd_get_connection(
static inline int smbd_reconnect(struct TCP_Server_Info *server) {return -1; }
static inline void smbd_destroy(struct smbd_connection *info) {}
static inline int smbd_recv(struct smbd_connection *info, struct msghdr *msg) {return -1; }
-static inline int smbd_send(struct smbd_connection *info, struct smb_rqst *rqst) {return -1; }
+static inline int smbd_send(struct TCP_Server_Info *server, struct smb_rqst *rqst) {return -1; }
#endif
#endif
diff --git a/fs/cifs/trace.h b/fs/cifs/trace.h
index 61e74d455d90..67e413f6ee4d 100644
--- a/fs/cifs/trace.h
+++ b/fs/cifs/trace.h
@@ -378,7 +378,7 @@ DEFINE_EVENT(smb3_open_err_class, smb3_##name, \
TP_ARGS(xid, tid, sesid, create_options, desired_access, rc))
DEFINE_SMB3_OPEN_ERR_EVENT(open_err);
-
+DEFINE_SMB3_OPEN_ERR_EVENT(posix_mkdir_err);
DECLARE_EVENT_CLASS(smb3_open_done_class,
TP_PROTO(unsigned int xid,
@@ -420,6 +420,7 @@ DEFINE_EVENT(smb3_open_done_class, smb3_##name, \
TP_ARGS(xid, fid, tid, sesid, create_options, desired_access))
DEFINE_SMB3_OPEN_DONE_EVENT(open_done);
+DEFINE_SMB3_OPEN_DONE_EVENT(posix_mkdir_done);
#endif /* _CIFS_TRACE_H */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1f1a68f89110..a341ec839c83 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -61,6 +61,7 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
temp = mempool_alloc(cifs_mid_poolp, GFP_NOFS);
memset(temp, 0, sizeof(struct mid_q_entry));
+ kref_init(&temp->refcount);
temp->mid = get_mid(smb_buffer);
temp->pid = current->pid;
temp->command = cpu_to_le16(smb_buffer->Command);
@@ -82,6 +83,21 @@ AllocMidQEntry(const struct smb_hdr *smb_buffer, struct TCP_Server_Info *server)
return temp;
}
+static void _cifs_mid_q_entry_release(struct kref *refcount)
+{
+ struct mid_q_entry *mid = container_of(refcount, struct mid_q_entry,
+ refcount);
+
+ mempool_free(mid, cifs_mid_poolp);
+}
+
+void cifs_mid_q_entry_release(struct mid_q_entry *midEntry)
+{
+ spin_lock(&GlobalMid_Lock);
+ kref_put(&midEntry->refcount, _cifs_mid_q_entry_release);
+ spin_unlock(&GlobalMid_Lock);
+}
+
void
DeleteMidQEntry(struct mid_q_entry *midEntry)
{
@@ -110,7 +126,7 @@ DeleteMidQEntry(struct mid_q_entry *midEntry)
}
}
#endif
- mempool_free(midEntry, cifs_mid_poolp);
+ cifs_mid_q_entry_release(midEntry);
}
void
@@ -201,15 +217,25 @@ smb_send_kvec(struct TCP_Server_Info *server, struct msghdr *smb_msg,
return 0;
}
-static unsigned long
-rqst_len(struct smb_rqst *rqst)
+unsigned long
+smb_rqst_len(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
unsigned int i;
- struct kvec *iov = rqst->rq_iov;
+ struct kvec *iov;
+ int nvec;
unsigned long buflen = 0;
+ if (server->vals->header_preamble_size == 0 &&
+ rqst->rq_nvec >= 2 && rqst->rq_iov[0].iov_len == 4) {
+ iov = &rqst->rq_iov[1];
+ nvec = rqst->rq_nvec - 1;
+ } else {
+ iov = rqst->rq_iov;
+ nvec = rqst->rq_nvec;
+ }
+
/* total up iov array first */
- for (i = 0; i < rqst->rq_nvec; i++)
+ for (i = 0; i < nvec; i++)
buflen += iov[i].iov_len;
/*
@@ -236,70 +262,88 @@ rqst_len(struct smb_rqst *rqst)
}
static int
-__smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
+__smb_send_rqst(struct TCP_Server_Info *server, int num_rqst,
+ struct smb_rqst *rqst)
{
- int rc;
- struct kvec *iov = rqst->rq_iov;
- int n_vec = rqst->rq_nvec;
- unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
- unsigned long send_length;
- unsigned int i;
+ int rc = 0;
+ struct kvec *iov;
+ int n_vec;
+ unsigned int send_length = 0;
+ unsigned int i, j;
size_t total_len = 0, sent, size;
struct socket *ssocket = server->ssocket;
struct msghdr smb_msg;
int val = 1;
+ __be32 rfc1002_marker;
+
if (cifs_rdma_enabled(server) && server->smbd_conn) {
- rc = smbd_send(server->smbd_conn, rqst);
+ rc = smbd_send(server, rqst);
goto smbd_done;
}
if (ssocket == NULL)
return -ENOTSOCK;
- /* sanity check send length */
- send_length = rqst_len(rqst);
- if (send_length != smb_buf_length + 4) {
- WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
- send_length, smb_buf_length);
- return -EIO;
- }
-
- if (n_vec < 2)
- return -EIO;
-
- cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
- dump_smb(iov[0].iov_base, iov[0].iov_len);
- dump_smb(iov[1].iov_base, iov[1].iov_len);
-
/* cork the socket */
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
- size = 0;
- for (i = 0; i < n_vec; i++)
- size += iov[i].iov_len;
+ for (j = 0; j < num_rqst; j++)
+ send_length += smb_rqst_len(server, &rqst[j]);
+ rfc1002_marker = cpu_to_be32(send_length);
- iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, iov, n_vec, size);
+ /* Generate a rfc1002 marker for SMB2+ */
+ if (server->vals->header_preamble_size == 0) {
+ struct kvec hiov = {
+ .iov_base = &rfc1002_marker,
+ .iov_len = 4
+ };
+ iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC, &hiov,
+ 1, 4);
+ rc = smb_send_kvec(server, &smb_msg, &sent);
+ if (rc < 0)
+ goto uncork;
- rc = smb_send_kvec(server, &smb_msg, &sent);
- if (rc < 0)
- goto uncork;
+ total_len += sent;
+ send_length += 4;
+ }
- total_len += sent;
+ cifs_dbg(FYI, "Sending smb: smb_len=%u\n", send_length);
- /* now walk the page array and send each page in it */
- for (i = 0; i < rqst->rq_npages; i++) {
- struct bio_vec bvec;
+ for (j = 0; j < num_rqst; j++) {
+ iov = rqst[j].rq_iov;
+ n_vec = rqst[j].rq_nvec;
+
+ size = 0;
+ for (i = 0; i < n_vec; i++) {
+ dump_smb(iov[i].iov_base, iov[i].iov_len);
+ size += iov[i].iov_len;
+ }
- bvec.bv_page = rqst->rq_pages[i];
- rqst_page_get_length(rqst, i, &bvec.bv_len, &bvec.bv_offset);
+ iov_iter_kvec(&smb_msg.msg_iter, WRITE | ITER_KVEC,
+ iov, n_vec, size);
- iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
- &bvec, 1, bvec.bv_len);
rc = smb_send_kvec(server, &smb_msg, &sent);
if (rc < 0)
- break;
+ goto uncork;
total_len += sent;
+
+ /* now walk the page array and send each page in it */
+ for (i = 0; i < rqst[j].rq_npages; i++) {
+ struct bio_vec bvec;
+
+ bvec.bv_page = rqst[j].rq_pages[i];
+ rqst_page_get_length(&rqst[j], i, &bvec.bv_len,
+ &bvec.bv_offset);
+
+ iov_iter_bvec(&smb_msg.msg_iter, WRITE | ITER_BVEC,
+ &bvec, 1, bvec.bv_len);
+ rc = smb_send_kvec(server, &smb_msg, &sent);
+ if (rc < 0)
+ break;
+
+ total_len += sent;
+ }
}
uncork:
@@ -308,9 +352,9 @@ uncork:
kernel_setsockopt(ssocket, SOL_TCP, TCP_CORK,
(char *)&val, sizeof(val));
- if ((total_len > 0) && (total_len != smb_buf_length + 4)) {
+ if ((total_len > 0) && (total_len != send_length)) {
cifs_dbg(FYI, "partial send (wanted=%u sent=%zu): terminating session\n",
- smb_buf_length + 4, total_len);
+ send_length, total_len);
/*
* If we have only sent part of an SMB then the next SMB could
* be taken as the remainder of this one. We need to kill the
@@ -335,7 +379,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
int rc;
if (!(flags & CIFS_TRANSFORM_REQ))
- return __smb_send_rqst(server, rqst);
+ return __smb_send_rqst(server, 1, rqst);
if (!server->ops->init_transform_rq ||
!server->ops->free_transform_rq) {
@@ -347,7 +391,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst, int flags)
if (rc)
return rc;
- rc = __smb_send_rqst(server, &cur_rqst);
+ rc = __smb_send_rqst(server, 1, &cur_rqst);
server->ops->free_transform_rq(&cur_rqst);
return rc;
}
@@ -365,7 +409,7 @@ smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
iov[1].iov_base = (char *)smb_buffer + 4;
iov[1].iov_len = smb_buf_length;
- return __smb_send_rqst(server, &rqst);
+ return __smb_send_rqst(server, 1, &rqst);
}
static int
@@ -730,7 +774,6 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
* to the same server. We may make this configurable later or
* use ses->maxReq.
*/
-
rc = wait_for_free_request(ses->server, timeout, optype);
if (rc)
return rc;
@@ -766,8 +809,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
#ifdef CONFIG_CIFS_SMB311
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP))
- smb311_update_preauth_hash(ses, rqst->rq_iov+1,
- rqst->rq_nvec-1);
+ smb311_update_preauth_hash(ses, rqst->rq_iov,
+ rqst->rq_nvec);
#endif
if (timeout == CIFS_ASYNC_OP)
@@ -812,8 +855,8 @@ cifs_send_recv(const unsigned int xid, struct cifs_ses *ses,
#ifdef CONFIG_CIFS_SMB311
if ((ses->status == CifsNew) || (optype & CIFS_NEG_OP)) {
struct kvec iov = {
- .iov_base = buf,
- .iov_len = midQ->resp_buf_size
+ .iov_base = resp_iov->iov_base,
+ .iov_len = resp_iov->iov_len
};
smb311_update_preauth_hash(ses, &iov, 1);
}
@@ -872,49 +915,6 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
return rc;
}
-/* Like SendReceive2 but iov[0] does not contain an rfc1002 header */
-int
-smb2_send_recv(const unsigned int xid, struct cifs_ses *ses,
- struct kvec *iov, int n_vec, int *resp_buf_type /* ret */,
- const int flags, struct kvec *resp_iov)
-{
- struct smb_rqst rqst;
- struct kvec s_iov[CIFS_MAX_IOV_SIZE], *new_iov;
- int rc;
- int i;
- __u32 count;
- __be32 rfc1002_marker;
-
- if (n_vec + 1 > CIFS_MAX_IOV_SIZE) {
- new_iov = kmalloc_array(n_vec + 1, sizeof(struct kvec),
- GFP_KERNEL);
- if (!new_iov)
- return -ENOMEM;
- } else
- new_iov = s_iov;
-
- /* 1st iov is an RFC1002 Session Message length */
- memcpy(new_iov + 1, iov, (sizeof(struct kvec) * n_vec));
-
- count = 0;
- for (i = 1; i < n_vec + 1; i++)
- count += new_iov[i].iov_len;
-
- rfc1002_marker = cpu_to_be32(count);
-
- new_iov[0].iov_base = &rfc1002_marker;
- new_iov[0].iov_len = 4;
-
- memset(&rqst, 0, sizeof(struct smb_rqst));
- rqst.rq_iov = new_iov;
- rqst.rq_nvec = n_vec + 1;
-
- rc = cifs_send_recv(xid, ses, &rqst, resp_buf_type, flags, resp_iov);
- if (n_vec + 1 > CIFS_MAX_IOV_SIZE)
- kfree(new_iov);
- return rc;
-}
-
int
SendReceive(const unsigned int xid, struct cifs_ses *ses,
struct smb_hdr *in_buf, struct smb_hdr *out_buf,
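The rewritten __smb_send_rqst() above batches several smb_rqst structures into one socket send and, for SMB2+ (header_preamble_size == 0), emits the RFC1002 session-message marker itself: the summed payload length as a 4-byte big-endian prefix. A minimal user-space sketch of just that framing step, with htonl() standing in for the kernel's cpu_to_be32(), might look like:

/* Sketch only: compute the 4-byte length prefix written before the
 * kvec and page payloads; 'payload_len' is assumed to be the sum of
 * all iov_len and page lengths in the batch. */
#include <stdint.h>
#include <arpa/inet.h>

static uint32_t rfc1002_prefix(uint32_t payload_len)
{
	return htonl(payload_len);	/* wire format is big-endian */
}

This is also why smb2_send_recv() can be dropped further down: with the transport generating the marker, callers no longer need a wrapper that reserves iov[0] for it.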
diff --git a/fs/eventfd.c b/fs/eventfd.c
index ceb1031f1cac..08d3bd602f73 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -101,20 +101,14 @@ static int eventfd_release(struct inode *inode, struct file *file)
return 0;
}
-static struct wait_queue_head *
-eventfd_get_poll_head(struct file *file, __poll_t events)
-{
- struct eventfd_ctx *ctx = file->private_data;
-
- return &ctx->wqh;
-}
-
-static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t eventfd_poll(struct file *file, poll_table *wait)
{
struct eventfd_ctx *ctx = file->private_data;
__poll_t events = 0;
u64 count;
+ poll_wait(file, &ctx->wqh, wait);
+
/*
* All writes to ctx->count occur within ctx->wqh.lock. This read
* can be done outside ctx->wqh.lock because we know that poll_wait
@@ -156,11 +150,11 @@ static __poll_t eventfd_poll_mask(struct file *file, __poll_t eventmask)
count = READ_ONCE(ctx->count);
if (count > 0)
- events |= (EPOLLIN & eventmask);
+ events |= EPOLLIN;
if (count == ULLONG_MAX)
events |= EPOLLERR;
if (ULLONG_MAX - 1 > count)
- events |= (EPOLLOUT & eventmask);
+ events |= EPOLLOUT;
return events;
}
@@ -311,8 +305,7 @@ static const struct file_operations eventfd_fops = {
.show_fdinfo = eventfd_show_fdinfo,
#endif
.release = eventfd_release,
- .get_poll_head = eventfd_get_poll_head,
- .poll_mask = eventfd_poll_mask,
+ .poll = eventfd_poll,
.read = eventfd_read,
.write = eventfd_write,
.llseek = noop_llseek,
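This hunk (like the eventpoll and pipe hunks later in the series) reverts the short-lived get_poll_head()/poll_mask() split back to a single ->poll method that both registers the waiter and reports readiness. A hedged sketch of that conventional contract, with hypothetical names (my_ctx, data_ready, space_left) not taken from this patch:

/* Classic ->poll shape: arm the waitqueue entry via poll_wait(), then
 * return the events that are ready right now. */
static __poll_t my_poll(struct file *file, poll_table *wait)
{
	struct my_ctx *ctx = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &ctx->wqh, wait);

	if (data_ready(ctx))		/* hypothetical helper */
		events |= EPOLLIN;
	if (space_left(ctx))		/* hypothetical helper */
		events |= EPOLLOUT;
	return events;
}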
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ea4436f409fb..67db22fe99c5 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -922,18 +922,14 @@ static __poll_t ep_read_events_proc(struct eventpoll *ep, struct list_head *head
return 0;
}
-static struct wait_queue_head *ep_eventpoll_get_poll_head(struct file *file,
- __poll_t eventmask)
-{
- struct eventpoll *ep = file->private_data;
- return &ep->poll_wait;
-}
-
-static __poll_t ep_eventpoll_poll_mask(struct file *file, __poll_t eventmask)
+static __poll_t ep_eventpoll_poll(struct file *file, poll_table *wait)
{
struct eventpoll *ep = file->private_data;
int depth = 0;
+ /* Insert inside our poll wait queue */
+ poll_wait(file, &ep->poll_wait, wait);
+
/*
* Proceed to find out if wanted events are really available inside
* the ready list.
@@ -972,8 +968,7 @@ static const struct file_operations eventpoll_fops = {
.show_fdinfo = ep_show_fdinfo,
#endif
.release = ep_eventpoll_release,
- .get_poll_head = ep_eventpoll_get_poll_head,
- .poll_mask = ep_eventpoll_poll_mask,
+ .poll = ep_eventpoll_poll,
.llseek = noop_llseek,
};
diff --git a/fs/exec.c b/fs/exec.c
index 2d4e0075bd24..bdd0eacefdf5 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -290,15 +290,15 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
struct vm_area_struct *vma = NULL;
struct mm_struct *mm = bprm->mm;
- bprm->vma = vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ bprm->vma = vma = vm_area_alloc(mm);
if (!vma)
return -ENOMEM;
+ vma_set_anonymous(vma);
if (down_write_killable(&mm->mmap_sem)) {
err = -EINTR;
goto err_free;
}
- vma->vm_mm = mm;
/*
* Place the stack at the largest stack address the architecture
@@ -311,7 +311,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
vma->vm_start = vma->vm_end - PAGE_SIZE;
vma->vm_flags = VM_SOFTDIRTY | VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
- INIT_LIST_HEAD(&vma->anon_vma_chain);
err = insert_vm_struct(mm, vma);
if (err)
@@ -326,7 +325,7 @@ err:
up_write(&mm->mmap_sem);
err_free:
bprm->vma = NULL;
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return err;
}
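The stack VMA for a new binary is now obtained from vm_area_alloc() and released with vm_area_free(), so the vm_mm assignment and anon_vma_chain initialization no longer have to be open-coded here. A hedged sketch of the lifecycle these helpers provide (setup_stack_vma is an illustrative name, not a kernel function):

/* Sketch of the allocate / mark anonymous / insert-or-free pattern. */
static int setup_stack_vma(struct mm_struct *mm)
{
	struct vm_area_struct *vma;
	int err;

	vma = vm_area_alloc(mm);	/* sets vm_mm, inits anon_vma_chain */
	if (!vma)
		return -ENOMEM;
	vma_set_anonymous(vma);		/* anonymous mapping, no vm_ops */

	/* ... fill in vm_start, vm_end, vm_flags, vm_page_prot ... */

	err = insert_vm_struct(mm, vma);
	if (err)
		vm_area_free(vma);	/* undo on the error path */
	return err;
}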
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index cc40802ddfa8..00e759f05161 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -748,7 +748,6 @@ extern void ext2_free_blocks (struct inode *, unsigned long,
unsigned long);
extern unsigned long ext2_count_free_blocks (struct super_block *);
extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
unsigned int block_group,
struct buffer_head ** bh);
@@ -771,7 +770,6 @@ extern void ext2_set_link(struct inode *, struct ext2_dir_entry_2 *, struct page
extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
extern void ext2_free_inode (struct inode *);
extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* inode.c */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 25ab1274090f..8ff53f8da3bc 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -557,6 +557,9 @@ static int parse_options(char *options, struct super_block *sb,
set_opt (opts->s_mount_opt, NO_UID32);
break;
case Opt_nocheck:
+ ext2_msg(sb, KERN_WARNING,
+ "Option nocheck/check=none is deprecated and"
+ " will be removed in June 2020.");
clear_opt (opts->s_mount_opt, CHECK);
break;
case Opt_debug:
@@ -1335,9 +1338,6 @@ static int ext2_remount (struct super_block * sb, int * flags, char * data)
new_opts.s_resgid = sbi->s_resgid;
spin_unlock(&sbi->s_lock);
- /*
- * Allow the "check" option to be passed as a remount option.
- */
if (!parse_options(data, sb, &new_opts))
return -EINVAL;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index b00481c475cb..aa52d87985aa 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -184,7 +184,6 @@ static int ext4_init_block_bitmap(struct super_block *sb,
unsigned int bit, bit_max;
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t start, tmp;
- int flex_bg = 0;
J_ASSERT_BH(bh, buffer_locked(bh));
@@ -207,22 +206,19 @@ static int ext4_init_block_bitmap(struct super_block *sb,
start = ext4_group_first_block_no(sb, block_group);
- if (ext4_has_feature_flex_bg(sb))
- flex_bg = 1;
-
/* Set bits for block and inode bitmaps, and inode table */
tmp = ext4_block_bitmap(sb, gdp);
- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ if (ext4_block_in_group(sb, tmp, block_group))
ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
tmp = ext4_inode_bitmap(sb, gdp);
- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ if (ext4_block_in_group(sb, tmp, block_group))
ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
tmp = ext4_inode_table(sb, gdp);
for (; tmp < ext4_inode_table(sb, gdp) +
sbi->s_itb_per_group; tmp++) {
- if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ if (ext4_block_in_group(sb, tmp, block_group))
ext4_set_bit(EXT4_B2C(sbi, tmp - start), bh->b_data);
}
@@ -372,6 +368,8 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
+ if (buffer_verified(bh))
+ goto verified;
if (unlikely(!ext4_block_bitmap_csum_verify(sb, block_group,
desc, bh))) {
ext4_unlock_group(sb, block_group);
@@ -390,6 +388,7 @@ static int ext4_validate_block_bitmap(struct super_block *sb,
return -EFSCORRUPTED;
}
set_buffer_verified(bh);
+verified:
ext4_unlock_group(sb, block_group);
return 0;
}
@@ -442,7 +441,16 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
goto verify;
}
ext4_lock_group(sb, block_group);
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
+ if (block_group == 0) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ ext4_error(sb, "Block bitmap for bg 0 marked "
+ "uninitialized");
+ err = -EFSCORRUPTED;
+ goto out;
+ }
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
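Both bitmap readers (here and in fs/ext4/ialloc.c below) gain the same two guards: the *_UNINIT flags are only honoured when group descriptor checksums are enabled, and block group 0 -- which always holds the superblock and descriptors -- may never be marked uninitialized. In isolation the added check amounts to:

/* Sketch of the guard added to both bitmap readers. */
if (ext4_has_group_desc_csum(sb) &&
    (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
	if (block_group == 0)
		return -EFSCORRUPTED;	/* bg 0 can never be uninit */
	/* otherwise lazily initialize the bitmap as before */
}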
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0b127853c584..7c7123f265c2 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1114,6 +1114,7 @@ struct ext4_inode_info {
#define EXT4_MOUNT_DIOREAD_NOLOCK 0x400000 /* Enable support for dio read nolocking */
#define EXT4_MOUNT_JOURNAL_CHECKSUM 0x800000 /* Journal checksums */
#define EXT4_MOUNT_JOURNAL_ASYNC_COMMIT 0x1000000 /* Journal Async Commit */
+#define EXT4_MOUNT_WARN_ON_ERROR 0x2000000 /* Trigger WARN_ON on error */
#define EXT4_MOUNT_DELALLOC 0x8000000 /* Delalloc support */
#define EXT4_MOUNT_DATA_ERR_ABORT 0x10000000 /* Abort on file data write */
#define EXT4_MOUNT_BLOCK_VALIDITY 0x20000000 /* Block validity checking */
@@ -1507,11 +1508,6 @@ static inline struct ext4_inode_info *EXT4_I(struct inode *inode)
static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
{
return ino == EXT4_ROOT_INO ||
- ino == EXT4_USR_QUOTA_INO ||
- ino == EXT4_GRP_QUOTA_INO ||
- ino == EXT4_BOOT_LOADER_INO ||
- ino == EXT4_JOURNAL_INO ||
- ino == EXT4_RESIZE_INO ||
(ino >= EXT4_FIRST_INO(sb) &&
ino <= le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count));
}
@@ -3018,9 +3014,6 @@ extern int ext4_inline_data_fiemap(struct inode *inode,
struct iomap;
extern int ext4_inline_data_iomap(struct inode *inode, struct iomap *iomap);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed);
extern int ext4_inline_data_truncate(struct inode *inode, int *has_inline);
extern int ext4_convert_inline_data(struct inode *inode);
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 98fb0c119c68..adf6668b596f 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -91,6 +91,7 @@ struct ext4_extent_header {
};
#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
+#define EXT4_MAX_EXTENT_DEPTH 5
#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
(sizeof(struct ext4_extent_header) + \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0057fe3f248d..8ce6fd5b10dd 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -869,6 +869,12 @@ ext4_find_extent(struct inode *inode, ext4_lblk_t block,
eh = ext_inode_hdr(inode);
depth = ext_depth(inode);
+ if (depth < 0 || depth > EXT4_MAX_EXTENT_DEPTH) {
+ EXT4_ERROR_INODE(inode, "inode has invalid extent depth: %d",
+ depth);
+ ret = -EFSCORRUPTED;
+ goto err;
+ }
if (path) {
ext4_ext_drop_refs(path);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index f525f909b559..f336cbc6e932 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -90,6 +90,8 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
return -EFSCORRUPTED;
ext4_lock_group(sb, block_group);
+ if (buffer_verified(bh))
+ goto verified;
blk = ext4_inode_bitmap(sb, desc);
if (!ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
EXT4_INODES_PER_GROUP(sb) / 8)) {
@@ -101,6 +103,7 @@ static int ext4_validate_inode_bitmap(struct super_block *sb,
return -EFSBADCRC;
}
set_buffer_verified(bh);
+verified:
ext4_unlock_group(sb, block_group);
return 0;
}
@@ -150,7 +153,16 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
}
ext4_lock_group(sb, block_group);
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_INODE_UNINIT))) {
+ if (block_group == 0) {
+ ext4_unlock_group(sb, block_group);
+ unlock_buffer(bh);
+ ext4_error(sb, "Inode bitmap for bg 0 marked "
+ "uninitialized");
+ err = -EFSCORRUPTED;
+ goto out;
+ }
memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb),
sb->s_blocksize * 8, bh->b_data);
@@ -994,7 +1006,8 @@ got:
/* recheck and clear flag under lock if we still need to */
ext4_lock_group(sb, group);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb, group, gdp));
@@ -1375,7 +1388,10 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
ext4_itable_unused_count(sb, gdp)),
sbi->s_inodes_per_block);
- if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group)) {
+ if ((used_blks < 0) || (used_blks > sbi->s_itb_per_group) ||
+ ((group == 0) && ((EXT4_INODES_PER_GROUP(sb) -
+ ext4_itable_unused_count(sb, gdp)) <
+ EXT4_FIRST_INO(sb)))) {
ext4_error(sb, "Something is wrong with group %u: "
"used itable blocks: %d; "
"itable unused count: %u",
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 285ed1588730..3543fe80a3c4 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -437,6 +437,7 @@ static int ext4_destroy_inline_data_nolock(handle_t *handle,
memset((void *)ext4_raw_inode(&is.iloc)->i_block,
0, EXT4_MIN_INLINE_DATA_SIZE);
+ memset(ei->i_data, 0, EXT4_MIN_INLINE_DATA_SIZE);
if (ext4_has_feature_extents(inode->i_sb)) {
if (S_ISDIR(inode->i_mode) ||
@@ -681,6 +682,10 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
goto convert;
}
+ ret = ext4_journal_get_write_access(handle, iloc.bh);
+ if (ret)
+ goto out;
+
flags |= AOP_FLAG_NOFS;
page = grab_cache_page_write_begin(mapping, 0, flags);
@@ -709,7 +714,7 @@ int ext4_try_to_write_inline_data(struct address_space *mapping,
out_up_read:
up_read(&EXT4_I(inode)->xattr_sem);
out:
- if (handle)
+ if (handle && (ret != 1))
ext4_journal_stop(handle);
brelse(iloc.bh);
return ret;
@@ -751,6 +756,7 @@ int ext4_write_inline_data_end(struct inode *inode, loff_t pos, unsigned len,
ext4_write_unlock_xattr(inode, &no_expand);
brelse(iloc.bh);
+ mark_inode_dirty(inode);
out:
return copied;
}
@@ -886,18 +892,17 @@ retry_journal:
flags |= AOP_FLAG_NOFS;
if (ret == -ENOSPC) {
+ ext4_journal_stop(handle);
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
flags,
fsdata);
- ext4_journal_stop(handle);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
goto out;
}
-
page = grab_cache_page_write_begin(mapping, 0, flags);
if (!page) {
ret = -ENOMEM;
@@ -915,6 +920,9 @@ retry_journal:
if (ret < 0)
goto out_release_page;
}
+ ret = ext4_journal_get_write_access(handle, iloc.bh);
+ if (ret)
+ goto out_release_page;
up_read(&EXT4_I(inode)->xattr_sem);
*pagep = page;
@@ -935,7 +943,6 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
unsigned len, unsigned copied,
struct page *page)
{
- int i_size_changed = 0;
int ret;
ret = ext4_write_inline_data_end(inode, pos, len, copied, page);
@@ -953,10 +960,8 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
* But it's important to update i_size while still holding page lock:
* page writeout could otherwise come in and zero beyond i_size.
*/
- if (pos+copied > inode->i_size) {
+ if (pos+copied > inode->i_size)
i_size_write(inode, pos+copied);
- i_size_changed = 1;
- }
unlock_page(page);
put_page(page);
@@ -966,8 +971,7 @@ int ext4_da_write_inline_data_end(struct inode *inode, loff_t pos,
* ordering of page lock and transaction start for journaling
* filesystems.
*/
- if (i_size_changed)
- mark_inode_dirty(inode);
+ mark_inode_dirty(inode);
return copied;
}
@@ -1890,42 +1894,6 @@ out:
return (error < 0 ? error : 0);
}
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed)
-{
- int error;
- struct ext4_xattr_entry *entry;
- struct ext4_inode *raw_inode;
- struct ext4_iloc iloc;
-
- error = ext4_get_inode_loc(inode, &iloc);
- if (error)
- return error;
-
- raw_inode = ext4_raw_inode(&iloc);
- entry = (struct ext4_xattr_entry *)((void *)raw_inode +
- EXT4_I(inode)->i_inline_off);
- if (EXT4_XATTR_LEN(entry->e_name_len) +
- EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
- error = -ENOSPC;
- goto out;
- }
-
- error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
- brelse(iloc.bh);
- return error;
-}
-
int ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 2ea07efbe016..4efe77286ecd 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -402,9 +402,9 @@ static int __check_block_validity(struct inode *inode, const char *func,
if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
map->m_len)) {
ext4_error_inode(inode, func, line, map->m_pblk,
- "lblock %lu mapped to illegal pblock "
+ "lblock %lu mapped to illegal pblock %llu "
"(length %d)", (unsigned long) map->m_lblk,
- map->m_len);
+ map->m_pblk, map->m_len);
return -EFSCORRUPTED;
}
return 0;
@@ -1389,9 +1389,10 @@ static int ext4_write_end(struct file *file,
loff_t old_size = inode->i_size;
int ret = 0, ret2;
int i_size_changed = 0;
+ int inline_data = ext4_has_inline_data(inode);
trace_ext4_write_end(inode, pos, len, copied);
- if (ext4_has_inline_data(inode)) {
+ if (inline_data) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0) {
@@ -1419,7 +1420,7 @@ static int ext4_write_end(struct file *file,
* ordering of page lock and transaction start for journaling
* filesystems.
*/
- if (i_size_changed)
+ if (i_size_changed || inline_data)
ext4_mark_inode_dirty(handle, inode);
if (pos + len > inode->i_size && ext4_can_truncate(inode))
@@ -1493,6 +1494,7 @@ static int ext4_journalled_write_end(struct file *file,
int partial = 0;
unsigned from, to;
int size_changed = 0;
+ int inline_data = ext4_has_inline_data(inode);
trace_ext4_journalled_write_end(inode, pos, len, copied);
from = pos & (PAGE_SIZE - 1);
@@ -1500,7 +1502,7 @@ static int ext4_journalled_write_end(struct file *file,
BUG_ON(!ext4_handle_valid(handle));
- if (ext4_has_inline_data(inode)) {
+ if (inline_data) {
ret = ext4_write_inline_data_end(inode, pos, len,
copied, page);
if (ret < 0) {
@@ -1531,7 +1533,7 @@ static int ext4_journalled_write_end(struct file *file,
if (old_size < pos)
pagecache_isize_extended(inode, old_size, pos);
- if (size_changed) {
+ if (size_changed || inline_data) {
ret2 = ext4_mark_inode_dirty(handle, inode);
if (!ret)
ret = ret2;
@@ -2028,11 +2030,7 @@ static int __ext4_journalled_writepage(struct page *page,
}
if (inline_data) {
- BUFFER_TRACE(inode_bh, "get write access");
- ret = ext4_journal_get_write_access(handle, inode_bh);
-
- err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
-
+ ret = ext4_mark_inode_dirty(handle, inode);
} else {
ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
do_journal_get_write_access);
@@ -4506,7 +4504,8 @@ static int __ext4_get_inode_loc(struct inode *inode,
int inodes_per_block, inode_offset;
iloc->bh = NULL;
- if (!ext4_valid_inum(sb, inode->i_ino))
+ if (inode->i_ino < EXT4_ROOT_INO ||
+ inode->i_ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
return -EFSCORRUPTED;
iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 6eae2b91aafa..f7ab34088162 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2423,7 +2423,8 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
* initialize bb_free to be able to skip
* empty groups without initialization
*/
- if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
meta_group_info[i]->bb_free =
ext4_free_clusters_after_init(sb, group, desc);
} else {
@@ -2989,7 +2990,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
#endif
ext4_set_bits(bitmap_bh->b_data, ac->ac_b_ex.fe_start,
ac->ac_b_ex.fe_len);
- if (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
+ if (ext4_has_group_desc_csum(sb) &&
+ (gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT))) {
gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
ext4_free_group_clusters_set(sb, gdp,
ext4_free_clusters_after_init(sb,
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 27b9a76a0dfa..638ad4743477 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -186,11 +186,8 @@ static int kmmpd(void *data)
goto exit_thread;
}
- if (sb_rdonly(sb)) {
- ext4_warning(sb, "kmmpd being stopped since filesystem "
- "has been remounted as readonly.");
- goto exit_thread;
- }
+ if (sb_rdonly(sb))
+ break;
diff = jiffies - last_update_time;
if (diff < mmp_update_interval * HZ)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 0c4c2201b3aa..b7f7922061be 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -405,6 +405,9 @@ static void ext4_journal_commit_callback(journal_t *journal, transaction_t *txn)
static void ext4_handle_error(struct super_block *sb)
{
+ if (test_opt(sb, WARN_ON_ERROR))
+ WARN_ON_ONCE(1);
+
if (sb_rdonly(sb))
return;
@@ -740,6 +743,9 @@ __acquires(bitlock)
va_end(args);
}
+ if (test_opt(sb, WARN_ON_ERROR))
+ WARN_ON_ONCE(1);
+
if (test_opt(sb, ERRORS_CONT)) {
ext4_commit_super(sb, 0);
return;
@@ -1371,7 +1377,8 @@ enum {
Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
Opt_noquota, Opt_barrier, Opt_nobarrier, Opt_err,
Opt_usrquota, Opt_grpquota, Opt_prjquota, Opt_i_version, Opt_dax,
- Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_mblk_io_submit,
+ Opt_stripe, Opt_delalloc, Opt_nodelalloc, Opt_warn_on_error,
+ Opt_nowarn_on_error, Opt_mblk_io_submit,
Opt_lazytime, Opt_nolazytime, Opt_debug_want_extra_isize,
Opt_nomblk_io_submit, Opt_block_validity, Opt_noblock_validity,
Opt_inode_readahead_blks, Opt_journal_ioprio,
@@ -1438,6 +1445,8 @@ static const match_table_t tokens = {
{Opt_dax, "dax"},
{Opt_stripe, "stripe=%u"},
{Opt_delalloc, "delalloc"},
+ {Opt_warn_on_error, "warn_on_error"},
+ {Opt_nowarn_on_error, "nowarn_on_error"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
{Opt_debug_want_extra_isize, "debug_want_extra_isize=%u"},
@@ -1602,6 +1611,8 @@ static const struct mount_opts {
MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
{Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
MOPT_EXT4_ONLY | MOPT_CLEAR},
+ {Opt_warn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_SET},
+ {Opt_nowarn_on_error, EXT4_MOUNT_WARN_ON_ERROR, MOPT_CLEAR},
{Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
MOPT_EXT4_ONLY | MOPT_CLEAR},
{Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
@@ -2331,6 +2342,7 @@ static int ext4_check_descriptors(struct super_block *sb,
struct ext4_sb_info *sbi = EXT4_SB(sb);
ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
ext4_fsblk_t last_block;
+ ext4_fsblk_t last_bg_block = sb_block + ext4_bg_num_gdb(sb, 0);
ext4_fsblk_t block_bitmap;
ext4_fsblk_t inode_bitmap;
ext4_fsblk_t inode_table;
@@ -2363,6 +2375,14 @@ static int ext4_check_descriptors(struct super_block *sb,
if (!sb_rdonly(sb))
return 0;
}
+ if (block_bitmap >= sb_block + 1 &&
+ block_bitmap <= last_bg_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Block bitmap for group %u overlaps "
+ "block group descriptors", i);
+ if (!sb_rdonly(sb))
+ return 0;
+ }
if (block_bitmap < first_block || block_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Block bitmap for group %u not in group "
@@ -2377,6 +2397,14 @@ static int ext4_check_descriptors(struct super_block *sb,
if (!sb_rdonly(sb))
return 0;
}
+ if (inode_bitmap >= sb_block + 1 &&
+ inode_bitmap <= last_bg_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode bitmap for group %u overlaps "
+ "block group descriptors", i);
+ if (!sb_rdonly(sb))
+ return 0;
+ }
if (inode_bitmap < first_block || inode_bitmap > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
"Inode bitmap for group %u not in group "
@@ -2391,6 +2419,14 @@ static int ext4_check_descriptors(struct super_block *sb,
if (!sb_rdonly(sb))
return 0;
}
+ if (inode_table >= sb_block + 1 &&
+ inode_table <= last_bg_block) {
+ ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
+ "Inode table for group %u overlaps "
+ "block group descriptors", i);
+ if (!sb_rdonly(sb))
+ return 0;
+ }
if (inode_table < first_block ||
inode_table + sbi->s_itb_per_group - 1 > last_block) {
ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
@@ -3097,6 +3133,9 @@ static ext4_group_t ext4_has_uninit_itable(struct super_block *sb)
ext4_group_t group, ngroups = EXT4_SB(sb)->s_groups_count;
struct ext4_group_desc *gdp = NULL;
+ if (!ext4_has_group_desc_csum(sb))
+ return ngroups;
+
for (group = 0; group < ngroups; group++) {
gdp = ext4_get_group_desc(sb, group, NULL);
if (!gdp)
@@ -3742,6 +3781,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
le32_to_cpu(es->s_log_block_size));
goto failed_mount;
}
+ if (le32_to_cpu(es->s_log_cluster_size) >
+ (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
+ ext4_msg(sb, KERN_ERR,
+ "Invalid log cluster size: %u",
+ le32_to_cpu(es->s_log_cluster_size));
+ goto failed_mount;
+ }
if (le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) > (blocksize / 4)) {
ext4_msg(sb, KERN_ERR,
@@ -3806,6 +3852,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
} else {
sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
+ if (sbi->s_first_ino < EXT4_GOOD_OLD_FIRST_INO) {
+ ext4_msg(sb, KERN_ERR, "invalid first ino: %u",
+ sbi->s_first_ino);
+ goto failed_mount;
+ }
if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
(!is_power_of_2(sbi->s_inode_size)) ||
(sbi->s_inode_size > blocksize)) {
@@ -3882,13 +3933,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
"block size (%d)", clustersize, blocksize);
goto failed_mount;
}
- if (le32_to_cpu(es->s_log_cluster_size) >
- (EXT4_MAX_CLUSTER_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
- ext4_msg(sb, KERN_ERR,
- "Invalid log cluster size: %u",
- le32_to_cpu(es->s_log_cluster_size));
- goto failed_mount;
- }
sbi->s_cluster_bits = le32_to_cpu(es->s_log_cluster_size) -
le32_to_cpu(es->s_log_block_size);
sbi->s_clusters_per_group =
@@ -3909,10 +3953,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
} else {
if (clustersize != blocksize) {
- ext4_warning(sb, "fragment/cluster size (%d) != "
- "block size (%d)", clustersize,
- blocksize);
- clustersize = blocksize;
+ ext4_msg(sb, KERN_ERR,
+ "fragment/cluster size (%d) != "
+ "block size (%d)", clustersize, blocksize);
+ goto failed_mount;
}
if (sbi->s_blocks_per_group > blocksize * 8) {
ext4_msg(sb, KERN_ERR,
@@ -3966,6 +4010,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_blocks_count(es));
goto failed_mount;
}
+ if ((es->s_first_data_block == 0) && (es->s_log_block_size == 0) &&
+ (sbi->s_cluster_ratio == 1)) {
+ ext4_msg(sb, KERN_WARNING, "bad geometry: first data "
+ "block is 0 with a 1k block and cluster size");
+ goto failed_mount;
+ }
+
blocks_count = (ext4_blocks_count(es) -
le32_to_cpu(es->s_first_data_block) +
EXT4_BLOCKS_PER_GROUP(sb) - 1);
@@ -4001,6 +4052,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ret = -ENOMEM;
goto failed_mount;
}
+ if (((u64)sbi->s_groups_count * sbi->s_inodes_per_group) !=
+ le32_to_cpu(es->s_inodes_count)) {
+ ext4_msg(sb, KERN_ERR, "inodes count not valid: %u vs %llu",
+ le32_to_cpu(es->s_inodes_count),
+ ((u64)sbi->s_groups_count * sbi->s_inodes_per_group));
+ ret = -EINVAL;
+ goto failed_mount;
+ }
bgl_lock_init(sbi->s_blockgroup_lock);
@@ -4020,14 +4079,13 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount2;
}
}
+ sbi->s_gdb_count = db_count;
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
ret = -EFSCORRUPTED;
goto failed_mount2;
}
- sbi->s_gdb_count = db_count;
-
timer_setup(&sbi->s_err_report, print_daily_error_info, 0);
/* Register extent status tree shrinker */
@@ -4736,6 +4794,14 @@ static int ext4_commit_super(struct super_block *sb, int sync)
if (!sbh || block_device_ejected(sb))
return error;
+
+ /*
+ * The superblock bh should be mapped, but it might not be if the
+ * device was hot-removed. Not much we can do but fail the I/O.
+ */
+ if (!buffer_mapped(sbh))
+ return error;
+
/*
* If the file system is mounted read-only, don't update the
* superblock write time. This avoids updating the superblock
@@ -5140,6 +5206,8 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
if (sbi->s_journal)
ext4_mark_recovery_complete(sb, es);
+ if (sbi->s_mmp_tsk)
+ kthread_stop(sbi->s_mmp_tsk);
} else {
/* Make sure we can mount this feature set readwrite */
if (ext4_has_feature_readonly(sb) ||
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fc4ced59c565..723df14f4084 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -230,12 +230,12 @@ __ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh,
{
int error = -EFSCORRUPTED;
- if (buffer_verified(bh))
- return 0;
-
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1))
goto errout;
+ if (buffer_verified(bh))
+ return 0;
+
error = -EFSBADCRC;
if (!ext4_xattr_block_csum_verify(inode, bh))
goto errout;
@@ -1560,7 +1560,7 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
handle_t *handle, struct inode *inode,
bool is_block)
{
- struct ext4_xattr_entry *last;
+ struct ext4_xattr_entry *last, *next;
struct ext4_xattr_entry *here = s->here;
size_t min_offs = s->end - s->base, name_len = strlen(i->name);
int in_inode = i->in_inode;
@@ -1595,7 +1595,13 @@ static int ext4_xattr_set_entry(struct ext4_xattr_info *i,
/* Compute min_offs and last. */
last = s->first;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ for (; !IS_LAST_ENTRY(last); last = next) {
+ next = EXT4_XATTR_NEXT(last);
+ if ((void *)next >= s->end) {
+ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+ ret = -EFSCORRUPTED;
+ goto out;
+ }
if (!last->e_value_inum && last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < min_offs)
@@ -2206,23 +2212,8 @@ int ext4_xattr_ibody_inline_set(handle_t *handle, struct inode *inode,
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
error = ext4_xattr_set_entry(i, s, handle, inode, false /* is_block */);
- if (error) {
- if (error == -ENOSPC &&
- ext4_has_inline_data(inode)) {
- error = ext4_try_to_evict_inline_data(handle, inode,
- EXT4_XATTR_LEN(strlen(i->name) +
- EXT4_XATTR_SIZE(i->value_len)));
- if (error)
- return error;
- error = ext4_xattr_ibody_find(inode, i, is);
- if (error)
- return error;
- error = ext4_xattr_set_entry(i, s, handle, inode,
- false /* is_block */);
- }
- if (error)
- return error;
- }
+ if (error)
+ return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -2651,6 +2642,11 @@ static int ext4_xattr_make_inode_space(handle_t *handle, struct inode *inode,
last = IFIRST(header);
/* Find the entry best suited to be pushed into EA block */
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ /* never move system.data out of the inode */
+ if ((last->e_name_len == 4) &&
+ (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+ !memcmp(last->e_name, "data", 4))
+ continue;
total_size = EXT4_XATTR_LEN(last->e_name_len);
if (!last->e_value_inum)
total_size += EXT4_XATTR_SIZE(
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 065dc919a0ce..bfd589ea74c0 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -707,13 +707,21 @@ static void fat_set_state(struct super_block *sb,
brelse(bh);
}
+static void fat_reset_iocharset(struct fat_mount_options *opts)
+{
+ if (opts->iocharset != fat_default_iocharset) {
+ /* Note: opts->iocharset can be NULL here */
+ kfree(opts->iocharset);
+ opts->iocharset = fat_default_iocharset;
+ }
+}
+
static void delayed_free(struct rcu_head *p)
{
struct msdos_sb_info *sbi = container_of(p, struct msdos_sb_info, rcu);
unload_nls(sbi->nls_disk);
unload_nls(sbi->nls_io);
- if (sbi->options.iocharset != fat_default_iocharset)
- kfree(sbi->options.iocharset);
+ fat_reset_iocharset(&sbi->options);
kfree(sbi);
}
@@ -1132,7 +1140,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
opts->fs_fmask = opts->fs_dmask = current_umask();
opts->allow_utime = -1;
opts->codepage = fat_default_codepage;
- opts->iocharset = fat_default_iocharset;
+ fat_reset_iocharset(opts);
if (is_vfat) {
opts->shortname = VFAT_SFN_DISPLAY_WINNT|VFAT_SFN_CREATE_WIN95;
opts->rodir = 0;
@@ -1289,8 +1297,7 @@ static int parse_options(struct super_block *sb, char *options, int is_vfat,
/* vfat specific */
case Opt_charset:
- if (opts->iocharset != fat_default_iocharset)
- kfree(opts->iocharset);
+ fat_reset_iocharset(opts);
iocharset = match_strdup(&args[0]);
if (!iocharset)
return -ENOMEM;
@@ -1881,8 +1888,7 @@ out_fail:
iput(fat_inode);
unload_nls(sbi->nls_io);
unload_nls(sbi->nls_disk);
- if (sbi->options.iocharset != fat_default_iocharset)
- kfree(sbi->options.iocharset);
+ fat_reset_iocharset(&sbi->options);
sb->s_fs_info = NULL;
kfree(sbi);
return error;
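Folding the three open-coded "free iocharset unless it is the static default" sites into fat_reset_iocharset() also resets the pointer, so a later release path cannot free it twice. The idiom in isolation, as a user-space analogue (default_charset stands in for fat_default_iocharset):

#include <stdlib.h>

static char default_charset[] = "iso8859-1";	/* illustrative default */

static void reset_charset(char **cs)
{
	if (*cs != default_charset) {
		free(*cs);		/* free(NULL) is a no-op */
		*cs = default_charset;	/* never left dangling */
	}
}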
diff --git a/fs/fscache/cache.c b/fs/fscache/cache.c
index c184c5a356ff..cdcb376ef8df 100644
--- a/fs/fscache/cache.c
+++ b/fs/fscache/cache.c
@@ -220,6 +220,7 @@ int fscache_add_cache(struct fscache_cache *cache,
{
struct fscache_cache_tag *tag;
+ ASSERTCMP(ifsdef->cookie, ==, &fscache_fsdef_index);
BUG_ON(!cache->ops);
BUG_ON(!ifsdef);
@@ -248,7 +249,6 @@ int fscache_add_cache(struct fscache_cache *cache,
if (!cache->kobj)
goto error;
- ifsdef->cookie = &fscache_fsdef_index;
ifsdef->cache = cache;
cache->fsdef = ifsdef;
diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
index 97137d7ec5ee..83bfe04456b6 100644
--- a/fs/fscache/cookie.c
+++ b/fs/fscache/cookie.c
@@ -516,6 +516,7 @@ static int fscache_alloc_object(struct fscache_cache *cache,
goto error;
}
+ ASSERTCMP(object->cookie, ==, cookie);
fscache_stat(&fscache_n_object_alloc);
object->debug_id = atomic_inc_return(&fscache_object_debug_id);
@@ -571,6 +572,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
_enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
+ ASSERTCMP(object->cookie, ==, cookie);
+
spin_lock(&cookie->lock);
/* there may be multiple initial creations of this object, but we only
@@ -610,9 +613,7 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
spin_unlock(&cache->object_list_lock);
}
- /* attach to the cookie */
- object->cookie = cookie;
- fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
+ /* Attach to the cookie. The object already has a ref on it. */
hlist_add_head(&object->cookie_link, &cookie->backing_objects);
fscache_objlist_add(object);
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 20e0d0a4dc8c..9edc920f651f 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -327,6 +327,7 @@ void fscache_object_init(struct fscache_object *object,
object->store_limit_l = 0;
object->cache = cache;
object->cookie = cookie;
+ fscache_cookie_get(cookie, fscache_cookie_get_attach_object);
object->parent = NULL;
#ifdef CONFIG_FSCACHE_OBJECT_LIST
RB_CLEAR_NODE(&object->objlist_link);
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index e30c5975ea58..8d265790374c 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -70,7 +70,8 @@ void fscache_enqueue_operation(struct fscache_operation *op)
ASSERT(op->processor != NULL);
ASSERT(fscache_object_is_available(op->object));
ASSERTCMP(atomic_read(&op->usage), >, 0);
- ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
+ ASSERTIFCMP(op->state != FSCACHE_OP_ST_IN_PROGRESS,
+ op->state, ==, FSCACHE_OP_ST_CANCELLED);
fscache_stat(&fscache_n_op_enqueue);
switch (op->flags & FSCACHE_OP_TYPE) {
@@ -499,7 +500,8 @@ void fscache_put_operation(struct fscache_operation *op)
struct fscache_cache *cache;
_enter("{OBJ%x OP%x,%d}",
- op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+ op->object ? op->object->debug_id : 0,
+ op->debug_id, atomic_read(&op->usage));
ASSERTCMP(atomic_read(&op->usage), >, 0);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index d508c7844681..40d4c66c7751 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -411,6 +411,7 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
bool truncate_op = (lend == LLONG_MAX);
memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+ vma_init(&pseudo_vma, current->mm);
pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pagevec_init(&pvec);
next = start;
@@ -595,6 +596,7 @@ static long hugetlbfs_fallocate(struct file *file, int mode, loff_t offset,
* as input to create an allocation policy.
*/
memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
+ vma_init(&pseudo_vma, mm);
pseudo_vma.vm_flags = (VM_HUGETLB | VM_MAYSHARE | VM_SHARED);
pseudo_vma.vm_file = file;
diff --git a/fs/inode.c b/fs/inode.c
index 2c300e981796..8c86c809ca17 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1999,8 +1999,14 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
inode->i_uid = current_fsuid();
if (dir && dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
+
+ /* Directories are special, and always inherit S_ISGID */
if (S_ISDIR(mode))
mode |= S_ISGID;
+ else if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
+ !in_group_p(inode->i_gid) &&
+ !capable_wrt_inode_uidgid(dir, CAP_FSETID))
+ mode &= ~S_ISGID;
} else
inode->i_gid = current_fsgid();
inode->i_mode = mode;
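inode_init_owner() now strips S_ISGID from non-directory inodes whose creator is neither a member of the owning group nor privileged with CAP_FSETID, while directories keep inheriting it, bringing inode creation in line with the checks applied on the chmod/chown paths. The rule in isolation, as a hedged user-space sketch (in_owning_group and has_fsetid stand in for in_group_p() and capable_wrt_inode_uidgid()):

#include <sys/stat.h>

static mode_t apply_sgid_policy(mode_t mode, int is_dir,
				int in_owning_group, int has_fsetid)
{
	if (is_dir)
		return mode | S_ISGID;		/* directories always inherit */
	if ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP) &&
	    !in_owning_group && !has_fsetid)
		mode &= ~S_ISGID;		/* drop setgid on group-exec files */
	return mode;
}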
diff --git a/fs/internal.h b/fs/internal.h
index 980d005b21b4..5645b4ebf494 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -127,7 +127,6 @@ int do_fchownat(int dfd, const char __user *filename, uid_t user, gid_t group,
extern int open_check_o_direct(struct file *f);
extern int vfs_open(const struct path *, struct file *, const struct cred *);
-extern struct file *filp_clone_open(struct file *);
/*
* inode.c
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 51dd68e67b0f..c0b66a7a795b 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1361,6 +1361,13 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
if (jh->b_transaction == transaction &&
jh->b_jlist != BJ_Metadata) {
jbd_lock_bh_state(bh);
+ if (jh->b_transaction == transaction &&
+ jh->b_jlist != BJ_Metadata)
+ pr_err("JBD2: assertion failure: h_type=%u "
+ "h_line_no=%u block_no=%llu jlist=%u\n",
+ handle->h_type, handle->h_line_no,
+ (unsigned long long) bh->b_blocknr,
+ jh->b_jlist);
J_ASSERT_JH(jh, jh->b_transaction != transaction ||
jh->b_jlist == BJ_Metadata);
jbd_unlock_bh_state(bh);
@@ -1380,11 +1387,11 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* of the transaction. This needs to be done
* once a transaction -bzzz
*/
- jh->b_modified = 1;
if (handle->h_buffer_credits <= 0) {
ret = -ENOSPC;
goto out_unlock_bh;
}
+ jh->b_modified = 1;
handle->h_buffer_credits--;
}
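Setting jh->b_modified before the credit check meant an -ENOSPC failure left the buffer already flagged as modified even though no credit had been taken, so the flag and the credit accounting could fall out of step. The safe shape is reserve-then-mark:

/* Sketch: only record the modification once the credit is secured. */
if (handle->h_buffer_credits <= 0)
	return -ENOSPC;		/* nothing has been recorded yet */
jh->b_modified = 1;
handle->h_buffer_credits--;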
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index c60f3d32ee91..a6797986b625 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -491,15 +491,17 @@ static int ea_get(struct inode *inode, struct ea_buffer *ea_buf, int min_size)
if (size > PSIZE) {
/*
* To keep the rest of the code simple. Allocate a
- * contiguous buffer to work with
+ * contiguous buffer to work with. Make the buffer large
+ * enough to make use of the whole extent.
*/
- ea_buf->xattr = kmalloc(size, GFP_KERNEL);
+ ea_buf->max_size = (size + sb->s_blocksize - 1) &
+ ~(sb->s_blocksize - 1);
+
+ ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL);
if (ea_buf->xattr == NULL)
return -ENOMEM;
ea_buf->flag = EA_MALLOC;
- ea_buf->max_size = (size + sb->s_blocksize - 1) &
- ~(sb->s_blocksize - 1);
if (ea_size == 0)
return 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index bbd0465535eb..f033f3a69a3b 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -883,8 +883,10 @@ struct inode *nfs_delegation_find_inode(struct nfs_client *clp,
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
res = nfs_delegation_find_inode_server(server, fhandle);
- if (res != ERR_PTR(-ENOENT))
+ if (res != ERR_PTR(-ENOENT)) {
+ rcu_read_unlock();
return res;
+ }
}
rcu_read_unlock();
return ERR_PTR(-ENOENT);
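The bug here is the standard one for RCU read-side sections: returning from inside list_for_each_entry_rcu() without dropping the read lock. Every exit path has to unlock, as in this hedged sketch (find_one() is an illustrative helper):

rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
	res = find_one(server, fhandle);
	if (res != ERR_PTR(-ENOENT)) {
		rcu_read_unlock();	/* unlock before the early return */
		return res;
	}
}
rcu_read_unlock();
return ERR_PTR(-ENOENT);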
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index d4a07acad598..8f003792ccde 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1243,17 +1243,18 @@ static int ff_layout_read_done_cb(struct rpc_task *task,
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+ clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
if (ff_layout_choose_best_ds_for_read(hdr->lseg,
hdr->pgio_mirror_idx + 1,
&hdr->pgio_mirror_idx))
goto out_eagain;
- ff_layout_read_record_layoutstats_done(task, hdr);
- pnfs_read_resend_pnfs(hdr);
+ set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
- ff_layout_reset_read(hdr);
+ set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
return task->tk_status;
case -EAGAIN:
goto out_eagain;
@@ -1403,6 +1404,10 @@ static void ff_layout_read_release(void *data)
struct nfs_pgio_header *hdr = data;
ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ pnfs_read_resend_pnfs(hdr);
+ else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ ff_layout_reset_read(hdr);
pnfs_generic_rw_release(data);
}
@@ -1423,12 +1428,14 @@ static int ff_layout_write_done_cb(struct rpc_task *task,
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+ clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
- ff_layout_reset_write(hdr, true);
+ set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
- ff_layout_reset_write(hdr, false);
+ set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
@@ -1575,6 +1582,10 @@ static void ff_layout_write_release(void *data)
struct nfs_pgio_header *hdr = data;
ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ ff_layout_reset_write(hdr, true);
+ else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ ff_layout_reset_write(hdr, false);
pnfs_generic_rw_release(data);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ed45090e4df6..6dd146885da9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3294,6 +3294,7 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct inode *inode = calldata->inode;
+ struct pnfs_layout_hdr *lo;
bool is_rdonly, is_wronly, is_rdwr;
int call_close = 0;
@@ -3337,6 +3338,12 @@ static void nfs4_close_prepare(struct rpc_task *task, void *data)
goto out_wait;
}
+ lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+ calldata->arg.lr_args = NULL;
+ calldata->res.lr_res = NULL;
+ }
+
if (calldata->arg.fmode == 0)
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
@@ -5972,12 +5979,19 @@ static void nfs4_delegreturn_release(void *calldata)
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
struct nfs4_delegreturndata *d_data;
+ struct pnfs_layout_hdr *lo;
d_data = (struct nfs4_delegreturndata *)data;
if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
return;
+ lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+ d_data->args.lr_args = NULL;
+ d_data->res.lr_res = NULL;
+ }
+
nfs4_setup_sequence(d_data->res.server->nfs_client,
&d_data->args.seq_args,
&d_data->res.seq_res,
@@ -8650,6 +8664,8 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
+ nfs4_sequence_free_slot(&lgp->res.seq_res);
+
switch (nfs4err) {
case 0:
goto out;
@@ -8714,7 +8730,6 @@ nfs4_layoutget_handle_exception(struct rpc_task *task,
goto out;
}
- nfs4_sequence_free_slot(&lgp->res.seq_res);
err = nfs4_handle_exception(server, nfs4err, exception);
if (!status) {
if (exception->retry)
@@ -8786,20 +8801,22 @@ nfs4_proc_layoutget(struct nfs4_layoutget *lgp, long *timeout)
if (IS_ERR(task))
return ERR_CAST(task);
status = rpc_wait_for_completion_task(task);
- if (status == 0) {
+ if (status != 0)
+ goto out;
+
+ /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+ if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
status = nfs4_layoutget_handle_exception(task, lgp, &exception);
*timeout = exception.timeout;
- }
-
+ } else
+ lseg = pnfs_layout_process(lgp);
+out:
trace_nfs4_layoutget(lgp->args.ctx,
&lgp->args.range,
&lgp->res.range,
&lgp->res.stateid,
status);
- /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
- if (status == 0 && lgp->res.layoutp->len)
- lseg = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
if (status)
@@ -8817,6 +8834,8 @@ nfs4_layoutreturn_prepare(struct rpc_task *task, void *calldata)
&lrp->args.seq_args,
&lrp->res.seq_res,
task);
+ if (!pnfs_layout_is_valid(lrp->args.layout))
+ rpc_exit(task, 0);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index a8f5e6b16749..3fe81424337d 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -801,6 +801,11 @@ static inline void nfs4_lgopen_release(struct nfs4_layoutget *lgp)
{
}
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+ return false;
+}
+
#endif /* CONFIG_NFS_V4_1 */
#if IS_ENABLED(CONFIG_NFS_V4_2)
diff --git a/fs/pipe.c b/fs/pipe.c
index bb0840e234f3..39d6f431da83 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -509,22 +509,19 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
}
-static struct wait_queue_head *
-pipe_get_poll_head(struct file *filp, __poll_t events)
-{
- struct pipe_inode_info *pipe = filp->private_data;
-
- return &pipe->wait;
-}
-
/* No kernel lock held - fine */
-static __poll_t pipe_poll_mask(struct file *filp, __poll_t events)
+static __poll_t
+pipe_poll(struct file *filp, poll_table *wait)
{
+ __poll_t mask;
struct pipe_inode_info *pipe = filp->private_data;
- int nrbufs = pipe->nrbufs;
- __poll_t mask = 0;
+ int nrbufs;
+
+ poll_wait(filp, &pipe->wait, wait);
/* Reading only -- no need for acquiring the semaphore. */
+ nrbufs = pipe->nrbufs;
+ mask = 0;
if (filp->f_mode & FMODE_READ) {
mask = (nrbufs > 0) ? EPOLLIN | EPOLLRDNORM : 0;
if (!pipe->writers && filp->f_version != pipe->w_counter)
@@ -1023,8 +1020,7 @@ const struct file_operations pipefifo_fops = {
.llseek = no_llseek,
.read_iter = pipe_read,
.write_iter = pipe_write,
- .get_poll_head = pipe_get_poll_head,
- .poll_mask = pipe_poll_mask,
+ .poll = pipe_poll,
.unlocked_ioctl = pipe_ioctl,
.release = pipe_release,
.fasync = pipe_fasync,
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b6572944efc3..aaffc0c30216 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -235,6 +235,10 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
if (env_start != arg_end || env_start >= env_end)
env_start = env_end = arg_end;
+ /* .. and limit it to a maximum of one page of slop */
+ if (env_end >= arg_end + PAGE_SIZE)
+ env_end = arg_end + PAGE_SIZE - 1;
+
/* We're not going to care if "*ppos" has high bits set */
pos = arg_start + *ppos;
@@ -254,10 +258,19 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
while (count) {
int got;
size_t size = min_t(size_t, PAGE_SIZE, count);
+ long offset;
- got = access_remote_vm(mm, pos, page, size, FOLL_ANON);
- if (got <= 0)
+ /*
+ * Are we already starting past the official end?
+ * We always include the last byte that is *supposed*
+ * to be NUL
+ */
+ offset = (pos >= arg_end) ? pos - arg_end + 1 : 0;
+
+ got = access_remote_vm(mm, pos - offset, page, size + offset, FOLL_ANON);
+ if (got <= offset)
break;
+ got -= offset;
/* Don't walk past a NUL character once you hit arg_end */
if (pos + got >= arg_end) {
@@ -276,12 +289,17 @@ static ssize_t get_mm_cmdline(struct mm_struct *mm, char __user *buf,
n = arg_end - pos - 1;
/* Cut off at first NUL after 'n' */
- got = n + strnlen(page+n, got-n);
- if (!got)
+ got = n + strnlen(page+n, offset+got-n);
+ if (got < offset)
break;
+ got -= offset;
+
+ /* Include the NUL if it existed */
+ if (got < size)
+ got++;
}
- got -= copy_to_user(buf, page, got);
+ got -= copy_to_user(buf, page+offset, got);
if (unlikely(!got)) {
if (!len)
len = -EFAULT;
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 6ac1c92997ea..bb1c1625b158 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -564,11 +564,20 @@ static int proc_seq_open(struct inode *inode, struct file *file)
return seq_open(file, de->seq_ops);
}
+static int proc_seq_release(struct inode *inode, struct file *file)
+{
+ struct proc_dir_entry *de = PDE(inode);
+
+ if (de->state_size)
+ return seq_release_private(inode, file);
+ return seq_release(inode, file);
+}
+
static const struct file_operations proc_seq_fops = {
.open = proc_seq_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release,
+ .release = proc_seq_release,
};
struct proc_dir_entry *proc_create_seq_private(const char *name, umode_t mode,
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index e9679016271f..dfd73a4616ce 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -831,7 +831,8 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
SEQ_PUT_DEC(" kB\nSwap: ", mss->swap);
SEQ_PUT_DEC(" kB\nSwapPss: ",
mss->swap_pss >> PSS_SHIFT);
- SEQ_PUT_DEC(" kB\nLocked: ", mss->pss >> PSS_SHIFT);
+ SEQ_PUT_DEC(" kB\nLocked: ",
+ mss->pss_locked >> PSS_SHIFT);
seq_puts(m, " kB\n");
}
if (!rollup_mode) {
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d88231e3b2be..fc20e06c56ba 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -711,21 +711,18 @@ EXPORT_SYMBOL(dquot_quota_sync);
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct list_head *head;
struct dquot *dquot;
unsigned long freed = 0;
spin_lock(&dq_list_lock);
- head = free_dquots.prev;
- while (head != &free_dquots && sc->nr_to_scan) {
- dquot = list_entry(head, struct dquot, dq_free);
+ while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+ dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
sc->nr_to_scan--;
freed++;
- head = free_dquots.prev;
}
spin_unlock(&dq_list_lock);
return freed;
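Rewriting the scan loop around list_first_entry() gives the usual drain-from-the-front shape: check for emptiness, take the head, unlink, destroy, repeat while budget remains. The same shape in plain C, as a standalone sketch (the kernel version does this under dq_list_lock with the list_* helpers):

#include <stdlib.h>

struct node { struct node *next; };

static unsigned long drain(struct node **head, unsigned long budget)
{
	unsigned long freed = 0;

	while (*head && budget) {
		struct node *n = *head;	/* always the current first entry */
		*head = n->next;
		free(n);
		freed++;
		budget--;
	}
	return freed;
}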
diff --git a/fs/reiserfs/prints.c b/fs/reiserfs/prints.c
index 7e288d97adcb..9fed1c05f1f4 100644
--- a/fs/reiserfs/prints.c
+++ b/fs/reiserfs/prints.c
@@ -76,83 +76,99 @@ static char *le_type(struct reiserfs_key *key)
}
/* %k */
-static void sprintf_le_key(char *buf, struct reiserfs_key *key)
+static int scnprintf_le_key(char *buf, size_t size, struct reiserfs_key *key)
{
if (key)
- sprintf(buf, "[%d %d %s %s]", le32_to_cpu(key->k_dir_id),
- le32_to_cpu(key->k_objectid), le_offset(key),
- le_type(key));
+ return scnprintf(buf, size, "[%d %d %s %s]",
+ le32_to_cpu(key->k_dir_id),
+ le32_to_cpu(key->k_objectid), le_offset(key),
+ le_type(key));
else
- sprintf(buf, "[NULL]");
+ return scnprintf(buf, size, "[NULL]");
}
/* %K */
-static void sprintf_cpu_key(char *buf, struct cpu_key *key)
+static int scnprintf_cpu_key(char *buf, size_t size, struct cpu_key *key)
{
if (key)
- sprintf(buf, "[%d %d %s %s]", key->on_disk_key.k_dir_id,
- key->on_disk_key.k_objectid, reiserfs_cpu_offset(key),
- cpu_type(key));
+ return scnprintf(buf, size, "[%d %d %s %s]",
+ key->on_disk_key.k_dir_id,
+ key->on_disk_key.k_objectid,
+ reiserfs_cpu_offset(key), cpu_type(key));
else
- sprintf(buf, "[NULL]");
+ return scnprintf(buf, size, "[NULL]");
}
-static void sprintf_de_head(char *buf, struct reiserfs_de_head *deh)
+static int scnprintf_de_head(char *buf, size_t size,
+ struct reiserfs_de_head *deh)
{
if (deh)
- sprintf(buf,
- "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
- deh_offset(deh), deh_dir_id(deh), deh_objectid(deh),
- deh_location(deh), deh_state(deh));
+ return scnprintf(buf, size,
+ "[offset=%d dir_id=%d objectid=%d location=%d state=%04x]",
+ deh_offset(deh), deh_dir_id(deh),
+ deh_objectid(deh), deh_location(deh),
+ deh_state(deh));
else
- sprintf(buf, "[NULL]");
+ return scnprintf(buf, size, "[NULL]");
}
-static void sprintf_item_head(char *buf, struct item_head *ih)
+static int scnprintf_item_head(char *buf, size_t size, struct item_head *ih)
{
if (ih) {
- strcpy(buf,
- (ih_version(ih) == KEY_FORMAT_3_6) ? "*3.6* " : "*3.5*");
- sprintf_le_key(buf + strlen(buf), &(ih->ih_key));
- sprintf(buf + strlen(buf), ", item_len %d, item_location %d, "
- "free_space(entry_count) %d",
- ih_item_len(ih), ih_location(ih), ih_free_space(ih));
+ char *p = buf;
+ char * const end = buf + size;
+
+ p += scnprintf(p, end - p, "%s",
+ (ih_version(ih) == KEY_FORMAT_3_6) ?
+ "*3.6* " : "*3.5*");
+
+ p += scnprintf_le_key(p, end - p, &ih->ih_key);
+
+ p += scnprintf(p, end - p,
+ ", item_len %d, item_location %d, free_space(entry_count) %d",
+ ih_item_len(ih), ih_location(ih),
+ ih_free_space(ih));
+ return p - buf;
} else
- sprintf(buf, "[NULL]");
+ return scnprintf(buf, size, "[NULL]");
}
-static void sprintf_direntry(char *buf, struct reiserfs_dir_entry *de)
+static int scnprintf_direntry(char *buf, size_t size,
+ struct reiserfs_dir_entry *de)
{
char name[20];
memcpy(name, de->de_name, de->de_namelen > 19 ? 19 : de->de_namelen);
name[de->de_namelen > 19 ? 19 : de->de_namelen] = 0;
- sprintf(buf, "\"%s\"==>[%d %d]", name, de->de_dir_id, de->de_objectid);
+ return scnprintf(buf, size, "\"%s\"==>[%d %d]",
+ name, de->de_dir_id, de->de_objectid);
}
-static void sprintf_block_head(char *buf, struct buffer_head *bh)
+static int scnprintf_block_head(char *buf, size_t size, struct buffer_head *bh)
{
- sprintf(buf, "level=%d, nr_items=%d, free_space=%d rdkey ",
- B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
+ return scnprintf(buf, size,
+ "level=%d, nr_items=%d, free_space=%d rdkey ",
+ B_LEVEL(bh), B_NR_ITEMS(bh), B_FREE_SPACE(bh));
}
-static void sprintf_buffer_head(char *buf, struct buffer_head *bh)
+static int scnprintf_buffer_head(char *buf, size_t size, struct buffer_head *bh)
{
- sprintf(buf,
- "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
- bh->b_bdev, bh->b_size,
- (unsigned long long)bh->b_blocknr, atomic_read(&(bh->b_count)),
- bh->b_state, bh->b_page,
- buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
- buffer_dirty(bh) ? "DIRTY" : "CLEAN",
- buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
+ return scnprintf(buf, size,
+ "dev %pg, size %zd, blocknr %llu, count %d, state 0x%lx, page %p, (%s, %s, %s)",
+ bh->b_bdev, bh->b_size,
+ (unsigned long long)bh->b_blocknr,
+ atomic_read(&(bh->b_count)),
+ bh->b_state, bh->b_page,
+ buffer_uptodate(bh) ? "UPTODATE" : "!UPTODATE",
+ buffer_dirty(bh) ? "DIRTY" : "CLEAN",
+ buffer_locked(bh) ? "LOCKED" : "UNLOCKED");
}
-static void sprintf_disk_child(char *buf, struct disk_child *dc)
+static int scnprintf_disk_child(char *buf, size_t size, struct disk_child *dc)
{
- sprintf(buf, "[dc_number=%d, dc_size=%u]", dc_block_number(dc),
- dc_size(dc));
+ return scnprintf(buf, size, "[dc_number=%d, dc_size=%u]",
+ dc_block_number(dc), dc_size(dc));
}
static char *is_there_reiserfs_struct(char *fmt, int *what)
@@ -189,55 +205,60 @@ static void prepare_error_buf(const char *fmt, va_list args)
char *fmt1 = fmt_buf;
char *k;
char *p = error_buf;
+ char * const end = &error_buf[sizeof(error_buf)];
int what;
spin_lock(&error_lock);
- strcpy(fmt1, fmt);
+ if (WARN_ON(strscpy(fmt_buf, fmt, sizeof(fmt_buf)) < 0)) {
+ strscpy(error_buf, "format string too long", end - error_buf);
+ goto out_unlock;
+ }
while ((k = is_there_reiserfs_struct(fmt1, &what)) != NULL) {
*k = 0;
- p += vsprintf(p, fmt1, args);
+ p += vscnprintf(p, end - p, fmt1, args);
switch (what) {
case 'k':
- sprintf_le_key(p, va_arg(args, struct reiserfs_key *));
+ p += scnprintf_le_key(p, end - p,
+ va_arg(args, struct reiserfs_key *));
break;
case 'K':
- sprintf_cpu_key(p, va_arg(args, struct cpu_key *));
+ p += scnprintf_cpu_key(p, end - p,
+ va_arg(args, struct cpu_key *));
break;
case 'h':
- sprintf_item_head(p, va_arg(args, struct item_head *));
+ p += scnprintf_item_head(p, end - p,
+ va_arg(args, struct item_head *));
break;
case 't':
- sprintf_direntry(p,
- va_arg(args,
- struct reiserfs_dir_entry *));
+ p += scnprintf_direntry(p, end - p,
+ va_arg(args, struct reiserfs_dir_entry *));
break;
case 'y':
- sprintf_disk_child(p,
- va_arg(args, struct disk_child *));
+ p += scnprintf_disk_child(p, end - p,
+ va_arg(args, struct disk_child *));
break;
case 'z':
- sprintf_block_head(p,
- va_arg(args, struct buffer_head *));
+ p += scnprintf_block_head(p, end - p,
+ va_arg(args, struct buffer_head *));
break;
case 'b':
- sprintf_buffer_head(p,
- va_arg(args, struct buffer_head *));
+ p += scnprintf_buffer_head(p, end - p,
+ va_arg(args, struct buffer_head *));
break;
case 'a':
- sprintf_de_head(p,
- va_arg(args,
- struct reiserfs_de_head *));
+ p += scnprintf_de_head(p, end - p,
+ va_arg(args, struct reiserfs_de_head *));
break;
}
- p += strlen(p);
fmt1 = k + 2;
}
- vsprintf(p, fmt1, args);
+ p += vscnprintf(p, end - p, fmt1, args);
+out_unlock:
spin_unlock(&error_lock);
}
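
All of the helpers above now follow the same bounded-append idiom: scnprintf() returns the number of bytes actually written (excluding the NUL), so a cursor can be advanced safely and can never run past 'end'. A minimal sketch of that idiom with hypothetical field names:

#include <linux/kernel.h>	/* scnprintf() */

static size_t describe(char *buf, size_t size, int a, int b)
{
	char *p = buf;
	char * const end = buf + size;

	p += scnprintf(p, end - p, "a=%d", a);
	p += scnprintf(p, end - p, ", b=%d", b);	/* silently truncated if full */

	return p - buf;		/* total bytes written, NUL excluded */
}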
diff --git a/fs/select.c b/fs/select.c
index 317891ff8165..4a6b6e4b21cb 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -34,29 +34,6 @@
#include <linux/uaccess.h>
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
-{
- if (file->f_op->poll) {
- return file->f_op->poll(file, pt);
- } else if (file_has_poll_mask(file)) {
- unsigned int events = poll_requested_events(pt);
- struct wait_queue_head *head;
-
- if (pt && pt->_qproc) {
- head = file->f_op->get_poll_head(file, events);
- if (!head)
- return DEFAULT_POLLMASK;
- if (IS_ERR(head))
- return EPOLLERR;
- pt->_qproc(file, head, pt);
- }
-
- return file->f_op->poll_mask(file, events);
- } else {
- return DEFAULT_POLLMASK;
- }
-}
-EXPORT_SYMBOL_GPL(vfs_poll);
/*
* Estimate expected accuracy in ns from a timeval.
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c
index 23813c078cc9..0839efa720b3 100644
--- a/fs/squashfs/cache.c
+++ b/fs/squashfs/cache.c
@@ -350,6 +350,9 @@ int squashfs_read_metadata(struct super_block *sb, void *buffer,
TRACE("Entered squashfs_read_metadata [%llx:%x]\n", *block, *offset);
+ if (unlikely(length < 0))
+ return -EIO;
+
while (length) {
entry = squashfs_cache_get(sb, msblk->block_cache, *block, 0);
if (entry->error) {
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 13d80947bf9e..fcff2e0487fe 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -194,7 +194,11 @@ static long long read_indexes(struct super_block *sb, int n,
}
for (i = 0; i < blocks; i++) {
- int size = le32_to_cpu(blist[i]);
+ int size = squashfs_block_size(blist[i]);
+ if (size < 0) {
+ err = size;
+ goto failure;
+ }
block += SQUASHFS_COMPRESSED_SIZE_BLOCK(size);
}
n -= blocks;
@@ -367,7 +371,7 @@ static int read_blocklist(struct inode *inode, int index, u64 *block)
sizeof(size));
if (res < 0)
return res;
- return le32_to_cpu(size);
+ return squashfs_block_size(size);
}
/* Copy data into page cache */
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c
index 0ed6edbc5c71..86ad9a4b8c36 100644
--- a/fs/squashfs/fragment.c
+++ b/fs/squashfs/fragment.c
@@ -61,9 +61,7 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment,
return size;
*fragment_block = le64_to_cpu(fragment_entry.start_block);
- size = le32_to_cpu(fragment_entry.size);
-
- return size;
+ return squashfs_block_size(fragment_entry.size);
}
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 24d12fd14177..4e6853f084d0 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -129,6 +129,12 @@
#define SQUASHFS_COMPRESSED_BLOCK(B) (!((B) & SQUASHFS_COMPRESSED_BIT_BLOCK))
+static inline int squashfs_block_size(__le32 raw)
+{
+ u32 size = le32_to_cpu(raw);
+ return (size >> 25) ? -EIO : size;
+}
+
/*
* Inode number ops. Inodes consist of a compressed block number, and an
* uncompressed offset within that block
diff --git a/fs/timerfd.c b/fs/timerfd.c
index d84a2bee4f82..cdad49da3ff7 100644
--- a/fs/timerfd.c
+++ b/fs/timerfd.c
@@ -226,20 +226,21 @@ static int timerfd_release(struct inode *inode, struct file *file)
kfree_rcu(ctx, rcu);
return 0;
}
-
-static struct wait_queue_head *timerfd_get_poll_head(struct file *file,
- __poll_t eventmask)
+
+static __poll_t timerfd_poll(struct file *file, poll_table *wait)
{
struct timerfd_ctx *ctx = file->private_data;
+ __poll_t events = 0;
+ unsigned long flags;
- return &ctx->wqh;
-}
+ poll_wait(file, &ctx->wqh, wait);
-static __poll_t timerfd_poll_mask(struct file *file, __poll_t eventmask)
-{
- struct timerfd_ctx *ctx = file->private_data;
+ spin_lock_irqsave(&ctx->wqh.lock, flags);
+ if (ctx->ticks)
+ events |= EPOLLIN;
+ spin_unlock_irqrestore(&ctx->wqh.lock, flags);
- return ctx->ticks ? EPOLLIN : 0;
+ return events;
}
static ssize_t timerfd_read(struct file *file, char __user *buf, size_t count,
@@ -363,8 +364,7 @@ static long timerfd_ioctl(struct file *file, unsigned int cmd, unsigned long arg
static const struct file_operations timerfd_fops = {
.release = timerfd_release,
- .get_poll_head = timerfd_get_poll_head,
- .poll_mask = timerfd_poll_mask,
+ .poll = timerfd_poll,
.read = timerfd_read,
.llseek = noop_llseek,
.show_fdinfo = timerfd_show,
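
The timerfd change reverts to the classic ->poll contract: register on the waitqueue with poll_wait(), then compute the ready mask under the context lock. A sketch of that shape for a hypothetical driver context (struct foo_ctx is not from this patch):

#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

struct foo_ctx {			/* hypothetical */
	wait_queue_head_t wqh;
	spinlock_t lock;
	bool data_ready;
};

static __poll_t foo_poll(struct file *file, poll_table *wait)
{
	struct foo_ctx *ctx = file->private_data;
	__poll_t events = 0;

	poll_wait(file, &ctx->wqh, wait);	/* never blocks */

	spin_lock_irq(&ctx->lock);
	if (ctx->data_ready)
		events |= EPOLLIN;
	spin_unlock_irq(&ctx->lock);

	return events;
}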
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b961b1d9699..fcda0fc97b90 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -533,8 +533,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
udf_write_aext(table, &epos, &eloc,
(etype << 30) | elen, 1);
} else
- udf_delete_aext(table, epos, eloc,
- (etype << 30) | elen);
+ udf_delete_aext(table, epos);
} else {
alloc_count = 0;
}
@@ -630,7 +629,7 @@ static udf_pblk_t udf_table_new_block(struct super_block *sb,
if (goal_elen)
udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
else
- udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+ udf_delete_aext(table, goal_epos);
brelse(goal_epos.bh);
udf_add_free_space(sb, partition, -1);
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 0a98a2369738..d9523013096f 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -141,10 +141,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
fibh->ebh->b_data,
sizeof(struct fileIdentDesc) + fibh->soffset);
- fi_len = (sizeof(struct fileIdentDesc) +
- cfi->lengthFileIdent +
- le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+ fi_len = udf_dir_entry_len(cfi);
*nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
fibh->eoffset = fibh->soffset + fi_len;
} else {
@@ -152,6 +149,9 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
sizeof(struct fileIdentDesc));
}
}
+ /* Got last entry outside of dir size - fs is corrupted! */
+ if (*nf_pos > dir->i_size)
+ return NULL;
return fi;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7f39d17352c9..9915a58fbabd 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1147,8 +1147,7 @@ static void udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr
if (startnum > endnum) {
for (i = 0; i < (startnum - endnum); i++)
- udf_delete_aext(inode, *epos, laarr[i].extLocation,
- laarr[i].extLength);
+ udf_delete_aext(inode, *epos);
} else if (startnum < endnum) {
for (i = 0; i < (endnum - startnum); i++) {
udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
return (nelen >> 30);
}
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
- struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
struct extent_position oepos;
int adsize;
int8_t etype;
struct allocExtDesc *aed;
struct udf_inode_info *iinfo;
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
if (epos.bh) {
get_bh(epos.bh);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c586026508db..06f37ddd2997 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -351,8 +351,6 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
- uint8_t lfi;
- uint16_t liu;
udf_pblk_t block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
@@ -383,7 +381,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
namelen = 0;
}
- nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+ nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
f_pos = udf_ext0_offset(dir);
@@ -424,12 +422,8 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
goto out_err;
}
- liu = le16_to_cpu(cfi->lengthOfImpUse);
- lfi = cfi->lengthFileIdent;
-
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (((sizeof(struct fileIdentDesc) +
- liu + lfi + 3) & ~3) == nfidlen) {
+ if (udf_dir_entry_len(cfi) == nfidlen) {
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
@@ -1201,9 +1195,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
if (dir_fi) {
dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
- udf_update_tag((char *)dir_fi,
- (sizeof(struct fileIdentDesc) +
- le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+ udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(old_inode);
else
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index bae311b59400..84c47dde4d26 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -132,6 +132,12 @@ struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
struct fileIdentDesc *, struct udf_fileident_bh *,
uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+ return ALIGN(sizeof(struct fileIdentDesc) +
+ le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+ UDF_NAME_PAD);
+}
/* file.c */
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
@@ -167,8 +173,7 @@ extern int udf_add_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
extern void udf_write_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
- struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
extern int8_t udf_next_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, int);
extern int8_t udf_current_aext(struct inode *, struct extent_position *,
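
udf_dir_entry_len() centralises the rounding the UDF call sites used to open-code: ALIGN(x, UDF_NAME_PAD) with a pad of 4 computes the same value as the old '(x + 3) & ~3', e.g. ALIGN(41, 4) == 44 and ALIGN(44, 4) == 44. A tiny sketch of the arithmetic (the field names are illustrative, not from the patch):

#include <linux/kernel.h>	/* ALIGN() */

/* ALIGN(x, 4) == (x + 3) & ~3, matching the mask removed above. */
static unsigned int entry_len(unsigned int fixed, unsigned int impuse,
			      unsigned int name)
{
	return ALIGN(fixed + impuse + name, 4);	/* 4 == the old open-coded pad */
}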
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index 123bf7d516fc..594d192b2331 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -222,24 +222,26 @@ static inline bool userfaultfd_huge_must_wait(struct userfaultfd_ctx *ctx,
unsigned long reason)
{
struct mm_struct *mm = ctx->mm;
- pte_t *pte;
+ pte_t *ptep, pte;
bool ret = true;
VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
- pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
- if (!pte)
+ ptep = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));
+
+ if (!ptep)
goto out;
ret = false;
+ pte = huge_ptep_get(ptep);
/*
* Lockless access: we're in a wait_event so it's ok if it
* changes under us.
*/
- if (huge_pte_none(*pte))
+ if (huge_pte_none(pte))
ret = true;
- if (!huge_pte_write(*pte) && (reason & VM_UFFD_WP))
+ if (!huge_pte_write(pte) && (reason & VM_UFFD_WP))
ret = true;
out:
return ret;
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index 84db76e0e3e3..fecd187fcf2c 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -157,6 +157,7 @@ __xfs_ag_resv_free(
error = xfs_mod_fdblocks(pag->pag_mount, oldresv, true);
resv->ar_reserved = 0;
resv->ar_asked = 0;
+ resv->ar_orig_reserved = 0;
if (error)
trace_xfs_ag_resv_free_error(pag->pag_mount, pag->pag_agno,
@@ -189,13 +190,34 @@ __xfs_ag_resv_init(
struct xfs_mount *mp = pag->pag_mount;
struct xfs_ag_resv *resv;
int error;
- xfs_extlen_t reserved;
+ xfs_extlen_t hidden_space;
if (used > ask)
ask = used;
- reserved = ask - used;
- error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+ switch (type) {
+ case XFS_AG_RESV_RMAPBT:
+ /*
+ * Space taken by the rmapbt is not subtracted from fdblocks
+ * because the rmapbt lives in the free space. Here we must
+ * subtract the entire reservation from fdblocks so that we
+ * always have blocks available for rmapbt expansion.
+ */
+ hidden_space = ask;
+ break;
+ case XFS_AG_RESV_METADATA:
+ /*
+ * Space taken by all other metadata btrees are accounted
+ * on-disk as used space. We therefore only hide the space
+ * that is reserved but not used by the trees.
+ */
+ hidden_space = ask - used;
+ break;
+ default:
+ ASSERT(0);
+ return -EINVAL;
+ }
+ error = xfs_mod_fdblocks(mp, -(int64_t)hidden_space, true);
if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
@@ -216,7 +238,8 @@ __xfs_ag_resv_init(
resv = xfs_perag_resv(pag, type);
resv->ar_asked = ask;
- resv->ar_reserved = resv->ar_orig_reserved = reserved;
+ resv->ar_orig_reserved = hidden_space;
+ resv->ar_reserved = ask - used;
trace_xfs_ag_resv_init(pag, type, ask);
return 0;
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index eef466260d43..75dbdc14c45f 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -223,12 +223,13 @@ xfs_alloc_get_rec(
error = xfs_btree_get_rec(cur, &rec, stat);
if (error || !(*stat))
return error;
- if (rec->alloc.ar_blockcount == 0)
- goto out_bad_rec;
*bno = be32_to_cpu(rec->alloc.ar_startblock);
*len = be32_to_cpu(rec->alloc.ar_blockcount);
+ if (*len == 0)
+ goto out_bad_rec;
+
/* check for valid extent range, including overflow */
if (!xfs_verify_agbno(mp, agno, *bno))
goto out_bad_rec;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 01628f0c9a0c..7205268b30bc 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -5780,6 +5780,32 @@ del_cursor:
return error;
}
+/* Make sure we won't be right-shifting an extent past the maximum bound. */
+int
+xfs_bmap_can_insert_extents(
+ struct xfs_inode *ip,
+ xfs_fileoff_t off,
+ xfs_fileoff_t shift)
+{
+ struct xfs_bmbt_irec got;
+ int is_empty;
+ int error = 0;
+
+ ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
+
+ if (XFS_FORCED_SHUTDOWN(ip->i_mount))
+ return -EIO;
+
+ xfs_ilock(ip, XFS_ILOCK_EXCL);
+ error = xfs_bmap_last_extent(NULL, ip, XFS_DATA_FORK, &got, &is_empty);
+ if (!error && !is_empty && got.br_startoff >= off &&
+ ((got.br_startoff + shift) & BMBT_STARTOFF_MASK) < got.br_startoff)
+ error = -EINVAL;
+ xfs_iunlock(ip, XFS_ILOCK_EXCL);
+
+ return error;
+}
+
int
xfs_bmap_insert_extents(
struct xfs_trans *tp,
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index 99dddbd0fcc6..9b49ddf99c41 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -227,6 +227,8 @@ int xfs_bmap_collapse_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fsblock_t *firstblock,
struct xfs_defer_ops *dfops);
+int xfs_bmap_can_insert_extents(struct xfs_inode *ip, xfs_fileoff_t off,
+ xfs_fileoff_t shift);
int xfs_bmap_insert_extents(struct xfs_trans *tp, struct xfs_inode *ip,
xfs_fileoff_t *next_fsb, xfs_fileoff_t offset_shift_fsb,
bool *done, xfs_fileoff_t stop_fsb, xfs_fsblock_t *firstblock,
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 1c5a8aaf2bfc..059bc44c27e8 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -962,6 +962,9 @@ typedef enum xfs_dinode_fmt {
XFS_DFORK_DSIZE(dip, mp) : \
XFS_DFORK_ASIZE(dip, mp))
+#define XFS_DFORK_MAXEXT(dip, mp, w) \
+ (XFS_DFORK_SIZE(dip, mp, w) / sizeof(struct xfs_bmbt_rec))
+
/*
* Return pointers to the data or attribute forks.
*/
@@ -1526,6 +1529,8 @@ typedef struct xfs_bmdr_block {
#define BMBT_STARTBLOCK_BITLEN 52
#define BMBT_BLOCKCOUNT_BITLEN 21
+#define BMBT_STARTOFF_MASK ((1ULL << BMBT_STARTOFF_BITLEN) - 1)
+
typedef struct xfs_bmbt_rec {
__be64 l0, l1;
} xfs_bmbt_rec_t;
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index d38d724534c4..30d1d60f1d46 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -374,6 +374,47 @@ xfs_log_dinode_to_disk(
}
}
+static xfs_failaddr_t
+xfs_dinode_verify_fork(
+ struct xfs_dinode *dip,
+ struct xfs_mount *mp,
+ int whichfork)
+{
+ uint32_t di_nextents = XFS_DFORK_NEXTENTS(dip, whichfork);
+
+ switch (XFS_DFORK_FORMAT(dip, whichfork)) {
+ case XFS_DINODE_FMT_LOCAL:
+ /*
+ * no local regular files yet
+ */
+ if (whichfork == XFS_DATA_FORK) {
+ if (S_ISREG(be16_to_cpu(dip->di_mode)))
+ return __this_address;
+ if (be64_to_cpu(dip->di_size) >
+ XFS_DFORK_SIZE(dip, mp, whichfork))
+ return __this_address;
+ }
+ if (di_nextents)
+ return __this_address;
+ break;
+ case XFS_DINODE_FMT_EXTENTS:
+ if (di_nextents > XFS_DFORK_MAXEXT(dip, mp, whichfork))
+ return __this_address;
+ break;
+ case XFS_DINODE_FMT_BTREE:
+ if (whichfork == XFS_ATTR_FORK) {
+ if (di_nextents > MAXAEXTNUM)
+ return __this_address;
+ } else if (di_nextents > MAXEXTNUM) {
+ return __this_address;
+ }
+ break;
+ default:
+ return __this_address;
+ }
+ return NULL;
+}
+
xfs_failaddr_t
xfs_dinode_verify(
struct xfs_mount *mp,
@@ -441,24 +482,9 @@ xfs_dinode_verify(
case S_IFREG:
case S_IFLNK:
case S_IFDIR:
- switch (dip->di_format) {
- case XFS_DINODE_FMT_LOCAL:
- /*
- * no local regular files yet
- */
- if (S_ISREG(mode))
- return __this_address;
- if (di_size > XFS_DFORK_DSIZE(dip, mp))
- return __this_address;
- if (dip->di_nextents)
- return __this_address;
- /* fall through */
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- break;
- default:
- return __this_address;
- }
+ fa = xfs_dinode_verify_fork(dip, mp, XFS_DATA_FORK);
+ if (fa)
+ return fa;
break;
case 0:
/* Uninitialized inode ok. */
@@ -468,17 +494,9 @@ xfs_dinode_verify(
}
if (XFS_DFORK_Q(dip)) {
- switch (dip->di_aformat) {
- case XFS_DINODE_FMT_LOCAL:
- if (dip->di_anextents)
- return __this_address;
- /* fall through */
- case XFS_DINODE_FMT_EXTENTS:
- case XFS_DINODE_FMT_BTREE:
- break;
- default:
- return __this_address;
- }
+ fa = xfs_dinode_verify_fork(dip, mp, XFS_ATTR_FORK);
+ if (fa)
+ return fa;
} else {
/*
* If there is no fork offset, this may be a freshly-made inode
@@ -713,7 +731,8 @@ xfs_inode_validate_extsize(
if ((hint_flag || inherit_flag) && extsize == 0)
return __this_address;
- if (!(hint_flag || inherit_flag) && extsize != 0)
+ /* free inodes get flags set to zero but extsize remains */
+ if (mode && !(hint_flag || inherit_flag) && extsize != 0)
return __this_address;
if (extsize_bytes % blocksize_bytes)
@@ -759,7 +778,8 @@ xfs_inode_validate_cowextsize(
if (hint_flag && cowextsize == 0)
return __this_address;
- if (!hint_flag && cowextsize != 0)
+ /* free inodes get flags set to zero but cowextsize remains */
+ if (mode && !hint_flag && cowextsize != 0)
return __this_address;
if (hint_flag && rt_flag)
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 65fc4ed2e9a1..b228c821bae6 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -1029,8 +1029,8 @@ xfs_rtalloc_query_range(
if (low_rec->ar_startext >= mp->m_sb.sb_rextents ||
low_rec->ar_startext == high_rec->ar_startext)
return 0;
- if (high_rec->ar_startext >= mp->m_sb.sb_rextents)
- high_rec->ar_startext = mp->m_sb.sb_rextents - 1;
+ if (high_rec->ar_startext > mp->m_sb.sb_rextents)
+ high_rec->ar_startext = mp->m_sb.sb_rextents;
/* Iterate the bitmap, looking for discrepancies. */
rtstart = low_rec->ar_startext;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index c35009a86699..83b1e8c6c18f 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -685,12 +685,10 @@ out_unlock_iolock:
}
/*
- * dead simple method of punching delalyed allocation blocks from a range in
- * the inode. Walks a block at a time so will be slow, but is only executed in
- * rare error cases so the overhead is not critical. This will always punch out
- * both the start and end blocks, even if the ranges only partially overlap
- * them, so it is up to the caller to ensure that partial blocks are not
- * passed in.
+ * Dead simple method of punching delayed allocation blocks from a range in
+ * the inode. This will always punch out both the start and end blocks, even
+ * if the ranges only partially overlap them, so it is up to the caller to
+ * ensure that partial blocks are not passed in.
*/
int
xfs_bmap_punch_delalloc_range(
@@ -698,63 +696,44 @@ xfs_bmap_punch_delalloc_range(
xfs_fileoff_t start_fsb,
xfs_fileoff_t length)
{
- xfs_fileoff_t remaining = length;
+ struct xfs_ifork *ifp = &ip->i_df;
+ xfs_fileoff_t end_fsb = start_fsb + length;
+ struct xfs_bmbt_irec got, del;
+ struct xfs_iext_cursor icur;
int error = 0;
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
- do {
- int done;
- xfs_bmbt_irec_t imap;
- int nimaps = 1;
- xfs_fsblock_t firstblock;
- struct xfs_defer_ops dfops;
+ if (!(ifp->if_flags & XFS_IFEXTENTS)) {
+ error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
+ if (error)
+ return error;
+ }
- /*
- * Map the range first and check that it is a delalloc extent
- * before trying to unmap the range. Otherwise we will be
- * trying to remove a real extent (which requires a
- * transaction) or a hole, which is probably a bad idea...
- */
- error = xfs_bmapi_read(ip, start_fsb, 1, &imap, &nimaps,
- XFS_BMAPI_ENTIRE);
+ if (!xfs_iext_lookup_extent_before(ip, ifp, &end_fsb, &icur, &got))
+ return 0;
- if (error) {
- /* something screwed, just bail */
- if (!XFS_FORCED_SHUTDOWN(ip->i_mount)) {
- xfs_alert(ip->i_mount,
- "Failed delalloc mapping lookup ino %lld fsb %lld.",
- ip->i_ino, start_fsb);
- }
- break;
- }
- if (!nimaps) {
- /* nothing there */
- goto next_block;
- }
- if (imap.br_startblock != DELAYSTARTBLOCK) {
- /* been converted, ignore */
- goto next_block;
- }
- WARN_ON(imap.br_blockcount == 0);
+ while (got.br_startoff + got.br_blockcount > start_fsb) {
+ del = got;
+ xfs_trim_extent(&del, start_fsb, length);
/*
- * Note: while we initialise the firstblock/dfops pair, they
- * should never be used because blocks should never be
- * allocated or freed for a delalloc extent and hence we need
- * don't cancel or finish them after the xfs_bunmapi() call.
+ * A delete can push the cursor forward. Step back to the
+ * previous extent on non-delalloc or extents outside the
+ * target range.
*/
- xfs_defer_init(&dfops, &firstblock);
- error = xfs_bunmapi(NULL, ip, start_fsb, 1, 0, 1, &firstblock,
- &dfops, &done);
- if (error)
- break;
+ if (!del.br_blockcount ||
+ !isnullstartblock(del.br_startblock)) {
+ if (!xfs_iext_prev_extent(ifp, &icur, &got))
+ break;
+ continue;
+ }
- ASSERT(!xfs_defer_has_unfinished_work(&dfops));
-next_block:
- start_fsb++;
- remaining--;
- } while(remaining > 0);
+ error = xfs_bmap_del_extent_delay(ip, XFS_DATA_FORK, &icur,
+ &got, &del);
+ if (error || !xfs_iext_get_extent(ifp, &icur, &got))
+ break;
+ }
return error;
}
@@ -1208,7 +1187,22 @@ xfs_free_file_space(
return 0;
if (offset + len > XFS_ISIZE(ip))
len = XFS_ISIZE(ip) - offset;
- return iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ error = iomap_zero_range(VFS_I(ip), offset, len, NULL, &xfs_iomap_ops);
+ if (error)
+ return error;
+
+ /*
+ * If we zeroed right up to EOF and EOF straddles a page boundary we
+ * must make sure that the post-EOF area is also zeroed because the
+ * page could be mmap'd and iomap_zero_range doesn't do that for us.
+ * Writeback of the eof page will do this, albeit clumsily.
+ */
+ if (offset + len >= XFS_ISIZE(ip) && ((offset + len) & PAGE_MASK)) {
+ error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
+ (offset + len) & ~PAGE_MASK, LLONG_MAX);
+ }
+
+ return error;
}
/*
@@ -1404,6 +1398,10 @@ xfs_insert_file_space(
trace_xfs_insert_file_space(ip);
+ error = xfs_bmap_can_insert_extents(ip, stop_fsb, shift_fsb);
+ if (error)
+ return error;
+
error = xfs_prepare_shift(ip, offset);
if (error)
return error;
diff --git a/fs/xfs/xfs_fsmap.c b/fs/xfs/xfs_fsmap.c
index c34fa9c342f2..c7157bc48bd1 100644
--- a/fs/xfs/xfs_fsmap.c
+++ b/fs/xfs/xfs_fsmap.c
@@ -513,8 +513,8 @@ xfs_getfsmap_rtdev_rtbitmap_query(
struct xfs_trans *tp,
struct xfs_getfsmap_info *info)
{
- struct xfs_rtalloc_rec alow;
- struct xfs_rtalloc_rec ahigh;
+ struct xfs_rtalloc_rec alow = { 0 };
+ struct xfs_rtalloc_rec ahigh = { 0 };
int error;
xfs_ilock(tp->t_mountp->m_rbmip, XFS_ILOCK_SHARED);
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a7afcad6b711..3f2bd6032cf8 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -387,7 +387,7 @@ xfs_reserve_blocks(
do {
free = percpu_counter_sum(&mp->m_fdblocks) -
mp->m_alloc_set_aside;
- if (!free)
+ if (free <= 0)
break;
delta = request - mp->m_resblks;
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 7a96c4e0ab5c..5df4de666cc1 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -3236,7 +3236,6 @@ xfs_iflush_cluster(
struct xfs_inode *cip;
int nr_found;
int clcount = 0;
- int bufwasdelwri;
int i;
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
@@ -3360,37 +3359,22 @@ cluster_corrupt_out:
* inode buffer and shut down the filesystem.
*/
rcu_read_unlock();
- /*
- * Clean up the buffer. If it was delwri, just release it --
- * brelse can handle it with no problems. If not, shut down the
- * filesystem before releasing the buffer.
- */
- bufwasdelwri = (bp->b_flags & _XBF_DELWRI_Q);
- if (bufwasdelwri)
- xfs_buf_relse(bp);
-
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
- if (!bufwasdelwri) {
- /*
- * Just like incore_relse: if we have b_iodone functions,
- * mark the buffer as an error and call them. Otherwise
- * mark it as stale and brelse.
- */
- if (bp->b_iodone) {
- bp->b_flags &= ~XBF_DONE;
- xfs_buf_stale(bp);
- xfs_buf_ioerror(bp, -EIO);
- xfs_buf_ioend(bp);
- } else {
- xfs_buf_stale(bp);
- xfs_buf_relse(bp);
- }
- }
-
/*
- * Unlocks the flush lock
+ * We'll always have an inode attached to the buffer for completion
+ * processing by the time we are called from xfs_iflush(). Hence we
+ * always need to do IO completion processing to abort the inodes
+ * attached to the buffer. Handle them just like the shutdown case in
+ * xfs_buf_submit().
*/
+ ASSERT(bp->b_iodone);
+ bp->b_flags &= ~XBF_DONE;
+ xfs_buf_stale(bp);
+ xfs_buf_ioerror(bp, -EIO);
+ xfs_buf_ioend(bp);
+
+ /* abort the corrupt inode, as it was not attached to the buffer */
xfs_iflush_abort(cip, false);
kmem_free(cilist);
xfs_perag_put(pag);
@@ -3486,12 +3470,17 @@ xfs_iflush(
xfs_log_force(mp, 0);
/*
- * inode clustering:
- * see if other inodes can be gathered into this write
+ * inode clustering: try to gather other inodes into this write
+ *
+ * Note: Any error during clustering will result in the filesystem
+ * being shut down and completion callbacks run on the cluster buffer.
+ * As we have already flushed and attached this inode to the buffer,
+ * it has already been aborted and released by xfs_iflush_cluster() and
+ * so we have no further error handling to do here.
*/
error = xfs_iflush_cluster(ip, bp);
if (error)
- goto cluster_corrupt_out;
+ return error;
*bpp = bp;
return 0;
@@ -3500,12 +3489,8 @@ corrupt_out:
if (bp)
xfs_buf_relse(bp);
xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
-cluster_corrupt_out:
- error = -EFSCORRUPTED;
abort_out:
- /*
- * Unlocks the flush lock
- */
+ /* abort the corrupt inode, as it was not attached to the buffer */
xfs_iflush_abort(ip, false);
return error;
}
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 49f5492eed3b..55876dd02f0c 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -963,12 +963,13 @@ xfs_ilock_for_iomap(
unsigned *lockmode)
{
unsigned mode = XFS_ILOCK_SHARED;
+ bool is_write = flags & (IOMAP_WRITE | IOMAP_ZERO);
/*
* COW writes may allocate delalloc space or convert unwritten COW
* extents, so we need to make sure to take the lock exclusively here.
*/
- if (xfs_is_reflink_inode(ip) && (flags & (IOMAP_WRITE | IOMAP_ZERO))) {
+ if (xfs_is_reflink_inode(ip) && is_write) {
/*
* FIXME: It could still overwrite on unshared extents and not
* need allocation.
@@ -989,6 +990,7 @@ xfs_ilock_for_iomap(
mode = XFS_ILOCK_EXCL;
}
+relock:
if (flags & IOMAP_NOWAIT) {
if (!xfs_ilock_nowait(ip, mode))
return -EAGAIN;
@@ -996,6 +998,17 @@ xfs_ilock_for_iomap(
xfs_ilock(ip, mode);
}
+ /*
+ * The reflink iflag could have changed since the earlier unlocked
+ * check, so if we got ILOCK_SHARED for a write but we're now a
+ * reflink inode we have to switch to ILOCK_EXCL and relock.
+ */
+ if (mode == XFS_ILOCK_SHARED && is_write && xfs_is_reflink_inode(ip)) {
+ xfs_iunlock(ip, mode);
+ mode = XFS_ILOCK_EXCL;
+ goto relock;
+ }
+
*lockmode = mode;
return 0;
}
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index e040af120b69..524f543c5b82 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -258,7 +258,12 @@ xfs_trans_alloc(
if (!(flags & XFS_TRANS_NO_WRITECOUNT))
sb_start_intwrite(mp->m_super);
- WARN_ON(mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
+ /*
+ * Zero-reservation ("empty") transactions can't modify anything, so
+ * they're allowed to run while we're frozen.
+ */
+ WARN_ON(resp->tr_logres > 0 &&
+ mp->m_super->s_writers.frozen == SB_FREEZE_COMPLETE);
atomic_inc(&mp->m_active_trans);
tp = kmem_zone_zalloc(xfs_trans_zone,
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 40a916efd7c0..1194a4c78d55 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
{
return;
}
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
int event_flag)
{
static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
"Consider compiling CPUfreq support into your kernel.\n");
printout = 0;
}
- return 0;
}
static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 0763f065b975..d10f1e7d6ba8 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@ typedef struct qspinlock {
/*
* Initializer
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
/*
* Bitfields in the atomic value:
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..3063125197ad 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -265,33 +265,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
* For now w.r.t page table cache, mark the range_size as PAGE_SIZE
*/
+#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
+#endif
+#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
+#endif
#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
+#endif
#ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
+#endif
#define tlb_migrate_finish(mm) do {} while (0)
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index cc414db9da0a..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 9564597cbfac..0aa1d9c3e0b9 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -235,27 +235,25 @@
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
-#define IMX6UL_CLK_CKO1_SEL 225
-#define IMX6UL_CLK_CKO1_PODF 226
-#define IMX6UL_CLK_CKO1 227
-#define IMX6UL_CLK_CKO2_SEL 228
-#define IMX6UL_CLK_CKO2_PODF 229
-#define IMX6UL_CLK_CKO2 230
-#define IMX6UL_CLK_CKO 231
-
-/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED 232
-#define IMX6ULL_CLK_ESAI_PODF 233
-#define IMX6ULL_CLK_ESAI_EXTAL 234
-#define IMX6ULL_CLK_ESAI_MEM 235
-#define IMX6ULL_CLK_ESAI_IPG 236
-#define IMX6ULL_CLK_DCP_CLK 237
-#define IMX6ULL_CLK_EPDC_PRE_SEL 238
-#define IMX6ULL_CLK_EPDC_SEL 239
-#define IMX6ULL_CLK_EPDC_PODF 240
-#define IMX6ULL_CLK_EPDC_ACLK 241
-#define IMX6ULL_CLK_EPDC_PIX 242
-#define IMX6ULL_CLK_ESAI_SEL 243
+#define IMX6ULL_CLK_ESAI_PRED 225
+#define IMX6ULL_CLK_ESAI_PODF 226
+#define IMX6ULL_CLK_ESAI_EXTAL 227
+#define IMX6ULL_CLK_ESAI_MEM 228
+#define IMX6ULL_CLK_ESAI_IPG 229
+#define IMX6ULL_CLK_DCP_CLK 230
+#define IMX6ULL_CLK_EPDC_PRE_SEL 231
+#define IMX6ULL_CLK_EPDC_SEL 232
+#define IMX6ULL_CLK_EPDC_PODF 233
+#define IMX6ULL_CLK_EPDC_ACLK 234
+#define IMX6ULL_CLK_EPDC_PIX 235
+#define IMX6ULL_CLK_ESAI_SEL 236
+#define IMX6UL_CLK_CKO1_SEL 237
+#define IMX6UL_CLK_CKO1_PODF 238
+#define IMX6UL_CLK_CKO1 239
+#define IMX6UL_CLK_CKO2_SEL 240
+#define IMX6UL_CLK_CKO2_PODF 241
+#define IMX6UL_CLK_CKO2 242
+#define IMX6UL_CLK_CKO 243
#define IMX6UL_CLK_END 244
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4b35a66383f9..e54f40974eb0 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+ u32 level);
+
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0c27515d2cf6..8124815eb121 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -214,6 +214,7 @@ struct atmphy_ops {
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
+ unsigned int acct_truesize; /* truesize accounted to vcc */
};
#define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+ * Because ATM skbs may not belong to a sock (and we don't
+ * necessarily want to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
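
atm_account_tx() charges skb->truesize to the socket and stashes the charged amount in the per-skb cb area, so the pop path can later subtract exactly what was added even if truesize changes in flight. A hedged sketch of the intended driver-side call (foo_send is hypothetical; the prototype matches struct atmdev_ops->send):

#include <linux/atmdev.h>
#include <linux/skbuff.h>

static int foo_send(struct atm_vcc *vcc, struct sk_buff *skb)
{
	atm_account_tx(vcc, skb);	/* charge sk_wmem_alloc, record acct_truesize */

	/* ... queue skb to hardware; completion pops it via the vcc ... */
	return 0;
}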
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0bd432a4d7bd..24251762c20c 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -22,7 +22,6 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
- WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
WB_start_all, /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@ struct backing_dev_info {
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..ca3f2c2edd85 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -287,6 +287,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if request state was successfully set to complete. If
+ * successful, the caller is responsible for ensuring this request is ended, as
+ * blk_mq_complete_request will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+ return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+ MQ_RQ_IN_FLIGHT;
+}
+
/*
* Driver command data is immediately after the request. So subtract request
* size to get back to the original request, add request size to get the PDU.
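
blk_mq_mark_complete() is a cmpxchg on the request state, so only one path (normal completion or an error/timeout handler) can claim a given request. A hedged sketch of a driver-side caller (foo_abort_request is hypothetical):

#include <linux/blk-mq.h>

static void foo_abort_request(struct request *rq)
{
	/* Winner of the race gets to end the request; the loser backs off. */
	if (blk_mq_mark_complete(rq))
		blk_mq_end_request(rq, BLK_STS_IOERR);
}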
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9154570edf29..79226ca8f80f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1119,8 +1119,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
if (!q->limits.chunk_sectors)
return q->limits.max_sectors;
- return q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1));
+ return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1))));
}
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 975fb4cf1bb7..d50c2f0a655a 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,6 +2,7 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/errno.h>
#include <linux/jump_label.h>
#include <uapi/linux/bpf.h>
@@ -188,12 +189,38 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
\
__ret; \
})
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
#else
+struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 995c3b1e59bf..8827e797ff97 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -488,12 +488,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
struct xdp_buff;
+struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -586,6 +589,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
return 0;
}
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -684,6 +696,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -702,6 +716,12 @@ static inline int sock_map_prog(struct bpf_map *map,
{
return -EOPNOTSUPP;
}
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_XDP_SOCKETS)
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
index 5f8a4283092d..9d9ff755ec29 100644
--- a/include/linux/bpf_lirc.h
+++ b/include/linux/bpf_lirc.h
@@ -5,11 +5,12 @@
#include <uapi/linux/bpf.h>
#ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int lirc_prog_detach(const union bpf_attr *attr);
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
#else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
{
return -EINVAL;
}
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index 687b1760bb9f..f02cee0225d4 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -5,10 +5,10 @@
#include <uapi/linux/bpfilter.h>
struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
- int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
char __user *optval,
unsigned int optlen, bool is_set);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b1a5562b3215..c68acc47da57 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -72,6 +72,9 @@
*/
#ifndef COMPAT_SYSCALL_DEFINEx
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_compat_sys##name)))); \
@@ -80,8 +83,11 @@
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
- return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
} \
+ __diag_pop(); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index f1a7492a5cc8..573f5a7d42d4 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -66,25 +66,40 @@
#endif
/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline __attribute__((gnu_inline))
+#else
+# define __gnu_inline
+#endif
+
+/*
* Force always-inline if the user requests it so via the .config,
* or if gcc is too old.
* GCC does not warn about unused static inline functions for
* -Wunused-function. This turns out to avoid the need for complex #ifdef
* directives. Suppress the warning in clang as well by using "unused"
* function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
*/
#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
!defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
-#define __inline __inline __attribute__((always_inline,unused)) notrace
+#define inline \
+ inline __attribute__((always_inline, unused)) notrace __gnu_inline
#else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline __attribute__((unused)) notrace
-#define __inline__ __inline__ __attribute__((unused)) notrace
-#define __inline __inline __attribute__((unused)) notrace
+#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif
+#define __inline__ inline
+#define __inline inline
#define __always_inline inline __attribute__((always_inline))
#define noinline __attribute__((noinline))
@@ -347,3 +362,28 @@
#if GCC_VERSION >= 50100
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
+
+/*
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
+ */
+#define __diag_GCC(version, severity, s) \
+ __diag_GCC_ ## version(__diag_GCC_ ## severity s)
+
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore ignored
+#define __diag_GCC_warn warning
+#define __diag_GCC_error error
+
+/* Compilers before gcc-4.6 do not understand "#pragma GCC diagnostic push" */
+#if GCC_VERSION >= 40600
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+#endif
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s) __diag(s)
+#else
+#define __diag_GCC_8(s)
+#endif
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6b79a9bba9a7..a8ba6b04152c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -271,4 +271,22 @@ struct ftrace_likely_data {
# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif
+#ifndef __diag
+#define __diag(string)
+#endif
+
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
+#endif
+
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
+
+#define __diag_ignore(compiler, version, option, comment) \
+ __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+ __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+ __diag_ ## compiler(version, error, option)
+
#endif /* __LINUX_COMPILER_TYPES_H */
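
Taken together, __diag_push()/__diag_ignore()/__diag_pop() expand to '#pragma GCC diagnostic' directives on gcc >= 4.6 (with the versioned variant only acting on gcc >= 8) and to nothing on other compilers; the COMPAT_SYSCALL_DEFINEx hunk earlier in this patch is the first user. A minimal sketch of the pattern around a deliberately mismatched alias (all names are illustrative, not from the patch):

long real_fn(long a)
{
	return a;
}

__diag_push();
__diag_ignore(GCC, 8, "-Wattribute-alias",
	      "the mismatched alias prototype below is intentional");
int aliased_fn(long a) __attribute__((alias("real_fn")));
__diag_pop();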
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 3855e3800f48..deb0f663252f 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
enum page_entry_size pe_size, pfn_t pfn);
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index e6c0448ebcc7..31c865d1842e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
static inline void delayacct_blkio_end(struct task_struct *p)
{
- if (current->delays)
+ if (p->delays)
__delayacct_blkio_end(p);
delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index b67bf6ac907d..3c5a4cb3eb95 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -48,7 +48,7 @@
* CMA should not be used by the device drivers directly. It is
* only a helper framework for dma-mapping subsystem.
*
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ * For more information, see kernel-docs in kernel/dma/contiguous.c
*/
#ifdef __KERNEL__
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 7094718b653b..ffcc7724ca21 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,7 @@
#include <linux/fcntl.h>
#include <linux/wait.h>
+#include <linux/err.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 45fc0f5000d8..c73dd7396886 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
@@ -469,15 +470,16 @@ struct sock_fprog_kern {
};
struct bpf_binary_header {
- unsigned int pages;
- u8 image[];
+ u32 pages;
+ /* Some arches need word alignment for their instructions */
+ u8 image[] __aligned(4);
};
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
jit_requested:1,/* archs need to JIT the prog */
- locked:1, /* Program image locked? */
+ undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -671,50 +673,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
- fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
- if (fp->locked) {
- WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
- /* In case set_memory_rw() fails, we want to be the first
- * to crash here instead of some random place later on.
- */
- fp->locked = 0;
- }
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
+ fp->undo_set_mem = 1;
+ set_memory_ro((unsigned long)fp, fp->pages);
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
+ if (fp->undo_set_mem)
+ set_memory_rw((unsigned long)fp, fp->pages);
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
+ set_memory_ro((unsigned long)hdr, hdr->pages);
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
+ set_memory_rw((unsigned long)hdr, hdr->pages);
}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -786,6 +765,21 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+ unsigned int pktlen)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (pktlen > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
@@ -961,6 +955,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
}
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
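
xdp_ok_fwd_dev() bundles the two checks the generic XDP redirect path needs before handing a frame to another device: the target must be up, and the frame must fit the target's MTU plus link-layer overhead. A hedged sketch of a caller (foo_redirect is hypothetical):

#include <linux/filter.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

static int foo_redirect(struct net_device *fwd, struct sk_buff *skb)
{
	int err = xdp_ok_fwd_dev(fwd, skb->len);

	if (err)
		return err;		/* -ENETDOWN or -EMSGSIZE */

	skb->dev = fwd;			/* retarget, then transmit as usual */
	return 0;
}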
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5c91108846db..805bf22898cf 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1720,8 +1720,6 @@ struct file_operations {
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
- struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
- __poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -2422,6 +2420,7 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern struct file *filp_clone_open(struct file *);
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..941b11811f85 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
#define __FSL_GUTS_H__
#include <linux/types.h>
+#include <linux/io.h>
/**
* Global Utility Registers.
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8154f4920fcb..ebb77674be90 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
*/
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct pt_regs *regs);
@@ -239,7 +238,6 @@ static inline int ftrace_nr_registered_ops(void)
{
return 0;
}
-static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 41a3d5775394..773bcb1d4044 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -511,6 +511,7 @@ struct hid_output_fifo {
#define HID_STAT_ADDED BIT(0)
#define HID_STAT_PARSED BIT(1)
#define HID_STAT_DUP_DETECTED BIT(2)
+#define HID_STAT_REPROBED BIT(3)
struct hid_input {
struct list_head list;
@@ -579,7 +580,7 @@ struct hid_device { /* device report descriptor */
bool battery_avoid_query;
#endif
- unsigned int status; /* see STAT flags above */
+ unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
bool io_started; /* If IO has started */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 7843b98e1c6e..c20c7e197d07 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
- return -1;
+ return -EINVAL;
}
static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
- return -1;
+ return -EINVAL;
}
#endif
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f8231854b5d6..119f53941c12 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -109,6 +109,8 @@ struct ip_mc_list {
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 767467d886de..67c75372b691 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index d7188de4db96..3f4bf60b0bb5 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
return axis == ABS_MT_SLOT || input_is_mt_value(axis);
}
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
void input_mt_report_finger_count(struct input_dev *dev, int count);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1df940196ab2..ef169d67df92 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -121,6 +121,7 @@
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
+#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4bd2f34947f4..201de12a9957 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -503,6 +503,7 @@ struct irq_chip {
* IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI Chip can provide two doorbells for Level MSIs
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 25b33b664537..dd1e40ddac7d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
return desc->irq_common_data.handler_data;
}
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
- return desc->irq_common_data.msi_desc;
-}
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt.
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d23123238534..941dc0a5a877 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -666,7 +666,7 @@ do { \
* your code. (Extra memory is used for special buffers that are
* allocated when trace_printk() is used.)
*
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2803264c512f..c1961761311d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 8b8946dd63b9..32f247cb5e9e 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
@@ -1495,6 +1496,29 @@ static inline bool ata_tag_valid(unsigned int tag)
return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
}
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \
+ for ((tag) = 0; (tag) < (max_tag) && \
+ ({ qc = fn((ap), (tag)); 1; }); (tag)++) \
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
/*
* device helpers
*/
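For illustration only (not from this commit), an error-handling helper might walk active commands with the new iterator; ata_qc_from_tag() still yields NULL for inactive tags, so the body keeps a NULL check:

/* Hypothetical helper: count commands currently owned by the port. */
static unsigned int example_count_active_qcs(struct ata_port *ap)
{
	struct ata_queued_cmd *qc;
	unsigned int tag, n = 0;

	/* Walks tags 0..ATA_MAX_QUEUE-1; qc is NULL for inactive tags. */
	ata_qc_for_each(ap, qc, tag) {
		if (qc)
			n++;
	}
	return n;
}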
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4f5f8c21e283..1eb6f244588d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -27,6 +27,8 @@
*/
#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
+
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31ca3e28b0eb..a6ddefc60517 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -38,6 +38,7 @@ struct memory_block {
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80cbb7fdce4a..83957920653a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -358,6 +358,7 @@ struct mlx5_frag_buf_ctrl {
struct mlx5_frag_buf frag_buf;
u32 sz_m1;
u32 frag_sz_m1;
+ u32 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
@@ -983,14 +984,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
- struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+ u32 strides_offset,
+ struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+ fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
}
static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1013,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
u32 ix)
{
- unsigned int frag = (ix >> fbc->log_frag_strides);
+ unsigned int frag;
+
+ ix += fbc->strides_offset;
+ frag = ix >> fbc->log_frag_strides;
return fbc->frag_buf.frags[frag].buf +
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
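As a worked example with assumed values (not taken from this commit): for log_stride = 6 and PAGE_SHIFT = 12, mlx5_fill_fbc_offset() gives log_frag_strides = 6 and frag_sz_m1 = 63; a lookup with ix = 5 on a control structure created with strides_offset = 60 therefore addresses effective stride 5 + 60 = 65, i.e. fragment 65 >> 6 = 1 at byte offset (65 & 63) << 6 = 64 within that fragment's buffer.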
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index d3c9db492b30..fab5121ffb8f 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -8,6 +8,8 @@
#include <linux/mlx5/driver.h>
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
enum {
SRIOV_NONE,
SRIOV_LEGACY,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 27134c4fcb76..ac281f5ec9b8 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -922,7 +922,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 eswitch_flow_table[0x1];
+ u8 eswitch_manager[0x1];
u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a0fbb9ffe380..7ba6d356d18f 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
* mmap() functions).
*/
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -450,6 +452,20 @@ struct vm_operations_struct {
unsigned long addr);
};
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+ static const struct vm_operations_struct dummy_vm_ops = {};
+
+ vma->vm_mm = mm;
+ vma->vm_ops = &dummy_vm_ops;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+ vma->vm_ops = NULL;
+}
+
struct mmu_gather;
struct inode;
@@ -2132,7 +2148,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
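A minimal sketch of the new allocation interface (assumed usage, not part of this diff): callers no longer reach for vm_area_cachep but go through vm_area_alloc()/vm_area_free(), and mark anonymous mappings with vma_set_anonymous():

static struct vm_area_struct *example_alloc_anon_vma(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	vma = vm_area_alloc(mm);	/* replaces direct vm_area_cachep allocation */
	if (!vma)
		return NULL;

	vma_set_anonymous(vma);		/* vm_ops = NULL marks the VMA anonymous */
	return vma;			/* release with vm_area_free(vma) */
}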
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 2014bd19f28e..96a71a648eed 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -501,6 +501,7 @@ enum dmi_field {
DMI_PRODUCT_VERSION,
DMI_PRODUCT_SERIAL,
DMI_PRODUCT_UUID,
+ DMI_PRODUCT_SKU,
DMI_PRODUCT_FAMILY,
DMI_BOARD_VENDOR,
DMI_BOARD_NAME,
diff --git a/include/linux/net.h b/include/linux/net.h
index 08b6eb964dd6..6554d3ba4396 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -147,7 +147,6 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
- __poll_t (*poll_mask) (struct socket *sock, __poll_t events);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec9850c7936..3d0cc0b5cec2 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2789,11 +2789,31 @@ static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp,
if (PTR_ERR(pp) != -EINPROGRESS)
NAPI_GRO_CB(skb)->flush |= flush;
}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff **pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
#else
static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
{
NAPI_GRO_CB(skb)->flush |= flush;
}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff **pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+}
#endif
static inline int dev_hard_header(struct sk_buff *skb, struct net_device *dev,
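For illustration (hypothetical tunnel-driver code, not part of this commit), a UDP-tunnel gro_receive handler would end with the new helper so remote-checksum state is undone even when the packet is flushed:

static struct sk_buff **example_tunnel_gro_receive(struct sock *sk,
						   struct sk_buff **head,
						   struct sk_buff *skb)
{
	struct sk_buff **pp = NULL;
	struct gro_remcsum grc;
	int flush = 1;

	skb_gro_remcsum_init(&grc);

	/* ... parse the tunnel header; on a match clear 'flush' and set pp ... */

	/* Sets the flush flag, reverts the remcsum adjustment and clears
	 * skb->remcsum_offload unless GRO reported -EINPROGRESS for this skb.
	 */
	skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
	return pp;
}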
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9dee3c23895d..712eed156d09 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1438,6 +1438,8 @@ enum {
NFS_IOHDR_EOF,
NFS_IOHDR_REDO,
NFS_IOHDR_STAT,
+ NFS_IOHDR_RESEND_PNFS,
+ NFS_IOHDR_RESEND_MDS,
};
struct nfs_io_completion;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 340029b2fb38..abd5d5e17aee 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1240,6 +1240,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
resource_size_t offset,
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9206a4fef9ac..cb8d84090cfb 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -234,7 +234,7 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node);
+ struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
@@ -274,9 +274,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
static inline unsigned int
of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node)
+ struct device_node *np)
{
- return -ENODEV;
+ return 0;
}
static inline int genpd_dev_pm_attach(struct device *dev)
diff --git a/include/linux/poll.h b/include/linux/poll.h
index fdf86b4cbc71..7e0fdcf905d2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
pt->_key = ~(__poll_t)0; /* all events enabled */
}
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
{
- return file->f_op->get_poll_head && file->f_op->poll_mask;
+ return file->f_op->poll;
}
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
- return file->f_op->poll || file_has_poll_mask(file);
+ if (unlikely(!file->f_op->poll))
+ return DEFAULT_POLLMASK;
+ return file->f_op->poll(file, pt);
}
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
struct poll_table_entry {
struct file *filp;
__poll_t key;
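A hedged sketch of the replacement interface (helper name assumed): with ->get_poll_head/->poll_mask gone, readiness is probed through file_can_poll() and vfs_poll(), which now falls back to DEFAULT_POLLMASK for files without a ->poll method:

static bool example_file_is_readable(struct file *file, poll_table *wait)
{
	__poll_t mask = vfs_poll(file, wait);	/* DEFAULT_POLLMASK when no ->poll */

	return mask & (EPOLLIN | EPOLLRDNORM);
}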
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 4193c41e383a..a685da2c4522 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -98,5 +98,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+ spinlock_t *lock,
+ unsigned long *flags);
#endif /* _LINUX_REFCOUNT_H */
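A minimal sketch of the new irqsave variant (struct example_obj and its fields are assumptions for illustration): on the final put the lock is taken with interrupts disabled, which suits release paths that may also run from IRQ context:

static void example_obj_put(struct example_obj *obj)
{
	unsigned long flags;

	if (!refcount_dec_and_lock_irqsave(&obj->refs, &obj->lock, &flags))
		return;				/* not the last reference */

	list_del(&obj->node);			/* still protected by obj->lock */
	spin_unlock_irqrestore(&obj->lock, flags);
	kfree(obj);
}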
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b72ebdff0b77..003d09ab308d 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -165,6 +165,7 @@ void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
int ring_buffer_record_is_on(struct ring_buffer *buffer);
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 64125443f8a6..5ef5c7c412a7 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -354,6 +354,8 @@ struct rmi_driver_data {
struct mutex irq_mutex;
struct input_dev *input;
+ struct irq_domain *irqdomain;
+
u8 pdt_props;
u8 num_rx_electrodes;
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 51f52020ad5f..093aa57120b0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -9,9 +9,6 @@
#include <asm/io.h>
struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
@@ -64,7 +61,6 @@ struct sg_table {
*
*/
-#define SG_MAGIC 0x87654321
#define SG_CHAIN 0x01UL
#define SG_END 0x02UL
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
*/
BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
/*
* Set termination bit, clear potential chain bit
*/
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
**/
static inline void sg_unmark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
sg->page_link &= ~SG_END;
}
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
static inline void sg_init_marker(struct scatterlist *sgl,
unsigned int nents)
{
-#ifdef CONFIG_DEBUG_SG
- unsigned int i;
-
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
-#endif
sg_mark_end(&sgl[nents - 1]);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 87bf02d93a27..43731fe51c97 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,7 +118,7 @@ struct task_group;
* the comment with set_special_state().
*/
#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
#define __set_current_state(state_value) \
do { \
@@ -1799,20 +1799,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
if (current->rseq)
- __rseq_handle_notify_resume(regs);
+ __rseq_handle_notify_resume(ksig, regs);
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1833,7 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
@@ -1847,7 +1847,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
- rseq_preempt(t);
}
}
@@ -1864,10 +1863,12 @@ static inline void rseq_execve(struct task_struct *t)
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
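As an assumed call-site sketch (architecture code is not part of this hunk), signal delivery now hands the ksignal through so the rseq fixup can consult it:

/* Hypothetical arch-level helper, sketching the new call signature. */
static void example_arch_handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
	/* Abort any interrupted rseq critical section before the signal
	 * frame snapshots the (possibly fixed up) registers.
	 */
	rseq_signal_deliver(ksig, regs);

	/* ... then set up the frame for ksig->ka.sa.sa_handler ... */
}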
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 5be31eb7b266..108ede99e533 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
extern void free_task(struct task_struct *tsk);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c86885954994..610a201126ee 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
+ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -735,7 +736,7 @@ struct sk_buff {
peeked:1,
head_frag:1,
xmit_more:1,
- __unused:1; /* one bit hole */
+ pfmemalloc:1;
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
@@ -754,31 +755,30 @@ struct sk_buff {
__u8 __pkt_type_offset[0];
__u8 pkt_type:3;
- __u8 pfmemalloc:1;
__u8 ignore_df:1;
-
__u8 nf_trace:1;
__u8 ip_summed:2;
__u8 ooo_okay:1;
+
__u8 l4_hash:1;
__u8 sw_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
-
__u8 no_fcs:1;
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
+
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
-
__u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
__u8 ipvs_property:1;
+
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
@@ -3252,7 +3252,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 09fa2c6f0e68..3a1a1dbc6f49 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -155,8 +155,12 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1e8a46435838..fd57888d4942 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -427,6 +427,11 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp);
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 73810808cdf2..5c1a0933768e 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -11,6 +11,7 @@
#ifndef _LINUX_SYSCALLS_H
#define _LINUX_SYSCALLS_H
+struct __aio_sigset;
struct epoll_event;
struct iattr;
struct inode;
@@ -231,6 +232,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
*/
#ifndef __SYSCALL_DEFINEx
#define __SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_sys##name)))); \
ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
@@ -243,6 +247,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
+ __diag_pop(); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6c5f2074e14f..6f8b68cd460f 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -75,7 +75,7 @@ struct uio_device {
struct fasync_struct *async_queue;
wait_queue_head_t wait;
struct uio_info *info;
- spinlock_t info_lock;
+ struct mutex info_lock;
struct kobject *map_dir;
struct kobject *portio_dir;
};
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 53ce8176c313..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5fbfe61f41c6..1beb3ead0385 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -5835,10 +5835,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
* cfg80211_rx_control_port - notification about a received control port frame
* @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skbuf with the control port frame. It is assumed that the skbuf
+ * is 802.3 formatted (with 802.3 header). The skb can be non-linear.
+ * This function does not take ownership of the skb, so the caller is
+ * responsible for any cleanup. The caller must also ensure that
+ * skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted
*
* This function is used to inform userspace about a received control port
@@ -5851,8 +5852,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
* Return: %true if the frame was passed to userspace
*/
bool cfg80211_rx_control_port(struct net_device *dev,
- const u8 *buf, size_t len,
- const u8 *addr, u16 proto, bool unencrypted);
+ struct sk_buff *skb, bool unencrypted);
/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5cba71d2dc44..3d4930528db0 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -170,6 +170,7 @@ struct fib6_info {
unused:3;
struct fib6_nh fib6_nh;
+ struct rcu_head rcu;
};
struct rt6_info {
@@ -273,17 +274,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
}
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
static inline void fib6_info_hold(struct fib6_info *f6i)
{
atomic_inc(&f6i->fib6_ref);
}
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+ return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
- fib6_info_destroy(f6i);
+ call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
enum fib6_walk_state {
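A minimal, assumed usage pattern: with destruction deferred through fib6_info_destroy_rcu(), RCU readers pin entries with the new _safe variant, because the refcount may already have reached zero, and drop them with fib6_info_release():

static struct fib6_info *example_pin_route(struct fib6_info *f6i)
{
	/* Caller holds rcu_read_lock(); atomic_inc_not_zero() fails once
	 * the entry is already being torn down.
	 */
	if (!f6i || !fib6_info_hold_safe(f6i))
		return NULL;
	return f6i;
}

static void example_unpin_route(struct fib6_info *f6i)
{
	fib6_info_release(f6i);		/* last put queues fib6_info_destroy_rcu() */
}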
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 59656fc580df..7b9c82de11cc 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+ return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+ RTF_GATEWAY;
+}
+
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 16475c269749..8f73be494503 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -355,14 +355,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
- struct ipv6_opt_hdr __user *newopt,
- int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
- struct ipv6_txoptions *opt,
- int newtype,
- struct ipv6_opt_hdr *newopt,
- int newoptlen);
+ struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
@@ -830,7 +823,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
* to minimize possbility that any useful information to an
* attacker is leaked. Only lower 20 bits are relevant.
*/
- rol32(hash, 16);
+ hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
@@ -1107,6 +1100,8 @@ void ipv6_sysctl_unregister(void);
int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+ const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
#endif /* _NET_IPV6_H */
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index b0eaeb02d46d..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,6 +153,8 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 47e35cce3b64..a71264d75d7f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -128,6 +128,7 @@ struct net {
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag nf_frag;
+ struct ctl_table_header *nf_frag_frags_hdr;
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 08c005ce56e9..dc417ef0a0c5 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
* @portid: netlink portID of the original message
* @seq: netlink sequence number
* @family: protocol family
+ * @level: depth of the chains
* @report: notify via unicast netlink message
*/
struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
u32 portid;
u32 seq;
u8 family;
+ u8 level;
bool report;
};
@@ -865,7 +867,6 @@ enum nft_chain_flags {
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @level: length of longest path to this chain
* @flags: bitmask of enum nft_chain_flags
* @name: name of the chain
*/
@@ -878,7 +879,6 @@ struct nft_chain {
struct nft_table *table;
u64 handle;
u32 use;
- u16 level;
u8 flags:6,
genmask:2;
char *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
u32 genmask:2,
use:30;
u64 handle;
- char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
struct nf_flowtable data;
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index e0c0c2558ec4..a05134507e7b 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -65,4 +65,10 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
+extern struct nft_set_type nft_set_rhash_type;
+extern struct nft_set_type nft_set_hash_type;
+extern struct nft_set_type nft_set_hash_fast_type;
+extern struct nft_set_type nft_set_rbtree_type;
+extern struct nft_set_type nft_set_bitmap_type;
+
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 9754a50ecde9..4cc64c8446eb 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -64,7 +64,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
* belonging to established connections going through that one.
*/
struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
@@ -103,7 +103,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
struct sock *sk);
struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
const u8 protocol,
const struct in6_addr *saddr, const struct in6_addr *daddr,
const __be16 sport, const __be16 dport,
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c978a31b0f84..762ac9931b62 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -109,7 +109,6 @@ struct netns_ipv6 {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag {
- struct netns_sysctl_ipv6 sysctl;
struct netns_frags frags;
};
#endif
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a3c1a2c47cd4..20b059574e60 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -111,6 +111,11 @@ void tcf_block_put_ext(struct tcf_block *block, struct Qdisc *q,
{
}
+static inline bool tcf_block_shared(struct tcf_block *block)
+{
+ return false;
+}
+
static inline struct Qdisc *tcf_block_q(struct tcf_block *block)
{
return NULL;
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 30b3e2fe240a..8c2caa370e0f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9470fd7e4350..32d2454c0479 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -7,7 +7,6 @@
#include <linux/tc_act/tc_csum.h>
struct tcf_csum_params {
- int action;
u32 update_flags;
struct rcu_head rcu;
};
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index efef0b4b1b2b..46b8c7f1c8d5 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -18,7 +18,6 @@
struct tcf_tunnel_key_params {
struct rcu_head rcu;
int tcft_action;
- int action;
struct metadata_dst *tcft_enc_metadata;
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0448e7c5d2b4..cd3ecda9386a 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -342,6 +342,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
@@ -388,7 +389,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -538,6 +540,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -827,6 +830,10 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
@@ -834,6 +841,11 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
+ return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
+{
bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
@@ -907,8 +919,6 @@ enum tcp_ca_event {
CA_EVENT_LOSS, /* loss timeout */
CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
- CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
- CA_EVENT_NON_DELAYED_ACK,
};
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
diff --git a/include/net/tls.h b/include/net/tls.h
index 7f84ea3e217c..70c273777fe9 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -109,7 +109,8 @@ struct tls_sw_context_rx {
struct strparser strp;
void (*saved_data_ready)(struct sock *sk);
- __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+ unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
@@ -224,7 +225,8 @@ void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
diff --git a/include/net/udp.h b/include/net/udp.h
index b1ea8b0f5e6a..81afdacd4fff 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 9fe472f2ac95..7161856bcf9c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -60,6 +60,10 @@ struct xdp_sock {
bool zc;
/* Protects multiple processes in the control path */
struct mutex mutex;
+ /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+ * in the SKB destructor callback.
+ */
+ spinlock_t tx_completion_lock;
u64 rx_dropped;
};
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4c6241bc2039..6c003995347a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -3391,11 +3391,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
*
* Users can examine the cq structure to determine the actual CQ size.
*/
-struct ib_cq *ib_create_cq(struct ib_device *device,
- ib_comp_handler comp_handler,
- void (*event_handler)(struct ib_event *, void *),
- void *cq_context,
- const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr,
+ const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+ __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
/**
* ib_resize_cq - Modifies the capacity of the CQ.
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index d00221345c19..d4593a6062ef 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
#include <linux/fs.h>
-#include <linux/signal.h>
#include <asm/byteorder.h>
typedef __kernel_ulong_t aio_context_t;
@@ -39,8 +38,10 @@ enum {
IOCB_CMD_PWRITE = 1,
IOCB_CMD_FSYNC = 2,
IOCB_CMD_FDSYNC = 3,
- /* 4 was the experimental IOCB_CMD_PREADX */
- IOCB_CMD_POLL = 5,
+ /* These two are experimental.
+ * IOCB_CMD_PREADX = 4,
+ * IOCB_CMD_POLL = 5,
+ */
IOCB_CMD_NOOP = 6,
IOCB_CMD_PREADV = 7,
IOCB_CMD_PWRITEV = 8,
@@ -108,10 +109,5 @@ struct iocb {
#undef IFBIG
#undef IFLITTLE
-struct __aio_sigset {
- const sigset_t __user *sigmask;
- size_t sigsetsize;
-};
-
#endif /* __LINUX__AIO_ABI_H */
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 59b19b6a40d7..b7db3261c62d 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1857,7 +1857,8 @@ union bpf_attr {
* is resolved), the nexthop address is returned in ipv4_dst
* or ipv6_dst based on family, smac is set to mac address of
* egress device, dmac is set to nexthop mac address, rt_metric
- * is set to metric from route (IPv4/IPv6 only).
+ * is set to metric from route (IPv4/IPv6 only), and ifindex
+ * is set to the device index of the nexthop from the FIB lookup.
*
* *plen* argument is the size of the passed in struct.
* *flags* argument can be a combination of one or more of the
@@ -1873,9 +1874,10 @@ union bpf_attr {
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
* Return
- * Egress device index on success, 0 if packet needs to continue
- * up the stack for further processing or a negative error in case
- * of failure.
+ * * < 0 if any input argument is invalid
+ * * 0 on success (packet is forwarded, nexthop neighbor exists)
+ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ * * packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* Description
@@ -2612,6 +2614,18 @@ struct bpf_raw_tracepoint_args {
#define BPF_FIB_LOOKUP_DIRECT BIT(0)
#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
+enum {
+ BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
+ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
+ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
+ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
+ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
+ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+};
+
struct bpf_fib_lookup {
/* input: network family for lookup (AF_INET, AF_INET6)
* output: network family of egress nexthop
@@ -2625,7 +2639,11 @@ struct bpf_fib_lookup {
/* total length of packet from network header - used for MTU check */
__u16 tot_len;
- __u32 ifindex; /* L3 device index for lookup */
+
+ /* input: L3 device index for lookup
+ * output: device index from FIB lookup
+ */
+ __u32 ifindex;
union {
/* inputs to lookup */
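An illustrative XDP fragment (assumed program skeleton using libbpf-style SEC() and helper wrappers, not from this commit) showing the new contract: bpf_fib_lookup() now returns a BPF_FIB_LKUP_RET_* code, and the egress device is reported back through the ifindex field:

SEC("xdp")
int example_fib_forward(struct xdp_md *ctx)
{
	struct bpf_fib_lookup params = {};
	int rc;

	/* ... fill family, src/dst addresses, tot_len; params.ifindex = ingress ... */

	rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);
	if (rc < 0)
		return XDP_ABORTED;		/* invalid argument */
	if (rc != BPF_FIB_LKUP_RET_SUCCESS)
		return XDP_PASS;		/* not forwarded; let the stack decide */

	/* params.ifindex now holds the nexthop device from the FIB lookup */
	return bpf_redirect(params.ifindex, 0);
}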
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff)
+#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
#define BTF_INT_SIGNED (1 << 0)
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4ca65b56084f..7363f18e65a5 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -226,7 +226,7 @@ enum tunable_id {
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
- * Add your fresh new tubale attribute above and remember to update
+ * Add your fresh new tunable attribute above and remember to update
* tunable_strings[] in net/core/ethtool.c
*/
__ETHTOOL_TUNABLE_COUNT,
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 85a3fb65e40a..20d6cc91435d 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -53,6 +53,9 @@ enum {
/* These are client behavior specific flags. */
#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+ * close by last opener.
+ */
/* userspace doesn't need the nbd_device structure */
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index d620fa43756c..9a402fdb60e9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -10,13 +10,8 @@
* Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <linux/types_32_64.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
enum rseq_cpu_id_state {
RSEQ_CPU_ID_UNINITIALIZED = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
__u32 version;
/* enum rseq_cs_flags */
__u32 flags;
- LINUX_FIELD_u32_u64(start_ip);
+ __u64 start_ip;
/* Offset from start_ip. */
- LINUX_FIELD_u32_u64(post_commit_offset);
- LINUX_FIELD_u32_u64(abort_ip);
+ __u64 post_commit_offset;
+ __u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
/*
@@ -67,28 +62,30 @@ struct rseq_cs {
struct rseq {
/*
* Restartable sequences cpu_id_start field. Updated by the
- * kernel, and read by user-space with single-copy atomicity
- * semantics. Aligned on 32-bit. Always contains a value in the
- * range of possible CPUs, although the value may not be the
- * actual current CPU (e.g. if rseq is not initialized). This
- * CPU number value should always be compared against the value
- * of the cpu_id field before performing a rseq commit or
- * returning a value read from a data structure indexed using
- * the cpu_id_start value.
+ * kernel. Read by user-space with single-copy atomicity
+ * semantics. This field should only be read by the thread which
+ * registered this data structure. Aligned on 32-bit. Always
+ * contains a value in the range of possible CPUs, although the
+ * value may not be the actual current CPU (e.g. if rseq is not
+ * initialized). This CPU number value should always be compared
+ * against the value of the cpu_id field before performing a rseq
+ * commit or returning a value read from a data structure indexed
+ * using the cpu_id_start value.
*/
__u32 cpu_id_start;
/*
- * Restartable sequences cpu_id field. Updated by the kernel,
- * and read by user-space with single-copy atomicity semantics.
- * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
- * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
- * former means "rseq uninitialized", and latter means "rseq
- * initialization failed". This value is meant to be read within
- * rseq critical sections and compared with the cpu_id_start
- * value previously read, before performing the commit instruction,
- * or read and compared with the cpu_id_start value before returning
- * a value loaded from a data structure indexed using the
- * cpu_id_start value.
+ * Restartable sequences cpu_id field. Updated by the kernel.
+ * Read by user-space with single-copy atomicity semantics. This
+ * field should only be read by the thread which registered this
+ * data structure. Aligned on 32-bit. Values
+ * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
+ * have a special semantic: the former means "rseq uninitialized",
+ * and latter means "rseq initialization failed". This value is
+ * meant to be read within rseq critical sections and compared
+ * with the cpu_id_start value previously read, before performing
+ * the commit instruction, or read and compared with the
+ * cpu_id_start value before returning a value loaded from a data
+ * structure indexed using the cpu_id_start value.
*/
__u32 cpu_id;
/*
@@ -105,27 +102,44 @@ struct rseq {
* targeted by the rseq_cs. Also needs to be set to NULL by user-space
* before reclaiming memory that contains the targeted struct rseq_cs.
*
- * Read and set by the kernel with single-copy atomicity semantics.
- * Set by user-space with single-copy atomicity semantics. Aligned
- * on 64-bit.
+ * Read and set by the kernel. Set by user-space with single-copy
+ * atomicity semantics. This field should only be updated by the
+ * thread which registered this data structure. Aligned on 64-bit.
*/
- LINUX_FIELD_u32_u64(rseq_cs);
+ union {
+ __u64 ptr64;
+#ifdef __LP64__
+ __u64 ptr;
+#else
+ struct {
+#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
+ __u32 padding; /* Initialized to zero. */
+ __u32 ptr32;
+#else /* LITTLE */
+ __u32 ptr32;
+ __u32 padding; /* Initialized to zero. */
+#endif /* ENDIAN */
+ } ptr;
+#endif
+ } rseq_cs;
+
/*
- * - RSEQ_DISABLE flag:
+ * Restartable sequences flags field.
+ *
+ * This field should only be updated by the thread which
+ * registered this data structure. Read by the kernel.
+ * Mainly used for single-stepping through rseq critical sections
+ * with debuggers.
*
- * Fallback fast-track flag for single-stepping.
- * Set by user-space if lack of progress is detected.
- * Cleared by user-space after rseq finish.
- * Read by the kernel.
* - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
- * Inhibit instruction sequence block restart and event
- * counter increment on preemption for this thread.
+ * Inhibit instruction sequence block restart on preemption
+ * for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
- * Inhibit instruction sequence block restart and event
- * counter increment on signal delivery for this thread.
+ * Inhibit instruction sequence block restart on signal
+ * delivery for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
- * Inhibit instruction sequence block restart and event
- * counter increment on migration for this thread.
+ * Inhibit instruction sequence block restart on migration for
+ * this thread.
*/
__u32 flags;
} __attribute__((aligned(4 * sizeof(__u64))));
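A hedged userspace sketch (helper name assumed, store-ordering details simplified) of how a thread could publish its critical-section descriptor now that LINUX_FIELD_u32_u64() is gone and rseq_cs is an explicit union:

static inline void example_set_rseq_cs(volatile struct rseq *rs,
				       const struct rseq_cs *cs)
{
#ifdef __LP64__
	rs->rseq_cs.ptr = (__u64)(unsigned long)cs;
#else
	rs->rseq_cs.ptr.padding = 0;			/* keep the high word zero */
	rs->rseq_cs.ptr.ptr32 = (__u32)(unsigned long)cs;
#endif
}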
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 6e299349b158..b7b57967d90f 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -44,6 +44,7 @@
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
struct tcmu_mailbox {
__u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
__u16 cmd_id;
__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN 0x2
__u8 uflags;
} __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
__u8 scsi_status;
__u8 __pad1;
__u16 __pad2;
- __u32 __pad3;
+ __u32 read_len;
char sense_buffer[TCMU_SENSE_BUFFERSIZE];
} rsp;
};
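
A hedged sketch of how a user-space TCMU handler might use the new capability, assuming the kernel advertised TCMU_MAILBOX_FLAG_CAP_READ_LEN and that mb/ent point into the mapped command ring; tcmu_complete_short_read() is an illustrative name and error handling is omitted.

#include <linux/target_core_user.h>

static void tcmu_complete_short_read(struct tcmu_mailbox *mb,
				     struct tcmu_cmd_entry *ent,
				     __u32 bytes_read)
{
	if (mb->flags & TCMU_MAILBOX_FLAG_CAP_READ_LEN) {
		ent->hdr.uflags |= TCMU_UFLAG_READ_LEN;
		ent->rsp.read_len = bytes_read;	/* actual bytes returned */
	}
	ent->rsp.scsi_status = 0;		/* SAM_STAT_GOOD */
}
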
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 29eb659aa77a..e3f6ed8a7064 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -127,6 +127,10 @@ enum {
#define TCP_CM_INQ TCP_INQ
+#define TCP_REPAIR_ON 1
+#define TCP_REPAIR_OFF 0
+#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
+
struct tcp_repair_opt {
__u32 opt_code;
__u32 opt_val;
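
A small sketch of how a checkpoint/restore tool might drive these values, assuming the toolchain's <linux/tcp.h> exports the new constants; the caller needs CAP_NET_ADMIN, and tcp_repair_set() is an illustrative name.

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* mode is TCP_REPAIR_ON, TCP_REPAIR_OFF or TCP_REPAIR_OFF_NO_WP
 * (the last one leaves repair mode without sending window probes).
 */
static int tcp_repair_set(int fd, int mode)
{
	return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &mode, sizeof(mode));
}
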
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644
index 0a87ace34a57..000000000000
--- a/include/uapi/linux/types_32_64.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_TYPES_32_64_H
-#define _UAPI_LINUX_TYPES_32_64_H
-
-/*
- * linux/types_32_64.h
- *
- * Integer type declaration for pointers across 32-bit and 64-bit systems.
- *
- * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <asm/byteorder.h>
-
-#ifdef __BYTE_ORDER
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-# define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#else
-# ifdef __BIG_ENDIAN
-# define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#endif
-
-#ifdef __LP64__
-# define LINUX_FIELD_u32_u64(field) __u64 field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
-#else
-# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
-# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
- field ## _padding = 0, field = (intptr_t)v
-# else
-# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
- field = (intptr_t)v, field ## _padding = 0
-# endif
-#endif
-
-#endif /* _UAPI_LINUX_TYPES_32_64_H */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 9d4340c907d1..1e1d9bd0bd37 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -25,12 +25,16 @@ extern bool xen_pvh;
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
#define xen_initial_domain() (xen_domain() && \
- xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+ (xen_start_flags & SIF_INITDOMAIN))
#else /* !CONFIG_XEN_DOM0 */
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
diff --git a/init/Kconfig b/init/Kconfig
index 5a52f07259a2..041f3a022122 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1051,10 +1051,9 @@ config LD_DEAD_CODE_DATA_ELIMINATION
depends on HAVE_LD_DEAD_CODE_DATA_ELIMINATION
depends on EXPERT
help
- Select this if the architecture wants to do dead code and
- data elimination with the linker by compiling with
- -ffunction-sections -fdata-sections, and linking with
- --gc-sections.
+ Enable this if you want to do dead code and data elimination with
+ the linker by compiling with -ffunction-sections -fdata-sections,
+ and linking with --gc-sections.
This can reduce on disk and in-memory size of the kernel
code and static data, particularly for small configs and
@@ -1719,10 +1718,6 @@ source "arch/Kconfig"
endmenu # General setup
-config HAVE_GENERIC_DMA_COHERENT
- bool
- default n
-
config RT_MUTEXES
bool
diff --git a/ipc/sem.c b/ipc/sem.c
index 5af1943ad782..76e95e4f3aa2 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2118,7 +2118,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
}
do {
- queue.status = -EINTR;
+ WRITE_ONCE(queue.status, -EINTR);
queue.sleeper = current;
__set_current_state(TASK_INTERRUPTIBLE);
diff --git a/kernel/Makefile b/kernel/Makefile
index d2001624fe7a..04bc07c2b42a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -41,6 +41,7 @@ obj-y += printk/
obj-y += irq/
obj-y += rcu/
obj-y += livepatch/
+obj-y += dma/
obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
obj-$(CONFIG_FREEZER) += freezer.o
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 2d49d18b793a..9704934252b3 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -450,7 +450,7 @@ static const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
*/
static bool btf_type_int_is_regular(const struct btf_type *t)
{
- u16 nr_bits, nr_bytes;
+ u8 nr_bits, nr_bytes;
u32 int_data;
int_data = btf_type_int(t);
@@ -991,38 +991,38 @@ static void btf_int_bits_seq_show(const struct btf *btf,
void *data, u8 bits_offset,
struct seq_file *m)
{
+ u16 left_shift_bits, right_shift_bits;
u32 int_data = btf_type_int(t);
- u16 nr_bits = BTF_INT_BITS(int_data);
- u16 total_bits_offset;
- u16 nr_copy_bytes;
- u16 nr_copy_bits;
- u8 nr_upper_bits;
- union {
- u64 u64_num;
- u8 u8_nums[8];
- } print_num;
+ u8 nr_bits = BTF_INT_BITS(int_data);
+ u8 total_bits_offset;
+ u8 nr_copy_bytes;
+ u8 nr_copy_bits;
+ u64 print_num;
+ /*
+ * bits_offset is at most 7.
+ * BTF_INT_OFFSET() cannot exceed 64 bits.
+ */
total_bits_offset = bits_offset + BTF_INT_OFFSET(int_data);
data += BITS_ROUNDDOWN_BYTES(total_bits_offset);
bits_offset = BITS_PER_BYTE_MASKED(total_bits_offset);
nr_copy_bits = nr_bits + bits_offset;
nr_copy_bytes = BITS_ROUNDUP_BYTES(nr_copy_bits);
- print_num.u64_num = 0;
- memcpy(&print_num.u64_num, data, nr_copy_bytes);
+ print_num = 0;
+ memcpy(&print_num, data, nr_copy_bytes);
- /* Ditch the higher order bits */
- nr_upper_bits = BITS_PER_BYTE_MASKED(nr_copy_bits);
- if (nr_upper_bits) {
- /* We need to mask out some bits of the upper byte. */
- u8 mask = (1 << nr_upper_bits) - 1;
-
- print_num.u8_nums[nr_copy_bytes - 1] &= mask;
- }
+#ifdef __BIG_ENDIAN_BITFIELD
+ left_shift_bits = bits_offset;
+#else
+ left_shift_bits = BITS_PER_U64 - nr_copy_bits;
+#endif
+ right_shift_bits = BITS_PER_U64 - nr_bits;
- print_num.u64_num >>= bits_offset;
+ print_num <<= left_shift_bits;
+ print_num >>= right_shift_bits;
- seq_printf(m, "0x%llx", print_num.u64_num);
+ seq_printf(m, "0x%llx", print_num);
}
static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
@@ -1032,7 +1032,7 @@ static void btf_int_seq_show(const struct btf *btf, const struct btf_type *t,
u32 int_data = btf_type_int(t);
u8 encoding = BTF_INT_ENCODING(int_data);
bool sign = encoding & BTF_INT_SIGNED;
- u32 nr_bits = BTF_INT_BITS(int_data);
+ u8 nr_bits = BTF_INT_BITS(int_data);
if (bits_offset || BTF_INT_OFFSET(int_data) ||
BITS_PER_BYTE_MASKED(nr_bits)) {
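
The rewritten helper above extracts an up-to-64-bit integer by loading the covering bytes into a u64 and applying a left/right shift pair instead of masking the top byte. A stand-alone sketch of the little-endian case, with extract_bits() as an illustrative name, bits_offset < 8 and nr_bits assumed to be between 1 and 64:

#include <stdint.h>
#include <string.h>

static uint64_t extract_bits(const void *data, uint8_t bits_offset,
			     uint8_t nr_bits)
{
	uint8_t nr_copy_bits = nr_bits + bits_offset;
	uint8_t nr_copy_bytes = (nr_copy_bits + 7) / 8;
	uint64_t v = 0;

	memcpy(&v, data, nr_copy_bytes);
	v <<= 64 - nr_copy_bits;	/* drop the high garbage bits */
	v >>= 64 - nr_bits;		/* drop the low offset bits */
	return v;
}
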
diff --git a/kernel/bpf/cgroup.c b/kernel/bpf/cgroup.c
index f7c00bd6f8e4..3d83ee7df381 100644
--- a/kernel/bpf/cgroup.c
+++ b/kernel/bpf/cgroup.c
@@ -428,6 +428,60 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
return ret;
}
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype, struct bpf_prog *prog)
+{
+ struct cgroup *cgrp;
+ int ret;
+
+ cgrp = cgroup_get_from_fd(attr->target_fd);
+ if (IS_ERR(cgrp))
+ return PTR_ERR(cgrp);
+
+ ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
+ attr->attach_flags);
+ cgroup_put(cgrp);
+ return ret;
+}
+
+int cgroup_bpf_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype)
+{
+ struct bpf_prog *prog;
+ struct cgroup *cgrp;
+ int ret;
+
+ cgrp = cgroup_get_from_fd(attr->target_fd);
+ if (IS_ERR(cgrp))
+ return PTR_ERR(cgrp);
+
+ prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
+ if (IS_ERR(prog))
+ prog = NULL;
+
+ ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
+ if (prog)
+ bpf_prog_put(prog);
+
+ cgroup_put(cgrp);
+ return ret;
+}
+
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ struct cgroup *cgrp;
+ int ret;
+
+ cgrp = cgroup_get_from_fd(attr->query.target_fd);
+ if (IS_ERR(cgrp))
+ return PTR_ERR(cgrp);
+
+ ret = cgroup_bpf_query(cgrp, attr, uattr);
+
+ cgroup_put(cgrp);
+ return ret;
+}
+
/**
* __cgroup_bpf_run_filter_skb() - Run a program for packet filtering
* @sk: The socket sending or receiving traffic
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 9f1493705f40..1e5625d46414 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -350,6 +350,20 @@ struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
return prog_adj;
}
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp)
+{
+ int i;
+
+ for (i = 0; i < fp->aux->func_cnt; i++)
+ bpf_prog_kallsyms_del(fp->aux->func[i]);
+}
+
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp)
+{
+ bpf_prog_kallsyms_del_subprogs(fp);
+ bpf_prog_kallsyms_del(fp);
+}
+
#ifdef CONFIG_BPF_JIT
/* All BPF JIT sysctl knobs here. */
int bpf_jit_enable __read_mostly = IS_BUILTIN(CONFIG_BPF_JIT_ALWAYS_ON);
@@ -1434,6 +1448,17 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
return 0;
}
+static void bpf_prog_select_func(struct bpf_prog *fp)
+{
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+
+ fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
+#else
+ fp->bpf_func = __bpf_prog_ret0_warn;
+#endif
+}
+
/**
* bpf_prog_select_runtime - select exec runtime for BPF program
* @fp: bpf_prog populated with internal BPF program
@@ -1444,13 +1469,13 @@ static int bpf_check_tail_call(const struct bpf_prog *fp)
*/
struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
{
-#ifndef CONFIG_BPF_JIT_ALWAYS_ON
- u32 stack_depth = max_t(u32, fp->aux->stack_depth, 1);
+ /* In case of BPF to BPF calls, verifier did all the prep
+ * work with regards to JITing, etc.
+ */
+ if (fp->bpf_func)
+ goto finalize;
- fp->bpf_func = interpreters[(round_up(stack_depth, 32) / 32) - 1];
-#else
- fp->bpf_func = __bpf_prog_ret0_warn;
-#endif
+ bpf_prog_select_func(fp);
/* eBPF JITs can rewrite the program in case constant
* blinding is active. However, in case of error during
@@ -1471,6 +1496,8 @@ struct bpf_prog *bpf_prog_select_runtime(struct bpf_prog *fp, int *err)
if (*err)
return fp;
}
+
+finalize:
bpf_prog_lock_ro(fp);
/* The tail call compatibility check can only be done at
diff --git a/kernel/bpf/devmap.c b/kernel/bpf/devmap.c
index a7cc7b3494a9..d361fc1e3bf3 100644
--- a/kernel/bpf/devmap.c
+++ b/kernel/bpf/devmap.c
@@ -334,10 +334,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
{
struct net_device *dev = dst->dev;
struct xdp_frame *xdpf;
+ int err;
if (!dev->netdev_ops->ndo_xdp_xmit)
return -EOPNOTSUPP;
+ err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+ if (unlikely(err))
+ return err;
+
xdpf = convert_to_xdp_frame(xdp);
if (unlikely(!xdpf))
return -EOVERFLOW;
@@ -345,6 +350,20 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
return bq_enqueue(dst, xdpf, dev_rx);
}
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ int err;
+
+ err = xdp_ok_fwd_dev(dst->dev, skb->len);
+ if (unlikely(err))
+ return err;
+ skb->dev = dst->dev;
+ generic_xdp_tx(skb, xdp_prog);
+
+ return 0;
+}
+
static void *dev_map_lookup_elem(struct bpf_map *map, void *key)
{
struct bpf_dtab_netdev *obj = __dev_map_lookup_elem(map, *(u32 *)key);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 3ca2198a6d22..513d9dfcf4ee 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -747,13 +747,15 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
* old element will be freed immediately.
* Otherwise return an error
*/
- atomic_dec(&htab->count);
- return ERR_PTR(-E2BIG);
+ l_new = ERR_PTR(-E2BIG);
+ goto dec_count;
}
l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
htab->map.numa_node);
- if (!l_new)
- return ERR_PTR(-ENOMEM);
+ if (!l_new) {
+ l_new = ERR_PTR(-ENOMEM);
+ goto dec_count;
+ }
}
memcpy(l_new->key, key, key_size);
@@ -766,7 +768,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
GFP_ATOMIC | __GFP_NOWARN);
if (!pptr) {
kfree(l_new);
- return ERR_PTR(-ENOMEM);
+ l_new = ERR_PTR(-ENOMEM);
+ goto dec_count;
}
}
@@ -780,6 +783,9 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
l_new->hash = hash;
return l_new;
+dec_count:
+ atomic_dec(&htab->count);
+ return l_new;
}
static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
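
The hunk above funnels every error path through a single dec_count label so the element count incremented earlier is always rolled back. The same unwind pattern in isolation, with placeholder types (struct table, struct elem) that are not part of the kernel:

#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/slab.h>

struct table { atomic_t count; int max; };
struct elem { int v; };

static struct elem *alloc_elem(struct table *t)
{
	struct elem *e;

	if (atomic_inc_return(&t->count) > t->max) {
		e = ERR_PTR(-E2BIG);
		goto dec_count;
	}
	e = kmalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
	if (!e) {
		e = ERR_PTR(-ENOMEM);
		goto dec_count;
	}
	return e;

dec_count:
	atomic_dec(&t->count);	/* undo the increment on every error path */
	return e;
}
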
diff --git a/kernel/bpf/sockmap.c b/kernel/bpf/sockmap.c
index 52a91d816c0e..98fb7938beea 100644
--- a/kernel/bpf/sockmap.c
+++ b/kernel/bpf/sockmap.c
@@ -72,6 +72,7 @@ struct bpf_htab {
u32 n_buckets;
u32 elem_size;
struct bpf_sock_progs progs;
+ struct rcu_head rcu;
};
struct htab_elem {
@@ -89,8 +90,8 @@ enum smap_psock_state {
struct smap_psock_map_entry {
struct list_head list;
struct sock **entry;
- struct htab_elem *hash_link;
- struct bpf_htab *htab;
+ struct htab_elem __rcu *hash_link;
+ struct bpf_htab __rcu *htab;
};
struct smap_psock {
@@ -120,6 +121,7 @@ struct smap_psock {
struct bpf_prog *bpf_parse;
struct bpf_prog *bpf_verdict;
struct list_head maps;
+ spinlock_t maps_lock;
/* Back reference used when sock callback trigger sockmap operations */
struct sock *sock;
@@ -140,6 +142,7 @@ static int bpf_tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
static int bpf_tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
static int bpf_tcp_sendpage(struct sock *sk, struct page *page,
int offset, size_t size, int flags);
+static void bpf_tcp_close(struct sock *sk, long timeout);
static inline struct smap_psock *smap_psock_sk(const struct sock *sk)
{
@@ -161,7 +164,42 @@ out:
return !empty;
}
-static struct proto tcp_bpf_proto;
+enum {
+ SOCKMAP_IPV4,
+ SOCKMAP_IPV6,
+ SOCKMAP_NUM_PROTS,
+};
+
+enum {
+ SOCKMAP_BASE,
+ SOCKMAP_TX,
+ SOCKMAP_NUM_CONFIGS,
+};
+
+static struct proto *saved_tcpv6_prot __read_mostly;
+static DEFINE_SPINLOCK(tcpv6_prot_lock);
+static struct proto bpf_tcp_prots[SOCKMAP_NUM_PROTS][SOCKMAP_NUM_CONFIGS];
+static void build_protos(struct proto prot[SOCKMAP_NUM_CONFIGS],
+ struct proto *base)
+{
+ prot[SOCKMAP_BASE] = *base;
+ prot[SOCKMAP_BASE].close = bpf_tcp_close;
+ prot[SOCKMAP_BASE].recvmsg = bpf_tcp_recvmsg;
+ prot[SOCKMAP_BASE].stream_memory_read = bpf_tcp_stream_read;
+
+ prot[SOCKMAP_TX] = prot[SOCKMAP_BASE];
+ prot[SOCKMAP_TX].sendmsg = bpf_tcp_sendmsg;
+ prot[SOCKMAP_TX].sendpage = bpf_tcp_sendpage;
+}
+
+static void update_sk_prot(struct sock *sk, struct smap_psock *psock)
+{
+ int family = sk->sk_family == AF_INET6 ? SOCKMAP_IPV6 : SOCKMAP_IPV4;
+ int conf = psock->bpf_tx_msg ? SOCKMAP_TX : SOCKMAP_BASE;
+
+ sk->sk_prot = &bpf_tcp_prots[family][conf];
+}
+
static int bpf_tcp_init(struct sock *sk)
{
struct smap_psock *psock;
@@ -181,14 +219,17 @@ static int bpf_tcp_init(struct sock *sk)
psock->save_close = sk->sk_prot->close;
psock->sk_proto = sk->sk_prot;
- if (psock->bpf_tx_msg) {
- tcp_bpf_proto.sendmsg = bpf_tcp_sendmsg;
- tcp_bpf_proto.sendpage = bpf_tcp_sendpage;
- tcp_bpf_proto.recvmsg = bpf_tcp_recvmsg;
- tcp_bpf_proto.stream_memory_read = bpf_tcp_stream_read;
+ /* Build IPv6 sockmap whenever the address of tcpv6_prot changes */
+ if (sk->sk_family == AF_INET6 &&
+ unlikely(sk->sk_prot != smp_load_acquire(&saved_tcpv6_prot))) {
+ spin_lock_bh(&tcpv6_prot_lock);
+ if (likely(sk->sk_prot != saved_tcpv6_prot)) {
+ build_protos(bpf_tcp_prots[SOCKMAP_IPV6], sk->sk_prot);
+ smp_store_release(&saved_tcpv6_prot, sk->sk_prot);
+ }
+ spin_unlock_bh(&tcpv6_prot_lock);
}
-
- sk->sk_prot = &tcp_bpf_proto;
+ update_sk_prot(sk, psock);
rcu_read_unlock();
return 0;
}
@@ -219,24 +260,64 @@ out:
rcu_read_unlock();
}
+static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
+ u32 hash, void *key, u32 key_size)
+{
+ struct htab_elem *l;
+
+ hlist_for_each_entry_rcu(l, head, hash_node) {
+ if (l->hash == hash && !memcmp(&l->key, key, key_size))
+ return l;
+ }
+
+ return NULL;
+}
+
+static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+{
+ return &htab->buckets[hash & (htab->n_buckets - 1)];
+}
+
+static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
+{
+ return &__select_bucket(htab, hash)->head;
+}
+
static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
atomic_dec(&htab->count);
kfree_rcu(l, rcu);
}
+static struct smap_psock_map_entry *psock_map_pop(struct sock *sk,
+ struct smap_psock *psock)
+{
+ struct smap_psock_map_entry *e;
+
+ spin_lock_bh(&psock->maps_lock);
+ e = list_first_entry_or_null(&psock->maps,
+ struct smap_psock_map_entry,
+ list);
+ if (e)
+ list_del(&e->list);
+ spin_unlock_bh(&psock->maps_lock);
+ return e;
+}
+
static void bpf_tcp_close(struct sock *sk, long timeout)
{
void (*close_fun)(struct sock *sk, long timeout);
- struct smap_psock_map_entry *e, *tmp;
+ struct smap_psock_map_entry *e;
struct sk_msg_buff *md, *mtmp;
struct smap_psock *psock;
struct sock *osk;
+ lock_sock(sk);
rcu_read_lock();
psock = smap_psock_sk(sk);
if (unlikely(!psock)) {
rcu_read_unlock();
+ release_sock(sk);
return sk->sk_prot->close(sk, timeout);
}
@@ -247,7 +328,6 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
*/
close_fun = psock->save_close;
- write_lock_bh(&sk->sk_callback_lock);
if (psock->cork) {
free_start_sg(psock->sock, psock->cork);
kfree(psock->cork);
@@ -260,21 +340,40 @@ static void bpf_tcp_close(struct sock *sk, long timeout)
kfree(md);
}
- list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ e = psock_map_pop(sk, psock);
+ while (e) {
if (e->entry) {
osk = cmpxchg(e->entry, sk, NULL);
if (osk == sk) {
- list_del(&e->list);
smap_release_sock(psock, sk);
}
} else {
- hlist_del_rcu(&e->hash_link->hash_node);
- smap_release_sock(psock, e->hash_link->sk);
- free_htab_elem(e->htab, e->hash_link);
+ struct htab_elem *link = rcu_dereference(e->hash_link);
+ struct bpf_htab *htab = rcu_dereference(e->htab);
+ struct hlist_head *head;
+ struct htab_elem *l;
+ struct bucket *b;
+
+ b = __select_bucket(htab, link->hash);
+ head = &b->head;
+ raw_spin_lock_bh(&b->lock);
+ l = lookup_elem_raw(head,
+ link->hash, link->key,
+ htab->map.key_size);
+ /* If another thread deleted this object, skip deletion.
+ * The refcnt on psock may or may not be zero.
+ */
+ if (l) {
+ hlist_del_rcu(&link->hash_node);
+ smap_release_sock(psock, link->sk);
+ free_htab_elem(htab, link);
+ }
+ raw_spin_unlock_bh(&b->lock);
}
+ e = psock_map_pop(sk, psock);
}
- write_unlock_bh(&sk->sk_callback_lock);
rcu_read_unlock();
+ release_sock(sk);
close_fun(sk, timeout);
}
@@ -472,7 +571,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
while (sg[i].length) {
free += sg[i].length;
sk_mem_uncharge(sk, sg[i].length);
- put_page(sg_page(&sg[i]));
+ if (!md->skb)
+ put_page(sg_page(&sg[i]));
sg[i].length = 0;
sg[i].page_link = 0;
sg[i].offset = 0;
@@ -481,6 +581,8 @@ static int free_sg(struct sock *sk, int start, struct sk_msg_buff *md)
if (i == MAX_SKB_FRAGS)
i = 0;
}
+ if (md->skb)
+ consume_skb(md->skb);
return free;
}
@@ -1111,8 +1213,7 @@ static void bpf_tcp_msg_add(struct smap_psock *psock,
static int bpf_tcp_ulp_register(void)
{
- tcp_bpf_proto = tcp_prot;
- tcp_bpf_proto.close = bpf_tcp_close;
+ build_protos(bpf_tcp_prots[SOCKMAP_IPV4], &tcp_prot);
/* Once BPF TX ULP is registered it is never unregistered. It
* will be in the ULP list for the lifetime of the system. Doing
* duplicate registers is not a problem.
@@ -1135,7 +1236,7 @@ static int smap_verdict_func(struct smap_psock *psock, struct sk_buff *skb)
*/
TCP_SKB_CB(skb)->bpf.sk_redir = NULL;
skb->sk = psock->sock;
- bpf_compute_data_pointers(skb);
+ bpf_compute_data_end_sk_skb(skb);
preempt_disable();
rc = (*prog->bpf_func)(skb, prog->insnsi);
preempt_enable();
@@ -1357,7 +1458,9 @@ static void smap_release_sock(struct smap_psock *psock, struct sock *sock)
{
if (refcount_dec_and_test(&psock->refcnt)) {
tcp_cleanup_ulp(sock);
+ write_lock_bh(&sock->sk_callback_lock);
smap_stop_sock(psock, sock);
+ write_unlock_bh(&sock->sk_callback_lock);
clear_bit(SMAP_TX_RUNNING, &psock->state);
rcu_assign_sk_user_data(sock, NULL);
call_rcu_sched(&psock->rcu, smap_destroy_psock);
@@ -1388,7 +1491,7 @@ static int smap_parse_func_strparser(struct strparser *strp,
* any socket yet.
*/
skb->sk = psock->sock;
- bpf_compute_data_pointers(skb);
+ bpf_compute_data_end_sk_skb(skb);
rc = (*prog->bpf_func)(skb, prog->insnsi);
skb->sk = NULL;
rcu_read_unlock();
@@ -1508,6 +1611,7 @@ static struct smap_psock *smap_init_psock(struct sock *sock, int node)
INIT_LIST_HEAD(&psock->maps);
INIT_LIST_HEAD(&psock->ingress);
refcount_set(&psock->refcnt, 1);
+ spin_lock_init(&psock->maps_lock);
rcu_assign_sk_user_data(sock, psock);
sock_hold(sock);
@@ -1564,18 +1668,32 @@ free_stab:
return ERR_PTR(err);
}
-static void smap_list_remove(struct smap_psock *psock,
- struct sock **entry,
- struct htab_elem *hash_link)
+static void smap_list_map_remove(struct smap_psock *psock,
+ struct sock **entry)
{
struct smap_psock_map_entry *e, *tmp;
+ spin_lock_bh(&psock->maps_lock);
list_for_each_entry_safe(e, tmp, &psock->maps, list) {
- if (e->entry == entry || e->hash_link == hash_link) {
+ if (e->entry == entry)
+ list_del(&e->list);
+ }
+ spin_unlock_bh(&psock->maps_lock);
+}
+
+static void smap_list_hash_remove(struct smap_psock *psock,
+ struct htab_elem *hash_link)
+{
+ struct smap_psock_map_entry *e, *tmp;
+
+ spin_lock_bh(&psock->maps_lock);
+ list_for_each_entry_safe(e, tmp, &psock->maps, list) {
+ struct htab_elem *c = rcu_dereference(e->hash_link);
+
+ if (c == hash_link)
list_del(&e->list);
- break;
- }
}
+ spin_unlock_bh(&psock->maps_lock);
}
static void sock_map_free(struct bpf_map *map)
@@ -1601,7 +1719,6 @@ static void sock_map_free(struct bpf_map *map)
if (!sock)
continue;
- write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
/* This check handles a racing sock event that can get the
* sk_callback_lock before this case but after xchg happens
@@ -1609,10 +1726,9 @@ static void sock_map_free(struct bpf_map *map)
* to be null and queued for garbage collection.
*/
if (likely(psock)) {
- smap_list_remove(psock, &stab->sock_map[i], NULL);
+ smap_list_map_remove(psock, &stab->sock_map[i]);
smap_release_sock(psock, sock);
}
- write_unlock_bh(&sock->sk_callback_lock);
}
rcu_read_unlock();
@@ -1661,17 +1777,15 @@ static int sock_map_delete_elem(struct bpf_map *map, void *key)
if (!sock)
return -EINVAL;
- write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
if (!psock)
goto out;
if (psock->bpf_parse)
smap_stop_sock(psock, sock);
- smap_list_remove(psock, &stab->sock_map[k], NULL);
+ smap_list_map_remove(psock, &stab->sock_map[k]);
smap_release_sock(psock, sock);
out:
- write_unlock_bh(&sock->sk_callback_lock);
return 0;
}
@@ -1752,7 +1866,6 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
}
}
- write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
/* 2. Do not allow inheriting programs if psock exists and has
@@ -1789,7 +1902,7 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
e = kzalloc(sizeof(*e), GFP_ATOMIC | __GFP_NOWARN);
if (!e) {
err = -ENOMEM;
- goto out_progs;
+ goto out_free;
}
}
@@ -1809,7 +1922,9 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
if (err)
goto out_free;
smap_init_progs(psock, verdict, parse);
+ write_lock_bh(&sock->sk_callback_lock);
smap_start_sock(psock, sock);
+ write_unlock_bh(&sock->sk_callback_lock);
}
/* 4. Place psock in sockmap for use and stop any programs on
@@ -1819,9 +1934,10 @@ static int __sock_map_ctx_update_elem(struct bpf_map *map,
*/
if (map_link) {
e->entry = map_link;
+ spin_lock_bh(&psock->maps_lock);
list_add_tail(&e->list, &psock->maps);
+ spin_unlock_bh(&psock->maps_lock);
}
- write_unlock_bh(&sock->sk_callback_lock);
return err;
out_free:
smap_release_sock(psock, sock);
@@ -1832,7 +1948,6 @@ out_progs:
}
if (tx_msg)
bpf_prog_put(tx_msg);
- write_unlock_bh(&sock->sk_callback_lock);
kfree(e);
return err;
}
@@ -1869,10 +1984,8 @@ static int sock_map_ctx_update_elem(struct bpf_sock_ops_kern *skops,
if (osock) {
struct smap_psock *opsock = smap_psock_sk(osock);
- write_lock_bh(&osock->sk_callback_lock);
- smap_list_remove(opsock, &stab->sock_map[i], NULL);
+ smap_list_map_remove(opsock, &stab->sock_map[i]);
smap_release_sock(opsock, osock);
- write_unlock_bh(&osock->sk_callback_lock);
}
out:
return err;
@@ -1915,6 +2028,24 @@ int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type)
return 0;
}
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog)
+{
+ int ufd = attr->target_fd;
+ struct bpf_map *map;
+ struct fd f;
+ int err;
+
+ f = fdget(ufd);
+ map = __bpf_map_get(f);
+ if (IS_ERR(map))
+ return PTR_ERR(map);
+
+ err = sock_map_prog(map, prog, attr->attach_type);
+ fdput(f);
+ return err;
+}
+
static void *sock_map_lookup(struct bpf_map *map, void *key)
{
return NULL;
@@ -1944,7 +2075,13 @@ static int sock_map_update_elem(struct bpf_map *map,
return -EOPNOTSUPP;
}
+ lock_sock(skops.sk);
+ preempt_disable();
+ rcu_read_lock();
err = sock_map_ctx_update_elem(&skops, map, key, flags);
+ rcu_read_unlock();
+ preempt_enable();
+ release_sock(skops.sk);
fput(socket->file);
return err;
}
@@ -2043,14 +2180,13 @@ free_htab:
return ERR_PTR(err);
}
-static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
+static void __bpf_htab_free(struct rcu_head *rcu)
{
- return &htab->buckets[hash & (htab->n_buckets - 1)];
-}
+ struct bpf_htab *htab;
-static inline struct hlist_head *select_bucket(struct bpf_htab *htab, u32 hash)
-{
- return &__select_bucket(htab, hash)->head;
+ htab = container_of(rcu, struct bpf_htab, rcu);
+ bpf_map_area_free(htab->buckets);
+ kfree(htab);
}
static void sock_hash_free(struct bpf_map *map)
@@ -2069,16 +2205,18 @@ static void sock_hash_free(struct bpf_map *map)
*/
rcu_read_lock();
for (i = 0; i < htab->n_buckets; i++) {
- struct hlist_head *head = select_bucket(htab, i);
+ struct bucket *b = __select_bucket(htab, i);
+ struct hlist_head *head;
struct hlist_node *n;
struct htab_elem *l;
+ raw_spin_lock_bh(&b->lock);
+ head = &b->head;
hlist_for_each_entry_safe(l, n, head, hash_node) {
struct sock *sock = l->sk;
struct smap_psock *psock;
hlist_del_rcu(&l->hash_node);
- write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
/* This check handles a racing sock event that can get
* the sk_callback_lock before this case but after xchg
@@ -2086,16 +2224,15 @@ static void sock_hash_free(struct bpf_map *map)
* (psock) to be null and queued for garbage collection.
*/
if (likely(psock)) {
- smap_list_remove(psock, NULL, l);
+ smap_list_hash_remove(psock, l);
smap_release_sock(psock, sock);
}
- write_unlock_bh(&sock->sk_callback_lock);
- kfree(l);
+ free_htab_elem(htab, l);
}
+ raw_spin_unlock_bh(&b->lock);
}
rcu_read_unlock();
- bpf_map_area_free(htab->buckets);
- kfree(htab);
+ call_rcu(&htab->rcu, __bpf_htab_free);
}
static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
@@ -2122,19 +2259,6 @@ static struct htab_elem *alloc_sock_hash_elem(struct bpf_htab *htab,
return l_new;
}
-static struct htab_elem *lookup_elem_raw(struct hlist_head *head,
- u32 hash, void *key, u32 key_size)
-{
- struct htab_elem *l;
-
- hlist_for_each_entry_rcu(l, head, hash_node) {
- if (l->hash == hash && !memcmp(&l->key, key, key_size))
- return l;
- }
-
- return NULL;
-}
-
static inline u32 htab_map_hash(const void *key, u32 key_len)
{
return jhash(key, key_len, 0);
@@ -2230,7 +2354,10 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
if (err)
goto err;
- /* bpf_map_update_elem() can be called in_irq() */
+ /* psock is valid here because the *ctx_update_elem() call above
+ * would otherwise have returned an error; it is safe to skip the
+ * error check.
+ */
+ psock = smap_psock_sk(sock);
raw_spin_lock_bh(&b->lock);
l_old = lookup_elem_raw(head, hash, key, key_size);
if (l_old && map_flags == BPF_NOEXIST) {
@@ -2248,15 +2375,12 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
goto bucket_err;
}
- psock = smap_psock_sk(sock);
- if (unlikely(!psock)) {
- err = -EINVAL;
- goto bucket_err;
- }
-
- e->hash_link = l_new;
- e->htab = container_of(map, struct bpf_htab, map);
+ rcu_assign_pointer(e->hash_link, l_new);
+ rcu_assign_pointer(e->htab,
+ container_of(map, struct bpf_htab, map));
+ spin_lock_bh(&psock->maps_lock);
list_add_tail(&e->list, &psock->maps);
+ spin_unlock_bh(&psock->maps_lock);
/* add new element to the head of the list, so that
* concurrent search will find it before old elem
@@ -2266,19 +2390,17 @@ static int sock_hash_ctx_update_elem(struct bpf_sock_ops_kern *skops,
psock = smap_psock_sk(l_old->sk);
hlist_del_rcu(&l_old->hash_node);
- smap_list_remove(psock, NULL, l_old);
+ smap_list_hash_remove(psock, l_old);
smap_release_sock(psock, l_old->sk);
free_htab_elem(htab, l_old);
}
raw_spin_unlock_bh(&b->lock);
return 0;
bucket_err:
+ smap_release_sock(psock, sock);
raw_spin_unlock_bh(&b->lock);
err:
kfree(e);
- psock = smap_psock_sk(sock);
- if (psock)
- smap_release_sock(psock, sock);
return err;
}
@@ -2300,7 +2422,13 @@ static int sock_hash_update_elem(struct bpf_map *map,
return -EINVAL;
}
+ lock_sock(skops.sk);
+ preempt_disable();
+ rcu_read_lock();
err = sock_hash_ctx_update_elem(&skops, map, key, flags);
+ rcu_read_unlock();
+ preempt_enable();
+ release_sock(skops.sk);
fput(socket->file);
return err;
}
@@ -2326,7 +2454,6 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
struct smap_psock *psock;
hlist_del_rcu(&l->hash_node);
- write_lock_bh(&sock->sk_callback_lock);
psock = smap_psock_sk(sock);
/* This check handles a racing sock event that can get the
* sk_callback_lock before this case but after xchg happens
@@ -2334,10 +2461,9 @@ static int sock_hash_delete_elem(struct bpf_map *map, void *key)
* to be null and queued for garbage collection.
*/
if (likely(psock)) {
- smap_list_remove(psock, NULL, l);
+ smap_list_hash_remove(psock, l);
smap_release_sock(psock, sock);
}
- write_unlock_bh(&sock->sk_callback_lock);
free_htab_elem(htab, l);
ret = 0;
}
@@ -2359,10 +2485,8 @@ struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key)
b = __select_bucket(htab, hash);
head = &b->head;
- raw_spin_lock_bh(&b->lock);
l = lookup_elem_raw(head, hash, key, key_size);
sk = l ? l->sk : NULL;
- raw_spin_unlock_bh(&b->lock);
return sk;
}
@@ -2383,6 +2507,7 @@ const struct bpf_map_ops sock_hash_ops = {
.map_get_next_key = sock_hash_get_next_key,
.map_update_elem = sock_hash_update_elem,
.map_delete_elem = sock_hash_delete_elem,
+ .map_release_uref = sock_map_release,
};
BPF_CALL_4(bpf_sock_map_update, struct bpf_sock_ops_kern *, bpf_sock,
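
A minimal sketch, outside the sockmap code, of the proto-override scheme that build_protos()/update_sk_prot() above implement: copy the base proto once, override the callbacks of interest, and point the socket at the copy. my_close(), my_tcp_prot and my_install_proto() are assumed names; per-family tables and locking are omitted.

#include <net/sock.h>

static void my_close(struct sock *sk, long timeout);	/* assumed callback */

static struct proto my_tcp_prot;

static void my_install_proto(struct sock *sk)
{
	my_tcp_prot = *sk->sk_prot;	/* start from the running base proto */
	my_tcp_prot.close = my_close;	/* override selected operations */
	sk->sk_prot = &my_tcp_prot;
}
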
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 0fa20624707f..a31a1ba0f8ea 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -735,7 +735,9 @@ static int map_update_elem(union bpf_attr *attr)
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_update_elem(map, key, value, attr->flags);
goto out;
- } else if (map->map_type == BPF_MAP_TYPE_CPUMAP) {
+ } else if (map->map_type == BPF_MAP_TYPE_CPUMAP ||
+ map->map_type == BPF_MAP_TYPE_SOCKHASH ||
+ map->map_type == BPF_MAP_TYPE_SOCKMAP) {
err = map->ops->map_update_elem(map, key, value, attr->flags);
goto out;
}
@@ -1034,14 +1036,9 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
{
if (atomic_dec_and_test(&prog->aux->refcnt)) {
- int i;
-
/* bpf_prog_free_id() must be called first */
bpf_prog_free_id(prog, do_idr_lock);
-
- for (i = 0; i < prog->aux->func_cnt; i++)
- bpf_prog_kallsyms_del(prog->aux->func[i]);
- bpf_prog_kallsyms_del(prog);
+ bpf_prog_kallsyms_del_all(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
}
@@ -1358,9 +1355,7 @@ static int bpf_prog_load(union bpf_attr *attr)
if (err < 0)
goto free_used_maps;
- /* eBPF program is ready to be JITed */
- if (!prog->bpf_func)
- prog = bpf_prog_select_runtime(prog, &err);
+ prog = bpf_prog_select_runtime(prog, &err);
if (err < 0)
goto free_used_maps;
@@ -1384,6 +1379,7 @@ static int bpf_prog_load(union bpf_attr *attr)
return err;
free_used_maps:
+ bpf_prog_kallsyms_del_subprogs(prog);
free_used_maps(prog->aux);
free_prog:
bpf_prog_uncharge_memlock(prog);
@@ -1489,8 +1485,6 @@ out_free_tp:
return err;
}
-#ifdef CONFIG_CGROUP_BPF
-
static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
enum bpf_attach_type attach_type)
{
@@ -1505,40 +1499,6 @@ static int bpf_prog_attach_check_attach_type(const struct bpf_prog *prog,
#define BPF_PROG_ATTACH_LAST_FIELD attach_flags
-static int sockmap_get_from_fd(const union bpf_attr *attr,
- int type, bool attach)
-{
- struct bpf_prog *prog = NULL;
- int ufd = attr->target_fd;
- struct bpf_map *map;
- struct fd f;
- int err;
-
- f = fdget(ufd);
- map = __bpf_map_get(f);
- if (IS_ERR(map))
- return PTR_ERR(map);
-
- if (attach) {
- prog = bpf_prog_get_type(attr->attach_bpf_fd, type);
- if (IS_ERR(prog)) {
- fdput(f);
- return PTR_ERR(prog);
- }
- }
-
- err = sock_map_prog(map, prog, attr->attach_type);
- if (err) {
- fdput(f);
- if (prog)
- bpf_prog_put(prog);
- return err;
- }
-
- fdput(f);
- return 0;
-}
-
#define BPF_F_ATTACH_MASK \
(BPF_F_ALLOW_OVERRIDE | BPF_F_ALLOW_MULTI)
@@ -1546,7 +1506,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
{
enum bpf_prog_type ptype;
struct bpf_prog *prog;
- struct cgroup *cgrp;
int ret;
if (!capable(CAP_NET_ADMIN))
@@ -1583,12 +1542,15 @@ static int bpf_prog_attach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
case BPF_SK_MSG_VERDICT:
- return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, true);
+ ptype = BPF_PROG_TYPE_SK_MSG;
+ break;
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, true);
+ ptype = BPF_PROG_TYPE_SK_SKB;
+ break;
case BPF_LIRC_MODE2:
- return lirc_prog_attach(attr);
+ ptype = BPF_PROG_TYPE_LIRC_MODE2;
+ break;
default:
return -EINVAL;
}
@@ -1602,18 +1564,20 @@ static int bpf_prog_attach(const union bpf_attr *attr)
return -EINVAL;
}
- cgrp = cgroup_get_from_fd(attr->target_fd);
- if (IS_ERR(cgrp)) {
- bpf_prog_put(prog);
- return PTR_ERR(cgrp);
+ switch (ptype) {
+ case BPF_PROG_TYPE_SK_SKB:
+ case BPF_PROG_TYPE_SK_MSG:
+ ret = sockmap_get_from_fd(attr, ptype, prog);
+ break;
+ case BPF_PROG_TYPE_LIRC_MODE2:
+ ret = lirc_prog_attach(attr, prog);
+ break;
+ default:
+ ret = cgroup_bpf_prog_attach(attr, ptype, prog);
}
- ret = cgroup_bpf_attach(cgrp, prog, attr->attach_type,
- attr->attach_flags);
if (ret)
bpf_prog_put(prog);
- cgroup_put(cgrp);
-
return ret;
}
@@ -1622,9 +1586,6 @@ static int bpf_prog_attach(const union bpf_attr *attr)
static int bpf_prog_detach(const union bpf_attr *attr)
{
enum bpf_prog_type ptype;
- struct bpf_prog *prog;
- struct cgroup *cgrp;
- int ret;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
@@ -1657,29 +1618,17 @@ static int bpf_prog_detach(const union bpf_attr *attr)
ptype = BPF_PROG_TYPE_CGROUP_DEVICE;
break;
case BPF_SK_MSG_VERDICT:
- return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, false);
+ return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_MSG, NULL);
case BPF_SK_SKB_STREAM_PARSER:
case BPF_SK_SKB_STREAM_VERDICT:
- return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, false);
+ return sockmap_get_from_fd(attr, BPF_PROG_TYPE_SK_SKB, NULL);
case BPF_LIRC_MODE2:
return lirc_prog_detach(attr);
default:
return -EINVAL;
}
- cgrp = cgroup_get_from_fd(attr->target_fd);
- if (IS_ERR(cgrp))
- return PTR_ERR(cgrp);
-
- prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
- if (IS_ERR(prog))
- prog = NULL;
-
- ret = cgroup_bpf_detach(cgrp, prog, attr->attach_type, 0);
- if (prog)
- bpf_prog_put(prog);
- cgroup_put(cgrp);
- return ret;
+ return cgroup_bpf_prog_detach(attr, ptype);
}
#define BPF_PROG_QUERY_LAST_FIELD query.prog_cnt
@@ -1687,9 +1636,6 @@ static int bpf_prog_detach(const union bpf_attr *attr)
static int bpf_prog_query(const union bpf_attr *attr,
union bpf_attr __user *uattr)
{
- struct cgroup *cgrp;
- int ret;
-
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (CHECK_ATTR(BPF_PROG_QUERY))
@@ -1717,14 +1663,9 @@ static int bpf_prog_query(const union bpf_attr *attr,
default:
return -EINVAL;
}
- cgrp = cgroup_get_from_fd(attr->query.target_fd);
- if (IS_ERR(cgrp))
- return PTR_ERR(cgrp);
- ret = cgroup_bpf_query(cgrp, attr, uattr);
- cgroup_put(cgrp);
- return ret;
+
+ return cgroup_bpf_prog_query(attr, uattr);
}
-#endif /* CONFIG_CGROUP_BPF */
#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
@@ -2371,7 +2312,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_OBJ_GET:
err = bpf_obj_get(&attr);
break;
-#ifdef CONFIG_CGROUP_BPF
case BPF_PROG_ATTACH:
err = bpf_prog_attach(&attr);
break;
@@ -2381,7 +2321,6 @@ SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, siz
case BPF_PROG_QUERY:
err = bpf_prog_query(&attr, uattr);
break;
-#endif
case BPF_PROG_TEST_RUN:
err = bpf_prog_test_run(&attr, uattr);
break;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 9e2bf834f13a..63aaac52a265 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -5430,6 +5430,10 @@ static int jit_subprogs(struct bpf_verifier_env *env)
if (insn->code != (BPF_JMP | BPF_CALL) ||
insn->src_reg != BPF_PSEUDO_CALL)
continue;
+ /* Upon error here we cannot fall back to interpreter but
+ * need a hard reject of the program. Thus -EFAULT is
+ * propagated in any case.
+ */
subprog = find_subprog(env, i + insn->imm + 1);
if (subprog < 0) {
WARN_ONCE(1, "verifier bug. No program starts at insn %d\n",
@@ -5450,7 +5454,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
func = kcalloc(env->subprog_cnt, sizeof(prog), GFP_KERNEL);
if (!func)
- return -ENOMEM;
+ goto out_undo_insn;
for (i = 0; i < env->subprog_cnt; i++) {
subprog_start = subprog_end;
@@ -5515,7 +5519,7 @@ static int jit_subprogs(struct bpf_verifier_env *env)
tmp = bpf_int_jit_compile(func[i]);
if (tmp != func[i] || func[i]->bpf_func != old_bpf_func) {
verbose(env, "JIT doesn't support bpf-to-bpf calls\n");
- err = -EFAULT;
+ err = -ENOTSUPP;
goto out_free;
}
cond_resched();
@@ -5552,6 +5556,7 @@ out_free:
if (func[i])
bpf_jit_free(func[i]);
kfree(func);
+out_undo_insn:
/* cleanup main prog to be interpreted */
prog->jit_requested = 0;
for (i = 0, insn = prog->insnsi; i < prog->len; i++, insn++) {
@@ -5578,6 +5583,8 @@ static int fixup_call_args(struct bpf_verifier_env *env)
err = jit_subprogs(env);
if (err == 0)
return 0;
+ if (err == -EFAULT)
+ return err;
}
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
for (i = 0; i < prog->len; i++, insn++) {
diff --git a/kernel/dma/Kconfig b/kernel/dma/Kconfig
new file mode 100644
index 000000000000..9bd54304446f
--- /dev/null
+++ b/kernel/dma/Kconfig
@@ -0,0 +1,50 @@
+
+config HAS_DMA
+ bool
+ depends on !NO_DMA
+ default y
+
+config NEED_SG_DMA_LENGTH
+ bool
+
+config NEED_DMA_MAP_STATE
+ bool
+
+config ARCH_DMA_ADDR_T_64BIT
+ def_bool 64BIT || PHYS_ADDR_T_64BIT
+
+config HAVE_GENERIC_DMA_COHERENT
+ bool
+
+config ARCH_HAS_SYNC_DMA_FOR_DEVICE
+ bool
+
+config ARCH_HAS_SYNC_DMA_FOR_CPU
+ bool
+ select NEED_DMA_MAP_STATE
+
+config DMA_DIRECT_OPS
+ bool
+ depends on HAS_DMA
+
+config DMA_NONCOHERENT_OPS
+ bool
+ depends on HAS_DMA
+ select DMA_DIRECT_OPS
+
+config DMA_NONCOHERENT_MMAP
+ bool
+ depends on DMA_NONCOHERENT_OPS
+
+config DMA_NONCOHERENT_CACHE_SYNC
+ bool
+ depends on DMA_NONCOHERENT_OPS
+
+config DMA_VIRT_OPS
+ bool
+ depends on HAS_DMA
+
+config SWIOTLB
+ bool
+ select DMA_DIRECT_OPS
+ select NEED_DMA_MAP_STATE
diff --git a/kernel/dma/Makefile b/kernel/dma/Makefile
new file mode 100644
index 000000000000..6de44e4eb454
--- /dev/null
+++ b/kernel/dma/Makefile
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_HAS_DMA) += mapping.o
+obj-$(CONFIG_DMA_CMA) += contiguous.o
+obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += coherent.o
+obj-$(CONFIG_DMA_DIRECT_OPS) += direct.o
+obj-$(CONFIG_DMA_NONCOHERENT_OPS) += noncoherent.o
+obj-$(CONFIG_DMA_VIRT_OPS) += virt.o
+obj-$(CONFIG_DMA_API_DEBUG) += debug.o
+obj-$(CONFIG_SWIOTLB) += swiotlb.o
+
diff --git a/drivers/base/dma-coherent.c b/kernel/dma/coherent.c
index 597d40893862..597d40893862 100644
--- a/drivers/base/dma-coherent.c
+++ b/kernel/dma/coherent.c
diff --git a/drivers/base/dma-contiguous.c b/kernel/dma/contiguous.c
index d987dcd1bd56..d987dcd1bd56 100644
--- a/drivers/base/dma-contiguous.c
+++ b/kernel/dma/contiguous.c
diff --git a/lib/dma-debug.c b/kernel/dma/debug.c
index c007d25bee09..c007d25bee09 100644
--- a/lib/dma-debug.c
+++ b/kernel/dma/debug.c
diff --git a/lib/dma-direct.c b/kernel/dma/direct.c
index 8be8106270c2..8be8106270c2 100644
--- a/lib/dma-direct.c
+++ b/kernel/dma/direct.c
diff --git a/drivers/base/dma-mapping.c b/kernel/dma/mapping.c
index f831a582209c..d2a92ddaac4d 100644
--- a/drivers/base/dma-mapping.c
+++ b/kernel/dma/mapping.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
+ * arch-independent dma-mapping routines
*
* Copyright (c) 2006 SUSE Linux Products GmbH
* Copyright (c) 2006 Tejun Heo <teheo@suse.de>
diff --git a/lib/dma-noncoherent.c b/kernel/dma/noncoherent.c
index 79e9a757387f..79e9a757387f 100644
--- a/lib/dma-noncoherent.c
+++ b/kernel/dma/noncoherent.c
diff --git a/lib/swiotlb.c b/kernel/dma/swiotlb.c
index 04b68d9dffac..904541055792 100644
--- a/lib/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -1085,3 +1085,4 @@ const struct dma_map_ops swiotlb_dma_ops = {
.unmap_page = swiotlb_unmap_page,
.dma_supported = dma_direct_supported,
};
+EXPORT_SYMBOL(swiotlb_dma_ops);
diff --git a/lib/dma-virt.c b/kernel/dma/virt.c
index 8e61a02ef9ca..631ddec4b60a 100644
--- a/lib/dma-virt.c
+++ b/kernel/dma/virt.c
@@ -1,7 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * lib/dma-virt.c
- *
* DMA operations that map to virtual addresses without flushing memory.
*/
#include <linux/export.h>
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 80cca2b30c4f..8f0434a9951a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6482,7 +6482,7 @@ void perf_prepare_sample(struct perf_event_header *header,
data->phys_addr = perf_virt_to_phys(data->addr);
}
-static void __always_inline
+static __always_inline void
__perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs,
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 045a37e9ddee..5d3cf407e374 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -103,7 +103,7 @@ out:
preempt_enable();
}
-static bool __always_inline
+static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
unsigned long data_size, unsigned int size,
bool backward)
@@ -114,7 +114,7 @@ ring_buffer_has_space(unsigned long head, unsigned long tail,
return CIRC_SPACE(tail, head, data_size) >= size;
}
-static int __always_inline
+static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
bool backward)
@@ -414,7 +414,7 @@ err:
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
{
if (rb->aux_overwrite)
return false;
diff --git a/kernel/fork.c b/kernel/fork.c
index 9440d61b925c..1b27babc4c78 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -303,11 +303,36 @@ struct kmem_cache *files_cachep;
struct kmem_cache *fs_cachep;
/* SLAB cache for vm_area_struct structures */
-struct kmem_cache *vm_area_cachep;
+static struct kmem_cache *vm_area_cachep;
/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *mm)
+{
+ struct vm_area_struct *vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+
+ if (vma)
+ vma_init(vma, mm);
+ return vma;
+}
+
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *orig)
+{
+ struct vm_area_struct *new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+
+ if (new) {
+ *new = *orig;
+ INIT_LIST_HEAD(&new->anon_vma_chain);
+ }
+ return new;
+}
+
+void vm_area_free(struct vm_area_struct *vma)
+{
+ kmem_cache_free(vm_area_cachep, vma);
+}
+
static void account_kernel_stack(struct task_struct *tsk, int account)
{
void *stack = task_stack_page(tsk);
@@ -455,11 +480,9 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm,
goto fail_nomem;
charge = len;
}
- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ tmp = vm_area_dup(mpnt);
if (!tmp)
goto fail_nomem;
- *tmp = *mpnt;
- INIT_LIST_HEAD(&tmp->anon_vma_chain);
retval = vma_dup_policy(mpnt, tmp);
if (retval)
goto fail_nomem_policy;
@@ -539,7 +562,7 @@ fail_uprobe_end:
fail_nomem_anon_vma_fork:
mpol_put(vma_policy(tmp));
fail_nomem_policy:
- kmem_cache_free(vm_area_cachep, tmp);
+ vm_area_free(tmp);
fail_nomem:
retval = -ENOMEM;
vm_unacct_memory(charge);
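
With vm_area_cachep now static, other call sites are expected to go through the new helpers. A hedged sketch of a converted caller, assuming the helpers are declared in <linux/mm.h>; make_anon_vma() and its field setup are illustrative only, and error paths elsewhere would use vm_area_free().

#include <linux/mm.h>

static struct vm_area_struct *make_anon_vma(struct mm_struct *mm,
					    unsigned long start,
					    unsigned long end)
{
	struct vm_area_struct *vma = vm_area_alloc(mm);	/* zeroed + vma_init() */

	if (!vma)
		return NULL;
	vma->vm_start = start;
	vma->vm_end = end;
	return vma;
}
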
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 4dadeb3d6666..6f636136cccc 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -55,6 +55,7 @@ static const struct irq_bit_descr irqchip_flags[] = {
BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+ BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
};
static void
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 481951bf091d..486dedbd9af5 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -177,9 +177,20 @@ void *kthread_probe_data(struct task_struct *task)
static void __kthread_parkme(struct kthread *self)
{
for (;;) {
- set_current_state(TASK_PARKED);
+ /*
+ * TASK_PARKED is a special state; we must serialize against
+ * possible pending wakeups to avoid store-store collisions on
+ * task->state.
+ *
+ * Such a collision might result in the task state changing
+ * from TASK_PARKED and us failing the
+ * wait_task_inactive() in kthread_park().
+ */
+ set_special_state(TASK_PARKED);
if (!test_bit(KTHREAD_SHOULD_PARK, &self->flags))
break;
+
+ complete_all(&self->parked);
schedule();
}
__set_current_state(TASK_RUNNING);
@@ -191,11 +202,6 @@ void kthread_parkme(void)
}
EXPORT_SYMBOL_GPL(kthread_parkme);
-void kthread_park_complete(struct task_struct *k)
-{
- complete_all(&to_kthread(k)->parked);
-}
-
static int kthread(void *_create)
{
/* Copy data: it's on kthread's stack */
@@ -319,8 +325,14 @@ struct task_struct *__kthread_create_on_node(int (*threadfn)(void *data),
task = create->result;
if (!IS_ERR(task)) {
static const struct sched_param param = { .sched_priority = 0 };
+ char name[TASK_COMM_LEN];
- vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
+ /*
+ * task is already visible to other tasks, so updating
+ * COMM must be protected.
+ */
+ vsnprintf(name, sizeof(name), namefmt, args);
+ set_task_comm(task, name);
/*
* root may have changed our (kthreadd's) priority or CPU mask.
* The kernel thread should not inherit these properties.
@@ -461,6 +473,9 @@ void kthread_unpark(struct task_struct *k)
reinit_completion(&kthread->parked);
clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
+ /*
+ * __kthread_parkme() will either see !SHOULD_PARK or get the wakeup.
+ */
wake_up_state(k, TASK_PARKED);
}
EXPORT_SYMBOL_GPL(kthread_unpark);
@@ -487,7 +502,16 @@ int kthread_park(struct task_struct *k)
set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
if (k != current) {
wake_up_process(k);
+ /*
+ * Wait for __kthread_parkme() to complete(), this means we
+ * _will_ have TASK_PARKED and are about to call schedule().
+ */
wait_for_completion(&kthread->parked);
+ /*
+ * Now wait for that schedule() to complete and the task to
+ * get scheduled out.
+ */
+ WARN_ON_ONCE(!wait_task_inactive(k, TASK_PARKED));
}
return 0;
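
For context, a typical consumer of the parking API whose internals are reworked above: the thread function parks itself when asked and resumes once unparked. my_thread_fn() and its work step are placeholders.

#include <linux/kthread.h>
#include <linux/sched.h>

static int my_thread_fn(void *data)
{
	while (!kthread_should_stop()) {
		if (kthread_should_park())
			kthread_parkme();	/* sleeps in TASK_PARKED */

		/* ... one unit of work ... */
		schedule_timeout_interruptible(HZ);
	}
	return 0;
}
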
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index edcac5de7ebc..5fa4d3138bf1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1265,11 +1265,11 @@ unsigned long lockdep_count_forward_deps(struct lock_class *class)
this.parent = NULL;
this.class = class;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return ret;
}
@@ -1292,11 +1292,11 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
this.parent = NULL;
this.class = class;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return ret;
}
@@ -4411,7 +4411,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
if (unlikely(!debug_locks))
return;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -4422,7 +4422,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index bc1e507be9ff..776308d2fa9e 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -181,6 +181,7 @@ void down_read_non_owner(struct rw_semaphore *sem)
might_sleep();
__down_read(sem);
+ rwsem_set_reader_owned(sem);
}
EXPORT_SYMBOL(down_read_non_owner);
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 5857267a4af5..38283363da06 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -176,10 +176,27 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
unsigned long pfn, pgoff, order;
pgprot_t pgprot = PAGE_KERNEL;
int error, nid, is_ram;
+ struct dev_pagemap *conflict_pgmap;
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(res->start + resource_size(res), SECTION_SIZE)
- align_start;
+ align_end = align_start + align_size - 1;
+
+ conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_start), NULL);
+ if (conflict_pgmap) {
+ dev_WARN(dev, "Conflicting mapping in same section\n");
+ put_dev_pagemap(conflict_pgmap);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ conflict_pgmap = get_dev_pagemap(PHYS_PFN(align_end), NULL);
+ if (conflict_pgmap) {
+ dev_WARN(dev, "Conflicting mapping in same section\n");
+ put_dev_pagemap(conflict_pgmap);
+ return ERR_PTR(-ENOMEM);
+ }
+
is_ram = region_intersects(align_start, align_size,
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
@@ -199,7 +216,6 @@ void *devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
mutex_lock(&pgmap_lock);
error = 0;
- align_end = align_start + align_size - 1;
foreach_order_pgoff(res, order, pgoff) {
error = __radix_tree_insert(&pgmap_radix,
@@ -305,7 +321,7 @@ EXPORT_SYMBOL_GPL(get_dev_pagemap);
#ifdef CONFIG_DEV_PAGEMAP_OPS
DEFINE_STATIC_KEY_FALSE(devmap_managed_key);
-EXPORT_SYMBOL_GPL(devmap_managed_key);
+EXPORT_SYMBOL(devmap_managed_key);
static atomic_t devmap_enable;
/*
@@ -346,5 +362,5 @@ void __put_devmap_managed_page(struct page *page)
} else if (!count)
__put_page(page);
}
-EXPORT_SYMBOL_GPL(__put_devmap_managed_page);
+EXPORT_SYMBOL(__put_devmap_managed_page);
#endif /* CONFIG_DEV_PAGEMAP_OPS */
diff --git a/kernel/rseq.c b/kernel/rseq.c
index ae306f90c514..c6242d8594dc 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -85,9 +85,9 @@ static int rseq_update_cpu_id(struct task_struct *t)
{
u32 cpu_id = raw_smp_processor_id();
- if (__put_user(cpu_id, &t->rseq->cpu_id_start))
+ if (put_user(cpu_id, &t->rseq->cpu_id_start))
return -EFAULT;
- if (__put_user(cpu_id, &t->rseq->cpu_id))
+ if (put_user(cpu_id, &t->rseq->cpu_id))
return -EFAULT;
trace_rseq_update(t);
return 0;
@@ -100,14 +100,14 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
/*
* Reset cpu_id_start to its initial state (0).
*/
- if (__put_user(cpu_id_start, &t->rseq->cpu_id_start))
+ if (put_user(cpu_id_start, &t->rseq->cpu_id_start))
return -EFAULT;
/*
* Reset cpu_id to RSEQ_CPU_ID_UNINITIALIZED, so any user coming
* in after unregistration can figure out that rseq needs to be
* registered again.
*/
- if (__put_user(cpu_id, &t->rseq->cpu_id))
+ if (put_user(cpu_id, &t->rseq->cpu_id))
return -EFAULT;
return 0;
}
@@ -115,29 +115,36 @@ static int rseq_reset_rseq_cpu_id(struct task_struct *t)
static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
{
struct rseq_cs __user *urseq_cs;
- unsigned long ptr;
+ u64 ptr;
u32 __user *usig;
u32 sig;
int ret;
- ret = __get_user(ptr, &t->rseq->rseq_cs);
- if (ret)
- return ret;
+ if (copy_from_user(&ptr, &t->rseq->rseq_cs.ptr64, sizeof(ptr)))
+ return -EFAULT;
if (!ptr) {
memset(rseq_cs, 0, sizeof(*rseq_cs));
return 0;
}
- urseq_cs = (struct rseq_cs __user *)ptr;
+ if (ptr >= TASK_SIZE)
+ return -EINVAL;
+ urseq_cs = (struct rseq_cs __user *)(unsigned long)ptr;
if (copy_from_user(rseq_cs, urseq_cs, sizeof(*rseq_cs)))
return -EFAULT;
- if (rseq_cs->version > 0)
- return -EINVAL;
+ if (rseq_cs->start_ip >= TASK_SIZE ||
+ rseq_cs->start_ip + rseq_cs->post_commit_offset >= TASK_SIZE ||
+ rseq_cs->abort_ip >= TASK_SIZE ||
+ rseq_cs->version > 0)
+ return -EINVAL;
+ /* Check for overflow. */
+ if (rseq_cs->start_ip + rseq_cs->post_commit_offset < rseq_cs->start_ip)
+ return -EINVAL;
/* Ensure that abort_ip is not in the critical section. */
if (rseq_cs->abort_ip - rseq_cs->start_ip < rseq_cs->post_commit_offset)
return -EINVAL;
- usig = (u32 __user *)(rseq_cs->abort_ip - sizeof(u32));
+ usig = (u32 __user *)(unsigned long)(rseq_cs->abort_ip - sizeof(u32));
ret = get_user(sig, usig);
if (ret)
return ret;
@@ -146,7 +153,7 @@ static int rseq_get_rseq_cs(struct task_struct *t, struct rseq_cs *rseq_cs)
printk_ratelimited(KERN_WARNING
"Possible attack attempt. Unexpected rseq signature 0x%x, expecting 0x%x (pid=%d, addr=%p).\n",
sig, current->rseq_sig, current->pid, usig);
- return -EPERM;
+ return -EINVAL;
}
return 0;
}
@@ -157,7 +164,7 @@ static int rseq_need_restart(struct task_struct *t, u32 cs_flags)
int ret;
/* Get thread flags. */
- ret = __get_user(flags, &t->rseq->flags);
+ ret = get_user(flags, &t->rseq->flags);
if (ret)
return ret;
@@ -195,9 +202,11 @@ static int clear_rseq_cs(struct task_struct *t)
* of code outside of the rseq assembly block. This performs
* a lazy clear of the rseq_cs field.
*
- * Set rseq_cs to NULL with single-copy atomicity.
+ * Set rseq_cs to NULL.
*/
- return __put_user(0UL, &t->rseq->rseq_cs);
+ if (clear_user(&t->rseq->rseq_cs.ptr64, sizeof(t->rseq->rseq_cs.ptr64)))
+ return -EFAULT;
+ return 0;
}
/*
@@ -251,10 +260,10 @@ static int rseq_ip_fixup(struct pt_regs *regs)
* respect to other threads scheduled on the same CPU, and with respect
* to signal handlers.
*/
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
{
struct task_struct *t = current;
- int ret;
+ int ret, sig;
if (unlikely(t->flags & PF_EXITING))
return;
@@ -268,7 +277,8 @@ void __rseq_handle_notify_resume(struct pt_regs *regs)
return;
error:
- force_sig(SIGSEGV, t);
+ sig = ksig ? ksig->sig : 0;
+ force_sigsegv(sig, t);
}
#ifdef CONFIG_DEBUG_RSEQ
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0f79bb5ffb5a..5ba96d9ddbde 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7,7 +7,6 @@
*/
#include "sched.h"
-#include <linux/kthread.h>
#include <linux/nospec.h>
#include <linux/kcov.h>
@@ -2724,28 +2723,20 @@ static struct rq *finish_task_switch(struct task_struct *prev)
membarrier_mm_sync_core_before_usermode(mm);
mmdrop(mm);
}
- if (unlikely(prev_state & (TASK_DEAD|TASK_PARKED))) {
- switch (prev_state) {
- case TASK_DEAD:
- if (prev->sched_class->task_dead)
- prev->sched_class->task_dead(prev);
+ if (unlikely(prev_state == TASK_DEAD)) {
+ if (prev->sched_class->task_dead)
+ prev->sched_class->task_dead(prev);
- /*
- * Remove function-return probe instances associated with this
- * task and put them back on the free list.
- */
- kprobe_flush_task(prev);
-
- /* Task is done with its stack. */
- put_task_stack(prev);
+ /*
+ * Remove function-return probe instances associated with this
+ * task and put them back on the free list.
+ */
+ kprobe_flush_task(prev);
- put_task_struct(prev);
- break;
+ /* Task is done with its stack. */
+ put_task_stack(prev);
- case TASK_PARKED:
- kthread_park_complete(prev);
- break;
- }
+ put_task_struct(prev);
}
tick_nohz_task_switch();
@@ -3113,7 +3104,9 @@ static void sched_tick_remote(struct work_struct *work)
struct tick_work *twork = container_of(dwork, struct tick_work, work);
int cpu = twork->cpu;
struct rq *rq = cpu_rq(cpu);
+ struct task_struct *curr;
struct rq_flags rf;
+ u64 delta;
/*
* Handle the tick only if it appears the remote CPU is running in full
@@ -3122,24 +3115,28 @@ static void sched_tick_remote(struct work_struct *work)
* statistics and checks timeslices in a time-independent way, regardless
* of when exactly it is running.
*/
- if (!idle_cpu(cpu) && tick_nohz_tick_stopped_cpu(cpu)) {
- struct task_struct *curr;
- u64 delta;
+ if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+ goto out_requeue;
- rq_lock_irq(rq, &rf);
- update_rq_clock(rq);
- curr = rq->curr;
- delta = rq_clock_task(rq) - curr->se.exec_start;
+ rq_lock_irq(rq, &rf);
+ curr = rq->curr;
+ if (is_idle_task(curr))
+ goto out_unlock;
- /*
- * Make sure the next tick runs within a reasonable
- * amount of time.
- */
- WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
- curr->sched_class->task_tick(rq, curr, 0);
- rq_unlock_irq(rq, &rf);
- }
+ update_rq_clock(rq);
+ delta = rq_clock_task(rq) - curr->se.exec_start;
+
+ /*
+ * Make sure the next tick runs within a reasonable
+ * amount of time.
+ */
+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ curr->sched_class->task_tick(rq, curr, 0);
+
+out_unlock:
+ rq_unlock_irq(rq, &rf);
+out_requeue:
/*
* Run the remote tick once per second (1Hz). This arbitrary
* frequency is large enough to avoid overload but short enough
diff --git a/kernel/sched/cpufreq_schedutil.c b/kernel/sched/cpufreq_schedutil.c
index 3cde46483f0a..c907fde01eaa 100644
--- a/kernel/sched/cpufreq_schedutil.c
+++ b/kernel/sched/cpufreq_schedutil.c
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
{
struct rq *rq = cpu_rq(sg_cpu->cpu);
- if (rq->rt.rt_nr_running)
+ if (rt_rq_is_runnable(&rq->rt))
return sg_cpu->max;
/*
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index fbfc3f1d368a..10c7b51c0d1f 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -2290,8 +2290,17 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
if (task_on_rq_queued(p) && p->dl.dl_runtime)
task_non_contending(p);
- if (!task_on_rq_queued(p))
+ if (!task_on_rq_queued(p)) {
+ /*
+ * Inactive timer is armed. However, p is leaving DEADLINE and
+ * might migrate away from this rq while continuing to run on
+ * some other class. We need to remove its contribution from
+ * this rq running_bw now, or sub_rq_bw (below) will complain.
+ */
+ if (p->dl.dl_non_contending)
+ sub_running_bw(&p->dl, &rq->dl);
sub_rq_bw(&p->dl, &rq->dl);
+ }
/*
* We cannot use inactive_task_timer() to invoke sub_running_bw()
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index cacdbef99b95..9c219f7b0970 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3982,18 +3982,10 @@ util_est_dequeue(struct cfs_rq *cfs_rq, struct task_struct *p, bool task_sleep)
if (!sched_feat(UTIL_EST))
return;
- /*
- * Update root cfs_rq's estimated utilization
- *
- * If *p is the last task then the root cfs_rq's estimated utilization
- * of a CPU is 0 by definition.
- */
- ue.enqueued = 0;
- if (cfs_rq->nr_running) {
- ue.enqueued = cfs_rq->avg.util_est.enqueued;
- ue.enqueued -= min_t(unsigned int, ue.enqueued,
- (_task_util_est(p) | UTIL_AVG_UNCHANGED));
- }
+ /* Update root cfs_rq's estimated utilization */
+ ue.enqueued = cfs_rq->avg.util_est.enqueued;
+ ue.enqueued -= min_t(unsigned int, ue.enqueued,
+ (_task_util_est(p) | UTIL_AVG_UNCHANGED));
WRITE_ONCE(cfs_rq->avg.util_est.enqueued, ue.enqueued);
/*
@@ -4590,6 +4582,7 @@ void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
now = sched_clock_cpu(smp_processor_id());
cfs_b->runtime = cfs_b->quota;
cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
+ cfs_b->expires_seq++;
}
static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg)
@@ -4612,6 +4605,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
struct task_group *tg = cfs_rq->tg;
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg);
u64 amount = 0, min_amount, expires;
+ int expires_seq;
/* note: this is a positive sum as runtime_remaining <= 0 */
min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining;
@@ -4628,6 +4622,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
cfs_b->idle = 0;
}
}
+ expires_seq = cfs_b->expires_seq;
expires = cfs_b->runtime_expires;
raw_spin_unlock(&cfs_b->lock);
@@ -4637,8 +4632,10 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
* spread between our sched_clock and the one on which runtime was
* issued.
*/
- if ((s64)(expires - cfs_rq->runtime_expires) > 0)
+ if (cfs_rq->expires_seq != expires_seq) {
+ cfs_rq->expires_seq = expires_seq;
cfs_rq->runtime_expires = expires;
+ }
return cfs_rq->runtime_remaining > 0;
}
@@ -4664,12 +4661,9 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
* has not truly expired.
*
* Fortunately we can determine whether this is the case by checking
- * whether the global deadline has advanced. It is valid to compare
- * cfs_b->runtime_expires without any locks since we only care about
- * exact equality, so a partial write will still work.
+ * whether the global deadline (cfs_b->expires_seq) has advanced.
*/
-
- if (cfs_rq->runtime_expires != cfs_b->runtime_expires) {
+ if (cfs_rq->expires_seq == cfs_b->expires_seq) {
/* extend local deadline, drift is bounded above by 2 ticks */
cfs_rq->runtime_expires += TICK_NSEC;
} else {
@@ -5202,13 +5196,18 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
+ u64 overrun;
+
lockdep_assert_held(&cfs_b->lock);
- if (!cfs_b->period_active) {
- cfs_b->period_active = 1;
- hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
- hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
- }
+ if (cfs_b->period_active)
+ return;
+
+ cfs_b->period_active = 1;
+ overrun = hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+ cfs_b->runtime_expires += (overrun + 1) * ktime_to_ns(cfs_b->period);
+ cfs_b->expires_seq++;
+ hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
}
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 47556b0c9a95..572567078b60 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
rt_se = rt_rq->tg->rt_se[cpu];
- if (!rt_se)
+ if (!rt_se) {
dequeue_top_rt_rq(rt_rq);
+ /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+ cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+ }
else if (on_rt_rq(rt_se))
dequeue_rt_entity(rt_se, 0);
}
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
sub_nr_running(rq, rt_rq->rt_nr_running);
rt_rq->rt_queued = 0;
- /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
- cpufreq_update_util(rq, 0);
}
static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
if (rt_rq->rt_queued)
return;
- if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+ if (rt_rq_throttled(rt_rq))
return;
- add_nr_running(rq, rt_rq->rt_nr_running);
- rt_rq->rt_queued = 1;
+ if (rt_rq->rt_nr_running) {
+ add_nr_running(rq, rt_rq->rt_nr_running);
+ rt_rq->rt_queued = 1;
+ }
/* Kick cpufreq (see the comment in kernel/sched/sched.h). */
cpufreq_update_util(rq, 0);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 6601baf2361c..c7742dcc136c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -334,9 +334,10 @@ struct cfs_bandwidth {
u64 runtime;
s64 hierarchical_quota;
u64 runtime_expires;
+ int expires_seq;
- int idle;
- int period_active;
+ short idle;
+ short period_active;
struct hrtimer period_timer;
struct hrtimer slack_timer;
struct list_head throttled_cfs_rq;
@@ -551,6 +552,7 @@ struct cfs_rq {
#ifdef CONFIG_CFS_BANDWIDTH
int runtime_enabled;
+ int expires_seq;
u64 runtime_expires;
s64 runtime_remaining;
@@ -609,6 +611,11 @@ struct rt_rq {
#endif
};
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+ return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
/* Deadline class' related fields in a runqueue */
struct dl_rq {
/* runqueue is an rbtree, ordered by deadline */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index de2f57fddc04..75ffc1d1a2e0 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -79,12 +79,16 @@ static void wakeup_softirqd(void)
/*
* If ksoftirqd is scheduled, we do not want to process pending softirqs
- * right now. Let ksoftirqd handle this at its own rate, to get fairness.
+ * right now. Let ksoftirqd handle this at its own rate, to get fairness,
+ * unless we're doing some of the synchronous softirqs.
*/
-static bool ksoftirqd_running(void)
+#define SOFTIRQ_NOW_MASK ((1 << HI_SOFTIRQ) | (1 << TASKLET_SOFTIRQ))
+static bool ksoftirqd_running(unsigned long pending)
{
struct task_struct *tsk = __this_cpu_read(ksoftirqd);
+ if (pending & SOFTIRQ_NOW_MASK)
+ return false;
return tsk && (tsk->state == TASK_RUNNING);
}
@@ -139,9 +143,13 @@ static void __local_bh_enable(unsigned int cnt)
{
lockdep_assert_irqs_disabled();
+ if (preempt_count() == cnt)
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
- preempt_count_sub(cnt);
+
+ __preempt_count_sub(cnt);
}
/*
@@ -324,7 +332,7 @@ asmlinkage __visible void do_softirq(void)
pending = local_softirq_pending();
- if (pending && !ksoftirqd_running())
+ if (pending && !ksoftirqd_running(pending))
do_softirq_own_stack();
local_irq_restore(flags);
@@ -351,7 +359,7 @@ void irq_enter(void)
static inline void invoke_softirq(void)
{
- if (ksoftirqd_running())
+ if (ksoftirqd_running(local_softirq_pending()))
return;
if (!force_irqthreads) {
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index f89014a2c238..1ff523dae6e2 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -270,7 +270,11 @@ unlock:
goto retry;
}
- wake_up_q(&wakeq);
+ if (!err) {
+ preempt_disable();
+ wake_up_q(&wakeq);
+ preempt_enable();
+ }
return err;
}
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 055a4a728c00..3e93c54bd3a1 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1659,7 +1659,7 @@ EXPORT_SYMBOL_GPL(hrtimer_init_sleeper);
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
case TT_COMPAT:
if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
return -EFAULT;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 5a6251ac6f7a..9cdf54b04ca8 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -604,7 +604,6 @@ static int posix_cpu_timer_set(struct k_itimer *timer, int timer_flags,
/*
* Disarm any old timer after extracting its expiry time.
*/
- lockdep_assert_irqs_disabled();
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@ static void posix_cpu_timer_rearm(struct k_itimer *timer)
/*
* Now re-arm for the new expiry time.
*/
- lockdep_assert_irqs_disabled();
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c
index b7005dd21ec1..14de3727b18e 100644
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -277,8 +277,7 @@ static bool tick_check_preferred(struct clock_event_device *curdev,
*/
return !curdev ||
newdev->rating > curdev->rating ||
- (!cpumask_equal(curdev->cpumask, newdev->cpumask) &&
- !tick_check_percpu(curdev, newdev, smp_processor_id()));
+ !cpumask_equal(curdev->cpumask, newdev->cpumask);
}
/*
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6fa99213fc72..2b41e8e2d31d 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -28,6 +28,7 @@
*/
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
@@ -314,9 +315,10 @@ unsigned int jiffies_to_msecs(const unsigned long j)
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
- return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+ return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+ HZ_TO_MSEC_SHR32;
# else
- return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+ return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index efed9c1cfb7e..caf9cbf35816 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -192,17 +192,6 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
op->saved_func(ip, parent_ip, op, regs);
}
-/**
- * clear_ftrace_function - reset the ftrace function
- *
- * This NULLs the ftrace function and in essence stops
- * tracing. There may be lag
- */
-void clear_ftrace_function(void)
-{
- ftrace_trace_function = ftrace_stub;
-}
-
static void ftrace_sync(struct work_struct *work)
{
/*
@@ -6689,7 +6678,7 @@ void ftrace_kill(void)
{
ftrace_disabled = 1;
ftrace_enabled = 0;
- clear_ftrace_function();
+ ftrace_trace_function = ftrace_stub;
}
/**
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 6a46af21765c..0b0b688ea166 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3227,6 +3227,22 @@ int ring_buffer_record_is_on(struct ring_buffer *buffer)
}
/**
+ * ring_buffer_record_is_set_on - return true if the ring buffer is set writable
+ * @buffer: The ring buffer to check for being writable
+ *
+ * Returns true if the ring buffer is set writable by ring_buffer_record_on().
+ * Note that this does NOT mean it is in a writable state.
+ *
+ * It may return true when the ring buffer has been disabled by
+ * ring_buffer_record_disable(), as that is a temporary disabling of
+ * the ring buffer.
+ */
+int ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+{
+ return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
+}
+
+/**
* ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
* @buffer: The ring buffer to stop writes to.
* @cpu: The CPU buffer to stop
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c9336e98ac59..823687997b01 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1360,8 +1360,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct ring_buffer *buf;
-
if (tr->stop_count)
return;
@@ -1375,9 +1373,13 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
arch_spin_lock(&tr->max_lock);
- buf = tr->trace_buffer.buffer;
- tr->trace_buffer.buffer = tr->max_buffer.buffer;
- tr->max_buffer.buffer = buf;
+ /* Inherit the recordable setting from trace_buffer */
+ if (ring_buffer_record_is_set_on(tr->trace_buffer.buffer))
+ ring_buffer_record_on(tr->max_buffer.buffer);
+ else
+ ring_buffer_record_off(tr->max_buffer.buffer);
+
+ swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&tr->max_lock);
@@ -2957,6 +2959,7 @@ out_nobuffer:
}
EXPORT_SYMBOL_GPL(trace_vbprintk);
+__printf(3, 0)
static int
__trace_array_vprintk(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, va_list args)
@@ -3011,12 +3014,14 @@ out_nobuffer:
return len;
}
+__printf(3, 0)
int trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args)
{
return __trace_array_vprintk(tr->trace_buffer.buffer, ip, fmt, args);
}
+__printf(3, 0)
int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...)
{
@@ -3032,6 +3037,7 @@ int trace_array_printk(struct trace_array *tr,
return ret;
}
+__printf(3, 4)
int trace_array_printk_buf(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, ...)
{
@@ -3047,6 +3053,7 @@ int trace_array_printk_buf(struct ring_buffer *buffer,
return ret;
}
+__printf(2, 0)
int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
{
return trace_array_vprintk(&global_trace, ip, fmt, args);
@@ -3364,8 +3371,8 @@ static void print_func_help_header(struct trace_buffer *buf, struct seq_file *m,
print_event_info(buf, m);
- seq_printf(m, "# TASK-PID CPU# %s TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
- seq_printf(m, "# | | | %s | |\n", tgid ? " | " : "");
+ seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : "");
+ seq_printf(m, "# | | %s | | |\n", tgid ? " | " : "");
}
static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file *m,
@@ -3385,9 +3392,9 @@ static void print_func_help_header_irq(struct trace_buffer *buf, struct seq_file
tgid ? tgid_space : space);
seq_printf(m, "# %s||| / delay\n",
tgid ? tgid_space : space);
- seq_printf(m, "# TASK-PID CPU#%s|||| TIMESTAMP FUNCTION\n",
+ seq_printf(m, "# TASK-PID %sCPU# |||| TIMESTAMP FUNCTION\n",
tgid ? " TGID " : space);
- seq_printf(m, "# | | | %s|||| | |\n",
+ seq_printf(m, "# | | %s | |||| | |\n",
tgid ? " | " : space);
}
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 630c5a24b2b2..f8f86231ad90 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -583,9 +583,7 @@ static __always_inline void trace_clear_recursion(int bit)
static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu)
{
- if (iter->buffer_iter && iter->buffer_iter[cpu])
- return iter->buffer_iter[cpu];
- return NULL;
+ return iter->buffer_iter ? iter->buffer_iter[cpu] : NULL;
}
int tracer_init(struct tracer *t, struct trace_array *tr);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e1c818dbc0d7..893a206bcba4 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -78,7 +78,8 @@ static const char * ops[] = { OPS };
C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \
C(INVALID_FILTER, "Meaningless filter expression"), \
C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
- C(INVALID_VALUE, "Invalid value (did you forget quotes)?"),
+ C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
+ C(NO_FILTER, "No filter found"),
#undef C
#define C(a, b) FILT_ERR_##a
@@ -550,6 +551,13 @@ predicate_parse(const char *str, int nr_parens, int nr_preds,
goto out_free;
}
+ if (!N) {
+ /* No program? */
+ ret = -EINVAL;
+ parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+ goto out_free;
+ }
+
prog[N].pred = NULL; /* #13 */
prog[N].target = 1; /* TRUE */
prog[N+1].pred = NULL;
@@ -1693,6 +1701,7 @@ static void create_filter_finish(struct filter_parse_error *pe)
* @filter_str: filter string
* @set_str: remember @filter_str and enable detailed error in filter
* @filterp: out param for created filter (always updated on return)
+ * Must be a pointer that references a NULL pointer.
*
* Creates a filter for @call with @filter_str. If @set_str is %true,
* @filter_str is copied and recorded in the new filter.
@@ -1710,6 +1719,10 @@ static int create_filter(struct trace_event_call *call,
struct filter_parse_error *pe = NULL;
int err;
+ /* filterp must point to NULL */
+ if (WARN_ON(*filterp))
+ *filterp = NULL;
+
err = create_filter_start(filter_string, set_str, &pe, filterp);
if (err)
return err;
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index 046c716a6536..aae18af94c94 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -393,7 +393,7 @@ static void hist_err_event(char *str, char *system, char *event, char *var)
else if (system)
snprintf(err, MAX_FILTER_STR_VAL, "%s.%s", system, event);
else
- strncpy(err, var, MAX_FILTER_STR_VAL);
+ strscpy(err, var, MAX_FILTER_STR_VAL);
hist_err(str, err);
}
diff --git a/kernel/trace/trace_events_trigger.c b/kernel/trace/trace_events_trigger.c
index d18249683682..5dea177cef53 100644
--- a/kernel/trace/trace_events_trigger.c
+++ b/kernel/trace/trace_events_trigger.c
@@ -679,6 +679,8 @@ event_trigger_callback(struct event_command *cmd_ops,
goto out_free;
out_reg:
+ /* Up the trigger_data count to make sure reg doesn't free it on failure */
+ event_trigger_init(trigger_ops, trigger_data);
ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
/*
* The above returns on success the # of functions enabled,
@@ -686,11 +688,13 @@ event_trigger_callback(struct event_command *cmd_ops,
* Consider no functions a failure too.
*/
if (!ret) {
+ cmd_ops->unreg(glob, trigger_ops, trigger_data, file);
ret = -ENOENT;
- goto out_free;
- } else if (ret < 0)
- goto out_free;
- ret = 0;
+ } else if (ret > 0)
+ ret = 0;
+
+ /* Down the counter of trigger_data or free it if not used anymore */
+ event_trigger_free(trigger_ops, trigger_data);
out:
return ret;
@@ -1416,6 +1420,9 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
goto out;
}
+ /* Up the trigger_data count to make sure nothing frees it on failure */
+ event_trigger_init(trigger_ops, trigger_data);
+
if (trigger) {
number = strsep(&trigger, ":");
@@ -1466,6 +1473,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
goto out_disable;
/* Just return zero, not the number of enabled functions */
ret = 0;
+ event_trigger_free(trigger_ops, trigger_data);
out:
return ret;
@@ -1476,7 +1484,7 @@ int event_enable_trigger_func(struct event_command *cmd_ops,
out_free:
if (cmd_ops->set_filter)
cmd_ops->set_filter(NULL, trigger_data, NULL);
- kfree(trigger_data);
+ event_trigger_free(trigger_ops, trigger_data);
kfree(enable_data);
goto out;
}
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 23c0b0cb5fb9..169b3c44ee97 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -831,6 +831,7 @@ print_graph_entry_leaf(struct trace_iterator *iter,
struct ftrace_graph_ret *graph_ret;
struct ftrace_graph_ent *call;
unsigned long long duration;
+ int cpu = iter->cpu;
int i;
graph_ret = &ret_entry->ret;
@@ -839,7 +840,6 @@ print_graph_entry_leaf(struct trace_iterator *iter,
if (data) {
struct fgraph_cpu_data *cpu_data;
- int cpu = iter->cpu;
cpu_data = per_cpu_ptr(data->cpu_data, cpu);
@@ -869,6 +869,9 @@ print_graph_entry_leaf(struct trace_iterator *iter,
trace_seq_printf(s, "%ps();\n", (void *)call->func);
+ print_graph_irq(iter, graph_ret->func, TRACE_GRAPH_RET,
+ cpu, iter->ent->pid, flags);
+
return trace_handle_return(s);
}
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index daa81571b22a..6b71860f3998 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -400,11 +400,10 @@ static struct trace_kprobe *find_trace_kprobe(const char *event,
static int
enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
{
+ struct event_file_link *link = NULL;
int ret = 0;
if (file) {
- struct event_file_link *link;
-
link = kmalloc(sizeof(*link), GFP_KERNEL);
if (!link) {
ret = -ENOMEM;
@@ -424,6 +423,18 @@ enable_trace_kprobe(struct trace_kprobe *tk, struct trace_event_file *file)
else
ret = enable_kprobe(&tk->rp.kp);
}
+
+ if (ret) {
+ if (file) {
+ /* The if below is true only when WARN_ON_ONCE() does not trigger */
+ if (!WARN_ON_ONCE(!link))
+ list_del_rcu(&link->list);
+ kfree(link);
+ tk->tp.flags &= ~TP_FLAG_TRACE;
+ } else {
+ tk->tp.flags &= ~TP_FLAG_PROFILE;
+ }
+ }
out:
return ret;
}
@@ -1480,8 +1491,10 @@ create_local_trace_kprobe(char *func, void *addr, unsigned long offs,
}
ret = __register_trace_kprobe(tk);
- if (ret < 0)
+ if (ret < 0) {
+ kfree(tk->tp.call.print_fmt);
goto error;
+ }
return &tk->tp.call;
error:
@@ -1501,6 +1514,8 @@ void destroy_local_trace_kprobe(struct trace_event_call *event_call)
}
__unregister_trace_kprobe(tk);
+
+ kfree(tk->tp.call.print_fmt);
free_trace_kprobe(tk);
}
#endif /* CONFIG_PERF_EVENTS */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 90db994ac900..1c8e30fda46a 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -594,8 +594,7 @@ int trace_print_context(struct trace_iterator *iter)
trace_find_cmdline(entry->pid, comm);
- trace_seq_printf(s, "%16s-%-5d [%03d] ",
- comm, entry->pid, iter->cpu);
+ trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
if (tr->trace_flags & TRACE_ITER_RECORD_TGID) {
unsigned int tgid = trace_find_tgid(entry->pid);
@@ -606,6 +605,8 @@ int trace_print_context(struct trace_iterator *iter)
trace_seq_printf(s, "(%5d) ", tgid);
}
+ trace_seq_printf(s, "[%03d] ", iter->cpu);
+
if (tr->trace_flags & TRACE_ITER_IRQ_INFO)
trace_print_lat_fmt(s, entry);
diff --git a/lib/Kconfig b/lib/Kconfig
index e34b04b56057..706836ec314d 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -420,60 +420,15 @@ config HAS_IOPORT_MAP
depends on HAS_IOMEM && !NO_IOPORT_MAP
default y
-config HAS_DMA
- bool
- depends on !NO_DMA
- default y
+source "kernel/dma/Kconfig"
config SGL_ALLOC
bool
default n
-config NEED_SG_DMA_LENGTH
- bool
-
-config NEED_DMA_MAP_STATE
- bool
-
-config ARCH_DMA_ADDR_T_64BIT
- def_bool 64BIT || PHYS_ADDR_T_64BIT
-
config IOMMU_HELPER
bool
-config ARCH_HAS_SYNC_DMA_FOR_DEVICE
- bool
-
-config ARCH_HAS_SYNC_DMA_FOR_CPU
- bool
- select NEED_DMA_MAP_STATE
-
-config DMA_DIRECT_OPS
- bool
- depends on HAS_DMA
-
-config DMA_NONCOHERENT_OPS
- bool
- depends on HAS_DMA
- select DMA_DIRECT_OPS
-
-config DMA_NONCOHERENT_MMAP
- bool
- depends on DMA_NONCOHERENT_OPS
-
-config DMA_NONCOHERENT_CACHE_SYNC
- bool
- depends on DMA_NONCOHERENT_OPS
-
-config DMA_VIRT_OPS
- bool
- depends on HAS_DMA
-
-config SWIOTLB
- bool
- select DMA_DIRECT_OPS
- select NEED_DMA_MAP_STATE
-
config CHECK_SIGNATURE
bool
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 3d35d062970d..befb127507c0 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -5,7 +5,8 @@ if HAVE_ARCH_KASAN
config KASAN
bool "KASan: runtime memory debugger"
- depends on SLUB || (SLAB && !DEBUG_SLAB)
+ depends on (SLUB && SYSFS) || (SLAB && !DEBUG_SLAB)
+ select SLUB_DEBUG if SLUB
select CONSTRUCTORS
select STACKDEPOT
help
diff --git a/lib/Makefile b/lib/Makefile
index 956b320292fe..90dc5520b784 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,15 +23,12 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o siphash.o \
+ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o nodemask.o win_minmax.o
lib-$(CONFIG_PRINTK) += dump_stack.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
-lib-$(CONFIG_DMA_DIRECT_OPS) += dma-direct.o
-lib-$(CONFIG_DMA_NONCOHERENT_OPS) += dma-noncoherent.o
-lib-$(CONFIG_DMA_VIRT_OPS) += dma-virt.o
lib-y += kobject.o klist.o
obj-y += lockref.o
@@ -98,10 +95,6 @@ obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
- lib-y += dec_and_lock.o
-endif
-
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
@@ -148,7 +141,6 @@ obj-$(CONFIG_SMP) += percpu_counter.o
obj-$(CONFIG_AUDIT_GENERIC) += audit.o
obj-$(CONFIG_AUDIT_COMPAT_GENERIC) += compat_audit.o
-obj-$(CONFIG_SWIOTLB) += swiotlb.o
obj-$(CONFIG_IOMMU_HELPER) += iommu-helper.o
obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o
obj-$(CONFIG_NOTIFIER_ERROR_INJECTION) += notifier-error-inject.o
@@ -169,8 +161,6 @@ obj-$(CONFIG_NLATTR) += nlattr.o
obj-$(CONFIG_LRU_CACHE) += lru_cache.o
-obj-$(CONFIG_DMA_API_DEBUG) += dma-debug.o
-
obj-$(CONFIG_GENERIC_CSUM) += checksum.o
obj-$(CONFIG_GENERIC_ATOMIC64) += atomic64.o
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 347fa7ac2e8a..9555b68bb774 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -33,3 +33,19 @@ int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from the counter unless that drops it to 0 (i.e. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 7e43cd54c84c..8be175df3075 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -596,15 +596,70 @@ static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
return ret;
}
+static size_t copy_pipe_to_iter_mcsafe(const void *addr, size_t bytes,
+ struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ size_t n, off, xfer = 0;
+ int idx;
+
+ if (!sanity(i))
+ return 0;
+
+ bytes = n = push_pipe(i, bytes, &idx, &off);
+ if (unlikely(!n))
+ return 0;
+ for ( ; n; idx = next_idx(idx, pipe), off = 0) {
+ size_t chunk = min_t(size_t, n, PAGE_SIZE - off);
+ unsigned long rem;
+
+ rem = memcpy_mcsafe_to_page(pipe->bufs[idx].page, off, addr,
+ chunk);
+ i->idx = idx;
+ i->iov_offset = off + chunk - rem;
+ xfer += chunk - rem;
+ if (rem)
+ break;
+ n -= chunk;
+ addr += chunk;
+ }
+ i->count -= xfer;
+ return xfer;
+}
+
+/**
+ * _copy_to_iter_mcsafe - copy to user with source-read error exception handling
+ * @addr: source kernel address
+ * @bytes: total transfer length
+ * @iter: destination iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_to_iter() for protecting read/write to persistent memory.
+ * Unless / until an architecture can guarantee identical performance
+ * between _copy_to_iter_mcsafe() and _copy_to_iter() it would be a
+ * performance regression to switch more users to the mcsafe version.
+ *
+ * Otherwise, the main differences between this and the typical _copy_to_iter() are:
+ *
+ * * Typical tail/residue handling after a fault retries the copy
+ * byte-by-byte until the fault happens again. Re-triggering machine
+ * checks is potentially fatal so the implementation uses source
+ * alignment and poison alignment assumptions to avoid re-triggering
+ * hardware exceptions.
+ *
+ * * ITER_KVEC, ITER_PIPE, and ITER_BVEC can return short copies.
+ * Compare to copy_to_iter() where only ITER_IOVEC attempts might return
+ * a short copy.
+ *
+ * See MCSAFE_TEST for self-test.
+ */
size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
{
const char *from = addr;
unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
- if (unlikely(i->type & ITER_PIPE)) {
- WARN_ON(1);
- return 0;
- }
+ if (unlikely(i->type & ITER_PIPE))
+ return copy_pipe_to_iter_mcsafe(addr, bytes, i);
if (iter_is_iovec(i))
might_fault();
iterate_and_advance(i, bytes, v,
@@ -701,6 +756,20 @@ size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i)
EXPORT_SYMBOL(_copy_from_iter_nocache);
#ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
+/**
+ * _copy_from_iter_flushcache - write destination through cpu cache
+ * @addr: destination kernel address
+ * @bytes: total transfer length
+ * @iter: source iterator
+ *
+ * The pmem driver arranges for filesystem-dax to use this facility via
+ * dax_copy_from_iter() for ensuring that writes to persistent memory
+ * are flushed through the CPU cache. It is differentiated from
+ * _copy_from_iter_nocache() in that guarantees all data is flushed for
+ * all iterator types. The _copy_from_iter_nocache() only attempts to
+ * bypass the cache for the ITER_IOVEC case, and on some archs may use
+ * instructions that strand dirty-data in the cache.
+ */
size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
{
char *to = addr;
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 9bbd9c5d375a..beb14839b41a 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -141,7 +141,7 @@ int percpu_ida_alloc(struct percpu_ida *pool, int state)
spin_lock_irqsave(&tags->lock, flags);
/* Fastpath */
- if (likely(tags->nr_free >= 0)) {
+ if (likely(tags->nr_free)) {
tag = tags->freelist[--tags->nr_free];
spin_unlock_irqrestore(&tags->lock, flags);
return tag;
diff --git a/lib/refcount.c b/lib/refcount.c
index 0eb48353abe3..d3b81cefce91 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -350,3 +350,31 @@ bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock)
}
EXPORT_SYMBOL(refcount_dec_and_lock);
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ-flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with interrupts disabled.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock_irqsave(lock, *flags);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock_irqrestore(lock, *flags);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 9427b5766134..e5c8586cf717 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -774,7 +774,7 @@ int rhashtable_walk_start_check(struct rhashtable_iter *iter)
skip++;
if (list == iter->list) {
iter->p = p;
- skip = skip;
+ iter->skip = skip;
goto found;
}
}
@@ -964,8 +964,16 @@ EXPORT_SYMBOL_GPL(rhashtable_walk_stop);
static size_t rounded_hashtable_size(const struct rhashtable_params *params)
{
- return max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
- (unsigned long)params->min_size);
+ size_t retsize;
+
+ if (params->nelem_hint)
+ retsize = max(roundup_pow_of_two(params->nelem_hint * 4 / 3),
+ (unsigned long)params->min_size);
+ else
+ retsize = max(HASH_DEFAULT_SIZE,
+ (unsigned long)params->min_size);
+
+ return retsize;
}
static u32 rhashtable_jhash2(const void *key, u32 length, u32 seed)
@@ -1022,8 +1030,6 @@ int rhashtable_init(struct rhashtable *ht,
struct bucket_table *tbl;
size_t size;
- size = HASH_DEFAULT_SIZE;
-
if ((!params->key_len && !params->obj_hashfn) ||
(params->obj_hashfn && !params->obj_cmpfn))
return -EINVAL;
@@ -1050,8 +1056,7 @@ int rhashtable_init(struct rhashtable *ht,
ht->p.min_size = max_t(u16, ht->p.min_size, HASH_MIN_SIZE);
- if (params->nelem_hint)
- size = rounded_hashtable_size(&ht->p);
+ size = rounded_hashtable_size(&ht->p);
if (params->locks_mul)
ht->p.locks_mul = roundup_pow_of_two(params->locks_mul);
@@ -1143,13 +1148,14 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
void (*free_fn)(void *ptr, void *arg),
void *arg)
{
- struct bucket_table *tbl;
+ struct bucket_table *tbl, *next_tbl;
unsigned int i;
cancel_work_sync(&ht->run_work);
mutex_lock(&ht->mutex);
tbl = rht_dereference(ht->tbl, ht);
+restart:
if (free_fn) {
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
@@ -1166,7 +1172,12 @@ void rhashtable_free_and_destroy(struct rhashtable *ht,
}
}
+ next_tbl = rht_dereference(tbl->future_tbl, ht);
bucket_table_free(tbl);
+ if (next_tbl) {
+ tbl = next_tbl;
+ goto restart;
+ }
mutex_unlock(&ht->mutex);
}
EXPORT_SYMBOL_GPL(rhashtable_free_and_destroy);
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 1642fd507a96..7c6096a71704 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -24,9 +24,6 @@
**/
struct scatterlist *sg_next(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
if (sg_is_last(sg))
return NULL;
@@ -111,10 +108,7 @@ struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
for_each_sg(sgl, sg, nents, i)
ret = sg;
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sgl[0].sg_magic != SG_MAGIC);
BUG_ON(!sg_is_last(ret));
-#endif
return ret;
}
EXPORT_SYMBOL(sg_last);
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 60aedc879361..08d3d59dca17 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -5282,21 +5282,31 @@ static struct bpf_test tests[] = {
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Ctx heavy transformations",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
CLASSIC,
+#endif
{ },
{
{ 1, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) },
{ 10, !!(SKB_VLAN_TCI & VLAN_TAG_PRESENT) }
},
.fill_helper = bpf_fill_maxinsns6,
+ .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Call heavy transformations",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_NO_DATA | FLAG_EXPECTED_FAIL,
+#else
CLASSIC | FLAG_NO_DATA,
+#endif
{ },
{ { 1, 0 }, { 10, 0 } },
.fill_helper = bpf_fill_maxinsns7,
+ .expected_errcode = -ENOTSUPP,
},
{ /* Mainly checking JIT here. */
"BPF_MAXINSNS: Jump heavy test",
@@ -5347,18 +5357,28 @@ static struct bpf_test tests[] = {
{
"BPF_MAXINSNS: exec all MSH",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
CLASSIC,
+#endif
{ 0xfa, 0xfb, 0xfc, 0xfd, },
{ { 4, 0xababab83 } },
.fill_helper = bpf_fill_maxinsns13,
+ .expected_errcode = -ENOTSUPP,
},
{
"BPF_MAXINSNS: ld_abs+get_processor_id",
{ },
+#if defined(CONFIG_BPF_JIT_ALWAYS_ON) && defined(CONFIG_S390)
+ CLASSIC | FLAG_EXPECTED_FAIL,
+#else
CLASSIC,
+#endif
{ },
{ { 1, 0xbee } },
.fill_helper = bpf_fill_ld_abs_get_processor_id,
+ .expected_errcode = -ENOTSUPP,
},
/*
* LD_IND / LD_ABS on fragmented SKBs
diff --git a/lib/test_printf.c b/lib/test_printf.c
index b2aa8f514844..cea592f402ed 100644
--- a/lib/test_printf.c
+++ b/lib/test_printf.c
@@ -260,13 +260,6 @@ plain(void)
{
int err;
- /*
- * Make sure crng is ready. Otherwise we get "(ptrval)" instead
- * of a hashed address when printing '%p' in plain_hash() and
- * plain_format().
- */
- wait_for_random_bytes();
-
err = plain_hash();
if (err) {
pr_warn("plain 'p' does not appear to be hashed\n");
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 347cc834c04a..2e5d3df0853d 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,15 +359,8 @@ static void wb_shutdown(struct bdi_writeback *wb)
spin_lock_bh(&wb->work_lock);
if (!test_and_clear_bit(WB_registered, &wb->state)) {
spin_unlock_bh(&wb->work_lock);
- /*
- * Wait for wb shutdown to finish if someone else is just
- * running wb_shutdown(). Otherwise we could proceed to wb /
- * bdi destruction before wb_shutdown() is finished.
- */
- wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
return;
}
- set_bit(WB_shutting_down, &wb->state);
spin_unlock_bh(&wb->work_lock);
cgwb_remove_from_bdi_list(wb);
@@ -379,12 +372,6 @@ static void wb_shutdown(struct bdi_writeback *wb)
mod_delayed_work(bdi_wq, &wb->dwork, 0);
flush_delayed_work(&wb->dwork);
WARN_ON(!list_empty(&wb->work_list));
- /*
- * Make sure bit gets cleared after shutdown is finished. Matches with
- * the barrier provided by test_and_clear_bit() above.
- */
- smp_wmb();
- clear_and_wake_up_bit(WB_shutting_down, &wb->state);
}
static void wb_exit(struct bdi_writeback *wb)
@@ -508,10 +495,12 @@ static void cgwb_release_workfn(struct work_struct *work)
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
release_work);
+ mutex_lock(&wb->bdi->cgwb_release_mutex);
wb_shutdown(wb);
css_put(wb->memcg_css);
css_put(wb->blkcg_css);
+ mutex_unlock(&wb->bdi->cgwb_release_mutex);
fprop_local_destroy_percpu(&wb->memcg_completions);
percpu_ref_exit(&wb->refcnt);
@@ -697,6 +686,7 @@ static int cgwb_bdi_init(struct backing_dev_info *bdi)
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT;
+ mutex_init(&bdi->cgwb_release_mutex);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {
@@ -717,7 +707,10 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
spin_lock_irq(&cgwb_lock);
radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
cgwb_kill(*slot);
+ spin_unlock_irq(&cgwb_lock);
+ mutex_lock(&bdi->cgwb_release_mutex);
+ spin_lock_irq(&cgwb_lock);
while (!list_empty(&bdi->wb_list)) {
wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
bdi_node);
@@ -726,6 +719,7 @@ static void cgwb_bdi_unregister(struct backing_dev_info *bdi)
spin_lock_irq(&cgwb_lock);
}
spin_unlock_irq(&cgwb_lock);
+ mutex_unlock(&bdi->cgwb_release_mutex);
}
/**
diff --git a/mm/debug.c b/mm/debug.c
index 56e2d9125ea5..38c926520c97 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -43,12 +43,25 @@ const struct trace_print_flags vmaflag_names[] = {
void __dump_page(struct page *page, const char *reason)
{
+ bool page_poisoned = PagePoisoned(page);
+ int mapcount;
+
+ /*
+ * If the struct page is poisoned, don't call Page*() functions, as that
+ * leads to a recursive loop: Page*() checks for poisoned pages and calls
+ * dump_page() when one is detected.
+ */
+ if (page_poisoned) {
+ pr_emerg("page:%px is uninitialized and poisoned", page);
+ goto hex_only;
+ }
+
/*
* Avoid VM_BUG_ON() in page_mapcount().
* page->_mapcount space in struct page is used by sl[aou]b pages to
* encode own info.
*/
- int mapcount = PageSlab(page) ? 0 : page_mapcount(page);
+ mapcount = PageSlab(page) ? 0 : page_mapcount(page);
pr_emerg("page:%px count:%d mapcount:%d mapping:%px index:%#lx",
page, page_ref_count(page), mapcount,
@@ -60,6 +73,7 @@ void __dump_page(struct page *page, const char *reason)
pr_emerg("flags: %#lx(%pGp)\n", page->flags, &page->flags);
+hex_only:
print_hex_dump(KERN_ALERT, "raw: ", DUMP_PREFIX_NONE, 32,
sizeof(unsigned long), page,
sizeof(struct page), false);
@@ -68,7 +82,7 @@ void __dump_page(struct page *page, const char *reason)
pr_alert("page dumped because: %s\n", reason);
#ifdef CONFIG_MEMCG
- if (page->mem_cgroup)
+ if (!page_poisoned && page->mem_cgroup)
pr_alert("page->mem_cgroup:%px\n", page->mem_cgroup);
#endif
}
diff --git a/mm/gup.c b/mm/gup.c
index b70d7ba7cc13..fc5f98069f4e 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -1238,8 +1238,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
int locked = 0;
long ret = 0;
- VM_BUG_ON(start & ~PAGE_MASK);
- VM_BUG_ON(len != PAGE_ALIGN(len));
end = start + len;
for (nstart = start; nstart < end; nstart = nend) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1cd7c1a57a14..25346bd99364 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2084,6 +2084,8 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
if (vma_is_dax(vma))
return;
page = pmd_page(_pmd);
+ if (!PageDirty(page) && pmd_dirty(_pmd))
+ set_page_dirty(page);
if (!PageReferenced(page) && pmd_young(_pmd))
SetPageReferenced(page);
page_remove_rmap(page, true);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3612fbb32e9d..039ddbc574e9 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2163,6 +2163,7 @@ static void __init gather_bootmem_prealloc(void)
*/
if (hstate_is_gigantic(h))
adjust_managed_page_count(page, 1 << h->order);
+ cond_resched();
}
}
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index f185455b3406..c3bd5209da38 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -619,12 +619,13 @@ void kasan_kfree_large(void *ptr, unsigned long ip)
int kasan_module_alloc(void *addr, size_t size)
{
void *ret;
+ size_t scaled_size;
size_t shadow_size;
unsigned long shadow_start;
shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
- shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
- PAGE_SIZE);
+ scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
+ shadow_size = round_up(scaled_size, PAGE_SIZE);
if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
return -EINVAL;
diff --git a/mm/memblock.c b/mm/memblock.c
index cc16d70b8333..4b5d245fafc1 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -228,7 +228,8 @@ phys_addr_t __init_memblock memblock_find_in_range_node(phys_addr_t size,
* so we use WARN_ONCE() here to see the stack trace if
* fail happens.
*/
- WARN_ONCE(1, "memblock: bottom-up allocation failed, memory hotunplug may be affected\n");
+ WARN_ONCE(IS_ENABLED(CONFIG_MEMORY_HOTREMOVE),
+ "memblock: bottom-up allocation failed, memory hotremove may be affected\n");
}
return __memblock_find_range_top_down(start, end, size, align, nid,
@@ -1225,6 +1226,7 @@ phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, i
return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
}
+#if defined(CONFIG_NO_BOOTMEM)
/**
* memblock_virt_alloc_internal - allocate boot memory block
* @size: size of memory block to be allocated in bytes
@@ -1432,6 +1434,7 @@ void * __init memblock_virt_alloc_try_nid(
(u64)max_addr);
return NULL;
}
+#endif
/**
* __memblock_free_early - free boot memory block
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e6f0d5ef320a..8c0280b3143e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -850,7 +850,7 @@ static void invalidate_reclaim_iterators(struct mem_cgroup *dead_memcg)
int nid;
int i;
- while ((memcg = parent_mem_cgroup(memcg))) {
+ for (; memcg; memcg = parent_mem_cgroup(memcg)) {
for_each_node(nid) {
mz = mem_cgroup_nodeinfo(memcg, nid);
for (i = 0; i <= DEF_PRIORITY; i++) {
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 9ac49ef17b4e..01f1a14facc4 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2505,6 +2505,7 @@ void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
/* Create pseudo-vma that contains just the policy */
memset(&pvma, 0, sizeof(struct vm_area_struct));
+ vma_init(&pvma, NULL);
pvma.vm_end = TASK_SIZE; /* policy covers entire file */
mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
diff --git a/mm/mmap.c b/mm/mmap.c
index d1eb87ef4b1a..17bbf4d3e24f 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -182,12 +182,12 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
mpol_put(vma_policy(vma));
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return next;
}
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf);
-
+static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags,
+ struct list_head *uf);
SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long retval;
@@ -245,7 +245,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
goto out;
/* Ok, looks good - let it rip. */
- if (do_brk(oldbrk, newbrk-oldbrk, &uf) < 0)
+ if (do_brk_flags(oldbrk, newbrk-oldbrk, 0, &uf) < 0)
goto out;
set_brk:
@@ -911,7 +911,7 @@ again:
anon_vma_merge(vma, next);
mm->map_count--;
mpol_put(vma_policy(next));
- kmem_cache_free(vm_area_cachep, next);
+ vm_area_free(next);
/*
* In mprotect's case 6 (see comments on vma_merge),
* we must remove another next too. It would clutter
@@ -1729,19 +1729,17 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
* specific mapper. the address has already been validated, but
* not unmapped, but the maps are removed from the list.
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(mm);
if (!vma) {
error = -ENOMEM;
goto unacct_error;
}
- vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_flags = vm_flags;
vma->vm_page_prot = vm_get_page_prot(vm_flags);
vma->vm_pgoff = pgoff;
- INIT_LIST_HEAD(&vma->anon_vma_chain);
if (file) {
if (vm_flags & VM_DENYWRITE) {
@@ -1780,6 +1778,8 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
error = shmem_zero_setup(vma);
if (error)
goto free_vma;
+ } else {
+ vma_set_anonymous(vma);
}
vma_link(mm, vma, prev, rb_link, rb_parent);
@@ -1832,7 +1832,7 @@ allow_write_and_free_vma:
if (vm_flags & VM_DENYWRITE)
allow_write_access(file);
free_vma:
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
unacct_error:
if (charged)
vm_unacct_memory(charged);
@@ -2620,15 +2620,10 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
return err;
}
- new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ new = vm_area_dup(vma);
if (!new)
return -ENOMEM;
- /* most fields are the same, copy all, and then fixup */
- *new = *vma;
-
- INIT_LIST_HEAD(&new->anon_vma_chain);
-
if (new_below)
new->vm_end = addr;
else {
@@ -2669,7 +2664,7 @@ int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
out_free_mpol:
mpol_put(vma_policy(new));
out_free_vma:
- kmem_cache_free(vm_area_cachep, new);
+ vm_area_free(new);
return err;
}
@@ -2929,21 +2924,14 @@ static inline void verify_mm_writelocked(struct mm_struct *mm)
* anonymous maps. eventually we may be able to do some
* brk-specific accounting here.
*/
-static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long flags, struct list_head *uf)
+static int do_brk_flags(unsigned long addr, unsigned long len, unsigned long flags, struct list_head *uf)
{
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma, *prev;
- unsigned long len;
struct rb_node **rb_link, *rb_parent;
pgoff_t pgoff = addr >> PAGE_SHIFT;
int error;
- len = PAGE_ALIGN(request);
- if (len < request)
- return -ENOMEM;
- if (!len)
- return 0;
-
/* Until we need other flags, refuse anything except VM_EXEC. */
if ((flags & (~VM_EXEC)) != 0)
return -EINVAL;
@@ -2991,14 +2979,13 @@ static int do_brk_flags(unsigned long addr, unsigned long request, unsigned long
/*
* create a vma struct for an anonymous mapping
*/
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(mm);
if (!vma) {
vm_unacct_memory(len >> PAGE_SHIFT);
return -ENOMEM;
}
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = mm;
+ vma_set_anonymous(vma);
vma->vm_start = addr;
vma->vm_end = addr + len;
vma->vm_pgoff = pgoff;
@@ -3015,18 +3002,20 @@ out:
return 0;
}
-static int do_brk(unsigned long addr, unsigned long len, struct list_head *uf)
-{
- return do_brk_flags(addr, len, 0, uf);
-}
-
-int vm_brk_flags(unsigned long addr, unsigned long len, unsigned long flags)
+int vm_brk_flags(unsigned long addr, unsigned long request, unsigned long flags)
{
struct mm_struct *mm = current->mm;
+ unsigned long len;
int ret;
bool populate;
LIST_HEAD(uf);
+ len = PAGE_ALIGN(request);
+ if (len < request)
+ return -ENOMEM;
+ if (!len)
+ return 0;
+
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
@@ -3207,16 +3196,14 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
}
*need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
} else {
- new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ new_vma = vm_area_dup(vma);
if (!new_vma)
goto out;
- *new_vma = *vma;
new_vma->vm_start = addr;
new_vma->vm_end = addr + len;
new_vma->vm_pgoff = pgoff;
if (vma_dup_policy(vma, new_vma))
goto out_free_vma;
- INIT_LIST_HEAD(&new_vma->anon_vma_chain);
if (anon_vma_clone(new_vma, vma))
goto out_free_mempol;
if (new_vma->vm_file)
@@ -3231,7 +3218,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
out_free_mempol:
mpol_put(vma_policy(new_vma));
out_free_vma:
- kmem_cache_free(vm_area_cachep, new_vma);
+ vm_area_free(new_vma);
out:
return NULL;
}
@@ -3355,12 +3342,10 @@ static struct vm_area_struct *__install_special_mapping(
int ret;
struct vm_area_struct *vma;
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(mm);
if (unlikely(vma == NULL))
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&vma->anon_vma_chain);
- vma->vm_mm = mm;
vma->vm_start = addr;
vma->vm_end = addr + len;
@@ -3381,7 +3366,7 @@ static struct vm_area_struct *__install_special_mapping(
return vma;
out:
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return ERR_PTR(ret);
}
diff --git a/mm/nommu.c b/mm/nommu.c
index 4452d8bd9ae4..9fc9e43335b6 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -769,7 +769,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma)
if (vma->vm_file)
fput(vma->vm_file);
put_nommu_region(vma->vm_region);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
}
/*
@@ -1145,6 +1145,8 @@ static int do_mmap_private(struct vm_area_struct *vma,
if (ret < len)
memset(base + ret, 0, len - ret);
+ } else {
+ vma_set_anonymous(vma);
}
return 0;
@@ -1204,7 +1206,7 @@ unsigned long do_mmap(struct file *file,
if (!region)
goto error_getting_region;
- vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ vma = vm_area_alloc(current->mm);
if (!vma)
goto error_getting_vma;
@@ -1212,7 +1214,6 @@ unsigned long do_mmap(struct file *file,
region->vm_flags = vm_flags;
region->vm_pgoff = pgoff;
- INIT_LIST_HEAD(&vma->anon_vma_chain);
vma->vm_flags = vm_flags;
vma->vm_pgoff = pgoff;
@@ -1368,7 +1369,7 @@ error:
kmem_cache_free(vm_region_jar, region);
if (vma->vm_file)
fput(vma->vm_file);
- kmem_cache_free(vm_area_cachep, vma);
+ vm_area_free(vma);
return ret;
sharing_violation:
@@ -1469,14 +1470,13 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
if (!region)
return -ENOMEM;
- new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
+ new = vm_area_dup(vma);
if (!new) {
kmem_cache_free(vm_region_jar, region);
return -ENOMEM;
}
/* most fields are the same, copy all, and then fixup */
- *new = *vma;
*region = *vma->vm_region;
new->vm_region = region;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1521100f1e63..a790ef4be74e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6383,7 +6383,7 @@ void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
free_area_init_core(pgdat);
}
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
/*
* Only struct pages that are backed by physical memory are zeroed and
* initialized by going through __init_single_page(). But, there are some
@@ -6421,7 +6421,7 @@ void __paginginit zero_resv_unavail(void)
if (pgcnt)
pr_info("Reserved but unavailable: %lld pages", pgcnt);
}
-#endif /* CONFIG_HAVE_MEMBLOCK */
+#endif /* CONFIG_HAVE_MEMBLOCK && !CONFIG_FLAT_NODE_MEM_MAP */
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
@@ -6847,6 +6847,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
/* Initialise every node */
mminit_verify_pageflags_layout();
setup_nr_node_ids();
+ zero_resv_unavail();
for_each_online_node(nid) {
pg_data_t *pgdat = NODE_DATA(nid);
free_area_init_node(nid, NULL,
@@ -6857,7 +6858,6 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
node_set_state(nid, N_MEMORY);
check_for_memory(pgdat, nid);
}
- zero_resv_unavail();
}
static int __init cmdline_parse_core(char *p, unsigned long *core,
@@ -7033,9 +7033,9 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
void __init free_area_init(unsigned long *zones_size)
{
+ zero_resv_unavail();
free_area_init_node(0, zones_size,
__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
- zero_resv_unavail();
}
static int page_alloc_cpu_dead(unsigned int cpu)
diff --git a/mm/rmap.c b/mm/rmap.c
index 6db729dc4c50..eb477809a5c0 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -64,6 +64,7 @@
#include <linux/backing-dev.h>
#include <linux/page_idle.h>
#include <linux/memremap.h>
+#include <linux/userfaultfd_k.h>
#include <asm/tlbflush.h>
@@ -1481,11 +1482,16 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
set_pte_at(mm, address, pvmw.pte, pteval);
}
- } else if (pte_unused(pteval)) {
+ } else if (pte_unused(pteval) && !userfaultfd_armed(vma)) {
/*
* The guest indicated that the page content is of no
* interest anymore. Simply discard the pte, vmscan
* will take care of the rest.
+ * A future reference will then fault in a new zero
+ * page. When userfaultfd is active, we must not drop
+ * this page though, as its main user (postcopy
+ * migration) will not expect userfaults on already
+ * copied pages.
*/
dec_mm_counter(mm, mm_counter(page));
/* We have to invalidate as we cleared the pte */
diff --git a/mm/shmem.c b/mm/shmem.c
index 2cab84403055..41b9bbf24e16 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1421,6 +1421,7 @@ static void shmem_pseudo_vma_init(struct vm_area_struct *vma,
{
/* Create a pseudo vma that just contains the policy */
memset(vma, 0, sizeof(*vma));
+ vma_init(vma, NULL);
/* Bias interleave by inode number to distribute better across nodes */
vma->vm_pgoff = index + info->vfs_inode.i_ino;
vma->vm_policy = mpol_shared_policy_lookup(&info->policy, index);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 890b1f04a03a..2296caf87bfb 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -567,10 +567,14 @@ static int shutdown_cache(struct kmem_cache *s)
list_del(&s->list);
if (s->flags & SLAB_TYPESAFE_BY_RCU) {
+#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_unlink(s);
+#endif
list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
schedule_work(&slab_caches_to_rcu_destroy_work);
} else {
#ifdef SLAB_SUPPORTS_SYSFS
+ sysfs_slab_unlink(s);
sysfs_slab_release(s);
#else
slab_kmem_cache_release(s);
diff --git a/mm/slub.c b/mm/slub.c
index a3b8467c14af..51258eff4178 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -5667,7 +5667,6 @@ static void sysfs_slab_remove_workfn(struct work_struct *work)
kset_unregister(s->memcg_kset);
#endif
kobject_uevent(&s->kobj, KOBJ_REMOVE);
- kobject_del(&s->kobj);
out:
kobject_put(&s->kobj);
}
@@ -5752,6 +5751,12 @@ static void sysfs_slab_remove(struct kmem_cache *s)
schedule_work(&s->kobj_remove_work);
}
+void sysfs_slab_unlink(struct kmem_cache *s)
+{
+ if (slab_state >= FULL)
+ kobject_del(&s->kobj);
+}
+
void sysfs_slab_release(struct kmem_cache *s)
{
if (slab_state >= FULL)
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 75eda9c2b260..8ba0870ecddd 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1796,11 +1796,9 @@ static void vmstat_update(struct work_struct *w)
* to occur in the future. Keep on running the
* update worker thread.
*/
- preempt_disable();
queue_delayed_work_on(smp_processor_id(), mm_percpu_wq,
this_cpu_ptr(&vmstat_work),
round_jiffies_relative(sysctl_stat_interval));
- preempt_enable();
}
}
diff --git a/mm/zswap.c b/mm/zswap.c
index 7d34e69507e3..cd91fd9d96b8 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -1026,6 +1026,15 @@ static int zswap_frontswap_store(unsigned type, pgoff_t offset,
ret = -ENOMEM;
goto reject;
}
+
+ /* Do a second zswap_is_full() check after
+ * zswap_shrink(), to make sure the pool is
+ * now back under max_pool_percent.
+ */
+ if (zswap_is_full()) {
+ ret = -ENOMEM;
+ goto reject;
+ }
}
/* allocate entry */
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 73a65789271b..8ccee3d01822 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -693,7 +693,7 @@ static struct sk_buff **vlan_gro_receive(struct sk_buff **head,
out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final(skb, pp, flush);
return pp;
}
diff --git a/net/9p/client.c b/net/9p/client.c
index 18c5271910dc..5c1343195292 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -225,7 +225,8 @@ static int parse_opts(char *opts, struct p9_client *clnt)
}
free_and_return:
- v9fs_put_trans(clnt->trans_mod);
+ if (ret)
+ v9fs_put_trans(clnt->trans_mod);
kfree(tmp_options);
return ret;
}
diff --git a/net/Makefile b/net/Makefile
index 13ec0d5415c7..bdaf53925acd 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -20,11 +20,7 @@ obj-$(CONFIG_TLS) += tls/
obj-$(CONFIG_XFRM) += xfrm/
obj-$(CONFIG_UNIX) += unix/
obj-$(CONFIG_NET) += ipv6/
-ifneq ($(CC_CAN_LINK),y)
-$(warning CC cannot link executables. Skipping bpfilter.)
-else
obj-$(CONFIG_BPFILTER) += bpfilter/
-endif
obj-$(CONFIG_PACKET) += packet/
obj-$(CONFIG_NET_KEY) += key/
obj-$(CONFIG_BRIDGE) += bridge/
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 55fdba05d7d9..9b6bc5abe946 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1869,7 +1869,7 @@ static const struct proto_ops atalk_dgram_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = atalk_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = atalk_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = atalk_compat_ioctl,
diff --git a/net/atm/br2684.c b/net/atm/br2684.c
index 36b3adacc0dd..10462de734ea 100644
--- a/net/atm/br2684.c
+++ b/net/atm/br2684.c
@@ -252,8 +252,7 @@ static int br2684_xmit_vcc(struct sk_buff *skb, struct net_device *dev,
ATM_SKB(skb)->vcc = atmvcc = brvcc->atmvcc;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, atmvcc, atmvcc->dev);
- refcount_add(skb->truesize, &sk_atm(atmvcc)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = atmvcc->atm_options;
+ atm_account_tx(atmvcc, skb);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 66caa48a27c2..d795b9c5aea4 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -381,8 +381,7 @@ static netdev_tx_t clip_start_xmit(struct sk_buff *skb,
memcpy(here, llc_oui, sizeof(llc_oui));
((__be16 *) here)[3] = skb->protocol;
}
- refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = vcc->atm_options;
+ atm_account_tx(vcc, skb);
entry->vccs->last_use = jiffies;
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n", skb, vcc, vcc->dev);
old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
diff --git a/net/atm/common.c b/net/atm/common.c
index 1f2af59935db..a7a68e509628 100644
--- a/net/atm/common.c
+++ b/net/atm/common.c
@@ -630,10 +630,9 @@ int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t size)
goto out;
}
pr_debug("%d += %d\n", sk_wmem_alloc_get(sk), skb->truesize);
- refcount_add(skb->truesize, &sk->sk_wmem_alloc);
+ atm_account_tx(vcc, skb);
skb->dev = NULL; /* for paths shared with net_device interfaces */
- ATM_SKB(skb)->atm_options = vcc->atm_options;
if (!copy_from_iter_full(skb_put(skb, size), size, &m->msg_iter)) {
kfree_skb(skb);
error = -EFAULT;
@@ -648,11 +647,16 @@ out:
return error;
}
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events)
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- struct atm_vcc *vcc = ATM_SD(sock);
- __poll_t mask = 0;
+ struct atm_vcc *vcc;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ vcc = ATM_SD(sock);
/* exceptional events */
if (sk->sk_err)
diff --git a/net/atm/common.h b/net/atm/common.h
index 526796ad230f..5850649068bb 100644
--- a/net/atm/common.h
+++ b/net/atm/common.h
@@ -17,7 +17,7 @@ int vcc_connect(struct socket *sock, int itf, short vpi, int vci);
int vcc_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
int flags);
int vcc_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len);
-__poll_t vcc_poll_mask(struct socket *sock, __poll_t events);
+__poll_t vcc_poll(struct file *file, struct socket *sock, poll_table *wait);
int vcc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int vcc_setsockopt(struct socket *sock, int level, int optname,
diff --git a/net/atm/lec.c b/net/atm/lec.c
index 5a95fcf6f9b6..d7f5cf5b7594 100644
--- a/net/atm/lec.c
+++ b/net/atm/lec.c
@@ -182,9 +182,8 @@ lec_send(struct atm_vcc *vcc, struct sk_buff *skb)
struct net_device *dev = skb->dev;
ATM_SKB(skb)->vcc = vcc;
- ATM_SKB(skb)->atm_options = vcc->atm_options;
+ atm_account_tx(vcc, skb);
- refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
if (vcc->send(vcc, skb) < 0) {
dev->stats.tx_dropped++;
return;
diff --git a/net/atm/mpc.c b/net/atm/mpc.c
index 75620c2f2617..24b53c4c39c6 100644
--- a/net/atm/mpc.c
+++ b/net/atm/mpc.c
@@ -555,8 +555,7 @@ static int send_via_shortcut(struct sk_buff *skb, struct mpoa_client *mpc)
sizeof(struct llc_snap_hdr));
}
- refcount_add(skb->truesize, &sk_atm(entry->shortcut)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = entry->shortcut->atm_options;
+ atm_account_tx(entry->shortcut, skb);
entry->shortcut->send(entry->shortcut, skb);
entry->packets_fwded++;
mpc->in_ops->put(entry);
diff --git a/net/atm/pppoatm.c b/net/atm/pppoatm.c
index 21d9d341a619..af8c4b38b746 100644
--- a/net/atm/pppoatm.c
+++ b/net/atm/pppoatm.c
@@ -350,8 +350,7 @@ static int pppoatm_send(struct ppp_channel *chan, struct sk_buff *skb)
return 1;
}
- refcount_add(skb->truesize, &sk_atm(ATM_SKB(skb)->vcc)->sk_wmem_alloc);
- ATM_SKB(skb)->atm_options = ATM_SKB(skb)->vcc->atm_options;
+ atm_account_tx(vcc, skb);
pr_debug("atm_skb(%p)->vcc(%p)->dev(%p)\n",
skb, ATM_SKB(skb)->vcc, ATM_SKB(skb)->vcc->dev);
ret = ATM_SKB(skb)->vcc->send(ATM_SKB(skb)->vcc, skb)
diff --git a/net/atm/pvc.c b/net/atm/pvc.c
index 9f75092fe778..2cb10af16afc 100644
--- a/net/atm/pvc.c
+++ b/net/atm/pvc.c
@@ -113,7 +113,7 @@ static const struct proto_ops pvc_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pvc_getname,
- .poll_mask = vcc_poll_mask,
+ .poll = vcc_poll,
.ioctl = vcc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vcc_compat_ioctl,
diff --git a/net/atm/raw.c b/net/atm/raw.c
index ee10e8d46185..b3ba44aab0ee 100644
--- a/net/atm/raw.c
+++ b/net/atm/raw.c
@@ -35,8 +35,8 @@ static void atm_pop_raw(struct atm_vcc *vcc, struct sk_buff *skb)
struct sock *sk = sk_atm(vcc);
pr_debug("(%d) %d -= %d\n",
- vcc->vci, sk_wmem_alloc_get(sk), skb->truesize);
- WARN_ON(refcount_sub_and_test(skb->truesize, &sk->sk_wmem_alloc));
+ vcc->vci, sk_wmem_alloc_get(sk), ATM_SKB(skb)->acct_truesize);
+ WARN_ON(refcount_sub_and_test(ATM_SKB(skb)->acct_truesize, &sk->sk_wmem_alloc));
dev_kfree_skb_any(skb);
sk->sk_write_space(sk);
}
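The net/atm hunks above replace the open-coded truesize/atm_options accounting at every transmit site with a single atm_account_tx() helper, and atm_pop_raw() now releases ATM_SKB(skb)->acct_truesize instead of the current skb->truesize. The helper itself is defined outside the hunks shown here (in include/linux/atmdev.h); a hedged sketch of what it is expected to do:

/* Sketch only - the real definition is not shown in this diff. The point is
 * to snapshot the charged truesize so the pop path can subtract the exact
 * same amount even if lower layers change skb->truesize in between. */
static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
{
        ATM_SKB(skb)->acct_truesize = skb->truesize;
        ATM_SKB(skb)->atm_options = vcc->atm_options;
        refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
}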
diff --git a/net/atm/svc.c b/net/atm/svc.c
index 53f4ad7087b1..2f91b766ac42 100644
--- a/net/atm/svc.c
+++ b/net/atm/svc.c
@@ -636,7 +636,7 @@ static const struct proto_ops svc_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = svc_accept,
.getname = svc_getname,
- .poll_mask = vcc_poll_mask,
+ .poll = vcc_poll,
.ioctl = svc_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = svc_compat_ioctl,
diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c
index d1d2442ce573..c603d33d5410 100644
--- a/net/ax25/af_ax25.c
+++ b/net/ax25/af_ax25.c
@@ -1941,7 +1941,7 @@ static const struct proto_ops ax25_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = ax25_accept,
.getname = ax25_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = ax25_ioctl,
.listen = ax25_listen,
.shutdown = ax25_shutdown,
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index be09a9883825..73bf6a93a3cf 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -2732,7 +2732,7 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
{
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_neigh_node *router;
- struct batadv_gw_node *curr_gw;
+ struct batadv_gw_node *curr_gw = NULL;
int ret = 0;
void *hdr;
@@ -2780,6 +2780,8 @@ static int batadv_iv_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
ret = 0;
out:
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
if (router)
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index ec93337ee259..6baec4e68898 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -927,7 +927,7 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
{
struct batadv_neigh_ifinfo *router_ifinfo = NULL;
struct batadv_neigh_node *router;
- struct batadv_gw_node *curr_gw;
+ struct batadv_gw_node *curr_gw = NULL;
int ret = 0;
void *hdr;
@@ -995,6 +995,8 @@ static int batadv_v_gw_dump_entry(struct sk_buff *msg, u32 portid, u32 seq,
ret = 0;
out:
+ if (curr_gw)
+ batadv_gw_node_put(curr_gw);
if (router_ifinfo)
batadv_neigh_ifinfo_put(router_ifinfo);
if (router)
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 4229b01ac7b5..87479c60670e 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -19,6 +19,7 @@
#include "debugfs.h"
#include "main.h"
+#include <linux/dcache.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/errno.h>
@@ -344,6 +345,25 @@ out:
}
/**
+ * batadv_debugfs_rename_hardif() - Fix debugfs path for renamed hardif
+ * @hard_iface: hard interface which was renamed
+ */
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+ const char *name = hard_iface->net_dev->name;
+ struct dentry *dir;
+ struct dentry *d;
+
+ dir = hard_iface->debug_dir;
+ if (!dir)
+ return;
+
+ d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+ if (!d)
+ pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
+/**
* batadv_debugfs_del_hardif() - delete the base directory for a hard interface
* in debugfs.
* @hard_iface: hard interface which is deleted.
@@ -414,6 +434,26 @@ out:
}
/**
+ * batadv_debugfs_rename_meshif() - Fix debugfs path for renamed softif
+ * @dev: net_device which was renamed
+ */
+void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+ struct batadv_priv *bat_priv = netdev_priv(dev);
+ const char *name = dev->name;
+ struct dentry *dir;
+ struct dentry *d;
+
+ dir = bat_priv->debug_dir;
+ if (!dir)
+ return;
+
+ d = debugfs_rename(dir->d_parent, dir, dir->d_parent, name);
+ if (!d)
+ pr_err("Can't rename debugfs dir to %s\n", name);
+}
+
+/**
* batadv_debugfs_del_meshif() - Remove interface dependent debugfs entries
* @dev: netdev struct of the soft interface
*/
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index 37b069698b04..08a592ffbee5 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -30,8 +30,10 @@ struct net_device;
void batadv_debugfs_init(void);
void batadv_debugfs_destroy(void);
int batadv_debugfs_add_meshif(struct net_device *dev);
+void batadv_debugfs_rename_meshif(struct net_device *dev);
void batadv_debugfs_del_meshif(struct net_device *dev);
int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface);
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface);
void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface);
#else
@@ -49,6 +51,10 @@ static inline int batadv_debugfs_add_meshif(struct net_device *dev)
return 0;
}
+static inline void batadv_debugfs_rename_meshif(struct net_device *dev)
+{
+}
+
static inline void batadv_debugfs_del_meshif(struct net_device *dev)
{
}
@@ -60,6 +66,11 @@ int batadv_debugfs_add_hardif(struct batadv_hard_iface *hard_iface)
}
static inline
+void batadv_debugfs_rename_hardif(struct batadv_hard_iface *hard_iface)
+{
+}
+
+static inline
void batadv_debugfs_del_hardif(struct batadv_hard_iface *hard_iface)
{
}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index c405d15befd6..2f0d42f2f913 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -989,6 +989,32 @@ void batadv_hardif_remove_interfaces(void)
rtnl_unlock();
}
+/**
+ * batadv_hard_if_event_softif() - Handle events for soft interfaces
+ * @event: NETDEV_* event to handle
+ * @net_dev: net_device which generated an event
+ *
+ * Return: NOTIFY_* result
+ */
+static int batadv_hard_if_event_softif(unsigned long event,
+ struct net_device *net_dev)
+{
+ struct batadv_priv *bat_priv;
+
+ switch (event) {
+ case NETDEV_REGISTER:
+ batadv_sysfs_add_meshif(net_dev);
+ bat_priv = netdev_priv(net_dev);
+ batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
+ break;
+ case NETDEV_CHANGENAME:
+ batadv_debugfs_rename_meshif(net_dev);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
static int batadv_hard_if_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
@@ -997,12 +1023,8 @@ static int batadv_hard_if_event(struct notifier_block *this,
struct batadv_hard_iface *primary_if = NULL;
struct batadv_priv *bat_priv;
- if (batadv_softif_is_valid(net_dev) && event == NETDEV_REGISTER) {
- batadv_sysfs_add_meshif(net_dev);
- bat_priv = netdev_priv(net_dev);
- batadv_softif_create_vlan(bat_priv, BATADV_NO_FLAGS);
- return NOTIFY_DONE;
- }
+ if (batadv_softif_is_valid(net_dev))
+ return batadv_hard_if_event_softif(event, net_dev);
hard_iface = batadv_hardif_get_by_netdev(net_dev);
if (!hard_iface && (event == NETDEV_REGISTER ||
@@ -1051,6 +1073,9 @@ static int batadv_hard_if_event(struct notifier_block *this,
if (batadv_is_wifi_hardif(hard_iface))
hard_iface->num_bcasts = BATADV_NUM_BCASTS_WIRELESS;
break;
+ case NETDEV_CHANGENAME:
+ batadv_debugfs_rename_hardif(hard_iface);
+ break;
default:
break;
}
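The batman-adv hunks above hook NETDEV_CHANGENAME so the per-interface debugfs directory is renamed with debugfs_rename() whenever the underlying netdev changes its name. The same notifier shape works for any subsystem that keys debugfs entries on interface names; a small sketch with assumed helper names:

/* Illustrative notifier sketch (assumed names, not from this patch). */
static int example_netdev_event(struct notifier_block *nb,
                                unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);

        switch (event) {
        case NETDEV_CHANGENAME:
                /* keep the debugfs path in sync with dev->name, e.g. by
                 * calling debugfs_rename() on the stored dentry */
                example_debugfs_rename(dev);
                break;
        default:
                break;
        }

        return NOTIFY_DONE;
}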
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 3986551397ca..12a2b7d21376 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1705,7 +1705,9 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
ether_addr_copy(common->addr, tt_addr);
common->vid = vid;
- common->flags = flags;
+ if (!is_multicast_ether_addr(common->addr))
+ common->flags = flags & (~BATADV_TT_SYNC_MASK);
+
tt_global_entry->roam_at = 0;
/* node must store current time in case of roaming. This is
* needed to purge this entry out on timeout (if nobody claims
@@ -1768,7 +1770,8 @@ static bool batadv_tt_global_add(struct batadv_priv *bat_priv,
* TT_CLIENT_TEMP, therefore they have to be copied in the
* client entry
*/
- common->flags |= flags & (~BATADV_TT_SYNC_MASK);
+ if (!is_multicast_ether_addr(common->addr))
+ common->flags |= flags & (~BATADV_TT_SYNC_MASK);
/* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
* one originator left in the list and we previously received a
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 510ab4f55df5..3264e1873219 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -437,13 +437,16 @@ static inline __poll_t bt_accept_poll(struct sock *parent)
return 0;
}
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t bt_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask = 0;
BT_DBG("sock %p, sk %p", sock, sk);
+ poll_wait(file, sk_sleep(sk), wait);
+
if (sk->sk_state == BT_LISTEN)
return bt_accept_poll(sk);
@@ -475,7 +478,7 @@ __poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-EXPORT_SYMBOL(bt_sock_poll_mask);
+EXPORT_SYMBOL(bt_sock_poll);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index d6c099861538..1506e1632394 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -1975,7 +1975,7 @@ static const struct proto_ops hci_sock_ops = {
.sendmsg = hci_sock_sendmsg,
.recvmsg = hci_sock_recvmsg,
.ioctl = hci_sock_ioctl,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = hci_sock_setsockopt,
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 742a190034e6..686bdc6b35b0 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1653,7 +1653,7 @@ static const struct proto_ops l2cap_sock_ops = {
.getname = l2cap_sock_getname,
.sendmsg = l2cap_sock_sendmsg,
.recvmsg = l2cap_sock_recvmsg,
- .poll_mask = bt_sock_poll_mask,
+ .poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index 1cf57622473a..d606e9212291 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -1049,7 +1049,7 @@ static const struct proto_ops rfcomm_sock_ops = {
.setsockopt = rfcomm_sock_setsockopt,
.getsockopt = rfcomm_sock_getsockopt,
.ioctl = rfcomm_sock_ioctl,
- .poll_mask = bt_sock_poll_mask,
+ .poll = bt_sock_poll,
.socketpair = sock_no_socketpair,
.mmap = sock_no_mmap
};
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index d60dbc61d170..413b8ee49fec 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -1197,7 +1197,7 @@ static const struct proto_ops sco_sock_ops = {
.getname = sco_sock_getname,
.sendmsg = sco_sock_sendmsg,
.recvmsg = sco_sock_recvmsg,
- .poll_mask = bt_sock_poll_mask,
+ .poll = bt_sock_poll,
.ioctl = bt_sock_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
diff --git a/net/bpf/test_run.c b/net/bpf/test_run.c
index 68c3578343b4..22a78eedf4b1 100644
--- a/net/bpf/test_run.c
+++ b/net/bpf/test_run.c
@@ -96,6 +96,7 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
u32 size = kattr->test.data_size_in;
u32 repeat = kattr->test.repeat;
u32 retval, duration;
+ int hh_len = ETH_HLEN;
struct sk_buff *skb;
void *data;
int ret;
@@ -131,12 +132,22 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
skb_reset_network_header(skb);
if (is_l2)
- __skb_push(skb, ETH_HLEN);
+ __skb_push(skb, hh_len);
if (is_direct_pkt_access)
bpf_compute_data_pointers(skb);
retval = bpf_test_run(prog, skb, repeat, &duration);
- if (!is_l2)
- __skb_push(skb, ETH_HLEN);
+ if (!is_l2) {
+ if (skb_headroom(skb) < hh_len) {
+ int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));
+
+ if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+ }
+ memset(__skb_push(skb, hh_len), 0, hh_len);
+ }
+
size = skb->len;
/* bpf program can never convert linear skb to non-linear */
if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
diff --git a/net/bpfilter/.gitignore b/net/bpfilter/.gitignore
new file mode 100644
index 000000000000..e97084e3eea2
--- /dev/null
+++ b/net/bpfilter/.gitignore
@@ -0,0 +1 @@
+bpfilter_umh
diff --git a/net/bpfilter/Kconfig b/net/bpfilter/Kconfig
index a948b072c28f..76deb6615883 100644
--- a/net/bpfilter/Kconfig
+++ b/net/bpfilter/Kconfig
@@ -1,6 +1,5 @@
menuconfig BPFILTER
bool "BPF based packet filtering framework (BPFILTER)"
- default n
depends on NET && BPF && INET
help
This builds experimental bpfilter framework that is aiming to
@@ -9,6 +8,7 @@ menuconfig BPFILTER
if BPFILTER
config BPFILTER_UMH
tristate "bpfilter kernel module with user mode helper"
+ depends on $(success,$(srctree)/scripts/cc-can-link.sh $(CC))
default m
help
This builds bpfilter kernel module with embedded user mode helper
diff --git a/net/bpfilter/Makefile b/net/bpfilter/Makefile
index e0bbe7583e58..39c6980b5d99 100644
--- a/net/bpfilter/Makefile
+++ b/net/bpfilter/Makefile
@@ -15,18 +15,7 @@ ifeq ($(CONFIG_BPFILTER_UMH), y)
HOSTLDFLAGS += -static
endif
-# a bit of elf magic to convert bpfilter_umh binary into a binary blob
-# inside bpfilter_umh.o elf file referenced by
-# _binary_net_bpfilter_bpfilter_umh_start symbol
-# which bpfilter_kern.c passes further into umh blob loader at run-time
-quiet_cmd_copy_umh = GEN $@
- cmd_copy_umh = echo ':' > $(obj)/.bpfilter_umh.o.cmd; \
- $(OBJCOPY) -I binary -O `$(OBJDUMP) -f $<|grep format|cut -d' ' -f8` \
- -B `$(OBJDUMP) -f $<|grep architecture|cut -d, -f1|cut -d' ' -f2` \
- --rename-section .data=.init.rodata $< $@
-
-$(obj)/bpfilter_umh.o: $(obj)/bpfilter_umh
- $(call cmd,copy_umh)
+$(obj)/bpfilter_umh_blob.o: $(obj)/bpfilter_umh
obj-$(CONFIG_BPFILTER_UMH) += bpfilter.o
-bpfilter-objs += bpfilter_kern.o bpfilter_umh.o
+bpfilter-objs += bpfilter_kern.o bpfilter_umh_blob.o
diff --git a/net/bpfilter/bpfilter_kern.c b/net/bpfilter/bpfilter_kern.c
index 09522573f611..f0fc182d3db7 100644
--- a/net/bpfilter/bpfilter_kern.c
+++ b/net/bpfilter/bpfilter_kern.c
@@ -10,11 +10,8 @@
#include <linux/file.h>
#include "msgfmt.h"
-#define UMH_start _binary_net_bpfilter_bpfilter_umh_start
-#define UMH_end _binary_net_bpfilter_bpfilter_umh_end
-
-extern char UMH_start;
-extern char UMH_end;
+extern char bpfilter_umh_start;
+extern char bpfilter_umh_end;
static struct umh_info info;
/* since ip_getsockopt() can run in parallel, serialize access to umh */
@@ -93,7 +90,9 @@ static int __init load_umh(void)
int err;
/* fork usermode process */
- err = fork_usermode_blob(&UMH_start, &UMH_end - &UMH_start, &info);
+ err = fork_usermode_blob(&bpfilter_umh_start,
+ &bpfilter_umh_end - &bpfilter_umh_start,
+ &info);
if (err)
return err;
pr_info("Loaded bpfilter_umh pid %d\n", info.pid);
diff --git a/net/bpfilter/bpfilter_umh_blob.S b/net/bpfilter/bpfilter_umh_blob.S
new file mode 100644
index 000000000000..40311d10d2f2
--- /dev/null
+++ b/net/bpfilter/bpfilter_umh_blob.S
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+ .section .init.rodata, "a"
+ .global bpfilter_umh_start
+bpfilter_umh_start:
+ .incbin "net/bpfilter/bpfilter_umh"
+ .global bpfilter_umh_end
+bpfilter_umh_end:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index e0adcd123f48..711d7156efd8 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -131,8 +131,10 @@ static void caif_flow_cb(struct sk_buff *skb)
caifd = caif_get(skb->dev);
WARN_ON(caifd == NULL);
- if (caifd == NULL)
+ if (!caifd) {
+ rcu_read_unlock();
return;
+ }
caifd_hold(caifd);
rcu_read_unlock();
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index c7991867d622..a6fb1b3bcad9 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -934,11 +934,15 @@ static int caif_release(struct socket *sock)
}
/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
-static __poll_t caif_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t caif_poll(struct file *file,
+ struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
+ __poll_t mask;
struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
- __poll_t mask = 0;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
/* exceptional events? */
if (sk->sk_err)
@@ -972,7 +976,7 @@ static const struct proto_ops caif_seqpacket_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = caif_poll_mask,
+ .poll = caif_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -993,7 +997,7 @@ static const struct proto_ops caif_stream_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = caif_poll_mask,
+ .poll = caif_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 9393f25df08d..0af8f0db892a 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -1660,7 +1660,7 @@ static const struct proto_ops bcm_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/can/raw.c b/net/can/raw.c
index fd7e2f49ea6a..1051eee82581 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -843,7 +843,7 @@ static const struct proto_ops raw_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = raw_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = can_ioctl, /* use can_ioctl() from af_can.c */
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/core/datagram.c b/net/core/datagram.c
index f19bf3dc2bd6..9938952c5c78 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -819,8 +819,9 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
/**
* datagram_poll - generic datagram poll
+ * @file: file struct
* @sock: socket
- * @events to wait for
+ * @wait: poll table
*
* Datagram poll: Again totally generic. This also handles
* sequenced packet sockets providing the socket receive queue
@@ -830,10 +831,14 @@ EXPORT_SYMBOL(skb_copy_and_csum_datagram_msg);
* and you use a different write policy from sock_writeable()
* then please supply your own write_space callback.
*/
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
- __poll_t mask = 0;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -866,4 +871,4 @@ __poll_t datagram_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-EXPORT_SYMBOL(datagram_poll_mask);
+EXPORT_SYMBOL(datagram_poll);
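The datagram_poll() hunk above is one instance of the change that runs through most of this merge: the short-lived ->poll_mask interface is reverted back to ->poll, so every implementation registers itself on the socket's wait queue (sock_poll_wait() or poll_wait()) before computing the ready mask. The restored shape, sketched with assumed event checks:

/* Generic shape of a ->poll implementation after this revert (sketch). */
static __poll_t example_sock_poll(struct file *file, struct socket *sock,
                                  poll_table *wait)
{
        struct sock *sk = sock->sk;
        __poll_t mask = 0;

        /* 1. register on the socket's wait queue first ... */
        sock_poll_wait(file, sk_sleep(sk), wait);

        /* 2. ... then report what is ready right now */
        if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
                mask |= EPOLLERR;
        if (!skb_queue_empty(&sk->sk_receive_queue))
                mask |= EPOLLIN | EPOLLRDNORM;
        if (sock_writeable(sk))
                mask |= EPOLLOUT | EPOLLWRNORM;

        return mask;
}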
diff --git a/net/core/dev.c b/net/core/dev.c
index 57b7bab5f70b..a5aa1c7444e6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -8643,7 +8643,8 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
/* We get here if we can't use the current device name */
if (!pat)
goto out;
- if (dev_get_valid_name(net, dev, pat) < 0)
+ err = dev_get_valid_name(net, dev, pat);
+ if (err < 0)
goto out;
}
@@ -8655,7 +8656,6 @@ int dev_change_net_namespace(struct net_device *dev, struct net *net, const char
dev_close(dev);
/* And unlink it from device chain */
- err = -ENODEV;
unlist_netdevice(dev);
synchronize_net();
diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
index a04e1e88bf3a..50537ff961a7 100644
--- a/net/core/dev_ioctl.c
+++ b/net/core/dev_ioctl.c
@@ -285,16 +285,9 @@ static int dev_ifsioc(struct net *net, struct ifreq *ifr, unsigned int cmd)
if (ifr->ifr_qlen < 0)
return -EINVAL;
if (dev->tx_queue_len ^ ifr->ifr_qlen) {
- unsigned int orig_len = dev->tx_queue_len;
-
- dev->tx_queue_len = ifr->ifr_qlen;
- err = call_netdevice_notifiers(
- NETDEV_CHANGE_TX_QUEUE_LEN, dev);
- err = notifier_to_errno(err);
- if (err) {
- dev->tx_queue_len = orig_len;
+ err = dev_change_tx_queue_len(dev, ifr->ifr_qlen);
+ if (err)
return err;
- }
}
return 0;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 126ffc5bc630..f64aa13811ea 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -416,6 +416,14 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
if (rule->mark && r->mark != rule->mark)
continue;
+ if (rule->suppress_ifgroup != -1 &&
+ r->suppress_ifgroup != rule->suppress_ifgroup)
+ continue;
+
+ if (rule->suppress_prefixlen != -1 &&
+ r->suppress_prefixlen != rule->suppress_prefixlen)
+ continue;
+
if (rule->mark_mask && r->mark_mask != rule->mark_mask)
continue;
@@ -436,6 +444,9 @@ static struct fib_rule *rule_find(struct fib_rules_ops *ops,
if (rule->ip_proto && r->ip_proto != rule->ip_proto)
continue;
+ if (rule->proto && r->proto != rule->proto)
+ continue;
+
if (fib_rule_port_range_set(&rule->sport_range) &&
!fib_rule_port_range_compare(&r->sport_range,
&rule->sport_range))
@@ -645,6 +656,73 @@ errout:
return err;
}
+static int rule_exists(struct fib_rules_ops *ops, struct fib_rule_hdr *frh,
+ struct nlattr **tb, struct fib_rule *rule)
+{
+ struct fib_rule *r;
+
+ list_for_each_entry(r, &ops->rules_list, list) {
+ if (r->action != rule->action)
+ continue;
+
+ if (r->table != rule->table)
+ continue;
+
+ if (r->pref != rule->pref)
+ continue;
+
+ if (memcmp(r->iifname, rule->iifname, IFNAMSIZ))
+ continue;
+
+ if (memcmp(r->oifname, rule->oifname, IFNAMSIZ))
+ continue;
+
+ if (r->mark != rule->mark)
+ continue;
+
+ if (r->suppress_ifgroup != rule->suppress_ifgroup)
+ continue;
+
+ if (r->suppress_prefixlen != rule->suppress_prefixlen)
+ continue;
+
+ if (r->mark_mask != rule->mark_mask)
+ continue;
+
+ if (r->tun_id != rule->tun_id)
+ continue;
+
+ if (r->fr_net != rule->fr_net)
+ continue;
+
+ if (r->l3mdev != rule->l3mdev)
+ continue;
+
+ if (!uid_eq(r->uid_range.start, rule->uid_range.start) ||
+ !uid_eq(r->uid_range.end, rule->uid_range.end))
+ continue;
+
+ if (r->ip_proto != rule->ip_proto)
+ continue;
+
+ if (r->proto != rule->proto)
+ continue;
+
+ if (!fib_rule_port_range_compare(&r->sport_range,
+ &rule->sport_range))
+ continue;
+
+ if (!fib_rule_port_range_compare(&r->dport_range,
+ &rule->dport_range))
+ continue;
+
+ if (!ops->compare(r, frh, tb))
+ continue;
+ return 1;
+ }
+ return 0;
+}
+
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
struct netlink_ext_ack *extack)
{
@@ -679,7 +757,7 @@ int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
goto errout;
if ((nlh->nlmsg_flags & NLM_F_EXCL) &&
- rule_find(ops, frh, tb, rule, user_priority)) {
+ rule_exists(ops, frh, tb, rule)) {
err = -EEXIST;
goto errout_free;
}
diff --git a/net/core/filter.c b/net/core/filter.c
index 3d9ba7e5965a..06da770f543f 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -459,11 +459,21 @@ static bool convert_bpf_ld_abs(struct sock_filter *fp, struct bpf_insn **insnp)
(!unaligned_ok && offset >= 0 &&
offset + ip_align >= 0 &&
offset + ip_align % size == 0))) {
+ bool ldx_off_ok = offset <= S16_MAX;
+
*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_H);
*insn++ = BPF_ALU64_IMM(BPF_SUB, BPF_REG_TMP, offset);
- *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP, size, 2 + endian);
- *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A, BPF_REG_D,
- offset);
+ *insn++ = BPF_JMP_IMM(BPF_JSLT, BPF_REG_TMP,
+ size, 2 + endian + (!ldx_off_ok * 2));
+ if (ldx_off_ok) {
+ *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+ BPF_REG_D, offset);
+ } else {
+ *insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_D);
+ *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_TMP, offset);
+ *insn++ = BPF_LDX_MEM(BPF_SIZE(fp->code), BPF_REG_A,
+ BPF_REG_TMP, 0);
+ }
if (endian)
*insn++ = BPF_ENDIAN(BPF_FROM_BE, BPF_REG_A, size * 8);
*insn++ = BPF_JMP_A(8);
@@ -1762,6 +1772,37 @@ static const struct bpf_func_proto bpf_skb_pull_data_proto = {
.arg2_type = ARG_ANYTHING,
};
+static inline int sk_skb_try_make_writable(struct sk_buff *skb,
+ unsigned int write_len)
+{
+ int err = __bpf_try_make_writable(skb, write_len);
+
+ bpf_compute_data_end_sk_skb(skb);
+ return err;
+}
+
+BPF_CALL_2(sk_skb_pull_data, struct sk_buff *, skb, u32, len)
+{
+ /* Idea is the following: should the needed direct read/write
+ * test fail during runtime, we can pull in more data and redo
+ * again, since implicitly, we invalidate previous checks here.
+ *
+ * Or, since we know how much we need to make read/writeable,
+ * this can be done once at the program beginning for direct
+ * access case. By this we overcome limitations of only current
+ * headroom being accessible.
+ */
+ return sk_skb_try_make_writable(skb, len ? : skb_headlen(skb));
+}
+
+static const struct bpf_func_proto sk_skb_pull_data_proto = {
+ .func = sk_skb_pull_data,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+};
+
BPF_CALL_5(bpf_l3_csum_replace, struct sk_buff *, skb, u32, offset,
u64, from, u64, to, u64, flags)
{
@@ -2779,7 +2820,8 @@ static int bpf_skb_net_shrink(struct sk_buff *skb, u32 len_diff)
static u32 __bpf_skb_max_len(const struct sk_buff *skb)
{
- return skb->dev->mtu + skb->dev->hard_header_len;
+ return skb->dev ? skb->dev->mtu + skb->dev->hard_header_len :
+ SKB_MAX_ALLOC;
}
static int bpf_skb_adjust_net(struct sk_buff *skb, s32 len_diff)
@@ -2863,8 +2905,8 @@ static int bpf_skb_trim_rcsum(struct sk_buff *skb, unsigned int new_len)
return __skb_trim_rcsum(skb, new_len);
}
-BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
- u64, flags)
+static inline int __bpf_skb_change_tail(struct sk_buff *skb, u32 new_len,
+ u64 flags)
{
u32 max_len = __bpf_skb_max_len(skb);
u32 min_len = __bpf_skb_min_len(skb);
@@ -2900,6 +2942,13 @@ BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
if (!ret && skb_is_gso(skb))
skb_gso_reset(skb);
}
+ return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_tail, struct sk_buff *, skb, u32, new_len,
+ u64, flags)
+{
+ int ret = __bpf_skb_change_tail(skb, new_len, flags);
bpf_compute_data_pointers(skb);
return ret;
@@ -2914,9 +2963,27 @@ static const struct bpf_func_proto bpf_skb_change_tail_proto = {
.arg3_type = ARG_ANYTHING,
};
-BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+BPF_CALL_3(sk_skb_change_tail, struct sk_buff *, skb, u32, new_len,
u64, flags)
{
+ int ret = __bpf_skb_change_tail(skb, new_len, flags);
+
+ bpf_compute_data_end_sk_skb(skb);
+ return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_tail_proto = {
+ .func = sk_skb_change_tail,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
+
+static inline int __bpf_skb_change_head(struct sk_buff *skb, u32 head_room,
+ u64 flags)
+{
u32 max_len = __bpf_skb_max_len(skb);
u32 new_len = skb->len + head_room;
int ret;
@@ -2941,8 +3008,16 @@ BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
skb_reset_mac_header(skb);
}
+ return ret;
+}
+
+BPF_CALL_3(bpf_skb_change_head, struct sk_buff *, skb, u32, head_room,
+ u64, flags)
+{
+ int ret = __bpf_skb_change_head(skb, head_room, flags);
+
bpf_compute_data_pointers(skb);
- return 0;
+ return ret;
}
static const struct bpf_func_proto bpf_skb_change_head_proto = {
@@ -2954,6 +3029,23 @@ static const struct bpf_func_proto bpf_skb_change_head_proto = {
.arg3_type = ARG_ANYTHING,
};
+BPF_CALL_3(sk_skb_change_head, struct sk_buff *, skb, u32, head_room,
+ u64, flags)
+{
+ int ret = __bpf_skb_change_head(skb, head_room, flags);
+
+ bpf_compute_data_end_sk_skb(skb);
+ return ret;
+}
+
+static const struct bpf_func_proto sk_skb_change_head_proto = {
+ .func = sk_skb_change_head,
+ .gpl_only = false,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_CTX,
+ .arg2_type = ARG_ANYTHING,
+ .arg3_type = ARG_ANYTHING,
+};
static unsigned long xdp_get_metalen(const struct xdp_buff *xdp)
{
return xdp_data_meta_unsupported(xdp) ? 0 :
@@ -3046,12 +3138,16 @@ static int __bpf_tx_xdp(struct net_device *dev,
u32 index)
{
struct xdp_frame *xdpf;
- int sent;
+ int err, sent;
if (!dev->netdev_ops->ndo_xdp_xmit) {
return -EOPNOTSUPP;
}
+ err = xdp_ok_fwd_dev(dev, xdp->data_end - xdp->data);
+ if (unlikely(err))
+ return err;
+
xdpf = convert_to_xdp_frame(xdp);
if (unlikely(!xdpf))
return -EOVERFLOW;
@@ -3214,20 +3310,6 @@ err:
}
EXPORT_SYMBOL_GPL(xdp_do_redirect);
-static int __xdp_generic_ok_fwd_dev(struct sk_buff *skb, struct net_device *fwd)
-{
- unsigned int len;
-
- if (unlikely(!(fwd->flags & IFF_UP)))
- return -ENETDOWN;
-
- len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
- if (skb->len > len)
- return -EMSGSIZE;
-
- return 0;
-}
-
static int xdp_do_generic_redirect_map(struct net_device *dev,
struct sk_buff *skb,
struct xdp_buff *xdp,
@@ -3256,10 +3338,11 @@ static int xdp_do_generic_redirect_map(struct net_device *dev,
}
if (map->map_type == BPF_MAP_TYPE_DEVMAP) {
- if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+ struct bpf_dtab_netdev *dst = fwd;
+
+ err = dev_map_generic_redirect(dst, skb, xdp_prog);
+ if (unlikely(err))
goto err;
- skb->dev = fwd;
- generic_xdp_tx(skb, xdp_prog);
} else if (map->map_type == BPF_MAP_TYPE_XSKMAP) {
struct xdp_sock *xs = fwd;
@@ -3298,7 +3381,8 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
goto err;
}
- if (unlikely((err = __xdp_generic_ok_fwd_dev(skb, fwd))))
+ err = xdp_ok_fwd_dev(fwd, skb->len);
+ if (unlikely(err))
goto err;
skb->dev = fwd;
@@ -4086,8 +4170,9 @@ static int bpf_fib_set_fwd_params(struct bpf_fib_lookup *params,
memcpy(params->smac, dev->dev_addr, ETH_ALEN);
params->h_vlan_TCI = 0;
params->h_vlan_proto = 0;
+ params->ifindex = dev->ifindex;
- return dev->ifindex;
+ return 0;
}
#endif
@@ -4111,7 +4196,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
/* verify forwarding is enabled on this interface */
in_dev = __in_dev_get_rcu(dev);
if (unlikely(!in_dev || !IN_DEV_FORWARD(in_dev)))
- return 0;
+ return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
fl4.flowi4_iif = 1;
@@ -4136,7 +4221,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
tb = fib_get_table(net, tbid);
if (unlikely(!tb))
- return 0;
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
err = fib_table_lookup(tb, &fl4, &res, FIB_LOOKUP_NOREF);
} else {
@@ -4148,8 +4233,20 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
err = fib_lookup(net, &fl4, &res, FIB_LOOKUP_NOREF);
}
- if (err || res.type != RTN_UNICAST)
- return 0;
+ if (err) {
+ /* map fib lookup errors to RTN_ type */
+ if (err == -EINVAL)
+ return BPF_FIB_LKUP_RET_BLACKHOLE;
+ if (err == -EHOSTUNREACH)
+ return BPF_FIB_LKUP_RET_UNREACHABLE;
+ if (err == -EACCES)
+ return BPF_FIB_LKUP_RET_PROHIBIT;
+
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
+ }
+
+ if (res.type != RTN_UNICAST)
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
if (res.fi->fib_nhs > 1)
fib_select_path(net, &res, &fl4, NULL);
@@ -4157,19 +4254,16 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
if (check_mtu) {
mtu = ip_mtu_from_fib_result(&res, params->ipv4_dst);
if (params->tot_len > mtu)
- return 0;
+ return BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
nh = &res.fi->fib_nh[res.nh_sel];
/* do not handle lwt encaps right now */
if (nh->nh_lwtstate)
- return 0;
+ return BPF_FIB_LKUP_RET_UNSUPP_LWT;
dev = nh->nh_dev;
- if (unlikely(!dev))
- return 0;
-
if (nh->nh_gw)
params->ipv4_dst = nh->nh_gw;
@@ -4179,10 +4273,10 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
* rcu_read_lock_bh is not needed here
*/
neigh = __ipv4_neigh_lookup_noref(dev, (__force u32)params->ipv4_dst);
- if (neigh)
- return bpf_fib_set_fwd_params(params, neigh, dev);
+ if (!neigh)
+ return BPF_FIB_LKUP_RET_NO_NEIGH;
- return 0;
+ return bpf_fib_set_fwd_params(params, neigh, dev);
}
#endif
@@ -4203,7 +4297,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
/* link local addresses are never forwarded */
if (rt6_need_strict(dst) || rt6_need_strict(src))
- return 0;
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
dev = dev_get_by_index_rcu(net, params->ifindex);
if (unlikely(!dev))
@@ -4211,7 +4305,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
idev = __in6_dev_get_safely(dev);
if (unlikely(!idev || !net->ipv6.devconf_all->forwarding))
- return 0;
+ return BPF_FIB_LKUP_RET_FWD_DISABLED;
if (flags & BPF_FIB_LOOKUP_OUTPUT) {
fl6.flowi6_iif = 1;
@@ -4238,7 +4332,7 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
tb = ipv6_stub->fib6_get_table(net, tbid);
if (unlikely(!tb))
- return 0;
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
f6i = ipv6_stub->fib6_table_lookup(net, tb, oif, &fl6, strict);
} else {
@@ -4251,11 +4345,23 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
}
if (unlikely(IS_ERR_OR_NULL(f6i) || f6i == net->ipv6.fib6_null_entry))
- return 0;
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
+
+ if (unlikely(f6i->fib6_flags & RTF_REJECT)) {
+ switch (f6i->fib6_type) {
+ case RTN_BLACKHOLE:
+ return BPF_FIB_LKUP_RET_BLACKHOLE;
+ case RTN_UNREACHABLE:
+ return BPF_FIB_LKUP_RET_UNREACHABLE;
+ case RTN_PROHIBIT:
+ return BPF_FIB_LKUP_RET_PROHIBIT;
+ default:
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
+ }
+ }
- if (unlikely(f6i->fib6_flags & RTF_REJECT ||
- f6i->fib6_type != RTN_UNICAST))
- return 0;
+ if (f6i->fib6_type != RTN_UNICAST)
+ return BPF_FIB_LKUP_RET_NOT_FWDED;
if (f6i->fib6_nsiblings && fl6.flowi6_oif == 0)
f6i = ipv6_stub->fib6_multipath_select(net, f6i, &fl6,
@@ -4265,11 +4371,11 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
if (check_mtu) {
mtu = ipv6_stub->ip6_mtu_from_fib6(f6i, dst, src);
if (params->tot_len > mtu)
- return 0;
+ return BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
if (f6i->fib6_nh.nh_lwtstate)
- return 0;
+ return BPF_FIB_LKUP_RET_UNSUPP_LWT;
if (f6i->fib6_flags & RTF_GATEWAY)
*dst = f6i->fib6_nh.nh_gw;
@@ -4283,10 +4389,10 @@ static int bpf_ipv6_fib_lookup(struct net *net, struct bpf_fib_lookup *params,
*/
neigh = ___neigh_lookup_noref(ipv6_stub->nd_tbl, neigh_key_eq128,
ndisc_hashfn, dst, dev);
- if (neigh)
- return bpf_fib_set_fwd_params(params, neigh, dev);
+ if (!neigh)
+ return BPF_FIB_LKUP_RET_NO_NEIGH;
- return 0;
+ return bpf_fib_set_fwd_params(params, neigh, dev);
}
#endif
@@ -4328,7 +4434,7 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
struct bpf_fib_lookup *, params, int, plen, u32, flags)
{
struct net *net = dev_net(skb->dev);
- int index = -EAFNOSUPPORT;
+ int rc = -EAFNOSUPPORT;
if (plen < sizeof(*params))
return -EINVAL;
@@ -4339,25 +4445,25 @@ BPF_CALL_4(bpf_skb_fib_lookup, struct sk_buff *, skb,
switch (params->family) {
#if IS_ENABLED(CONFIG_INET)
case AF_INET:
- index = bpf_ipv4_fib_lookup(net, params, flags, false);
+ rc = bpf_ipv4_fib_lookup(net, params, flags, false);
break;
#endif
#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6:
- index = bpf_ipv6_fib_lookup(net, params, flags, false);
+ rc = bpf_ipv6_fib_lookup(net, params, flags, false);
break;
#endif
}
- if (index > 0) {
+ if (!rc) {
struct net_device *dev;
- dev = dev_get_by_index_rcu(net, index);
+ dev = dev_get_by_index_rcu(net, params->ifindex);
if (!is_skb_forwardable(dev, skb))
- index = 0;
+ rc = BPF_FIB_LKUP_RET_FRAG_NEEDED;
}
- return index;
+ return rc;
}
static const struct bpf_func_proto bpf_skb_fib_lookup_proto = {
@@ -4430,10 +4536,10 @@ static const struct bpf_func_proto bpf_lwt_push_encap_proto = {
.arg4_type = ARG_CONST_SIZE
};
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
const void *, from, u32, len)
{
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
void *srh_tlvs, *srh_end, *ptr;
@@ -4459,9 +4565,6 @@ BPF_CALL_4(bpf_lwt_seg6_store_bytes, struct sk_buff *, skb, u32, offset,
memcpy(skb->data + offset, from, len);
return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
- return -EOPNOTSUPP;
-#endif
}
static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
@@ -4477,7 +4580,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_store_bytes_proto = {
BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
u32, action, void *, param, u32, param_len)
{
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
struct ipv6_sr_hdr *srh;
@@ -4525,9 +4627,6 @@ BPF_CALL_4(bpf_lwt_seg6_action, struct sk_buff *, skb,
default:
return -EINVAL;
}
-#else /* CONFIG_IPV6_SEG6_BPF */
- return -EOPNOTSUPP;
-#endif
}
static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
@@ -4543,7 +4642,6 @@ static const struct bpf_func_proto bpf_lwt_seg6_action_proto = {
BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
s32, len)
{
-#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
struct seg6_bpf_srh_state *srh_state =
this_cpu_ptr(&seg6_bpf_srh_states);
void *srh_end, *srh_tlvs, *ptr;
@@ -4587,9 +4685,6 @@ BPF_CALL_3(bpf_lwt_seg6_adjust_srh, struct sk_buff *, skb, u32, offset,
srh_state->hdrlen += len;
srh_state->valid = 0;
return 0;
-#else /* CONFIG_IPV6_SEG6_BPF */
- return -EOPNOTSUPP;
-#endif
}
static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
@@ -4600,6 +4695,7 @@ static const struct bpf_func_proto bpf_lwt_seg6_adjust_srh_proto = {
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_ANYTHING,
};
+#endif /* CONFIG_IPV6_SEG6_BPF */
bool bpf_helper_changes_pkt_data(void *func)
{
@@ -4608,9 +4704,12 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_skb_store_bytes ||
func == bpf_skb_change_proto ||
func == bpf_skb_change_head ||
+ func == sk_skb_change_head ||
func == bpf_skb_change_tail ||
+ func == sk_skb_change_tail ||
func == bpf_skb_adjust_room ||
func == bpf_skb_pull_data ||
+ func == sk_skb_pull_data ||
func == bpf_clone_redirect ||
func == bpf_l3_csum_replace ||
func == bpf_l4_csum_replace ||
@@ -4618,11 +4717,12 @@ bool bpf_helper_changes_pkt_data(void *func)
func == bpf_xdp_adjust_meta ||
func == bpf_msg_pull_data ||
func == bpf_xdp_adjust_tail ||
- func == bpf_lwt_push_encap ||
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
func == bpf_lwt_seg6_store_bytes ||
func == bpf_lwt_seg6_adjust_srh ||
- func == bpf_lwt_seg6_action
- )
+ func == bpf_lwt_seg6_action ||
+#endif
+ func == bpf_lwt_push_encap)
return true;
return false;
@@ -4862,11 +4962,11 @@ sk_skb_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
case BPF_FUNC_skb_load_bytes:
return &bpf_skb_load_bytes_proto;
case BPF_FUNC_skb_pull_data:
- return &bpf_skb_pull_data_proto;
+ return &sk_skb_pull_data_proto;
case BPF_FUNC_skb_change_tail:
- return &bpf_skb_change_tail_proto;
+ return &sk_skb_change_tail_proto;
case BPF_FUNC_skb_change_head:
- return &bpf_skb_change_head_proto;
+ return &sk_skb_change_head_proto;
case BPF_FUNC_get_socket_cookie:
return &bpf_get_socket_cookie_proto;
case BPF_FUNC_get_socket_uid:
@@ -4957,12 +5057,14 @@ static const struct bpf_func_proto *
lwt_seg6local_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
switch (func_id) {
+#if IS_ENABLED(CONFIG_IPV6_SEG6_BPF)
case BPF_FUNC_lwt_seg6_store_bytes:
return &bpf_lwt_seg6_store_bytes_proto;
case BPF_FUNC_lwt_seg6_action:
return &bpf_lwt_seg6_action_proto;
case BPF_FUNC_lwt_seg6_adjust_srh:
return &bpf_lwt_seg6_adjust_srh_proto;
+#endif
default:
return lwt_out_func_proto(func_id, prog);
}
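The filter.c hunks above also change the contract of bpf_fib_lookup(): instead of returning an egress ifindex (or 0), the helper now writes params->ifindex on success and returns a BPF_FIB_LKUP_RET_* code describing why a packet was or was not forwarded. A hedged sketch of how an XDP program would consume that, with assumed scaffolding and the Ethernet rewrite elided:

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

SEC("xdp")
int xdp_fib_fwd(struct xdp_md *ctx)
{
        struct bpf_fib_lookup params = {};
        int rc;

        /* ... fill params.family, addresses, ifindex and tot_len from the
         * packet headers (omitted in this sketch) ... */
        rc = bpf_fib_lookup(ctx, &params, sizeof(params), 0);

        switch (rc) {
        case BPF_FIB_LKUP_RET_SUCCESS:
                /* params.ifindex, smac and dmac are now valid; a real
                 * program rewrites the Ethernet header before this. */
                return bpf_redirect(params.ifindex, 0);
        case BPF_FIB_LKUP_RET_BLACKHOLE:
        case BPF_FIB_LKUP_RET_UNREACHABLE:
        case BPF_FIB_LKUP_RET_PROHIBIT:
                return XDP_DROP;
        default:
                /* not forwarded by the helper - let the stack handle it */
                return XDP_PASS;
        }
}

char _license[] SEC("license") = "GPL";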
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index b2b2323bdc84..188d693cb251 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -77,8 +77,20 @@ gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
d->lock = lock;
spin_lock_bh(lock);
}
- if (d->tail)
- return gnet_stats_copy(d, type, NULL, 0, padattr);
+ if (d->tail) {
+ int ret = gnet_stats_copy(d, type, NULL, 0, padattr);
+
+ /* The initial attribute added in gnet_stats_copy() may be
+ * preceded by a padding attribute, in which case d->tail will
+ * end up pointing at the padding instead of the real attribute.
+ * Fix this so gnet_stats_finish_copy() adjusts the length of
+ * the right attribute.
+ */
+ if (ret == 0 && d->tail->nla_type == padattr)
+ d->tail = (struct nlattr *)((char *)d->tail +
+ NLA_ALIGN(d->tail->nla_len));
+ return ret;
+ }
return 0;
}
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 68bf07206744..43a932cb609b 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -269,7 +269,7 @@ static void __page_pool_empty_ring(struct page_pool *pool)
struct page *page;
/* Empty recycle ring */
- while ((page = ptr_ring_consume(&pool->ring))) {
+ while ((page = ptr_ring_consume_bh(&pool->ring))) {
/* Verify the refcnt invariant of cached pages */
if (!(page_ref_count(page) == 1))
pr_crit("%s() page_pool refcnt %d violation\n",
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 5ef61222fdef..e3f743c141b3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2759,9 +2759,12 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
return err;
}
- dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
-
- __dev_notify_flags(dev, old_flags, ~0U);
+ if (dev->rtnl_link_state == RTNL_LINK_INITIALIZED) {
+ __dev_notify_flags(dev, old_flags, 0U);
+ } else {
+ dev->rtnl_link_state = RTNL_LINK_INITIALIZED;
+ __dev_notify_flags(dev, old_flags, ~0U);
+ }
return 0;
}
EXPORT_SYMBOL(rtnl_configure_link);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index c642304f178c..fb35b62af272 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -858,6 +858,7 @@ static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb)
n->cloned = 1;
n->nohdr = 0;
n->peeked = 0;
+ C(pfmemalloc);
n->destructor = NULL;
C(tail);
C(end);
@@ -3719,6 +3720,7 @@ normal:
net_warn_ratelimited(
"skb_segment: too many frags: %u %u\n",
pos, mss);
+ err = -EINVAL;
goto err;
}
@@ -3752,11 +3754,10 @@ skip_fraglist:
perform_csum_check:
if (!csum) {
- if (skb_has_shared_frag(nskb)) {
- err = __skb_linearize(nskb);
- if (err)
- goto err;
- }
+ if (skb_has_shared_frag(nskb) &&
+ __skb_linearize(nskb))
+ goto err;
+
if (!nskb->remcsum_offload)
nskb->ip_summed = CHECKSUM_NONE;
SKB_GSO_CB(nskb)->csum =
@@ -5276,8 +5277,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
if (npages >= 1 << order) {
page = alloc_pages((gfp_mask & ~__GFP_DIRECT_RECLAIM) |
__GFP_COMP |
- __GFP_NOWARN |
- __GFP_NORETRY,
+ __GFP_NOWARN,
order);
if (page)
goto fill_page;
diff --git a/net/core/sock.c b/net/core/sock.c
index bcc41829a16d..bc2d7a37297f 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -2277,9 +2277,9 @@ int sk_alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
pfrag->offset += use;
sge = sg + sg_curr - 1;
- if (sg_curr > first_coalesce && sg_page(sg) == pfrag->page &&
- sg->offset + sg->length == orig_offset) {
- sg->length += use;
+ if (sg_curr > first_coalesce && sg_page(sge) == pfrag->page &&
+ sge->offset + sge->length == orig_offset) {
+ sge->length += use;
} else {
sge = sg + sg_curr;
sg_unmark_end(sge);
@@ -3243,7 +3243,8 @@ static int req_prot_init(const struct proto *prot)
rsk_prot->slab = kmem_cache_create(rsk_prot->slab_name,
rsk_prot->obj_size, 0,
- prot->slab_flags, NULL);
+ SLAB_ACCOUNT | prot->slab_flags,
+ NULL);
if (!rsk_prot->slab) {
pr_crit("%s: Can't create request sock SLAB cache!\n",
@@ -3258,7 +3259,8 @@ int proto_register(struct proto *prot, int alloc_slab)
if (alloc_slab) {
prot->slab = kmem_cache_create_usercopy(prot->name,
prot->obj_size, 0,
- SLAB_HWCACHE_ALIGN | prot->slab_flags,
+ SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT |
+ prot->slab_flags,
prot->useroffset, prot->usersize,
NULL);
@@ -3281,6 +3283,7 @@ int proto_register(struct proto *prot, int alloc_slab)
kmem_cache_create(prot->twsk_prot->twsk_slab_name,
prot->twsk_prot->twsk_obj_size,
0,
+ SLAB_ACCOUNT |
prot->slab_flags,
NULL);
if (prot->twsk_prot->twsk_slab == NULL)
diff --git a/net/dccp/ccids/ccid3.c b/net/dccp/ccids/ccid3.c
index 8b5ba6dffac7..12877a1514e7 100644
--- a/net/dccp/ccids/ccid3.c
+++ b/net/dccp/ccids/ccid3.c
@@ -600,7 +600,7 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
struct dccp_sock *dp = dccp_sk(sk);
- ktime_t now = ktime_get_real();
+ ktime_t now = ktime_get();
s64 delta = 0;
switch (fbtype) {
@@ -625,15 +625,14 @@ static void ccid3_hc_rx_send_feedback(struct sock *sk,
case CCID3_FBACK_PERIODIC:
delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback);
if (delta <= 0)
- DCCP_BUG("delta (%ld) <= 0", (long)delta);
- else
- hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
+ delta = 1;
+ hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta);
break;
default:
return;
}
- ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta,
+ ccid3_pr_debug("Interval %lldusec, X_recv=%u, 1/p=%u\n", delta,
hc->rx_x_recv, hc->rx_pinv);
hc->rx_tstamp_last_feedback = now;
@@ -680,7 +679,8 @@ static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb)
static u32 ccid3_first_li(struct sock *sk)
{
struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk);
- u32 x_recv, p, delta;
+ u32 x_recv, p;
+ s64 delta;
u64 fval;
if (hc->rx_rtt == 0) {
@@ -688,7 +688,9 @@ static u32 ccid3_first_li(struct sock *sk)
hc->rx_rtt = DCCP_FALLBACK_RTT;
}
- delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback));
+ delta = ktime_us_delta(ktime_get(), hc->rx_tstamp_last_feedback);
+ if (delta <= 0)
+ delta = 1;
x_recv = scaled_div32(hc->rx_bytes_recv, delta);
if (x_recv == 0) { /* would also trigger divide-by-zero */
DCCP_WARN("X_recv==0\n");
diff --git a/net/dccp/dccp.h b/net/dccp/dccp.h
index 0ea2ee56ac1b..f91e3816806b 100644
--- a/net/dccp/dccp.h
+++ b/net/dccp/dccp.h
@@ -316,7 +316,8 @@ int dccp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
int flags, int *addr_len);
void dccp_shutdown(struct sock *sk, int how);
int inet_dccp_listen(struct socket *sock, int backlog);
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
int dccp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
void dccp_req_err(struct sock *sk, u64 seq);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index a9e478cd3787..b08feb219b44 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -984,7 +984,7 @@ static const struct proto_ops inet_dccp_ops = {
.accept = inet_accept,
.getname = inet_getname,
/* FIXME: work on tcp_poll to rename it to inet_csk_poll */
- .poll_mask = dccp_poll_mask,
+ .poll = dccp_poll,
.ioctl = inet_ioctl,
/* FIXME: work on inet_listen to rename it to sock_common_listen */
.listen = inet_dccp_listen,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 17fc4e0166ba..6344f1b18a6a 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -1070,7 +1070,7 @@ static const struct proto_ops inet6_dccp_ops = {
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet6_getname,
- .poll_mask = dccp_poll_mask,
+ .poll = dccp_poll,
.ioctl = inet6_ioctl,
.listen = inet_dccp_listen,
.shutdown = inet_shutdown,
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index ca21c1c76da0..0d56e36a6db7 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -312,11 +312,20 @@ int dccp_disconnect(struct sock *sk, int flags)
EXPORT_SYMBOL_GPL(dccp_disconnect);
-__poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
+/*
+ * Wait for a DCCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
+ */
+__poll_t dccp_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
__poll_t mask;
struct sock *sk = sock->sk;
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_state == DCCP_LISTEN)
return inet_csk_listen_poll(sk);
@@ -358,7 +367,7 @@ __poll_t dccp_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-EXPORT_SYMBOL_GPL(dccp_poll_mask);
+EXPORT_SYMBOL_GPL(dccp_poll);
int dccp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 9a686d890bfa..7d6ff983ba2c 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -1207,11 +1207,11 @@ static int dn_getname(struct socket *sock, struct sockaddr *uaddr,int peer)
}
-static __poll_t dn_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct dn_scp *scp = DN_SK(sk);
- __poll_t mask = datagram_poll_mask(sock, events);
+ __poll_t mask = datagram_poll(file, sock, wait);
if (!skb_queue_empty(&scp->other_receive_queue))
mask |= EPOLLRDBAND;
@@ -2331,7 +2331,7 @@ static const struct proto_ops dn_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = dn_accept,
.getname = dn_getname,
- .poll_mask = dn_poll_mask,
+ .poll = dn_poll,
.ioctl = dn_ioctl,
.listen = dn_listen,
.shutdown = dn_shutdown,
diff --git a/net/dns_resolver/dns_key.c b/net/dns_resolver/dns_key.c
index 40c851693f77..0c9478b91fa5 100644
--- a/net/dns_resolver/dns_key.c
+++ b/net/dns_resolver/dns_key.c
@@ -86,35 +86,39 @@ dns_resolver_preparse(struct key_preparsed_payload *prep)
opt++;
kdebug("options: '%s'", opt);
do {
+ int opt_len, opt_nlen;
const char *eq;
- int opt_len, opt_nlen, opt_vlen, tmp;
+ char optval[128];
next_opt = memchr(opt, '#', end - opt) ?: end;
opt_len = next_opt - opt;
- if (opt_len <= 0 || opt_len > 128) {
+ if (opt_len <= 0 || opt_len > sizeof(optval)) {
pr_warn_ratelimited("Invalid option length (%d) for dns_resolver key\n",
opt_len);
return -EINVAL;
}
- eq = memchr(opt, '=', opt_len) ?: end;
- opt_nlen = eq - opt;
- eq++;
- opt_vlen = next_opt - eq; /* will be -1 if no value */
+ eq = memchr(opt, '=', opt_len);
+ if (eq) {
+ opt_nlen = eq - opt;
+ eq++;
+ memcpy(optval, eq, next_opt - eq);
+ optval[next_opt - eq] = '\0';
+ } else {
+ opt_nlen = opt_len;
+ optval[0] = '\0';
+ }
- tmp = opt_vlen >= 0 ? opt_vlen : 0;
- kdebug("option '%*.*s' val '%*.*s'",
- opt_nlen, opt_nlen, opt, tmp, tmp, eq);
+ kdebug("option '%*.*s' val '%s'",
+ opt_nlen, opt_nlen, opt, optval);
/* see if it's an error number representing a DNS error
* that's to be recorded as the result in this key */
if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 &&
memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) {
kdebug("dns error number option");
- if (opt_vlen <= 0)
- goto bad_option_value;
- ret = kstrtoul(eq, 10, &derrno);
+ ret = kstrtoul(optval, 10, &derrno);
if (ret < 0)
goto bad_option_value;
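The functional change here is that the option value is copied into a NUL-terminated buffer on the stack before kstrtoul() parses it, rather than handing kstrtoul() a pointer into the middle of the '#'-separated option string. The same parsing shape in plain, standalone C (hypothetical names, userspace strtoul() in place of kstrtoul()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Return the numeric value of the first "name=value" option in a
 * '#'-separated list, or -1 if it has no value.  Illustrative only. */
static long parse_opt_value(const char *opt, const char *end)
{
	char optval[128];
	const char *next_opt, *eq;
	size_t opt_len, vlen;

	next_opt = memchr(opt, '#', end - opt);
	if (!next_opt)
		next_opt = end;
	opt_len = next_opt - opt;
	if (opt_len == 0 || opt_len > sizeof(optval))
		return -1;

	eq = memchr(opt, '=', opt_len);
	if (!eq)
		return -1;

	/* Copy the value and terminate it so strtoul() cannot run past
	 * the end of this option into the next one. */
	vlen = next_opt - (eq + 1);
	memcpy(optval, eq + 1, vlen);
	optval[vlen] = '\0';

	return strtoul(optval, NULL, 10);
}

int main(void)
{
	const char *opts = "dnserror=11#other=1";

	printf("dnserror = %ld\n",
	       parse_opt_value(opts, opts + strlen(opts)));
	return 0;
}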
diff --git a/net/ieee802154/6lowpan/core.c b/net/ieee802154/6lowpan/core.c
index 275449b0d633..3297e7fa9945 100644
--- a/net/ieee802154/6lowpan/core.c
+++ b/net/ieee802154/6lowpan/core.c
@@ -90,12 +90,18 @@ static int lowpan_neigh_construct(struct net_device *dev, struct neighbour *n)
return 0;
}
+static int lowpan_get_iflink(const struct net_device *dev)
+{
+ return lowpan_802154_dev(dev)->wdev->ifindex;
+}
+
static const struct net_device_ops lowpan_netdev_ops = {
.ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
.ndo_open = lowpan_open,
.ndo_stop = lowpan_stop,
.ndo_neigh_construct = lowpan_neigh_construct,
+ .ndo_get_iflink = lowpan_get_iflink,
};
static void lowpan_setup(struct net_device *ldev)
diff --git a/net/ieee802154/socket.c b/net/ieee802154/socket.c
index a0768d2759b8..a60658c85a9a 100644
--- a/net/ieee802154/socket.c
+++ b/net/ieee802154/socket.c
@@ -423,7 +423,7 @@ static const struct proto_ops ieee802154_raw_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = ieee802154_sock_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -969,7 +969,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = ieee802154_sock_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 15e125558c76..b403499fdabe 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -986,7 +986,7 @@ const struct proto_ops inet_stream_ops = {
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname,
- .poll_mask = tcp_poll_mask,
+ .poll = tcp_poll,
.ioctl = inet_ioctl,
.listen = inet_listen,
.shutdown = inet_shutdown,
@@ -1021,7 +1021,7 @@ const struct proto_ops inet_dgram_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
- .poll_mask = udp_poll_mask,
+ .poll = udp_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
@@ -1042,7 +1042,7 @@ EXPORT_SYMBOL(inet_dgram_ops);
/*
* For SOCK_RAW sockets; should be the same as inet_dgram_ops but without
- * udp_poll_mask
+ * udp_poll
*/
static const struct proto_ops inet_sockraw_ops = {
.family = PF_INET,
@@ -1053,7 +1053,7 @@ static const struct proto_ops inet_sockraw_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index b21833651394..e46cdd310e5f 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -300,6 +300,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb)
if (!ipv4_is_zeronet(ip_hdr(skb)->saddr)) {
struct flowi4 fl4 = {
.flowi4_iif = LOOPBACK_IFINDEX,
+ .flowi4_oif = l3mdev_master_ifindex_rcu(dev),
.daddr = ip_hdr(skb)->saddr,
.flowi4_tos = RT_TOS(ip_hdr(skb)->tos),
.flowi4_scope = scope,
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 1540db65241a..c9ec1603666b 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -448,9 +448,7 @@ next_proto:
out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
- skb_gro_remcsum_cleanup(skb, &grc);
- skb->remcsum_offload = 0;
+ skb_gro_flush_final_remcsum(skb, pp, flush, &grc);
return pp;
}
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index 1859c473b21a..6a7d980105f6 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -223,7 +223,7 @@ static struct sk_buff **gre_gro_receive(struct sk_buff **head,
out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final(skb, pp, flush);
return pp;
}
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 85b617b655bc..28fef7d15959 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1200,13 +1200,13 @@ static void igmpv3_del_delrec(struct in_device *in_dev, struct ip_mc_list *im)
spin_lock_bh(&im->lock);
if (pmc) {
im->interface = pmc->interface;
- im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
- im->sfmode = pmc->sfmode;
- if (pmc->sfmode == MCAST_INCLUDE) {
+ if (im->sfmode == MCAST_INCLUDE) {
im->tomb = pmc->tomb;
im->sources = pmc->sources;
for (psf = im->sources; psf; psf = psf->sf_next)
- psf->sf_crcount = im->crcount;
+ psf->sf_crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ } else {
+ im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
}
in_dev_put(pmc->interface);
kfree(pmc);
@@ -1288,7 +1288,7 @@ static void igmp_group_dropped(struct ip_mc_list *im)
#endif
}
-static void igmp_group_added(struct ip_mc_list *im)
+static void igmp_group_added(struct ip_mc_list *im, unsigned int mode)
{
struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
@@ -1316,7 +1316,13 @@ static void igmp_group_added(struct ip_mc_list *im)
}
/* else, v3 */
- im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+ /* Based on RFC3376 5.1, for newly added INCLUDE SSM, we should
+ * not send a filter-mode change record, as the mode goes from
+ * IN() to IN(A).
+ */
+ if (mode == MCAST_EXCLUDE)
+ im->crcount = in_dev->mr_qrv ?: net->ipv4.sysctl_igmp_qrv;
+
igmp_ifc_event(in_dev);
#endif
}
@@ -1381,8 +1387,7 @@ static void ip_mc_hash_remove(struct in_device *in_dev,
/*
* A socket has joined a multicast group on device dev.
*/
-
-void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+void __ip_mc_inc_group(struct in_device *in_dev, __be32 addr, unsigned int mode)
{
struct ip_mc_list *im;
#ifdef CONFIG_IP_MULTICAST
@@ -1394,7 +1399,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
for_each_pmc_rtnl(in_dev, im) {
if (im->multiaddr == addr) {
im->users++;
- ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
+ ip_mc_add_src(in_dev, &addr, mode, 0, NULL, 0);
goto out;
}
}
@@ -1408,8 +1413,8 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
in_dev_hold(in_dev);
im->multiaddr = addr;
/* initial mode is (EX, empty) */
- im->sfmode = MCAST_EXCLUDE;
- im->sfcount[MCAST_EXCLUDE] = 1;
+ im->sfmode = mode;
+ im->sfcount[mode] = 1;
refcount_set(&im->refcnt, 1);
spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
@@ -1426,12 +1431,17 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, im);
#endif
- igmp_group_added(im);
+ igmp_group_added(im, mode);
if (!in_dev->dead)
ip_rt_multicast_event(in_dev);
out:
return;
}
+
+void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
+{
+ __ip_mc_inc_group(in_dev, addr, MCAST_EXCLUDE);
+}
EXPORT_SYMBOL(ip_mc_inc_group);
static int ip_mc_check_iphdr(struct sk_buff *skb)
@@ -1688,7 +1698,7 @@ void ip_mc_remap(struct in_device *in_dev)
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, pmc);
#endif
- igmp_group_added(pmc);
+ igmp_group_added(pmc, pmc->sfmode);
}
}
@@ -1751,7 +1761,7 @@ void ip_mc_up(struct in_device *in_dev)
#ifdef CONFIG_IP_MULTICAST
igmpv3_del_delrec(in_dev, pmc);
#endif
- igmp_group_added(pmc);
+ igmp_group_added(pmc, pmc->sfmode);
}
}
@@ -2130,8 +2140,8 @@ static void ip_mc_clear_src(struct ip_mc_list *pmc)
/* Join a multicast group
*/
-
-int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+static int __ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode)
{
__be32 addr = imr->imr_multiaddr.s_addr;
struct ip_mc_socklist *iml, *i;
@@ -2172,15 +2182,30 @@ int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
memcpy(&iml->multi, imr, sizeof(*imr));
iml->next_rcu = inet->mc_list;
iml->sflist = NULL;
- iml->sfmode = MCAST_EXCLUDE;
+ iml->sfmode = mode;
rcu_assign_pointer(inet->mc_list, iml);
- ip_mc_inc_group(in_dev, addr);
+ __ip_mc_inc_group(in_dev, addr, mode);
err = 0;
done:
return err;
}
+
+/* Join ASM (Any-Source Multicast) group
+ */
+int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
+{
+ return __ip_mc_join_group(sk, imr, MCAST_EXCLUDE);
+}
EXPORT_SYMBOL(ip_mc_join_group);
+/* Join SSM (Source-Specific Multicast) group
+ */
+int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode)
+{
+ return __ip_mc_join_group(sk, imr, mode);
+}
+
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
struct in_device *in_dev)
{
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index c9e35b81d093..1e4cf3ab560f 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -90,7 +90,7 @@ static void inet_frags_free_cb(void *ptr, void *arg)
void inet_frags_exit_net(struct netns_frags *nf)
{
- nf->low_thresh = 0; /* prevent creation of new frags */
+ nf->high_thresh = 0; /* prevent creation of new frags */
rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
}
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 31ff46daae97..3647167c8fa3 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -243,9 +243,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
bool dev_match = (sk->sk_bound_dev_if == dif ||
sk->sk_bound_dev_if == sdif);
- if (exact_dif && !dev_match)
+ if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if && dev_match)
+ if (sk->sk_bound_dev_if)
score += 4;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index af5a830ff6ad..0e3edd25f881 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -523,6 +523,8 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->dev = from->dev;
to->mark = from->mark;
+ skb_copy_hash(to, from);
+
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
@@ -1145,7 +1147,8 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->fragsize = ip_sk_use_pmtu(sk) ?
dst_mtu(&rt->dst) : rt->dst.dev->mtu;
- cork->gso_size = sk->sk_type == SOCK_DGRAM ? ipc->gso_size : 0;
+ cork->gso_size = sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP ? ipc->gso_size : 0;
cork->dst = &rt->dst;
cork->length = 0;
cork->ttl = ipc->ttl;
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index fc32fdbeefa6..c0fe5ad996f2 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -150,15 +150,18 @@ static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
const struct iphdr *iph = ip_hdr(skb);
- __be16 *ports = (__be16 *)skb_transport_header(skb);
+ __be16 *ports;
+ int end;
- if (skb_transport_offset(skb) + 4 > (int)skb->len)
+ end = skb_transport_offset(skb) + 4;
+ if (end > 0 && !pskb_may_pull(skb, end))
return;
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
+ ports = (__be16 *)skb_transport_header(skb);
sin.sin_family = AF_INET;
sin.sin_addr.s_addr = iph->daddr;
@@ -984,7 +987,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
mreq.imr_multiaddr.s_addr = mreqs.imr_multiaddr;
mreq.imr_address.s_addr = mreqs.imr_interface;
mreq.imr_ifindex = 0;
- err = ip_mc_join_group(sk, &mreq);
+ err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
if (err && err != -EADDRINUSE)
break;
omode = MCAST_INCLUDE;
@@ -1061,7 +1064,7 @@ static int do_ip_setsockopt(struct sock *sk, int level,
mreq.imr_multiaddr = psin->sin_addr;
mreq.imr_address.s_addr = 0;
mreq.imr_ifindex = greqs.gsr_interface;
- err = ip_mc_join_group(sk, &mreq);
+ err = ip_mc_join_group_ssm(sk, &mreq, MCAST_INCLUDE);
if (err && err != -EADDRINUSE)
break;
greqs.gsr_interface = mreq.imr_ifindex;
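Both source-specific join paths patched above now call ip_mc_join_group_ssm() with MCAST_INCLUDE, so an SSM membership starts out in INCLUDE mode instead of as an EXCLUDE join followed by a source filter. Nothing changes on the userspace side; a minimal source-specific join (example addresses) is still just:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct ip_mreq_source mreq;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	memset(&mreq, 0, sizeof(mreq));
	/* 232.0.0.0/8 is the IPv4 SSM range; both addresses are examples. */
	inet_pton(AF_INET, "232.1.1.1", &mreq.imr_multiaddr);
	inet_pton(AF_INET, "192.0.2.1", &mreq.imr_sourceaddr);
	mreq.imr_interface.s_addr = htonl(INADDR_ANY);

	/* With this change the kernel records the membership as
	 * INCLUDE(source) from the start. */
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_SOURCE_MEMBERSHIP");

	close(fd);
	return 0;
}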
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index ca0dad90803a..e77872c93c20 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1898,6 +1898,7 @@ static struct xt_match ipt_builtin_mt[] __read_mostly = {
.checkentry = icmp_checkentry,
.proto = IPPROTO_ICMP,
.family = NFPROTO_IPV4,
+ .me = THIS_MODULE,
},
};
diff --git a/net/ipv4/netfilter/nf_tproxy_ipv4.c b/net/ipv4/netfilter/nf_tproxy_ipv4.c
index 805e83ec3ad9..164714104965 100644
--- a/net/ipv4/netfilter/nf_tproxy_ipv4.c
+++ b/net/ipv4/netfilter/nf_tproxy_ipv4.c
@@ -37,7 +37,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
* to a listener socket if there's one */
struct sock *sk2;
- sk2 = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+ sk2 = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
iph->saddr, laddr ? laddr : iph->daddr,
hp->source, lport ? lport : hp->dest,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -71,7 +71,7 @@ __be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr)
EXPORT_SYMBOL_GPL(nf_tproxy_laddr4);
struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
@@ -79,16 +79,21 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
const enum nf_tproxy_lookup_t lookup_type)
{
struct sock *sk;
- struct tcphdr *tcph;
switch (protocol) {
- case IPPROTO_TCP:
+ case IPPROTO_TCP: {
+ struct tcphdr _hdr, *hp;
+
+ hp = skb_header_pointer(skb, ip_hdrlen(skb),
+ sizeof(struct tcphdr), &_hdr);
+ if (hp == NULL)
+ return NULL;
+
switch (lookup_type) {
case NF_TPROXY_LOOKUP_LISTENER:
- tcph = hp;
sk = inet_lookup_listener(net, &tcp_hashinfo, skb,
ip_hdrlen(skb) +
- __tcp_hdrlen(tcph),
+ __tcp_hdrlen(hp),
saddr, sport,
daddr, dport,
in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
BUG();
}
break;
+ }
case IPPROTO_UDP:
sk = udp4_lib_lookup(net, saddr, sport, daddr, dport,
in->ifindex);
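The TCP branch now re-reads the transport header through skb_header_pointer() instead of trusting the hp pointer passed in by the caller: the helper copies the header into a stack buffer when the skb is not linear at that offset, and returns NULL if the packet is too short to contain one. The idiom on its own, as a small kernel-context sketch (example_tcp_sport() is illustrative):

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <net/ip.h>

/* Return the TCP source port of an IPv4 packet, or 0 if the header
 * cannot be read.  Sketch of the skb_header_pointer() idiom above. */
static __be16 example_tcp_sport(const struct sk_buff *skb)
{
	struct tcphdr _hdr;
	const struct tcphdr *th;

	th = skb_header_pointer(skb, ip_hdrlen(skb), sizeof(_hdr), &_hdr);
	if (!th)
		return 0;	/* truncated packet, nothing to look at */

	/* 'th' may point into the skb or at the local copy in _hdr;
	 * either way it is safe to dereference for sizeof(_hdr) bytes. */
	return th->source;
}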
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index d06247ba08b2..5fa335fd3852 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -189,8 +189,9 @@ static int ipv4_ping_group_range(struct ctl_table *table, int write,
if (write && ret == 0) {
low = make_kgid(user_ns, urange[0]);
high = make_kgid(user_ns, urange[1]);
- if (!gid_valid(low) || !gid_valid(high) ||
- (urange[1] < urange[0]) || gid_lt(high, low)) {
+ if (!gid_valid(low) || !gid_valid(high))
+ return -EINVAL;
+ if (urange[1] < urange[0] || gid_lt(high, low)) {
low = make_kgid(&init_user_ns, 1);
high = make_kgid(&init_user_ns, 0);
}
@@ -265,8 +266,9 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
ipv4.sysctl_tcp_fastopen);
struct ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
struct tcp_fastopen_context *ctxt;
- int ret;
u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
+ __le32 key[4];
+ int ret, i;
tbl.data = kmalloc(tbl.maxlen, GFP_KERNEL);
if (!tbl.data)
@@ -275,11 +277,14 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
rcu_read_lock();
ctxt = rcu_dereference(net->ipv4.tcp_fastopen_ctx);
if (ctxt)
- memcpy(user_key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
+ memcpy(key, ctxt->key, TCP_FASTOPEN_KEY_LENGTH);
else
- memset(user_key, 0, sizeof(user_key));
+ memset(key, 0, sizeof(key));
rcu_read_unlock();
+ for (i = 0; i < ARRAY_SIZE(key); i++)
+ user_key[i] = le32_to_cpu(key[i]);
+
snprintf(tbl.data, tbl.maxlen, "%08x-%08x-%08x-%08x",
user_key[0], user_key[1], user_key[2], user_key[3]);
ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
@@ -290,13 +295,17 @@ static int proc_tcp_fastopen_key(struct ctl_table *table, int write,
ret = -EINVAL;
goto bad_key;
}
- tcp_fastopen_reset_cipher(net, NULL, user_key,
+
+ for (i = 0; i < ARRAY_SIZE(user_key); i++)
+ key[i] = cpu_to_le32(user_key[i]);
+
+ tcp_fastopen_reset_cipher(net, NULL, key,
TCP_FASTOPEN_KEY_LENGTH);
}
bad_key:
pr_debug("proc FO key set 0x%x-%x-%x-%x <- 0x%s: %u\n",
- user_key[0], user_key[1], user_key[2], user_key[3],
+ user_key[0], user_key[1], user_key[2], user_key[3],
(char *)tbl.data, ret);
kfree(tbl.data);
return ret;
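The handler used to copy the raw 16-byte TFO key straight into a u32 array, so the "%08x-..." text form (and the reverse parse on write) depended on host endianness. Staging the key in a __le32 array and converting with le32_to_cpu()/cpu_to_le32() makes the text representation identical on all architectures. The same round-trip, reduced to standalone C using glibc's <endian.h> helpers:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A 16-byte key as stored: four little-endian 32-bit words. */
	const uint8_t key_bytes[16] = {
		0x01, 0x02, 0x03, 0x04, 0x11, 0x12, 0x13, 0x14,
		0x21, 0x22, 0x23, 0x24, 0x31, 0x32, 0x33, 0x34,
	};
	uint32_t le[4], host[4], back[4];
	int i;

	memcpy(le, key_bytes, sizeof(le));

	/* Read path: little-endian words -> host order before printing,
	 * so the text is the same on any architecture. */
	for (i = 0; i < 4; i++)
		host[i] = le32toh(le[i]);
	printf("%08x-%08x-%08x-%08x\n",
	       (unsigned)host[0], (unsigned)host[1],
	       (unsigned)host[2], (unsigned)host[3]);

	/* Write path: parsed host-order words -> little-endian storage. */
	for (i = 0; i < 4; i++)
		back[i] = htole32(host[i]);
	printf("round-trip ok: %d\n", memcmp(back, le, sizeof(le)) == 0);
	return 0;
}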
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 141acd92e58a..4491faf83f4f 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -494,21 +494,32 @@ static inline bool tcp_stream_is_readable(const struct tcp_sock *tp,
}
/*
- * Socket is not locked. We are protected from async events by poll logic and
- * correct handling of state changes made by other threads is impossible in
- * any case.
+ * Wait for a TCP event.
+ *
+ * Note that we don't need to lock the socket, as the upper poll layers
+ * take care of normal races (between the test and the event) and we don't
+ * go look at any of the socket buffers directly.
*/
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
+ __poll_t mask;
struct sock *sk = sock->sk;
const struct tcp_sock *tp = tcp_sk(sk);
- __poll_t mask = 0;
int state;
+ sock_poll_wait(file, sk_sleep(sk), wait);
+
state = inet_sk_state_load(sk);
if (state == TCP_LISTEN)
return inet_csk_listen_poll(sk);
+ /* Socket is not locked. We are protected from async events
+ * by poll logic and correct handling of state changes
+ * made by other threads is impossible in any case.
+ */
+
+ mask = 0;
+
/*
* EPOLLHUP is certainly not done right. But poll() doesn't
* have a notion of HUP in just one direction, and for a
@@ -589,7 +600,7 @@ __poll_t tcp_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-EXPORT_SYMBOL(tcp_poll_mask);
+EXPORT_SYMBOL(tcp_poll);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
@@ -1987,7 +1998,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
* shouldn't happen.
*/
if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
- "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
+ "TCP recvmsg seq # bug: copied %X, seq %X, rcvnxt %X, fl %X\n",
*seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
flags))
break;
@@ -2002,7 +2013,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
goto found_fin_ok;
WARN(!(flags & MSG_PEEK),
- "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
+ "TCP recvmsg seq # bug 2: copied %X, seq %X, rcvnxt %X, fl %X\n",
*seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
}
@@ -2551,6 +2562,8 @@ int tcp_disconnect(struct sock *sk, int flags)
tcp_clear_xmit_timers(sk);
__skb_queue_purge(&sk->sk_receive_queue);
+ tp->copied_seq = tp->rcv_nxt;
+ tp->urg_data = 0;
tcp_write_queue_purge(sk);
tcp_fastopen_active_disable_ofo_check(sk);
skb_rbtree_purge(&tp->out_of_order_queue);
@@ -2810,14 +2823,17 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_REPAIR:
if (!tcp_can_repair_sock(sk))
err = -EPERM;
- else if (val == 1) {
+ else if (val == TCP_REPAIR_ON) {
tp->repair = 1;
sk->sk_reuse = SK_FORCE_REUSE;
tp->repair_queue = TCP_NO_QUEUE;
- } else if (val == 0) {
+ } else if (val == TCP_REPAIR_OFF) {
tp->repair = 0;
sk->sk_reuse = SK_NO_REUSE;
tcp_send_window_probe(sk);
+ } else if (val == TCP_REPAIR_OFF_NO_WP) {
+ tp->repair = 0;
+ sk->sk_reuse = SK_NO_REUSE;
} else
err = -EINVAL;
@@ -3709,8 +3725,7 @@ int tcp_abort(struct sock *sk, int err)
struct request_sock *req = inet_reqsk(sk);
local_bh_disable();
- inet_csk_reqsk_queue_drop_and_put(req->rsk_listener,
- req);
+ inet_csk_reqsk_queue_drop(req->rsk_listener, req);
local_bh_enable();
return 0;
}
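TCP_REPAIR previously accepted only 1 (enter repair mode) and 0 (leave it, which also transmits a window probe). The hunk above adds a third, symbolically named case, TCP_REPAIR_OFF_NO_WP, which leaves repair mode without probing the peer. A checkpoint/restore-style caller might use it as below; this is a sketch with error handling trimmed, and it assumes the constants are picked up from the UAPI <linux/tcp.h>:

#include <linux/tcp.h>	/* TCP_REPAIR, TCP_REPAIR_ON, TCP_REPAIR_OFF_NO_WP */
#include <netinet/in.h>
#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	int val;

	/* Entering repair mode requires CAP_NET_ADMIN. */
	val = TCP_REPAIR_ON;
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val)) < 0)
		perror("TCP_REPAIR on");

	/* ... restore queues, sequence numbers and options here ... */

	/* Leave repair mode without sending a window probe. */
	val = TCP_REPAIR_OFF_NO_WP;
	if (setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &val, sizeof(val)) < 0)
		perror("TCP_REPAIR off, no window probe");

	close(fd);
	return 0;
}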
diff --git a/net/ipv4/tcp_dctcp.c b/net/ipv4/tcp_dctcp.c
index 5f5e5936760e..8b637f9f23a2 100644
--- a/net/ipv4/tcp_dctcp.c
+++ b/net/ipv4/tcp_dctcp.c
@@ -55,7 +55,6 @@ struct dctcp {
u32 dctcp_alpha;
u32 next_seq;
u32 ce_state;
- u32 delayed_ack_reserved;
u32 loss_cwnd;
};
@@ -96,7 +95,6 @@ static void dctcp_init(struct sock *sk)
ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
- ca->delayed_ack_reserved = 0;
ca->loss_cwnd = 0;
ca->ce_state = 0;
@@ -131,23 +129,14 @@ static void dctcp_ce_state_0_to_1(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- /* State has changed from CE=0 to CE=1 and delayed
- * ACK has not sent yet.
- */
- if (!ca->ce_state && ca->delayed_ack_reserved) {
- u32 tmp_rcv_nxt;
-
- /* Save current rcv_nxt. */
- tmp_rcv_nxt = tp->rcv_nxt;
-
- /* Generate previous ack with CE=0. */
- tp->ecn_flags &= ~TCP_ECN_DEMAND_CWR;
- tp->rcv_nxt = ca->prior_rcv_nxt;
-
- tcp_send_ack(sk);
-
- /* Recover current rcv_nxt. */
- tp->rcv_nxt = tmp_rcv_nxt;
+ if (!ca->ce_state) {
+ /* State has changed from CE=0 to CE=1, force an immediate
+ * ACK to reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+ tcp_enter_quickack_mode(sk, 1);
}
ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -161,23 +150,14 @@ static void dctcp_ce_state_1_to_0(struct sock *sk)
struct dctcp *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk);
- /* State has changed from CE=1 to CE=0 and delayed
- * ACK has not sent yet.
- */
- if (ca->ce_state && ca->delayed_ack_reserved) {
- u32 tmp_rcv_nxt;
-
- /* Save current rcv_nxt. */
- tmp_rcv_nxt = tp->rcv_nxt;
-
- /* Generate previous ack with CE=1. */
- tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
- tp->rcv_nxt = ca->prior_rcv_nxt;
-
- tcp_send_ack(sk);
-
- /* Recover current rcv_nxt. */
- tp->rcv_nxt = tmp_rcv_nxt;
+ if (ca->ce_state) {
+ /* State has changed from CE=1 to CE=0, force an immediate
+ * ACK to reflect the new CE state. If an ACK was delayed,
+ * send that first to reflect the prior CE state.
+ */
+ if (inet_csk(sk)->icsk_ack.pending & ICSK_ACK_TIMER)
+ __tcp_send_ack(sk, ca->prior_rcv_nxt);
+ tcp_enter_quickack_mode(sk, 1);
}
ca->prior_rcv_nxt = tp->rcv_nxt;
@@ -248,25 +228,6 @@ static void dctcp_state(struct sock *sk, u8 new_state)
}
}
-static void dctcp_update_ack_reserved(struct sock *sk, enum tcp_ca_event ev)
-{
- struct dctcp *ca = inet_csk_ca(sk);
-
- switch (ev) {
- case CA_EVENT_DELAYED_ACK:
- if (!ca->delayed_ack_reserved)
- ca->delayed_ack_reserved = 1;
- break;
- case CA_EVENT_NON_DELAYED_ACK:
- if (ca->delayed_ack_reserved)
- ca->delayed_ack_reserved = 0;
- break;
- default:
- /* Don't care for the rest. */
- break;
- }
-}
-
static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
{
switch (ev) {
@@ -276,10 +237,6 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
case CA_EVENT_ECN_NO_CE:
dctcp_ce_state_1_to_0(sk);
break;
- case CA_EVENT_DELAYED_ACK:
- case CA_EVENT_NON_DELAYED_ACK:
- dctcp_update_ack_reserved(sk, ev);
- break;
default:
/* Don't care for the rest. */
break;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 355d3dffd021..3bcd30a2ba06 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -215,7 +215,7 @@ static void tcp_incr_quickack(struct sock *sk, unsigned int max_quickacks)
icsk->icsk_ack.quick = quickacks;
}
-static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
{
struct inet_connection_sock *icsk = inet_csk(sk);
@@ -223,6 +223,7 @@ static void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks)
icsk->icsk_ack.pingpong = 0;
icsk->icsk_ack.ato = TCP_ATO_MIN;
}
+EXPORT_SYMBOL(tcp_enter_quickack_mode);
/* Send ACKs quickly, if "quick" count is not exhausted
* and the session is not interactive.
@@ -265,7 +266,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
* it is probably a retransmit.
*/
if (tp->ecn_flags & TCP_ECN_SEEN)
- tcp_enter_quickack_mode(sk, 1);
+ tcp_enter_quickack_mode(sk, 2);
break;
case INET_ECN_CE:
if (tcp_ca_needs_ecn(sk))
@@ -273,7 +274,7 @@ static void __tcp_ecn_check_ce(struct sock *sk, const struct sk_buff *skb)
if (!(tp->ecn_flags & TCP_ECN_DEMAND_CWR)) {
/* Better not delay acks, sender can have a very low cwnd */
- tcp_enter_quickack_mode(sk, 1);
+ tcp_enter_quickack_mode(sk, 2);
tp->ecn_flags |= TCP_ECN_DEMAND_CWR;
}
tp->ecn_flags |= TCP_ECN_SEEN;
@@ -3181,6 +3182,15 @@ static int tcp_clean_rtx_queue(struct sock *sk, u32 prior_fack,
if (tcp_is_reno(tp)) {
tcp_remove_reno_sacks(sk, pkts_acked);
+
+ /* If any of the cumulatively ACKed segments was
+ * retransmitted, the non-SACK case cannot confirm that
+ * progress was due to original transmission, due to the
+ * lack of TCPCB_SACKED_ACKED bits, even if some of
+ * the packets may never have been retransmitted.
+ */
+ if (flag & FLAG_RETRANS_DATA_ACKED)
+ flag &= ~FLAG_ORIG_SACK_ACKED;
} else {
int delta;
@@ -4348,6 +4358,23 @@ static bool tcp_try_coalesce(struct sock *sk,
return true;
}
+static bool tcp_ooo_try_coalesce(struct sock *sk,
+ struct sk_buff *to,
+ struct sk_buff *from,
+ bool *fragstolen)
+{
+ bool res = tcp_try_coalesce(sk, to, from, fragstolen);
+
+ /* In case tcp_drop() is called later, update to->gso_segs */
+ if (res) {
+ u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
+ max_t(u16, 1, skb_shinfo(from)->gso_segs);
+
+ skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
+ }
+ return res;
+}
+
static void tcp_drop(struct sock *sk, struct sk_buff *skb)
{
sk_drops_add(sk, skb);
@@ -4471,8 +4498,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
/* In the typical case, we are adding an skb to the end of the list.
* Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
*/
- if (tcp_try_coalesce(sk, tp->ooo_last_skb,
- skb, &fragstolen)) {
+ if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
+ skb, &fragstolen)) {
coalesce_done:
tcp_grow_window(sk, skb);
kfree_skb_partial(skb, fragstolen);
@@ -4500,7 +4527,7 @@ coalesce_done:
/* All the bits are present. Drop. */
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb);
+ tcp_drop(sk, skb);
skb = NULL;
tcp_dsack_set(sk, seq, end_seq);
goto add_sack;
@@ -4519,11 +4546,11 @@ coalesce_done:
TCP_SKB_CB(skb1)->end_seq);
NET_INC_STATS(sock_net(sk),
LINUX_MIB_TCPOFOMERGE);
- __kfree_skb(skb1);
+ tcp_drop(sk, skb1);
goto merge_right;
}
- } else if (tcp_try_coalesce(sk, skb1,
- skb, &fragstolen)) {
+ } else if (tcp_ooo_try_coalesce(sk, skb1,
+ skb, &fragstolen)) {
goto coalesce_done;
}
p = &parent->rb_right;
@@ -4892,6 +4919,7 @@ end:
static void tcp_collapse_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
+ u32 range_truesize, sum_tiny = 0;
struct sk_buff *skb, *head;
u32 start, end;
@@ -4903,6 +4931,7 @@ new_range:
}
start = TCP_SKB_CB(skb)->seq;
end = TCP_SKB_CB(skb)->end_seq;
+ range_truesize = skb->truesize;
for (head = skb;;) {
skb = skb_rb_next(skb);
@@ -4913,11 +4942,20 @@ new_range:
if (!skb ||
after(TCP_SKB_CB(skb)->seq, end) ||
before(TCP_SKB_CB(skb)->end_seq, start)) {
- tcp_collapse(sk, NULL, &tp->out_of_order_queue,
- head, skb, start, end);
+ /* Do not attempt collapsing tiny skbs */
+ if (range_truesize != head->truesize ||
+ end - start >= SKB_WITH_OVERHEAD(SK_MEM_QUANTUM)) {
+ tcp_collapse(sk, NULL, &tp->out_of_order_queue,
+ head, skb, start, end);
+ } else {
+ sum_tiny += range_truesize;
+ if (sum_tiny > sk->sk_rcvbuf >> 3)
+ return;
+ }
goto new_range;
}
+ range_truesize += skb->truesize;
if (unlikely(before(TCP_SKB_CB(skb)->seq, start)))
start = TCP_SKB_CB(skb)->seq;
if (after(TCP_SKB_CB(skb)->end_seq, end))
@@ -4932,6 +4970,7 @@ new_range:
* 2) not add too big latencies if thousands of packets sit there.
* (But if application shrinks SO_RCVBUF, we could still end up
* freeing whole queue here)
+ * 3) Drop at least 12.5 % of sk_rcvbuf to avoid malicious attacks.
*
* Return true if queue has shrunk.
*/
@@ -4939,20 +4978,26 @@ static bool tcp_prune_ofo_queue(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
struct rb_node *node, *prev;
+ int goal;
if (RB_EMPTY_ROOT(&tp->out_of_order_queue))
return false;
NET_INC_STATS(sock_net(sk), LINUX_MIB_OFOPRUNED);
+ goal = sk->sk_rcvbuf >> 3;
node = &tp->ooo_last_skb->rbnode;
do {
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
+ goal -= rb_to_skb(node)->truesize;
tcp_drop(sk, rb_to_skb(node));
- sk_mem_reclaim(sk);
- if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
- !tcp_under_memory_pressure(sk))
- break;
+ if (!prev || goal <= 0) {
+ sk_mem_reclaim(sk);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
+ !tcp_under_memory_pressure(sk))
+ break;
+ goal = sk->sk_rcvbuf >> 3;
+ }
node = prev;
} while (node);
tp->ooo_last_skb = rb_to_skb(prev);
@@ -4987,6 +5032,9 @@ static int tcp_prune_queue(struct sock *sk)
else if (tcp_under_memory_pressure(sk))
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
+ return 0;
+
tcp_collapse_ofo_queue(sk);
if (!skb_queue_empty(&sk->sk_receive_queue))
tcp_collapse(sk, &sk->sk_receive_queue, NULL,
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index bea17f1e8302..3b2711e33e4c 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -156,11 +156,24 @@ int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
*/
if (tcptw->tw_ts_recent_stamp &&
(!twp || (reuse && get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
- tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
- if (tp->write_seq == 0)
- tp->write_seq = 1;
- tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
- tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ /* In case of repair and re-using TIME-WAIT sockets we still
+ * want to be sure that it is safe as above but honor the
+ * sequence numbers and time stamps set as part of the repair
+ * process.
+ *
+ * Without this check re-using a TIME-WAIT socket with TCP
+ * repair would accumulate a -1 on the repair assigned
+ * sequence number. The first time it is reused the sequence
+ * is -1, the second time -2, etc. This fixes that issue
+ * without appearing to create any others.
+ */
+ if (likely(!tp->repair)) {
+ tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
+ if (tp->write_seq == 0)
+ tp->write_seq = 1;
+ tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
+ tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
+ }
sock_hold(sktw);
return 1;
}
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8e08b409c71e..c4172c1fb198 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -160,7 +160,8 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
}
/* Account for an ACK we sent. */
-static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
+static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,
+ u32 rcv_nxt)
{
struct tcp_sock *tp = tcp_sk(sk);
@@ -171,6 +172,9 @@ static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
if (hrtimer_try_to_cancel(&tp->compressed_ack_timer) == 1)
__sock_put(sk);
}
+
+ if (unlikely(rcv_nxt != tp->rcv_nxt))
+ return; /* Special ACK sent by DCTCP to reflect ECN */
tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
@@ -1023,8 +1027,8 @@ static void tcp_update_skb_after_send(struct tcp_sock *tp, struct sk_buff *skb)
* We are working here with either a clone of the original
* SKB, or a fresh unique copy made by the retransmit engine.
*/
-static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
- gfp_t gfp_mask)
+static int __tcp_transmit_skb(struct sock *sk, struct sk_buff *skb,
+ int clone_it, gfp_t gfp_mask, u32 rcv_nxt)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_sock *inet;
@@ -1100,7 +1104,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
th->source = inet->inet_sport;
th->dest = inet->inet_dport;
th->seq = htonl(tcb->seq);
- th->ack_seq = htonl(tp->rcv_nxt);
+ th->ack_seq = htonl(rcv_nxt);
*(((__be16 *)th) + 6) = htons(((tcp_header_size >> 2) << 12) |
tcb->tcp_flags);
@@ -1141,7 +1145,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
icsk->icsk_af_ops->send_check(sk, skb);
if (likely(tcb->tcp_flags & TCPHDR_ACK))
- tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
+ tcp_event_ack_sent(sk, tcp_skb_pcount(skb), rcv_nxt);
if (skb->len != tcp_header_size) {
tcp_event_data_sent(tp, sk);
@@ -1178,6 +1182,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
return err;
}
+static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
+ gfp_t gfp_mask)
+{
+ return __tcp_transmit_skb(sk, skb, clone_it, gfp_mask,
+ tcp_sk(sk)->rcv_nxt);
+}
+
/* This routine just queues the buffer for sending.
*
* NOTE: probe0 timer is not checked, do not forget tcp_push_pending_frames,
@@ -3523,8 +3534,6 @@ void tcp_send_delayed_ack(struct sock *sk)
int ato = icsk->icsk_ack.ato;
unsigned long timeout;
- tcp_ca_event(sk, CA_EVENT_DELAYED_ACK);
-
if (ato > TCP_DELACK_MIN) {
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ / 2;
@@ -3573,7 +3582,7 @@ void tcp_send_delayed_ack(struct sock *sk)
}
/* This routine sends an ack and also updates the window. */
-void tcp_send_ack(struct sock *sk)
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt)
{
struct sk_buff *buff;
@@ -3581,8 +3590,6 @@ void tcp_send_ack(struct sock *sk)
if (sk->sk_state == TCP_CLOSE)
return;
- tcp_ca_event(sk, CA_EVENT_NON_DELAYED_ACK);
-
/* We are not putting this on the write queue, so
* tcp_transmit_skb() will set the ownership to this
* sock.
@@ -3608,9 +3615,14 @@ void tcp_send_ack(struct sock *sk)
skb_set_tcp_pure_ack(buff);
/* Send it off, this clears delayed acks for us. */
- tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
+ __tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0, rcv_nxt);
+}
+EXPORT_SYMBOL_GPL(__tcp_send_ack);
+
+void tcp_send_ack(struct sock *sk)
+{
+ __tcp_send_ack(sk, tcp_sk(sk)->rcv_nxt);
}
-EXPORT_SYMBOL_GPL(tcp_send_ack);
/* This routine sends a packet with an out of date sequence
* number. It assumes the other end will try to ack it.
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 9bb27df4dac5..24e116ddae79 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -2591,7 +2591,7 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
* udp_poll - wait for a UDP event.
* @file - file struct
* @sock - socket
- * @events - events to wait for
+ * @wait - poll table
*
* This is same as datagram poll, except for the special case of
* blocking sockets. If application is using a blocking fd
@@ -2600,23 +2600,23 @@ int compat_udp_getsockopt(struct sock *sk, int level, int optname,
* but then block when reading it. Add special case code
* to work around these arguably broken applications.
*/
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
- __poll_t mask = datagram_poll_mask(sock, events);
+ __poll_t mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
if (!skb_queue_empty(&udp_sk(sk)->reader_queue))
mask |= EPOLLIN | EPOLLRDNORM;
/* Check for false positives due to checksum errors */
- if ((mask & EPOLLRDNORM) && !(sock->file->f_flags & O_NONBLOCK) &&
+ if ((mask & EPOLLRDNORM) && !(file->f_flags & O_NONBLOCK) &&
!(sk->sk_shutdown & RCV_SHUTDOWN) && first_packet_length(sk) == -1)
mask &= ~(EPOLLIN | EPOLLRDNORM);
return mask;
}
-EXPORT_SYMBOL(udp_poll_mask);
+EXPORT_SYMBOL(udp_poll);
int udp_abort(struct sock *sk, int err)
{
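The restored udp_poll() keeps the long-standing special case for blocking sockets: if the only queued datagram turns out to have a bad checksum, EPOLLIN/EPOLLRDNORM are masked out again so that an application which polls and then issues a blocking read does not hang. The calling pattern being protected is simply:

#include <netinet/in.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in addr;
	struct pollfd pfd;
	char buf[2048];
	int fd = socket(AF_INET, SOCK_DGRAM, 0);	/* blocking by default */

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(9999);		/* example port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	bind(fd, (struct sockaddr *)&addr, sizeof(addr));

	pfd.fd = fd;
	pfd.events = POLLIN;

	/* The application assumes that once poll() reports POLLIN, the
	 * following blocking recv() will not sleep; udp_poll() preserves
	 * that assumption even for checksum-broken datagrams. */
	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
		ssize_t n = recv(fd, buf, sizeof(buf), 0);
		printf("got %zd bytes\n", n);
	}

	close(fd);
	return 0;
}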
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 92dc9e5a7ff3..69c54540d5b4 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -394,7 +394,7 @@ unflush:
out_unlock:
rcu_read_unlock();
out:
- NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_flush_final(skb, pp, flush);
return pp;
}
EXPORT_SYMBOL(udp_gro_receive);
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index 0eff75525da1..b3885ca22d6f 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -108,6 +108,7 @@ config IPV6_MIP6
config IPV6_ILA
tristate "IPv6: Identifier Locator Addressing (ILA)"
depends on NETFILTER
+ select DST_CACHE
select LWTUNNEL
---help---
Support for IPv6 Identifier Locator Addressing (ILA).
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index c134286d6a41..f66a1cae3366 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -2374,7 +2374,8 @@ static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
continue;
if ((rt->fib6_flags & noflags) != 0)
continue;
- fib6_info_hold(rt);
+ if (!fib6_info_hold_safe(rt))
+ continue;
break;
}
out:
@@ -4528,6 +4529,7 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
unsigned long expires, u32 flags)
{
struct fib6_info *f6i;
+ u32 prio;
f6i = addrconf_get_prefix_route(&ifp->addr,
ifp->prefix_len,
@@ -4536,13 +4538,15 @@ static int modify_prefix_route(struct inet6_ifaddr *ifp,
if (!f6i)
return -ENOENT;
- if (f6i->fib6_metric != ifp->rt_priority) {
+ prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
+ if (f6i->fib6_metric != prio) {
+ /* delete old one */
+ ip6_del_rt(dev_net(ifp->idev->dev), f6i);
+
/* add new one */
addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
ifp->rt_priority, ifp->idev->dev,
expires, flags, GFP_KERNEL);
- /* delete old one */
- ip6_del_rt(dev_net(ifp->idev->dev), f6i);
} else {
if (!expires)
fib6_clean_expires(f6i);
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index 74f2a261e8df..9ed0eae91758 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -570,7 +570,7 @@ const struct proto_ops inet6_stream_ops = {
.socketpair = sock_no_socketpair, /* a do nothing */
.accept = inet_accept, /* ok */
.getname = inet6_getname,
- .poll_mask = tcp_poll_mask, /* ok */
+ .poll = tcp_poll, /* ok */
.ioctl = inet6_ioctl, /* must change */
.listen = inet_listen, /* ok */
.shutdown = inet_shutdown, /* ok */
@@ -603,7 +603,7 @@ const struct proto_ops inet6_dgram_ops = {
.socketpair = sock_no_socketpair, /* a do nothing */
.accept = sock_no_accept, /* a do nothing */
.getname = inet6_getname,
- .poll_mask = udp_poll_mask, /* ok */
+ .poll = udp_poll, /* ok */
.ioctl = inet6_ioctl, /* must change */
.listen = sock_no_listen, /* ok */
.shutdown = inet_shutdown, /* ok */
diff --git a/net/ipv6/calipso.c b/net/ipv6/calipso.c
index 1323b9679cf7..1c0bb9fb76e6 100644
--- a/net/ipv6/calipso.c
+++ b/net/ipv6/calipso.c
@@ -799,8 +799,7 @@ static int calipso_opt_update(struct sock *sk, struct ipv6_opt_hdr *hop)
{
struct ipv6_txoptions *old = txopt_get(inet6_sk(sk)), *txopts;
- txopts = ipv6_renew_options_kern(sk, old, IPV6_HOPOPTS,
- hop, hop ? ipv6_optlen(hop) : 0);
+ txopts = ipv6_renew_options(sk, old, IPV6_HOPOPTS, hop);
txopt_put(old);
if (IS_ERR(txopts))
return PTR_ERR(txopts);
@@ -1222,8 +1221,7 @@ static int calipso_req_setattr(struct request_sock *req,
if (IS_ERR(new))
return PTR_ERR(new);
- txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
- new, new ? ipv6_optlen(new) : 0);
+ txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
kfree(new);
@@ -1260,8 +1258,7 @@ static void calipso_req_delattr(struct request_sock *req)
if (calipso_opt_del(req_inet->ipv6_opt->hopopt, &new))
return; /* Nothing to do */
- txopts = ipv6_renew_options_kern(sk, req_inet->ipv6_opt, IPV6_HOPOPTS,
- new, new ? ipv6_optlen(new) : 0);
+ txopts = ipv6_renew_options(sk, req_inet->ipv6_opt, IPV6_HOPOPTS, new);
if (!IS_ERR(txopts)) {
txopts = xchg(&req_inet->ipv6_opt, txopts);
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 2ee08b6a86a4..1a1f876f8e28 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -700,13 +700,16 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
}
if (np->rxopt.bits.rxorigdstaddr) {
struct sockaddr_in6 sin6;
- __be16 *ports = (__be16 *) skb_transport_header(skb);
+ __be16 *ports;
+ int end;
- if (skb_transport_offset(skb) + 4 <= (int)skb->len) {
+ end = skb_transport_offset(skb) + 4;
+ if (end <= 0 || pskb_may_pull(skb, end)) {
/* All current transport protocols have the port numbers in the
* first four bytes of the transport header and this function is
* written with this assumption in mind.
*/
+ ports = (__be16 *)skb_transport_header(skb);
sin6.sin6_family = AF_INET6;
sin6.sin6_addr = ipv6_hdr(skb)->daddr;
diff --git a/net/ipv6/exthdrs.c b/net/ipv6/exthdrs.c
index 5bc2bf3733ab..20291c2036fc 100644
--- a/net/ipv6/exthdrs.c
+++ b/net/ipv6/exthdrs.c
@@ -1015,29 +1015,21 @@ ipv6_dup_options(struct sock *sk, struct ipv6_txoptions *opt)
}
EXPORT_SYMBOL_GPL(ipv6_dup_options);
-static int ipv6_renew_option(void *ohdr,
- struct ipv6_opt_hdr __user *newopt, int newoptlen,
- int inherit,
- struct ipv6_opt_hdr **hdr,
- char **p)
+static void ipv6_renew_option(int renewtype,
+ struct ipv6_opt_hdr **dest,
+ struct ipv6_opt_hdr *old,
+ struct ipv6_opt_hdr *new,
+ int newtype, char **p)
{
- if (inherit) {
- if (ohdr) {
- memcpy(*p, ohdr, ipv6_optlen((struct ipv6_opt_hdr *)ohdr));
- *hdr = (struct ipv6_opt_hdr *)*p;
- *p += CMSG_ALIGN(ipv6_optlen(*hdr));
- }
- } else {
- if (newopt) {
- if (copy_from_user(*p, newopt, newoptlen))
- return -EFAULT;
- *hdr = (struct ipv6_opt_hdr *)*p;
- if (ipv6_optlen(*hdr) > newoptlen)
- return -EINVAL;
- *p += CMSG_ALIGN(newoptlen);
- }
- }
- return 0;
+ struct ipv6_opt_hdr *src;
+
+ src = (renewtype == newtype ? new : old);
+ if (!src)
+ return;
+
+ memcpy(*p, src, ipv6_optlen(src));
+ *dest = (struct ipv6_opt_hdr *)*p;
+ *p += CMSG_ALIGN(ipv6_optlen(*dest));
}
/**
@@ -1063,13 +1055,11 @@ static int ipv6_renew_option(void *ohdr,
*/
struct ipv6_txoptions *
ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
- int newtype,
- struct ipv6_opt_hdr __user *newopt, int newoptlen)
+ int newtype, struct ipv6_opt_hdr *newopt)
{
int tot_len = 0;
char *p;
struct ipv6_txoptions *opt2;
- int err;
if (opt) {
if (newtype != IPV6_HOPOPTS && opt->hopopt)
@@ -1082,8 +1072,8 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
tot_len += CMSG_ALIGN(ipv6_optlen(opt->dst1opt));
}
- if (newopt && newoptlen)
- tot_len += CMSG_ALIGN(newoptlen);
+ if (newopt)
+ tot_len += CMSG_ALIGN(ipv6_optlen(newopt));
if (!tot_len)
return NULL;
@@ -1098,29 +1088,19 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
opt2->tot_len = tot_len;
p = (char *)(opt2 + 1);
- err = ipv6_renew_option(opt ? opt->hopopt : NULL, newopt, newoptlen,
- newtype != IPV6_HOPOPTS,
- &opt2->hopopt, &p);
- if (err)
- goto out;
-
- err = ipv6_renew_option(opt ? opt->dst0opt : NULL, newopt, newoptlen,
- newtype != IPV6_RTHDRDSTOPTS,
- &opt2->dst0opt, &p);
- if (err)
- goto out;
-
- err = ipv6_renew_option(opt ? opt->srcrt : NULL, newopt, newoptlen,
- newtype != IPV6_RTHDR,
- (struct ipv6_opt_hdr **)&opt2->srcrt, &p);
- if (err)
- goto out;
-
- err = ipv6_renew_option(opt ? opt->dst1opt : NULL, newopt, newoptlen,
- newtype != IPV6_DSTOPTS,
- &opt2->dst1opt, &p);
- if (err)
- goto out;
+ ipv6_renew_option(IPV6_HOPOPTS, &opt2->hopopt,
+ (opt ? opt->hopopt : NULL),
+ newopt, newtype, &p);
+ ipv6_renew_option(IPV6_RTHDRDSTOPTS, &opt2->dst0opt,
+ (opt ? opt->dst0opt : NULL),
+ newopt, newtype, &p);
+ ipv6_renew_option(IPV6_RTHDR,
+ (struct ipv6_opt_hdr **)&opt2->srcrt,
+ (opt ? (struct ipv6_opt_hdr *)opt->srcrt : NULL),
+ newopt, newtype, &p);
+ ipv6_renew_option(IPV6_DSTOPTS, &opt2->dst1opt,
+ (opt ? opt->dst1opt : NULL),
+ newopt, newtype, &p);
opt2->opt_nflen = (opt2->hopopt ? ipv6_optlen(opt2->hopopt) : 0) +
(opt2->dst0opt ? ipv6_optlen(opt2->dst0opt) : 0) +
@@ -1128,37 +1108,6 @@ ipv6_renew_options(struct sock *sk, struct ipv6_txoptions *opt,
opt2->opt_flen = (opt2->dst1opt ? ipv6_optlen(opt2->dst1opt) : 0);
return opt2;
-out:
- sock_kfree_s(sk, opt2, opt2->tot_len);
- return ERR_PTR(err);
-}
-
-/**
- * ipv6_renew_options_kern - replace a specific ext hdr with a new one.
- *
- * @sk: sock from which to allocate memory
- * @opt: original options
- * @newtype: option type to replace in @opt
- * @newopt: new option of type @newtype to replace (kernel-mem)
- * @newoptlen: length of @newopt
- *
- * See ipv6_renew_options(). The difference is that @newopt is
- * kernel memory, rather than user memory.
- */
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk, struct ipv6_txoptions *opt,
- int newtype, struct ipv6_opt_hdr *newopt,
- int newoptlen)
-{
- struct ipv6_txoptions *ret_val;
- const mm_segment_t old_fs = get_fs();
-
- set_fs(KERNEL_DS);
- ret_val = ipv6_renew_options(sk, opt, newtype,
- (struct ipv6_opt_hdr __user *)newopt,
- newoptlen);
- set_fs(old_fs);
- return ret_val;
}
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index be491bf6ab6e..ef2505aefc15 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -402,9 +402,10 @@ static int icmp6_iif(const struct sk_buff *skb)
/* for local traffic to local address, skb dev is the loopback
* device. Check if there is a dst attached to the skb and if so
- * get the real device index.
+ * get the real device index. Same is needed for replies to a link
+ * local address on a device enslaved to an L3 master device
*/
- if (unlikely(iif == LOOPBACK_IFINDEX)) {
+ if (unlikely(iif == LOOPBACK_IFINDEX || netif_is_l3_master(skb->dev))) {
const struct rt6_info *rt6 = skb_rt6_info(skb);
if (rt6)
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 2febe26de6a1..595ad408dba0 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -113,9 +113,9 @@ static inline int compute_score(struct sock *sk, struct net *net,
bool dev_match = (sk->sk_bound_dev_if == dif ||
sk->sk_bound_dev_if == sdif);
- if (exact_dif && !dev_match)
+ if (!dev_match)
return -1;
- if (sk->sk_bound_dev_if && dev_match)
+ if (sk->sk_bound_dev_if)
score++;
}
if (sk->sk_incoming_cpu == raw_smp_processor_id())
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 39d1d487eca2..d212738e9d10 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -167,8 +167,9 @@ struct fib6_info *fib6_info_alloc(gfp_t gfp_flags)
return f6i;
}
-void fib6_info_destroy(struct fib6_info *f6i)
+void fib6_info_destroy_rcu(struct rcu_head *head)
{
+ struct fib6_info *f6i = container_of(head, struct fib6_info, rcu);
struct rt6_exception_bucket *bucket;
struct dst_metrics *m;
@@ -206,7 +207,7 @@ void fib6_info_destroy(struct fib6_info *f6i)
kfree(f6i);
}
-EXPORT_SYMBOL_GPL(fib6_info_destroy);
+EXPORT_SYMBOL_GPL(fib6_info_destroy_rcu);
static struct fib6_node *node_alloc(struct net *net)
{
@@ -934,20 +935,19 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
{
struct fib6_info *leaf = rcu_dereference_protected(fn->leaf,
lockdep_is_held(&rt->fib6_table->tb6_lock));
- enum fib_event_type event = FIB_EVENT_ENTRY_ADD;
- struct fib6_info *iter = NULL, *match = NULL;
+ struct fib6_info *iter = NULL;
struct fib6_info __rcu **ins;
+ struct fib6_info __rcu **fallback_ins = NULL;
int replace = (info->nlh &&
(info->nlh->nlmsg_flags & NLM_F_REPLACE));
- int append = (info->nlh &&
- (info->nlh->nlmsg_flags & NLM_F_APPEND));
int add = (!info->nlh ||
(info->nlh->nlmsg_flags & NLM_F_CREATE));
int found = 0;
+ bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
u16 nlflags = NLM_F_EXCL;
int err;
- if (append)
+ if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
nlflags |= NLM_F_APPEND;
ins = &fn->leaf;
@@ -969,8 +969,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
nlflags &= ~NLM_F_EXCL;
if (replace) {
- found++;
- break;
+ if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+ found++;
+ break;
+ }
+ if (rt_can_ecmp)
+ fallback_ins = fallback_ins ?: ins;
+ goto next_iter;
}
if (rt6_duplicate_nexthop(iter, rt)) {
@@ -985,51 +990,71 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
fib6_metric_set(iter, RTAX_MTU, rt->fib6_pmtu);
return -EEXIST;
}
-
- /* first route that matches */
- if (!match)
- match = iter;
+ /* If we have the same destination and the same metric,
+ * but not the same gateway, then the route we try to
+ * add is sibling to this route, increment our counter
+ * of siblings, and later we will add our route to the
+ * list.
+ * Only static routes (which don't have flag
+ * RTF_EXPIRES) are used for ECMPv6.
+ *
+ * To avoid long list, we only had siblings if the
+ * route have a gateway.
+ */
+ if (rt_can_ecmp &&
+ rt6_qualify_for_ecmp(iter))
+ rt->fib6_nsiblings++;
}
if (iter->fib6_metric > rt->fib6_metric)
break;
+next_iter:
ins = &iter->fib6_next;
}
+ if (fallback_ins && !found) {
+ /* No ECMP-able route found, replace first non-ECMP one */
+ ins = fallback_ins;
+ iter = rcu_dereference_protected(*ins,
+ lockdep_is_held(&rt->fib6_table->tb6_lock));
+ found++;
+ }
+
/* Reset round-robin state, if necessary */
if (ins == &fn->leaf)
fn->rr_ptr = NULL;
/* Link this route to others same route. */
- if (append && match) {
+ if (rt->fib6_nsiblings) {
+ unsigned int fib6_nsiblings;
struct fib6_info *sibling, *temp_sibling;
- if (rt->fib6_flags & RTF_REJECT) {
- NL_SET_ERR_MSG(extack,
- "Can not append a REJECT route");
- return -EINVAL;
- } else if (match->fib6_flags & RTF_REJECT) {
- NL_SET_ERR_MSG(extack,
- "Can not append to a REJECT route");
- return -EINVAL;
+ /* Find the first route that has the same metric */
+ sibling = leaf;
+ while (sibling) {
+ if (sibling->fib6_metric == rt->fib6_metric &&
+ rt6_qualify_for_ecmp(sibling)) {
+ list_add_tail(&rt->fib6_siblings,
+ &sibling->fib6_siblings);
+ break;
+ }
+ sibling = rcu_dereference_protected(sibling->fib6_next,
+ lockdep_is_held(&rt->fib6_table->tb6_lock));
}
- event = FIB_EVENT_ENTRY_APPEND;
- rt->fib6_nsiblings = match->fib6_nsiblings;
- list_add_tail(&rt->fib6_siblings, &match->fib6_siblings);
- match->fib6_nsiblings++;
-
/* For each sibling in the list, increment the counter of
* siblings. BUG() if counters does not match, list of siblings
* is broken!
*/
+ fib6_nsiblings = 0;
list_for_each_entry_safe(sibling, temp_sibling,
- &match->fib6_siblings, fib6_siblings) {
+ &rt->fib6_siblings, fib6_siblings) {
sibling->fib6_nsiblings++;
- BUG_ON(sibling->fib6_nsiblings != match->fib6_nsiblings);
+ BUG_ON(sibling->fib6_nsiblings != rt->fib6_nsiblings);
+ fib6_nsiblings++;
}
-
- rt6_multipath_rebalance(match);
+ BUG_ON(fib6_nsiblings != rt->fib6_nsiblings);
+ rt6_multipath_rebalance(temp_sibling);
}
/*
@@ -1042,8 +1067,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
add:
nlflags |= NLM_F_CREATE;
- err = call_fib6_entry_notifiers(info->nl_net, event, rt,
- extack);
+ err = call_fib6_entry_notifiers(info->nl_net,
+ FIB_EVENT_ENTRY_ADD,
+ rt, extack);
if (err)
return err;
@@ -1061,7 +1087,7 @@ add:
}
} else {
- struct fib6_info *tmp;
+ int nsiblings;
if (!found) {
if (add)
@@ -1076,57 +1102,48 @@ add:
if (err)
return err;
- /* if route being replaced has siblings, set tmp to
- * last one, otherwise tmp is current route. this is
- * used to set fib6_next for new route
- */
- if (iter->fib6_nsiblings)
- tmp = list_last_entry(&iter->fib6_siblings,
- struct fib6_info,
- fib6_siblings);
- else
- tmp = iter;
-
- /* insert new route */
atomic_inc(&rt->fib6_ref);
rcu_assign_pointer(rt->fib6_node, fn);
- rt->fib6_next = tmp->fib6_next;
+ rt->fib6_next = iter->fib6_next;
rcu_assign_pointer(*ins, rt);
-
if (!info->skip_notify)
inet6_rt_notify(RTM_NEWROUTE, rt, info, NLM_F_REPLACE);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
+ nsiblings = iter->fib6_nsiblings;
+ iter->fib6_node = NULL;
+ fib6_purge_rt(iter, fn, info->nl_net);
+ if (rcu_access_pointer(fn->rr_ptr) == iter)
+ fn->rr_ptr = NULL;
+ fib6_info_release(iter);
- /* delete old route */
- rt = iter;
-
- if (rt->fib6_nsiblings) {
- struct fib6_info *tmp;
-
+ if (nsiblings) {
/* Replacing an ECMP route, remove all siblings */
- list_for_each_entry_safe(iter, tmp, &rt->fib6_siblings,
- fib6_siblings) {
- iter->fib6_node = NULL;
- fib6_purge_rt(iter, fn, info->nl_net);
- if (rcu_access_pointer(fn->rr_ptr) == iter)
- fn->rr_ptr = NULL;
- fib6_info_release(iter);
-
- rt->fib6_nsiblings--;
- info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+ ins = &rt->fib6_next;
+ iter = rcu_dereference_protected(*ins,
+ lockdep_is_held(&rt->fib6_table->tb6_lock));
+ while (iter) {
+ if (iter->fib6_metric > rt->fib6_metric)
+ break;
+ if (rt6_qualify_for_ecmp(iter)) {
+ *ins = iter->fib6_next;
+ iter->fib6_node = NULL;
+ fib6_purge_rt(iter, fn, info->nl_net);
+ if (rcu_access_pointer(fn->rr_ptr) == iter)
+ fn->rr_ptr = NULL;
+ fib6_info_release(iter);
+ nsiblings--;
+ info->nl_net->ipv6.rt6_stats->fib_rt_entries--;
+ } else {
+ ins = &iter->fib6_next;
+ }
+ iter = rcu_dereference_protected(*ins,
+ lockdep_is_held(&rt->fib6_table->tb6_lock));
}
+ WARN_ON(nsiblings != 0);
}
-
- WARN_ON(rt->fib6_nsiblings != 0);
-
- rt->fib6_node = NULL;
- fib6_purge_rt(rt, fn, info->nl_net);
- if (rcu_access_pointer(fn->rr_ptr) == rt)
- fn->rr_ptr = NULL;
- fib6_info_release(rt);
}
return 0;
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index c8cf2fdbb13b..cd2cfb04e5d8 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -927,7 +927,6 @@ tx_err:
static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
struct ip6_tnl *t = netdev_priv(dev);
struct dst_entry *dst = skb_dst(skb);
struct net_device_stats *stats;
@@ -1010,6 +1009,8 @@ static netdev_tx_t ip6erspan_tunnel_xmit(struct sk_buff *skb,
goto tx_err;
}
} else {
+ struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+
switch (skb->protocol) {
case htons(ETH_P_IP):
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 021e5aef6ba3..3168847c30d1 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -570,6 +570,8 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->dev = from->dev;
to->mark = from->mark;
+ skb_copy_hash(to, from);
+
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
@@ -1219,7 +1221,8 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
if (mtu < IPV6_MIN_MTU)
return -EINVAL;
cork->base.fragsize = mtu;
- cork->base.gso_size = sk->sk_type == SOCK_DGRAM ? ipc6->gso_size : 0;
+ cork->base.gso_size = sk->sk_type == SOCK_DGRAM &&
+ sk->sk_protocol == IPPROTO_UDP ? ipc6->gso_size : 0;
if (dst_allfrag(xfrm_dst_path(&rt->dst)))
cork->base.flags |= IPCORK_ALLFRAG;
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 4d780c7f0130..568ca4187cd1 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -398,6 +398,12 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
case IPV6_DSTOPTS:
{
struct ipv6_txoptions *opt;
+ struct ipv6_opt_hdr *new = NULL;
+
+ /* hop-by-hop / destination options are privileged options */
+ retv = -EPERM;
+ if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
+ break;
/* remove any sticky options header with a zero option
* length, per RFC3542.
@@ -409,17 +415,22 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
else if (optlen < sizeof(struct ipv6_opt_hdr) ||
optlen & 0x7 || optlen > 8 * 255)
goto e_inval;
-
- /* hop-by-hop / destination options are privileged option */
- retv = -EPERM;
- if (optname != IPV6_RTHDR && !ns_capable(net->user_ns, CAP_NET_RAW))
- break;
+ else {
+ new = memdup_user(optval, optlen);
+ if (IS_ERR(new)) {
+ retv = PTR_ERR(new);
+ break;
+ }
+ if (unlikely(ipv6_optlen(new) > optlen)) {
+ kfree(new);
+ goto e_inval;
+ }
+ }
opt = rcu_dereference_protected(np->opt,
lockdep_sock_is_held(sk));
- opt = ipv6_renew_options(sk, opt, optname,
- (struct ipv6_opt_hdr __user *)optval,
- optlen);
+ opt = ipv6_renew_options(sk, opt, optname, new);
+ kfree(new);
if (IS_ERR(opt)) {
retv = PTR_ERR(opt);
break;
@@ -718,8 +729,9 @@ done:
struct sockaddr_in6 *psin6;
psin6 = (struct sockaddr_in6 *)&greqs.gsr_group;
- retv = ipv6_sock_mc_join(sk, greqs.gsr_interface,
- &psin6->sin6_addr);
+ retv = ipv6_sock_mc_join_ssm(sk, greqs.gsr_interface,
+ &psin6->sin6_addr,
+ MCAST_INCLUDE);
/* prior join w/ different source is ok */
if (retv && retv != -EADDRINUSE)
break;
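
The do_ipv6_setsockopt() hunk above copies the user-supplied options buffer once with memdup_user() and then verifies that the length encoded inside the option header itself cannot exceed what was actually copied. A minimal userspace sketch of that check follows; opt_hdr, copy_opt() and malloc() standing in for memdup_user() are illustrative assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct opt_hdr {
	uint8_t nexthdr;
	uint8_t hdrlen;		/* (hdrlen + 1) * 8 == total option length */
};

#define OPT_TOTAL_LEN(h)	(((size_t)(h)->hdrlen + 1) << 3)

static struct opt_hdr *copy_opt(const void *src, size_t optlen)
{
	struct opt_hdr *new;

	if (optlen < sizeof(*new) || (optlen & 0x7) || optlen > 8 * 255)
		return NULL;		/* reject malformed length arguments */

	new = malloc(optlen);		/* stand-in for memdup_user() */
	if (!new)
		return NULL;
	memcpy(new, src, optlen);

	if (OPT_TOTAL_LEN(new) > optlen) {
		free(new);		/* header claims more than was copied */
		return NULL;
	}
	return new;
}

int main(void)
{
	uint8_t honest[16] = { 0, 1 };	/* hdrlen=1 -> 16 bytes, matches */
	uint8_t lying[16]  = { 0, 3 };	/* hdrlen=3 -> 32 bytes, rejected */
	struct opt_hdr *opt = copy_opt(honest, sizeof(honest));

	printf("honest option accepted: %s\n", opt ? "yes" : "no");
	printf("lying option accepted:  %s\n",
	       copy_opt(lying, sizeof(lying)) ? "yes" : "no");
	free(opt);
	return 0;
}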
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 975021df7c1c..f60f310785fd 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -95,6 +95,8 @@ static int ip6_mc_add_src(struct inet6_dev *idev, const struct in6_addr *pmca,
int delta);
static int ip6_mc_leave_src(struct sock *sk, struct ipv6_mc_socklist *iml,
struct inet6_dev *idev);
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+ const struct in6_addr *addr, unsigned int mode);
#define MLD_QRV_DEFAULT 2
/* RFC3810, 9.2. Query Interval */
@@ -132,7 +134,8 @@ static int unsolicited_report_interval(struct inet6_dev *idev)
return iv > 0 ? iv : 1;
}
-int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+static int __ipv6_sock_mc_join(struct sock *sk, int ifindex,
+ const struct in6_addr *addr, unsigned int mode)
{
struct net_device *dev = NULL;
struct ipv6_mc_socklist *mc_lst;
@@ -179,7 +182,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
}
mc_lst->ifindex = dev->ifindex;
- mc_lst->sfmode = MCAST_EXCLUDE;
+ mc_lst->sfmode = mode;
rwlock_init(&mc_lst->sflock);
mc_lst->sflist = NULL;
@@ -187,7 +190,7 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
* now add/increase the group membership on the device
*/
- err = ipv6_dev_mc_inc(dev, addr);
+ err = __ipv6_dev_mc_inc(dev, addr, mode);
if (err) {
sock_kfree_s(sk, mc_lst, sizeof(*mc_lst));
@@ -199,8 +202,19 @@ int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
return 0;
}
+
+int ipv6_sock_mc_join(struct sock *sk, int ifindex, const struct in6_addr *addr)
+{
+ return __ipv6_sock_mc_join(sk, ifindex, addr, MCAST_EXCLUDE);
+}
EXPORT_SYMBOL(ipv6_sock_mc_join);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+ const struct in6_addr *addr, unsigned int mode)
+{
+ return __ipv6_sock_mc_join(sk, ifindex, addr, mode);
+}
+
/*
* socket leave on multicast group
*/
@@ -646,7 +660,7 @@ bool inet6_mc_check(struct sock *sk, const struct in6_addr *mc_addr,
return rv;
}
-static void igmp6_group_added(struct ifmcaddr6 *mc)
+static void igmp6_group_added(struct ifmcaddr6 *mc, unsigned int mode)
{
struct net_device *dev = mc->idev->dev;
char buf[MAX_ADDR_LEN];
@@ -672,7 +686,13 @@ static void igmp6_group_added(struct ifmcaddr6 *mc)
}
/* else v2 */
- mc->mca_crcount = mc->idev->mc_qrv;
+ /* Based on RFC3810 6.1, for newly added INCLUDE SSM, we
+ * should not send filter-mode change record as the mode
+ * should be from IN() to IN(A).
+ */
+ if (mode == MCAST_EXCLUDE)
+ mc->mca_crcount = mc->idev->mc_qrv;
+
mld_ifc_event(mc->idev);
}
@@ -770,13 +790,13 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
spin_lock_bh(&im->mca_lock);
if (pmc) {
im->idev = pmc->idev;
- im->mca_crcount = idev->mc_qrv;
- im->mca_sfmode = pmc->mca_sfmode;
- if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ if (im->mca_sfmode == MCAST_INCLUDE) {
im->mca_tomb = pmc->mca_tomb;
im->mca_sources = pmc->mca_sources;
for (psf = im->mca_sources; psf; psf = psf->sf_next)
- psf->sf_crcount = im->mca_crcount;
+ psf->sf_crcount = idev->mc_qrv;
+ } else {
+ im->mca_crcount = idev->mc_qrv;
}
in6_dev_put(pmc->idev);
kfree(pmc);
@@ -831,7 +851,8 @@ static void ma_put(struct ifmcaddr6 *mc)
}
static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
- const struct in6_addr *addr)
+ const struct in6_addr *addr,
+ unsigned int mode)
{
struct ifmcaddr6 *mc;
@@ -849,9 +870,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
refcount_set(&mc->mca_refcnt, 1);
spin_lock_init(&mc->mca_lock);
- /* initial mode is (EX, empty) */
- mc->mca_sfmode = MCAST_EXCLUDE;
- mc->mca_sfcount[MCAST_EXCLUDE] = 1;
+ mc->mca_sfmode = mode;
+ mc->mca_sfcount[mode] = 1;
if (ipv6_addr_is_ll_all_nodes(&mc->mca_addr) ||
IPV6_ADDR_MC_SCOPE(&mc->mca_addr) < IPV6_ADDR_SCOPE_LINKLOCAL)
@@ -863,7 +883,8 @@ static struct ifmcaddr6 *mca_alloc(struct inet6_dev *idev,
/*
* device multicast group inc (add if not found)
*/
-int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+static int __ipv6_dev_mc_inc(struct net_device *dev,
+ const struct in6_addr *addr, unsigned int mode)
{
struct ifmcaddr6 *mc;
struct inet6_dev *idev;
@@ -887,14 +908,13 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
if (ipv6_addr_equal(&mc->mca_addr, addr)) {
mc->mca_users++;
write_unlock_bh(&idev->lock);
- ip6_mc_add_src(idev, &mc->mca_addr, MCAST_EXCLUDE, 0,
- NULL, 0);
+ ip6_mc_add_src(idev, &mc->mca_addr, mode, 0, NULL, 0);
in6_dev_put(idev);
return 0;
}
}
- mc = mca_alloc(idev, addr);
+ mc = mca_alloc(idev, addr, mode);
if (!mc) {
write_unlock_bh(&idev->lock);
in6_dev_put(idev);
@@ -911,11 +931,16 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
write_unlock_bh(&idev->lock);
mld_del_delrec(idev, mc);
- igmp6_group_added(mc);
+ igmp6_group_added(mc, mode);
ma_put(mc);
return 0;
}
+int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
+{
+ return __ipv6_dev_mc_inc(dev, addr, MCAST_EXCLUDE);
+}
+
/*
* device multicast group del
*/
@@ -1751,7 +1776,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
psf_next = psf->sf_next;
- if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
+ if (!is_in(pmc, psf, type, gdeleted, sdeleted) && !crsend) {
psf_prev = psf;
continue;
}
@@ -2066,7 +2091,7 @@ static void mld_send_initial_cr(struct inet6_dev *idev)
if (pmc->mca_sfcount[MCAST_EXCLUDE])
type = MLD2_CHANGE_TO_EXCLUDE;
else
- type = MLD2_CHANGE_TO_INCLUDE;
+ type = MLD2_ALLOW_NEW_SOURCES;
skb = add_grec(skb, pmc, type, 0, 0, 1);
spin_unlock_bh(&pmc->mca_lock);
}
@@ -2082,7 +2107,8 @@ void ipv6_mc_dad_complete(struct inet6_dev *idev)
mld_send_initial_cr(idev);
idev->mc_dad_count--;
if (idev->mc_dad_count)
- mld_dad_start_timer(idev, idev->mc_maxdelay);
+ mld_dad_start_timer(idev,
+ unsolicited_report_interval(idev));
}
}
@@ -2094,7 +2120,8 @@ static void mld_dad_timer_expire(struct timer_list *t)
if (idev->mc_dad_count) {
idev->mc_dad_count--;
if (idev->mc_dad_count)
- mld_dad_start_timer(idev, idev->mc_maxdelay);
+ mld_dad_start_timer(idev,
+ unsolicited_report_interval(idev));
}
in6_dev_put(idev);
}
@@ -2452,7 +2479,8 @@ static void mld_ifc_timer_expire(struct timer_list *t)
if (idev->mc_ifc_count) {
idev->mc_ifc_count--;
if (idev->mc_ifc_count)
- mld_ifc_start_timer(idev, idev->mc_maxdelay);
+ mld_ifc_start_timer(idev,
+ unsolicited_report_interval(idev));
}
in6_dev_put(idev);
}
@@ -2543,7 +2571,7 @@ void ipv6_mc_up(struct inet6_dev *idev)
ipv6_mc_reset(idev);
for (i = idev->mc_list; i; i = i->next) {
mld_del_delrec(idev, i);
- igmp6_group_added(i);
+ igmp6_group_added(i, i->mca_sfmode);
}
read_unlock_bh(&idev->lock);
}
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index e640d2f3c55c..0ec273997d1d 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -811,7 +811,7 @@ static void ndisc_recv_ns(struct sk_buff *skb)
return;
}
}
- if (ndopts.nd_opts_nonce)
+ if (ndopts.nd_opts_nonce && ndopts.nd_opts_nonce->nd_opt_len == 1)
memcpy(&nonce, (u8 *)(ndopts.nd_opts_nonce + 1), 6);
inc = ipv6_addr_is_multicast(daddr);
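
The ndisc_recv_ns() change above only copies the 6-byte nonce when the option length field equals one 8-octet unit, i.e. exactly 2 bytes of type/length plus 6 bytes of nonce. A small standalone sketch of that bounds check, using made-up names (nd_opt, copy_nonce) rather than the kernel's structures:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct nd_opt {
	uint8_t type;
	uint8_t len;		/* length in units of 8 octets */
	uint8_t data[6];
};

static int copy_nonce(const struct nd_opt *opt, uint8_t nonce[6])
{
	if (opt->len != 1)	/* reject truncated or oversized options */
		return -1;
	memcpy(nonce, opt->data, 6);
	return 0;
}

int main(void)
{
	struct nd_opt ok = { .type = 14, .len = 1,
			     .data = { 1, 2, 3, 4, 5, 6 } };
	struct nd_opt bad = { .type = 14, .len = 2 };
	uint8_t nonce[6];

	printf("well-formed option: %d\n", copy_nonce(&ok, nonce));   /* 0  */
	printf("bad length option:  %d\n", copy_nonce(&bad, nonce));  /* -1 */
	return 0;
}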
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 7eab959734bc..daf2e9e9193d 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1909,6 +1909,7 @@ static struct xt_match ip6t_builtin_mt[] __read_mostly = {
.checkentry = icmp6_checkentry,
.proto = IPPROTO_ICMPV6,
.family = NFPROTO_IPV6,
+ .me = THIS_MODULE,
},
};
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 5e0332014c17..e4d9e6976d3c 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -107,7 +107,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
if (hdr == NULL)
goto err_reg;
- net->nf_frag.sysctl.frags_hdr = hdr;
+ net->nf_frag_frags_hdr = hdr;
return 0;
err_reg:
@@ -121,8 +121,8 @@ static void __net_exit nf_ct_frags6_sysctl_unregister(struct net *net)
{
struct ctl_table *table;
- table = net->nf_frag.sysctl.frags_hdr->ctl_table_arg;
- unregister_net_sysctl_table(net->nf_frag.sysctl.frags_hdr);
+ table = net->nf_frag_frags_hdr->ctl_table_arg;
+ unregister_net_sysctl_table(net->nf_frag_frags_hdr);
if (!net_eq(net, &init_net))
kfree(table);
}
@@ -585,6 +585,8 @@ int nf_ct_frag6_gather(struct net *net, struct sk_buff *skb, u32 user)
fq->q.meat == fq->q.len &&
nf_ct_frag6_reasm(fq, skb, dev))
ret = 0;
+ else
+ skb_dst_drop(skb);
out_unlock:
spin_unlock_bh(&fq->q.lock);
diff --git a/net/ipv6/netfilter/nf_tproxy_ipv6.c b/net/ipv6/netfilter/nf_tproxy_ipv6.c
index bf1d6c421e3b..5dfd33af6451 100644
--- a/net/ipv6/netfilter/nf_tproxy_ipv6.c
+++ b/net/ipv6/netfilter/nf_tproxy_ipv6.c
@@ -55,7 +55,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
* to a listener socket if there's one */
struct sock *sk2;
- sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, hp, tproto,
+ sk2 = nf_tproxy_get_sock_v6(net, skb, thoff, tproto,
&iph->saddr,
nf_tproxy_laddr6(skb, laddr, &iph->daddr),
hp->source,
@@ -72,7 +72,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
EXPORT_SYMBOL_GPL(nf_tproxy_handle_time_wait6);
struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
const u8 protocol,
const struct in6_addr *saddr, const struct in6_addr *daddr,
const __be16 sport, const __be16 dport,
@@ -80,15 +80,20 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
const enum nf_tproxy_lookup_t lookup_type)
{
struct sock *sk;
- struct tcphdr *tcph;
switch (protocol) {
- case IPPROTO_TCP:
+ case IPPROTO_TCP: {
+ struct tcphdr _hdr, *hp;
+
+ hp = skb_header_pointer(skb, thoff,
+ sizeof(struct tcphdr), &_hdr);
+ if (hp == NULL)
+ return NULL;
+
switch (lookup_type) {
case NF_TPROXY_LOOKUP_LISTENER:
- tcph = hp;
sk = inet6_lookup_listener(net, &tcp_hashinfo, skb,
- thoff + __tcp_hdrlen(tcph),
+ thoff + __tcp_hdrlen(hp),
saddr, sport,
daddr, ntohs(dport),
in->ifindex, 0);
@@ -110,6 +115,7 @@ nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
BUG();
}
break;
+ }
case IPPROTO_UDP:
sk = udp6_lib_lookup(net, saddr, sport, daddr, dport,
in->ifindex);
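
The nf_tproxy change above stops trusting a caller-supplied header pointer and instead re-reads the TCP header with skb_header_pointer(), which copies it into a local buffer and fails cleanly when the packet is too short. A hedged userspace approximation of that pattern; header_pointer() and tcp_hdr_min are illustrative names, not real kernel API.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tcp_hdr_min {
	uint16_t source;
	uint16_t dest;
	/* rest of the TCP header omitted for the demo */
};

/* Return a pointer to @len bytes at @offset, copying into @buf if needed. */
static const void *header_pointer(const uint8_t *pkt, size_t pkt_len,
				  size_t offset, size_t len, void *buf)
{
	if (offset + len > pkt_len)
		return NULL;			/* truncated packet */
	memcpy(buf, pkt + offset, len);		/* always copy in this demo */
	return buf;
}

int main(void)
{
	uint8_t pkt[64] = { 0 };
	struct tcp_hdr_min hdr;
	const struct tcp_hdr_min *hp;

	hp = header_pointer(pkt, sizeof(pkt), 40, sizeof(hdr), &hdr);
	printf("full packet: %s\n", hp ? "header ok" : "too short");

	hp = header_pointer(pkt, 41, 40, sizeof(hdr), &hdr);
	printf("truncated:   %s\n", hp ? "header ok" : "too short");
	return 0;
}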
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ce6f0d15b5dd..afc307c89d1a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -1334,7 +1334,7 @@ void raw6_proc_exit(void)
}
#endif /* CONFIG_PROC_FS */
-/* Same as inet6_dgram_ops, sans udp_poll_mask. */
+/* Same as inet6_dgram_ops, sans udp_poll. */
const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
@@ -1344,7 +1344,7 @@ const struct proto_ops inet6_sockraw_ops = {
.socketpair = sock_no_socketpair, /* a do nothing */
.accept = sock_no_accept, /* a do nothing */
.getname = inet6_getname,
- .poll_mask = datagram_poll_mask, /* ok */
+ .poll = datagram_poll, /* ok */
.ioctl = inet6_ioctl, /* must change */
.listen = sock_no_listen, /* ok */
.shutdown = inet_shutdown, /* ok */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 86a0e4333d42..ec18b3ce8b6d 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -972,10 +972,10 @@ static void ip6_rt_init_dst(struct rt6_info *rt, struct fib6_info *ort)
rt->dst.lastuse = jiffies;
}
+/* Caller must already hold reference to @from */
static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
{
rt->rt6i_flags &= ~RTF_EXPIRES;
- fib6_info_hold(from);
rcu_assign_pointer(rt->from, from);
dst_init_metrics(&rt->dst, from->fib6_metrics->metrics, true);
if (from->fib6_metrics != &dst_default_metrics) {
@@ -984,6 +984,7 @@ static void rt6_set_from(struct rt6_info *rt, struct fib6_info *from)
}
}
+/* Caller must already hold reference to @ort */
static void ip6_rt_copy_init(struct rt6_info *rt, struct fib6_info *ort)
{
struct net_device *dev = fib6_info_nh_dev(ort);
@@ -1044,9 +1045,14 @@ static struct rt6_info *ip6_create_rt_rcu(struct fib6_info *rt)
struct net_device *dev = rt->fib6_nh.nh_dev;
struct rt6_info *nrt;
+ if (!fib6_info_hold_safe(rt))
+ return NULL;
+
nrt = ip6_dst_alloc(dev_net(dev), dev, flags);
if (nrt)
ip6_rt_copy_init(nrt, rt);
+ else
+ fib6_info_release(rt);
return nrt;
}
@@ -1178,10 +1184,15 @@ static struct rt6_info *ip6_rt_cache_alloc(struct fib6_info *ort,
* Clone the route.
*/
+ if (!fib6_info_hold_safe(ort))
+ return NULL;
+
dev = ip6_rt_get_dev_rcu(ort);
rt = ip6_dst_alloc(dev_net(dev), dev, 0);
- if (!rt)
+ if (!rt) {
+ fib6_info_release(ort);
return NULL;
+ }
ip6_rt_copy_init(rt, ort);
rt->rt6i_flags |= RTF_CACHE;
@@ -1210,12 +1221,17 @@ static struct rt6_info *ip6_rt_pcpu_alloc(struct fib6_info *rt)
struct net_device *dev;
struct rt6_info *pcpu_rt;
+ if (!fib6_info_hold_safe(rt))
+ return NULL;
+
rcu_read_lock();
dev = ip6_rt_get_dev_rcu(rt);
pcpu_rt = ip6_dst_alloc(dev_net(dev), dev, flags);
rcu_read_unlock();
- if (!pcpu_rt)
+ if (!pcpu_rt) {
+ fib6_info_release(rt);
return NULL;
+ }
ip6_rt_copy_init(pcpu_rt, rt);
pcpu_rt->rt6i_flags |= RTF_PCPU;
return pcpu_rt;
@@ -2486,7 +2502,7 @@ restart:
out:
if (ret)
- dst_hold(&ret->dst);
+ ip6_hold_safe(net, &ret, true);
else
ret = ip6_create_rt_rcu(rt);
@@ -3303,7 +3319,8 @@ static int ip6_route_del(struct fib6_config *cfg,
continue;
if (cfg->fc_protocol && cfg->fc_protocol != rt->fib6_protocol)
continue;
- fib6_info_hold(rt);
+ if (!fib6_info_hold_safe(rt))
+ continue;
rcu_read_unlock();
/* if gateway was specified only delete the one hop */
@@ -3409,6 +3426,9 @@ static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_bu
rcu_read_lock();
from = rcu_dereference(rt->from);
+ /* This fib6_info_hold() is safe here because we hold reference to rt
+ * and rt already holds reference to fib6_info.
+ */
fib6_info_hold(from);
rcu_read_unlock();
@@ -3470,7 +3490,8 @@ static struct fib6_info *rt6_get_route_info(struct net *net,
continue;
if (!ipv6_addr_equal(&rt->fib6_nh.nh_gw, gwaddr))
continue;
- fib6_info_hold(rt);
+ if (!fib6_info_hold_safe(rt))
+ continue;
break;
}
out:
@@ -3530,8 +3551,8 @@ struct fib6_info *rt6_get_dflt_router(struct net *net,
ipv6_addr_equal(&rt->fib6_nh.nh_gw, addr))
break;
}
- if (rt)
- fib6_info_hold(rt);
+ if (rt && !fib6_info_hold_safe(rt))
+ rt = NULL;
rcu_read_unlock();
return rt;
}
@@ -3579,8 +3600,8 @@ restart:
struct inet6_dev *idev = dev ? __in6_dev_get(dev) : NULL;
if (rt->fib6_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
- (!idev || idev->cnf.accept_ra != 2)) {
- fib6_info_hold(rt);
+ (!idev || idev->cnf.accept_ra != 2) &&
+ fib6_info_hold_safe(rt)) {
rcu_read_unlock();
ip6_del_rt(net, rt);
goto restart;
@@ -3842,7 +3863,7 @@ static struct fib6_info *rt6_multipath_first_sibling(const struct fib6_info *rt)
lockdep_is_held(&rt->fib6_table->tb6_lock));
while (iter) {
if (iter->fib6_metric == rt->fib6_metric &&
- iter->fib6_nsiblings)
+ rt6_qualify_for_ecmp(iter))
return iter;
iter = rcu_dereference_protected(iter->fib6_next,
lockdep_is_held(&rt->fib6_table->tb6_lock));
@@ -4388,6 +4409,13 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
rt = NULL;
goto cleanup;
}
+ if (!rt6_qualify_for_ecmp(rt)) {
+ err = -EINVAL;
+ NL_SET_ERR_MSG(extack,
+ "Device only routes can not be added for IPv6 using the multipath API.");
+ fib6_info_release(rt);
+ goto cleanup;
+ }
rt->fib6_nh.nh_weight = rtnh->rtnh_hops + 1;
@@ -4439,7 +4467,6 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
*/
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
NLM_F_REPLACE);
- cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_APPEND;
nhn++;
}
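
Most of the route.c hunks above replace unconditional fib6_info_hold() calls with fib6_info_hold_safe(), which takes a reference only while the count has not already dropped to zero. A minimal C11 sketch of that "increment unless zero" idiom, with standard atomics assumed as a stand-in for the kernel's refcount_t:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_uint refcnt;
};

static bool obj_hold_safe(struct obj *o)
{
	unsigned int old = atomic_load(&o->refcnt);

	/* Retry until we either observe zero (object dying) or win the CAS. */
	while (old != 0) {
		if (atomic_compare_exchange_weak(&o->refcnt, &old, old + 1))
			return true;
	}
	return false;
}

int main(void)
{
	struct obj live = { .refcnt = 1 };
	struct obj dying = { .refcnt = 0 };

	printf("live object held:  %d\n", obj_hold_safe(&live));   /* 1 */
	printf("dying object held: %d\n", obj_hold_safe(&dying));  /* 0 */
	return 0;
}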
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index 33fb35cbfac1..558fe8cc6d43 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -373,7 +373,7 @@ static int seg6_hmac_init_algo(void)
return -ENOMEM;
for_each_possible_cpu(cpu) {
- tfm = crypto_alloc_shash(algo->name, 0, GFP_KERNEL);
+ tfm = crypto_alloc_shash(algo->name, 0, 0);
if (IS_ERR(tfm))
return PTR_ERR(tfm);
p_tfm = per_cpu_ptr(algo->tfms, cpu);
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index 19ccf0dc996c..a8854dd3e9c5 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -101,7 +101,7 @@ static __be32 seg6_make_flowlabel(struct net *net, struct sk_buff *skb,
if (do_flowlabel > 0) {
hash = skb_get_hash(skb);
- rol32(hash, 16);
+ hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
} else if (!do_flowlabel && skb->protocol == htons(ETH_P_IPV6)) {
flowlabel = ip6_flowlabel(inner_hdr);
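
The seg6_make_flowlabel() fix above exists because rol32() is a pure function: its result must be assigned back, and the old code silently discarded the rotation. A self-contained sketch of the difference; the rol32() helper and a host-order IPV6_FLOWLABEL_MASK value are assumptions made for this demo.

#include <stdint.h>
#include <stdio.h>

static inline uint32_t rol32(uint32_t word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

#define IPV6_FLOWLABEL_MASK	0x000FFFFFu	/* 20-bit mask, host order here */

int main(void)
{
	uint32_t hash = 0x12345678;

	uint32_t wrong = hash & IPV6_FLOWLABEL_MASK;		  /* old behaviour */
	uint32_t right = rol32(hash, 16) & IPV6_FLOWLABEL_MASK;  /* fixed */

	printf("without rotation: 0x%05x\n", wrong);
	printf("with rotation:    0x%05x\n", right);
	return 0;
}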
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 7efa9fd7e109..03e6b7a2bc53 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -938,7 +938,8 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
&tcp_hashinfo, NULL, 0,
&ipv6h->saddr,
th->source, &ipv6h->daddr,
- ntohs(th->source), tcp_v6_iif(skb),
+ ntohs(th->source),
+ tcp_v6_iif_l3_slave(skb),
tcp_v6_sdif(skb));
if (!sk1)
goto out;
@@ -1609,7 +1610,8 @@ do_time_wait:
skb, __tcp_hdrlen(th),
&ipv6_hdr(skb)->saddr, th->source,
&ipv6_hdr(skb)->daddr,
- ntohs(th->dest), tcp_v6_iif(skb),
+ ntohs(th->dest),
+ tcp_v6_iif_l3_slave(skb),
sdif);
if (sk2) {
struct inet_timewait_sock *tw = inet_twsk(sk);
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index 68e86257a549..893a022f9620 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1488,11 +1488,14 @@ static inline __poll_t iucv_accept_poll(struct sock *parent)
return 0;
}
-static __poll_t iucv_sock_poll_mask(struct socket *sock, __poll_t events)
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask = 0;
+ sock_poll_wait(file, sk_sleep(sk), wait);
+
if (sk->sk_state == IUCV_LISTEN)
return iucv_accept_poll(sk);
@@ -2385,7 +2388,7 @@ static const struct proto_ops iucv_sock_ops = {
.getname = iucv_sock_getname,
.sendmsg = iucv_sock_sendmsg,
.recvmsg = iucv_sock_recvmsg,
- .poll_mask = iucv_sock_poll_mask,
+ .poll = iucv_sock_poll,
.ioctl = sock_no_ioctl,
.mmap = sock_no_mmap,
.socketpair = sock_no_socketpair,
diff --git a/net/kcm/kcmsock.c b/net/kcm/kcmsock.c
index 84b7d5c6fec8..d3601d421571 100644
--- a/net/kcm/kcmsock.c
+++ b/net/kcm/kcmsock.c
@@ -1336,9 +1336,9 @@ static void init_kcm_sock(struct kcm_sock *kcm, struct kcm_mux *mux)
struct list_head *head;
int index = 0;
- /* For SOCK_SEQPACKET sock type, datagram_poll_mask checks the sk_state,
- * so we set sk_state, otherwise epoll_wait always returns right away
- * with EPOLLHUP
+ /* For SOCK_SEQPACKET sock type, datagram_poll checks the sk_state, so
+ * we set sk_state, otherwise epoll_wait always returns right away with
+ * EPOLLHUP
*/
kcm->sk.sk_state = TCP_ESTABLISHED;
@@ -1903,7 +1903,7 @@ static const struct proto_ops kcm_dgram_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = kcm_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -1924,7 +1924,7 @@ static const struct proto_ops kcm_seqpacket_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = kcm_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 8bdc1cbe490a..5e1d2946ffbf 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -3751,7 +3751,7 @@ static const struct proto_ops pfkey_ops = {
/* Now the operations that really occur. */
.release = pfkey_release,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.sendmsg = pfkey_sendmsg,
.recvmsg = pfkey_recvmsg,
};
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 181073bf6925..a9c05b2bc1b0 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -613,7 +613,7 @@ static const struct proto_ops l2tp_ip_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = inet_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 336e4c00abbc..957369192ca1 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -754,7 +754,7 @@ static const struct proto_ops l2tp_ip6_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = l2tp_ip6_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = inet6_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 55188382845c..e398797878a9 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1818,7 +1818,7 @@ static const struct proto_ops pppol2tp_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pppol2tp_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = pppol2tp_setsockopt,
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 804de8490186..1beeea9549fa 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -1192,7 +1192,7 @@ static const struct proto_ops llc_ui_ops = {
.socketpair = sock_no_socketpair,
.accept = llc_ui_accept,
.getname = llc_ui_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = llc_ui_ioctl,
.listen = llc_ui_listen,
.shutdown = llc_ui_shutdown,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0a38cc1cbebc..932985ca4e66 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -2254,11 +2254,8 @@ static void ieee80211_deliver_skb_to_local_stack(struct sk_buff *skb,
sdata->control_port_over_nl80211)) {
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
bool noencrypt = status->flag & RX_FLAG_DECRYPTED;
- struct ethhdr *ehdr = eth_hdr(skb);
- cfg80211_rx_control_port(dev, skb->data, skb->len,
- ehdr->h_source,
- be16_to_cpu(skb->protocol), noencrypt);
+ cfg80211_rx_control_port(dev, skb, noencrypt);
dev_kfree_skb(skb);
} else {
/* deliver to local stack */
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 44b5dfe8727d..fa1f1e63a264 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -4845,7 +4845,9 @@ int ieee80211_tx_control_port(struct wiphy *wiphy, struct net_device *dev,
skb_reset_network_header(skb);
skb_reset_mac_header(skb);
+ local_bh_disable();
__ieee80211_subif_start_xmit(skb, skb->dev, flags);
+ local_bh_enable();
return 0;
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 5e2e511c4a6f..d02fbfec3783 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2111,7 +2111,8 @@ int ieee80211_reconfig(struct ieee80211_local *local)
if (!sta->uploaded)
continue;
- if (sta->sdata->vif.type != NL80211_IFTYPE_AP)
+ if (sta->sdata->vif.type != NL80211_IFTYPE_AP &&
+ sta->sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
continue;
for (state = IEEE80211_STA_NOTEXIST;
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index e7b05de1e6d1..25e483e8278b 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -73,8 +73,8 @@ static int ncsi_aen_handler_lsc(struct ncsi_dev_priv *ndp,
ncm->data[2] = data;
ncm->data[4] = ntohl(lsc->oem_status);
- netdev_info(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
- nc->id, data & 0x1 ? "up" : "down");
+ netdev_dbg(ndp->ndev.dev, "NCSI: LSC AEN - channel %u state %s\n",
+ nc->id, data & 0x1 ? "up" : "down");
chained = !list_empty(&nc->link);
state = nc->state;
@@ -148,9 +148,9 @@ static int ncsi_aen_handler_hncdsc(struct ncsi_dev_priv *ndp,
hncdsc = (struct ncsi_aen_hncdsc_pkt *)h;
ncm->data[3] = ntohl(hncdsc->status);
spin_unlock_irqrestore(&nc->lock, flags);
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: host driver %srunning on channel %u\n",
- ncm->data[3] & 0x1 ? "" : "not ", nc->id);
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: host driver %srunning on channel %u\n",
+ ncm->data[3] & 0x1 ? "" : "not ", nc->id);
return 0;
}
diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c
index 5561e221b71f..091284760d21 100644
--- a/net/ncsi/ncsi-manage.c
+++ b/net/ncsi/ncsi-manage.c
@@ -788,8 +788,8 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
}
break;
case ncsi_dev_state_config_done:
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: channel %u config done\n", nc->id);
+ netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
+ nc->id);
spin_lock_irqsave(&nc->lock, flags);
if (nc->reconfigure_needed) {
/* This channel's configuration has been updated
@@ -804,8 +804,7 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
- netdev_printk(KERN_DEBUG, dev,
- "Dirty NCSI channel state reset\n");
+ netdev_dbg(dev, "Dirty NCSI channel state reset\n");
ncsi_process_next_channel(ndp);
break;
}
@@ -816,9 +815,9 @@ static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
} else {
hot_nc = NULL;
nc->state = NCSI_CHANNEL_INACTIVE;
- netdev_warn(ndp->ndev.dev,
- "NCSI: channel %u link down after config\n",
- nc->id);
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: channel %u link down after config\n",
+ nc->id);
}
spin_unlock_irqrestore(&nc->lock, flags);
@@ -908,9 +907,9 @@ static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
}
ncm = &found->modes[NCSI_MODE_LINK];
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: Channel %u added to queue (link %s)\n",
- found->id, ncm->data[2] & 0x1 ? "up" : "down");
+ netdev_dbg(ndp->ndev.dev,
+ "NCSI: Channel %u added to queue (link %s)\n",
+ found->id, ncm->data[2] & 0x1 ? "up" : "down");
out:
spin_lock_irqsave(&ndp->lock, flags);
@@ -1199,14 +1198,14 @@ int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
switch (old_state) {
case NCSI_CHANNEL_INACTIVE:
ndp->ndev.state = ncsi_dev_state_config;
- netdev_info(ndp->ndev.dev, "NCSI: configuring channel %u\n",
- nc->id);
+ netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
+ nc->id);
ncsi_configure_channel(ndp);
break;
case NCSI_CHANNEL_ACTIVE:
ndp->ndev.state = ncsi_dev_state_suspend;
- netdev_info(ndp->ndev.dev, "NCSI: suspending channel %u\n",
- nc->id);
+ netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
+ nc->id);
ncsi_suspend_channel(ndp);
break;
default:
@@ -1226,8 +1225,6 @@ out:
return ncsi_choose_active_channel(ndp);
}
- netdev_printk(KERN_DEBUG, ndp->ndev.dev,
- "NCSI: No more channels to process\n");
ncsi_report_link(ndp, false);
return -ENODEV;
}
@@ -1318,9 +1315,9 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
if ((ndp->ndev.state & 0xff00) ==
ncsi_dev_state_config ||
!list_empty(&nc->link)) {
- netdev_printk(KERN_DEBUG, nd->dev,
- "NCSI: channel %p marked dirty\n",
- nc);
+ netdev_dbg(nd->dev,
+ "NCSI: channel %p marked dirty\n",
+ nc);
nc->reconfigure_needed = true;
}
spin_unlock_irqrestore(&nc->lock, flags);
@@ -1338,8 +1335,7 @@ static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
- netdev_printk(KERN_DEBUG, nd->dev,
- "NCSI: kicked channel %p\n", nc);
+ netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
n++;
}
}
@@ -1370,8 +1366,8 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
n_vids++;
if (vlan->vid == vid) {
- netdev_printk(KERN_DEBUG, dev,
- "NCSI: vid %u already registered\n", vid);
+ netdev_dbg(dev, "NCSI: vid %u already registered\n",
+ vid);
return 0;
}
}
@@ -1390,7 +1386,7 @@ int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
vlan->vid = vid;
list_add_rcu(&vlan->list, &ndp->vlan_vids);
- netdev_printk(KERN_DEBUG, dev, "NCSI: Added new vid %u\n", vid);
+ netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);
found = ncsi_kick_channels(ndp) != 0;
@@ -1419,8 +1415,7 @@ int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
/* Remove the VLAN id from our internal list */
list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
if (vlan->vid == vid) {
- netdev_printk(KERN_DEBUG, dev,
- "NCSI: vid %u found, removing\n", vid);
+ netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
list_del_rcu(&vlan->list);
found = true;
kfree(vlan);
@@ -1547,7 +1542,7 @@ void ncsi_stop_dev(struct ncsi_dev *nd)
}
}
- netdev_printk(KERN_DEBUG, ndp->ndev.dev, "NCSI: Stopping device\n");
+ netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index dbd7d1fad277..f0a1c536ef15 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -460,6 +460,13 @@ config NF_TABLES
if NF_TABLES
+config NF_TABLES_SET
+ tristate "Netfilter nf_tables set infrastructure"
+ help
+	  This option enables the nf_tables set infrastructure, which allows
+	  looking up elements in a set and building one-way mappings between
+	  matchings and actions.

+
config NF_TABLES_INET
depends on IPV6
select NF_TABLES_IPV4
@@ -493,24 +500,6 @@ config NFT_FLOW_OFFLOAD
This option adds the "flow_offload" expression that you can use to
choose what flows are placed into the hardware.
-config NFT_SET_RBTREE
- tristate "Netfilter nf_tables rbtree set module"
- help
- This option adds the "rbtree" set type (Red Black tree) that is used
- to build interval-based sets.
-
-config NFT_SET_HASH
- tristate "Netfilter nf_tables hash set module"
- help
- This option adds the "hash" set type that is used to build one-way
- mappings between matchings and actions.
-
-config NFT_SET_BITMAP
- tristate "Netfilter nf_tables bitmap set module"
- help
- This option adds the "bitmap" set type that is used to build sets
- whose keys are smaller or equal to 16 bits.
-
config NFT_COUNTER
tristate "Netfilter nf_tables counter module"
help
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 44449389e527..8a76dced974d 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -78,7 +78,11 @@ nf_tables-objs := nf_tables_core.o nf_tables_api.o nft_chain_filter.o \
nft_bitwise.o nft_byteorder.o nft_payload.o nft_lookup.o \
nft_dynset.o nft_meta.o nft_rt.o nft_exthdr.o
+nf_tables_set-objs := nf_tables_set_core.o \
+ nft_set_hash.o nft_set_bitmap.o nft_set_rbtree.o
+
obj-$(CONFIG_NF_TABLES) += nf_tables.o
+obj-$(CONFIG_NF_TABLES_SET) += nf_tables_set.o
obj-$(CONFIG_NFT_COMPAT) += nft_compat.o
obj-$(CONFIG_NFT_CONNLIMIT) += nft_connlimit.o
obj-$(CONFIG_NFT_NUMGEN) += nft_numgen.o
@@ -91,9 +95,6 @@ obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_QUOTA) += nft_quota.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
-obj-$(CONFIG_NFT_SET_RBTREE) += nft_set_rbtree.o
-obj-$(CONFIG_NFT_SET_HASH) += nft_set_hash.o
-obj-$(CONFIG_NFT_SET_BITMAP) += nft_set_bitmap.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
obj-$(CONFIG_NFT_LOG) += nft_log.o
obj-$(CONFIG_NFT_MASQ) += nft_masq.o
diff --git a/net/netfilter/nf_conncount.c b/net/netfilter/nf_conncount.c
index d8383609fe28..510039862aa9 100644
--- a/net/netfilter/nf_conncount.c
+++ b/net/netfilter/nf_conncount.c
@@ -47,6 +47,8 @@ struct nf_conncount_tuple {
struct hlist_node node;
struct nf_conntrack_tuple tuple;
struct nf_conntrack_zone zone;
+ int cpu;
+ u32 jiffies32;
};
struct nf_conncount_rb {
@@ -91,11 +93,42 @@ bool nf_conncount_add(struct hlist_head *head,
return false;
conn->tuple = *tuple;
conn->zone = *zone;
+ conn->cpu = raw_smp_processor_id();
+ conn->jiffies32 = (u32)jiffies;
hlist_add_head(&conn->node, head);
return true;
}
EXPORT_SYMBOL_GPL(nf_conncount_add);
+static const struct nf_conntrack_tuple_hash *
+find_or_evict(struct net *net, struct nf_conncount_tuple *conn)
+{
+ const struct nf_conntrack_tuple_hash *found;
+ unsigned long a, b;
+ int cpu = raw_smp_processor_id();
+ __s32 age;
+
+ found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
+ if (found)
+ return found;
+ b = conn->jiffies32;
+ a = (u32)jiffies;
+
+ /* conn might have been added just before by another cpu and
+ * might still be unconfirmed. In this case, nf_conntrack_find()
+ * returns no result. Thus only evict if this cpu added the
+ * stale entry or if the entry is older than two jiffies.
+ */
+ age = a - b;
+ if (conn->cpu == cpu || age >= 2) {
+ hlist_del(&conn->node);
+ kmem_cache_free(conncount_conn_cachep, conn);
+ return ERR_PTR(-ENOENT);
+ }
+
+ return ERR_PTR(-EAGAIN);
+}
+
unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone,
@@ -103,18 +136,27 @@ unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
{
const struct nf_conntrack_tuple_hash *found;
struct nf_conncount_tuple *conn;
- struct hlist_node *n;
struct nf_conn *found_ct;
+ struct hlist_node *n;
unsigned int length = 0;
*addit = tuple ? true : false;
/* check the saved connections */
hlist_for_each_entry_safe(conn, n, head, node) {
- found = nf_conntrack_find_get(net, &conn->zone, &conn->tuple);
- if (found == NULL) {
- hlist_del(&conn->node);
- kmem_cache_free(conncount_conn_cachep, conn);
+ found = find_or_evict(net, conn);
+ if (IS_ERR(found)) {
+ /* Not found, but might be about to be confirmed */
+ if (PTR_ERR(found) == -EAGAIN) {
+ length++;
+ if (!tuple)
+ continue;
+
+ if (nf_ct_tuple_equal(&conn->tuple, tuple) &&
+ nf_ct_zone_id(&conn->zone, conn->zone.dir) ==
+ nf_ct_zone_id(zone, zone->dir))
+ *addit = false;
+ }
continue;
}
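
find_or_evict() above decides staleness with an unsigned subtraction of 32-bit jiffies stamps cast to a signed value, which stays correct across counter wraparound, and it only evicts an entry it cannot find if this CPU created it or it is clearly old. A self-contained sketch of that test; should_evict() and the tick values are illustrative assumptions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool should_evict(uint32_t now, uint32_t stamp,
			 int this_cpu, int owner_cpu)
{
	int32_t age = (int32_t)(now - stamp);	/* wrap-safe difference */

	/* Evict only if we created the entry or it is clearly stale;
	 * a very young entry from another CPU may just be unconfirmed.
	 */
	return owner_cpu == this_cpu || age >= 2;
}

int main(void)
{
	/* Stamped just before the counter wrapped: still young, keep it. */
	printf("%d\n", should_evict(0x00000000, 0xFFFFFFFF, 0, 1)); /* 0 */
	/* Same entry a few ticks later: stale, evict. */
	printf("%d\n", should_evict(0x00000002, 0xFFFFFFFF, 0, 1)); /* 1 */
	/* Our own CPU added it: evict regardless of age. */
	printf("%d\n", should_evict(0x00000000, 0xFFFFFFFF, 1, 1)); /* 1 */
	return 0;
}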
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3465da2a98bd..3d5280425027 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -2043,7 +2043,7 @@ int nf_conntrack_set_hashsize(const char *val, const struct kernel_param *kp)
return -EOPNOTSUPP;
/* On boot, we can set this without any fancy locking. */
- if (!nf_conntrack_htable_size)
+ if (!nf_conntrack_hash)
return param_set_uint(val, kp);
rc = kstrtouint(val, 0, &hashsize);
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 551a1eddf0fa..a75b11c39312 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -465,6 +465,11 @@ void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
nf_ct_expect_iterate_destroy(expect_iter_me, NULL);
nf_ct_iterate_destroy(unhelp, me);
+
+	/* Someone may still be using the helper that the unhelp iteration
+	 * above just detached, so wait for those users to finish before
+	 * returning.
+	 */
+ synchronize_rcu();
}
EXPORT_SYMBOL_GPL(nf_conntrack_helper_unregister);
diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
index abe647d5b8c6..9ce6336d1e55 100644
--- a/net/netfilter/nf_conntrack_proto_dccp.c
+++ b/net/netfilter/nf_conntrack_proto_dccp.c
@@ -243,14 +243,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
[CT_DCCP_ROLE_SERVER] = {
@@ -371,14 +371,14 @@ dccp_state_table[CT_DCCP_ROLE_MAX + 1][DCCP_PKT_SYNCACK + 1][CT_DCCP_MAX + 1] =
* We currently ignore Sync packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
[DCCP_PKT_SYNCACK] = {
/*
* We currently ignore SyncAck packets
*
* sNO, sRQ, sRS, sPO, sOP, sCR, sCG, sTW */
- sIG, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
+ sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG,
},
},
};
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 426457047578..a61d6df6e5f6 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -424,6 +424,10 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
if (write) {
struct ctl_table tmp = *table;
+ /* proc_dostring() can append to existing strings, so we need to
+ * initialize it as an empty string.
+ */
+ buf[0] = '\0';
tmp.data = buf;
r = proc_dostring(&tmp, write, buffer, lenp, ppos);
if (r)
@@ -442,14 +446,17 @@ static int nf_log_proc_dostring(struct ctl_table *table, int write,
rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
mutex_unlock(&nf_log_mutex);
} else {
+ struct ctl_table tmp = *table;
+
+ tmp.data = buf;
mutex_lock(&nf_log_mutex);
logger = nft_log_dereference(net->nf.nf_loggers[tindex]);
if (!logger)
- table->data = "NONE";
+ strlcpy(buf, "NONE", sizeof(buf));
else
- table->data = logger->name;
- r = proc_dostring(table, write, buffer, lenp, ppos);
+ strlcpy(buf, logger->name, sizeof(buf));
mutex_unlock(&nf_log_mutex);
+ r = proc_dostring(&tmp, write, buffer, lenp, ppos);
}
return r;
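
Both branches above now point proc_dostring() at a stack buffer, and the write path clears it first because proc_dostring() may append to an existing string rather than overwrite it. A tiny sketch of why an append-style helper needs the buffer NUL-terminated up front; append_setting() is a made-up stand-in, not the real proc interface.

#include <stdio.h>
#include <string.h>

static void append_setting(char *buf, size_t len, const char *val)
{
	strncat(buf, val, len - strlen(buf) - 1);	/* append semantics */
}

int main(void)
{
	char buf[16];

	buf[0] = '\0';		/* without this, strlen(buf) is undefined */
	append_setting(buf, sizeof(buf), "nf_log_ipv6");
	printf("%s\n", buf);
	return 0;
}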
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 896d4a36081d..f5745e4c6513 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -75,6 +75,7 @@ static void nft_ctx_init(struct nft_ctx *ctx,
{
ctx->net = net;
ctx->family = family;
+ ctx->level = 0;
ctx->table = table;
ctx->chain = chain;
ctx->nla = nla;
@@ -1597,7 +1598,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
struct nft_base_chain *basechain;
struct nft_stats *stats = NULL;
struct nft_chain_hook hook;
- const struct nlattr *name;
struct nf_hook_ops *ops;
struct nft_trans *trans;
int err;
@@ -1645,12 +1645,11 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
return PTR_ERR(stats);
}
+ err = -ENOMEM;
trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN,
sizeof(struct nft_trans_chain));
- if (trans == NULL) {
- free_percpu(stats);
- return -ENOMEM;
- }
+ if (trans == NULL)
+ goto err;
nft_trans_chain_stats(trans) = stats;
nft_trans_chain_update(trans) = true;
@@ -1660,19 +1659,37 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy,
else
nft_trans_chain_policy(trans) = -1;
- name = nla[NFTA_CHAIN_NAME];
- if (nla[NFTA_CHAIN_HANDLE] && name) {
- nft_trans_chain_name(trans) =
- nla_strdup(name, GFP_KERNEL);
- if (!nft_trans_chain_name(trans)) {
- kfree(trans);
- free_percpu(stats);
- return -ENOMEM;
+ if (nla[NFTA_CHAIN_HANDLE] &&
+ nla[NFTA_CHAIN_NAME]) {
+ struct nft_trans *tmp;
+ char *name;
+
+ err = -ENOMEM;
+ name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL);
+ if (!name)
+ goto err;
+
+ err = -EEXIST;
+ list_for_each_entry(tmp, &ctx->net->nft.commit_list, list) {
+ if (tmp->msg_type == NFT_MSG_NEWCHAIN &&
+ tmp->ctx.table == table &&
+ nft_trans_chain_update(tmp) &&
+ nft_trans_chain_name(tmp) &&
+ strcmp(name, nft_trans_chain_name(tmp)) == 0) {
+ kfree(name);
+ goto err;
+ }
}
+
+ nft_trans_chain_name(trans) = name;
}
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
+err:
+ free_percpu(stats);
+ kfree(trans);
+ return err;
}
static int nf_tables_newchain(struct net *net, struct sock *nlsk,
@@ -2254,6 +2271,39 @@ done:
return skb->len;
}
+static int nf_tables_dump_rules_start(struct netlink_callback *cb)
+{
+ const struct nlattr * const *nla = cb->data;
+ struct nft_rule_dump_ctx *ctx = NULL;
+
+ if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
+ ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
+ if (!ctx)
+ return -ENOMEM;
+
+ if (nla[NFTA_RULE_TABLE]) {
+ ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
+ GFP_ATOMIC);
+ if (!ctx->table) {
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+ if (nla[NFTA_RULE_CHAIN]) {
+ ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
+ GFP_ATOMIC);
+ if (!ctx->chain) {
+ kfree(ctx->table);
+ kfree(ctx);
+ return -ENOMEM;
+ }
+ }
+ }
+
+ cb->data = ctx;
+ return 0;
+}
+
static int nf_tables_dump_rules_done(struct netlink_callback *cb)
{
struct nft_rule_dump_ctx *ctx = cb->data;
@@ -2283,38 +2333,13 @@ static int nf_tables_getrule(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start= nf_tables_dump_rules_start,
.dump = nf_tables_dump_rules,
.done = nf_tables_dump_rules_done,
.module = THIS_MODULE,
+ .data = (void *)nla,
};
- if (nla[NFTA_RULE_TABLE] || nla[NFTA_RULE_CHAIN]) {
- struct nft_rule_dump_ctx *ctx;
-
- ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC);
- if (!ctx)
- return -ENOMEM;
-
- if (nla[NFTA_RULE_TABLE]) {
- ctx->table = nla_strdup(nla[NFTA_RULE_TABLE],
- GFP_ATOMIC);
- if (!ctx->table) {
- kfree(ctx);
- return -ENOMEM;
- }
- }
- if (nla[NFTA_RULE_CHAIN]) {
- ctx->chain = nla_strdup(nla[NFTA_RULE_CHAIN],
- GFP_ATOMIC);
- if (!ctx->chain) {
- kfree(ctx->table);
- kfree(ctx);
- return -ENOMEM;
- }
- }
- c.data = ctx;
- }
-
return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
}
@@ -2384,6 +2409,9 @@ int nft_chain_validate(const struct nft_ctx *ctx, const struct nft_chain *chain)
struct nft_rule *rule;
int err;
+ if (ctx->level == NFT_JUMP_STACK_SIZE)
+ return -EMLINK;
+
list_for_each_entry(rule, &chain->rules, list) {
if (!nft_is_active_next(ctx->net, rule))
continue;
@@ -3161,6 +3189,18 @@ done:
return skb->len;
}
+static int nf_tables_dump_sets_start(struct netlink_callback *cb)
+{
+ struct nft_ctx *ctx_dump = NULL;
+
+ ctx_dump = kmemdup(cb->data, sizeof(*ctx_dump), GFP_ATOMIC);
+ if (ctx_dump == NULL)
+ return -ENOMEM;
+
+ cb->data = ctx_dump;
+ return 0;
+}
+
static int nf_tables_dump_sets_done(struct netlink_callback *cb)
{
kfree(cb->data);
@@ -3188,18 +3228,12 @@ static int nf_tables_getset(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start = nf_tables_dump_sets_start,
.dump = nf_tables_dump_sets,
.done = nf_tables_dump_sets_done,
+ .data = &ctx,
.module = THIS_MODULE,
};
- struct nft_ctx *ctx_dump;
-
- ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_ATOMIC);
- if (ctx_dump == NULL)
- return -ENOMEM;
-
- *ctx_dump = ctx;
- c.data = ctx_dump;
return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
}
@@ -3849,6 +3883,15 @@ nla_put_failure:
return -ENOSPC;
}
+static int nf_tables_dump_set_start(struct netlink_callback *cb)
+{
+ struct nft_set_dump_ctx *dump_ctx = cb->data;
+
+ cb->data = kmemdup(dump_ctx, sizeof(*dump_ctx), GFP_ATOMIC);
+
+ return cb->data ? 0 : -ENOMEM;
+}
+
static int nf_tables_dump_set_done(struct netlink_callback *cb)
{
kfree(cb->data);
@@ -4002,20 +4045,17 @@ static int nf_tables_getsetelem(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start = nf_tables_dump_set_start,
.dump = nf_tables_dump_set,
.done = nf_tables_dump_set_done,
.module = THIS_MODULE,
};
- struct nft_set_dump_ctx *dump_ctx;
-
- dump_ctx = kmalloc(sizeof(*dump_ctx), GFP_ATOMIC);
- if (!dump_ctx)
- return -ENOMEM;
-
- dump_ctx->set = set;
- dump_ctx->ctx = ctx;
+ struct nft_set_dump_ctx dump_ctx = {
+ .set = set,
+ .ctx = ctx,
+ };
- c.data = dump_ctx;
+ c.data = &dump_ctx;
return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
}
@@ -4975,38 +5015,42 @@ done:
return skb->len;
}
-static int nf_tables_dump_obj_done(struct netlink_callback *cb)
+static int nf_tables_dump_obj_start(struct netlink_callback *cb)
{
- struct nft_obj_filter *filter = cb->data;
+ const struct nlattr * const *nla = cb->data;
+ struct nft_obj_filter *filter = NULL;
- if (filter) {
- kfree(filter->table);
- kfree(filter);
+ if (nla[NFTA_OBJ_TABLE] || nla[NFTA_OBJ_TYPE]) {
+ filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+ if (!filter)
+ return -ENOMEM;
+
+ if (nla[NFTA_OBJ_TABLE]) {
+ filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
+ if (!filter->table) {
+ kfree(filter);
+ return -ENOMEM;
+ }
+ }
+
+ if (nla[NFTA_OBJ_TYPE])
+ filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
}
+ cb->data = filter;
return 0;
}
-static struct nft_obj_filter *
-nft_obj_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_obj_done(struct netlink_callback *cb)
{
- struct nft_obj_filter *filter;
-
- filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
- if (!filter)
- return ERR_PTR(-ENOMEM);
+ struct nft_obj_filter *filter = cb->data;
- if (nla[NFTA_OBJ_TABLE]) {
- filter->table = nla_strdup(nla[NFTA_OBJ_TABLE], GFP_ATOMIC);
- if (!filter->table) {
- kfree(filter);
- return ERR_PTR(-ENOMEM);
- }
+ if (filter) {
+ kfree(filter->table);
+ kfree(filter);
}
- if (nla[NFTA_OBJ_TYPE])
- filter->type = ntohl(nla_get_be32(nla[NFTA_OBJ_TYPE]));
- return filter;
+ return 0;
}
/* called with rcu_read_lock held */
@@ -5027,21 +5071,13 @@ static int nf_tables_getobj(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start = nf_tables_dump_obj_start,
.dump = nf_tables_dump_obj,
.done = nf_tables_dump_obj_done,
.module = THIS_MODULE,
+ .data = (void *)nla,
};
- if (nla[NFTA_OBJ_TABLE] ||
- nla[NFTA_OBJ_TYPE]) {
- struct nft_obj_filter *filter;
-
- filter = nft_obj_filter_alloc(nla);
- if (IS_ERR(filter))
- return -ENOMEM;
-
- c.data = filter;
- }
return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
}
@@ -5320,8 +5356,6 @@ static int nf_tables_flowtable_parse_hook(const struct nft_ctx *ctx,
flowtable->ops[i].priv = &flowtable->data;
flowtable->ops[i].hook = flowtable->data.type->hook;
flowtable->ops[i].dev = dev_array[i];
- flowtable->dev_name[i] = kstrdup(dev_array[i]->name,
- GFP_KERNEL);
}
return err;
@@ -5479,10 +5513,8 @@ static int nf_tables_newflowtable(struct net *net, struct sock *nlsk,
err6:
i = flowtable->ops_len;
err5:
- for (k = i - 1; k >= 0; k--) {
- kfree(flowtable->dev_name[k]);
+ for (k = i - 1; k >= 0; k--)
nf_unregister_net_hook(net, &flowtable->ops[k]);
- }
kfree(flowtable->ops);
err4:
@@ -5581,9 +5613,10 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net,
goto nla_put_failure;
for (i = 0; i < flowtable->ops_len; i++) {
- if (flowtable->dev_name[i][0] &&
- nla_put_string(skb, NFTA_DEVICE_NAME,
- flowtable->dev_name[i]))
+ const struct net_device *dev = READ_ONCE(flowtable->ops[i].dev);
+
+ if (dev &&
+ nla_put_string(skb, NFTA_DEVICE_NAME, dev->name))
goto nla_put_failure;
}
nla_nest_end(skb, nest_devs);
@@ -5650,37 +5683,39 @@ done:
return skb->len;
}
-static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
+static int nf_tables_dump_flowtable_start(struct netlink_callback *cb)
{
- struct nft_flowtable_filter *filter = cb->data;
+ const struct nlattr * const *nla = cb->data;
+ struct nft_flowtable_filter *filter = NULL;
- if (!filter)
- return 0;
+ if (nla[NFTA_FLOWTABLE_TABLE]) {
+ filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
+ if (!filter)
+ return -ENOMEM;
- kfree(filter->table);
- kfree(filter);
+ filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
+ GFP_ATOMIC);
+ if (!filter->table) {
+ kfree(filter);
+ return -ENOMEM;
+ }
+ }
+ cb->data = filter;
return 0;
}
-static struct nft_flowtable_filter *
-nft_flowtable_filter_alloc(const struct nlattr * const nla[])
+static int nf_tables_dump_flowtable_done(struct netlink_callback *cb)
{
- struct nft_flowtable_filter *filter;
+ struct nft_flowtable_filter *filter = cb->data;
- filter = kzalloc(sizeof(*filter), GFP_ATOMIC);
if (!filter)
- return ERR_PTR(-ENOMEM);
+ return 0;
- if (nla[NFTA_FLOWTABLE_TABLE]) {
- filter->table = nla_strdup(nla[NFTA_FLOWTABLE_TABLE],
- GFP_ATOMIC);
- if (!filter->table) {
- kfree(filter);
- return ERR_PTR(-ENOMEM);
- }
- }
- return filter;
+ kfree(filter->table);
+ kfree(filter);
+
+ return 0;
}
/* called with rcu_read_lock held */
@@ -5700,20 +5735,13 @@ static int nf_tables_getflowtable(struct net *net, struct sock *nlsk,
if (nlh->nlmsg_flags & NLM_F_DUMP) {
struct netlink_dump_control c = {
+ .start = nf_tables_dump_flowtable_start,
.dump = nf_tables_dump_flowtable,
.done = nf_tables_dump_flowtable_done,
.module = THIS_MODULE,
+ .data = (void *)nla,
};
- if (nla[NFTA_FLOWTABLE_TABLE]) {
- struct nft_flowtable_filter *filter;
-
- filter = nft_flowtable_filter_alloc(nla);
- if (IS_ERR(filter))
- return -ENOMEM;
-
- c.data = filter;
- }
return nft_netlink_dump_start_rcu(nlsk, skb, nlh, &c);
}
@@ -5783,6 +5811,7 @@ static void nf_tables_flowtable_destroy(struct nft_flowtable *flowtable)
kfree(flowtable->name);
flowtable->data.type->free(&flowtable->data);
module_put(flowtable->data.type->owner);
+ kfree(flowtable);
}
static int nf_tables_fill_gen_info(struct sk_buff *skb, struct net *net,
@@ -5825,7 +5854,6 @@ static void nft_flowtable_event(unsigned long event, struct net_device *dev,
continue;
nf_unregister_net_hook(dev_net(dev), &flowtable->ops[i]);
- flowtable->dev_name[i][0] = '\0';
flowtable->ops[i].dev = NULL;
break;
}
@@ -6086,6 +6114,9 @@ static void nft_commit_release(struct nft_trans *trans)
case NFT_MSG_DELTABLE:
nf_tables_table_destroy(&trans->ctx);
break;
+ case NFT_MSG_NEWCHAIN:
+ kfree(nft_trans_chain_name(trans));
+ break;
case NFT_MSG_DELCHAIN:
nf_tables_chain_destroy(&trans->ctx);
break;
@@ -6315,13 +6346,15 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb)
nf_tables_table_notify(&trans->ctx, NFT_MSG_DELTABLE);
break;
case NFT_MSG_NEWCHAIN:
- if (nft_trans_chain_update(trans))
+ if (nft_trans_chain_update(trans)) {
nft_chain_commit_update(trans);
- else
+ nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+ /* trans destroyed after rcu grace period */
+ } else {
nft_clear(net, trans->ctx.chain);
-
- nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
- nft_trans_destroy(trans);
+ nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN);
+ nft_trans_destroy(trans);
+ }
break;
case NFT_MSG_DELCHAIN:
nft_chain_del(trans->ctx.chain);
@@ -6471,7 +6504,7 @@ static int __nf_tables_abort(struct net *net)
case NFT_MSG_NEWCHAIN:
if (nft_trans_chain_update(trans)) {
free_percpu(nft_trans_chain_stats(trans));
-
+ kfree(nft_trans_chain_name(trans));
nft_trans_destroy(trans);
} else {
trans->ctx.table->use--;
@@ -6837,13 +6870,6 @@ int nft_validate_register_store(const struct nft_ctx *ctx,
err = nf_tables_check_loops(ctx, data->verdict.chain);
if (err < 0)
return err;
-
- if (ctx->chain->level + 1 >
- data->verdict.chain->level) {
- if (ctx->chain->level + 1 == NFT_JUMP_STACK_SIZE)
- return -EMLINK;
- data->verdict.chain->level = ctx->chain->level + 1;
- }
}
return 0;
diff --git a/net/netfilter/nf_tables_set_core.c b/net/netfilter/nf_tables_set_core.c
new file mode 100644
index 000000000000..814789644bd3
--- /dev/null
+++ b/net/netfilter/nf_tables_set_core.c
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <net/netfilter/nf_tables_core.h>
+
+static int __init nf_tables_set_module_init(void)
+{
+ nft_register_set(&nft_set_hash_fast_type);
+ nft_register_set(&nft_set_hash_type);
+ nft_register_set(&nft_set_rhash_type);
+ nft_register_set(&nft_set_bitmap_type);
+ nft_register_set(&nft_set_rbtree_type);
+
+ return 0;
+}
+
+static void __exit nf_tables_set_module_exit(void)
+{
+ nft_unregister_set(&nft_set_rbtree_type);
+ nft_unregister_set(&nft_set_bitmap_type);
+ nft_unregister_set(&nft_set_rhash_type);
+ nft_unregister_set(&nft_set_hash_type);
+ nft_unregister_set(&nft_set_hash_fast_type);
+}
+
+module_init(nf_tables_set_module_init);
+module_exit(nf_tables_set_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nfnetlink_queue.c b/net/netfilter/nfnetlink_queue.c
index 4ccd2988f9db..ea4ba551abb2 100644
--- a/net/netfilter/nfnetlink_queue.c
+++ b/net/netfilter/nfnetlink_queue.c
@@ -1243,6 +1243,9 @@ static int nfqnl_recv_unsupp(struct net *net, struct sock *ctnl,
static const struct nla_policy nfqa_cfg_policy[NFQA_CFG_MAX+1] = {
[NFQA_CFG_CMD] = { .len = sizeof(struct nfqnl_msg_config_cmd) },
[NFQA_CFG_PARAMS] = { .len = sizeof(struct nfqnl_msg_config_params) },
+ [NFQA_CFG_QUEUE_MAXLEN] = { .type = NLA_U32 },
+ [NFQA_CFG_MASK] = { .type = NLA_U32 },
+ [NFQA_CFG_FLAGS] = { .type = NLA_U32 },
};
static const struct nf_queue_handler nfqh = {
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8d1ff654e5af..32535eea51b2 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -832,10 +832,18 @@ nft_target_select_ops(const struct nft_ctx *ctx,
rev = ntohl(nla_get_be32(tb[NFTA_TARGET_REV]));
family = ctx->family;
+ if (strcmp(tg_name, XT_ERROR_TARGET) == 0 ||
+ strcmp(tg_name, XT_STANDARD_TARGET) == 0 ||
+ strcmp(tg_name, "standard") == 0)
+ return ERR_PTR(-EINVAL);
+
/* Re-use the existing target if it's already loaded. */
list_for_each_entry(nft_target, &nft_target_list, head) {
struct xt_target *target = nft_target->ops.data;
+ if (!target->target)
+ continue;
+
if (nft_target_cmp(target, tg_name, rev, family))
return &nft_target->ops;
}
@@ -844,6 +852,11 @@ nft_target_select_ops(const struct nft_ctx *ctx,
if (IS_ERR(target))
return ERR_PTR(-ENOENT);
+ if (!target->target) {
+ err = -EINVAL;
+ goto err;
+ }
+
if (target->targetsize > nla_len(tb[NFTA_TARGET_INFO])) {
err = -EINVAL;
goto err;
diff --git a/net/netfilter/nft_immediate.c b/net/netfilter/nft_immediate.c
index 15adf8ca82c3..0777a93211e2 100644
--- a/net/netfilter/nft_immediate.c
+++ b/net/netfilter/nft_immediate.c
@@ -98,6 +98,7 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
const struct nft_data **d)
{
const struct nft_immediate_expr *priv = nft_expr_priv(expr);
+ struct nft_ctx *pctx = (struct nft_ctx *)ctx;
const struct nft_data *data;
int err;
@@ -109,9 +110,11 @@ static int nft_immediate_validate(const struct nft_ctx *ctx,
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
+ pctx->level++;
err = nft_chain_validate(ctx, data->verdict.chain);
if (err < 0)
return err;
+ pctx->level--;
break;
default:
break;
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 42e6fadf1417..c2a1d84cdfc4 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -155,7 +155,9 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
+ struct nft_ctx *pctx = (struct nft_ctx *)ctx;
const struct nft_data *data;
+ int err;
if (nft_set_ext_exists(ext, NFT_SET_EXT_FLAGS) &&
*nft_set_ext_flags(ext) & NFT_SET_ELEM_INTERVAL_END)
@@ -165,10 +167,17 @@ static int nft_lookup_validate_setelem(const struct nft_ctx *ctx,
switch (data->verdict.code) {
case NFT_JUMP:
case NFT_GOTO:
- return nft_chain_validate(ctx, data->verdict.chain);
+ pctx->level++;
+ err = nft_chain_validate(ctx, data->verdict.chain);
+ if (err < 0)
+ return err;
+ pctx->level--;
+ break;
default:
- return 0;
+ break;
}
+
+ return 0;
}
static int nft_lookup_validate(const struct nft_ctx *ctx,
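
The ctx->level accounting added across nf_tables_api.c, nft_immediate.c and nft_lookup.c above bounds how deeply jumps and gotos may nest, replacing the old per-chain level bookkeeping removed from nft_validate_register_store(). A simplified, runnable sketch of that depth check; struct chain, chain_validate() and the stack size of 16 are assumptions made for the demo.

#include <errno.h>
#include <stdio.h>

#define NFT_JUMP_STACK_SIZE	16	/* assumed limit for this demo */

struct chain {
	const struct chain *jump_target;	/* NULL when the chain just accepts */
};

struct ctx {
	unsigned int level;
};

static int chain_validate(struct ctx *ctx, const struct chain *chain)
{
	int err;

	if (ctx->level == NFT_JUMP_STACK_SIZE)
		return -EMLINK;			/* too many chained jumps */

	if (chain->jump_target) {
		ctx->level++;
		err = chain_validate(ctx, chain->jump_target);
		if (err < 0)
			return err;
		ctx->level--;
	}
	return 0;
}

int main(void)
{
	struct chain chains[20] = { { NULL } };
	struct ctx ctx = { .level = 0 };
	int i;

	for (i = 0; i < 19; i++)		/* 20 chains linked by jumps */
		chains[i].jump_target = &chains[i + 1];
	printf("deep ruleset:    %d\n", chain_validate(&ctx, &chains[0]));

	ctx.level = 0;
	chains[3].jump_target = NULL;		/* cut the ruleset short */
	printf("shallow ruleset: %d\n", chain_validate(&ctx, &chains[0]));
	return 0;
}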
diff --git a/net/netfilter/nft_set_bitmap.c b/net/netfilter/nft_set_bitmap.c
index d6626e01c7ee..128bc16f52dd 100644
--- a/net/netfilter/nft_set_bitmap.c
+++ b/net/netfilter/nft_set_bitmap.c
@@ -296,7 +296,7 @@ static bool nft_bitmap_estimate(const struct nft_set_desc *desc, u32 features,
return true;
}
-static struct nft_set_type nft_bitmap_type __read_mostly = {
+struct nft_set_type nft_set_bitmap_type __read_mostly = {
.owner = THIS_MODULE,
.ops = {
.privsize = nft_bitmap_privsize,
@@ -314,20 +314,3 @@ static struct nft_set_type nft_bitmap_type __read_mostly = {
.get = nft_bitmap_get,
},
};
-
-static int __init nft_bitmap_module_init(void)
-{
- return nft_register_set(&nft_bitmap_type);
-}
-
-static void __exit nft_bitmap_module_exit(void)
-{
- nft_unregister_set(&nft_bitmap_type);
-}
-
-module_init(nft_bitmap_module_init);
-module_exit(nft_bitmap_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
-MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 6f9a1365a09f..90c3e7e6cacb 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -387,6 +387,7 @@ static void nft_rhash_destroy(const struct nft_set *set)
struct nft_rhash *priv = nft_set_priv(set);
cancel_delayed_work_sync(&priv->gc_work);
+ rcu_barrier();
rhashtable_free_and_destroy(&priv->ht, nft_rhash_elem_destroy,
(void *)set);
}
@@ -654,7 +655,7 @@ static bool nft_hash_fast_estimate(const struct nft_set_desc *desc, u32 features
return true;
}
-static struct nft_set_type nft_rhash_type __read_mostly = {
+struct nft_set_type nft_set_rhash_type __read_mostly = {
.owner = THIS_MODULE,
.features = NFT_SET_MAP | NFT_SET_OBJECT |
NFT_SET_TIMEOUT | NFT_SET_EVAL,
@@ -677,7 +678,7 @@ static struct nft_set_type nft_rhash_type __read_mostly = {
},
};
-static struct nft_set_type nft_hash_type __read_mostly = {
+struct nft_set_type nft_set_hash_type __read_mostly = {
.owner = THIS_MODULE,
.features = NFT_SET_MAP | NFT_SET_OBJECT,
.ops = {
@@ -697,7 +698,7 @@ static struct nft_set_type nft_hash_type __read_mostly = {
},
};
-static struct nft_set_type nft_hash_fast_type __read_mostly = {
+struct nft_set_type nft_set_hash_fast_type __read_mostly = {
.owner = THIS_MODULE,
.features = NFT_SET_MAP | NFT_SET_OBJECT,
.ops = {
@@ -716,26 +717,3 @@ static struct nft_set_type nft_hash_fast_type __read_mostly = {
.get = nft_hash_get,
},
};
-
-static int __init nft_hash_module_init(void)
-{
- if (nft_register_set(&nft_hash_fast_type) ||
- nft_register_set(&nft_hash_type) ||
- nft_register_set(&nft_rhash_type))
- return 1;
- return 0;
-}
-
-static void __exit nft_hash_module_exit(void)
-{
- nft_unregister_set(&nft_rhash_type);
- nft_unregister_set(&nft_hash_type);
- nft_unregister_set(&nft_hash_fast_type);
-}
-
-module_init(nft_hash_module_init);
-module_exit(nft_hash_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 7f3a9a211034..9873d734b494 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -381,7 +381,7 @@ static void nft_rbtree_gc(struct work_struct *work)
gcb = nft_set_gc_batch_check(set, gcb, GFP_ATOMIC);
if (!gcb)
- goto out;
+ break;
atomic_dec(&set->nelems);
nft_set_gc_batch_add(gcb, rbe);
@@ -390,10 +390,12 @@ static void nft_rbtree_gc(struct work_struct *work)
rbe = rb_entry(prev, struct nft_rbtree_elem, node);
atomic_dec(&set->nelems);
nft_set_gc_batch_add(gcb, rbe);
+ prev = NULL;
}
node = rb_next(node);
+ if (!node)
+ break;
}
-out:
if (gcb) {
for (i = 0; i < gcb->head.cnt; i++) {
rbe = gcb->elems[i];
@@ -440,6 +442,7 @@ static void nft_rbtree_destroy(const struct nft_set *set)
struct rb_node *node;
cancel_delayed_work_sync(&priv->gc_work);
+ rcu_barrier();
while ((node = priv->root.rb_node) != NULL) {
rb_erase(node, &priv->root);
rbe = rb_entry(node, struct nft_rbtree_elem, node);
@@ -462,7 +465,7 @@ static bool nft_rbtree_estimate(const struct nft_set_desc *desc, u32 features,
return true;
}
-static struct nft_set_type nft_rbtree_type __read_mostly = {
+struct nft_set_type nft_set_rbtree_type __read_mostly = {
.owner = THIS_MODULE,
.features = NFT_SET_INTERVAL | NFT_SET_MAP | NFT_SET_OBJECT | NFT_SET_TIMEOUT,
.ops = {
@@ -481,20 +484,3 @@ static struct nft_set_type nft_rbtree_type __read_mostly = {
.get = nft_rbtree_get,
},
};
-
-static int __init nft_rbtree_module_init(void)
-{
- return nft_register_set(&nft_rbtree_type);
-}
-
-static void __exit nft_rbtree_module_exit(void)
-{
- nft_unregister_set(&nft_rbtree_type);
-}
-
-module_init(nft_rbtree_module_init);
-module_exit(nft_rbtree_module_exit);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_SET();
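
The destroy paths in nft_set_hash and nft_set_rbtree above now call rcu_barrier() between cancelling the GC work and freeing the backing structure. A minimal sketch of that teardown ordering is below; example_destroy and its argument are hypothetical names, not nft symbols.

#include <linux/rcupdate.h>
#include <linux/workqueue.h>

static void example_destroy(struct delayed_work *gc_work)
{
	/* stop any future garbage-collection runs */
	cancel_delayed_work_sync(gc_work);
	/*
	 * wait for call_rcu() callbacks the GC may already have queued,
	 * so none of them run against memory freed after this point
	 */
	rcu_barrier();
	/* ... now free the set's backing structures ... */
}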
diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c
index 58fce4e749a9..d76550a8b642 100644
--- a/net/netfilter/xt_TPROXY.c
+++ b/net/netfilter/xt_TPROXY.c
@@ -61,7 +61,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
* addresses, this happens if the redirect already happened
* and the current packet belongs to an already established
* connection */
- sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+ sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
iph->saddr, iph->daddr,
hp->source, hp->dest,
skb->dev, NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -77,7 +77,7 @@ tproxy_tg4(struct net *net, struct sk_buff *skb, __be32 laddr, __be16 lport,
else if (!sk)
/* no, there's no established connection, check if
* there's a listener on the redirected addr/port */
- sk = nf_tproxy_get_sock_v4(net, skb, hp, iph->protocol,
+ sk = nf_tproxy_get_sock_v4(net, skb, iph->protocol,
iph->saddr, laddr,
hp->source, lport,
skb->dev, NF_TPROXY_LOOKUP_LISTENER);
@@ -150,7 +150,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
* addresses, this happens if the redirect already happened
* and the current packet belongs to an already established
* connection */
- sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp, tproto,
+ sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, tproto,
&iph->saddr, &iph->daddr,
hp->source, hp->dest,
xt_in(par), NF_TPROXY_LOOKUP_ESTABLISHED);
@@ -171,7 +171,7 @@ tproxy_tg6_v1(struct sk_buff *skb, const struct xt_action_param *par)
else if (!sk)
/* no there's no established connection, check if
* there's a listener on the redirected addr/port */
- sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff, hp,
+ sk = nf_tproxy_get_sock_v6(xt_net(par), skb, thoff,
tproto, &iph->saddr, laddr,
hp->source, lport,
xt_in(par), NF_TPROXY_LOOKUP_LISTENER);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 1189b84413d5..393573a99a5a 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -2658,7 +2658,7 @@ static const struct proto_ops netlink_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = netlink_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = netlink_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index 93fbcafbf388..03f37c4e64fe 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -1355,7 +1355,7 @@ static const struct proto_ops nr_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = nr_accept,
.getname = nr_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = nr_ioctl,
.listen = nr_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 2ceefa183cee..6a196e438b6c 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -752,11 +752,14 @@ int nfc_llcp_send_ui_frame(struct nfc_llcp_sock *sock, u8 ssap, u8 dsap,
pr_debug("Fragment %zd bytes remaining %zd",
frag_len, remaining_len);
- pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, MSG_DONTWAIT,
+ pdu = nfc_alloc_send_skb(sock->dev, &sock->sk, 0,
frag_len + LLCP_HEADER_SIZE, &err);
if (pdu == NULL) {
- pr_err("Could not allocate PDU\n");
- continue;
+ pr_err("Could not allocate PDU (error=%d)\n", err);
+ len -= remaining_len;
+ if (len == 0)
+ len = err;
+ break;
}
pdu = llcp_add_header(pdu, dsap, ssap, LLCP_PDU_UI);
diff --git a/net/nfc/llcp_sock.c b/net/nfc/llcp_sock.c
index ab5bb14b49af..ea0c0c6f1874 100644
--- a/net/nfc/llcp_sock.c
+++ b/net/nfc/llcp_sock.c
@@ -548,13 +548,16 @@ static inline __poll_t llcp_accept_poll(struct sock *parent)
return 0;
}
-static __poll_t llcp_sock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t llcp_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask = 0;
pr_debug("%p\n", sk);
+ sock_poll_wait(file, sk_sleep(sk), wait);
+
if (sk->sk_state == LLCP_LISTEN)
return llcp_accept_poll(sk);
@@ -896,7 +899,7 @@ static const struct proto_ops llcp_sock_ops = {
.socketpair = sock_no_socketpair,
.accept = llcp_sock_accept,
.getname = llcp_sock_getname,
- .poll_mask = llcp_sock_poll_mask,
+ .poll = llcp_sock_poll,
.ioctl = sock_no_ioctl,
.listen = llcp_sock_listen,
.shutdown = sock_no_shutdown,
@@ -916,7 +919,7 @@ static const struct proto_ops llcp_rawsock_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = llcp_sock_getname,
- .poll_mask = llcp_sock_poll_mask,
+ .poll = llcp_sock_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/nfc/rawsock.c b/net/nfc/rawsock.c
index 60c322531c49..e2188deb08dc 100644
--- a/net/nfc/rawsock.c
+++ b/net/nfc/rawsock.c
@@ -284,7 +284,7 @@ static const struct proto_ops rawsock_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -304,7 +304,7 @@ static const struct proto_ops rawsock_raw_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/nsh/nsh.c b/net/nsh/nsh.c
index 9696ef96b719..1a30e165eeb4 100644
--- a/net/nsh/nsh.c
+++ b/net/nsh/nsh.c
@@ -104,7 +104,7 @@ static struct sk_buff *nsh_gso_segment(struct sk_buff *skb,
__skb_pull(skb, nsh_len);
skb_reset_mac_header(skb);
- skb_reset_mac_len(skb);
+ skb->mac_len = proto == htons(ETH_P_TEB) ? ETH_HLEN : 0;
skb->protocol = proto;
features &= NETIF_F_SG;
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 50809748c127..9b27d0cd766d 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2262,6 +2262,13 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->stats.stats1.tp_drops)
status |= TP_STATUS_LOSING;
}
+
+ if (do_vnet &&
+ virtio_net_hdr_from_skb(skb, h.raw + macoff -
+ sizeof(struct virtio_net_hdr),
+ vio_le(), true, 0))
+ goto drop_n_account;
+
po->stats.stats1.tp_packets++;
if (copy_skb) {
status |= TP_STATUS_COPY;
@@ -2269,15 +2276,6 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
}
spin_unlock(&sk->sk_receive_queue.lock);
- if (do_vnet) {
- if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
- sizeof(struct virtio_net_hdr),
- vio_le(), true, 0)) {
- spin_lock(&sk->sk_receive_queue.lock);
- goto drop_n_account;
- }
- }
-
skb_copy_bits(skb, 0, h.raw + macoff, snaplen);
if (!(ts_status = tpacket_get_timestamp(skb, &ts, po->tp_tstamp)))
@@ -2880,6 +2878,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
goto out_free;
} else if (reserve) {
skb_reserve(skb, -reserve);
+ if (len < reserve)
+ skb_reset_network_header(skb);
}
/* Returns -EFAULT on error */
@@ -4078,11 +4078,12 @@ static int packet_ioctl(struct socket *sock, unsigned int cmd,
return 0;
}
-static __poll_t packet_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t packet_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct packet_sock *po = pkt_sk(sk);
- __poll_t mask = datagram_poll_mask(sock, events);
+ __poll_t mask = datagram_poll(file, sock, wait);
spin_lock_bh(&sk->sk_receive_queue.lock);
if (po->rx_ring.pg_vec) {
@@ -4424,7 +4425,7 @@ static const struct proto_ops packet_ops_spkt = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname_spkt,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = packet_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -4445,7 +4446,7 @@ static const struct proto_ops packet_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = packet_getname,
- .poll_mask = packet_poll_mask,
+ .poll = packet_poll,
.ioctl = packet_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/phonet/socket.c b/net/phonet/socket.c
index c295c4e20f01..30187990257f 100644
--- a/net/phonet/socket.c
+++ b/net/phonet/socket.c
@@ -340,12 +340,15 @@ static int pn_socket_getname(struct socket *sock, struct sockaddr *addr,
return sizeof(struct sockaddr_pn);
}
-static __poll_t pn_socket_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t pn_socket_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct pep_sock *pn = pep_sk(sk);
__poll_t mask = 0;
+ poll_wait(file, sk_sleep(sk), wait);
+
if (sk->sk_state == TCP_CLOSE)
return EPOLLERR;
if (!skb_queue_empty(&sk->sk_receive_queue))
@@ -445,7 +448,7 @@ const struct proto_ops phonet_dgram_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = pn_socket_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = pn_socket_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -470,7 +473,7 @@ const struct proto_ops phonet_stream_ops = {
.socketpair = sock_no_socketpair,
.accept = pn_socket_accept,
.getname = pn_socket_getname,
- .poll_mask = pn_socket_poll_mask,
+ .poll = pn_socket_poll,
.ioctl = pn_socket_ioctl,
.listen = pn_socket_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 1b5025ea5b04..86e1e37eb4e8 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -191,8 +191,13 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb,
hdr->type = cpu_to_le32(type);
hdr->src_node_id = cpu_to_le32(from->sq_node);
hdr->src_port_id = cpu_to_le32(from->sq_port);
- hdr->dst_node_id = cpu_to_le32(to->sq_node);
- hdr->dst_port_id = cpu_to_le32(to->sq_port);
+ if (to->sq_port == QRTR_PORT_CTRL) {
+ hdr->dst_node_id = cpu_to_le32(node->nid);
+ hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
+ } else {
+ hdr->dst_node_id = cpu_to_le32(to->sq_node);
+ hdr->dst_port_id = cpu_to_le32(to->sq_port);
+ }
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = 0;
@@ -764,6 +769,10 @@ static int qrtr_sendmsg(struct socket *sock, struct msghdr *msg, size_t len)
node = NULL;
if (addr->sq_node == QRTR_NODE_BCAST) {
enqueue_fn = qrtr_bcast_enqueue;
+ if (addr->sq_port != QRTR_PORT_CTRL) {
+ release_sock(sk);
+ return -ENOTCONN;
+ }
} else if (addr->sq_node == ipc->us.sq_node) {
enqueue_fn = qrtr_local_enqueue;
} else {
@@ -1023,7 +1032,7 @@ static const struct proto_ops qrtr_proto_ops = {
.recvmsg = qrtr_recvmsg,
.getname = qrtr_getname,
.ioctl = qrtr_ioctl,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
diff --git a/net/rds/connection.c b/net/rds/connection.c
index abef75da89a7..cfb05953b0e5 100644
--- a/net/rds/connection.c
+++ b/net/rds/connection.c
@@ -659,11 +659,19 @@ static void rds_conn_info(struct socket *sock, unsigned int len,
int rds_conn_init(void)
{
+ int ret;
+
+ ret = rds_loop_net_init(); /* register pernet callback */
+ if (ret)
+ return ret;
+
rds_conn_slab = kmem_cache_create("rds_connection",
sizeof(struct rds_connection),
0, 0, NULL);
- if (!rds_conn_slab)
+ if (!rds_conn_slab) {
+ rds_loop_net_exit();
return -ENOMEM;
+ }
rds_info_register_func(RDS_INFO_CONNECTIONS, rds_conn_info);
rds_info_register_func(RDS_INFO_SEND_MESSAGES,
@@ -676,6 +684,7 @@ int rds_conn_init(void)
void rds_conn_exit(void)
{
+ rds_loop_net_exit(); /* unregister pernet callback */
rds_loop_exit();
WARN_ON(!hlist_empty(rds_conn_hash));
diff --git a/net/rds/loop.c b/net/rds/loop.c
index dac6218a460e..feea1f96ee2a 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -33,6 +33,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/in.h>
+#include <net/net_namespace.h>
+#include <net/netns/generic.h>
#include "rds_single_path.h"
#include "rds.h"
@@ -40,6 +42,17 @@
static DEFINE_SPINLOCK(loop_conns_lock);
static LIST_HEAD(loop_conns);
+static atomic_t rds_loop_unloading = ATOMIC_INIT(0);
+
+static void rds_loop_set_unloading(void)
+{
+ atomic_set(&rds_loop_unloading, 1);
+}
+
+static bool rds_loop_is_unloading(struct rds_connection *conn)
+{
+ return atomic_read(&rds_loop_unloading) != 0;
+}
/*
* This 'loopback' transport is a special case for flows that originate
@@ -165,6 +178,8 @@ void rds_loop_exit(void)
struct rds_loop_connection *lc, *_lc;
LIST_HEAD(tmp_list);
+ rds_loop_set_unloading();
+ synchronize_rcu();
/* avoid calling conn_destroy with irqs off */
spin_lock_irq(&loop_conns_lock);
list_splice(&loop_conns, &tmp_list);
@@ -177,6 +192,46 @@ void rds_loop_exit(void)
}
}
+static void rds_loop_kill_conns(struct net *net)
+{
+ struct rds_loop_connection *lc, *_lc;
+ LIST_HEAD(tmp_list);
+
+ spin_lock_irq(&loop_conns_lock);
+ list_for_each_entry_safe(lc, _lc, &loop_conns, loop_node) {
+ struct net *c_net = read_pnet(&lc->conn->c_net);
+
+ if (net != c_net)
+ continue;
+ list_move_tail(&lc->loop_node, &tmp_list);
+ }
+ spin_unlock_irq(&loop_conns_lock);
+
+ list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) {
+ WARN_ON(lc->conn->c_passive);
+ rds_conn_destroy(lc->conn);
+ }
+}
+
+static void __net_exit rds_loop_exit_net(struct net *net)
+{
+ rds_loop_kill_conns(net);
+}
+
+static struct pernet_operations rds_loop_net_ops = {
+ .exit = rds_loop_exit_net,
+};
+
+int rds_loop_net_init(void)
+{
+ return register_pernet_device(&rds_loop_net_ops);
+}
+
+void rds_loop_net_exit(void)
+{
+ unregister_pernet_device(&rds_loop_net_ops);
+}
+
/*
* This is missing .xmit_* because loop doesn't go through generic
* rds_send_xmit() and doesn't call rds_recv_incoming(). .listen_stop and
@@ -194,4 +249,5 @@ struct rds_transport rds_loop_transport = {
.inc_free = rds_loop_inc_free,
.t_name = "loopback",
.t_type = RDS_TRANS_LOOP,
+ .t_unloading = rds_loop_is_unloading,
};
diff --git a/net/rds/loop.h b/net/rds/loop.h
index 469fa4b2da4f..bbc8cdd030df 100644
--- a/net/rds/loop.h
+++ b/net/rds/loop.h
@@ -5,6 +5,8 @@
/* loop.c */
extern struct rds_transport rds_loop_transport;
+int rds_loop_net_init(void);
+void rds_loop_net_exit(void);
void rds_loop_exit(void);
#endif
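
The rds_loop changes above hang loopback-connection cleanup off a pernet .exit callback so that destroying a network namespace also destroys its connections. A minimal sketch of that registration pattern, assuming hypothetical example_* names rather than the RDS ones:

#include <linux/module.h>
#include <net/net_namespace.h>

static void __net_exit example_exit_net(struct net *net)
{
	/* tear down any state this module keeps for @net */
}

static struct pernet_operations example_net_ops = {
	.exit = example_exit_net,
};

static int __init example_init(void)
{
	return register_pernet_device(&example_net_ops);
}

static void __exit example_exit(void)
{
	unregister_pernet_device(&example_net_ops);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");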
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index ebe42e7eb456..d00a0ef39a56 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1470,7 +1470,7 @@ static const struct proto_ops rose_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = rose_accept,
.getname = rose_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = rose_ioctl,
.listen = rose_listen,
.shutdown = sock_no_shutdown,
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index 3b1ac93efee2..2b463047dd7b 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -734,11 +734,15 @@ static int rxrpc_getsockopt(struct socket *sock, int level, int optname,
/*
* permit an RxRPC socket to be polled
*/
-static __poll_t rxrpc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t rxrpc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct rxrpc_sock *rx = rxrpc_sk(sk);
- __poll_t mask = 0;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
/* the socket is readable if there are any messages waiting on the Rx
* queue */
@@ -945,7 +949,7 @@ static const struct proto_ops rxrpc_rpc_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = rxrpc_poll_mask,
+ .poll = rxrpc_poll,
.ioctl = sock_no_ioctl,
.listen = rxrpc_listen,
.shutdown = rxrpc_shutdown,
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 526a8e491626..6e7124e57918 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -91,7 +91,7 @@ static int tcf_csum_init(struct net *net, struct nlattr *nla,
}
params_old = rtnl_dereference(p->params);
- params_new->action = parm->action;
+ p->tcf_action = parm->action;
params_new->update_flags = parm->update_flags;
rcu_assign_pointer(p->params, params_new);
if (params_old)
@@ -561,7 +561,7 @@ static int tcf_csum(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&p->tcf_tm);
bstats_cpu_update(this_cpu_ptr(p->common.cpu_bstats), skb);
- action = params->action;
+ action = READ_ONCE(p->tcf_action);
if (unlikely(action == TC_ACT_SHOT))
goto drop_stats;
@@ -599,11 +599,11 @@ static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind,
.index = p->tcf_index,
.refcnt = p->tcf_refcnt - ref,
.bindcnt = p->tcf_bindcnt - bind,
+ .action = p->tcf_action,
};
struct tcf_t t;
params = rtnl_dereference(p->params);
- opt.action = params->action;
opt.update_flags = params->update_flags;
if (nla_put(skb, TCA_CSUM_PARMS, sizeof(opt), &opt))
diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c
index 8527cfdc446d..20d7d36b2fc9 100644
--- a/net/sched/act_ife.c
+++ b/net/sched/act_ife.c
@@ -415,7 +415,8 @@ static void tcf_ife_cleanup(struct tc_action *a)
spin_unlock_bh(&ife->tcf_lock);
p = rcu_dereference_protected(ife->params, 1);
- kfree_rcu(p, rcu);
+ if (p)
+ kfree_rcu(p, rcu);
}
/* under ife->tcf_lock for existing action */
@@ -516,8 +517,6 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
saddr = nla_data(tb[TCA_IFE_SMAC]);
}
- ife->tcf_action = parm->action;
-
if (parm->flags & IFE_ENCODE) {
if (daddr)
ether_addr_copy(p->eth_dst, daddr);
@@ -543,10 +542,8 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla,
NULL, NULL);
if (err) {
metadata_parse_err:
- if (exists)
- tcf_idr_release(*a, bind);
if (ret == ACT_P_CREATED)
- _tcf_ife_cleanup(*a);
+ tcf_idr_release(*a, bind);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
@@ -567,7 +564,7 @@ metadata_parse_err:
err = use_all_metadata(ife);
if (err) {
if (ret == ACT_P_CREATED)
- _tcf_ife_cleanup(*a);
+ tcf_idr_release(*a, bind);
if (exists)
spin_unlock_bh(&ife->tcf_lock);
@@ -576,6 +573,7 @@ metadata_parse_err:
}
}
+ ife->tcf_action = parm->action;
if (exists)
spin_unlock_bh(&ife->tcf_lock);
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 626dac81a48a..9bc6c2ae98a5 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -36,7 +36,7 @@ static int tunnel_key_act(struct sk_buff *skb, const struct tc_action *a,
tcf_lastuse_update(&t->tcf_tm);
bstats_cpu_update(this_cpu_ptr(t->common.cpu_bstats), skb);
- action = params->action;
+ action = READ_ONCE(t->tcf_action);
switch (params->tcft_action) {
case TCA_TUNNEL_KEY_ACT_RELEASE:
@@ -182,7 +182,7 @@ static int tunnel_key_init(struct net *net, struct nlattr *nla,
params_old = rtnl_dereference(t->params);
- params_new->action = parm->action;
+ t->tcf_action = parm->action;
params_new->tcft_action = parm->t_action;
params_new->tcft_enc_metadata = metadata;
@@ -254,13 +254,13 @@ static int tunnel_key_dump(struct sk_buff *skb, struct tc_action *a,
.index = t->tcf_index,
.refcnt = t->tcf_refcnt - ref,
.bindcnt = t->tcf_bindcnt - bind,
+ .action = t->tcf_action,
};
struct tcf_t tm;
params = rtnl_dereference(t->params);
opt.t_action = params->tcft_action;
- opt.action = params->action;
if (nla_put(skb, TCA_TUNNEL_KEY_PARMS, sizeof(opt), &opt))
goto nla_put_failure;
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index cdc3c87c53e6..f74513a7c7a8 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1053,7 +1053,7 @@ static void tfilter_notify_chain(struct net *net, struct sk_buff *oskb,
for (tp = rtnl_dereference(chain->filter_chain);
tp; tp = rtnl_dereference(tp->next))
tfilter_notify(net, oskb, n, tp, block,
- q, parent, 0, event, false);
+ q, parent, NULL, event, false);
}
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
@@ -1444,7 +1444,7 @@ static bool tcf_chain_dump(struct tcf_chain *chain, struct Qdisc *q, u32 parent,
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
if (cb->args[1] == 0) {
- if (tcf_fill_node(net, skb, tp, block, q, parent, 0,
+ if (tcf_fill_node(net, skb, tp, block, q, parent, NULL,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq, NLM_F_MULTI,
RTM_NEWTFILTER) <= 0)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 2b5be42a9f1c..9e8b26a80fb3 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -66,7 +66,7 @@ struct fl_flow_mask {
struct rhashtable_params filter_ht_params;
struct flow_dissector dissector;
struct list_head filters;
- struct rcu_head rcu;
+ struct rcu_work rwork;
struct list_head list;
};
@@ -203,6 +203,20 @@ static int fl_init(struct tcf_proto *tp)
return rhashtable_init(&head->ht, &mask_ht_params);
}
+static void fl_mask_free(struct fl_flow_mask *mask)
+{
+ rhashtable_destroy(&mask->ht);
+ kfree(mask);
+}
+
+static void fl_mask_free_work(struct work_struct *work)
+{
+ struct fl_flow_mask *mask = container_of(to_rcu_work(work),
+ struct fl_flow_mask, rwork);
+
+ fl_mask_free(mask);
+}
+
static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
bool async)
{
@@ -210,12 +224,11 @@ static bool fl_mask_put(struct cls_fl_head *head, struct fl_flow_mask *mask,
return false;
rhashtable_remove_fast(&head->ht, &mask->ht_node, mask_ht_params);
- rhashtable_destroy(&mask->ht);
list_del_rcu(&mask->list);
if (async)
- kfree_rcu(mask, rcu);
+ tcf_queue_work(&mask->rwork, fl_mask_free_work);
else
- kfree(mask);
+ fl_mask_free(mask);
return true;
}
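
fl_mask_put() above switches from kfree_rcu() to an RCU-deferred work item because the free path now calls rhashtable_destroy(), which may sleep. A minimal sketch of that pattern follows; it uses queue_rcu_work() directly with hypothetical example_* names, whereas the flower code goes through the tcf_queue_work() wrapper.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct example_mask {
	struct rcu_work rwork;
	/* ... other fields ... */
};

static void example_mask_free_work(struct work_struct *work)
{
	struct example_mask *m = container_of(to_rcu_work(work),
					      struct example_mask, rwork);

	/* runs in process context after a grace period; sleeping is fine */
	kfree(m);
}

static void example_mask_put(struct example_mask *m)
{
	INIT_RCU_WORK(&m->rwork, example_mask_free_work);
	queue_rcu_work(system_wq, &m->rwork);
}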
diff --git a/net/sched/sch_blackhole.c b/net/sched/sch_blackhole.c
index c98a61e980ba..9c4c2bb547d7 100644
--- a/net/sched/sch_blackhole.c
+++ b/net/sched/sch_blackhole.c
@@ -21,7 +21,7 @@ static int blackhole_enqueue(struct sk_buff *skb, struct Qdisc *sch,
struct sk_buff **to_free)
{
qdisc_drop(skb, sch, to_free);
- return NET_XMIT_SUCCESS;
+ return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
static struct sk_buff *blackhole_dequeue(struct Qdisc *sch)
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index cd2e0e342fb6..6c0a9d5dbf94 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -479,24 +479,28 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
q->cparams.mtu = psched_mtu(qdisc_dev(sch));
if (opt) {
- int err = fq_codel_change(sch, opt, extack);
+ err = fq_codel_change(sch, opt, extack);
if (err)
- return err;
+ goto init_failure;
}
err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
if (err)
- return err;
+ goto init_failure;
if (!q->flows) {
q->flows = kvcalloc(q->flows_cnt,
sizeof(struct fq_codel_flow),
GFP_KERNEL);
- if (!q->flows)
- return -ENOMEM;
+ if (!q->flows) {
+ err = -ENOMEM;
+ goto init_failure;
+ }
q->backlogs = kvcalloc(q->flows_cnt, sizeof(u32), GFP_KERNEL);
- if (!q->backlogs)
- return -ENOMEM;
+ if (!q->backlogs) {
+ err = -ENOMEM;
+ goto alloc_failure;
+ }
for (i = 0; i < q->flows_cnt; i++) {
struct fq_codel_flow *flow = q->flows + i;
@@ -509,6 +513,13 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt,
else
sch->flags &= ~TCQ_F_CAN_BYPASS;
return 0;
+
+alloc_failure:
+ kvfree(q->flows);
+ q->flows = NULL;
+init_failure:
+ q->flows_cnt = 0;
+ return err;
}
static int fq_codel_dump(struct Qdisc *sch, struct sk_buff *skb)
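
fq_codel_init() above is reworked so every failure branch unwinds through labels instead of returning early with half-initialised state. A stripped-down sketch of that goto-unwind shape, with a hypothetical example_priv standing in for the qdisc private data:

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/types.h>

struct example_priv {
	u64 *flows;
	u32 *backlogs;
};

static int example_init(struct example_priv *q, size_t n)
{
	int err;

	q->flows = kvcalloc(n, sizeof(*q->flows), GFP_KERNEL);
	if (!q->flows) {
		err = -ENOMEM;
		goto init_failure;
	}
	q->backlogs = kvcalloc(n, sizeof(*q->backlogs), GFP_KERNEL);
	if (!q->backlogs) {
		err = -ENOMEM;
		goto alloc_failure;
	}
	return 0;

alloc_failure:
	kvfree(q->flows);
	q->flows = NULL;
init_failure:
	return err;
}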
diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
index 3ae9877ea205..3278a76f6861 100644
--- a/net/sched/sch_hfsc.c
+++ b/net/sched/sch_hfsc.c
@@ -1385,8 +1385,8 @@ hfsc_schedule_watchdog(struct Qdisc *sch)
if (next_time == 0 || next_time > q->root.cl_cfmin)
next_time = q->root.cl_cfmin;
}
- WARN_ON(next_time == 0);
- qdisc_watchdog_schedule(&q->watchdog, next_time);
+ if (next_time)
+ qdisc_watchdog_schedule(&q->watchdog, next_time);
}
static int
diff --git a/net/sctp/chunk.c b/net/sctp/chunk.c
index 79daa98208c3..bfb9f812e2ef 100644
--- a/net/sctp/chunk.c
+++ b/net/sctp/chunk.c
@@ -237,7 +237,9 @@ struct sctp_datamsg *sctp_datamsg_from_user(struct sctp_association *asoc,
/* Account for a different sized first fragment */
if (msg_len >= first_len) {
msg->can_delay = 0;
- SCTP_INC_STATS(sock_net(asoc->base.sk), SCTP_MIB_FRAGUSRMSGS);
+ if (msg_len > first_len)
+ SCTP_INC_STATS(sock_net(asoc->base.sk),
+ SCTP_MIB_FRAGUSRMSGS);
} else {
/* Which may be the only one... */
first_len = msg_len;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 7339918a805d..0cd2e764f47f 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -1010,7 +1010,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = sctp_getname,
- .poll_mask = sctp_poll_mask,
+ .poll = sctp_poll,
.ioctl = inet6_ioctl,
.listen = sctp_inet_listen,
.shutdown = inet_shutdown,
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index 5dffbc493008..67f73d3a1356 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1016,7 +1016,7 @@ static const struct proto_ops inet_seqpacket_ops = {
.socketpair = sock_no_socketpair,
.accept = inet_accept,
.getname = inet_getname, /* Semantics are different. */
- .poll_mask = sctp_poll_mask,
+ .poll = sctp_poll,
.ioctl = inet_ioctl,
.listen = sctp_inet_listen,
.shutdown = inet_shutdown, /* Looks harmless. */
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index d20f7addee19..ce620e878538 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -7717,12 +7717,14 @@ out:
* here, again, by modeling the current TCP/UDP code. We don't have
* a good way to test with it yet.
*/
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events)
+__poll_t sctp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
struct sctp_sock *sp = sctp_sk(sk);
__poll_t mask;
+ poll_wait(file, sk_sleep(sk), wait);
+
sock_rps_record_flow(sk);
/* A TCP-style listening socket becomes readable when the accept queue
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index 445b7ef61677..12cac85da994 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -282,7 +282,7 @@ bool sctp_transport_update_pmtu(struct sctp_transport *t, u32 pmtu)
if (dst) {
/* Re-fetch, as under layers may have a higher minimum size */
- pmtu = SCTP_TRUNC4(dst_mtu(dst));
+ pmtu = sctp_dst_mtu(dst);
change = t->pathmtu != pmtu;
}
t->pathmtu = pmtu;
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index da7f02edcd37..05e4ffe5aabd 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -45,6 +45,7 @@ static DEFINE_MUTEX(smc_create_lgr_pending); /* serialize link group
*/
static void smc_tcp_listen_work(struct work_struct *);
+static void smc_connect_work(struct work_struct *);
static void smc_set_keepalive(struct sock *sk, int val)
{
@@ -122,6 +123,12 @@ static int smc_release(struct socket *sock)
goto out;
smc = smc_sk(sk);
+
+ /* cleanup for a dangling non-blocking connect */
+ flush_work(&smc->connect_work);
+ kfree(smc->connect_info);
+ smc->connect_info = NULL;
+
if (sk->sk_state == SMC_LISTEN)
/* smc_close_non_accepted() is called and acquires
* sock lock for child sockets again
@@ -140,7 +147,8 @@ static int smc_release(struct socket *sock)
smc->clcsock = NULL;
}
if (smc->use_fallback) {
- sock_put(sk); /* passive closing */
+ if (sk->sk_state != SMC_LISTEN && sk->sk_state != SMC_INIT)
+ sock_put(sk); /* passive closing */
sk->sk_state = SMC_CLOSED;
sk->sk_state_change(sk);
}
@@ -186,6 +194,7 @@ static struct sock *smc_sock_alloc(struct net *net, struct socket *sock,
sk->sk_protocol = protocol;
smc = smc_sk(sk);
INIT_WORK(&smc->tcp_listen_work, smc_tcp_listen_work);
+ INIT_WORK(&smc->connect_work, smc_connect_work);
INIT_DELAYED_WORK(&smc->conn.tx_work, smc_tx_work);
INIT_LIST_HEAD(&smc->accept_q);
spin_lock_init(&smc->accept_q_lock);
@@ -409,12 +418,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
{
int rc;
- if (reason_code < 0) /* error, fallback is not possible */
+ if (reason_code < 0) { /* error, fallback is not possible */
+ if (smc->sk.sk_state == SMC_INIT)
+ sock_put(&smc->sk); /* passive closing */
return reason_code;
+ }
if (reason_code != SMC_CLC_DECL_REPLY) {
rc = smc_clc_send_decline(smc, reason_code);
- if (rc < 0)
+ if (rc < 0) {
+ if (smc->sk.sk_state == SMC_INIT)
+ sock_put(&smc->sk); /* passive closing */
return rc;
+ }
}
return smc_connect_fallback(smc);
}
@@ -427,8 +442,6 @@ static int smc_connect_abort(struct smc_sock *smc, int reason_code,
smc_lgr_forget(smc->conn.lgr);
mutex_unlock(&smc_create_lgr_pending);
smc_conn_free(&smc->conn);
- if (reason_code < 0 && smc->sk.sk_state == SMC_INIT)
- sock_put(&smc->sk); /* passive closing */
return reason_code;
}
@@ -576,6 +589,35 @@ static int __smc_connect(struct smc_sock *smc)
return 0;
}
+static void smc_connect_work(struct work_struct *work)
+{
+ struct smc_sock *smc = container_of(work, struct smc_sock,
+ connect_work);
+ int rc;
+
+ lock_sock(&smc->sk);
+ rc = kernel_connect(smc->clcsock, &smc->connect_info->addr,
+ smc->connect_info->alen, smc->connect_info->flags);
+ if (smc->clcsock->sk->sk_err) {
+ smc->sk.sk_err = smc->clcsock->sk->sk_err;
+ goto out;
+ }
+ if (rc < 0) {
+ smc->sk.sk_err = -rc;
+ goto out;
+ }
+
+ rc = __smc_connect(smc);
+ if (rc < 0)
+ smc->sk.sk_err = -rc;
+
+out:
+ smc->sk.sk_state_change(&smc->sk);
+ kfree(smc->connect_info);
+ smc->connect_info = NULL;
+ release_sock(&smc->sk);
+}
+
static int smc_connect(struct socket *sock, struct sockaddr *addr,
int alen, int flags)
{
@@ -605,15 +647,32 @@ static int smc_connect(struct socket *sock, struct sockaddr *addr,
smc_copy_sock_settings_to_clc(smc);
tcp_sk(smc->clcsock->sk)->syn_smc = 1;
- rc = kernel_connect(smc->clcsock, addr, alen, flags);
- if (rc)
- goto out;
+ if (flags & O_NONBLOCK) {
+ if (smc->connect_info) {
+ rc = -EALREADY;
+ goto out;
+ }
+ smc->connect_info = kzalloc(alen + 2 * sizeof(int), GFP_KERNEL);
+ if (!smc->connect_info) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ smc->connect_info->alen = alen;
+ smc->connect_info->flags = flags ^ O_NONBLOCK;
+ memcpy(&smc->connect_info->addr, addr, alen);
+ schedule_work(&smc->connect_work);
+ rc = -EINPROGRESS;
+ } else {
+ rc = kernel_connect(smc->clcsock, addr, alen, flags);
+ if (rc)
+ goto out;
- rc = __smc_connect(smc);
- if (rc < 0)
- goto out;
- else
- rc = 0; /* success cases including fallback */
+ rc = __smc_connect(smc);
+ if (rc < 0)
+ goto out;
+ else
+ rc = 0; /* success cases including fallback */
+ }
out:
release_sock(sk);
@@ -1273,40 +1332,26 @@ static __poll_t smc_accept_poll(struct sock *parent)
return mask;
}
-static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t smc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
__poll_t mask = 0;
struct smc_sock *smc;
- int rc;
if (!sk)
return EPOLLNVAL;
smc = smc_sk(sock->sk);
- sock_hold(sk);
- lock_sock(sk);
if ((sk->sk_state == SMC_INIT) || smc->use_fallback) {
/* delegate to CLC child sock */
- release_sock(sk);
- mask = smc->clcsock->ops->poll_mask(smc->clcsock, events);
- lock_sock(sk);
+ mask = smc->clcsock->ops->poll(file, smc->clcsock, wait);
sk->sk_err = smc->clcsock->sk->sk_err;
- if (sk->sk_err) {
+ if (sk->sk_err)
mask |= EPOLLERR;
- } else {
- /* if non-blocking connect finished ... */
- if (sk->sk_state == SMC_INIT &&
- mask & EPOLLOUT &&
- smc->clcsock->sk->sk_state != TCP_CLOSE) {
- rc = __smc_connect(smc);
- if (rc < 0)
- mask |= EPOLLERR;
- /* success cases including fallback */
- mask |= EPOLLOUT | EPOLLWRNORM;
- }
- }
} else {
+ if (sk->sk_state != SMC_CLOSED)
+ sock_poll_wait(file, sk_sleep(sk), wait);
if (sk->sk_err)
mask |= EPOLLERR;
if ((sk->sk_shutdown == SHUTDOWN_MASK) ||
@@ -1332,10 +1377,7 @@ static __poll_t smc_poll_mask(struct socket *sock, __poll_t events)
}
if (smc->conn.urg_state == SMC_URG_VALID)
mask |= EPOLLPRI;
-
}
- release_sock(sk);
- sock_put(sk);
return mask;
}
@@ -1415,7 +1457,8 @@ static int smc_setsockopt(struct socket *sock, int level, int optname,
if (optlen < sizeof(int))
return -EINVAL;
- get_user(val, (int __user *)optval);
+ if (get_user(val, (int __user *)optval))
+ return -EFAULT;
lock_sock(sk);
switch (optname) {
@@ -1483,10 +1526,13 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
return -EBADF;
return smc->clcsock->ops->ioctl(smc->clcsock, cmd, arg);
}
+ lock_sock(&smc->sk);
switch (cmd) {
case SIOCINQ: /* same as FIONREAD */
- if (smc->sk.sk_state == SMC_LISTEN)
+ if (smc->sk.sk_state == SMC_LISTEN) {
+ release_sock(&smc->sk);
return -EINVAL;
+ }
if (smc->sk.sk_state == SMC_INIT ||
smc->sk.sk_state == SMC_CLOSED)
answ = 0;
@@ -1495,8 +1541,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
break;
case SIOCOUTQ:
/* output queue size (not send + not acked) */
- if (smc->sk.sk_state == SMC_LISTEN)
+ if (smc->sk.sk_state == SMC_LISTEN) {
+ release_sock(&smc->sk);
return -EINVAL;
+ }
if (smc->sk.sk_state == SMC_INIT ||
smc->sk.sk_state == SMC_CLOSED)
answ = 0;
@@ -1506,8 +1554,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
break;
case SIOCOUTQNSD:
/* output queue size (not send only) */
- if (smc->sk.sk_state == SMC_LISTEN)
+ if (smc->sk.sk_state == SMC_LISTEN) {
+ release_sock(&smc->sk);
return -EINVAL;
+ }
if (smc->sk.sk_state == SMC_INIT ||
smc->sk.sk_state == SMC_CLOSED)
answ = 0;
@@ -1515,8 +1565,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
answ = smc_tx_prepared_sends(&smc->conn);
break;
case SIOCATMARK:
- if (smc->sk.sk_state == SMC_LISTEN)
+ if (smc->sk.sk_state == SMC_LISTEN) {
+ release_sock(&smc->sk);
return -EINVAL;
+ }
if (smc->sk.sk_state == SMC_INIT ||
smc->sk.sk_state == SMC_CLOSED) {
answ = 0;
@@ -1532,8 +1584,10 @@ static int smc_ioctl(struct socket *sock, unsigned int cmd,
}
break;
default:
+ release_sock(&smc->sk);
return -ENOIOCTLCMD;
}
+ release_sock(&smc->sk);
return put_user(answ, (int __user *)arg);
}
@@ -1619,7 +1673,7 @@ static const struct proto_ops smc_sock_ops = {
.socketpair = sock_no_socketpair,
.accept = smc_accept,
.getname = smc_getname,
- .poll_mask = smc_poll_mask,
+ .poll = smc_poll,
.ioctl = smc_ioctl,
.listen = smc_listen,
.shutdown = smc_shutdown,
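
The af_smc changes above handle O_NONBLOCK connect by parking the address in smc->connect_info, scheduling connect_work and returning -EINPROGRESS; smc_release() later flushes that work. A rough sketch of the deferral, with hypothetical example_* names and the socket-level locking elided:

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/stddef.h>
#include <linux/string.h>
#include <linux/workqueue.h>

struct example_connect_info {
	int flags;
	int alen;
	struct sockaddr addr;	/* really alen bytes of address */
};

struct example_sock {
	struct work_struct connect_work;	/* INIT_WORK()ed at create time */
	struct example_connect_info *connect_info;
};

static void example_connect_work(struct work_struct *work)
{
	struct example_sock *es = container_of(work, struct example_sock,
					       connect_work);

	/* do the blocking part of the handshake here ... */
	kfree(es->connect_info);
	es->connect_info = NULL;
}

static int example_connect_nonblock(struct example_sock *es,
				    struct sockaddr *addr, int alen)
{
	if (es->connect_info)
		return -EALREADY;
	es->connect_info = kzalloc(offsetof(struct example_connect_info, addr) +
				   alen, GFP_KERNEL);
	if (!es->connect_info)
		return -ENOMEM;
	es->connect_info->alen = alen;
	memcpy(&es->connect_info->addr, addr, alen);
	schedule_work(&es->connect_work);
	return -EINPROGRESS;
}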
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 51ae1f10d81a..d7ca26570482 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -187,11 +187,19 @@ struct smc_connection {
struct work_struct close_work; /* peer sent some closing */
};
+struct smc_connect_info {
+ int flags;
+ int alen;
+ struct sockaddr addr;
+};
+
struct smc_sock { /* smc sock container */
struct sock sk;
struct socket *clcsock; /* internal tcp socket */
struct smc_connection conn; /* smc connection */
struct smc_sock *listen_smc; /* listen parent */
+ struct smc_connect_info *connect_info; /* connect address & flags */
+ struct work_struct connect_work; /* handle non-blocking connect*/
struct work_struct tcp_listen_work;/* handle tcp socket accepts */
struct work_struct smc_listen_work;/* prepare new accept socket */
struct list_head accept_q; /* sockets to be accepted */
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 717449b1da0b..ae5d168653ce 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -250,6 +250,7 @@ out:
int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
u8 expected_type)
{
+ long rcvtimeo = smc->clcsock->sk->sk_rcvtimeo;
struct sock *clc_sk = smc->clcsock->sk;
struct smc_clc_msg_hdr *clcm = buf;
struct msghdr msg = {NULL, 0};
@@ -306,7 +307,6 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
memset(&msg, 0, sizeof(struct msghdr));
iov_iter_kvec(&msg.msg_iter, READ | ITER_KVEC, &vec, 1, datlen);
krflags = MSG_WAITALL;
- smc->clcsock->sk->sk_rcvtimeo = CLC_WAIT_TIME;
len = sock_recvmsg(smc->clcsock, &msg, krflags);
if (len < datlen || !smc_clc_msg_hdr_valid(clcm)) {
smc->sk.sk_err = EPROTO;
@@ -322,6 +322,7 @@ int smc_clc_wait_msg(struct smc_sock *smc, void *buf, int buflen,
}
out:
+ smc->clcsock->sk->sk_rcvtimeo = rcvtimeo;
return reason_code;
}
diff --git a/net/smc/smc_close.c b/net/smc/smc_close.c
index fa41d9881741..ac961dfb1ea1 100644
--- a/net/smc/smc_close.c
+++ b/net/smc/smc_close.c
@@ -107,6 +107,8 @@ static void smc_close_active_abort(struct smc_sock *smc)
}
switch (sk->sk_state) {
case SMC_INIT:
+ sk->sk_state = SMC_PEERABORTWAIT;
+ break;
case SMC_ACTIVE:
sk->sk_state = SMC_PEERABORTWAIT;
release_sock(sk);
diff --git a/net/smc/smc_tx.c b/net/smc/smc_tx.c
index cee666400752..f82886b7d1d8 100644
--- a/net/smc/smc_tx.c
+++ b/net/smc/smc_tx.c
@@ -495,7 +495,8 @@ out:
void smc_tx_consumer_update(struct smc_connection *conn, bool force)
{
- union smc_host_cursor cfed, cons;
+ union smc_host_cursor cfed, cons, prod;
+ int sender_free = conn->rmb_desc->len;
int to_confirm;
smc_curs_write(&cons,
@@ -505,11 +506,18 @@ void smc_tx_consumer_update(struct smc_connection *conn, bool force)
smc_curs_read(&conn->rx_curs_confirmed, conn),
conn);
to_confirm = smc_curs_diff(conn->rmb_desc->len, &cfed, &cons);
+ if (to_confirm > conn->rmbe_update_limit) {
+ smc_curs_write(&prod,
+ smc_curs_read(&conn->local_rx_ctrl.prod, conn),
+ conn);
+ sender_free = conn->rmb_desc->len -
+ smc_curs_diff(conn->rmb_desc->len, &prod, &cfed);
+ }
if (conn->local_rx_ctrl.prod_flags.cons_curs_upd_req ||
force ||
((to_confirm > conn->rmbe_update_limit) &&
- ((to_confirm > (conn->rmb_desc->len / 2)) ||
+ ((sender_free <= (conn->rmb_desc->len / 2)) ||
conn->local_rx_ctrl.prod_flags.write_blocked))) {
if ((smc_cdc_get_slot_and_msg_send(conn) < 0) &&
conn->alert_token_local) { /* connection healthy */
diff --git a/net/socket.c b/net/socket.c
index 8a109012608a..85633622c94d 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -117,10 +117,8 @@ static ssize_t sock_write_iter(struct kiocb *iocb, struct iov_iter *from);
static int sock_mmap(struct file *file, struct vm_area_struct *vma);
static int sock_close(struct inode *inode, struct file *file);
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
- __poll_t events);
-static __poll_t sock_poll_mask(struct file *file, __poll_t);
-static __poll_t sock_poll(struct file *file, struct poll_table_struct *wait);
+static __poll_t sock_poll(struct file *file,
+ struct poll_table_struct *wait);
static long sock_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_COMPAT
static long compat_sock_ioctl(struct file *file,
@@ -143,8 +141,6 @@ static const struct file_operations socket_file_ops = {
.llseek = no_llseek,
.read_iter = sock_read_iter,
.write_iter = sock_write_iter,
- .get_poll_head = sock_get_poll_head,
- .poll_mask = sock_poll_mask,
.poll = sock_poll,
.unlocked_ioctl = sock_ioctl,
#ifdef CONFIG_COMPAT
@@ -1130,48 +1126,16 @@ out_release:
}
EXPORT_SYMBOL(sock_create_lite);
-static struct wait_queue_head *sock_get_poll_head(struct file *file,
- __poll_t events)
-{
- struct socket *sock = file->private_data;
-
- if (!sock->ops->poll_mask)
- return NULL;
- sock_poll_busy_loop(sock, events);
- return sk_sleep(sock->sk);
-}
-
-static __poll_t sock_poll_mask(struct file *file, __poll_t events)
-{
- struct socket *sock = file->private_data;
-
- /*
- * We need to be sure we are in sync with the socket flags modification.
- *
- * This memory barrier is paired in the wq_has_sleeper.
- */
- smp_mb();
-
- /* this socket can poll_ll so tell the system call */
- return sock->ops->poll_mask(sock, events) |
- (sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0);
-}
-
/* No kernel lock held - perfect */
static __poll_t sock_poll(struct file *file, poll_table *wait)
{
struct socket *sock = file->private_data;
- __poll_t events = poll_requested_events(wait), mask = 0;
-
- if (sock->ops->poll) {
- sock_poll_busy_loop(sock, events);
- mask = sock->ops->poll(file, sock, wait);
- } else if (sock->ops->poll_mask) {
- sock_poll_wait(file, sock_get_poll_head(file, events), wait);
- mask = sock->ops->poll_mask(sock, events);
- }
+ __poll_t events = poll_requested_events(wait);
- return mask | sock_poll_busy_flag(sock);
+ sock_poll_busy_loop(sock, events);
+ if (!sock->ops->poll)
+ return 0;
+ return sock->ops->poll(file, sock, wait) | sock_poll_busy_flag(sock);
}
static int sock_mmap(struct file *file, struct vm_area_struct *vma)
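
net/socket.c above drops the ->get_poll_head/->poll_mask plumbing and goes back to calling ->poll directly, so every handler converted in this series regains the classic shape: register on the wait queue, then report readiness. A minimal sketch of that shape (example_poll is hypothetical; real handlers also check shutdown state, the error queue and write space):

#include <linux/net.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <net/sock.h>

static __poll_t example_poll(struct file *file, struct socket *sock,
			     poll_table *wait)
{
	struct sock *sk = sock->sk;
	__poll_t mask = 0;

	/* arm the waitqueue first so no wakeup can be missed ... */
	sock_poll_wait(file, sk_sleep(sk), wait);

	/* ... then compute the readiness mask without blocking */
	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= EPOLLIN | EPOLLRDNORM;
	if (sk->sk_err)
		mask |= EPOLLERR;
	return mask;
}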
diff --git a/net/strparser/strparser.c b/net/strparser/strparser.c
index 1a9695183599..625acb27efcc 100644
--- a/net/strparser/strparser.c
+++ b/net/strparser/strparser.c
@@ -35,7 +35,6 @@ struct _strp_msg {
*/
struct strp_msg strp;
int accum_len;
- int early_eaten;
};
static inline struct _strp_msg *_strp_msg(struct sk_buff *skb)
@@ -115,20 +114,6 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
head = strp->skb_head;
if (head) {
/* Message already in progress */
-
- stm = _strp_msg(head);
- if (unlikely(stm->early_eaten)) {
- /* Already some number of bytes on the receive sock
- * data saved in skb_head, just indicate they
- * are consumed.
- */
- eaten = orig_len <= stm->early_eaten ?
- orig_len : stm->early_eaten;
- stm->early_eaten -= eaten;
-
- return eaten;
- }
-
if (unlikely(orig_offset)) {
/* Getting data with a non-zero offset when a message is
* in progress is not expected. If it does happen, we
@@ -297,9 +282,9 @@ static int __strp_recv(read_descriptor_t *desc, struct sk_buff *orig_skb,
}
stm->accum_len += cand_len;
+ eaten += cand_len;
strp->need_bytes = stm->strp.full_len -
stm->accum_len;
- stm->early_eaten = cand_len;
STRP_STATS_ADD(strp->stats.bytes, cand_len);
desc->count = 0; /* Stop reading socket */
break;
@@ -392,7 +377,7 @@ static int strp_read_sock(struct strparser *strp)
/* Lower sock lock held */
void strp_data_ready(struct strparser *strp)
{
- if (unlikely(strp->stopped))
+ if (unlikely(strp->stopped) || strp->paused)
return;
/* This check is needed to synchronize with do_strp_work.
@@ -407,9 +392,6 @@ void strp_data_ready(struct strparser *strp)
return;
}
- if (strp->paused)
- return;
-
if (strp->need_bytes) {
if (strp_peek_len(strp) < strp->need_bytes)
return;
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c85af058227..3fabf9f6a0f9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -987,8 +987,6 @@ bool xprt_prepare_transmit(struct rpc_task *task)
task->tk_status = -EAGAIN;
goto out_unlock;
}
- if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
- req->rq_xid = xprt_alloc_xid(xprt);
ret = true;
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@ void xprt_retry_reserve(struct rpc_task *task)
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
- return (__force __be32)xprt->xid++;
+ __be32 xid;
+
+ spin_lock(&xprt->reserve_lock);
+ xid = (__force __be32)xprt->xid++;
+ spin_unlock(&xprt->reserve_lock);
+ return xid;
}
static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@ void xprt_request_init(struct rpc_task *task)
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_buffer = NULL;
+ req->rq_xid = xprt_alloc_xid(xprt);
req->rq_connect_cookie = xprt->connect_cookie - 1;
req->rq_bytes_sent = 0;
req->rq_snd_buf.len = 0;
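
The sunrpc change above moves XID allocation into xprt_request_init() and serialises the counter with reserve_lock, so two requests can no longer draw the same XID. The core of that allocation is just a counter behind a spinlock; a minimal sketch with hypothetical example_* names:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_SPINLOCK(example_xid_lock);
static u32 example_xid_counter;

static __be32 example_alloc_xid(void)
{
	u32 xid;

	/* post-increment under the lock so concurrent callers get distinct IDs */
	spin_lock(&example_xid_lock);
	xid = example_xid_counter++;
	spin_unlock(&example_xid_lock);

	return (__force __be32)xid;
}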
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 9f666e0650e2..2830709957bd 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -133,6 +133,8 @@ static void disc_dupl_alert(struct tipc_bearer *b, u32 node_addr,
}
/* tipc_disc_addr_trial(): - handle an address uniqueness trial from peer
+ * Returns true if message should be dropped by caller, i.e., if it is a
+ * trial message or we are inside trial period. Otherwise false.
*/
static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
struct tipc_media_addr *maddr,
@@ -168,8 +170,9 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
}
+ /* Accept regular link requests/responses only after trial period */
if (mtyp != DSC_TRIAL_MSG)
- return false;
+ return trial;
sugg_addr = tipc_node_try_addr(net, peer_id, src);
if (sugg_addr)
@@ -284,7 +287,6 @@ static void tipc_disc_timeout(struct timer_list *t)
{
struct tipc_discoverer *d = from_timer(d, t, timer);
struct tipc_net *tn = tipc_net(d->net);
- u32 self = tipc_own_addr(d->net);
struct tipc_media_addr maddr;
struct sk_buff *skb = NULL;
struct net *net = d->net;
@@ -298,12 +300,14 @@ static void tipc_disc_timeout(struct timer_list *t)
goto exit;
}
- /* Did we just leave the address trial period ? */
- if (!self && !time_before(jiffies, tn->addr_trial_end)) {
- self = tn->trial_addr;
- tipc_net_finalize(net, self);
- msg_set_prevnode(buf_msg(d->skb), self);
+ /* Trial period over ? */
+ if (!time_before(jiffies, tn->addr_trial_end)) {
+ /* Did we just leave it ? */
+ if (!tipc_own_addr(net))
+ tipc_net_finalize(net, tn->trial_addr);
+
msg_set_type(buf_msg(d->skb), DSC_REQ_MSG);
+ msg_set_prevnode(buf_msg(d->skb), tipc_own_addr(net));
}
/* Adjust timeout interval according to discovery phase */
diff --git a/net/tipc/net.c b/net/tipc/net.c
index 4fbaa0464405..a7f6964c3a4b 100644
--- a/net/tipc/net.c
+++ b/net/tipc/net.c
@@ -121,12 +121,17 @@ int tipc_net_init(struct net *net, u8 *node_id, u32 addr)
void tipc_net_finalize(struct net *net, u32 addr)
{
- tipc_set_node_addr(net, addr);
- smp_mb();
- tipc_named_reinit(net);
- tipc_sk_reinit(net);
- tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
- TIPC_CLUSTER_SCOPE, 0, addr);
+ struct tipc_net *tn = tipc_net(net);
+
+ spin_lock_bh(&tn->node_list_lock);
+ if (!tipc_own_addr(net)) {
+ tipc_set_node_addr(net, addr);
+ tipc_named_reinit(net);
+ tipc_sk_reinit(net);
+ tipc_nametbl_publish(net, TIPC_CFG_SRV, addr, addr,
+ TIPC_CLUSTER_SCOPE, 0, addr);
+ }
+ spin_unlock_bh(&tn->node_list_lock);
}
void tipc_net_stop(struct net *net)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 6a44eb812baf..0453bd451ce8 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -797,6 +797,7 @@ static u32 tipc_node_suggest_addr(struct net *net, u32 addr)
}
/* tipc_node_try_addr(): Check if addr can be used by peer, suggest other if not
+ * Returns suggested address if any, otherwise 0
*/
u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
{
@@ -819,12 +820,14 @@ u32 tipc_node_try_addr(struct net *net, u8 *id, u32 addr)
if (n) {
addr = n->addr;
tipc_node_put(n);
+ return addr;
}
- /* Even this node may be in trial phase */
+
+ /* Even this node may be in conflict */
if (tn->trial_addr == addr)
return tipc_node_suggest_addr(net, addr);
- return addr;
+ return 0;
}
void tipc_node_check_dest(struct net *net, u32 addr,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 14a5d055717d..930852c54d7a 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -692,9 +692,10 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
}
/**
- * tipc_poll - read pollmask
+ * tipc_poll - read and possibly block on pollmask
* @file: file structure associated with the socket
* @sock: socket for which to calculate the poll bits
+ * @wait: ???
*
* Returns pollmask value
*
@@ -708,12 +709,15 @@ static int tipc_getname(struct socket *sock, struct sockaddr *uaddr,
* imply that the operation will succeed, merely that it should be performed
* and will not block.
*/
-static __poll_t tipc_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t tipc_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
__poll_t revents = 0;
+ sock_poll_wait(file, sk_sleep(sk), wait);
+
if (sk->sk_shutdown & RCV_SHUTDOWN)
revents |= EPOLLRDHUP | EPOLLIN | EPOLLRDNORM;
if (sk->sk_shutdown == SHUTDOWN_MASK)
@@ -3033,7 +3037,7 @@ static const struct proto_ops msg_ops = {
.socketpair = tipc_socketpair,
.accept = sock_no_accept,
.getname = tipc_getname,
- .poll_mask = tipc_poll_mask,
+ .poll = tipc_poll,
.ioctl = tipc_ioctl,
.listen = sock_no_listen,
.shutdown = tipc_shutdown,
@@ -3054,7 +3058,7 @@ static const struct proto_ops packet_ops = {
.socketpair = tipc_socketpair,
.accept = tipc_accept,
.getname = tipc_getname,
- .poll_mask = tipc_poll_mask,
+ .poll = tipc_poll,
.ioctl = tipc_ioctl,
.listen = tipc_listen,
.shutdown = tipc_shutdown,
@@ -3075,7 +3079,7 @@ static const struct proto_ops stream_ops = {
.socketpair = tipc_socketpair,
.accept = tipc_accept,
.getname = tipc_getname,
- .poll_mask = tipc_poll_mask,
+ .poll = tipc_poll,
.ioctl = tipc_ioctl,
.listen = tipc_listen,
.shutdown = tipc_shutdown,
diff --git a/net/tls/tls_main.c b/net/tls/tls_main.c
index a127d61e8af9..301f22430469 100644
--- a/net/tls/tls_main.c
+++ b/net/tls/tls_main.c
@@ -712,7 +712,7 @@ static int __init tls_register(void)
build_protos(tls_prots[TLSV4], &tcp_prot);
tls_sw_proto_ops = inet_stream_ops;
- tls_sw_proto_ops.poll_mask = tls_sw_poll_mask;
+ tls_sw_proto_ops.poll = tls_sw_poll;
tls_sw_proto_ops.splice_read = tls_sw_splice_read;
#ifdef CONFIG_TLS_DEVICE
diff --git a/net/tls/tls_sw.c b/net/tls/tls_sw.c
index f127fac88acf..1f3d9789af30 100644
--- a/net/tls/tls_sw.c
+++ b/net/tls/tls_sw.c
@@ -440,7 +440,7 @@ alloc_encrypted:
ret = tls_push_record(sk, msg->msg_flags, record_type);
if (!ret)
continue;
- if (ret == -EAGAIN)
+ if (ret < 0)
goto send_end;
copied -= try_to_copy;
@@ -646,6 +646,9 @@ static struct sk_buff *tls_wait_data(struct sock *sk, int flags,
return NULL;
}
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ return NULL;
+
if (sock_flag(sk, SOCK_DONE))
return NULL;
@@ -701,6 +704,10 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
nsg = skb_to_sgvec(skb, &sgin[1],
rxm->offset + tls_ctx->rx.prepend_size,
rxm->full_len - tls_ctx->rx.prepend_size);
+ if (nsg < 0) {
+ ret = nsg;
+ goto out;
+ }
tls_make_aad(ctx->rx_aad_ciphertext,
rxm->full_len - tls_ctx->rx.overhead_size,
@@ -712,6 +719,7 @@ static int decrypt_skb(struct sock *sk, struct sk_buff *skb,
rxm->full_len - tls_ctx->rx.overhead_size,
skb, sk->sk_allocation);
+out:
if (sgin != &sgin_arr[0])
kfree(sgin);
@@ -919,22 +927,23 @@ splice_read_end:
return copied ? : err;
}
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events)
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait)
{
+ unsigned int ret;
struct sock *sk = sock->sk;
struct tls_context *tls_ctx = tls_get_ctx(sk);
struct tls_sw_context_rx *ctx = tls_sw_ctx_rx(tls_ctx);
- __poll_t mask;
- /* Grab EPOLLOUT and EPOLLHUP from the underlying socket */
- mask = ctx->sk_poll_mask(sock, events);
+ /* Grab POLLOUT and POLLHUP from the underlying socket */
+ ret = ctx->sk_poll(file, sock, wait);
- /* Clear EPOLLIN bits, and set based on recv_pkt */
- mask &= ~(EPOLLIN | EPOLLRDNORM);
+ /* Clear POLLIN bits, and set based on recv_pkt */
+ ret &= ~(POLLIN | POLLRDNORM);
if (ctx->recv_pkt)
- mask |= EPOLLIN | EPOLLRDNORM;
+ ret |= POLLIN | POLLRDNORM;
- return mask;
+ return ret;
}
static int tls_read_size(struct strparser *strp, struct sk_buff *skb)
@@ -1191,7 +1200,7 @@ int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx)
sk->sk_data_ready = tls_data_ready;
write_unlock_bh(&sk->sk_callback_lock);
- sw_ctx_rx->sk_poll_mask = sk->sk_socket->ops->poll_mask;
+ sw_ctx_rx->sk_poll = sk->sk_socket->ops->poll;
strp_check_rcv(&sw_ctx_rx->strp);
}
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 95b02a71fd47..e5473c03d667 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -638,8 +638,9 @@ static int unix_stream_connect(struct socket *, struct sockaddr *,
static int unix_socketpair(struct socket *, struct socket *);
static int unix_accept(struct socket *, struct socket *, int, bool);
static int unix_getname(struct socket *, struct sockaddr *, int);
-static __poll_t unix_poll_mask(struct socket *, __poll_t);
-static __poll_t unix_dgram_poll_mask(struct socket *, __poll_t);
+static __poll_t unix_poll(struct file *, struct socket *, poll_table *);
+static __poll_t unix_dgram_poll(struct file *, struct socket *,
+ poll_table *);
static int unix_ioctl(struct socket *, unsigned int, unsigned long);
static int unix_shutdown(struct socket *, int);
static int unix_stream_sendmsg(struct socket *, struct msghdr *, size_t);
@@ -680,7 +681,7 @@ static const struct proto_ops unix_stream_ops = {
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
- .poll_mask = unix_poll_mask,
+ .poll = unix_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
@@ -703,7 +704,7 @@ static const struct proto_ops unix_dgram_ops = {
.socketpair = unix_socketpair,
.accept = sock_no_accept,
.getname = unix_getname,
- .poll_mask = unix_dgram_poll_mask,
+ .poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = sock_no_listen,
.shutdown = unix_shutdown,
@@ -725,7 +726,7 @@ static const struct proto_ops unix_seqpacket_ops = {
.socketpair = unix_socketpair,
.accept = unix_accept,
.getname = unix_getname,
- .poll_mask = unix_dgram_poll_mask,
+ .poll = unix_dgram_poll,
.ioctl = unix_ioctl,
.listen = unix_listen,
.shutdown = unix_shutdown,
@@ -2629,10 +2630,13 @@ static int unix_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
return err;
}
-static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_poll(struct file *file, struct socket *sock, poll_table *wait)
{
struct sock *sk = sock->sk;
- __poll_t mask = 0;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
/* exceptional events? */
if (sk->sk_err)
@@ -2661,11 +2665,15 @@ static __poll_t unix_poll_mask(struct socket *sock, __poll_t events)
return mask;
}
-static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t unix_dgram_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
struct sock *sk = sock->sk, *other;
- int writable;
- __poll_t mask = 0;
+ unsigned int writable;
+ __poll_t mask;
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
/* exceptional events? */
if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue))
@@ -2691,7 +2699,7 @@ static __poll_t unix_dgram_poll_mask(struct socket *sock, __poll_t events)
}
/* No write status requested, avoid expensive OUT tests. */
- if (!(events & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
+ if (!(poll_requested_events(wait) & (EPOLLWRBAND|EPOLLWRNORM|EPOLLOUT)))
return mask;
writable = unix_writable(sk);
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index bb5d5fa68c35..c1076c19b858 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -850,11 +850,18 @@ static int vsock_shutdown(struct socket *sock, int mode)
return err;
}
-static __poll_t vsock_poll_mask(struct socket *sock, __poll_t events)
+static __poll_t vsock_poll(struct file *file, struct socket *sock,
+ poll_table *wait)
{
- struct sock *sk = sock->sk;
- struct vsock_sock *vsk = vsock_sk(sk);
- __poll_t mask = 0;
+ struct sock *sk;
+ __poll_t mask;
+ struct vsock_sock *vsk;
+
+ sk = sock->sk;
+ vsk = vsock_sk(sk);
+
+ poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
if (sk->sk_err)
/* Signify that there has been an error on this socket. */
@@ -1084,7 +1091,7 @@ static const struct proto_ops vsock_dgram_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = vsock_getname,
- .poll_mask = vsock_poll_mask,
+ .poll = vsock_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = vsock_shutdown,
@@ -1842,7 +1849,7 @@ static const struct proto_ops vsock_stream_ops = {
.socketpair = sock_no_socketpair,
.accept = vsock_accept,
.getname = vsock_getname,
- .poll_mask = vsock_poll_mask,
+ .poll = vsock_poll,
.ioctl = sock_no_ioctl,
.listen = vsock_listen,
.shutdown = vsock_shutdown,
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 8e03bd3f3668..5d3cce9e8744 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -201,7 +201,7 @@ virtio_transport_send_pkt(struct virtio_vsock_pkt *pkt)
return -ENODEV;
}
- if (le32_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
+ if (le64_to_cpu(pkt->hdr.dst_cid) == vsock->guest_cid)
return virtio_transport_send_pkt_loopback(vsock, pkt);
if (pkt->reply)
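In the virtio_transport hunk above, dst_cid is now read with le64_to_cpu() so the accessor width matches the header field, which is declared __le64. A small userspace demonstration (illustrative only, plain C with <endian.h>) of what a 32-bit read of a 64-bit little-endian field loses once the value no longer fits in 32 bits:

    #include <endian.h>
    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Stand-in for a __le64 header field holding a value > 2^32. */
        uint64_t dst_cid_le = htole64(0x100000002ULL);

        printf("64-bit read: %" PRIu64 "\n", le64toh(dst_cid_le));           /* 4294967298 */
        printf("32-bit read: %" PRIu32 "\n", le32toh((uint32_t)dst_cid_le)); /* truncated */
        return 0;
    }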
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c7bbe5f0aae8..80bc986c79e5 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -4409,6 +4409,7 @@ static int parse_station_flags(struct genl_info *info,
params->sta_flags_mask = BIT(NL80211_STA_FLAG_AUTHENTICATED) |
BIT(NL80211_STA_FLAG_MFP) |
BIT(NL80211_STA_FLAG_AUTHORIZED);
+ break;
default:
return -EINVAL;
}
@@ -6231,7 +6232,7 @@ do { \
nl80211_check_s32);
/*
* Check HT operation mode based on
- * IEEE 802.11 2012 8.4.2.59 HT Operation element.
+ * IEEE 802.11-2016 9.4.2.57 HT Operation element.
*/
if (tb[NL80211_MESHCONF_HT_OPMODE]) {
ht_opmode = nla_get_u16(tb[NL80211_MESHCONF_HT_OPMODE]);
@@ -6241,22 +6242,9 @@ do { \
IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
return -EINVAL;
- if ((ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) &&
- (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
- return -EINVAL;
+ /* NON_HT_STA bit is reserved, but some programs set it */
+ ht_opmode &= ~IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT;
- switch (ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION) {
- case IEEE80211_HT_OP_MODE_PROTECTION_NONE:
- case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ:
- if (ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT)
- return -EINVAL;
- break;
- case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER:
- case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED:
- if (!(ht_opmode & IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT))
- return -EINVAL;
- break;
- }
cfg->ht_opmode = ht_opmode;
mask |= (1 << (NL80211_MESHCONF_HT_OPMODE - 1));
}
@@ -10962,9 +10950,12 @@ static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
rem) {
u8 *mask_pat;
- nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
- nl80211_packet_pattern_policy,
- info->extack);
+ err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+ nl80211_packet_pattern_policy,
+ info->extack);
+ if (err)
+ goto error;
+
err = -EINVAL;
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
@@ -11213,8 +11204,11 @@ static int nl80211_parse_coalesce_rule(struct cfg80211_registered_device *rdev,
rem) {
u8 *mask_pat;
- nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
- nl80211_packet_pattern_policy, NULL);
+ err = nla_parse_nested(pat_tb, MAX_NL80211_PKTPAT, pat,
+ nl80211_packet_pattern_policy, NULL);
+ if (err)
+ return err;
+
if (!pat_tb[NL80211_PKTPAT_MASK] ||
!pat_tb[NL80211_PKTPAT_PATTERN])
return -EINVAL;
@@ -14930,20 +14924,24 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
EXPORT_SYMBOL(cfg80211_mgmt_tx_status);
static int __nl80211_rx_control_port(struct net_device *dev,
- const u8 *buf, size_t len,
- const u8 *addr, u16 proto,
+ struct sk_buff *skb,
bool unencrypted, gfp_t gfp)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+ struct ethhdr *ehdr = eth_hdr(skb);
+ const u8 *addr = ehdr->h_source;
+ u16 proto = be16_to_cpu(skb->protocol);
struct sk_buff *msg;
void *hdr;
+ struct nlattr *frame;
+
u32 nlportid = READ_ONCE(wdev->conn_owner_nlportid);
if (!nlportid)
return -ENOENT;
- msg = nlmsg_new(100 + len, gfp);
+ msg = nlmsg_new(100 + skb->len, gfp);
if (!msg)
return -ENOMEM;
@@ -14957,13 +14955,17 @@ static int __nl80211_rx_control_port(struct net_device *dev,
nla_put_u32(msg, NL80211_ATTR_IFINDEX, dev->ifindex) ||
nla_put_u64_64bit(msg, NL80211_ATTR_WDEV, wdev_id(wdev),
NL80211_ATTR_PAD) ||
- nla_put(msg, NL80211_ATTR_FRAME, len, buf) ||
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, addr) ||
nla_put_u16(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE, proto) ||
(unencrypted && nla_put_flag(msg,
NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT)))
goto nla_put_failure;
+ frame = nla_reserve(msg, NL80211_ATTR_FRAME, skb->len);
+ if (!frame)
+ goto nla_put_failure;
+
+ skb_copy_bits(skb, 0, nla_data(frame), skb->len);
genlmsg_end(msg, hdr);
return genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlportid);
@@ -14974,14 +14976,12 @@ static int __nl80211_rx_control_port(struct net_device *dev,
}
bool cfg80211_rx_control_port(struct net_device *dev,
- const u8 *buf, size_t len,
- const u8 *addr, u16 proto, bool unencrypted)
+ struct sk_buff *skb, bool unencrypted)
{
int ret;
- trace_cfg80211_rx_control_port(dev, buf, len, addr, proto, unencrypted);
- ret = __nl80211_rx_control_port(dev, buf, len, addr, proto,
- unencrypted, GFP_ATOMIC);
+ trace_cfg80211_rx_control_port(dev, skb, unencrypted);
+ ret = __nl80211_rx_control_port(dev, skb, unencrypted, GFP_ATOMIC);
trace_cfg80211_return_bool(ret == 0);
return ret == 0;
}
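__nl80211_rx_control_port() above switches from putting a caller-supplied linear buffer with nla_put() to nla_reserve() plus skb_copy_bits(), so a possibly non-linear skb can be copied straight into the netlink attribute. The reserve-then-fill step in isolation (rough sketch, assuming a hypothetical attribute type and an already-constructed msg):

    #include <linux/skbuff.h>
    #include <net/netlink.h>

    /* Copy the payload of 'skb' into netlink message 'msg' as one attribute.
     * skb_copy_bits() walks paged/fragmented data, so no linearization is needed.
     */
    static int foo_put_frame(struct sk_buff *msg, struct sk_buff *skb, int attrtype)
    {
        struct nlattr *frame;

        frame = nla_reserve(msg, attrtype, skb->len);
        if (!frame)
            return -EMSGSIZE;

        return skb_copy_bits(skb, 0, nla_data(frame), skb->len);
    }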
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index bbe6298e4bb9..4fc66a117b7d 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2240,7 +2240,9 @@ static void wiphy_update_regulatory(struct wiphy *wiphy,
* as some drivers used this to restore its orig_* reg domain.
*/
if (initiator == NL80211_REGDOM_SET_BY_CORE &&
- wiphy->regulatory_flags & REGULATORY_CUSTOM_REG)
+ wiphy->regulatory_flags & REGULATORY_CUSTOM_REG &&
+ !(wiphy->regulatory_flags &
+ REGULATORY_WIPHY_SELF_MANAGED))
reg_call_notifier(wiphy, lr);
return;
}
@@ -2787,26 +2789,6 @@ static void notify_self_managed_wiphys(struct regulatory_request *request)
}
}
-static bool reg_only_self_managed_wiphys(void)
-{
- struct cfg80211_registered_device *rdev;
- struct wiphy *wiphy;
- bool self_managed_found = false;
-
- ASSERT_RTNL();
-
- list_for_each_entry(rdev, &cfg80211_rdev_list, list) {
- wiphy = &rdev->wiphy;
- if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED)
- self_managed_found = true;
- else
- return false;
- }
-
- /* make sure at least one self-managed wiphy exists */
- return self_managed_found;
-}
-
/*
* Processes regulatory hints, this is all the NL80211_REGDOM_SET_BY_*
* Regulatory hints come on a first come first serve basis and we
@@ -2839,10 +2821,6 @@ static void reg_process_pending_hints(void)
spin_unlock(&reg_requests_lock);
notify_self_managed_wiphys(reg_request);
- if (reg_only_self_managed_wiphys()) {
- reg_free_request(reg_request);
- return;
- }
reg_process_hint(reg_request);
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index 2b417a2fe63f..7c73510b161f 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2627,23 +2627,25 @@ TRACE_EVENT(cfg80211_mgmt_tx_status,
);
TRACE_EVENT(cfg80211_rx_control_port,
- TP_PROTO(struct net_device *netdev, const u8 *buf, size_t len,
- const u8 *addr, u16 proto, bool unencrypted),
- TP_ARGS(netdev, buf, len, addr, proto, unencrypted),
+ TP_PROTO(struct net_device *netdev, struct sk_buff *skb,
+ bool unencrypted),
+ TP_ARGS(netdev, skb, unencrypted),
TP_STRUCT__entry(
NETDEV_ENTRY
- MAC_ENTRY(addr)
+ __field(int, len)
+ MAC_ENTRY(from)
__field(u16, proto)
__field(bool, unencrypted)
),
TP_fast_assign(
NETDEV_ASSIGN;
- MAC_ASSIGN(addr, addr);
- __entry->proto = proto;
+ __entry->len = skb->len;
+ MAC_ASSIGN(from, eth_hdr(skb)->h_source);
+ __entry->proto = be16_to_cpu(skb->protocol);
__entry->unencrypted = unencrypted;
),
- TP_printk(NETDEV_PR_FMT ", " MAC_PR_FMT " proto: 0x%x, unencrypted: %s",
- NETDEV_PR_ARG, MAC_PR_ARG(addr),
+ TP_printk(NETDEV_PR_FMT ", len=%d, " MAC_PR_FMT ", proto: 0x%x, unencrypted: %s",
+ NETDEV_PR_ARG, __entry->len, MAC_PR_ARG(from),
__entry->proto, BOOL_TO_STR(__entry->unencrypted))
);
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index f93365ae0fdd..d49aa79b7997 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -1750,7 +1750,7 @@ static const struct proto_ops x25_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = x25_accept,
.getname = x25_getname,
- .poll_mask = datagram_poll_mask,
+ .poll = datagram_poll,
.ioctl = x25_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = compat_x25_ioctl,
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index 36919a254ba3..72335c2e8108 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -118,6 +118,9 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
u64 addr;
int err;
+ if (xs->dev != xdp->rxq->dev || xs->queue_id != xdp->rxq->queue_index)
+ return -EINVAL;
+
if (!xskq_peek_addr(xs->umem->fq, &addr) ||
len > xs->umem->chunk_size_nohr) {
xs->rx_dropped++;
@@ -196,8 +199,11 @@ static void xsk_destruct_skb(struct sk_buff *skb)
{
u64 addr = (u64)(long)skb_shinfo(skb)->destructor_arg;
struct xdp_sock *xs = xdp_sk(skb->sk);
+ unsigned long flags;
+ spin_lock_irqsave(&xs->tx_completion_lock, flags);
WARN_ON_ONCE(xskq_produce_addr(xs->umem->cq, addr));
+ spin_unlock_irqrestore(&xs->tx_completion_lock, flags);
sock_wfree(skb);
}
@@ -212,9 +218,6 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
struct sk_buff *skb;
int err = 0;
- if (unlikely(!xs->tx))
- return -ENOBUFS;
-
mutex_lock(&xs->mutex);
while (xskq_peek_desc(xs->tx, &desc)) {
@@ -227,22 +230,13 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
goto out;
}
- if (xskq_reserve_addr(xs->umem->cq)) {
- err = -EAGAIN;
+ if (xskq_reserve_addr(xs->umem->cq))
goto out;
- }
- len = desc.len;
- if (unlikely(len > xs->dev->mtu)) {
- err = -EMSGSIZE;
+ if (xs->queue_id >= xs->dev->real_num_tx_queues)
goto out;
- }
-
- if (xs->queue_id >= xs->dev->real_num_tx_queues) {
- err = -ENXIO;
- goto out;
- }
+ len = desc.len;
skb = sock_alloc_send_skb(sk, len, 1, &err);
if (unlikely(!skb)) {
err = -EAGAIN;
@@ -265,15 +259,15 @@ static int xsk_generic_xmit(struct sock *sk, struct msghdr *m,
skb->destructor = xsk_destruct_skb;
err = dev_direct_xmit(skb, xs->queue_id);
+ xskq_discard_desc(xs->tx);
/* Ignore NET_XMIT_CN as packet might have been sent */
if (err == NET_XMIT_DROP || err == NETDEV_TX_BUSY) {
- err = -EAGAIN;
- /* SKB consumed by dev_direct_xmit() */
+ /* SKB completed but not sent */
+ err = -EBUSY;
goto out;
}
sent_frame = true;
- xskq_discard_desc(xs->tx);
}
out:
@@ -294,15 +288,18 @@ static int xsk_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return -ENXIO;
if (unlikely(!(xs->dev->flags & IFF_UP)))
return -ENETDOWN;
+ if (unlikely(!xs->tx))
+ return -ENOBUFS;
if (need_wait)
return -EOPNOTSUPP;
return (xs->zc) ? xsk_zc_xmit(sk) : xsk_generic_xmit(sk, m, total_len);
}
-static __poll_t xsk_poll_mask(struct socket *sock, __poll_t events)
+static unsigned int xsk_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait)
{
- __poll_t mask = datagram_poll_mask(sock, events);
+ unsigned int mask = datagram_poll(file, sock, wait);
struct sock *sk = sock->sk;
struct xdp_sock *xs = xdp_sk(sk);
@@ -693,7 +690,7 @@ static const struct proto_ops xsk_proto_ops = {
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = sock_no_getname,
- .poll_mask = xsk_poll_mask,
+ .poll = xsk_poll,
.ioctl = sock_no_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
@@ -751,6 +748,7 @@ static int xsk_create(struct net *net, struct socket *sock, int protocol,
xs = xdp_sk(sk);
mutex_init(&xs->mutex);
+ spin_lock_init(&xs->tx_completion_lock);
local_bh_disable();
sock_prot_inuse_add(net, &xsk_proto, 1);
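The xsk changes above add tx_completion_lock because the skb destructor that produces completion-queue entries can be invoked from interrupt context once the driver frees the skb, so the producer update needs the irqsave spinlock variants. The locking pattern on its own (sketch only, hypothetical structure and helper names):

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct foo_cq {
        spinlock_t lock;        /* serializes producers of 'ring' */
        unsigned int tail;
        unsigned int mask;
        u64 *ring;
    };

    /* Safe to call from process, softirq or hard-IRQ context. */
    static void foo_cq_produce(struct foo_cq *cq, u64 addr)
    {
        unsigned long flags;

        spin_lock_irqsave(&cq->lock, flags);
        cq->ring[cq->tail++ & cq->mask] = addr;
        spin_unlock_irqrestore(&cq->lock, flags);
    }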
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index ef6a6f0ec949..52ecaf770642 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -62,14 +62,9 @@ static inline u32 xskq_nb_avail(struct xsk_queue *q, u32 dcnt)
return (entries > dcnt) ? dcnt : entries;
}
-static inline u32 xskq_nb_free_lazy(struct xsk_queue *q, u32 producer)
-{
- return q->nentries - (producer - q->cons_tail);
-}
-
static inline u32 xskq_nb_free(struct xsk_queue *q, u32 producer, u32 dcnt)
{
- u32 free_entries = xskq_nb_free_lazy(q, producer);
+ u32 free_entries = q->nentries - (producer - q->cons_tail);
if (free_entries >= dcnt)
return free_entries;
@@ -129,7 +124,7 @@ static inline int xskq_produce_addr(struct xsk_queue *q, u64 addr)
{
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
- if (xskq_nb_free(q, q->prod_tail, LAZY_UPDATE_THRESHOLD) == 0)
+ if (xskq_nb_free(q, q->prod_tail, 1) == 0)
return -ENOSPC;
ring->desc[q->prod_tail++ & q->ring_mask] = addr;
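After the change above, xskq_nb_free() computes free space directly as nentries - (producer - cons_tail). With free-running 32-bit indices this stays correct across index wraparound because the subtraction is unsigned. A small userspace check of that identity (illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Free slots in a ring driven by free-running 32-bit producer/consumer counters. */
    static uint32_t ring_free(uint32_t nentries, uint32_t producer, uint32_t consumer)
    {
        return nentries - (producer - consumer); /* unsigned wraparound is well defined */
    }

    int main(void)
    {
        uint32_t prod = UINT32_C(0xfffffffe), cons = UINT32_C(0xfffffffa);

        /* 8-entry ring, 4 entries in flight, 4 free. */
        printf("free: %u\n", ring_free(8, prod, cons));

        /* Producer wraps past zero: 8 in flight, 0 free. */
        prod += 4;
        printf("free: %u\n", ring_free(8, prod, cons));
        return 0;
    }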
diff --git a/samples/bpf/.gitignore b/samples/bpf/.gitignore
new file mode 100644
index 000000000000..8ae4940025f8
--- /dev/null
+++ b/samples/bpf/.gitignore
@@ -0,0 +1,49 @@
+cpustat
+fds_example
+lathist
+load_sock_ops
+lwt_len_hist
+map_perf_test
+offwaketime
+per_socket_stats_example
+sampleip
+sock_example
+sockex1
+sockex2
+sockex3
+spintest
+syscall_nrs.h
+syscall_tp
+task_fd_query
+tc_l2_redirect
+test_cgrp2_array_pin
+test_cgrp2_attach
+test_cgrp2_attach2
+test_cgrp2_sock
+test_cgrp2_sock2
+test_current_task_under_cgroup
+test_lru_dist
+test_map_in_map
+test_overhead
+test_probe_write_user
+trace_event
+trace_output
+tracex1
+tracex2
+tracex3
+tracex4
+tracex5
+tracex6
+tracex7
+xdp1
+xdp2
+xdp_adjust_tail
+xdp_fwd
+xdp_monitor
+xdp_redirect
+xdp_redirect_cpu
+xdp_redirect_map
+xdp_router_ipv4
+xdp_rxq_info
+xdp_tx_iptunnel
+xdpsock
diff --git a/samples/bpf/parse_varlen.c b/samples/bpf/parse_varlen.c
index 95c16324760c..0b6f22feb2c9 100644
--- a/samples/bpf/parse_varlen.c
+++ b/samples/bpf/parse_varlen.c
@@ -6,6 +6,7 @@
*/
#define KBUILD_MODNAME "foo"
#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/in.h>
@@ -108,11 +109,6 @@ static int parse_ipv6(void *data, uint64_t nh_off, void *data_end)
return 0;
}
-struct vlan_hdr {
- uint16_t h_vlan_TCI;
- uint16_t h_vlan_encapsulated_proto;
-};
-
SEC("varlen")
int handle_ingress(struct __sk_buff *skb)
{
diff --git a/samples/bpf/test_overhead_user.c b/samples/bpf/test_overhead_user.c
index 6caf47afa635..9d6dcaa9db92 100644
--- a/samples/bpf/test_overhead_user.c
+++ b/samples/bpf/test_overhead_user.c
@@ -6,6 +6,7 @@
*/
#define _GNU_SOURCE
#include <sched.h>
+#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <asm/unistd.h>
@@ -44,8 +45,13 @@ static void test_task_rename(int cpu)
exit(1);
}
start_time = time_get_ns();
- for (i = 0; i < MAX_CNT; i++)
- write(fd, buf, sizeof(buf));
+ for (i = 0; i < MAX_CNT; i++) {
+ if (write(fd, buf, sizeof(buf)) < 0) {
+ printf("task rename failed: %s\n", strerror(errno));
+ close(fd);
+ return;
+ }
+ }
printf("task_rename:%d: %lld events per sec\n",
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
close(fd);
@@ -63,8 +69,13 @@ static void test_urandom_read(int cpu)
exit(1);
}
start_time = time_get_ns();
- for (i = 0; i < MAX_CNT; i++)
- read(fd, buf, sizeof(buf));
+ for (i = 0; i < MAX_CNT; i++) {
+ if (read(fd, buf, sizeof(buf)) < 0) {
+ printf("failed to read from /dev/urandom: %s\n", strerror(errno));
+ close(fd);
+ return;
+ }
+ }
printf("urandom_read:%d: %lld events per sec\n",
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
close(fd);
diff --git a/samples/bpf/trace_event_user.c b/samples/bpf/trace_event_user.c
index 1fa1becfa641..d08046ab81f0 100644
--- a/samples/bpf/trace_event_user.c
+++ b/samples/bpf/trace_event_user.c
@@ -122,6 +122,16 @@ static void print_stacks(void)
}
}
+static inline int generate_load(void)
+{
+ if (system("dd if=/dev/zero of=/dev/null count=5000k status=none") < 0) {
+ printf("failed to generate some load with dd: %s\n", strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
static void test_perf_event_all_cpu(struct perf_event_attr *attr)
{
int nr_cpus = sysconf(_SC_NPROCESSORS_CONF);
@@ -142,7 +152,11 @@ static void test_perf_event_all_cpu(struct perf_event_attr *attr)
assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
assert(ioctl(pmu_fd[i], PERF_EVENT_IOC_ENABLE) == 0);
}
- system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+ if (generate_load() < 0) {
+ error = 1;
+ goto all_cpu_err;
+ }
print_stacks();
all_cpu_err:
for (i--; i >= 0; i--) {
@@ -156,7 +170,7 @@ all_cpu_err:
static void test_perf_event_task(struct perf_event_attr *attr)
{
- int pmu_fd;
+ int pmu_fd, error = 0;
/* per task perf event, enable inherit so the "dd ..." command can be traced properly.
* Enabling inherit will cause bpf_perf_prog_read_time helper failure.
@@ -171,10 +185,17 @@ static void test_perf_event_task(struct perf_event_attr *attr)
}
assert(ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd[0]) == 0);
assert(ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE) == 0);
- system("dd if=/dev/zero of=/dev/null count=5000k status=none");
+
+ if (generate_load() < 0) {
+ error = 1;
+ goto err;
+ }
print_stacks();
+err:
ioctl(pmu_fd, PERF_EVENT_IOC_DISABLE);
close(pmu_fd);
+ if (error)
+ int_exit(0);
}
static void test_bpf_perf_event(void)
diff --git a/samples/bpf/xdp2skb_meta.sh b/samples/bpf/xdp2skb_meta.sh
index b9c9549c4c27..4bde9d066c46 100755
--- a/samples/bpf/xdp2skb_meta.sh
+++ b/samples/bpf/xdp2skb_meta.sh
@@ -16,8 +16,8 @@
BPF_FILE=xdp2skb_meta_kern.o
DIR=$(dirname $0)
-export TC=/usr/sbin/tc
-export IP=/usr/sbin/ip
+[ -z "$TC" ] && TC=tc
+[ -z "$IP" ] && IP=ip
function usage() {
echo ""
@@ -53,7 +53,7 @@ function _call_cmd() {
local allow_fail="$2"
shift 2
if [[ -n "$VERBOSE" ]]; then
- echo "$(basename $cmd) $@"
+ echo "$cmd $@"
fi
if [[ -n "$DRYRUN" ]]; then
return
diff --git a/samples/bpf/xdp_fwd_kern.c b/samples/bpf/xdp_fwd_kern.c
index 6673cdb9f55c..a7e94e7ff87d 100644
--- a/samples/bpf/xdp_fwd_kern.c
+++ b/samples/bpf/xdp_fwd_kern.c
@@ -48,9 +48,9 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
struct ethhdr *eth = data;
struct ipv6hdr *ip6h;
struct iphdr *iph;
- int out_index;
u16 h_proto;
u64 nh_off;
+ int rc;
nh_off = sizeof(*eth);
if (data + nh_off > data_end)
@@ -101,7 +101,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
fib_params.ifindex = ctx->ingress_ifindex;
- out_index = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
+ rc = bpf_fib_lookup(ctx, &fib_params, sizeof(fib_params), flags);
/* verify egress index has xdp support
* TO-DO bpf_map_lookup_elem(&tx_port, &key) fails with
@@ -109,7 +109,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
* NOTE: without verification that egress index supports XDP
* forwarding packets are dropped.
*/
- if (out_index > 0) {
+ if (rc == 0) {
if (h_proto == htons(ETH_P_IP))
ip_decrease_ttl(iph);
else if (h_proto == htons(ETH_P_IPV6))
@@ -117,7 +117,7 @@ static __always_inline int xdp_fwd_flags(struct xdp_md *ctx, u32 flags)
memcpy(eth->h_dest, fib_params.dmac, ETH_ALEN);
memcpy(eth->h_source, fib_params.smac, ETH_ALEN);
- return bpf_redirect_map(&tx_port, out_index, 0);
+ return bpf_redirect_map(&tx_port, fib_params.ifindex, 0);
}
return XDP_PASS;
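The xdp_fwd hunks above follow the updated bpf_fib_lookup() contract: the helper now returns a status code, 0 meaning the lookup succeeded and the packet is forwardable, and the egress device is reported back in the bpf_fib_lookup ifindex field rather than in the return value. Reduced to the calling convention (sketch only, assuming the sample's existing headers, helper declarations and its tx_port device map):

    /* Sketch of the bpf_fib_lookup() usage shown above; relies on the
     * sample's includes, helpers and the tx_port devmap being in scope.
     */
    static __always_inline int fwd_or_pass(struct xdp_md *ctx,
                                           struct bpf_fib_lookup *params,
                                           u32 flags)
    {
        int rc = bpf_fib_lookup(ctx, params, sizeof(*params), flags);

        if (rc != 0)            /* non-zero status: let the caller decide what to do */
            return XDP_PASS;

        /* On success the helper fills in dmac/smac and the egress ifindex. */
        return bpf_redirect_map(&tx_port, params->ifindex, 0);
    }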
diff --git a/samples/bpf/xdpsock_user.c b/samples/bpf/xdpsock_user.c
index d69c8d78d3fd..5904b1543831 100644
--- a/samples/bpf/xdpsock_user.c
+++ b/samples/bpf/xdpsock_user.c
@@ -729,7 +729,7 @@ static void kick_tx(int fd)
int ret;
ret = sendto(fd, NULL, 0, MSG_DONTWAIT, NULL, 0);
- if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN)
+ if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
return;
lassert(0);
}
diff --git a/samples/vfio-mdev/mbochs.c b/samples/vfio-mdev/mbochs.c
index 2960e26c6ea4..2535c3677c7b 100644
--- a/samples/vfio-mdev/mbochs.c
+++ b/samples/vfio-mdev/mbochs.c
@@ -178,6 +178,8 @@ static const char *vbe_name(u32 index)
return "(invalid)";
}
+static struct page *__mbochs_get_page(struct mdev_state *mdev_state,
+ pgoff_t pgoff);
static struct page *mbochs_get_page(struct mdev_state *mdev_state,
pgoff_t pgoff);
@@ -394,7 +396,7 @@ static ssize_t mdev_access(struct mdev_device *mdev, char *buf, size_t count,
MBOCHS_MEMORY_BAR_OFFSET + mdev_state->memsize) {
pos -= MBOCHS_MMIO_BAR_OFFSET;
poff = pos & ~PAGE_MASK;
- pg = mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
+ pg = __mbochs_get_page(mdev_state, pos >> PAGE_SHIFT);
map = kmap(pg);
if (is_write)
memcpy(map + poff, buf, count);
@@ -657,7 +659,7 @@ static void mbochs_put_pages(struct mdev_state *mdev_state)
dev_dbg(dev, "%s: %d pages released\n", __func__, count);
}
-static int mbochs_region_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_region_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mdev_state *mdev_state = vma->vm_private_data;
@@ -695,7 +697,7 @@ static int mbochs_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
return 0;
}
-static int mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
+static vm_fault_t mbochs_dmabuf_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct mbochs_dmabuf *dmabuf = vma->vm_private_data;
@@ -803,29 +805,26 @@ static void mbochs_release_dmabuf(struct dma_buf *buf)
mutex_unlock(&mdev_state->ops_lock);
}
-static void *mbochs_kmap_atomic_dmabuf(struct dma_buf *buf,
- unsigned long page_num)
+static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
{
struct mbochs_dmabuf *dmabuf = buf->priv;
struct page *page = dmabuf->pages[page_num];
- return kmap_atomic(page);
+ return kmap(page);
}
-static void *mbochs_kmap_dmabuf(struct dma_buf *buf, unsigned long page_num)
+static void mbochs_kunmap_dmabuf(struct dma_buf *buf, unsigned long page_num,
+ void *vaddr)
{
- struct mbochs_dmabuf *dmabuf = buf->priv;
- struct page *page = dmabuf->pages[page_num];
-
- return kmap(page);
+ kunmap(vaddr);
}
static struct dma_buf_ops mbochs_dmabuf_ops = {
.map_dma_buf = mbochs_map_dmabuf,
.unmap_dma_buf = mbochs_unmap_dmabuf,
.release = mbochs_release_dmabuf,
- .map_atomic = mbochs_kmap_atomic_dmabuf,
.map = mbochs_kmap_dmabuf,
+ .unmap = mbochs_kunmap_dmabuf,
.mmap = mbochs_mmap_dmabuf,
};
diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include
index c8156d61678c..86321f06461e 100644
--- a/scripts/Kbuild.include
+++ b/scripts/Kbuild.include
@@ -214,7 +214,7 @@ hdr-inst := -f $(srctree)/scripts/Makefile.headersinst obj
# Prefix -I with $(srctree) if it is not an absolute path.
# skip if -I has no parameter
addtree = $(if $(patsubst -I%,%,$(1)), \
-$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)))
+$(if $(filter-out -I/% -I./% -I../%,$(1)),$(patsubst -I%,-I$(srctree)/%,$(1)),$(1)),$(1))
# Find all -I options and call addtree
flags = $(foreach o,$($(1)),$(if $(filter -I%,$(o)),$(call addtree,$(o)),$(o)))
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 34d9e9ce97c2..514ed63ff571 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -239,6 +239,7 @@ cmd_record_mcount = \
"$(CC_FLAGS_FTRACE)" ]; then \
$(sub_cmd_record_mcount) \
fi;
+endif # -record-mcount
endif # CONFIG_FTRACE_MCOUNT_RECORD
ifdef CONFIG_STACK_VALIDATION
@@ -263,7 +264,6 @@ ifneq ($(RETPOLINE_CFLAGS),)
objtool_args += --retpoline
endif
endif
-endif
ifdef CONFIG_MODVERSIONS
@@ -590,7 +590,4 @@ endif
# We never want them to be removed automatically.
.SECONDARY: $(targets)
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable se we can use it in if_changed and friends.
-
.PHONY: $(PHONY)
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 808d09f27ad4..17ef94c635cd 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -88,7 +88,4 @@ PHONY += $(subdir-ymn)
$(subdir-ymn):
$(Q)$(MAKE) $(clean)=$@
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable se we can use it in if_changed and friends.
-
.PHONY: $(PHONY)
diff --git a/scripts/Makefile.modbuiltin b/scripts/Makefile.modbuiltin
index a763b4775d06..40867a41615b 100644
--- a/scripts/Makefile.modbuiltin
+++ b/scripts/Makefile.modbuiltin
@@ -54,8 +54,4 @@ PHONY += $(subdir-ym)
$(subdir-ym):
$(Q)$(MAKE) $(modbuiltin)=$@
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable se we can use it in if_changed and friends.
-
.PHONY: $(PHONY)
diff --git a/scripts/Makefile.modinst b/scripts/Makefile.modinst
index 51ca0244fc8a..ff5ca9817a85 100644
--- a/scripts/Makefile.modinst
+++ b/scripts/Makefile.modinst
@@ -35,8 +35,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
$(modules):
$(call cmd,modules_install,$(MODLIB)/$(modinst_dir))
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable so we can use it in if_changed and friends.
-
.PHONY: $(PHONY)
diff --git a/scripts/Makefile.modpost b/scripts/Makefile.modpost
index df4174405feb..dd92dbbbaa68 100644
--- a/scripts/Makefile.modpost
+++ b/scripts/Makefile.modpost
@@ -149,8 +149,4 @@ ifneq ($(cmd_files),)
include $(cmd_files)
endif
-
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable se we can use it in if_changed and friends.
-
.PHONY: $(PHONY)
diff --git a/scripts/Makefile.modsign b/scripts/Makefile.modsign
index 171483bc0538..da56aa78d245 100644
--- a/scripts/Makefile.modsign
+++ b/scripts/Makefile.modsign
@@ -27,7 +27,4 @@ modinst_dir = $(if $(KBUILD_EXTMOD),$(ext-mod-dir),kernel/$(@D))
$(modules):
$(call cmd,sign_ko,$(MODLIB)/$(modinst_dir))
-# Declare the contents of the .PHONY variable as phony. We keep that
-# information in a variable se we can use it in if_changed and friends.
-
.PHONY: $(PHONY)
diff --git a/scripts/cc-can-link.sh b/scripts/cc-can-link.sh
index 208eb2825dab..6efcead31989 100755
--- a/scripts/cc-can-link.sh
+++ b/scripts/cc-can-link.sh
@@ -1,7 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1 && echo "y"
+cat << "END" | $@ -x c - -o /dev/null >/dev/null 2>&1
#include <stdio.h>
int main(void)
{
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index e3b7362b0ee4..447857ffaf6b 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2606,12 +2606,6 @@ sub process {
"A patch subject line should describe the change not the tool that found it\n" . $herecurr);
}
-# Check for old stable address
- if ($line =~ /^\s*cc:\s*.*<?\bstable\@kernel\.org\b>?.*$/i) {
- ERROR("STABLE_ADDRESS",
- "The 'stable' address should be 'stable\@vger.kernel.org'\n" . $herecurr);
- }
-
# Check for unwanted Gerrit info
if ($in_commit_log && $line =~ /^\s*change-id:/i) {
ERROR("GERRIT_CHANGE_ID",
@@ -5819,14 +5813,14 @@ sub process {
defined $stat &&
$stat =~ /^\+(?![^\{]*\{\s*).*\b(\w+)\s*\(.*$String\s*,/s &&
$1 !~ /^_*volatile_*$/) {
- my $specifier;
- my $extension;
- my $bad_specifier = "";
my $stat_real;
my $lc = $stat =~ tr@\n@@;
$lc = $lc + $linenr;
for (my $count = $linenr; $count <= $lc; $count++) {
+ my $specifier;
+ my $extension;
+ my $bad_specifier = "";
my $fmt = get_quoted_string($lines[$count - 1], raw_line($count, 0));
$fmt =~ s/%%//g;
diff --git a/scripts/extract-vmlinux b/scripts/extract-vmlinux
index 5061abcc2540..e6239f39abad 100755
--- a/scripts/extract-vmlinux
+++ b/scripts/extract-vmlinux
@@ -57,6 +57,8 @@ try_decompress '\3757zXZ\000' abcde unxz
try_decompress 'BZh' xy bunzip2
try_decompress '\135\0\0\0' xxx unlzma
try_decompress '\211\114\132' xy 'lzop -d'
+try_decompress '\002!L\030' xxx 'lz4 -d'
+try_decompress '(\265/\375' xxx unzstd
# Bail out:
echo "$me: Cannot find vmlinux." >&2
diff --git a/scripts/gcc-x86_64-has-stack-protector.sh b/scripts/gcc-x86_64-has-stack-protector.sh
index 3755af0cd9f7..75e4e22b986a 100755
--- a/scripts/gcc-x86_64-has-stack-protector.sh
+++ b/scripts/gcc-x86_64-has-stack-protector.sh
@@ -1,4 +1,4 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
-echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
+echo "int foo(void) { char X[200]; return 3; }" | $* -S -x c -c -m64 -O0 -mcmodel=kernel -fno-PIE -fstack-protector - -o - 2> /dev/null | grep -q "%gs"
diff --git a/scripts/kconfig/expr.h b/scripts/kconfig/expr.h
index 94a383b21df6..f63b41b0dd49 100644
--- a/scripts/kconfig/expr.h
+++ b/scripts/kconfig/expr.h
@@ -171,6 +171,9 @@ struct symbol {
* config BAZ
* int "BAZ Value"
* range 1..255
+ *
+ * Please, also check zconf.y:print_symbol() when modifying the
+ * list of property types!
*/
enum prop_type {
P_UNKNOWN,
diff --git a/scripts/kconfig/preprocess.c b/scripts/kconfig/preprocess.c
index 65da87fce907..5ca2df790d3c 100644
--- a/scripts/kconfig/preprocess.c
+++ b/scripts/kconfig/preprocess.c
@@ -156,7 +156,7 @@ static char *do_shell(int argc, char *argv[])
nread--;
/* remove trailing new lines */
- while (buf[nread - 1] == '\n')
+ while (nread > 0 && buf[nread - 1] == '\n')
nread--;
buf[nread] = 0;
diff --git a/scripts/kconfig/zconf.y b/scripts/kconfig/zconf.y
index 6f9b0aa32a82..4b68272ebdb9 100644
--- a/scripts/kconfig/zconf.y
+++ b/scripts/kconfig/zconf.y
@@ -31,7 +31,7 @@ struct symbol *symbol_hash[SYMBOL_HASHSIZE];
static struct menu *current_menu, *current_entry;
%}
-%expect 32
+%expect 31
%union
{
@@ -337,7 +337,7 @@ choice_block:
/* if entry */
-if_entry: T_IF expr nl
+if_entry: T_IF expr T_EOL
{
printd(DEBUG_PARSE, "%s:%d:if\n", zconf_curname(), zconf_lineno());
menu_add_entry(NULL);
@@ -717,6 +717,10 @@ static void print_symbol(FILE *out, struct menu *menu)
print_quoted_string(out, prop->text);
fputc('\n', out);
break;
+ case P_SYMBOL:
+ fputs( " symbol ", out);
+ fprintf(out, "%s\n", prop->sym->name);
+ break;
default:
fprintf(out, " unknown prop %d!\n", prop->type);
break;
diff --git a/scripts/tags.sh b/scripts/tags.sh
index 66f08bb1cce9..26de7d5aa5c8 100755
--- a/scripts/tags.sh
+++ b/scripts/tags.sh
@@ -152,6 +152,7 @@ regex_asm=(
)
regex_c=(
'/^SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/sys_\1/'
+ '/^BPF_CALL_[0-9](\([[:alnum:]_]*\).*/\1/'
'/^COMPAT_SYSCALL_DEFINE[0-9](\([[:alnum:]_]*\).*/compat_sys_\1/'
'/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1/'
'/^TRACE_EVENT(\([[:alnum:]_]*\).*/trace_\1_rcuidle/'
@@ -245,7 +246,7 @@ exuberant()
{
setup_regex exuberant asm c
all_target_sources | xargs $1 -a \
- -I __initdata,__exitdata,__initconst, \
+ -I __initdata,__exitdata,__initconst,__ro_after_init \
-I __initdata_memblock \
-I __refdata,__attribute,__maybe_unused,__always_unused \
-I __acquires,__releases,__deprecated \
diff --git a/security/keys/dh.c b/security/keys/dh.c
index f7403821db7f..b203f7758f97 100644
--- a/security/keys/dh.c
+++ b/security/keys/dh.c
@@ -142,6 +142,8 @@ static void kdf_dealloc(struct kdf_sdesc *sdesc)
* The src pointer is defined as Z || other info where Z is the shared secret
* from DH and other info is an arbitrary string (see SP800-56A section
* 5.8.1.2).
+ *
+ * 'dlen' must be a multiple of the digest size.
*/
static int kdf_ctr(struct kdf_sdesc *sdesc, const u8 *src, unsigned int slen,
u8 *dst, unsigned int dlen, unsigned int zlen)
@@ -205,8 +207,8 @@ static int keyctl_dh_compute_kdf(struct kdf_sdesc *sdesc,
{
uint8_t *outbuf = NULL;
int ret;
- size_t outbuf_len = round_up(buflen,
- crypto_shash_digestsize(sdesc->shash.tfm));
+ size_t outbuf_len = roundup(buflen,
+ crypto_shash_digestsize(sdesc->shash.tfm));
outbuf = kmalloc(outbuf_len, GFP_KERNEL);
if (!outbuf) {
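The dh.c hunk above replaces round_up() with roundup(): round_up() is only valid when the alignment is a power of two, while a hash digest size (for example 48 bytes for SHA-384) usually is not, and kdf_ctr() requires dlen to be a multiple of the digest size. A userspace illustration, with the two macros restated here in simplified form for the demo:

    #include <stdio.h>

    /* Simplified restatements of the kernel macros, for demonstration only. */
    #define round_up(x, y)  ((((x) - 1) | ((y) - 1)) + 1)      /* y must be a power of two */
    #define roundup(x, y)   ((((x) + (y) - 1) / (y)) * (y))    /* works for any y */

    int main(void)
    {
        unsigned int buflen = 64, digest = 48;  /* e.g. SHA-384 digest size */

        printf("round_up(%u, %u) = %u\n", buflen, digest, round_up(buflen, digest)); /* 64, not a multiple of 48 */
        printf("roundup(%u, %u)  = %u\n", buflen, digest, roundup(buflen, digest));  /* 96 */
        return 0;
    }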
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index f3d374d2ca04..79d3709b0671 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -441,22 +441,16 @@ static int sel_release_policy(struct inode *inode, struct file *filp)
static ssize_t sel_read_policy(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
- struct selinux_fs_info *fsi = file_inode(filp)->i_sb->s_fs_info;
struct policy_load_memory *plm = filp->private_data;
int ret;
- mutex_lock(&fsi->mutex);
-
ret = avc_has_perm(&selinux_state,
current_sid(), SECINITSID_SECURITY,
SECCLASS_SECURITY, SECURITY__READ_POLICY, NULL);
if (ret)
- goto out;
+ return ret;
- ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
-out:
- mutex_unlock(&fsi->mutex);
- return ret;
+ return simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
}
static vm_fault_t sel_mmap_policy_fault(struct vm_fault *vmf)
@@ -1188,25 +1182,29 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
ret = -EINVAL;
if (index >= fsi->bool_num || strcmp(name,
fsi->bool_pending_names[index]))
- goto out;
+ goto out_unlock;
ret = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
if (!page)
- goto out;
+ goto out_unlock;
cur_enforcing = security_get_bool_value(fsi->state, index);
if (cur_enforcing < 0) {
ret = cur_enforcing;
- goto out;
+ goto out_unlock;
}
length = scnprintf(page, PAGE_SIZE, "%d %d", cur_enforcing,
fsi->bool_pending_values[index]);
- ret = simple_read_from_buffer(buf, count, ppos, page, length);
-out:
mutex_unlock(&fsi->mutex);
+ ret = simple_read_from_buffer(buf, count, ppos, page, length);
+out_free:
free_page((unsigned long)page);
return ret;
+
+out_unlock:
+ mutex_unlock(&fsi->mutex);
+ goto out_free;
}
static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
@@ -1219,6 +1217,17 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
mutex_lock(&fsi->mutex);
length = avc_has_perm(&selinux_state,
@@ -1233,22 +1242,6 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
fsi->bool_pending_names[index]))
goto out;
- length = -ENOMEM;
- if (count >= PAGE_SIZE)
- goto out;
-
- /* No partial writes. */
- length = -EINVAL;
- if (*ppos != 0)
- goto out;
-
- page = memdup_user_nul(buf, count);
- if (IS_ERR(page)) {
- length = PTR_ERR(page);
- page = NULL;
- goto out;
- }
-
length = -EINVAL;
if (sscanf(page, "%d", &new_value) != 1)
goto out;
@@ -1280,6 +1273,17 @@ static ssize_t sel_commit_bools_write(struct file *filep,
ssize_t length;
int new_value;
+ if (count >= PAGE_SIZE)
+ return -ENOMEM;
+
+ /* No partial writes. */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ page = memdup_user_nul(buf, count);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
mutex_lock(&fsi->mutex);
length = avc_has_perm(&selinux_state,
@@ -1289,22 +1293,6 @@ static ssize_t sel_commit_bools_write(struct file *filep,
if (length)
goto out;
- length = -ENOMEM;
- if (count >= PAGE_SIZE)
- goto out;
-
- /* No partial writes. */
- length = -EINVAL;
- if (*ppos != 0)
- goto out;
-
- page = memdup_user_nul(buf, count);
- if (IS_ERR(page)) {
- length = PTR_ERR(page);
- page = NULL;
- goto out;
- }
-
length = -EINVAL;
if (sscanf(page, "%d", &new_value) != 1)
goto out;
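The selinuxfs hunks above move the size check, the partial-write check and memdup_user_nul() in front of mutex_lock(), so the user copy never happens with the fs mutex held and the error paths stay simple. The resulting shape of such a write handler (rough sketch with hypothetical names, not the selinuxfs code):

    #include <linux/err.h>
    #include <linux/fs.h>
    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/mutex.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    static DEFINE_MUTEX(foo_mutex);

    static ssize_t foo_write(struct file *filep, const char __user *buf,
                             size_t count, loff_t *ppos)
    {
        char *page;
        ssize_t length;
        int new_value;

        if (count >= PAGE_SIZE)
            return -ENOMEM;
        if (*ppos != 0)                 /* no partial writes */
            return -EINVAL;

        /* Copy and NUL-terminate the user data before taking any lock. */
        page = memdup_user_nul(buf, count);
        if (IS_ERR(page))
            return PTR_ERR(page);

        mutex_lock(&foo_mutex);

        length = -EINVAL;
        if (sscanf(page, "%d", &new_value) != 1)
            goto out;

        /* ... apply new_value under the lock ... */
        length = count;
    out:
        mutex_unlock(&foo_mutex);
        kfree(page);
        return length;
    }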
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 7ad226018f51..19de675d4504 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -2296,6 +2296,7 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
struct smack_known *skp = smk_of_task_struct(p);
isp->smk_inode = skp;
+ isp->smk_flags |= SMK_INODE_INSTANT;
}
/*
diff --git a/sound/core/rawmidi.c b/sound/core/rawmidi.c
index 69616d00481c..b53026a72e73 100644
--- a/sound/core/rawmidi.c
+++ b/sound/core/rawmidi.c
@@ -635,7 +635,7 @@ static int snd_rawmidi_info_select_user(struct snd_card *card,
int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
- char *newbuf;
+ char *newbuf, *oldbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
if (substream->append && substream->use_count > 1)
@@ -648,13 +648,17 @@ int snd_rawmidi_output_params(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
- newbuf = krealloc(runtime->buffer, params->buffer_size,
- GFP_KERNEL);
+ newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
+ spin_lock_irq(&runtime->lock);
+ oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
runtime->avail = runtime->buffer_size;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
+ spin_unlock_irq(&runtime->lock);
+ kfree(oldbuf);
}
runtime->avail_min = params->avail_min;
substream->active_sensing = !params->no_active_sensing;
@@ -665,7 +669,7 @@ EXPORT_SYMBOL(snd_rawmidi_output_params);
int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
struct snd_rawmidi_params * params)
{
- char *newbuf;
+ char *newbuf, *oldbuf;
struct snd_rawmidi_runtime *runtime = substream->runtime;
snd_rawmidi_drain_input(substream);
@@ -676,12 +680,16 @@ int snd_rawmidi_input_params(struct snd_rawmidi_substream *substream,
return -EINVAL;
}
if (params->buffer_size != runtime->buffer_size) {
- newbuf = krealloc(runtime->buffer, params->buffer_size,
- GFP_KERNEL);
+ newbuf = kmalloc(params->buffer_size, GFP_KERNEL);
if (!newbuf)
return -ENOMEM;
+ spin_lock_irq(&runtime->lock);
+ oldbuf = runtime->buffer;
runtime->buffer = newbuf;
runtime->buffer_size = params->buffer_size;
+ runtime->appl_ptr = runtime->hw_ptr = 0;
+ spin_unlock_irq(&runtime->lock);
+ kfree(oldbuf);
}
runtime->avail_min = params->avail_min;
return 0;
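The rawmidi fix above stops calling krealloc() on a buffer that readers and writers may still be using; instead it allocates the new buffer first, swaps it in under the runtime spinlock together with a reset of the ring pointers, and frees the old buffer only after the swap. The pattern in isolation (sketch, hypothetical structure):

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    struct foo_runtime {
        spinlock_t lock;        /* protects buffer, size and ring pointers */
        char *buffer;
        size_t size;
        size_t appl_ptr, hw_ptr;
    };

    static int foo_resize_buffer(struct foo_runtime *rt, size_t new_size)
    {
        char *newbuf, *oldbuf;

        newbuf = kmalloc(new_size, GFP_KERNEL);
        if (!newbuf)
            return -ENOMEM;

        spin_lock_irq(&rt->lock);
        oldbuf = rt->buffer;
        rt->buffer = newbuf;
        rt->size = new_size;
        rt->appl_ptr = rt->hw_ptr = 0;  /* old offsets mean nothing in the new buffer */
        spin_unlock_irq(&rt->lock);

        kfree(oldbuf);                  /* no lock holder can still see it */
        return 0;
    }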
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 61a07fe34cd2..56ca78423040 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -2004,7 +2004,8 @@ static int snd_seq_ioctl_query_next_client(struct snd_seq_client *client,
struct snd_seq_client *cptr = NULL;
/* search for next client */
- info->client++;
+ if (info->client < INT_MAX)
+ info->client++;
if (info->client < 0)
info->client = 0;
for (; info->client < SNDRV_SEQ_MAX_CLIENTS; info->client++) {
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 665089c45560..b6f076bbc72d 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -1520,7 +1520,7 @@ static int snd_timer_user_next_device(struct snd_timer_id __user *_tid)
} else {
if (id.subdevice < 0)
id.subdevice = 0;
- else
+ else if (id.subdevice < INT_MAX)
id.subdevice++;
}
}
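Both ALSA fixes above bound an "advance to the next index" increment with INT_MAX, so repeated ioctl queries cannot push a signed int past its maximum (signed overflow is undefined behaviour in C). The guarded increment on its own, as a userspace demo:

    #include <limits.h>
    #include <stdio.h>

    /* Advance a search cursor without ever overflowing a signed int. */
    static int next_index(int cur)
    {
        if (cur < INT_MAX)
            cur++;
        return cur;
    }

    int main(void)
    {
        printf("%d -> %d\n", 5, next_index(5));                 /* 5 -> 6 */
        printf("%d -> %d\n", INT_MAX, next_index(INT_MAX));     /* saturates */
        return 0;
    }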
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index d91c87e41756..20a171ac4bb2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2899,8 +2899,9 @@ static int hda_codec_runtime_suspend(struct device *dev)
list_for_each_entry(pcm, &codec->pcm_list_head, list)
snd_pcm_suspend_all(pcm->pcm);
state = hda_call_codec_suspend(codec);
- if (codec_has_clkstop(codec) && codec_has_epss(codec) &&
- (state & AC_PWRST_CLK_STOP_OK))
+ if (codec->link_down_at_suspend ||
+ (codec_has_clkstop(codec) && codec_has_epss(codec) &&
+ (state & AC_PWRST_CLK_STOP_OK)))
snd_hdac_codec_link_down(&codec->core);
snd_hdac_link_power(&codec->core, false);
return 0;
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 681c360f29f9..a8b1b31f161c 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -258,6 +258,7 @@ struct hda_codec {
unsigned int power_save_node:1; /* advanced PM for each widget */
unsigned int auto_runtime_pm:1; /* enable automatic codec runtime pm */
unsigned int force_pin_prefix:1; /* Add location prefix */
+ unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
#ifdef CONFIG_PM
unsigned long power_on_acct;
unsigned long power_off_acct;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 04e949aa01ad..321e95c409c1 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -991,6 +991,7 @@ struct ca0132_spec {
enum {
QUIRK_NONE,
QUIRK_ALIENWARE,
+ QUIRK_ALIENWARE_M17XR4,
QUIRK_SBZ,
QUIRK_R3DI,
};
@@ -1040,13 +1041,15 @@ static const struct hda_pintbl r3di_pincfgs[] = {
};
static const struct snd_pci_quirk ca0132_quirks[] = {
+ SND_PCI_QUIRK(0x1028, 0x057b, "Alienware M17x R4", QUIRK_ALIENWARE_M17XR4),
SND_PCI_QUIRK(0x1028, 0x0685, "Alienware 15 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0688, "Alienware 17 2015", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1028, 0x0708, "Alienware 15 R2 2016", QUIRK_ALIENWARE),
SND_PCI_QUIRK(0x1102, 0x0010, "Sound Blaster Z", QUIRK_SBZ),
SND_PCI_QUIRK(0x1102, 0x0023, "Sound Blaster Z", QUIRK_SBZ),
SND_PCI_QUIRK(0x1458, 0xA016, "Recon3Di", QUIRK_R3DI),
- SND_PCI_QUIRK(0x1458, 0xA036, "Recon3Di", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1458, 0xA026, "Gigabyte G1.Sniper Z97", QUIRK_R3DI),
+ SND_PCI_QUIRK(0x1458, 0xA036, "Gigabyte GA-Z170X-Gaming 7", QUIRK_R3DI),
{}
};
@@ -5663,7 +5666,7 @@ static const char * const ca0132_alt_slave_pfxs[] = {
* I think this has to do with the pin for rear surround being 0x11,
* and the center/lfe being 0x10. Usually the pin order is the opposite.
*/
-const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
+static const struct snd_pcm_chmap_elem ca0132_alt_chmaps[] = {
{ .channels = 2,
.map = { SNDRV_CHMAP_FL, SNDRV_CHMAP_FR } },
{ .channels = 4,
@@ -5966,7 +5969,7 @@ static int ca0132_build_pcms(struct hda_codec *codec)
info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0];
/* With the DSP enabled, desktops don't use this ADC. */
- if (spec->use_alt_functions) {
+ if (!spec->use_alt_functions) {
info = snd_hda_codec_pcm_new(codec, "CA0132 Analog Mic-In2");
if (!info)
return -ENOMEM;
@@ -6130,7 +6133,10 @@ static void ca0132_init_dmic(struct hda_codec *codec)
* Bit 6: set to select Data2, clear for Data1
* Bit 7: set to enable DMic, clear for AMic
*/
- val = 0x23;
+ if (spec->quirk == QUIRK_ALIENWARE_M17XR4)
+ val = 0x33;
+ else
+ val = 0x23;
	/* keep a copy of dmic ctl val for enable/disable dmic purpose */
spec->dmic_ctl = val;
snd_hda_codec_write(codec, spec->input_pins[0], 0,
@@ -7223,7 +7229,7 @@ static int ca0132_init(struct hda_codec *codec)
snd_hda_sequence_write(codec, spec->base_init_verbs);
- if (spec->quirk != QUIRK_NONE)
+ if (spec->use_alt_functions)
ca0132_alt_init(codec);
ca0132_download_dsp(codec);
@@ -7237,8 +7243,9 @@ static int ca0132_init(struct hda_codec *codec)
case QUIRK_R3DI:
r3di_setup_defaults(codec);
break;
- case QUIRK_NONE:
- case QUIRK_ALIENWARE:
+ case QUIRK_SBZ:
+ break;
+ default:
ca0132_setup_defaults(codec);
ca0132_init_analog_mic2(codec);
ca0132_init_dmic(codec);
@@ -7343,7 +7350,6 @@ static const struct hda_codec_ops ca0132_patch_ops = {
static void ca0132_config(struct hda_codec *codec)
{
struct ca0132_spec *spec = codec->spec;
- struct auto_pin_cfg *cfg = &spec->autocfg;
spec->dacs[0] = 0x2;
spec->dacs[1] = 0x3;
@@ -7405,12 +7411,7 @@ static void ca0132_config(struct hda_codec *codec)
/* SPDIF I/O */
spec->dig_out = 0x05;
spec->multiout.dig_out_nid = spec->dig_out;
- cfg->dig_out_pins[0] = 0x0c;
- cfg->dig_outs = 1;
- cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
spec->dig_in = 0x09;
- cfg->dig_in_pin = 0x0e;
- cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
break;
case QUIRK_R3DI:
codec_dbg(codec, "%s: QUIRK_R3DI applied.\n", __func__);
@@ -7438,9 +7439,6 @@ static void ca0132_config(struct hda_codec *codec)
/* SPDIF I/O */
spec->dig_out = 0x05;
spec->multiout.dig_out_nid = spec->dig_out;
- cfg->dig_out_pins[0] = 0x0c;
- cfg->dig_outs = 1;
- cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
break;
default:
spec->num_outputs = 2;
@@ -7463,12 +7461,7 @@ static void ca0132_config(struct hda_codec *codec)
/* SPDIF I/O */
spec->dig_out = 0x05;
spec->multiout.dig_out_nid = spec->dig_out;
- cfg->dig_out_pins[0] = 0x0c;
- cfg->dig_outs = 1;
- cfg->dig_out_type[0] = HDA_PCM_TYPE_SPDIF;
spec->dig_in = 0x09;
- cfg->dig_in_pin = 0x0e;
- cfg->dig_in_type = HDA_PCM_TYPE_SPDIF;
break;
}
}
@@ -7476,7 +7469,7 @@ static void ca0132_config(struct hda_codec *codec)
static int ca0132_prepare_verbs(struct hda_codec *codec)
{
/* Verbs + terminator (an empty element) */
-#define NUM_SPEC_VERBS 4
+#define NUM_SPEC_VERBS 2
struct ca0132_spec *spec = codec->spec;
spec->chip_init_verbs = ca0132_init_verbs0;
@@ -7488,34 +7481,24 @@ static int ca0132_prepare_verbs(struct hda_codec *codec)
if (!spec->spec_init_verbs)
return -ENOMEM;
- /* HP jack autodetection */
- spec->spec_init_verbs[0].nid = spec->unsol_tag_hp;
- spec->spec_init_verbs[0].param = AC_VERB_SET_UNSOLICITED_ENABLE;
- spec->spec_init_verbs[0].verb = AC_USRSP_EN | spec->unsol_tag_hp;
-
- /* MIC1 jack autodetection */
- spec->spec_init_verbs[1].nid = spec->unsol_tag_amic1;
- spec->spec_init_verbs[1].param = AC_VERB_SET_UNSOLICITED_ENABLE;
- spec->spec_init_verbs[1].verb = AC_USRSP_EN | spec->unsol_tag_amic1;
-
/* config EAPD */
- spec->spec_init_verbs[2].nid = 0x0b;
- spec->spec_init_verbs[2].param = 0x78D;
- spec->spec_init_verbs[2].verb = 0x00;
+ spec->spec_init_verbs[0].nid = 0x0b;
+ spec->spec_init_verbs[0].param = 0x78D;
+ spec->spec_init_verbs[0].verb = 0x00;
/* Previously commented configuration */
/*
- spec->spec_init_verbs[3].nid = 0x0b;
- spec->spec_init_verbs[3].param = AC_VERB_SET_EAPD_BTLENABLE;
+ spec->spec_init_verbs[2].nid = 0x0b;
+ spec->spec_init_verbs[2].param = AC_VERB_SET_EAPD_BTLENABLE;
+ spec->spec_init_verbs[2].verb = 0x02;
+
+ spec->spec_init_verbs[3].nid = 0x10;
+ spec->spec_init_verbs[3].param = 0x78D;
spec->spec_init_verbs[3].verb = 0x02;
spec->spec_init_verbs[4].nid = 0x10;
- spec->spec_init_verbs[4].param = 0x78D;
+ spec->spec_init_verbs[4].param = AC_VERB_SET_EAPD_BTLENABLE;
spec->spec_init_verbs[4].verb = 0x02;
-
- spec->spec_init_verbs[5].nid = 0x10;
- spec->spec_init_verbs[5].param = AC_VERB_SET_EAPD_BTLENABLE;
- spec->spec_init_verbs[5].verb = 0x02;
*/
/* Terminator: spec->spec_init_verbs[NUM_SPEC_VERBS-1] */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index e7fcfc3b8885..f641c20095f7 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -964,6 +964,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
SND_PCI_QUIRK(0x103c, 0x8115, "HP Z1 Gen3", CXT_FIXUP_HP_GATE_MIC),
SND_PCI_QUIRK(0x103c, 0x814f, "HP ZBook 15u G3", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x822e, "HP ProBook 440 G4", CXT_FIXUP_MUTE_LED_GPIO),
+ SND_PCI_QUIRK(0x103c, 0x836e, "HP ProBook 455 G5", CXT_FIXUP_MUTE_LED_GPIO),
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 8840daf9c6a3..8a49415aebac 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -33,6 +33,7 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
+#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/asoundef.h>
@@ -764,8 +765,10 @@ static void check_presence_and_report(struct hda_codec *codec, hda_nid_t nid,
if (pin_idx < 0)
return;
+ mutex_lock(&spec->pcm_lock);
if (hdmi_present_sense(get_pin(spec, pin_idx), 1))
snd_hda_jack_report_sync(codec);
+ mutex_unlock(&spec->pcm_lock);
}
static void jack_callback(struct hda_codec *codec,
@@ -1628,21 +1631,23 @@ static void sync_eld_via_acomp(struct hda_codec *codec,
static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
{
struct hda_codec *codec = per_pin->codec;
- struct hdmi_spec *spec = codec->spec;
int ret;
/* no temporary power up/down needed for component notifier */
- if (!codec_has_acomp(codec))
- snd_hda_power_up_pm(codec);
+ if (!codec_has_acomp(codec)) {
+ ret = snd_hda_power_up_pm(codec);
+ if (ret < 0 && pm_runtime_suspended(hda_codec_dev(codec))) {
+ snd_hda_power_down_pm(codec);
+ return false;
+ }
+ }
- mutex_lock(&spec->pcm_lock);
if (codec_has_acomp(codec)) {
sync_eld_via_acomp(codec, per_pin);
ret = false; /* don't call snd_hda_jack_report_sync() */
} else {
ret = hdmi_present_sense_via_verbs(per_pin, repoll);
}
- mutex_unlock(&spec->pcm_lock);
if (!codec_has_acomp(codec))
snd_hda_power_down_pm(codec);
@@ -1654,12 +1659,16 @@ static void hdmi_repoll_eld(struct work_struct *work)
{
struct hdmi_spec_per_pin *per_pin =
container_of(to_delayed_work(work), struct hdmi_spec_per_pin, work);
+ struct hda_codec *codec = per_pin->codec;
+ struct hdmi_spec *spec = codec->spec;
if (per_pin->repoll_count++ > 6)
per_pin->repoll_count = 0;
+ mutex_lock(&spec->pcm_lock);
if (hdmi_present_sense(per_pin, per_pin->repoll_count))
snd_hda_jack_report_sync(per_pin->codec);
+ mutex_unlock(&spec->pcm_lock);
}
static void intel_haswell_fixup_connect_list(struct hda_codec *codec,
@@ -3741,6 +3750,11 @@ static int patch_atihdmi(struct hda_codec *codec)
spec->chmap.channels_max = max(spec->chmap.channels_max, 8u);
+ /* AMD GPUs have neither EPSS nor CLKSTOP bits, hence preventing
+ * the link-down as is. Tell the core to allow it.
+ */
+ codec->link_down_at_suspend = 1;
+
return 0;
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index e9bd33ea538f..f6af3e1c2b93 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2366,6 +2366,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK(0x1558, 0x9501, "Clevo P950HR", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1558, 0x95e1, "Clevo P95xER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1558, 0x95e2, "Clevo P950ER", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -2545,6 +2546,7 @@ static const struct snd_pci_quirk alc262_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1397, "Fujitsu Lifebook S7110", ALC262_FIXUP_FSC_S7110),
SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FIXUP_BENQ),
SND_PCI_QUIRK(0x10f1, 0x2915, "Tyan Thunder n6650W", ALC262_FIXUP_TYAN),
+ SND_PCI_QUIRK(0x1734, 0x1141, "FSC ESPRIMO U9210", ALC262_FIXUP_FSC_H270),
SND_PCI_QUIRK(0x1734, 0x1147, "FSC Celsius H270", ALC262_FIXUP_FSC_H270),
SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000", ALC262_FIXUP_LENOVO_3000),
SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_FIXUP_BENQ),
@@ -4995,7 +4997,6 @@ static void alc_fixup_tpt440_dock(struct hda_codec *codec,
struct alc_spec *spec = codec->spec;
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
- spec->shutup = alc_no_shutup; /* reduce click noise */
spec->reboot_notify = alc_d3_at_reboot; /* reduce noise */
spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
codec->power_save_node = 0; /* avoid click noises */
@@ -5394,6 +5395,13 @@ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
/* for hda_fixup_thinkpad_acpi() */
#include "thinkpad_helper.c"
+static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ alc_fixup_no_shutup(codec, fix, action); /* reduce click noise */
+ hda_fixup_thinkpad_acpi(codec, fix, action);
+}
+
/* for dell wmi mic mute led */
#include "dell_wmi_helper.c"
@@ -5946,7 +5954,7 @@ static const struct hda_fixup alc269_fixups[] = {
},
[ALC269_FIXUP_THINKPAD_ACPI] = {
.type = HDA_FIXUP_FUNC,
- .v.func = hda_fixup_thinkpad_acpi,
+ .v.func = alc_fixup_thinkpad_acpi,
.chained = true,
.chain_id = ALC269_FIXUP_SKU_IGNORE,
},
@@ -6562,6 +6570,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x10cf, 0x1629, "Lifebook U7x7", ALC255_FIXUP_LIFEBOOK_U7x7_HEADSET_MIC),
SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
SND_PCI_QUIRK(0x10ec, 0x10f2, "Intel Reference board", ALC700_FIXUP_INTEL_REFERENCE),
+ SND_PCI_QUIRK(0x10f7, 0x8338, "Panasonic CF-SZ6", ALC269_FIXUP_HEADSET_MODE),
SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x144d, 0xc740, "Samsung Ativ book 8 (NP870Z5G)", ALC269_FIXUP_ATIV_BOOK_8),
SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC),
@@ -6603,8 +6612,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
+ SND_PCI_QUIRK(0x17aa, 0x312a, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x312f, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
- SND_PCI_QUIRK(0x17aa, 0x3138, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x313c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
SND_PCI_QUIRK(0x17aa, 0x3902, "Lenovo E50-80", ALC269_FIXUP_DMIC_THINKPAD_ACPI),
SND_PCI_QUIRK(0x17aa, 0x3977, "IdeaPad S210", ALC283_FIXUP_INT_MIC),
@@ -6782,6 +6791,17 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
{0x14, 0x90170110},
{0x19, 0x02a11030},
{0x21, 0x02211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ {0x14, 0x90170110},
+ {0x19, 0x02a11030},
+ {0x1a, 0x02a11040},
+ {0x1b, 0x01014020},
+ {0x21, 0x0221101f}),
+ SND_HDA_PIN_QUIRK(0x10ec0235, 0x17aa, "Lenovo", ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ {0x14, 0x90170110},
+ {0x19, 0x02a11020},
+ {0x1a, 0x02a11030},
+ {0x21, 0x0221101f}),
SND_HDA_PIN_QUIRK(0x10ec0236, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
{0x12, 0x90a60140},
{0x14, 0x90170110},
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index 6c85f13ab23f..54f6252faca6 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -1018,6 +1018,7 @@ static int snd_lx6464es_create(struct snd_card *card,
chip->port_dsp_bar = pci_ioremap_bar(pci, 2);
if (!chip->port_dsp_bar) {
dev_err(card->dev, "cannot remap PCI memory region\n");
+ err = -ENOMEM;
goto remap_pci_failed;
}
diff --git a/tools/arch/arm/include/uapi/asm/kvm.h b/tools/arch/arm/include/uapi/asm/kvm.h
index caae4843cb70..16e006f708ca 100644
--- a/tools/arch/arm/include/uapi/asm/kvm.h
+++ b/tools/arch/arm/include/uapi/asm/kvm.h
@@ -91,6 +91,7 @@ struct kvm_regs {
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
#define KVM_VGIC_ITS_ADDR_TYPE 4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 04b3256f8e6d..4e76630dd655 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -91,6 +91,7 @@ struct kvm_regs {
#define KVM_VGIC_V3_ADDR_TYPE_DIST 2
#define KVM_VGIC_V3_ADDR_TYPE_REDIST 3
#define KVM_VGIC_ITS_ADDR_TYPE 4
+#define KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION 5
#define KVM_VGIC_V3_DIST_SIZE SZ_64K
#define KVM_VGIC_V3_REDIST_SIZE (2 * SZ_64K)
diff --git a/tools/arch/powerpc/include/uapi/asm/kvm.h b/tools/arch/powerpc/include/uapi/asm/kvm.h
index 833ed9a16adf..1b32b56a03d3 100644
--- a/tools/arch/powerpc/include/uapi/asm/kvm.h
+++ b/tools/arch/powerpc/include/uapi/asm/kvm.h
@@ -633,6 +633,7 @@ struct kvm_ppc_cpu_char {
#define KVM_REG_PPC_PSSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbd)
#define KVM_REG_PPC_DEC_EXPIRY (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xbe)
+#define KVM_REG_PPC_ONLINE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xbf)
/* Transactional Memory checkpointed state:
* This is all GPRs, all VSX regs and a subset of SPRs
diff --git a/tools/arch/powerpc/include/uapi/asm/unistd.h b/tools/arch/powerpc/include/uapi/asm/unistd.h
index 389c36fd8299..ac5ba55066dd 100644
--- a/tools/arch/powerpc/include/uapi/asm/unistd.h
+++ b/tools/arch/powerpc/include/uapi/asm/unistd.h
@@ -398,5 +398,6 @@
#define __NR_pkey_alloc 384
#define __NR_pkey_free 385
#define __NR_pkey_mprotect 386
+#define __NR_rseq 387
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index fb00a2fca990..5701f5cecd31 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -282,7 +282,9 @@
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
diff --git a/tools/bpf/bpftool/common.c b/tools/bpf/bpftool/common.c
index 32f9e397a6c0..3f140eff039f 100644
--- a/tools/bpf/bpftool/common.c
+++ b/tools/bpf/bpftool/common.c
@@ -217,6 +217,14 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
int err;
int fd;
+ if (argc < 3) {
+ p_err("too few arguments, id ID and FILE path is required");
+ return -1;
+ } else if (argc > 3) {
+ p_err("too many arguments");
+ return -1;
+ }
+
if (!is_prefix(*argv, "id")) {
p_err("expected 'id' got %s", *argv);
return -1;
@@ -230,9 +238,6 @@ int do_pin_any(int argc, char **argv, int (*get_fd_by_id)(__u32))
}
NEXT_ARG();
- if (argc != 1)
- usage();
-
fd = get_fd_by_id(id);
if (fd < 0) {
p_err("can't get prog by id (%u): %s", id, strerror(errno));
diff --git a/tools/bpf/bpftool/perf.c b/tools/bpf/bpftool/perf.c
index ac6b1a12c9b7..b76b77dcfd1f 100644
--- a/tools/bpf/bpftool/perf.c
+++ b/tools/bpf/bpftool/perf.c
@@ -29,9 +29,10 @@ static bool has_perf_query_support(void)
if (perf_query_supported)
goto out;
- fd = open(bin_name, O_RDONLY);
+ fd = open("/", O_RDONLY);
if (fd < 0) {
- p_err("perf_query_support: %s", strerror(errno));
+ p_err("perf_query_support: cannot open directory \"/\" (%s)",
+ strerror(errno));
goto out;
}
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index a4f435203fef..959aa53ab678 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -90,7 +90,9 @@ static void print_boot_time(__u64 nsecs, char *buf, unsigned int size)
}
wallclock_secs = (real_time_ts.tv_sec - boot_time_ts.tv_sec) +
- nsecs / 1000000000;
+ (real_time_ts.tv_nsec - boot_time_ts.tv_nsec + nsecs) /
+ 1000000000;
+
if (!localtime_r(&wallclock_secs, &load_tm)) {
snprintf(buf, size, "%llu", nsecs / 1000000000);
@@ -692,15 +694,19 @@ static int do_load(int argc, char **argv)
return -1;
}
- if (do_pin_fd(prog_fd, argv[1])) {
- p_err("failed to pin program");
- return -1;
- }
+ if (do_pin_fd(prog_fd, argv[1]))
+ goto err_close_obj;
if (json_output)
jsonw_null(json_wtr);
+ bpf_object__close(obj);
+
return 0;
+
+err_close_obj:
+ bpf_object__close(obj);
+ return -1;
}
static int do_help(int argc, char **argv)
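The bpftool change above converts a program's load time (nanoseconds since boot) to wallclock time; folding the sub-second parts of CLOCK_REALTIME and CLOCK_BOOTTIME into the division avoids an off-by-one-second result. A standalone sketch of the same conversion, assuming the two clocks as in the hunk (the helper name is made up, and this is not the bpftool code itself):

#define _GNU_SOURCE		/* for CLOCK_BOOTTIME on older glibc */
#include <stdio.h>
#include <time.h>

static time_t boottime_ns_to_wallclock(unsigned long long nsecs)
{
	struct timespec real, boot;

	clock_gettime(CLOCK_REALTIME, &real);
	clock_gettime(CLOCK_BOOTTIME, &boot);

	/* Sum the nanosecond parts before dividing, as the hunk above does,
	 * instead of truncating nsecs on its own. */
	return (real.tv_sec - boot.tv_sec) +
	       (real.tv_nsec - boot.tv_nsec + (long long)nsecs) / 1000000000LL;
}

int main(void)
{
	time_t t = boottime_ns_to_wallclock(0);	/* 0 => roughly the boot time */

	printf("%s", ctime(&t));
	return 0;
}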
diff --git a/tools/build/Build.include b/tools/build/Build.include
index a4bbb984941d..950c1504ca37 100644
--- a/tools/build/Build.include
+++ b/tools/build/Build.include
@@ -63,8 +63,8 @@ dep-cmd = $(if $(wildcard $(fixdep)),
$(fixdep) $(depfile) $@ '$(make-cmd)' > $(dot-target).tmp; \
rm -f $(depfile); \
mv -f $(dot-target).tmp $(dot-target).cmd, \
- printf '\# cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
- printf '\# using basic dep data\n\n' >> $(dot-target).cmd; \
+ printf '$(pound) cannot find fixdep (%s)\n' $(fixdep) > $(dot-target).cmd; \
+ printf '$(pound) using basic dep data\n\n' >> $(dot-target).cmd; \
cat $(depfile) >> $(dot-target).cmd; \
printf '\n%s\n' 'cmd_$@ := $(make-cmd)' >> $(dot-target).cmd)
@@ -98,4 +98,4 @@ cxx_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CXXFLAGS) -D"BUILD_STR(s)=\#s" $(CXX
###
## HOSTCC C flags
-host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(CHOSTFLAGS) -D"BUILD_STR(s)=\#s" $(CHOSTFLAGS_$(basetarget).o) $(CHOSTFLAGS_$(obj))
+host_c_flags = -Wp,-MD,$(depfile) -Wp,-MT,$@ $(HOSTCFLAGS) -D"BUILD_STR(s)=\#s" $(HOSTCFLAGS_$(basetarget).o) $(HOSTCFLAGS_$(obj))
diff --git a/tools/build/Makefile b/tools/build/Makefile
index 5eb4b5ad79cb..5edf65e684ab 100644
--- a/tools/build/Makefile
+++ b/tools/build/Makefile
@@ -43,7 +43,7 @@ $(OUTPUT)fixdep-in.o: FORCE
$(Q)$(MAKE) $(build)=fixdep
$(OUTPUT)fixdep: $(OUTPUT)fixdep-in.o
- $(QUIET_LINK)$(HOSTCC) $(LDFLAGS) -o $@ $<
+ $(QUIET_LINK)$(HOSTCC) $(HOSTLDFLAGS) -o $@ $<
FORCE:
diff --git a/tools/include/uapi/drm/drm.h b/tools/include/uapi/drm/drm.h
index 6fdff5945c8a..9c660e1688ab 100644
--- a/tools/include/uapi/drm/drm.h
+++ b/tools/include/uapi/drm/drm.h
@@ -680,6 +680,13 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ATOMIC 3
+/**
+ * DRM_CLIENT_CAP_ASPECT_RATIO
+ *
+ * If set to 1, the DRM core will provide aspect ratio information in modes.
+ */
+#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index e0b06784f227..59b19b6a40d7 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -2630,7 +2630,7 @@ struct bpf_fib_lookup {
union {
/* inputs to lookup */
__u8 tos; /* AF_INET */
- __be32 flowlabel; /* AF_INET6 */
+ __be32 flowinfo; /* AF_INET6, flow_label + priority */
/* output: metric of fib result (IPv4/IPv6 only) */
__u32 rt_metric;
diff --git a/tools/include/uapi/linux/if_link.h b/tools/include/uapi/linux/if_link.h
index 68699f654118..cf01b6824244 100644
--- a/tools/include/uapi/linux/if_link.h
+++ b/tools/include/uapi/linux/if_link.h
@@ -333,6 +333,7 @@ enum {
IFLA_BRPORT_BCAST_FLOOD,
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
+ IFLA_BRPORT_ISOLATED,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -516,6 +517,7 @@ enum {
IFLA_VXLAN_COLLECT_METADATA,
IFLA_VXLAN_LABEL,
IFLA_VXLAN_GPE,
+ IFLA_VXLAN_TTL_INHERIT,
__IFLA_VXLAN_MAX
};
#define IFLA_VXLAN_MAX (__IFLA_VXLAN_MAX - 1)
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index 39e364c70caf..b6270a3b38e9 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -948,6 +948,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_S390_BPB 152
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
+#define KVM_CAP_HYPERV_TLBFLUSH 155
#ifdef KVM_CAP_IRQ_ROUTING
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 38047c6aa575..f4a25bd1871f 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -164,6 +164,7 @@ static int __dead_end_function(struct objtool_file *file, struct symbol *func,
"lbug_with_loc",
"fortify_panic",
"usercopy_abort",
+ "machine_real_restart",
};
if (func->bind == STB_WEAK)
diff --git a/tools/objtool/elf.c b/tools/objtool/elf.c
index 4e60e105583e..7ec85d567598 100644
--- a/tools/objtool/elf.c
+++ b/tools/objtool/elf.c
@@ -302,19 +302,34 @@ static int read_symbols(struct elf *elf)
continue;
sym->pfunc = sym->cfunc = sym;
coldstr = strstr(sym->name, ".cold.");
- if (coldstr) {
- coldstr[0] = '\0';
- pfunc = find_symbol_by_name(elf, sym->name);
- coldstr[0] = '.';
-
- if (!pfunc) {
- WARN("%s(): can't find parent function",
- sym->name);
- goto err;
- }
-
- sym->pfunc = pfunc;
- pfunc->cfunc = sym;
+ if (!coldstr)
+ continue;
+
+ coldstr[0] = '\0';
+ pfunc = find_symbol_by_name(elf, sym->name);
+ coldstr[0] = '.';
+
+ if (!pfunc) {
+ WARN("%s(): can't find parent function",
+ sym->name);
+ goto err;
+ }
+
+ sym->pfunc = pfunc;
+ pfunc->cfunc = sym;
+
+ /*
+ * Unfortunately, -fnoreorder-functions puts the child
+ * inside the parent. Remove the overlap so we can
+ * have sane assumptions.
+ *
+ * Note that pfunc->len now no longer matches
+ * pfunc->sym.st_size.
+ */
+ if (sym->sec == pfunc->sec &&
+ sym->offset >= pfunc->offset &&
+ sym->offset + sym->len == pfunc->offset + pfunc->len) {
+ pfunc->len -= sym->len;
}
}
}
@@ -504,10 +519,12 @@ struct section *elf_create_section(struct elf *elf, const char *name,
sec->sh.sh_flags = SHF_ALLOC;
- /* Add section name to .shstrtab */
+ /* Add section name to .shstrtab (or .strtab for Clang) */
shstrtab = find_section_by_name(elf, ".shstrtab");
+ if (!shstrtab)
+ shstrtab = find_section_by_name(elf, ".strtab");
if (!shstrtab) {
- WARN("can't find .shstrtab section");
+ WARN("can't find .shstrtab or .strtab section");
return NULL;
}
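The elf.c rework above finds the parent of a GCC 8 ".cold." subfunction by temporarily NUL-terminating the symbol name at the suffix, doing the lookup, then restoring the dot. A hedged sketch of that lookup trick, with objtool's find_symbol_by_name() replaced by a plain strcmp() scan over a hypothetical symbol table:

#include <stdio.h>
#include <string.h>

static const char *symtab[] = { "schedule", "vmx_handle_exit", NULL };

static const char *find_parent(char *name)
{
	char *coldstr = strstr(name, ".cold.");
	const char *parent = NULL;
	int i;

	if (!coldstr)
		return NULL;

	coldstr[0] = '\0';		/* truncate "foo.cold.N" to "foo" */
	for (i = 0; symtab[i]; i++)
		if (!strcmp(symtab[i], name))
			parent = symtab[i];
	coldstr[0] = '.';		/* restore the original name */

	return parent;
}

int main(void)
{
	char name[] = "schedule.cold.12";
	const char *parent = find_parent(name);

	printf("parent of %s: %s\n", name, parent ? parent : "(none)");
	return 0;
}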
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 5dfe102fb5b5..b10a90b6a718 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -178,6 +178,9 @@ Print count deltas for fixed number of times.
This option should be used together with "-I" option.
example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
+--interval-clear::
+Clear the screen before next interval.
+
--timeout msecs::
Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
This option is not supported with the "-I" option.
diff --git a/tools/perf/Makefile.config b/tools/perf/Makefile.config
index b5ac356ba323..f5a3b402589e 100644
--- a/tools/perf/Makefile.config
+++ b/tools/perf/Makefile.config
@@ -207,8 +207,7 @@ ifdef PYTHON_CONFIG
PYTHON_EMBED_LDOPTS := $(shell $(PYTHON_CONFIG_SQ) --ldflags 2>/dev/null)
PYTHON_EMBED_LDFLAGS := $(call strip-libs,$(PYTHON_EMBED_LDOPTS))
PYTHON_EMBED_LIBADD := $(call grep-libs,$(PYTHON_EMBED_LDOPTS)) -lutil
- PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --cflags 2>/dev/null)
- PYTHON_EMBED_CCOPTS := $(filter-out -specs=%,$(PYTHON_EMBED_CCOPTS))
+ PYTHON_EMBED_CCOPTS := $(shell $(PYTHON_CONFIG_SQ) --includes 2>/dev/null)
FLAGS_PYTHON_EMBED := $(PYTHON_EMBED_CCOPTS) $(PYTHON_EMBED_LDOPTS)
endif
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 3598b8b75d27..ef5d59a5742e 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -243,7 +243,7 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
u64 ip;
u64 skip_slot = -1;
- if (chain->nr < 3)
+ if (!chain || chain->nr < 3)
return skip_slot;
ip = chain->ips[2];
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index 4dfe42666d0c..f0b1709a5ffb 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -341,6 +341,8 @@
330 common pkey_alloc __x64_sys_pkey_alloc
331 common pkey_free __x64_sys_pkey_free
332 common statx __x64_sys_statx
+333 common io_pgetevents __x64_sys_io_pgetevents
+334 common rseq __x64_sys_rseq
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/arch/x86/util/perf_regs.c b/tools/perf/arch/x86/util/perf_regs.c
index 4b2caf6d48e7..fead6b3b4206 100644
--- a/tools/perf/arch/x86/util/perf_regs.c
+++ b/tools/perf/arch/x86/util/perf_regs.c
@@ -226,7 +226,7 @@ int arch_sdt_arg_parse_op(char *old_op, char **new_op)
else if (rm[2].rm_so != rm[2].rm_eo)
prefix[0] = '+';
else
- strncpy(prefix, "+0", 2);
+ scnprintf(prefix, sizeof(prefix), "+0");
}
/* Rename register */
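The perf_regs.c change above replaces strncpy(prefix, "+0", 2), which copies exactly two bytes and leaves the buffer without a terminating NUL, with the tools' scnprintf() helper, which always terminates and returns the number of characters actually written. A rough stand-in for that helper, to show the semantics (an approximation, not the tools/include implementation):

#include <stdarg.h>
#include <stdio.h>

static int my_scnprintf(char *buf, size_t size, const char *fmt, ...)
{
	va_list args;
	int i;

	if (!size)
		return 0;

	va_start(args, fmt);
	i = vsnprintf(buf, size, fmt, args);
	va_end(args);

	/* Unlike snprintf(), report what actually landed in buf. */
	if (i < 0)
		return 0;
	return i >= (int)size ? (int)size - 1 : i;
}

int main(void)
{
	char prefix[3];

	printf("wrote %d chars: \"%s\"\n",
	       my_scnprintf(prefix, sizeof(prefix), "+0"), prefix);
	return 0;
}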
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index 63eb49082774..44195514b19e 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1098,7 +1098,7 @@ static void *worker_thread(void *__tdata)
u8 *global_data;
u8 *process_data;
u8 *thread_data;
- u64 bytes_done;
+ u64 bytes_done, secs;
long work_done;
u32 l;
struct rusage rusage;
@@ -1254,7 +1254,8 @@ static void *worker_thread(void *__tdata)
timersub(&stop, &start0, &diff);
td->runtime_ns = diff.tv_sec * NSEC_PER_SEC;
td->runtime_ns += diff.tv_usec * NSEC_PER_USEC;
- td->speed_gbs = bytes_done / (td->runtime_ns / NSEC_PER_SEC) / 1e9;
+ secs = td->runtime_ns / NSEC_PER_SEC;
+ td->speed_gbs = secs ? bytes_done / secs / 1e9 : 0;
getrusage(RUSAGE_THREAD, &rusage);
td->system_time_ns = rusage.ru_stime.tv_sec * NSEC_PER_SEC;
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 5eb22cc56363..8180319285af 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -283,6 +283,15 @@ out_put:
return ret;
}
+static int process_feature_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ if (event->feat.feat_id < HEADER_LAST_FEATURE)
+ return perf_event__process_feature(tool, event, session);
+ return 0;
+}
+
static int hist_entry__tty_annotate(struct hist_entry *he,
struct perf_evsel *evsel,
struct perf_annotate *ann)
@@ -471,7 +480,7 @@ int cmd_annotate(int argc, const char **argv)
.attr = perf_event__process_attr,
.build_id = perf_event__process_build_id,
.tracing_data = perf_event__process_tracing_data,
- .feature = perf_event__process_feature,
+ .feature = process_feature_event,
.ordered_events = true,
.ordering_requires_timestamps = true,
},
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 307b3594525f..6a8738f7ead3 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -56,16 +56,16 @@ struct c2c_hist_entry {
struct compute_stats cstats;
+ unsigned long paddr;
+ unsigned long paddr_cnt;
+ bool paddr_zero;
+ char *nodestr;
+
/*
* must be at the end,
* because of its callchain dynamic entry
*/
struct hist_entry he;
-
- unsigned long paddr;
- unsigned long paddr_cnt;
- bool paddr_zero;
- char *nodestr;
};
static char const *coalesce_default = "pid,iaddr";
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cdb5b6949832..c04dc7b53797 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -217,7 +217,8 @@ static int process_feature_event(struct perf_tool *tool,
}
/*
- * All features are received, we can force the
+ * (feat_id = HEADER_LAST_FEATURE) is the end marker which
+ * means all features are received, now we can force the
* group if needed.
*/
setup_forced_leader(rep, session->evlist);
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b3bf35512d21..568ddfac3213 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -180,6 +180,18 @@ static struct {
PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
},
+ [PERF_TYPE_HW_CACHE] = {
+ .user_set = false,
+
+ .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+ PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+ PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+ PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+ },
+
[PERF_TYPE_RAW] = {
.user_set = false,
@@ -1822,6 +1834,7 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
struct perf_evlist *evlist;
struct perf_evsel *evsel, *pos;
int err;
+ static struct perf_evsel_script *es;
err = perf_event__process_attr(tool, event, pevlist);
if (err)
@@ -1830,6 +1843,19 @@ static int process_attr(struct perf_tool *tool, union perf_event *event,
evlist = *pevlist;
evsel = perf_evlist__last(*pevlist);
+ if (!evsel->priv) {
+ if (scr->per_event_dump) {
+ evsel->priv = perf_evsel_script__new(evsel,
+ scr->session->data);
+ } else {
+ es = zalloc(sizeof(*es));
+ if (!es)
+ return -ENOMEM;
+ es->fp = stdout;
+ evsel->priv = es;
+ }
+ }
+
if (evsel->attr.type >= PERF_TYPE_MAX &&
evsel->attr.type != PERF_TYPE_SYNTH)
return 0;
@@ -3018,6 +3044,15 @@ int process_cpu_map_event(struct perf_tool *tool __maybe_unused,
return set_maps(script);
}
+static int process_feature_event(struct perf_tool *tool,
+ union perf_event *event,
+ struct perf_session *session)
+{
+ if (event->feat.feat_id < HEADER_LAST_FEATURE)
+ return perf_event__process_feature(tool, event, session);
+ return 0;
+}
+
#ifdef HAVE_AUXTRACE_SUPPORT
static int perf_script__process_auxtrace_info(struct perf_tool *tool,
union perf_event *event,
@@ -3062,7 +3097,7 @@ int cmd_script(int argc, const char **argv)
.attr = process_attr,
.event_update = perf_event__process_event_update,
.tracing_data = perf_event__process_tracing_data,
- .feature = perf_event__process_feature,
+ .feature = process_feature_event,
.build_id = perf_event__process_build_id,
.id_index = perf_event__process_id_index,
.auxtrace_info = perf_script__process_auxtrace_info,
@@ -3113,8 +3148,9 @@ int cmd_script(int argc, const char **argv)
"+field to add and -field to remove."
"Valid types: hw,sw,trace,raw,synth. "
"Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
- "addr,symoff,period,iregs,uregs,brstack,brstacksym,flags,"
- "bpf-output,callindent,insn,insnlen,brstackinsn,synth,phys_addr",
+ "addr,symoff,srcline,period,iregs,uregs,brstack,"
+ "brstacksym,flags,bpf-output,brstackinsn,brstackoff,"
+ "callindent,insn,insnlen,synth,phys_addr,metric,misc",
parse_output_fields),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 096ccb25c11f..05be023c3f0e 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -65,6 +65,7 @@
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
+#include "util/top.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -144,6 +145,8 @@ static struct target target = {
typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
+#define METRIC_ONLY_LEN 20
+
static int run_count = 1;
static bool no_inherit = false;
static volatile pid_t child_pid = -1;
@@ -173,6 +176,7 @@ static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
static bool append_file;
static bool interval_count;
+static bool interval_clear;
static const char *output_name;
static int output_fd;
static int print_free_counters_hint;
@@ -180,6 +184,7 @@ static int print_mixed_hw_group_error;
static u64 *walltime_run;
static bool ru_display = false;
static struct rusage ru_data;
+static unsigned int metric_only_len = METRIC_ONLY_LEN;
struct perf_stat {
bool record;
@@ -967,8 +972,6 @@ static void print_metric_csv(void *ctx,
fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
}
-#define METRIC_ONLY_LEN 20
-
/* Filter out some columns that don't work well in metrics only mode */
static bool valid_only_metric(const char *unit)
@@ -999,22 +1002,20 @@ static void print_metric_only(void *ctx, const char *color, const char *fmt,
{
struct outstate *os = ctx;
FILE *out = os->fh;
- int n;
- char buf[1024];
- unsigned mlen = METRIC_ONLY_LEN;
+ char buf[1024], str[1024];
+ unsigned mlen = metric_only_len;
if (!valid_only_metric(unit))
return;
unit = fixunit(buf, os->evsel, unit);
- if (color)
- n = color_fprintf(out, color, fmt, val);
- else
- n = fprintf(out, fmt, val);
- if (n > METRIC_ONLY_LEN)
- n = METRIC_ONLY_LEN;
if (mlen < strlen(unit))
mlen = strlen(unit) + 1;
- fprintf(out, "%*s", mlen - n, "");
+
+ if (color)
+ mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+ color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+ fprintf(out, "%*s ", mlen, str);
}
static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
@@ -1054,7 +1055,7 @@ static void print_metric_header(void *ctx, const char *color __maybe_unused,
if (csv_output)
fprintf(os->fh, "%s%s", unit, csv_sep);
else
- fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
+ fprintf(os->fh, "%*s ", metric_only_len, unit);
}
static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
@@ -1704,9 +1705,12 @@ static void print_interval(char *prefix, struct timespec *ts)
FILE *output = stat_config.output;
static int num_print_interval;
+ if (interval_clear)
+ puts(CONSOLE_CLEAR);
+
sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
- if (num_print_interval == 0 && !csv_output) {
+ if ((num_print_interval == 0 && !csv_output) || interval_clear) {
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
fprintf(output, "# time socket cpus");
@@ -1719,7 +1723,7 @@ static void print_interval(char *prefix, struct timespec *ts)
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
case AGGR_NONE:
- fprintf(output, "# time CPU");
+ fprintf(output, "# time CPU ");
if (!metric_only)
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
@@ -1738,7 +1742,7 @@ static void print_interval(char *prefix, struct timespec *ts)
}
}
- if (num_print_interval == 0 && metric_only)
+ if ((num_print_interval == 0 || interval_clear) && metric_only)
print_metric_headers(" ", true);
if (++num_print_interval == 25)
num_print_interval = 0;
@@ -2057,6 +2061,8 @@ static const struct option stat_options[] = {
"(overhead is possible for values <= 100ms)"),
OPT_INTEGER(0, "interval-count", &stat_config.times,
"print counts for fixed number of times"),
+ OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+ "clear screen in between new interval"),
OPT_UINTEGER(0, "timeout", &stat_config.timeout,
"stop workload and print counts after a timeout period in ms (>= 10ms)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
@@ -2436,14 +2442,13 @@ static int add_default_attributes(void)
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
+ struct parse_events_error errinfo;
/* Set attrs if no event is selected and !null_run: */
if (null_run)
return 0;
if (transaction_run) {
- struct parse_events_error errinfo;
-
if (pmu_have_event("cpu", "cycles-ct") &&
pmu_have_event("cpu", "el-start"))
err = parse_events(evsel_list, transaction_attrs,
@@ -2454,6 +2459,7 @@ static int add_default_attributes(void)
&errinfo);
if (err) {
fprintf(stderr, "Cannot set up transaction events\n");
+ parse_events_print_error(&errinfo, transaction_attrs);
return -1;
}
return 0;
@@ -2479,10 +2485,11 @@ static int add_default_attributes(void)
pmu_have_event("msr", "smi")) {
if (!force_metric_only)
metric_only = true;
- err = parse_events(evsel_list, smi_cost_attrs, NULL);
+ err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
} else {
fprintf(stderr, "To measure SMI cost, it needs "
"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+ parse_events_print_error(&errinfo, smi_cost_attrs);
return -1;
}
if (err) {
@@ -2517,12 +2524,13 @@ static int add_default_attributes(void)
if (topdown_attrs[0] && str) {
if (warn)
arch_topdown_group_warn();
- err = parse_events(evsel_list, str, NULL);
+ err = parse_events(evsel_list, str, &errinfo);
if (err) {
fprintf(stderr,
"Cannot set up top down events %s: %d\n",
str, err);
free(str);
+ parse_events_print_error(&errinfo, str);
return -1;
}
} else {
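In print_metric_only() above, the value is rendered into a string that may carry ANSI color escape codes, which occupy bytes but no screen columns; the hunk therefore widens the printf field by strlen(color) + sizeof(PERF_COLOR_RESET) - 1 so the visible column width stays fixed. A small sketch of that padding trick (the color macros and width are illustrative, not perf's):

#include <stdio.h>
#include <string.h>

#define COLOR_RED   "\033[31m"
#define COLOR_RESET "\033[m"

int main(void)
{
	char str[64];
	int mlen = 20;			/* desired visible width */

	snprintf(str, sizeof(str), "%s%6.2f%s", COLOR_RED, 42.0, COLOR_RESET);

	/* The escape sequences take no columns, so add their length to the
	 * pad width before printing the fixed-width field. */
	mlen += strlen(COLOR_RED) + strlen(COLOR_RESET);

	printf("%*s |\n", mlen, str);
	return 0;
}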
diff --git a/tools/perf/jvmti/jvmti_agent.c b/tools/perf/jvmti/jvmti_agent.c
index 0c6d1002b524..ac1bcdc17dae 100644
--- a/tools/perf/jvmti/jvmti_agent.c
+++ b/tools/perf/jvmti/jvmti_agent.c
@@ -35,6 +35,7 @@
#include <sys/mman.h>
#include <syscall.h> /* for gettid() */
#include <err.h>
+#include <linux/kernel.h>
#include "jvmti_agent.h"
#include "../util/jitdump.h"
@@ -249,7 +250,7 @@ void *jvmti_open(void)
/*
* jitdump file name
*/
- snprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
+ scnprintf(dump_path, PATH_MAX, "%s/jit-%i.dump", jit_path, getpid());
fd = open(dump_path, O_CREAT|O_TRUNC|O_RDWR, 0666);
if (fd == -1)
diff --git a/tools/perf/pmu-events/Build b/tools/perf/pmu-events/Build
index 17783913d330..215ba30b8534 100644
--- a/tools/perf/pmu-events/Build
+++ b/tools/perf/pmu-events/Build
@@ -1,7 +1,7 @@
hostprogs := jevents
jevents-y += json.o jsmn.o jevents.o
-CHOSTFLAGS_jevents.o = -I$(srctree)/tools/include
+HOSTCFLAGS_jevents.o = -I$(srctree)/tools/include
pmu-events-y += pmu-events.o
JDIR = pmu-events/arch/$(SRCARCH)
JSON = $(shell [ -d $(JDIR) ] && \
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
index 38dfb720fb6f..54ace2f6bc36 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Core.py
@@ -31,10 +31,8 @@ def flag_str(event_name, field_name, value):
string = ""
if flag_fields[event_name][field_name]:
- print_delim = 0
- keys = flag_fields[event_name][field_name]['values'].keys()
- keys.sort()
- for idx in keys:
+ print_delim = 0
+ for idx in sorted(flag_fields[event_name][field_name]['values']):
if not value and not idx:
string += flag_fields[event_name][field_name]['values'][idx]
break
@@ -51,14 +49,12 @@ def symbol_str(event_name, field_name, value):
string = ""
if symbolic_fields[event_name][field_name]:
- keys = symbolic_fields[event_name][field_name]['values'].keys()
- keys.sort()
- for idx in keys:
+ for idx in sorted(symbolic_fields[event_name][field_name]['values']):
if not value and not idx:
- string = symbolic_fields[event_name][field_name]['values'][idx]
+ string = symbolic_fields[event_name][field_name]['values'][idx]
break
- if (value == idx):
- string = symbolic_fields[event_name][field_name]['values'][idx]
+ if (value == idx):
+ string = symbolic_fields[event_name][field_name]['values'][idx]
break
return string
@@ -74,19 +70,17 @@ def trace_flag_str(value):
string = ""
print_delim = 0
- keys = trace_flags.keys()
-
- for idx in keys:
- if not value and not idx:
- string += "NONE"
- break
-
- if idx and (value & idx) == idx:
- if print_delim:
- string += " | ";
- string += trace_flags[idx]
- print_delim = 1
- value &= ~idx
+ for idx in trace_flags:
+ if not value and not idx:
+ string += "NONE"
+ break
+
+ if idx and (value & idx) == idx:
+ if print_delim:
+ string += " | ";
+ string += trace_flags[idx]
+ print_delim = 1
+ value &= ~idx
return string
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
index 81a56cd2b3c1..21a7a1298094 100755
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py
@@ -8,6 +8,7 @@
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
+from __future__ import print_function
import struct
@@ -44,7 +45,8 @@ class PerfEvent(object):
PerfEvent.event_num += 1
def show(self):
- print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
+ print("PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" %
+ (self.name, self.symbol, self.comm, self.dso))
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
index fdd92f699055..cac7b2542ee8 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/SchedGui.py
@@ -11,7 +11,7 @@
try:
import wx
except ImportError:
- raise ImportError, "You need to install the wxpython lib for this script"
+ raise ImportError("You need to install the wxpython lib for this script")
class RootFrame(wx.Frame):
diff --git a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
index f6c84966e4f8..7384dcb628c4 100644
--- a/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
+++ b/tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py
@@ -5,6 +5,7 @@
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
+from __future__ import print_function
import errno, os
@@ -33,7 +34,7 @@ def nsecs_str(nsecs):
return str
def add_stats(dict, key, value):
- if not dict.has_key(key):
+ if key not in dict:
dict[key] = (value, value, value, 1)
else:
min, max, avg, count = dict[key]
@@ -72,10 +73,10 @@ try:
except:
if not audit_package_warned:
audit_package_warned = True
- print "Install the audit-libs-python package to get syscall names.\n" \
- "For example:\n # apt-get install python-audit (Ubuntu)" \
- "\n # yum install audit-libs-python (Fedora)" \
- "\n etc.\n"
+ print("Install the audit-libs-python package to get syscall names.\n"
+ "For example:\n # apt-get install python-audit (Ubuntu)"
+ "\n # yum install audit-libs-python (Fedora)"
+ "\n etc.\n")
def syscall_name(id):
try:
diff --git a/tools/perf/scripts/python/sched-migration.py b/tools/perf/scripts/python/sched-migration.py
index de66cb3b72c9..3473e7f66081 100644
--- a/tools/perf/scripts/python/sched-migration.py
+++ b/tools/perf/scripts/python/sched-migration.py
@@ -9,13 +9,17 @@
# This software is distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
-
+from __future__ import print_function
import os
import sys
from collections import defaultdict
-from UserList import UserList
+try:
+ from UserList import UserList
+except ImportError:
+ # Python 3: UserList moved to the collections package
+ from collections import UserList
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
@@ -300,7 +304,7 @@ class TimeSliceList(UserList):
if i == -1:
return
- for i in xrange(i, len(self.data)):
+ for i in range(i, len(self.data)):
timeslice = self.data[i]
if timeslice.start > end:
return
@@ -336,8 +340,8 @@ class SchedEventProxy:
on_cpu_task = self.current_tsk[headers.cpu]
if on_cpu_task != -1 and on_cpu_task != prev_pid:
- print "Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
- (headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
+ print("Sched switch event rejected ts: %s cpu: %d prev: %s(%d) next: %s(%d)" % \
+ headers.ts_format(), headers.cpu, prev_comm, prev_pid, next_comm, next_pid)
threads[prev_pid] = prev_comm
threads[next_pid] = next_comm
diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 2bde505e2e7e..dd850a26d579 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -422,7 +422,7 @@ static const char *shell_test__description(char *description, size_t size,
#define for_each_shell_test(dir, base, ent) \
while ((ent = readdir(dir)) != NULL) \
- if (!is_directory(base, ent))
+ if (!is_directory(base, ent) && ent->d_name[0] != '.')
static const char *shell_tests__dir(char *path, size_t size)
{
diff --git a/tools/perf/tests/parse-events.c b/tools/perf/tests/parse-events.c
index 7d4077068454..61211918bfba 100644
--- a/tools/perf/tests/parse-events.c
+++ b/tools/perf/tests/parse-events.c
@@ -1309,6 +1309,11 @@ static int test__checkevent_config_cache(struct perf_evlist *evlist)
return 0;
}
+static bool test__intel_pt_valid(void)
+{
+ return !!perf_pmu__find("intel_pt");
+}
+
static int test__intel_pt(struct perf_evlist *evlist)
{
struct perf_evsel *evsel = perf_evlist__first(evlist);
@@ -1375,6 +1380,7 @@ struct evlist_test {
const char *name;
__u32 type;
const int id;
+ bool (*valid)(void);
int (*check)(struct perf_evlist *evlist);
};
@@ -1648,6 +1654,7 @@ static struct evlist_test test__events[] = {
},
{
.name = "intel_pt//u",
+ .valid = test__intel_pt_valid,
.check = test__intel_pt,
.id = 52,
},
@@ -1686,17 +1693,24 @@ static struct terms_test test__terms[] = {
static int test_event(struct evlist_test *e)
{
+ struct parse_events_error err = { .idx = 0, };
struct perf_evlist *evlist;
int ret;
+ if (e->valid && !e->valid()) {
+ pr_debug("... SKIP");
+ return 0;
+ }
+
evlist = perf_evlist__new();
if (evlist == NULL)
return -ENOMEM;
- ret = parse_events(evlist, e->name, NULL);
+ ret = parse_events(evlist, e->name, &err);
if (ret) {
- pr_debug("failed to parse event '%s', err %d\n",
- e->name, ret);
+ pr_debug("failed to parse event '%s', err %d, str '%s'\n",
+ e->name, ret, err.str);
+ parse_events_print_error(&err, e->name);
} else {
ret = e->check(evlist);
}
@@ -1714,10 +1728,11 @@ static int test_events(struct evlist_test *events, unsigned cnt)
for (i = 0; i < cnt; i++) {
struct evlist_test *e = &events[i];
- pr_debug("running test %d '%s'\n", e->id, e->name);
+ pr_debug("running test %d '%s'", e->id, e->name);
ret1 = test_event(e);
if (ret1)
ret2 = ret1;
+ pr_debug("\n");
}
return ret2;
@@ -1799,7 +1814,7 @@ static int test_pmu_events(void)
}
while (!ret && (ent = readdir(dir))) {
- struct evlist_test e;
+ struct evlist_test e = { .id = 0, };
char name[2 * NAME_MAX + 1 + 12 + 3];
/* Names containing . are special and cannot be used directly */
diff --git a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
index 263057039693..94e513e62b34 100755
--- a/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
+++ b/tools/perf/tests/shell/record+probe_libc_inet_pton.sh
@@ -14,35 +14,40 @@ libc=$(grep -w libc /proc/self/maps | head -1 | sed -r 's/.*[[:space:]](\/.*)/\1
nm -Dg $libc 2>/dev/null | fgrep -q inet_pton || exit 254
trace_libc_inet_pton_backtrace() {
- idx=0
- expected[0]="ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)"
- expected[1]=".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
+
+ expected=`mktemp -u /tmp/expected.XXX`
+
+ echo "ping[][0-9 \.:]+probe_libc:inet_pton: \([[:xdigit:]]+\)" > $expected
+ echo ".*inet_pton\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
case "$(uname -m)" in
s390x)
eventattr='call-graph=dwarf,max-stack=4'
- expected[2]="gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
- expected[3]="(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$"
- expected[4]="main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+ echo "gaih_inet.*\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ echo "(__GI_)?getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc|inlined\)$" >> $expected
+ echo "main\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
*)
eventattr='max-stack=3'
- expected[2]="getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$"
- expected[3]=".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$"
+ echo "getaddrinfo\+0x[[:xdigit:]]+[[:space:]]\($libc\)$" >> $expected
+ echo ".*\+0x[[:xdigit:]]+[[:space:]]\(.*/bin/ping.*\)$" >> $expected
;;
esac
- file=`mktemp -u /tmp/perf.data.XXX`
+ perf_data=`mktemp -u /tmp/perf.data.XXX`
+ perf_script=`mktemp -u /tmp/perf.script.XXX`
+ perf record -e probe_libc:inet_pton/$eventattr/ -o $perf_data ping -6 -c 1 ::1 > /dev/null 2>&1
+ perf script -i $perf_data > $perf_script
- perf record -e probe_libc:inet_pton/$eventattr/ -o $file ping -6 -c 1 ::1 > /dev/null 2>&1
- perf script -i $file | while read line ; do
+ exec 3<$perf_script
+ exec 4<$expected
+ while read line <&3 && read -r pattern <&4; do
+ [ -z "$pattern" ] && break
echo $line
- echo "$line" | egrep -q "${expected[$idx]}"
+ echo "$line" | egrep -q "$pattern"
if [ $? -ne 0 ] ; then
- printf "FAIL: expected backtrace entry %d \"%s\" got \"%s\"\n" $idx "${expected[$idx]}" "$line"
+ printf "FAIL: expected backtrace entry \"%s\" got \"%s\"\n" "$pattern" "$line"
exit 1
fi
- let idx+=1
- [ -z "${expected[$idx]}" ] && break
done
# If any statements are executed from this point onwards,
@@ -58,6 +63,6 @@ skip_if_no_perf_probe && \
perf probe -q $libc inet_pton && \
trace_libc_inet_pton_backtrace
err=$?
-rm -f ${file}
+rm -f ${perf_data} ${perf_script} ${expected}
perf probe -q -d probe_libc:inet_pton
exit $err
diff --git a/tools/perf/tests/shell/trace+probe_vfs_getname.sh b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
index 55ad9793d544..4ce276efe6b4 100755
--- a/tools/perf/tests/shell/trace+probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/trace+probe_vfs_getname.sh
@@ -17,7 +17,7 @@ skip_if_no_perf_probe || exit 2
file=$(mktemp /tmp/temporary_file.XXXXX)
trace_open_vfs_getname() {
- evts=$(echo $(perf list syscalls:sys_enter_open* |& egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
+ evts=$(echo $(perf list syscalls:sys_enter_open* 2>&1 | egrep 'open(at)? ' | sed -r 's/.*sys_enter_([a-z]+) +\[.*$/\1/') | sed 's/ /,/')
perf trace -e $evts touch $file 2>&1 | \
egrep " +[0-9]+\.[0-9]+ +\( +[0-9]+\.[0-9]+ ms\): +touch\/[0-9]+ open(at)?\((dfd: +CWD, +)?filename: +${file}, +flags: CREAT\|NOCTTY\|NONBLOCK\|WRONLY, +mode: +IRUGO\|IWUGO\) += +[0-9]+$"
}
diff --git a/tools/perf/tests/topology.c b/tools/perf/tests/topology.c
index 40e30a26b23c..9497d02f69e6 100644
--- a/tools/perf/tests/topology.c
+++ b/tools/perf/tests/topology.c
@@ -45,6 +45,7 @@ static int session_write_header(char *path)
perf_header__set_feat(&session->header, HEADER_CPU_TOPOLOGY);
perf_header__set_feat(&session->header, HEADER_NRCPUS);
+ perf_header__set_feat(&session->header, HEADER_ARCH);
session->header.data_size += DATA_SIZE;
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index b085f1b3e34d..4ab663ec3e5e 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -382,7 +382,7 @@ static void perf_gtk__show_hists(GtkWidget *window, struct hists *hists,
gtk_tree_store_set(store, &iter, col_idx++, s, -1);
}
- if (hists__has_callchains(hists) &&
+ if (hist_entry__has_callchains(h) &&
symbol_conf.use_callchain && hists__has(hists, sym)) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
diff --git a/tools/perf/util/c++/clang.cpp b/tools/perf/util/c++/clang.cpp
index bf31ceab33bd..89512504551b 100644
--- a/tools/perf/util/c++/clang.cpp
+++ b/tools/perf/util/c++/clang.cpp
@@ -146,8 +146,15 @@ getBPFObjectFromModule(llvm::Module *Module)
raw_svector_ostream ostream(*Buffer);
legacy::PassManager PM;
- if (TargetMachine->addPassesToEmitFile(PM, ostream,
- TargetMachine::CGFT_ObjectFile)) {
+ bool NotAdded;
+#if CLANG_VERSION_MAJOR < 7
+ NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream,
+ TargetMachine::CGFT_ObjectFile);
+#else
+ NotAdded = TargetMachine->addPassesToEmitFile(PM, ostream, nullptr,
+ TargetMachine::CGFT_ObjectFile);
+#endif
+ if (NotAdded) {
llvm::errs() << "TargetMachine can't emit a file of this type\n";
return std::unique_ptr<llvm::SmallVectorImpl<char>>(nullptr);;
}
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 540cd2dcd3e7..653ff65aa2c3 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -2129,6 +2129,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
int cpu_nr = ff->ph->env.nr_cpus_avail;
u64 size = 0;
struct perf_header *ph = ff->ph;
+ bool do_core_id_test = true;
ph->env.cpu = calloc(cpu_nr, sizeof(*ph->env.cpu));
if (!ph->env.cpu)
@@ -2183,6 +2184,13 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
return 0;
}
+ /* On s390 the socket_id number is not related to the numbers of cpus.
+ * The socket_id number might be higher than the numbers of cpus.
+ * This depends on the configuration.
+ */
+ if (ph->env.arch && !strncmp(ph->env.arch, "s390", 4))
+ do_core_id_test = false;
+
for (i = 0; i < (u32)cpu_nr; i++) {
if (do_read_u32(ff, &nr))
goto free_cpu;
@@ -2192,7 +2200,7 @@ static int process_cpu_topology(struct feat_fd *ff, void *data __maybe_unused)
if (do_read_u32(ff, &nr))
goto free_cpu;
- if (nr != (u32)-1 && nr > (u32)cpu_nr) {
+ if (do_core_id_test && nr != (u32)-1 && nr > (u32)cpu_nr) {
pr_debug("socket_id number is too big."
"You may need to upgrade the perf tool.\n");
goto free_cpu;
@@ -3456,7 +3464,7 @@ int perf_event__process_feature(struct perf_tool *tool,
pr_warning("invalid record type %d in pipe-mode\n", type);
return 0;
}
- if (feat == HEADER_RESERVED || feat > HEADER_LAST_FEATURE) {
+ if (feat == HEADER_RESERVED || feat >= HEADER_LAST_FEATURE) {
pr_warning("invalid record type %d in pipe-mode\n", type);
return -1;
}
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 52e8fda93a47..828cb9794c76 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -370,9 +370,11 @@ void hists__delete_entries(struct hists *hists)
static int hist_entry__init(struct hist_entry *he,
struct hist_entry *template,
- bool sample_self)
+ bool sample_self,
+ size_t callchain_size)
{
*he = *template;
+ he->callchain_size = callchain_size;
if (symbol_conf.cumulate_callchain) {
he->stat_acc = malloc(sizeof(he->stat));
@@ -473,7 +475,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
he = ops->new(callchain_size);
if (he) {
- err = hist_entry__init(he, template, sample_self);
+ err = hist_entry__init(he, template, sample_self, callchain_size);
if (err) {
ops->free(he);
he = NULL;
@@ -619,9 +621,11 @@ __hists__add_entry(struct hists *hists,
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
- };
+ }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
- return hists__findnew_entry(hists, &entry, al, sample_self);
+ if (!hists->has_callchains && he && he->callchain_size != 0)
+ hists->has_callchains = true;
+ return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 06607c434949..73049f7f0f60 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -85,6 +85,7 @@ struct hists {
struct events_stats stats;
u64 event_stream;
u16 col_len[HISTC_NR_COLS];
+ bool has_callchains;
int socket_filter;
struct perf_hpp_list *hpp_list;
struct list_head hpp_formats;
@@ -222,8 +223,7 @@ static inline struct hists *evsel__hists(struct perf_evsel *evsel)
static __pure inline bool hists__has_callchains(struct hists *hists)
{
- const struct perf_evsel *evsel = hists_to_evsel(hists);
- return evsel__has_callchain(evsel);
+ return hists->has_callchains;
}
int hists__init(void);
diff --git a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
index ba4c9dd18643..d426761a549d 100644
--- a/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
+++ b/tools/perf/util/intel-pt-decoder/intel-pt-pkt-decoder.c
@@ -366,7 +366,7 @@ static int intel_pt_get_cyc(unsigned int byte, const unsigned char *buf,
if (len < offs)
return INTEL_PT_NEED_MORE_BYTES;
byte = buf[offs++];
- payload |= (byte >> 1) << shift;
+ payload |= ((uint64_t)byte >> 1) << shift;
}
packet->type = INTEL_PT_CYC;
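The one-character intel-pt fix above matters because byte is a 32-bit unsigned int: without the cast, (byte >> 1) << shift is evaluated as a 32-bit shift, which is undefined once the CYC payload's shift reaches 32 and silently drops the bits ORed into the 64-bit payload. A compact illustration of the difference (values chosen for demonstration only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int byte = 0x83;	/* example CYC continuation byte */
	unsigned int shift = 35;	/* grows by 7 per extra payload byte */
	uint64_t lost = 0, kept = 0;

	lost |= (byte >> 1) << shift;		/* 32-bit shift: undefined, bits lost */
	kept |= ((uint64_t)byte >> 1) << shift;	/* 64-bit shift: bits preserved */

	printf("without cast: 0x%llx\nwith cast:    0x%llx\n",
	       (unsigned long long)lost, (unsigned long long)kept);
	return 0;
}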
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index 976e658e38dc..5e94857dfca2 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -266,16 +266,16 @@ static const char *kinc_fetch_script =
"#!/usr/bin/env sh\n"
"if ! test -d \"$KBUILD_DIR\"\n"
"then\n"
-" exit -1\n"
+" exit 1\n"
"fi\n"
"if ! test -f \"$KBUILD_DIR/include/generated/autoconf.h\"\n"
"then\n"
-" exit -1\n"
+" exit 1\n"
"fi\n"
"TMPDIR=`mktemp -d`\n"
"if test -z \"$TMPDIR\"\n"
"then\n"
-" exit -1\n"
+" exit 1\n"
"fi\n"
"cat << EOF > $TMPDIR/Makefile\n"
"obj-y := dummy.o\n"
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 155d2570274f..da8fe57691b8 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -227,11 +227,16 @@ event_def: event_pmu |
event_pmu:
PE_NAME opt_pmu_config
{
+ struct parse_events_state *parse_state = _parse_state;
+ struct parse_events_error *error = parse_state->error;
struct list_head *list, *orig_terms, *terms;
if (parse_events_copy_term_list($2, &orig_terms))
YYABORT;
+ if (error)
+ error->idx = @1.first_column;
+
ALLOC_LIST(list);
if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
struct perf_pmu *pmu = NULL;
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index d2fb597c9a8c..3ba6a1742f91 100644
--- a/tools/perf/util/pmu.c
+++ b/tools/perf/util/pmu.c
@@ -234,6 +234,74 @@ static int perf_pmu__parse_snapshot(struct perf_pmu_alias *alias,
return 0;
}
+static void perf_pmu_assign_str(char *name, const char *field, char **old_str,
+ char **new_str)
+{
+ if (!*old_str)
+ goto set_new;
+
+ if (*new_str) { /* Have new string, check with old */
+ if (strcasecmp(*old_str, *new_str))
+ pr_debug("alias %s differs in field '%s'\n",
+ name, field);
+ zfree(old_str);
+ } else /* Nothing new --> keep old string */
+ return;
+set_new:
+ *old_str = *new_str;
+ *new_str = NULL;
+}
+
+static void perf_pmu_update_alias(struct perf_pmu_alias *old,
+ struct perf_pmu_alias *newalias)
+{
+ perf_pmu_assign_str(old->name, "desc", &old->desc, &newalias->desc);
+ perf_pmu_assign_str(old->name, "long_desc", &old->long_desc,
+ &newalias->long_desc);
+ perf_pmu_assign_str(old->name, "topic", &old->topic, &newalias->topic);
+ perf_pmu_assign_str(old->name, "metric_expr", &old->metric_expr,
+ &newalias->metric_expr);
+ perf_pmu_assign_str(old->name, "metric_name", &old->metric_name,
+ &newalias->metric_name);
+ perf_pmu_assign_str(old->name, "value", &old->str, &newalias->str);
+ old->scale = newalias->scale;
+ old->per_pkg = newalias->per_pkg;
+ old->snapshot = newalias->snapshot;
+ memcpy(old->unit, newalias->unit, sizeof(old->unit));
+}
+
+/* Delete an alias entry. */
+static void perf_pmu_free_alias(struct perf_pmu_alias *newalias)
+{
+ zfree(&newalias->name);
+ zfree(&newalias->desc);
+ zfree(&newalias->long_desc);
+ zfree(&newalias->topic);
+ zfree(&newalias->str);
+ zfree(&newalias->metric_expr);
+ zfree(&newalias->metric_name);
+ parse_events_terms__purge(&newalias->terms);
+ free(newalias);
+}
+
+/* Merge an alias, search in alias list. If this name is already
+ * present merge both of them to combine all information.
+ */
+static bool perf_pmu_merge_alias(struct perf_pmu_alias *newalias,
+ struct list_head *alist)
+{
+ struct perf_pmu_alias *a;
+
+ list_for_each_entry(a, alist, list) {
+ if (!strcasecmp(newalias->name, a->name)) {
+ perf_pmu_update_alias(a, newalias);
+ perf_pmu_free_alias(newalias);
+ return true;
+ }
+ }
+ return false;
+}
+
static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
char *desc, char *val,
char *long_desc, char *topic,
@@ -241,9 +309,11 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
char *metric_expr,
char *metric_name)
{
+ struct parse_events_term *term;
struct perf_pmu_alias *alias;
int ret;
int num;
+ char newval[256];
alias = malloc(sizeof(*alias));
if (!alias)
@@ -262,6 +332,27 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
return ret;
}
+ /* Scan event and remove leading zeroes, spaces, newlines, some
+ * platforms have terms specified as
+ * event=0x0091 (read from files ../<PMU>/events/<FILE>
+ * and terms specified as event=0x91 (read from JSON files).
+ *
+ * Rebuild string to make alias->str member comparable.
+ */
+ memset(newval, 0, sizeof(newval));
+ ret = 0;
+ list_for_each_entry(term, &alias->terms, list) {
+ if (ret)
+ ret += scnprintf(newval + ret, sizeof(newval) - ret,
+ ",");
+ if (term->type_val == PARSE_EVENTS__TERM_TYPE_NUM)
+ ret += scnprintf(newval + ret, sizeof(newval) - ret,
+ "%s=%#x", term->config, term->val.num);
+ else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR)
+ ret += scnprintf(newval + ret, sizeof(newval) - ret,
+ "%s=%s", term->config, term->val.str);
+ }
+
alias->name = strdup(name);
if (dir) {
/*
@@ -285,9 +376,10 @@ static int __perf_pmu__new_alias(struct list_head *list, char *dir, char *name,
snprintf(alias->unit, sizeof(alias->unit), "%s", unit);
}
alias->per_pkg = perpkg && sscanf(perpkg, "%d", &num) == 1 && num == 1;
- alias->str = strdup(val);
+ alias->str = strdup(newval);
- list_add_tail(&alias->list, list);
+ if (!perf_pmu_merge_alias(alias, list))
+ list_add_tail(&alias->list, list);
return 0;
}
@@ -303,6 +395,9 @@ static int perf_pmu__new_alias(struct list_head *list, char *dir, char *name, FI
buf[ret] = 0;
+ /* Remove trailing newline from sysfs file */
+ rtrim(buf);
+
return __perf_pmu__new_alias(list, dir, name, NULL, buf, NULL, NULL, NULL,
NULL, NULL, NULL);
}
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 46e9e19ab1ac..bc32e57d17be 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -908,14 +908,11 @@ static void python_process_tracepoint(struct perf_sample *sample,
if (_PyTuple_Resize(&t, n) == -1)
Py_FatalError("error resizing Python tuple");
- if (!dict) {
+ if (!dict)
call_object(handler, t, handler_name);
- } else {
+ else
call_object(handler, t, default_handler_name);
- Py_DECREF(dict);
- }
- Py_XDECREF(all_entries_dict);
Py_DECREF(t);
}
@@ -1235,7 +1232,6 @@ static void python_process_general_event(struct perf_sample *sample,
call_object(handler, t, handler_name);
- Py_DECREF(dict);
Py_DECREF(t);
}
@@ -1627,6 +1623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "# See the perf-script-python Documentation for the list "
"of available functions.\n\n");
+ fprintf(ofp, "from __future__ import print_function\n\n");
fprintf(ofp, "import os\n");
fprintf(ofp, "import sys\n\n");
@@ -1636,10 +1633,10 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "from Core import *\n\n\n");
fprintf(ofp, "def trace_begin():\n");
- fprintf(ofp, "\tprint \"in trace_begin\"\n\n");
+ fprintf(ofp, "\tprint(\"in trace_begin\")\n\n");
fprintf(ofp, "def trace_end():\n");
- fprintf(ofp, "\tprint \"in trace_end\"\n\n");
+ fprintf(ofp, "\tprint(\"in trace_end\")\n\n");
while ((event = trace_find_next_event(pevent, event))) {
fprintf(ofp, "def %s__%s(", event->system, event->name);
@@ -1675,7 +1672,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
"common_secs, common_nsecs,\n\t\t\t"
"common_pid, common_comm)\n\n");
- fprintf(ofp, "\t\tprint \"");
+ fprintf(ofp, "\t\tprint(\"");
not_first = 0;
count = 0;
@@ -1736,31 +1733,31 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
fprintf(ofp, "%s", f->name);
}
- fprintf(ofp, ")\n\n");
+ fprintf(ofp, "))\n\n");
- fprintf(ofp, "\t\tprint 'Sample: {'+"
- "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+ fprintf(ofp, "\t\tprint('Sample: {'+"
+ "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
fprintf(ofp, "\t\tfor node in common_callchain:");
fprintf(ofp, "\n\t\t\tif 'sym' in node:");
- fprintf(ofp, "\n\t\t\t\tprint \"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name'])");
+ fprintf(ofp, "\n\t\t\t\tprint(\"\\t[%%x] %%s\" %% (node['ip'], node['sym']['name']))");
fprintf(ofp, "\n\t\t\telse:");
- fprintf(ofp, "\n\t\t\t\tprint \"\t[%%x]\" %% (node['ip'])\n\n");
- fprintf(ofp, "\t\tprint \"\\n\"\n\n");
+ fprintf(ofp, "\n\t\t\t\tprint(\"\t[%%x]\" %% (node['ip']))\n\n");
+ fprintf(ofp, "\t\tprint()\n\n");
}
fprintf(ofp, "def trace_unhandled(event_name, context, "
"event_fields_dict, perf_sample_dict):\n");
- fprintf(ofp, "\t\tprint get_dict_as_string(event_fields_dict)\n");
- fprintf(ofp, "\t\tprint 'Sample: {'+"
- "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}'\n\n");
+ fprintf(ofp, "\t\tprint(get_dict_as_string(event_fields_dict))\n");
+ fprintf(ofp, "\t\tprint('Sample: {'+"
+ "get_dict_as_string(perf_sample_dict['sample'], ', ')+'}')\n\n");
fprintf(ofp, "def print_header("
"event_name, cpu, secs, nsecs, pid, comm):\n"
- "\tprint \"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
- "(event_name, cpu, secs, nsecs, pid, comm),\n\n");
+ "\tprint(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \" %% \\\n\t"
+ "(event_name, cpu, secs, nsecs, pid, comm), end=\"\")\n\n");
fprintf(ofp, "def get_dict_as_string(a_dict, delimiter=' '):\n"
"\treturn delimiter.join"
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 7cf2d5cc038e..8bf302cafcec 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -112,6 +112,8 @@ struct hist_entry {
char level;
u8 filtered;
+
+ u16 callchain_size;
union {
/*
* Since perf diff only supports the stdio output, TUI
@@ -153,7 +155,7 @@ struct hist_entry {
static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
{
- return hists__has_callchains(he->hists);
+ return he->callchain_size != 0;
}
static inline bool hist_entry__has_pairs(struct hist_entry *he)
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index ca9ef7017624..d39e4ff7d0bf 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -56,7 +56,7 @@ name as necessary to disambiguate it from others is necessary. Note that option
.PP
\fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group.
.PP
-\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds".
+\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC".
The column name "all" can be used to enable all disabled-by-default built-in counters.
.PP
\fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d6cff3070ebd..4d14bbbf9b63 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -109,6 +109,7 @@ unsigned int has_hwp_activity_window; /* IA32_HWP_REQUEST[bits 41:32] */
unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
unsigned int has_misc_feature_control;
+unsigned int first_counter_read = 1;
#define RAPL_PKG (1 << 0)
/* 0x610 MSR_PKG_POWER_LIMIT */
@@ -170,6 +171,8 @@ struct thread_data {
unsigned long long irq_count;
unsigned int smi_count;
unsigned int cpu_id;
+ unsigned int apic_id;
+ unsigned int x2apic_id;
unsigned int flags;
#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
@@ -381,19 +384,23 @@ int get_msr(int cpu, off_t offset, unsigned long long *msr)
}
/*
- * Each string in this array is compared in --show and --hide cmdline.
- * Thus, strings that are proper sub-sets must follow their more specific peers.
+ * This list matches the column headers, except
+ * 1. built-in only, the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, we can't have strings that contain them
+ * matching on them for --show and --hide.
*/
struct msr_counter bic[] = {
{ 0x0, "usec" },
{ 0x0, "Time_Of_Day_Seconds" },
{ 0x0, "Package" },
+ { 0x0, "Node" },
{ 0x0, "Avg_MHz" },
+ { 0x0, "Busy%" },
{ 0x0, "Bzy_MHz" },
{ 0x0, "TSC_MHz" },
{ 0x0, "IRQ" },
{ 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
- { 0x0, "Busy%" },
+ { 0x0, "sysfs" },
{ 0x0, "CPU%c1" },
{ 0x0, "CPU%c3" },
{ 0x0, "CPU%c6" },
@@ -424,73 +431,73 @@ struct msr_counter bic[] = {
{ 0x0, "Cor_J" },
{ 0x0, "GFX_J" },
{ 0x0, "RAM_J" },
- { 0x0, "Core" },
- { 0x0, "CPU" },
{ 0x0, "Mod%c6" },
- { 0x0, "sysfs" },
{ 0x0, "Totl%C0" },
{ 0x0, "Any%C0" },
{ 0x0, "GFX%C0" },
{ 0x0, "CPUGFX%" },
- { 0x0, "Node%" },
+ { 0x0, "Core" },
+ { 0x0, "CPU" },
+ { 0x0, "APIC" },
+ { 0x0, "X2APIC" },
};
-
-
#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
#define BIC_USEC (1ULL << 0)
#define BIC_TOD (1ULL << 1)
#define BIC_Package (1ULL << 2)
-#define BIC_Avg_MHz (1ULL << 3)
-#define BIC_Bzy_MHz (1ULL << 4)
-#define BIC_TSC_MHz (1ULL << 5)
-#define BIC_IRQ (1ULL << 6)
-#define BIC_SMI (1ULL << 7)
-#define BIC_Busy (1ULL << 8)
-#define BIC_CPU_c1 (1ULL << 9)
-#define BIC_CPU_c3 (1ULL << 10)
-#define BIC_CPU_c6 (1ULL << 11)
-#define BIC_CPU_c7 (1ULL << 12)
-#define BIC_ThreadC (1ULL << 13)
-#define BIC_CoreTmp (1ULL << 14)
-#define BIC_CoreCnt (1ULL << 15)
-#define BIC_PkgTmp (1ULL << 16)
-#define BIC_GFX_rc6 (1ULL << 17)
-#define BIC_GFXMHz (1ULL << 18)
-#define BIC_Pkgpc2 (1ULL << 19)
-#define BIC_Pkgpc3 (1ULL << 20)
-#define BIC_Pkgpc6 (1ULL << 21)
-#define BIC_Pkgpc7 (1ULL << 22)
-#define BIC_Pkgpc8 (1ULL << 23)
-#define BIC_Pkgpc9 (1ULL << 24)
-#define BIC_Pkgpc10 (1ULL << 25)
-#define BIC_CPU_LPI (1ULL << 26)
-#define BIC_SYS_LPI (1ULL << 27)
-#define BIC_PkgWatt (1ULL << 26)
-#define BIC_CorWatt (1ULL << 27)
-#define BIC_GFXWatt (1ULL << 28)
-#define BIC_PkgCnt (1ULL << 29)
-#define BIC_RAMWatt (1ULL << 30)
-#define BIC_PKG__ (1ULL << 31)
-#define BIC_RAM__ (1ULL << 32)
-#define BIC_Pkg_J (1ULL << 33)
-#define BIC_Cor_J (1ULL << 34)
-#define BIC_GFX_J (1ULL << 35)
-#define BIC_RAM_J (1ULL << 36)
-#define BIC_Core (1ULL << 37)
-#define BIC_CPU (1ULL << 38)
-#define BIC_Mod_c6 (1ULL << 39)
-#define BIC_sysfs (1ULL << 40)
-#define BIC_Totl_c0 (1ULL << 41)
-#define BIC_Any_c0 (1ULL << 42)
-#define BIC_GFX_c0 (1ULL << 43)
-#define BIC_CPUGFX (1ULL << 44)
-#define BIC_Node (1ULL << 45)
-
-#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD)
+#define BIC_Node (1ULL << 3)
+#define BIC_Avg_MHz (1ULL << 4)
+#define BIC_Busy (1ULL << 5)
+#define BIC_Bzy_MHz (1ULL << 6)
+#define BIC_TSC_MHz (1ULL << 7)
+#define BIC_IRQ (1ULL << 8)
+#define BIC_SMI (1ULL << 9)
+#define BIC_sysfs (1ULL << 10)
+#define BIC_CPU_c1 (1ULL << 11)
+#define BIC_CPU_c3 (1ULL << 12)
+#define BIC_CPU_c6 (1ULL << 13)
+#define BIC_CPU_c7 (1ULL << 14)
+#define BIC_ThreadC (1ULL << 15)
+#define BIC_CoreTmp (1ULL << 16)
+#define BIC_CoreCnt (1ULL << 17)
+#define BIC_PkgTmp (1ULL << 18)
+#define BIC_GFX_rc6 (1ULL << 19)
+#define BIC_GFXMHz (1ULL << 20)
+#define BIC_Pkgpc2 (1ULL << 21)
+#define BIC_Pkgpc3 (1ULL << 22)
+#define BIC_Pkgpc6 (1ULL << 23)
+#define BIC_Pkgpc7 (1ULL << 24)
+#define BIC_Pkgpc8 (1ULL << 25)
+#define BIC_Pkgpc9 (1ULL << 26)
+#define BIC_Pkgpc10 (1ULL << 27)
+#define BIC_CPU_LPI (1ULL << 28)
+#define BIC_SYS_LPI (1ULL << 29)
+#define BIC_PkgWatt (1ULL << 30)
+#define BIC_CorWatt (1ULL << 31)
+#define BIC_GFXWatt (1ULL << 32)
+#define BIC_PkgCnt (1ULL << 33)
+#define BIC_RAMWatt (1ULL << 34)
+#define BIC_PKG__ (1ULL << 35)
+#define BIC_RAM__ (1ULL << 36)
+#define BIC_Pkg_J (1ULL << 37)
+#define BIC_Cor_J (1ULL << 38)
+#define BIC_GFX_J (1ULL << 39)
+#define BIC_RAM_J (1ULL << 40)
+#define BIC_Mod_c6 (1ULL << 41)
+#define BIC_Totl_c0 (1ULL << 42)
+#define BIC_Any_c0 (1ULL << 43)
+#define BIC_GFX_c0 (1ULL << 44)
+#define BIC_CPUGFX (1ULL << 45)
+#define BIC_Core (1ULL << 46)
+#define BIC_CPU (1ULL << 47)
+#define BIC_APIC (1ULL << 48)
+#define BIC_X2APIC (1ULL << 49)
+
+#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs;
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
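A minimal sketch (not part of the patch) of how the two masks above interact: --hide clears bits in bic_enabled, probing the platform sets bits in bic_present, and DO_BIC() reports a column only when both are set. Assuming only the definitions above:

	/* illustrative only: hide a built-in column, then test it */
	bic_enabled &= ~BIC_Busy;	/* effect of "--hide Busy%" */
	bic_present |= BIC_Busy;	/* the platform reports the counter */
	if (DO_BIC(BIC_Busy))		/* false here: the user hid the column */
		printf("Busy%%");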
@@ -517,17 +524,34 @@ void help(void)
"when COMMAND completes.\n"
"If no COMMAND is specified, turbostat wakes every 5-seconds\n"
"to print statistics, until interrupted.\n"
- "--add add a counter\n"
- " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
- "--cpu cpu-set limit output to summary plus cpu-set:\n"
- " {core | package | j,k,l..m,n-p }\n"
- "--quiet skip decoding system configuration header\n"
- "--interval sec.subsec Override default 5-second measurement interval\n"
- "--help print this help message\n"
- "--list list column headers only\n"
- "--num_iterations num number of the measurement iterations\n"
- "--out file create or truncate \"file\" for all output\n"
- "--version print version information\n"
+ " -a, --add add a counter\n"
+ " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
+ " -c, --cpu cpu-set limit output to summary plus cpu-set:\n"
+ " {core | package | j,k,l..m,n-p }\n"
+ " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n"
+ " -D, --Dump displays the raw counter values\n"
+ " -e, --enable [all | column]\n"
+	"			enable all columns, or only the specified disabled column\n"
+ " -H, --hide [column|column,column,...]\n"
+ " hide the specified column(s)\n"
+ " -i, --interval sec.subsec\n"
+ " Override default 5-second measurement interval\n"
+ " -J, --Joules displays energy in Joules instead of Watts\n"
+ " -l, --list list column headers only\n"
+ " -n, --num_iterations num\n"
+	"			number of measurement iterations\n"
+ " -o, --out file\n"
+ " create or truncate \"file\" for all output\n"
+ " -q, --quiet skip decoding system configuration header\n"
+ " -s, --show [column|column,column,...]\n"
+ " show only the specified column(s)\n"
+ " -S, --Summary\n"
+ " limits output to 1-line system summary per interval\n"
+ " -T, --TCC temperature\n"
+ " sets the Thermal Control Circuit temperature in\n"
+ " degrees Celsius\n"
+ " -h, --help print this help message\n"
+ " -v, --version print version information\n"
"\n"
"For more help, run \"man turbostat\"\n");
}
@@ -601,6 +625,10 @@ void print_header(char *delim)
outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_APIC))
+ outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_X2APIC))
+ outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
if (DO_BIC(BIC_Avg_MHz))
outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
if (DO_BIC(BIC_Busy))
@@ -880,6 +908,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_APIC))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_X2APIC))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
} else {
if (DO_BIC(BIC_Package)) {
if (p)
@@ -904,6 +936,10 @@ int format_counters(struct thread_data *t, struct core_data *c,
}
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
+ if (DO_BIC(BIC_APIC))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+ if (DO_BIC(BIC_X2APIC))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
}
if (DO_BIC(BIC_Avg_MHz))
@@ -1231,6 +1267,12 @@ delta_thread(struct thread_data *new, struct thread_data *old,
int i;
struct msr_counter *mp;
+ /* we run cpuid just the 1st time, copy the results */
+ if (DO_BIC(BIC_APIC))
+ new->apic_id = old->apic_id;
+ if (DO_BIC(BIC_X2APIC))
+ new->x2apic_id = old->x2apic_id;
+
/*
* the timestamps from start of measurement interval are in "old"
* the timestamp from end of measurement interval are in "new"
@@ -1393,6 +1435,12 @@ int sum_counters(struct thread_data *t, struct core_data *c,
int i;
struct msr_counter *mp;
+	/* copy the unchanging apic_ids */
+ if (DO_BIC(BIC_APIC))
+ average.threads.apic_id = t->apic_id;
+ if (DO_BIC(BIC_X2APIC))
+ average.threads.x2apic_id = t->x2apic_id;
+
/* remember first tv_begin */
if (average.threads.tv_begin.tv_sec == 0)
average.threads.tv_begin = t->tv_begin;
@@ -1619,6 +1667,34 @@ int get_mp(int cpu, struct msr_counter *mp, unsigned long long *counterp)
return 0;
}
+void get_apic_id(struct thread_data *t)
+{
+ unsigned int eax, ebx, ecx, edx, max_level;
+
+ eax = ebx = ecx = edx = 0;
+
+ if (!genuine_intel)
+ return;
+
+ __cpuid(0, max_level, ebx, ecx, edx);
+
+ __cpuid(1, eax, ebx, ecx, edx);
+ t->apic_id = (ebx >> 24) & 0xf;
+
+ if (max_level < 0xb)
+ return;
+
+ if (!DO_BIC(BIC_X2APIC))
+ return;
+
+ ecx = 0;
+ __cpuid(0xb, eax, ebx, ecx, edx);
+ t->x2apic_id = edx;
+
+ if (debug && (t->apic_id != t->x2apic_id))
+ fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+}
+
/*
* get_counters(...)
* migrate to cpu
@@ -1632,7 +1708,6 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
struct msr_counter *mp;
int i;
-
gettimeofday(&t->tv_begin, (struct timezone *)NULL);
if (cpu_migrate(cpu)) {
@@ -1640,6 +1715,8 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
return -1;
}
+ if (first_counter_read)
+ get_apic_id(t);
retry:
t->tsc = rdtsc(); /* we are running on local CPU of interest */
@@ -2432,6 +2509,12 @@ void set_node_data(void)
if (pni[pkg].count > topo.nodes_per_pkg)
topo.nodes_per_pkg = pni[0].count;
+ /* Fake 1 node per pkg for machines that don't
+ * expose nodes and thus avoid -nan results
+ */
+ if (topo.nodes_per_pkg == 0)
+ topo.nodes_per_pkg = 1;
+
for (cpu = 0; cpu < topo.num_cpus; cpu++) {
pkg = cpus[cpu].physical_package_id;
node = cpus[cpu].physical_node_id;
@@ -2879,6 +2962,7 @@ void do_sleep(void)
}
}
+
void turbostat_loop()
{
int retval;
@@ -2892,6 +2976,7 @@ restart:
snapshot_proc_sysfs_files();
retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+ first_counter_read = 0;
if (retval < -1) {
exit(retval);
} else if (retval == -1) {
@@ -4392,7 +4477,7 @@ void process_cpuid()
if (!quiet) {
fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
max_level, family, model, stepping, family, model, stepping);
- fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
+ fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
ecx & (1 << 0) ? "SSE3" : "-",
ecx & (1 << 3) ? "MONITOR" : "-",
ecx & (1 << 6) ? "SMX" : "-",
@@ -4401,6 +4486,7 @@ void process_cpuid()
edx & (1 << 4) ? "TSC" : "-",
edx & (1 << 5) ? "MSR" : "-",
edx & (1 << 22) ? "ACPI-TM" : "-",
+ edx & (1 << 28) ? "HT" : "-",
edx & (1 << 29) ? "TM" : "-");
}
@@ -4652,7 +4738,6 @@ void process_cpuid()
return;
}
-
/*
* in /dev/cpu/ return success for names that are numbers
* ie. filter out ".", "..", "microcode".
@@ -4842,6 +4927,13 @@ void init_counter(struct thread_data *thread_base, struct core_data *core_base,
struct core_data *c;
struct pkg_data *p;
+
+ /* Workaround for systems where physical_node_id==-1
+ * and logical_node_id==(-1 - topo.num_cpus)
+ */
+ if (node_id < 0)
+ node_id = 0;
+
t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
c = GET_CORE(core_base, core_id, node_id, pkg_id);
p = GET_PKG(pkg_base, pkg_id);
@@ -4946,6 +5038,7 @@ int fork_it(char **argv)
snapshot_proc_sysfs_files();
status = for_all_cpus(get_counters, EVEN_COUNTERS);
+ first_counter_read = 0;
if (status)
exit(status);
/* clear affinity side-effect of get_counters() */
@@ -5009,7 +5102,7 @@ int get_and_dump_counters(void)
}
void print_version() {
- fprintf(outf, "turbostat version 18.06.01"
+ fprintf(outf, "turbostat version 18.06.20"
" - Len Brown <lenb@kernel.org>\n");
}
@@ -5381,7 +5474,7 @@ void cmdline(int argc, char **argv)
break;
case 'e':
/* --enable specified counter */
- bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+ bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
break;
case 'd':
debug++;
@@ -5465,7 +5558,6 @@ void cmdline(int argc, char **argv)
int main(int argc, char **argv)
{
outf = stderr;
-
cmdline(argc, argv);
if (!quiet)
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index a8fb63edcf89..e2926f72a821 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -1991,8 +1991,7 @@ static void nfit_test0_setup(struct nfit_test *t)
pcap->header.type = ACPI_NFIT_TYPE_CAPABILITIES;
pcap->header.length = sizeof(*pcap);
pcap->highest_capability = 1;
- pcap->capabilities = ACPI_NFIT_CAPABILITY_CACHE_FLUSH |
- ACPI_NFIT_CAPABILITY_MEM_FLUSH;
+ pcap->capabilities = ACPI_NFIT_CAPABILITY_MEM_FLUSH;
offset += pcap->header.length;
if (t->setup_hotplug) {
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 7a6214e9ae58..a362e3d7abc6 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -105,7 +105,7 @@ $(OUTPUT)/test_xdp_noinline.o: CLANG_FLAGS += -fno-inline
BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
-BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --version 2>&1 | grep LLVM)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
ifneq ($(BTF_LLC_PROBE),)
ifneq ($(BTF_PAHOLE_PROBE),)
diff --git a/tools/testing/selftests/bpf/config b/tools/testing/selftests/bpf/config
index 1eefe211a4a8..b4994a94968b 100644
--- a/tools/testing/selftests/bpf/config
+++ b/tools/testing/selftests/bpf/config
@@ -6,4 +6,15 @@ CONFIG_TEST_BPF=m
CONFIG_CGROUP_BPF=y
CONFIG_NETDEVSIM=m
CONFIG_NET_CLS_ACT=y
+CONFIG_NET_SCHED=y
CONFIG_NET_SCH_INGRESS=y
+CONFIG_NET_IPIP=y
+CONFIG_IPV6=y
+CONFIG_NET_IPGRE_DEMUX=y
+CONFIG_NET_IPGRE=y
+CONFIG_IPV6_GRE=y
+CONFIG_CRYPTO_USER_API_HASH=m
+CONFIG_CRYPTO_HMAC=m
+CONFIG_CRYPTO_SHA256=m
+CONFIG_VXLAN=y
+CONFIG_GENEVE=y
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
index 35669ccd4d23..9df0d2ac45f8 100755
--- a/tools/testing/selftests/bpf/test_kmod.sh
+++ b/tools/testing/selftests/bpf/test_kmod.sh
@@ -1,6 +1,15 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ "$(id -u)" != "0" ]; then
+ echo $msg please run this as root >&2
+ exit $ksft_skip
+fi
+
SRC_TREE=../../../../
test_run()
diff --git a/tools/testing/selftests/bpf/test_lirc_mode2.sh b/tools/testing/selftests/bpf/test_lirc_mode2.sh
index ce2e15e4f976..677686198df3 100755
--- a/tools/testing/selftests/bpf/test_lirc_mode2.sh
+++ b/tools/testing/selftests/bpf/test_lirc_mode2.sh
@@ -1,6 +1,15 @@
#!/bin/bash
# SPDX-License-Identifier: GPL-2.0
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+ echo $msg please run this as root >&2
+ exit $ksft_skip
+fi
+
GREEN='\033[0;92m'
RED='\033[0;31m'
NC='\033[0m' # No Color
diff --git a/tools/testing/selftests/bpf/test_lwt_seg6local.sh b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
index 1c77994b5e71..270fa8f49573 100755
--- a/tools/testing/selftests/bpf/test_lwt_seg6local.sh
+++ b/tools/testing/selftests/bpf/test_lwt_seg6local.sh
@@ -21,6 +21,15 @@
# An UDP datagram is sent from fb00::1 to fb00::6. The test succeeds if this
# datagram can be read on NS6 when binding to fb00::6.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+msg="skip all tests:"
+if [ $UID != 0 ]; then
+ echo $msg please run this as root >&2
+ exit $ksft_skip
+fi
+
TMP_FILE="/tmp/selftest_lwt_seg6local.txt"
cleanup()
diff --git a/tools/testing/selftests/bpf/test_offload.py b/tools/testing/selftests/bpf/test_offload.py
index e78aad0a68bb..be800d0e7a84 100755
--- a/tools/testing/selftests/bpf/test_offload.py
+++ b/tools/testing/selftests/bpf/test_offload.py
@@ -163,6 +163,10 @@ def bpftool(args, JSON=True, ns="", fail=True):
def bpftool_prog_list(expected=None, ns=""):
_, progs = bpftool("prog show", JSON=True, ns=ns, fail=True)
+ # Remove the base progs
+ for p in base_progs:
+ if p in progs:
+ progs.remove(p)
if expected is not None:
if len(progs) != expected:
fail(True, "%d BPF programs loaded, expected %d" %
@@ -171,6 +175,10 @@ def bpftool_prog_list(expected=None, ns=""):
def bpftool_map_list(expected=None, ns=""):
_, maps = bpftool("map show", JSON=True, ns=ns, fail=True)
+ # Remove the base maps
+ for m in base_maps:
+ if m in maps:
+ maps.remove(m)
if expected is not None:
if len(maps) != expected:
fail(True, "%d BPF maps loaded, expected %d" %
@@ -585,8 +593,8 @@ skip(os.getuid() != 0, "test must be run as root")
# Check tools
ret, progs = bpftool("prog", fail=False)
skip(ret != 0, "bpftool not installed")
-# Check no BPF programs are loaded
-skip(len(progs) != 0, "BPF programs already loaded on the system")
+base_progs = progs
+_, base_maps = bpftool("map")
# Check netdevsim
ret, out = cmd("modprobe netdevsim", fail=False)
diff --git a/tools/testing/selftests/bpf/test_sockmap.c b/tools/testing/selftests/bpf/test_sockmap.c
index 05c8cb71724a..9e78df207919 100644
--- a/tools/testing/selftests/bpf/test_sockmap.c
+++ b/tools/testing/selftests/bpf/test_sockmap.c
@@ -1413,18 +1413,12 @@ out:
int main(int argc, char **argv)
{
- struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
int iov_count = 1, length = 1024, rate = 1;
struct sockmap_options options = {0};
int opt, longindex, err, cg_fd = 0;
char *bpf_file = BPF_SOCKMAP_FILENAME;
int test = PING_PONG;
- if (setrlimit(RLIMIT_MEMLOCK, &r)) {
- perror("setrlimit(RLIMIT_MEMLOCK)");
- return 1;
- }
-
if (argc < 2)
return test_suite();
diff --git a/tools/testing/selftests/bpf/test_tunnel.sh b/tools/testing/selftests/bpf/test_tunnel.sh
index aeb2901f21f4..546aee3e9fb4 100755
--- a/tools/testing/selftests/bpf/test_tunnel.sh
+++ b/tools/testing/selftests/bpf/test_tunnel.sh
@@ -608,28 +608,26 @@ setup_xfrm_tunnel()
test_xfrm_tunnel()
{
config_device
- #tcpdump -nei veth1 ip &
- output=$(mktemp)
- cat /sys/kernel/debug/tracing/trace_pipe | tee $output &
- setup_xfrm_tunnel
+ > /sys/kernel/debug/tracing/trace
+ setup_xfrm_tunnel
tc qdisc add dev veth1 clsact
tc filter add dev veth1 proto ip ingress bpf da obj test_tunnel_kern.o \
sec xfrm_get_state
ip netns exec at_ns0 ping $PING_ARG 10.1.1.200
sleep 1
- grep "reqid 1" $output
+ grep "reqid 1" /sys/kernel/debug/tracing/trace
check_err $?
- grep "spi 0x1" $output
+ grep "spi 0x1" /sys/kernel/debug/tracing/trace
check_err $?
- grep "remote ip 0xac100164" $output
+ grep "remote ip 0xac100164" /sys/kernel/debug/tracing/trace
check_err $?
cleanup
if [ $ret -ne 0 ]; then
- echo -e ${RED}"FAIL: xfrm tunnel"${NC}
- return 1
- fi
- echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
+ echo -e ${RED}"FAIL: xfrm tunnel"${NC}
+ return 1
+ fi
+ echo -e ${GREEN}"PASS: xfrm tunnel"${NC}
}
attach_bpf()
@@ -657,6 +655,10 @@ cleanup()
ip link del ip6geneve11 2> /dev/null
ip link del erspan11 2> /dev/null
ip link del ip6erspan11 2> /dev/null
+ ip xfrm policy delete dir out src 10.1.1.200/32 dst 10.1.1.100/32 2> /dev/null
+ ip xfrm policy delete dir in src 10.1.1.100/32 dst 10.1.1.200/32 2> /dev/null
+ ip xfrm state delete src 172.16.1.100 dst 172.16.1.200 proto esp spi 0x1 2> /dev/null
+ ip xfrm state delete src 172.16.1.200 dst 172.16.1.100 proto esp spi 0x2 2> /dev/null
}
cleanup_exit()
@@ -668,7 +670,7 @@ cleanup_exit()
check()
{
- ip link help $1 2>&1 | grep -q "^Usage:"
+ ip link help 2>&1 | grep -q "\s$1\s"
if [ $? -ne 0 ];then
echo "SKIP $1: iproute2 not support"
cleanup
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 2ecd27b670d7..41106d9d5cc7 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -4975,6 +4975,24 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_LWT_XMIT,
},
{
+ "make headroom for LWT_XMIT",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
+ BPF_MOV64_IMM(BPF_REG_2, 34),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ /* split for s390 to succeed */
+ BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
+ BPF_MOV64_IMM(BPF_REG_2, 42),
+ BPF_MOV64_IMM(BPF_REG_3, 0),
+ BPF_EMIT_CALL(BPF_FUNC_skb_change_head),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_LWT_XMIT,
+ },
+ {
"invalid access of tc_classid for LWT_IN",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
@@ -11987,6 +12005,46 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_XDP,
},
{
+ "xadd/w check whether src/dst got mangled, 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+ },
+ {
+ "xadd/w check whether src/dst got mangled, 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_0),
+ BPF_MOV64_REG(BPF_REG_7, BPF_REG_10),
+ BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_STX_XADD(BPF_W, BPF_REG_10, BPF_REG_0, -8),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_6, BPF_REG_0, 3),
+ BPF_JMP_REG(BPF_JNE, BPF_REG_7, BPF_REG_10, 2),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_10, -8),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 42),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ .retval = 3,
+ },
+ {
"bpf_get_stack return R0 within range",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
@@ -12554,8 +12612,11 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
}
if (fd_prog >= 0) {
+ __u8 tmp[TEST_DATA_LEN << 2];
+ __u32 size_tmp = sizeof(tmp);
+
err = bpf_prog_test_run(fd_prog, 1, test->data,
- sizeof(test->data), NULL, NULL,
+ sizeof(test->data), tmp, &size_tmp,
&retval, NULL);
if (err && errno != 524/*ENOTSUPP*/ && errno != EPERM) {
printf("Unexpected bpf_prog_test_run error\n");
diff --git a/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
new file mode 100644
index 000000000000..3b1f45e13a2e
--- /dev/null
+++ b/tools/testing/selftests/ftrace/test.d/00basic/snapshot.tc
@@ -0,0 +1,28 @@
+#!/bin/sh
+# description: Snapshot and tracing setting
+# flags: instance
+
+[ ! -f snapshot ] && exit_unsupported
+
+echo "Set tracing off"
+echo 0 > tracing_on
+
+echo "Allocate and take a snapshot"
+echo 1 > snapshot
+
+# Since trace buffer is empty, snapshot is also empty, but allocated
+grep -q "Snapshot is allocated" snapshot
+
+echo "Ensure keep tracing off"
+test `cat tracing_on` -eq 0
+
+echo "Set tracing on"
+echo 1 > tracing_on
+
+echo "Take a snapshot again"
+echo 1 > snapshot
+
+echo "Ensure keep tracing on"
+test `cat tracing_on` -eq 1
+
+exit 0
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index 128e548aa377..1a0ac3a29ec5 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -12,3 +12,4 @@ tcp_mmap
udpgso
udpgso_bench_rx
udpgso_bench_tx
+tcp_inq
diff --git a/tools/testing/selftests/net/config b/tools/testing/selftests/net/config
index 7ba089b33e8b..cd3a2f1545b5 100644
--- a/tools/testing/selftests/net/config
+++ b/tools/testing/selftests/net/config
@@ -12,3 +12,5 @@ CONFIG_NET_IPVTI=y
CONFIG_INET6_XFRM_MODE_TUNNEL=y
CONFIG_IPV6_VTI=y
CONFIG_DUMMY=y
+CONFIG_BRIDGE=y
+CONFIG_VLAN_8021Q=y
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 78245d60d8bc..0f45633bd634 100644..100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -740,13 +740,6 @@ ipv6_rt_add()
run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
log_test $? 2 "Attempt to add duplicate route - reject route"
- # iproute2 prepend only sets NLM_F_CREATE
- # - adds a new route; does NOT convert existing route to ECMP
- add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
- run_cmd "$IP -6 ro prepend 2001:db8:104::/64 via 2001:db8:103::2"
- check_route6 "2001:db8:104::/64 via 2001:db8:101::2 dev veth1 metric 1024 2001:db8:104::/64 via 2001:db8:103::2 dev veth3 metric 1024"
- log_test $? 0 "Add new route for existing prefix (w/o NLM_F_EXCL)"
-
# route append with same prefix adds a new route
# - iproute2 sets NLM_F_CREATE | NLM_F_APPEND
add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
@@ -754,27 +747,6 @@ ipv6_rt_add()
check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
log_test $? 0 "Append nexthop to existing route - gw"
- add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
- run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
- check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop dev veth3 weight 1"
- log_test $? 0 "Append nexthop to existing route - dev only"
-
- # multipath route can not have a nexthop that is a reject route
- add_route6 "2001:db8:104::/64" "via 2001:db8:101::2"
- run_cmd "$IP -6 ro append unreachable 2001:db8:104::/64"
- log_test $? 2 "Append nexthop to existing route - reject route"
-
- # reject route can not be converted to multipath route
- run_cmd "$IP -6 ro flush 2001:db8:104::/64"
- run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
- run_cmd "$IP -6 ro append 2001:db8:104::/64 via 2001:db8:103::2"
- log_test $? 2 "Append nexthop to existing reject route - gw"
-
- run_cmd "$IP -6 ro flush 2001:db8:104::/64"
- run_cmd "$IP -6 ro add unreachable 2001:db8:104::/64"
- run_cmd "$IP -6 ro append 2001:db8:104::/64 dev veth3"
- log_test $? 2 "Append nexthop to existing reject route - dev only"
-
# insert mpath directly
add_route6 "2001:db8:104::/64" "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::2 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
@@ -819,13 +791,6 @@ ipv6_rt_replace_single()
check_route6 "2001:db8:104::/64 metric 1024 nexthop via 2001:db8:101::3 dev veth1 weight 1 nexthop via 2001:db8:103::2 dev veth3 weight 1"
log_test $? 0 "Single path with multipath"
- # single path with reject
- #
- add_initial_route6 "nexthop via 2001:db8:101::2"
- run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
- check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
- log_test $? 0 "Single path with reject route"
-
# single path with single path using MULTIPATH attribute
#
add_initial_route6 "via 2001:db8:101::2"
@@ -873,12 +838,6 @@ ipv6_rt_replace_mpath()
check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
log_test $? 0 "Multipath with single path via multipath attribute"
- # multipath with reject
- add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
- run_cmd "$IP -6 ro replace unreachable 2001:db8:104::/64"
- check_route6 "unreachable 2001:db8:104::/64 dev lo metric 1024"
- log_test $? 0 "Multipath with reject route"
-
# route replace fails - invalid nexthop 1
add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
diff --git a/tools/testing/selftests/net/udpgso_bench.sh b/tools/testing/selftests/net/udpgso_bench.sh
index 792fa4d0285e..850767befa47 100755
--- a/tools/testing/selftests/net/udpgso_bench.sh
+++ b/tools/testing/selftests/net/udpgso_bench.sh
@@ -35,9 +35,6 @@ run_udp() {
echo "udp gso"
run_in_netns ${args} -S
-
- echo "udp gso zerocopy"
- run_in_netns ${args} -S -z
}
run_tcp() {
diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests
index 6ccb154cb4aa..22f8df1ad7d4 100755
--- a/tools/testing/selftests/pstore/pstore_post_reboot_tests
+++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests
@@ -7,13 +7,16 @@
#
# Released under the terms of the GPL v2.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
. ./common_tests
if [ -e $REBOOT_FLAG ]; then
rm $REBOOT_FLAG
else
prlog "pstore_crash_test has not been executed yet. we skip further tests."
- exit 0
+ exit $ksft_skip
fi
prlog -n "Mounting pstore filesystem ... "
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 6a9f602a8718..615252331813 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -137,6 +137,30 @@ unsigned int yield_mod_cnt, nr_abort;
"subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
"bne 222b\n\t" \
"333:\n\t"
+
+#elif defined(__mips__)
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1]"m"(loop_cnt[1]) \
+ , [loop_cnt_2]"m"(loop_cnt[2]) \
+ , [loop_cnt_3]"m"(loop_cnt[3]) \
+ , [loop_cnt_4]"m"(loop_cnt[4]) \
+ , [loop_cnt_5]"m"(loop_cnt[5]) \
+ , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "$5"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+ "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+ "beqz " INJECT_ASM_REG ", 333f\n\t" \
+ "222:\n\t" \
+ "addiu " INJECT_ASM_REG ", -1\n\t" \
+ "bnez " INJECT_ASM_REG ", 222b\n\t" \
+ "333:\n\t"
+
#else
#error unsupported target
#endif
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
index 3b055f9aeaab..3cea19877227 100644
--- a/tools/testing/selftests/rseq/rseq-arm.h
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -57,6 +57,7 @@ do { \
#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
abort_label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
+ ".balign 32\n\t" \
__rseq_str(table_label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
new file mode 100644
index 000000000000..7f48ecf46994
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-mips.h
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ *
+ * Based on rseq-arm.h:
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG 0x53053053
+
+#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_smp_mb(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_smp_mb(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#if _MIPS_SZLONG == 64
+# define LONG ".dword"
+# define LONG_LA "dla"
+# define LONG_L "ld"
+# define LONG_S "sd"
+# define LONG_ADDI "daddiu"
+# define U32_U64_PAD(x) x
+#elif _MIPS_SZLONG == 32
+# define LONG ".word"
+# define LONG_LA "la"
+# define LONG_L "lw"
+# define LONG_S "sw"
+# define LONG_ADDI "addiu"
+# ifdef __BIG_ENDIAN
+# define U32_U64_PAD(x) "0x0, " x
+# else
+# define U32_U64_PAD(x) x ", 0x0"
+# endif
+#else
+# error unsupported _MIPS_SZLONG
+#endif
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+ post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
+ LONG_S " $4, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "lw $4, %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+ abort_label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".balign 32\n\t" \
+ __rseq_str(table_label) ":\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+ ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+ start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+ abort_label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "beq $4, %[expectnot], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+ LONG_S " $4, %[load]\n\t"
+ LONG_ADDI " $4, %[voffp]\n\t"
+ LONG_L " $4, 0($4)\n\t"
+ /* final store */
+ LONG_S " $4, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "Ir" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ LONG_L " $4, %[v]\n\t"
+ LONG_ADDI " $4, %[count]\n\t"
+ /* final store */
+ LONG_S " $4, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [count] "Ir" (count)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ "sync\n\t" /* full sync provides store-release */
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ LONG_L " $4, %[v2]\n\t"
+ "bne $4, %[expect2], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+ LONG_L " $4, %[v2]\n\t"
+ "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uintptr_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 7f\n\t"
+#endif
+ /* try memcpy */
+ "beqz %[len], 333f\n\t" \
+ "222:\n\t" \
+ "lb $4, 0(%[src])\n\t" \
+ "sb $4, 0(%[dst])\n\t" \
+ LONG_ADDI " %[src], 1\n\t" \
+ LONG_ADDI " %[dst], 1\n\t" \
+ LONG_ADDI " %[len], -1\n\t" \
+ "bnez %[len], 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uintptr_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 7f\n\t"
+#endif
+ /* try memcpy */
+ "beqz %[len], 333f\n\t" \
+ "222:\n\t" \
+ "lb $4, 0(%[src])\n\t" \
+ "sb $4, 0(%[dst])\n\t" \
+ LONG_ADDI " %[src], 1\n\t" \
+ LONG_ADDI " %[dst], 1\n\t" \
+ LONG_ADDI " %[len], -1\n\t" \
+ "bnez %[len], 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ "sync\n\t" /* full sync provides store-release */
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
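As a hedged usage sketch (not part of the new header), the cmpeqv/storev helper defined above is normally retried until it neither aborts (-1) nor fails the compare (1); the per-CPU counter array and the call to rseq_current_cpu() are assumptions for illustration:

	static intptr_t counter[256];		/* illustrative per-CPU slots */

	static void bump(void)
	{
		int ret;

		do {
			int cpu = rseq_current_cpu();
			intptr_t old = counter[cpu];

			ret = rseq_cmpeqv_storev(&counter[cpu], old, old + 1, cpu);
		} while (ret);			/* retry on abort (-1) or compare failure (1) */
	}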
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index 0a808575cbc4..86ce22417e0d 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -73,6 +73,8 @@ extern __thread volatile struct rseq __rseq_abi;
#include <rseq-arm.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
+#elif defined(__mips__)
+#include <rseq-mips.h>
#else
#error unsupported target
#endif
@@ -131,17 +133,27 @@ static inline uint32_t rseq_current_cpu(void)
return cpu;
}
+static inline void rseq_clear_rseq_cs(void)
+{
+#ifdef __LP64__
+ __rseq_abi.rseq_cs.ptr = 0;
+#else
+ __rseq_abi.rseq_cs.ptr.ptr32 = 0;
+#endif
+}
+
/*
- * rseq_prepare_unload() should be invoked by each thread using rseq_finish*()
- * at least once between their last rseq_finish*() and library unload of the
- * library defining the rseq critical section (struct rseq_cs). This also
- * applies to use of rseq in code generated by JIT: rseq_prepare_unload()
- * should be invoked at least once by each thread using rseq_finish*() before
- * reclaim of the memory holding the struct rseq_cs.
+ * rseq_prepare_unload() should be invoked by each thread executing a rseq
+ * critical section at least once between their last critical section and
+ * library unload of the library defining the rseq critical section
+ * (struct rseq_cs). This also applies to use of rseq in code generated by
+ * JIT: rseq_prepare_unload() should be invoked at least once by each
+ * thread executing a rseq critical section before reclaim of the memory
+ * holding the struct rseq_cs.
*/
static inline void rseq_prepare_unload(void)
{
- __rseq_abi.rseq_cs = 0;
+ rseq_clear_rseq_cs();
}
#endif /* RSEQ_H_ */
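A small sketch of the scenario the comment above describes (dlclose() and the handle name are assumptions for illustration): a thread that has finished its last rseq critical section clears its rseq_cs pointer before the library providing the struct rseq_cs descriptors is unloaded.

	/* illustrative only */
	rseq_prepare_unload();		/* __rseq_abi.rseq_cs is now cleared */
	dlclose(jit_plugin_handle);	/* memory holding struct rseq_cs may now be reclaimed */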
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
index 3acd6d75ff9f..3acd6d75ff9f 100644..100755
--- a/tools/testing/selftests/rseq/run_param_test.sh
+++ b/tools/testing/selftests/rseq/run_param_test.sh
diff --git a/tools/testing/selftests/sparc64/Makefile b/tools/testing/selftests/sparc64/Makefile
index 2082eeffd779..a19531dba4dc 100644
--- a/tools/testing/selftests/sparc64/Makefile
+++ b/tools/testing/selftests/sparc64/Makefile
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/)
+
+ifneq ($(ARCH),sparc64)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
SUBDIRS := drivers
TEST_PROGS := run.sh
+
.PHONY: all clean
include ../lib.mk
@@ -18,10 +29,6 @@ all:
fi \
done
-override define RUN_TESTS
- @cd $(OUTPUT); ./run.sh
-endef
-
override define INSTALL_RULE
mkdir -p $(INSTALL_PATH)
install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@@ -33,10 +40,6 @@ override define INSTALL_RULE
done;
endef
-override define EMIT_TESTS
- echo "./run.sh"
-endef
-
override define CLEAN
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
@@ -44,3 +47,4 @@ override define CLEAN
make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
done
endef
+endif
diff --git a/tools/testing/selftests/sparc64/drivers/Makefile b/tools/testing/selftests/sparc64/drivers/Makefile
index 6264f40bbdbc..deb0df415565 100644
--- a/tools/testing/selftests/sparc64/drivers/Makefile
+++ b/tools/testing/selftests/sparc64/drivers/Makefile
@@ -1,4 +1,4 @@
-
+# SPDX-License-Identifier: GPL-2.0
INCLUDEDIR := -I.
CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
index 24cff498b31a..fc9f8cde7d42 100755
--- a/tools/testing/selftests/static_keys/test_static_keys.sh
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -2,6 +2,19 @@
# SPDX-License-Identifier: GPL-2.0
# Runs static keys kernel module tests
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+ echo "static_key: module test_static_key_base is not found [SKIP]"
+ exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+ echo "static_key: module test_static_keys is not found [SKIP]"
+ exit $ksft_skip
+fi
+
if /sbin/modprobe -q test_static_key_base; then
if /sbin/modprobe -q test_static_keys; then
echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644
index 000000000000..1ab7e8130db2
--- /dev/null
+++ b/tools/testing/selftests/sync/config
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index ec232c3cfcaa..584eb8ea780a 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -14,6 +14,9 @@
# This performs a series tests against the proc sysctl interface.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
TEST_NAME="sysctl"
TEST_DRIVER="test_${TEST_NAME}"
TEST_DIR=$(dirname $0)
@@ -41,7 +44,7 @@ test_modprobe()
echo "$0: $DIR not present" >&2
echo "You must have the following enabled in your kernel:" >&2
cat $TEST_DIR/config >&2
- exit 1
+ exit $ksft_skip
fi
}
@@ -98,28 +101,30 @@ test_reqs()
uid=$(id -u)
if [ $uid -ne 0 ]; then
echo $msg must be run as root >&2
- exit 0
+ exit $ksft_skip
fi
if ! which perl 2> /dev/null > /dev/null; then
echo "$0: You need perl installed"
- exit 1
+ exit $ksft_skip
fi
if ! which getconf 2> /dev/null > /dev/null; then
echo "$0: You need getconf installed"
- exit 1
+ exit $ksft_skip
fi
if ! which diff 2> /dev/null > /dev/null; then
echo "$0: You need diff installed"
- exit 1
+ exit $ksft_skip
fi
}
function load_req_mod()
{
- trap "test_modprobe" EXIT
-
if [ ! -d $DIR ]; then
+ if ! modprobe -q -n $TEST_DRIVER; then
+ echo "$0: module $TEST_DRIVER not found [SKIP]"
+ exit $ksft_skip
+ fi
modprobe $TEST_DRIVER
if [ $? -ne 0 ]; then
exit
@@ -765,6 +770,7 @@ function parse_args()
test_reqs
allow_user_defaults
check_production_sysctl_writes_strict
+test_modprobe
load_req_mod
trap "test_finish" EXIT
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
index d60506fc77f8..f9b31a57439b 100755
--- a/tools/testing/selftests/user/test_user_copy.sh
+++ b/tools/testing/selftests/user/test_user_copy.sh
@@ -2,6 +2,13 @@
# SPDX-License-Identifier: GPL-2.0
# Runs copy_to/from_user infrastructure using test_user_copy kernel module
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+ echo "user: module test_user_copy is not found [SKIP]"
+ exit $ksft_skip
+fi
if /sbin/modprobe -q test_user_copy; then
/sbin/modprobe -q -r test_user_copy
echo "user_copy: ok"
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
index 1097f04e4d80..bcec71250873 100644
--- a/tools/testing/selftests/vm/compaction_test.c
+++ b/tools/testing/selftests/vm/compaction_test.c
@@ -16,6 +16,8 @@
#include <unistd.h>
#include <string.h>
+#include "../kselftest.h"
+
#define MAP_SIZE 1048576
struct map_list {
@@ -169,7 +171,7 @@ int main(int argc, char **argv)
printf("Either the sysctl compact_unevictable_allowed is not\n"
"set to 1 or couldn't read the proc file.\n"
"Skipping the test\n");
- return 0;
+ return KSFT_SKIP;
}
lim.rlim_cur = RLIM_INFINITY;
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
index 4997b9222cfa..637b6d0ac0d0 100644
--- a/tools/testing/selftests/vm/mlock2-tests.c
+++ b/tools/testing/selftests/vm/mlock2-tests.c
@@ -9,6 +9,8 @@
#include <stdbool.h>
#include "mlock2.h"
+#include "../kselftest.h"
+
struct vm_boundaries {
unsigned long start;
unsigned long end;
@@ -303,7 +305,7 @@ static int test_mlock_lock()
if (mlock2_(map, 2 * page_size, 0)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock2(0)");
goto unmap;
@@ -412,7 +414,7 @@ static int test_mlock_onfault()
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock2(MLOCK_ONFAULT)");
goto unmap;
@@ -425,7 +427,7 @@ static int test_mlock_onfault()
if (munlock(map, 2 * page_size)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("munlock()");
goto unmap;
@@ -457,7 +459,7 @@ static int test_lock_onfault_of_present()
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock2(MLOCK_ONFAULT)");
goto unmap;
@@ -583,7 +585,7 @@ static int test_vma_management(bool call_mlock)
if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock(ONFAULT)\n");
goto out;
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 22d564673830..88cbe5575f0c 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
#please run as root
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
mnt=./huge
exitcode=0
@@ -36,7 +39,7 @@ if [ -n "$freepgs" ] && [ -n "$hpgsize_KB" ]; then
echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
if [ $? -ne 0 ]; then
echo "Please run this test as root"
- exit 1
+ exit $ksft_skip
fi
while read name size unit; do
if [ "$name" = "HugePages_Free:" ]; then
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index de2f9ec8a87f..7b8171e3128a 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -69,6 +69,8 @@
#include <setjmp.h>
#include <stdbool.h>
+#include "../kselftest.h"
+
#ifdef __NR_userfaultfd
static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
@@ -1322,7 +1324,7 @@ int main(int argc, char **argv)
int main(void)
{
printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
- return 0;
+ return KSFT_SKIP;
}
#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/x86/sigreturn.c b/tools/testing/selftests/x86/sigreturn.c
index 246145b84a12..4d9dc3f2fd70 100644
--- a/tools/testing/selftests/x86/sigreturn.c
+++ b/tools/testing/selftests/x86/sigreturn.c
@@ -610,21 +610,41 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
*/
for (int i = 0; i < NGREG; i++) {
greg_t req = requested_regs[i], res = resulting_regs[i];
+
if (i == REG_TRAPNO || i == REG_IP)
continue; /* don't care */
- if (i == REG_SP) {
- printf("\tSP: %llx -> %llx\n", (unsigned long long)req,
- (unsigned long long)res);
+ if (i == REG_SP) {
/*
- * In many circumstances, the high 32 bits of rsp
- * are zeroed. For example, we could be a real
- * 32-bit program, or we could hit any of a number
- * of poorly-documented IRET or segmented ESP
- * oddities. If this happens, it's okay.
+ * If we were using a 16-bit stack segment, then
+ * the kernel is a bit stuck: IRET only restores
+ * the low 16 bits of ESP/RSP if SS is 16-bit.
+ * The kernel uses a hack to restore bits 31:16,
+ * but that hack doesn't help with bits 63:32.
+ * On Intel CPUs, bits 63:32 end up zeroed, and, on
+ * AMD CPUs, they leak the high bits of the kernel
+ * espfix64 stack pointer. There's very little that
+ * the kernel can do about it.
+ *
+ * Similarly, if we are returning to a 32-bit context,
+ * the CPU will often lose the high 32 bits of RSP.
*/
- if (res == (req & 0xFFFFFFFF))
- continue; /* OK; not expected to work */
+
+ if (res == req)
+ continue;
+
+ if (cs_bits != 64 && ((res ^ req) & 0xFFFFFFFF) == 0) {
+ printf("[NOTE]\tSP: %llx -> %llx\n",
+ (unsigned long long)req,
+ (unsigned long long)res);
+ continue;
+ }
+
+ printf("[FAIL]\tSP mismatch: requested 0x%llx; got 0x%llx\n",
+ (unsigned long long)requested_regs[i],
+ (unsigned long long)resulting_regs[i]);
+ nerrs++;
+ continue;
}
bool ignore_reg = false;
@@ -654,25 +674,18 @@ static int test_valid_sigreturn(int cs_bits, bool use_16bit_ss, int force_ss)
#endif
/* Sanity check on the kernel */
- if (i == REG_CX && requested_regs[i] != resulting_regs[i]) {
+ if (i == REG_CX && req != res) {
printf("[FAIL]\tCX (saved SP) mismatch: requested 0x%llx; got 0x%llx\n",
- (unsigned long long)requested_regs[i],
- (unsigned long long)resulting_regs[i]);
+ (unsigned long long)req,
+ (unsigned long long)res);
nerrs++;
continue;
}
- if (requested_regs[i] != resulting_regs[i] && !ignore_reg) {
- /*
- * SP is particularly interesting here. The
- * usual cause of failures is that we hit the
- * nasty IRET case of returning to a 16-bit SS,
- * in which case bits 16:31 of the *kernel*
- * stack pointer persist in ESP.
- */
+ if (req != res && !ignore_reg) {
printf("[FAIL]\tReg %d mismatch: requested 0x%llx; got 0x%llx\n",
- i, (unsigned long long)requested_regs[i],
- (unsigned long long)resulting_regs[i]);
+ i, (unsigned long long)req,
+ (unsigned long long)res);
nerrs++;
}
}
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 754de7da426a..232e958ec454 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
TCID="zram.sh"
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
. ./zram_lib.sh
run_zram () {
@@ -24,5 +27,5 @@ elif [ -b /dev/zram0 ]; then
else
echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
echo "$TCID : CONFIG_ZRAM is not set"
- exit 1
+ exit $ksft_skip
fi
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index f6a9c73e7a44..9e73a4fb9b0a 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -18,6 +18,9 @@ MODULE=0
dev_makeswap=-1
dev_mounted=-1
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
trap INT
check_prereqs()
@@ -27,7 +30,7 @@ check_prereqs()
if [ $uid -ne 0 ]; then
echo $msg must be run as root >&2
- exit 0
+ exit $ksft_skip
fi
}
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index 95dd14648ba5..0f395dfb7774 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -44,12 +44,25 @@
/******************** Little Endian Handling ********************************/
-#define cpu_to_le16(x) htole16(x)
-#define cpu_to_le32(x) htole32(x)
+/*
+ * cpu_to_le16/32 are used when initializing structures, a context where a
+ * function call is not allowed. To solve this, define cpu_to_le16/32 as
+ * constant expressions rather than as wrappers around htole16()/htole32().
+ */
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define cpu_to_le16(x) (x)
+#define cpu_to_le32(x) (x)
+#else
+#define cpu_to_le16(x) ((((x) >> 8) & 0xffu) | (((x) & 0xffu) << 8))
+#define cpu_to_le32(x) \
+ ((((x) & 0xff000000u) >> 24) | (((x) & 0x00ff0000u) >> 8) | \
+ (((x) & 0x0000ff00u) << 8) | (((x) & 0x000000ffu) << 24))
+#endif
+
#define le32_to_cpu(x) le32toh(x)
#define le16_to_cpu(x) le16toh(x)
-
/******************** Messages and Errors ***********************************/
static const char argv0[] = "ffs-test";
diff --git a/tools/virtio/linux/scatterlist.h b/tools/virtio/linux/scatterlist.h
index 9a45f90e2d08..369ee308b668 100644
--- a/tools/virtio/linux/scatterlist.h
+++ b/tools/virtio/linux/scatterlist.h
@@ -36,7 +36,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
*/
BUG_ON((unsigned long) page & 0x03);
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
@@ -67,7 +66,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
return (struct page *)((sg)->page_link & ~0x3);
@@ -116,9 +114,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
/*
* Set termination bit, clear potential chain bit
*/
@@ -136,17 +131,11 @@ static inline void sg_mark_end(struct scatterlist *sg)
**/
static inline void sg_unmark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
sg->page_link &= ~0x02;
}
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
if (sg_is_last(sg))
return NULL;
@@ -160,13 +149,6 @@ static inline struct scatterlist *sg_next(struct scatterlist *sg)
static inline void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
memset(sgl, 0, sizeof(*sgl) * nents);
-#ifdef CONFIG_DEBUG_SG
- {
- unsigned int i;
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
- }
-#endif
sg_mark_end(&sgl[nents - 1]);
}
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 72143cfaf6ec..ea434ddc8499 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -47,7 +47,7 @@ config KVM_GENERIC_DIRTYLOG_READ_PROTECT
config KVM_COMPAT
def_bool y
- depends on KVM && COMPAT && !S390
+ depends on KVM && COMPAT && !(S390 || ARM64)
config HAVE_KVM_IRQ_BYPASS
bool
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 8d90de213ce9..1d90d79706bd 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -297,6 +297,8 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
phys_addr_t next;
assert_spin_locked(&kvm->mmu_lock);
+ WARN_ON(size & ~PAGE_MASK);
+
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
/*
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index ff7dc890941a..cdce653e3c47 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -617,11 +617,6 @@ int vgic_v3_probe(const struct gic_kvm_info *info)
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
kvm_vgic_global_state.vcpu_base = 0;
- } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
- pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
- (unsigned long long)resource_size(&info->vcpu),
- PAGE_SIZE);
- kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/virt/kvm/eventfd.c b/virt/kvm/eventfd.c
index 90d30fbe95ae..b20b751286fc 100644
--- a/virt/kvm/eventfd.c
+++ b/virt/kvm/eventfd.c
@@ -119,8 +119,12 @@ irqfd_shutdown(struct work_struct *work)
{
struct kvm_kernel_irqfd *irqfd =
container_of(work, struct kvm_kernel_irqfd, shutdown);
+ struct kvm *kvm = irqfd->kvm;
u64 cnt;
+ /* Make sure irqfd has been initialized in assign path. */
+ synchronize_srcu(&kvm->irq_srcu);
+
/*
* Synchronize with the wait-queue and unhook ourselves to prevent
* further events.
@@ -387,7 +391,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
idx = srcu_read_lock(&kvm->irq_srcu);
irqfd_update(kvm, irqfd);
- srcu_read_unlock(&kvm->irq_srcu, idx);
list_add_tail(&irqfd->list, &kvm->irqfds.items);
@@ -402,11 +405,6 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
if (events & EPOLLIN)
schedule_work(&irqfd->inject);
- /*
- * do not drop the file until the irqfd is fully initialized, otherwise
- * we might race against the EPOLLHUP
- */
- fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
if (kvm_arch_has_irq_bypass()) {
irqfd->consumer.token = (void *)irqfd->eventfd;
@@ -421,6 +419,13 @@ kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
}
#endif
+ srcu_read_unlock(&kvm->irq_srcu, idx);
+
+ /*
+ * do not drop the file until the irqfd is fully initialized, otherwise
+ * we might race against the EPOLLHUP
+ */
+ fdput(f);
return 0;
fail:
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ada21f47f22b..8b47507faab5 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -116,6 +116,11 @@ static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+#define KVM_COMPAT(c) .compat_ioctl = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+ unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
@@ -2396,11 +2401,9 @@ static int kvm_vcpu_release(struct inode *inode, struct file *filp)
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_vcpu_compat_ioctl,
-#endif
.mmap = kvm_vcpu_mmap,
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_vcpu_compat_ioctl),
};
/*
@@ -2824,10 +2827,8 @@ static int kvm_device_release(struct inode *inode, struct file *filp)
static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_device_ioctl,
-#endif
.release = kvm_device_release,
+ KVM_COMPAT(kvm_device_ioctl),
};
struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@ static long kvm_vm_compat_ioctl(struct file *filp,
static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_vm_compat_ioctl,
-#endif
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_vm_compat_ioctl),
};
static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@ out:
static struct file_operations kvm_chardev_ops = {
.unlocked_ioctl = kvm_dev_ioctl,
- .compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_dev_ioctl),
};
static struct miscdevice kvm_dev = {