-rw-r--r--.gitignore3
-rw-r--r--Documentation/00-INDEX24
-rw-r--r--Documentation/PCI/MSI-HOWTO.txt119
-rw-r--r--Documentation/RCU/00-INDEX2
-rw-r--r--Documentation/arm/00-INDEX14
-rw-r--r--Documentation/blackfin/00-INDEX6
-rw-r--r--Documentation/block/00-INDEX2
-rw-r--r--Documentation/device-mapper/cache.txt11
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt34
-rw-r--r--Documentation/devicetree/00-INDEX2
-rw-r--r--Documentation/devicetree/bindings/arm/omap/omap.txt2
-rw-r--r--Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt4
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt16
-rw-r--r--Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt18
-rw-r--r--Documentation/devicetree/bindings/mmc/atmel-hsmci.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt5
-rw-r--r--Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt5
-rw-r--r--Documentation/devicetree/bindings/net/micrel-ks8851.txt1
-rw-r--r--Documentation/devicetree/bindings/net/opencores-ethoc.txt22
-rw-r--r--Documentation/devicetree/bindings/net/sti-dwmac.txt58
-rw-r--r--Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt (renamed from Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt)8
-rw-r--r--Documentation/devicetree/bindings/power/bq2415x.txt47
-rw-r--r--Documentation/devicetree/bindings/spi/efm32-spi.txt8
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qup.txt85
-rw-r--r--Documentation/devicetree/bindings/spi/sh-hspi.txt28
-rw-r--r--Documentation/devicetree/bindings/spi/sh-msiof.txt42
-rw-r--r--Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-rspi.txt61
-rw-r--r--Documentation/devicetree/bindings/spi/spi_atmel.txt5
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt6
-rw-r--r--Documentation/dvb/contributors.txt2
-rw-r--r--Documentation/fb/00-INDEX6
-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/nfs/00-INDEX4
-rw-r--r--Documentation/i2c/instantiating-devices41
-rw-r--r--Documentation/ide/00-INDEX2
-rw-r--r--Documentation/kernel-parameters.txt8
-rw-r--r--Documentation/laptops/00-INDEX6
-rw-r--r--Documentation/leds/00-INDEX8
-rw-r--r--Documentation/m68k/00-INDEX2
-rw-r--r--Documentation/networking/00-INDEX30
-rw-r--r--Documentation/networking/3c505.txt45
-rw-r--r--Documentation/networking/can.txt6
-rw-r--r--Documentation/networking/netlink_mmap.txt4
-rw-r--r--Documentation/networking/packet_mmap.txt2
-rw-r--r--Documentation/networking/timestamping.txt52
-rw-r--r--Documentation/phy.txt26
-rw-r--r--Documentation/power/00-INDEX6
-rw-r--r--Documentation/ptp/testptp.c11
-rw-r--r--Documentation/s390/00-INDEX8
-rw-r--r--Documentation/scheduler/00-INDEX2
-rw-r--r--Documentation/scsi/00-INDEX16
-rw-r--r--Documentation/serial/00-INDEX6
-rw-r--r--Documentation/spi/00-INDEX22
-rw-r--r--Documentation/spi/spi-summary17
-rw-r--r--Documentation/spi/spidev6
-rw-r--r--Documentation/spi/spidev_fdx.c8
-rw-r--r--Documentation/spi/spidev_test.c45
-rw-r--r--Documentation/timers/00-INDEX2
-rw-r--r--Documentation/virtual/kvm/00-INDEX2
-rw-r--r--Documentation/vm/00-INDEX4
-rw-r--r--Documentation/w1/masters/00-INDEX4
-rw-r--r--Documentation/w1/slaves/00-INDEX2
-rw-r--r--Documentation/x86/00-INDEX18
-rw-r--r--Documentation/zh_CN/arm64/booting.txt65
-rw-r--r--Documentation/zh_CN/arm64/memory.txt46
-rw-r--r--Documentation/zh_CN/arm64/tagged-pointers.txt52
-rw-r--r--MAINTAINERS166
-rw-r--r--Makefile10
-rw-r--r--arch/arc/mm/cache_arc700.c4
-rw-r--r--arch/arm/Kconfig3
-rw-r--r--arch/arm/boot/compressed/.gitignore1
-rw-r--r--arch/arm/boot/dts/Makefile4
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts11
-rw-r--r--arch/arm/boot/dts/armada-xp-mv78260.dtsi3
-rw-r--r--arch/arm/boot/dts/at91-sama5d3_xplained.dts229
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9n12ek.dts4
-rw-r--r--arch/arm/boot/dts/bcm11351.dtsi2
-rw-r--r--arch/arm/boot/dts/dove.dtsi11
-rw-r--r--arch/arm/boot/dts/imx6dl-hummingboard.dts10
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi10
-rw-r--r--arch/arm/boot/dts/keystone-clocks.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-gta04.dts8
-rw-r--r--arch/arm/boot/dts/omap3-igep0020.dts2
-rw-r--r--arch/arm/boot/dts/omap3-igep0030.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n9.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/omap3-n950.dts2
-rw-r--r--arch/arm/boot/dts/omap3-overo-storm-tobi.dts22
-rw-r--r--arch/arm/boot/dts/omap3-overo-tobi-common.dtsi (renamed from arch/arm/boot/dts/omap3-tobi.dts)3
-rw-r--r--arch/arm/boot/dts/omap3-overo-tobi.dts22
-rw-r--r--arch/arm/boot/dts/omap3-overo.dtsi3
-rw-r--r--arch/arm/boot/dts/sama5d3.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d36.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi1
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi6
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi6
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi2
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi16
-rw-r--r--arch/arm/boot/dts/tegra114.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi4
-rw-r--r--arch/arm/boot/dts/tegra30-cardhu.dtsi2
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi4
-rw-r--r--arch/arm/boot/dts/testcases/tests.dtsi2
-rw-r--r--arch/arm/boot/dts/versatile-pb.dts4
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/tegra_defconfig3
-rw-r--r--arch/arm/include/asm/cacheflush.h1
-rw-r--r--arch/arm/include/asm/memory.h9
-rw-r--r--arch/arm/include/asm/pgtable-3level.h15
-rw-r--r--arch/arm/include/asm/spinlock.h15
-rw-r--r--arch/arm/kernel/head-common.S12
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/setup.c2
-rw-r--r--arch/arm/kvm/arm.c3
-rw-r--r--arch/arm/kvm/interrupts.S11
-rw-r--r--arch/arm/mach-hisi/Kconfig2
-rw-r--r--arch/arm/mach-imx/Makefile2
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c3
-rw-r--r--arch/arm/mach-imx/clk-imx6sl.c3
-rw-r--r--arch/arm/mach-imx/common.h4
-rw-r--r--arch/arm/mach-imx/pm-imx6q.c2
-rw-r--r--arch/arm/mach-moxart/Kconfig1
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c1
-rw-r--r--arch/arm/mach-omap2/Kconfig10
-rw-r--r--arch/arm/mach-omap2/cclock3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/cpuidle44xx.c8
-rw-r--r--arch/arm/mach-omap2/dpll3xxx.c92
-rw-r--r--arch/arm/mach-omap2/gpmc.c4
-rw-r--r--arch/arm/mach-omap2/io.c9
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c20
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c9
-rw-r--r--arch/arm/mach-omap2/pdata-quirks.c21
-rw-r--r--arch/arm/mach-omap2/prminst44xx.c4
-rw-r--r--arch/arm/mach-pxa/am300epd.c1
-rw-r--r--arch/arm/mach-pxa/include/mach/balloon3.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/corgi.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/csb726.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/gumstix.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/idp.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/palmld.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmt5.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtc.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/palmtx.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm027.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/pcm990_baseboard.h1
-rw-r--r--arch/arm/mach-pxa/include/mach/poodle.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/spitz.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/tosa.h2
-rw-r--r--arch/arm/mach-pxa/include/mach/trizeps4.h2
-rw-r--r--arch/arm/mach-pxa/mioa701.c9
-rw-r--r--arch/arm/mach-sa1100/include/mach/collie.h2
-rw-r--r--arch/arm/mach-shmobile/Kconfig2
-rw-r--r--arch/arm/mach-tegra/pm.c1
-rw-r--r--arch/arm/mach-tegra/tegra.c10
-rw-r--r--arch/arm/mach-zynq/common.c14
-rw-r--r--arch/arm/mm/dma-mapping.c2
-rw-r--r--arch/arm/mm/dump.c3
-rw-r--r--arch/arm/mm/mm.h1
-rw-r--r--arch/arm/mm/mmu.c7
-rw-r--r--arch/arm/mm/proc-v6.S3
-rw-r--r--arch/arm/mm/proc-v7.S2
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/configs/defconfig18
-rw-r--r--arch/arm64/include/asm/atomic.h53
-rw-r--r--arch/arm64/include/asm/barrier.h2
-rw-r--r--arch/arm64/include/asm/cacheflush.h1
-rw-r--r--arch/arm64/include/asm/cmpxchg.h17
-rw-r--r--arch/arm64/include/asm/esr.h2
-rw-r--r--arch/arm64/include/asm/futex.h10
-rw-r--r--arch/arm64/include/asm/kvm_arm.h2
-rw-r--r--arch/arm64/include/asm/percpu.h8
-rw-r--r--arch/arm64/include/asm/pgtable.h10
-rw-r--r--arch/arm64/include/asm/spinlock.h10
-rw-r--r--arch/arm64/include/asm/unistd32.h5
-rw-r--r--arch/arm64/include/uapi/asm/kvm.h9
-rw-r--r--arch/arm64/kernel/kuser32.S6
-rw-r--r--arch/arm64/kernel/stacktrace.c6
-rw-r--r--arch/arm64/kernel/vdso.c4
-rw-r--r--arch/arm64/kernel/vdso/Makefile2
-rw-r--r--arch/arm64/kernel/vdso/gettimeofday.S7
-rw-r--r--arch/arm64/kvm/hyp.S27
-rw-r--r--arch/arm64/lib/bitops.S3
-rw-r--r--arch/arm64/mm/dma-mapping.c1
-rw-r--r--arch/arm64/mm/mmu.c12
-rw-r--r--arch/arm64/mm/pgd.c11
-rw-r--r--arch/avr32/Makefile2
-rw-r--r--arch/avr32/boards/mimc200/fram.c1
-rw-r--r--arch/avr32/include/asm/Kbuild1
-rw-r--r--arch/avr32/include/asm/io.h2
-rw-r--r--arch/c6x/include/asm/cache.h1
-rw-r--r--arch/cris/include/asm/bitops.h2
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h2
-rw-r--r--arch/ia64/kernel/entry.S2
-rw-r--r--arch/ia64/kernel/uncached.c2
-rw-r--r--arch/m68k/include/asm/Kbuild6
-rw-r--r--arch/m68k/include/asm/barrier.h8
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h2
-rw-r--r--arch/m68k/kernel/syscalltable.S2
-rw-r--r--arch/microblaze/include/asm/delay.h2
-rw-r--r--arch/microblaze/include/asm/io.h6
-rw-r--r--arch/microblaze/kernel/head.S2
-rw-r--r--arch/mips/Kconfig24
-rw-r--r--arch/mips/alchemy/board-gpr.c4
-rw-r--r--arch/mips/alchemy/board-mtx1.c4
-rw-r--r--arch/mips/alchemy/devboards/db1000.c7
-rw-r--r--arch/mips/bcm47xx/board.c1
-rw-r--r--arch/mips/bcm47xx/nvram.c2
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c22
-rw-r--r--arch/mips/include/asm/asmmacro.h15
-rw-r--r--arch/mips/include/asm/fpu.h4
-rw-r--r--arch/mips/include/asm/ftrace.h20
-rw-r--r--arch/mips/include/asm/syscall.h10
-rw-r--r--arch/mips/include/uapi/asm/inst.h4
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/kernel/ftrace.c5
-rw-r--r--arch/mips/kernel/r4k_fpu.S16
-rw-r--r--arch/mips/kernel/rtlx-cmp.c3
-rw-r--r--arch/mips/kernel/rtlx-mt.c3
-rw-r--r--arch/mips/kernel/scall32-o32.S2
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/scall64-o32.S2
-rw-r--r--arch/mips/math-emu/cp1emu.c6
-rw-r--r--arch/mips/mti-malta/malta-amon.c2
-rw-r--r--arch/mips/mti-malta/malta-int.c4
-rw-r--r--arch/mips/pci/msi-octeon.c1
-rw-r--r--arch/parisc/hpux/fs.c15
-rw-r--r--arch/parisc/include/asm/page.h11
-rw-r--r--arch/parisc/include/asm/spinlock.h4
-rw-r--r--arch/parisc/include/uapi/asm/unistd.h4
-rw-r--r--arch/parisc/kernel/cache.c64
-rw-r--r--arch/parisc/kernel/syscall_table.S1
-rw-r--r--arch/powerpc/include/asm/compat.h5
-rw-r--r--arch/powerpc/include/asm/dma-mapping.h1
-rw-r--r--arch/powerpc/include/asm/eeh.h21
-rw-r--r--arch/powerpc/include/asm/hugetlb.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h1
-rw-r--r--arch/powerpc/include/asm/opal.h4
-rw-r--r--arch/powerpc/include/asm/pgtable-ppc64.h26
-rw-r--r--arch/powerpc/include/asm/pgtable.h22
-rw-r--r--arch/powerpc/include/asm/ptrace.h16
-rw-r--r--arch/powerpc/include/asm/sections.h12
-rw-r--r--arch/powerpc/include/asm/vdso.h6
-rw-r--r--arch/powerpc/kernel/crash_dump.c8
-rw-r--r--arch/powerpc/kernel/dma.c10
-rw-r--r--arch/powerpc/kernel/eeh.c32
-rw-r--r--arch/powerpc/kernel/eeh_driver.c8
-rw-r--r--arch/powerpc/kernel/ftrace.c1
-rw-r--r--arch/powerpc/kernel/iommu.c12
-rw-r--r--arch/powerpc/kernel/irq.c5
-rw-r--r--arch/powerpc/kernel/machine_kexec.c14
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c6
-rw-r--r--arch/powerpc/kernel/misc_32.S5
-rw-r--r--arch/powerpc/kernel/process.c9
-rw-r--r--arch/powerpc/kernel/reloc_64.S5
-rw-r--r--arch/powerpc/kernel/setup_32.c5
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/vdso32/vdso32_wrapper.S2
-rw-r--r--arch/powerpc/kernel/vdso64/vdso64_wrapper.S2
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S71
-rw-r--r--arch/powerpc/mm/hash_utils_64.c14
-rw-r--r--arch/powerpc/mm/pgtable_64.c12
-rw-r--r--arch/powerpc/mm/subpage-prot.c2
-rw-r--r--arch/powerpc/perf/core-book3s.c5
-rw-r--r--arch/powerpc/perf/power8-pmu.c144
-rw-r--r--arch/powerpc/platforms/cell/ras.c3
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c128
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c21
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c84
-rw-r--r--arch/powerpc/platforms/powernv/pci.c230
-rw-r--r--arch/powerpc/platforms/powernv/pci.h6
-rw-r--r--arch/powerpc/platforms/powernv/powernv.h8
-rw-r--r--arch/powerpc/platforms/powernv/setup.c9
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c2
-rw-r--r--arch/powerpc/platforms/pseries/hotplug-cpu.c22
-rw-r--r--arch/powerpc/platforms/pseries/pci.c22
-rw-r--r--arch/powerpc/platforms/pseries/setup.c3
-rw-r--r--arch/powerpc/sysdev/mpic.c38
-rw-r--r--arch/powerpc/xmon/xmon.c24
-rw-r--r--arch/s390/appldata/appldata_base.c1
-rw-r--r--arch/s390/crypto/aes_s390.c65
-rw-r--r--arch/s390/crypto/des_s390.c95
-rw-r--r--arch/s390/kernel/compat_wrapper.S2
-rw-r--r--arch/s390/kernel/head64.S7
-rw-r--r--arch/s390/mm/page-states.c10
-rw-r--r--arch/s390/pci/pci_dma.c8
-rw-r--r--arch/sh/include/cpu-sh2/cpu/cache.h2
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/cache.h4
-rw-r--r--arch/sh/include/cpu-sh3/cpu/cache.h2
-rw-r--r--arch/sh/include/cpu-sh4/cpu/cache.h2
-rw-r--r--arch/sh/kernel/cpu/init.c4
-rw-r--r--arch/sh/mm/cache-debugfs.c2
-rw-r--r--arch/sh/mm/cache-sh2.c4
-rw-r--r--arch/sh/mm/cache-sh2a.c6
-rw-r--r--arch/sh/mm/cache-sh4.c4
-rw-r--r--arch/sh/mm/cache-shx3.c4
-rw-r--r--arch/sh/mm/cache.c4
-rw-r--r--arch/sparc/Kconfig2
-rw-r--r--arch/sparc/kernel/process_64.c4
-rw-r--r--arch/sparc/kernel/syscalls.S4
-rw-r--r--arch/sparc/mm/srmmu.c2
-rw-r--r--arch/sparc/mm/tsb.c2
-rw-r--r--arch/x86/Kconfig7
-rw-r--r--arch/x86/Kconfig.cpu4
-rw-r--r--arch/x86/Kconfig.debug1
-rw-r--r--arch/x86/boot/compressed/aslr.c9
-rw-r--r--arch/x86/include/asm/amd_nb.h2
-rw-r--r--arch/x86/include/asm/barrier.h8
-rw-r--r--arch/x86/include/asm/efi.h3
-rw-r--r--arch/x86/include/asm/io.h2
-rw-r--r--arch/x86/include/asm/pgtable.h14
-rw-r--r--arch/x86/include/asm/spinlock.h5
-rw-r--r--arch/x86/include/asm/tlbflush.h6
-rw-r--r--arch/x86/include/asm/tsc.h2
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/kernel/amd_nb.c2
-rw-r--r--arch/x86/kernel/aperture_64.c20
-rw-r--r--arch/x86/kernel/cpu/amd.c5
-rw-r--r--arch/x86/kernel/cpu/centaur.c272
-rw-r--r--arch/x86/kernel/cpu/common.c7
-rw-r--r--arch/x86/kernel/cpu/intel.c10
-rw-r--r--arch/x86/kernel/cpu/microcode/amd_early.c43
-rw-r--r--arch/x86/kernel/cpu/mtrr/generic.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event.h1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c13
-rw-r--r--arch/x86/kernel/cpu/perf_event_p6.c48
-rw-r--r--arch/x86/kernel/ftrace.c83
-rw-r--r--arch/x86/kernel/head_32.S7
-rw-r--r--arch/x86/kernel/head_64.S6
-rw-r--r--arch/x86/kernel/i387.c15
-rw-r--r--arch/x86/kernel/irq.c9
-rw-r--r--arch/x86/kernel/machine_kexec_64.c2
-rw-r--r--arch/x86/kernel/pci-dma.c4
-rw-r--r--arch/x86/kernel/quirks.c39
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/tsc.c11
-rw-r--r--arch/x86/kernel/tsc_msr.c30
-rw-r--r--arch/x86/kvm/mmu.c1
-rw-r--r--arch/x86/kvm/svm.c6
-rw-r--r--arch/x86/kvm/vmx.c2
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/mm/fault.c61
-rw-r--r--arch/x86/mm/numa.c21
-rw-r--r--arch/x86/mm/numa_32.c2
-rw-r--r--arch/x86/mm/srat.c16
-rw-r--r--arch/x86/mm/tlb.c52
-rw-r--r--arch/x86/net/bpf_jit.S2
-rw-r--r--arch/x86/platform/efi/efi-bgrt.c12
-rw-r--r--arch/x86/platform/efi/efi.c25
-rw-r--r--arch/x86/platform/efi/efi_32.c6
-rw-r--r--arch/x86/platform/efi/efi_64.c9
-rw-r--r--arch/x86/um/asm/barrier.h4
-rw-r--r--arch/x86/xen/enlighten.c12
-rw-r--r--arch/x86/xen/mmu.c4
-rw-r--r--arch/x86/xen/p2m.c17
-rw-r--r--arch/xtensa/Kconfig3
-rw-r--r--arch/xtensa/boot/dts/xtfpga.dtsi12
-rw-r--r--arch/xtensa/include/asm/io.h2
-rw-r--r--arch/xtensa/include/asm/traps.h44
-rw-r--r--arch/xtensa/include/asm/vectors.h2
-rw-r--r--arch/xtensa/include/uapi/asm/unistd.h7
-rw-r--r--arch/xtensa/kernel/entry.S449
-rw-r--r--arch/xtensa/kernel/setup.c2
-rw-r--r--arch/xtensa/kernel/time.c1
-rw-r--r--arch/xtensa/kernel/vectors.S2
-rw-r--r--arch/xtensa/kernel/xtensa_ksyms.c2
-rw-r--r--arch/xtensa/mm/init.c13
-rw-r--r--arch/xtensa/mm/mmu.c2
-rw-r--r--arch/xtensa/platforms/xtfpga/setup.c7
-rw-r--r--arch/xtensa/variants/fsf/include/variant/tie.h9
-rw-r--r--block/blk-core.c19
-rw-r--r--block/blk-exec.c2
-rw-r--r--block/blk-flush.c108
-rw-r--r--block/blk-lib.c8
-rw-r--r--block/blk-merge.c91
-rw-r--r--block/blk-mq-cpu.c14
-rw-r--r--block/blk-mq-tag.c2
-rw-r--r--block/blk-mq.c237
-rw-r--r--block/blk-mq.h5
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/blk-timeout.c2
-rw-r--r--block/blk.h2
-rw-r--r--drivers/acpi/ac.c2
-rw-r--r--drivers/acpi/battery.c4
-rw-r--r--drivers/acpi/blacklist.c58
-rw-r--r--drivers/acpi/button.c2
-rw-r--r--drivers/acpi/container.c5
-rw-r--r--drivers/acpi/dock.c13
-rw-r--r--drivers/acpi/ec.c64
-rw-r--r--drivers/acpi/fan.c3
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/processor_throttling.c69
-rw-r--r--drivers/acpi/resource.c10
-rw-r--r--drivers/acpi/sbs.c4
-rw-r--r--drivers/acpi/scan.c6
-rw-r--r--drivers/acpi/sleep.c32
-rw-r--r--drivers/acpi/thermal.c2
-rw-r--r--drivers/acpi/utils.c4
-rw-r--r--drivers/acpi/video.c147
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/ahci.c18
-rw-r--r--drivers/ata/libata-core.c3
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/pata_imx.c8
-rw-r--r--drivers/ata/sata_mv.c16
-rw-r--r--drivers/ata/sata_sil.c1
-rw-r--r--drivers/base/component.c8
-rw-r--r--drivers/base/dma-buf.c25
-rw-r--r--drivers/base/firmware_class.c1
-rw-r--r--drivers/base/power/Makefile3
-rw-r--r--drivers/base/power/runtime.c162
-rw-r--r--drivers/block/aoe/aoecmd.c4
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c2
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h2
-rw-r--r--drivers/block/null_blk.c97
-rw-r--r--drivers/block/nvme-core.c610
-rw-r--r--drivers/block/nvme-scsi.c147
-rw-r--r--drivers/block/virtio_blk.c7
-rw-r--r--drivers/block/xen-blkback/blkback.c81
-rw-r--r--drivers/block/xen-blkback/common.h5
-rw-r--r--drivers/block/xen-blkback/xenbus.c14
-rw-r--r--drivers/block/xen-blkfront.c11
-rw-r--r--drivers/block/zram/zram_drv.c2
-rw-r--r--drivers/char/Kconfig1
-rw-r--r--drivers/char/raw.c2
-rw-r--r--drivers/char/virtio_console.c9
-rw-r--r--drivers/clk/at91/clk-master.c2
-rw-r--r--drivers/clk/clk-nomadik.c3
-rw-r--r--drivers/clk/clk.c13
-rw-r--r--drivers/clk/keystone/gate.c1
-rw-r--r--drivers/clk/mvebu/armada-370.c21
-rw-r--r--drivers/clk/mvebu/armada-xp.c20
-rw-r--r--drivers/clk/mvebu/dove.c19
-rw-r--r--drivers/clk/mvebu/kirkwood.c34
-rw-r--r--drivers/clk/shmobile/clk-rcar-gen2.c48
-rw-r--r--drivers/clk/tegra/clk-divider.c2
-rw-r--r--drivers/clk/tegra/clk-id.h4
-rw-r--r--drivers/clk/tegra/clk-tegra-periph.c10
-rw-r--r--drivers/clk/tegra/clk-tegra-super-gen4.c2
-rw-r--r--drivers/clk/tegra/clk-tegra114.c8
-rw-r--r--drivers/clk/tegra/clk-tegra124.c48
-rw-r--r--drivers/clk/tegra/clk-tegra20.c2
-rw-r--r--drivers/clocksource/bcm_kona_timer.c54
-rw-r--r--drivers/clocksource/vf_pit_timer.c2
-rw-r--r--drivers/cpufreq/cpufreq.c58
-rw-r--r--drivers/cpufreq/intel_pstate.c61
-rw-r--r--drivers/cpufreq/powernow-k8.c10
-rw-r--r--drivers/crypto/nx/nx-842.c29
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/imx-sdma.c1
-rw-r--r--drivers/dma/ioat/dma.c52
-rw-r--r--drivers/dma/ioat/dma.h1
-rw-r--r--drivers/dma/ioat/dma_v2.c11
-rw-r--r--drivers/dma/ioat/dma_v3.c3
-rw-r--r--drivers/dma/mv_xor.c24
-rw-r--r--drivers/dma/ste_dma40.c4
-rw-r--r--drivers/edac/edac_mc.c13
-rw-r--r--drivers/edac/edac_mc_sysfs.c10
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/i7300_edac.c38
-rw-r--r--drivers/edac/i7core_edac.c9
-rw-r--r--drivers/extcon/extcon-arizona.c12
-rw-r--r--drivers/firewire/core-device.c22
-rw-r--r--drivers/firewire/net.c6
-rw-r--r--drivers/firewire/ohci.c15
-rw-r--r--drivers/firewire/sbp2.c17
-rw-r--r--drivers/fmc/fmc-write-eeprom.c2
-rw-r--r--drivers/gpio/Kconfig1
-rw-r--r--drivers/gpio/gpio-bcm-kona.c4
-rw-r--r--drivers/gpio/gpio-clps711x.c1
-rw-r--r--drivers/gpio/gpio-intel-mid.c4
-rw-r--r--drivers/gpio/gpio-xtensa.c16
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c10
-rw-r--r--drivers/gpu/drm/ast/ast_fb.c2
-rw-r--r--drivers/gpu/drm/bochs/Kconfig1
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_fbdev.c2
-rw-r--r--drivers/gpu/drm/drm_ioctl.c12
-rw-r--r--drivers/gpu/drm/drm_pci.c2
-rw-r--r--drivers/gpu/drm/exynos/Kconfig4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c18
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_g2d.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_ipp.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c66
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c19
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c23
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c26
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c5
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c74
-rw-r--r--drivers/gpu/drm/i915/intel_ddi.c1
-rw-r--r--drivers/gpu/drm/i915/intel_display.c22
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c43
-rw-r--r--drivers/gpu/drm/i915/intel_hdmi.c6
-rw-r--r--drivers/gpu/drm/i915/intel_i2c.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c6
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.h1
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_fb.c2
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c179
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c9
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c3
-rw-r--r--drivers/gpu/drm/nouveau/Makefile1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nv40.c10
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nv50.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/mc.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c45
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c26
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c4
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c31
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c9
-rw-r--r--drivers/gpu/drm/radeon/btc_dpm.c32
-rw-r--r--drivers/gpu/drm/radeon/btcd.h4
-rw-r--r--drivers/gpu/drm/radeon/cik.c10
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c14
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c15
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c7
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c26
-rw-r--r--drivers/gpu/drm/radeon/evergreen_smc.h2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni.c3
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/r100.c2
-rw-r--r--drivers/gpu/drm/radeon/r300.c2
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r520.c2
-rw-r--r--drivers/gpu/drm/radeon/r600.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c14
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c18
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c15
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h2
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_ring.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_semaphore.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r6001
-rw-r--r--drivers/gpu/drm/radeon/rs400.c2
-rw-r--r--drivers/gpu/drm/radeon/rs600.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c2
-rw-r--r--drivers/gpu/drm/radeon/rv515.c2
-rw-r--r--drivers/gpu/drm/radeon/rv770.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c14
-rw-r--r--drivers/gpu/drm/radeon/si.c7
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c5
-rw-r--r--drivers/gpu/drm/radeon/sumo_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c3
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c1
-rw-r--r--drivers/gpu/drm/tegra/drm.c2
-rw-r--r--drivers/gpu/drm/tegra/rgb.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_agp_backend.c1
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c8
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_object.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_reg.h153
-rw-r--r--drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h11
-rw-r--r--drivers/gpu/drm/vmwgfx/svga_reg.h9
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_context.c141
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h38
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c337
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c96
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_mob.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_resource.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_shader.c469
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c24
-rw-r--r--drivers/gpu/host1x/job.c2
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-core.c3
-rw-r--r--drivers/hid/hid-hyperv.c11
-rw-r--r--drivers/hid/hid-ids.h8
-rw-r--r--drivers/hid/hid-input.c2
-rw-r--r--drivers/hid/hid-lg4ff.c2
-rw-r--r--drivers/hid/hid-microsoft.c4
-rw-r--r--drivers/hid/hid-multitouch.c5
-rw-r--r--drivers/hid/hid-sensor-hub.c3
-rw-r--r--drivers/hid/hid-sony.c27
-rw-r--r--drivers/hid/hidraw.c4
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hv/connection.c13
-rw-r--r--drivers/hwmon/da9055-hwmon.c4
-rw-r--r--drivers/hwmon/max1668.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c6
-rw-r--r--drivers/hwmon/pmbus/pmbus_core.c68
-rw-r--r--drivers/i2c/busses/Kconfig2
-rw-r--r--drivers/i2c/busses/i2c-mv64xxx.c33
-rw-r--r--drivers/iio/accel/bma180.c16
-rw-r--r--drivers/iio/adc/max1363.c2
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c9
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c1
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/imu/adis16400.h1
-rw-r--r--drivers/iio/imu/adis16400_core.c10
-rw-r--r--drivers/iio/light/cm32181.c16
-rw-r--r--drivers/iio/light/cm36651.c45
-rw-r--r--drivers/iio/light/tsl2563.c16
-rw-r--r--drivers/iio/magnetometer/ak8975.c16
-rw-r--r--drivers/iio/magnetometer/mag3110.c8
-rw-r--r--drivers/infiniband/hw/amso1100/c2.c4
-rw-r--r--drivers/infiniband/hw/amso1100/c2_rnic.c3
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c1
-rw-r--r--drivers/infiniband/hw/mlx4/main.c189
-rw-r--r--drivers/infiniband/hw/mlx5/Kconfig2
-rw-r--r--drivers/infiniband/hw/mlx5/main.c26
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c18
-rw-r--r--drivers/infiniband/hw/mlx5/user.h7
-rw-r--r--drivers/infiniband/hw/nes/nes.c5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c4
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c5
-rw-r--r--drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c9
-rw-r--r--drivers/infiniband/ulp/iser/iser_initiator.c3
-rw-r--r--drivers/infiniband/ulp/iser/iser_verbs.c10
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c181
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.h7
-rw-r--r--drivers/infiniband/ulp/srpt/ib_srpt.c14
-rw-r--r--drivers/input/misc/arizona-haptics.c19
-rw-r--r--drivers/iommu/arm-smmu.c105
-rw-r--r--drivers/iommu/omap-iommu-debug.c4
-rw-r--r--drivers/irqchip/Makefile1
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c4
-rw-r--r--drivers/irqchip/irq-metag-ext.c2
-rw-r--r--drivers/irqchip/irq-metag.c2
-rw-r--r--drivers/irqchip/irq-orion.c22
-rw-r--r--drivers/irqchip/irq-zevio.c127
-rw-r--r--drivers/isdn/capi/Kconfig18
-rw-r--r--drivers/isdn/hisax/q931.c2
-rw-r--r--drivers/md/Kconfig10
-rw-r--r--drivers/md/bcache/bcache.h4
-rw-r--r--drivers/md/bcache/bset.c7
-rw-r--r--drivers/md/bcache/btree.c4
-rw-r--r--drivers/md/bcache/extents.c2
-rw-r--r--drivers/md/bcache/request.c6
-rw-r--r--drivers/md/bcache/sysfs.c2
-rw-r--r--drivers/md/dm-cache-policy-mq.c4
-rw-r--r--drivers/md/dm-cache-target.c24
-rw-r--r--drivers/md/dm-io.c23
-rw-r--r--drivers/md/dm-mpath.c7
-rw-r--r--drivers/md/dm-raid1.c3
-rw-r--r--drivers/md/dm-snap-persistent.c3
-rw-r--r--drivers/md/dm-thin-metadata.c58
-rw-r--r--drivers/md/dm-thin-metadata.h21
-rw-r--r--drivers/md/dm-thin.c343
-rw-r--r--drivers/md/persistent-data/Kconfig10
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c115
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.h11
-rw-r--r--drivers/md/raid1.c13
-rw-r--r--drivers/md/raid5.c90
-rw-r--r--drivers/media/dvb-frontends/cx24117.c10
-rw-r--r--drivers/media/dvb-frontends/nxt200x.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/s5k5baf.c30
-rw-r--r--drivers/media/pci/bt8xx/bttv-cards.c2
-rw-r--r--drivers/media/pci/bt8xx/bttv-gpio.c2
-rw-r--r--drivers/media/pci/saa7134/saa7134-cards.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c5
-rw-r--r--drivers/media/platform/exynos4-is/fimc-lite.c7
-rw-r--r--drivers/media/platform/s5p-jpeg/jpeg-core.c8
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h2
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h4
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c6
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.h2
-rw-r--r--drivers/media/usb/hdpvr/hdpvr-core.c4
-rw-r--r--drivers/media/v4l2-core/v4l2-dv-timings.c1
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-contig.c12
-rw-r--r--drivers/media/v4l2-core/videobuf-dma-sg.c10
-rw-r--r--drivers/media/v4l2-core/videobuf-vmalloc.c10
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c5
-rw-r--r--drivers/message/i2o/i2o_config.c4
-rw-r--r--drivers/mfd/da9055-i2c.c12
-rw-r--r--drivers/mfd/max14577.c2
-rw-r--r--drivers/mfd/max8997.c6
-rw-r--r--drivers/mfd/max8998.c6
-rw-r--r--drivers/mfd/sec-core.c2
-rw-r--r--drivers/mfd/tps65217.c4
-rw-r--r--drivers/mfd/wm8994-core.c2
-rw-r--r--drivers/misc/genwqe/card_dev.c1
-rw-r--r--drivers/misc/mei/client.c15
-rw-r--r--drivers/misc/mic/host/mic_virtio.c3
-rw-r--r--drivers/misc/sgi-gru/grukdump.c11
-rw-r--r--drivers/misc/sgi-xp/xpc_uv.c2
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/mtd/nand/nand_base.c2
-rw-r--r--drivers/mtd/nand/omap2.c61
-rw-r--r--drivers/mtd/ubi/fastmap.c8
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/bonding/bond_3ad.c22
-rw-r--r--drivers/net/bonding/bond_3ad.h1
-rw-r--r--drivers/net/bonding/bond_alb.c2
-rw-r--r--drivers/net/bonding/bond_main.c159
-rw-r--r--drivers/net/bonding/bond_options.c3
-rw-r--r--drivers/net/bonding/bonding.h47
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/dev.c15
-rw-r--r--drivers/net/can/flexcan.c179
-rw-r--r--drivers/net/can/janz-ican3.c20
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/can/vcan.c9
-rw-r--r--drivers/net/ethernet/3com/3c59x.c2
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c3
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c15
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/ethernet/broadcom/b44.c14
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.c39
-rw-r--r--drivers/net/ethernet/broadcom/bnx2.h5
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c13
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c6
-rw-r--r--drivers/net/ethernet/broadcom/cnic.c111
-rw-r--r--drivers/net/ethernet/broadcom/cnic.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h2
-rw-r--r--drivers/net/ethernet/broadcom/cnic_if.h16
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c20
-rw-r--r--drivers/net/ethernet/broadcom/tg3.h6
-rw-r--r--drivers/net/ethernet/brocade/bna/bfa_ioc.c2
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c40
-rw-r--r--drivers/net/ethernet/cadence/macb.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/tulip_core.c1
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c84
-rw-r--r--drivers/net/ethernet/ethoc.c138
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c31
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c25
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.h1
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c6
-rw-r--r--drivers/net/ethernet/lantiq_etop.c2
-rw-r--r--drivers/net/ethernet/marvell/Kconfig6
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c14
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/Kconfig2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c4
-rw-r--r--drivers/net/ethernet/micrel/ks8851.c30
-rw-r--r--drivers/net/ethernet/neterion/vxge/vxge-main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c9
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/sfc/ptp.c7
-rw-r--r--drivers/net/ethernet/sfc/tx.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig11
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Makefile1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/chain_mode.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/common.h20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c330
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/ring_mode.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h3
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c93
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c5
-rw-r--r--drivers/net/ethernet/ti/cpsw.c35
-rw-r--r--drivers/net/ethernet/ti/davinci_cpdma.c4
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c53
-rw-r--r--drivers/net/ethernet/tile/tilegx.c2
-rw-r--r--drivers/net/ethernet/via/via-rhine.c8
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c13
-rw-r--r--drivers/net/hyperv/netvsc_drv.c53
-rw-r--r--drivers/net/hyperv/rndis_filter.c21
-rw-r--r--drivers/net/ieee802154/at86rf230.c11
-rw-r--r--drivers/net/irda/Kconfig7
-rw-r--r--drivers/net/irda/Makefile1
-rw-r--r--drivers/net/irda/ep7211-sir.c70
-rw-r--r--drivers/net/irda/irtty-sir.c1
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/phy/dp83640.c32
-rw-r--r--drivers/net/phy/mdio-sun4i.c3
-rw-r--r--drivers/net/phy/phy.c11
-rw-r--r--drivers/net/phy/phy_device.c57
-rw-r--r--drivers/net/team/team.c2
-rw-r--r--drivers/net/tun.c6
-rw-r--r--drivers/net/usb/Kconfig15
-rw-r--r--drivers/net/usb/Makefile3
-rw-r--r--drivers/net/usb/asix_devices.c3
-rw-r--r--drivers/net/usb/ax88179_178a.c46
-rw-r--r--drivers/net/usb/cdc_ether.c7
-rw-r--r--drivers/net/usb/cdc_ncm.c48
-rw-r--r--drivers/net/usb/gl620a.c4
-rw-r--r--drivers/net/usb/hso.c32
-rw-r--r--drivers/net/usb/mcs7830.c5
-rw-r--r--drivers/net/usb/net1080.c4
-rw-r--r--drivers/net/usb/qmi_wwan.c11
-rw-r--r--drivers/net/usb/r8152.c32
-rw-r--r--drivers/net/usb/r815x.c248
-rw-r--r--drivers/net/usb/rndis_host.c4
-rw-r--r--drivers/net/usb/smsc75xx.c4
-rw-r--r--drivers/net/usb/smsc95xx.c4
-rw-r--r--drivers/net/usb/sr9800.c874
-rw-r--r--drivers/net/usb/sr9800.h202
-rw-r--r--drivers/net/usb/usbnet.c25
-rw-r--r--drivers/net/veth.c3
-rw-r--r--drivers/net/virtio_net.c3
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c19
-rw-r--r--drivers/net/vxlan.c133
-rw-r--r--drivers/net/wan/dlci.c5
-rw-r--r--drivers/net/wireless/ath/ar5523/ar5523.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h4
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c63
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c12
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c70
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c15
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c18
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_proc.c2
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/mac80211.c22
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/sta.c1
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/tx.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-modparams.h11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c7
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c24
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mvm.h2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c91
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c8
-rw-r--r--drivers/net/wireless/libertas/cfg.c2
-rw-r--r--drivers/net/wireless/mwifiex/11ac.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n.c3
-rw-r--r--drivers/net/wireless/mwifiex/main.c2
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c34
-rw-r--r--drivers/net/wireless/mwifiex/scan.c8
-rw-r--r--drivers/net/wireless/mwifiex/usb.c12
-rw-r--r--drivers/net/wireless/mwifiex/wmm.c3
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c5
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c11
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c23
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h10
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c2
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c18
-rw-r--r--drivers/net/wireless/ti/wl1251/rx.c2
-rw-r--r--drivers/net/xen-netback/common.h6
-rw-r--r--drivers/net/xen-netback/interface.c4
-rw-r--r--drivers/net/xen-netback/netback.c55
-rw-r--r--drivers/net/xen-netfront.c6
-rw-r--r--drivers/of/address.c5
-rw-r--r--drivers/of/base.c128
-rw-r--r--drivers/of/of_mdio.c22
-rw-r--r--drivers/of/selftest.c67
-rw-r--r--drivers/of/testcase-data/testcases.dtsi3
-rw-r--r--drivers/of/testcase-data/tests-interrupts.dtsi (renamed from arch/arm/boot/dts/testcases/tests-interrupts.dtsi)0
-rw-r--r--drivers/of/testcase-data/tests-match.dtsi19
-rw-r--r--drivers/of/testcase-data/tests-phandle.dtsi (renamed from arch/arm/boot/dts/testcases/tests-phandle.dtsi)0
-rw-r--r--drivers/pci/bus.c2
-rw-r--r--drivers/pci/host/pci-mvebu.c11
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c70
-rw-r--r--drivers/pci/msi.c10
-rw-r--r--drivers/pci/pci.c13
-rw-r--r--drivers/phy/Kconfig3
-rw-r--r--drivers/phy/phy-core.c76
-rw-r--r--drivers/phy/phy-exynos-dp-video.c8
-rw-r--r--drivers/phy/phy-exynos-mipi-video.c10
-rw-r--r--drivers/phy/phy-mvebu-sata.c10
-rw-r--r--drivers/phy/phy-omap-usb2.c10
-rw-r--r--drivers/phy/phy-twl4030-usb.c10
-rw-r--r--drivers/pinctrl/Kconfig2
-rw-r--r--drivers/pinctrl/core.c8
-rw-r--r--drivers/pinctrl/pinctrl-at91.c10
-rw-r--r--drivers/pinctrl/pinctrl-capri.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx1-core.c10
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.c6
-rw-r--r--drivers/pinctrl/pinctrl-sunxi.h6
-rw-r--r--drivers/pinctrl/pinctrl-tegra.c2
-rw-r--r--drivers/pinctrl/sh-pfc/pfc-r8a7791.c6
-rw-r--r--drivers/pinctrl/sirf/pinctrl-prima2.c2
-rw-r--r--drivers/pinctrl/sirf/pinctrl-sirf.c4
-rw-r--r--drivers/pinctrl/vt8500/pinctrl-wmt.c15
-rw-r--r--drivers/pnp/pnpacpi/rsparser.c15
-rw-r--r--drivers/power/ds2782_battery.c2
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17040_battery.c5
-rw-r--r--drivers/pwm/pwm-lp3943.c4
-rw-r--r--drivers/rapidio/devices/tsi721.h1
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c27
-rw-r--r--drivers/regulator/ab3100.c4
-rw-r--r--drivers/regulator/core.c59
-rw-r--r--drivers/regulator/da9055-regulator.c4
-rw-r--r--drivers/regulator/da9063-regulator.c4
-rw-r--r--drivers/regulator/max14577.c10
-rw-r--r--drivers/regulator/s2mps11.c1
-rw-r--r--drivers/regulator/s5m8767.c4
-rw-r--r--drivers/rtc/rtc-s3c.c17
-rw-r--r--drivers/s390/cio/chsc.c1
-rw-r--r--drivers/s390/cio/cio.c40
-rw-r--r--drivers/s390/cio/qdio.h14
-rw-r--r--drivers/s390/cio/qdio_main.c2
-rw-r--r--drivers/s390/crypto/zcrypt_msgtype6.c24
-rw-r--r--drivers/s390/net/qeth_core_main.c5
-rw-r--r--drivers/s390/net/qeth_l2_main.c3
-rw-r--r--drivers/s390/net/qeth_l3_main.c3
-rw-r--r--drivers/sbus/char/jsflash.c1
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c38
-rw-r--r--drivers/scsi/bnx2i/bnx2i_hwi.c52
-rw-r--r--drivers/scsi/bnx2i/bnx2i_iscsi.c23
-rw-r--r--drivers/scsi/isci/host.h5
-rw-r--r--drivers/scsi/isci/port_config.c7
-rw-r--r--drivers/scsi/isci/task.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h3
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c46
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c44
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h3
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c158
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h7
-rw-r--r--drivers/scsi/scsi_lib.c2
-rw-r--r--drivers/scsi/storvsc_drv.c3
-rw-r--r--drivers/spi/Kconfig26
-rw-r--r--drivers/spi/Makefile1
-rw-r--r--drivers/spi/spi-altera.c7
-rw-r--r--drivers/spi/spi-ath79.c5
-rw-r--r--drivers/spi/spi-atmel.c51
-rw-r--r--drivers/spi/spi-au1550.c30
-rw-r--r--drivers/spi/spi-bcm2835.c1
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c8
-rw-r--r--drivers/spi/spi-bcm63xx.c6
-rw-r--r--drivers/spi/spi-bfin-sport.c1
-rw-r--r--drivers/spi/spi-bfin-v3.c3
-rw-r--r--drivers/spi/spi-bfin5xx.c8
-rw-r--r--drivers/spi/spi-bitbang.c5
-rw-r--r--drivers/spi/spi-butterfly.c3
-rw-r--r--drivers/spi/spi-clps711x.c227
-rw-r--r--drivers/spi/spi-coldfire-qspi.c118
-rw-r--r--drivers/spi/spi-davinci.c14
-rw-r--r--drivers/spi/spi-dw-mmio.c2
-rw-r--r--drivers/spi/spi-dw.c17
-rw-r--r--drivers/spi/spi-efm32.c46
-rw-r--r--drivers/spi/spi-ep93xx.c21
-rw-r--r--drivers/spi/spi-falcon.c5
-rw-r--r--drivers/spi/spi-fsl-dspi.c100
-rw-r--r--drivers/spi/spi-fsl-espi.c5
-rw-r--r--drivers/spi/spi-fsl-lib.c14
-rw-r--r--drivers/spi/spi-fsl-spi.c30
-rw-r--r--drivers/spi/spi-gpio.c8
-rw-r--r--drivers/spi/spi-imx.c11
-rw-r--r--drivers/spi/spi-mpc512x-psc.c17
-rw-r--r--drivers/spi/spi-mpc52xx-psc.c1
-rw-r--r--drivers/spi/spi-mpc52xx.c17
-rw-r--r--drivers/spi/spi-mxs.c7
-rw-r--r--drivers/spi/spi-nuc900.c30
-rw-r--r--drivers/spi/spi-oc-tiny.c3
-rw-r--r--drivers/spi/spi-octeon.c80
-rw-r--r--drivers/spi/spi-omap-100k.c52
-rw-r--r--drivers/spi/spi-omap-uwire.c34
-rw-r--r--drivers/spi/spi-omap2-mcspi.c65
-rw-r--r--drivers/spi/spi-orion.c80
-rw-r--r--drivers/spi/spi-pl022.c80
-rw-r--r--drivers/spi/spi-ppc4xx.c1
-rw-r--r--drivers/spi/spi-pxa2xx-dma.c1
-rw-r--r--drivers/spi/spi-pxa2xx-pxadma.c1
-rw-r--r--drivers/spi/spi-pxa2xx.c3
-rw-r--r--drivers/spi/spi-qup.c779
-rw-r--r--drivers/spi/spi-rspi.c842
-rw-r--r--drivers/spi/spi-s3c24xx.c19
-rw-r--r--drivers/spi/spi-s3c64xx.c424
-rw-r--r--drivers/spi/spi-sc18is602.c29
-rw-r--r--drivers/spi/spi-sh-hspi.c43
-rw-r--r--drivers/spi/spi-sh-msiof.c385
-rw-r--r--drivers/spi/spi-sh-sci.c8
-rw-r--r--drivers/spi/spi-tegra114.c23
-rw-r--r--drivers/spi/spi-tegra20-sflash.c26
-rw-r--r--drivers/spi/spi-tegra20-slink.c20
-rw-r--r--drivers/spi/spi-ti-qspi.c4
-rw-r--r--drivers/spi/spi-topcliff-pch.c15
-rw-r--r--drivers/spi/spi-xcomm.c1
-rw-r--r--drivers/spi/spi-xilinx.c1
-rw-r--r--drivers/spi/spi.c239
-rw-r--r--drivers/spi/spidev.c23
-rw-r--r--drivers/staging/android/ashmem.c45
-rw-r--r--drivers/staging/android/binder.c3
-rw-r--r--drivers/staging/android/ion/compat_ion.c26
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c12
-rw-r--r--drivers/staging/android/ion/ion_heap.c2
-rw-r--r--drivers/staging/android/ion/ion_priv.h1
-rw-r--r--drivers/staging/android/ion/ion_system_heap.c6
-rw-r--r--drivers/staging/android/sw_sync.h17
-rw-r--r--drivers/staging/android/sync.c14
-rw-r--r--drivers/staging/bcm/Bcmnet.c2
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/adv_pci1710.c17
-rw-r--r--drivers/staging/comedi/drivers/usbduxsigma.c6
-rw-r--r--drivers/staging/cxt1e1/linux.c2
-rw-r--r--drivers/staging/dgrp/dgrp_net_ops.c330
-rw-r--r--drivers/staging/gdm72xx/gdm_usb.c3
-rw-r--r--drivers/staging/iio/Documentation/iio_utils.h6
-rw-r--r--drivers/staging/iio/adc/ad799x_core.c13
-rw-r--r--drivers/staging/iio/adc/mxs-lradc.c7
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c2
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c55
-rw-r--r--drivers/staging/imx-drm/imx-hdmi.c22
-rw-r--r--drivers/staging/lustre/TODO5
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c2
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c5
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h3
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c2
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c6
-rw-r--r--drivers/staging/media/go7007/go7007-loader.c4
-rw-r--r--drivers/staging/netlogic/xlr_net.c7
-rw-r--r--drivers/staging/octeon-usb/octeon-hcd.c273
-rw-r--r--drivers/staging/ozwpan/ozproto.c3
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c22
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c12
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c2
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c3
-rw-r--r--drivers/staging/rtl8821ae/Kconfig2
-rw-r--r--drivers/staging/rtl8821ae/wifi.h2
-rw-r--r--drivers/staging/usbip/userspace/libsrc/names.c8
-rw-r--r--drivers/staging/usbip/vhci_sysfs.c3
-rw-r--r--drivers/staging/wlags49_h2/wl_wext.c2
-rw-r--r--drivers/target/iscsi/iscsi_target.c10
-rw-r--r--drivers/target/iscsi/iscsi_target_erl1.c4
-rw-r--r--drivers/target/iscsi/iscsi_target_erl2.c16
-rw-r--r--drivers/target/iscsi/iscsi_target_tpg.c2
-rw-r--r--drivers/target/target_core_alua.c2
-rw-r--r--drivers/target/target_core_pr.c11
-rw-r--r--drivers/target/target_core_sbc.c39
-rw-r--r--drivers/target/target_core_spc.c4
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/thermal/Kconfig13
-rw-r--r--drivers/thermal/thermal_core.c27
-rw-r--r--drivers/thermal/x86_pkg_temp_thermal.c11
-rw-r--r--drivers/tty/hvc/hvc_opal.c8
-rw-r--r--drivers/tty/hvc/hvc_rtas.c12
-rw-r--r--drivers/tty/hvc/hvc_udbg.c9
-rw-r--r--drivers/tty/hvc/hvc_xen.c17
-rw-r--r--drivers/tty/n_gsm.c11
-rw-r--r--drivers/tty/n_tty.c14
-rw-r--r--drivers/tty/serial/8250/8250_core.c18
-rw-r--r--drivers/tty/serial/8250/8250_dw.c4
-rw-r--r--drivers/tty/serial/8250/8250_pci.c3
-rw-r--r--drivers/tty/serial/omap-serial.c11
-rw-r--r--drivers/tty/serial/sirfsoc_uart.c4
-rw-r--r--drivers/tty/serial/sunhv.c22
-rw-r--r--drivers/tty/serial/sunsab.c14
-rw-r--r--drivers/tty/serial/sunsu.c14
-rw-r--r--drivers/tty/serial/sunzilog.c14
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/chipidea/udc.c4
-rw-r--r--drivers/usb/core/config.c4
-rw-r--r--drivers/usb/core/driver.c24
-rw-r--r--drivers/usb/core/hcd.c1
-rw-r--r--drivers/usb/core/hub.c7
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc2/core.c2
-rw-r--r--drivers/usb/dwc2/hcd.c11
-rw-r--r--drivers/usb/dwc2/platform.c3
-rw-r--r--drivers/usb/gadget/bcm63xx_udc.c58
-rw-r--r--drivers/usb/gadget/f_fs.c7
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c13
-rw-r--r--drivers/usb/host/ehci-hub.c26
-rw-r--r--drivers/usb/host/xhci-dbg.c6
-rw-r--r--drivers/usb/host/xhci-mem.c14
-rw-r--r--drivers/usb/host/xhci-pci.c5
-rw-r--r--drivers/usb/host/xhci-ring.c68
-rw-r--r--drivers/usb/host/xhci.c24
-rw-r--r--drivers/usb/host/xhci.h41
-rw-r--r--drivers/usb/musb/musb_core.c15
-rw-r--r--drivers/usb/musb/musb_host.c3
-rw-r--r--drivers/usb/musb/musb_virthub.c26
-rw-r--r--drivers/usb/musb/omap2430.c2
-rw-r--r--drivers/usb/phy/phy-msm-usb.c57
-rw-r--r--drivers/usb/phy/phy.c8
-rw-r--r--drivers/usb/serial/ftdi_sio.c5
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h13
-rw-r--r--drivers/usb/serial/option.c6
-rw-r--r--drivers/usb/serial/qcserial.c3
-rw-r--r--drivers/usb/serial/usb-serial-simple.c3
-rw-r--r--drivers/usb/storage/Kconfig4
-rw-r--r--drivers/usb/storage/scsiglue.c6
-rw-r--r--drivers/usb/storage/unusual_cypress.h2
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vfio/vfio_iommu_type1.c4
-rw-r--r--drivers/vhost/net.c47
-rw-r--r--drivers/vhost/scsi.c6
-rw-r--r--drivers/video/Kconfig2
-rw-r--r--drivers/video/exynos/Kconfig3
-rw-r--r--drivers/video/omap2/dss/dispc.c16
-rw-r--r--drivers/video/omap2/dss/dpi.c2
-rw-r--r--drivers/video/omap2/dss/sdi.c2
-rw-r--r--drivers/vme/bridges/vme_ca91cx42.c4
-rw-r--r--drivers/vme/bridges/vme_tsi148.c4
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/w83697hf_wdt.c2
-rw-r--r--drivers/xen/Makefile1
-rw-r--r--drivers/xen/events/events_base.c2
-rw-r--r--drivers/xen/gntdev.c13
-rw-r--r--drivers/xen/grant-table.c89
-rw-r--r--drivers/xen/xencomm.c219
-rw-r--r--fs/bio-integrity.c18
-rw-r--r--fs/bio.c15
-rw-r--r--fs/btrfs/check-integrity.c4
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/disk-io.c5
-rw-r--r--fs/btrfs/extent-tree.c1
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/btrfs/ioctl.c22
-rw-r--r--fs/btrfs/send.c22
-rw-r--r--fs/btrfs/super.c13
-rw-r--r--fs/btrfs/sysfs.c10
-rw-r--r--fs/buffer.c6
-rw-r--r--fs/ceph/acl.c11
-rw-r--r--fs/ceph/dir.c23
-rw-r--r--fs/ceph/file.c1
-rw-r--r--fs/ceph/super.c32
-rw-r--r--fs/ceph/super.h7
-rw-r--r--fs/ceph/xattr.c54
-rw-r--r--fs/cifs/cifsacl.c61
-rw-r--r--fs/cifs/cifsglob.h13
-rw-r--r--fs/cifs/cifsproto.h9
-rw-r--r--fs/cifs/cifssmb.c15
-rw-r--r--fs/cifs/dir.c2
-rw-r--r--fs/cifs/file.c94
-rw-r--r--fs/cifs/inode.c15
-rw-r--r--fs/cifs/smb1ops.c9
-rw-r--r--fs/cifs/smb2glob.h3
-rw-r--r--fs/cifs/smb2ops.c14
-rw-r--r--fs/cifs/smb2pdu.c9
-rw-r--r--fs/cifs/smb2proto.h3
-rw-r--r--fs/cifs/transport.c29
-rw-r--r--fs/cifs/xattr.c15
-rw-r--r--fs/exec.c45
-rw-r--r--fs/ext4/ext4.h2
-rw-r--r--fs/ext4/extents.c1
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ioctl.c3
-rw-r--r--fs/ext4/resize.c34
-rw-r--r--fs/ext4/super.c20
-rw-r--r--fs/file.c58
-rw-r--r--fs/file_table.c1
-rw-r--r--fs/fs-writeback.c33
-rw-r--r--fs/fscache/object-list.c5
-rw-r--r--fs/fscache/object.c3
-rw-r--r--fs/hfsplus/catalog.c41
-rw-r--r--fs/hfsplus/hfsplus_fs.h1
-rw-r--r--fs/hfsplus/hfsplus_raw.h6
-rw-r--r--fs/hfsplus/inode.c9
-rw-r--r--fs/hfsplus/options.c2
-rw-r--r--fs/jbd2/transaction.c6
-rw-r--r--fs/jfs/acl.c2
-rw-r--r--fs/jfs/xattr.c14
-rw-r--r--fs/kernfs/dir.c12
-rw-r--r--fs/kernfs/mount.c8
-rw-r--r--fs/lockd/svclock.c8
-rw-r--r--fs/namei.c32
-rw-r--r--fs/nfs/delegation.c11
-rw-r--r--fs/nfs/dir.c5
-rw-r--r--fs/nfs/inode.c14
-rw-r--r--fs/nfs/internal.h12
-rw-r--r--fs/nfs/nfs3acl.c34
-rw-r--r--fs/nfs/nfs3proc.c1
-rw-r--r--fs/nfs/nfs4client.c9
-rw-r--r--fs/nfs/nfs4filelayout.c10
-rw-r--r--fs/nfs/nfs4namespace.c12
-rw-r--r--fs/nfs/nfs4proc.c32
-rw-r--r--fs/nfs/nfs4session.c25
-rw-r--r--fs/nfs/nfs4session.h2
-rw-r--r--fs/nfs/nfs4state.c19
-rw-r--r--fs/nfsd/nfs4acl.c9
-rw-r--r--fs/notify/dnotify/dnotify.c2
-rw-r--r--fs/notify/fanotify/fanotify.c8
-rw-r--r--fs/notify/fanotify/fanotify_user.c13
-rw-r--r--fs/notify/fsnotify.c2
-rw-r--r--fs/notify/group.c8
-rw-r--r--fs/notify/inotify/inotify.h2
-rw-r--r--fs/notify/inotify/inotify_fsnotify.c3
-rw-r--r--fs/notify/inotify/inotify_user.c14
-rw-r--r--fs/notify/notification.c20
-rw-r--r--fs/ntfs/file.c2
-rw-r--r--fs/ocfs2/alloc.c40
-rw-r--r--fs/ocfs2/file.c60
-rw-r--r--fs/ocfs2/localalloc.c42
-rw-r--r--fs/ocfs2/localalloc.h6
-rw-r--r--fs/ocfs2/namei.c17
-rw-r--r--fs/ocfs2/quota_global.c27
-rw-r--r--fs/ocfs2/quota_local.c4
-rw-r--r--fs/open.c4
-rw-r--r--fs/posix_acl.c18
-rw-r--r--fs/proc/base.c1
-rw-r--r--fs/proc/page.c5
-rw-r--r--fs/proc/vmcore.c26
-rw-r--r--fs/quota/dquot.c14
-rw-r--r--fs/read_write.c40
-rw-r--r--fs/reiserfs/do_balan.c895
-rw-r--r--fs/sync.c32
-rw-r--r--fs/sysfs/mount.c5
-rw-r--r--fs/udf/file.c14
-rw-r--r--fs/udf/inode.c1
-rw-r--r--fs/xfs/xfs_file.c2
-rw-r--r--fs/xfs/xfs_iops.c16
-rw-r--r--fs/xfs/xfs_log_cil.c19
-rw-r--r--fs/xfs/xfs_mount.c24
-rw-r--r--fs/xfs/xfs_sb.c10
-rw-r--r--fs/xfs/xfs_super.c2
-rw-r--r--include/asm-generic/pgtable.h39
-rw-r--r--include/drm/drmP.h3
-rw-r--r--include/drm/drm_crtc.h3
-rw-r--r--include/drm/ttm/ttm_page_alloc.h2
-rw-r--r--include/dt-bindings/clock/tegra124-car.h4
-rw-r--r--include/kvm/arm_vgic.h5
-rw-r--r--include/linux/audit.h3
-rw-r--r--include/linux/binfmts.h1
-rw-r--r--include/linux/bio.h12
-rw-r--r--include/linux/blk-mq.h16
-rw-r--r--include/linux/blkdev.h11
-rw-r--r--include/linux/can/skb.h38
-rw-r--r--include/linux/ceph/ceph_fs.h5
-rw-r--r--include/linux/cgroup.h2
-rw-r--r--include/linux/clk/ti.h4
-rw-r--r--include/linux/compiler-gcc4.h6
-rw-r--r--include/linux/dma-buf.h2
-rw-r--r--include/linux/file.h27
-rw-r--r--include/linux/firewire.h1
-rw-r--r--include/linux/fs.h17
-rw-r--r--include/linux/fsnotify_backend.h4
-rw-r--r--include/linux/ftrace_event.h4
-rw-r--r--include/linux/gfp.h4
-rw-r--r--include/linux/gpio/consumer.h4
-rw-r--r--include/linux/huge_mm.h41
-rw-r--r--include/linux/hyperv.h2
-rw-r--r--include/linux/interrupt.h5
-rw-r--r--include/linux/ipc_namespace.h2
-rw-r--r--include/linux/kernfs.h9
-rw-r--r--include/linux/mfd/max8997-private.h2
-rw-r--r--include/linux/mfd/max8998-private.h2
-rw-r--r--include/linux/mfd/tps65217.h4
-rw-r--r--include/linux/mlx5/driver.h3
-rw-r--r--include/linux/mm.h20
-rw-r--r--include/linux/mmzone.h4
-rw-r--r--include/linux/netdevice.h36
-rw-r--r--include/linux/nfs_xdr.h7
-rw-r--r--include/linux/nvme.h6
-rw-r--r--include/linux/of.h153
-rw-r--r--include/linux/of_device.h4
-rw-r--r--include/linux/page-flags.h4
-rw-r--r--include/linux/pci.h20
-rw-r--r--include/linux/phy/phy.h14
-rw-r--r--include/linux/platform_data/spi-s3c64xx.h9
-rw-r--r--include/linux/pm_runtime.h4
-rw-r--r--include/linux/rmap.h3
-rw-r--r--include/linux/sched.h3
-rw-r--r--include/linux/security.h10
-rw-r--r--include/linux/skbuff.h22
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/smp.h3
-rw-r--r--include/linux/spi/spi.h38
-rw-r--r--include/linux/spi/spi_bitbang.h2
-rw-r--r--include/linux/syscalls.h6
-rw-r--r--include/linux/tracepoint.h6
-rw-r--r--include/linux/usb.h2
-rw-r--r--include/linux/usb/cdc_ncm.h1
-rw-r--r--include/linux/vm_event_item.h4
-rw-r--r--include/linux/vmstat.h8
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/linux/writeback.h2
-rw-r--r--include/net/datalink.h2
-rw-r--r--include/net/dn.h2
-rw-r--r--include/net/dn_route.h2
-rw-r--r--include/net/ethoc.h1
-rw-r--r--include/net/ip_tunnels.h1
-rw-r--r--include/net/ipx.h11
-rw-r--r--include/net/net_namespace.h8
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netfilter/nf_tables.h9
-rw-r--r--include/net/netfilter/nft_reject.h25
-rw-r--r--include/net/sctp/structs.h14
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tcp.h14
-rw-r--r--include/net/xfrm.h11
-rw-r--r--include/rdma/ib_verbs.h3
-rw-r--r--include/sound/soc-dapm.h8
-rw-r--r--include/target/iscsi/iscsi_transport.h1
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/events/power.h7
-rw-r--r--include/trace/events/sunrpc.h4
-rw-r--r--include/trace/events/writeback.h6
-rw-r--r--include/trace/ftrace.h7
-rw-r--r--include/uapi/asm-generic/unistd.h6
-rw-r--r--include/uapi/drm/drm.h2
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/in6.h23
-rw-r--r--include/uapi/linux/mic_ioctl.h2
-rw-r--r--include/uapi/linux/nvme.h11
-rw-r--r--include/uapi/linux/spi/spidev.h14
-rw-r--r--include/uapi/xen/Kbuild2
-rw-r--r--include/uapi/xen/gntalloc.h (renamed from include/xen/gntalloc.h)0
-rw-r--r--include/uapi/xen/gntdev.h (renamed from include/xen/gntdev.h)0
-rw-r--r--include/xen/grant_table.h8
-rw-r--r--include/xen/interface/io/blkif.h34
-rw-r--r--include/xen/interface/xencomm.h41
-rw-r--r--include/xen/xencomm.h77
-rw-r--r--init/main.c4
-rw-r--r--ipc/mq_sysctl.c18
-rw-r--r--ipc/mqueue.c6
-rw-r--r--ipc/msg.c2
-rw-r--r--kernel/audit.c31
-rw-r--r--kernel/audit.h2
-rw-r--r--kernel/audit_tree.c2
-rw-r--r--kernel/audit_watch.c2
-rw-r--r--kernel/auditfilter.c10
-rw-r--r--kernel/auditsc.c2
-rw-r--r--kernel/cgroup.c71
-rw-r--r--kernel/cpuset.c10
-rw-r--r--kernel/events/core.c12
-rw-r--r--kernel/futex.c53
-rw-r--r--kernel/irq/Kconfig1
-rw-r--r--kernel/irq/devres.c45
-rw-r--r--kernel/irq/irqdesc.c1
-rw-r--r--kernel/irq/irqdomain.c1
-rw-r--r--kernel/irq/manage.c3
-rw-r--r--kernel/kmod.c2
-rw-r--r--kernel/power/console.c1
-rw-r--r--kernel/printk/printk.c2
-rw-r--r--kernel/profile.c4
-rw-r--r--kernel/sched/clock.c4
-rw-r--r--kernel/sched/core.c37
-rw-r--r--kernel/sched/cpudeadline.c6
-rw-r--r--kernel/sched/deadline.c20
-rw-r--r--kernel/sched/fair.c10
-rw-r--r--kernel/sched/rt.c8
-rw-r--r--kernel/sched/sched.h1
-rw-r--r--kernel/stop_machine.c2
-rw-r--r--kernel/time/jiffies.c6
-rw-r--r--kernel/time/sched_clock.c46
-rw-r--r--kernel/time/tick-broadcast.c1
-rw-r--r--kernel/trace/ring_buffer.c7
-rw-r--r--kernel/trace/trace_events.c16
-rw-r--r--kernel/trace/trace_export.c7
-rw-r--r--kernel/tracepoint.c7
-rw-r--r--kernel/user_namespace.c2
-rw-r--r--kernel/workqueue.c7
-rw-r--r--lib/Kconfig.debug2
-rw-r--r--lib/Makefile1
-rw-r--r--lib/dma-debug.c131
-rw-r--r--lib/fonts/Kconfig6
-rw-r--r--lib/percpu_ida.c7
-rw-r--r--lib/radix-tree.c4
-rw-r--r--mm/Kconfig4
-rw-r--r--mm/compaction.c20
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/fremap.c28
-rw-r--r--mm/huge_memory.c20
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/memcontrol.c20
-rw-r--r--mm/memory-failure.c8
-rw-r--r--mm/memory.c15
-rw-r--r--mm/migrate.c43
-rw-r--r--mm/mprotect.c25
-rw-r--r--mm/page-writeback.c5
-rw-r--r--mm/page_alloc.c30
-rw-r--r--mm/rmap.c5
-rw-r--r--mm/slub.c38
-rw-r--r--mm/swap.c4
-rw-r--r--mm/swap_state.c63
-rw-r--r--mm/swapfile.c11
-rw-r--r--mm/vmpressure.c1
-rw-r--r--mm/vmstat.c4
-rw-r--r--net/8021q/vlan_dev.c3
-rw-r--r--net/9p/client.c2
-rw-r--r--net/9p/trans_virtio.c5
-rw-r--r--net/batman-adv/bat_iv_ogm.c30
-rw-r--r--net/batman-adv/hard-interface.c22
-rw-r--r--net/batman-adv/originator.c36
-rw-r--r--net/batman-adv/originator.h4
-rw-r--r--net/batman-adv/routing.c4
-rw-r--r--net/batman-adv/send.c9
-rw-r--r--net/batman-adv/translation-table.c23
-rw-r--r--net/bluetooth/hidp/core.c16
-rw-r--r--net/bluetooth/hidp/hidp.h4
-rw-r--r--net/bridge/br_device.c54
-rw-r--r--net/bridge/br_fdb.c137
-rw-r--r--net/bridge/br_if.c6
-rw-r--r--net/bridge/br_input.c4
-rw-r--r--net/bridge/br_multicast.c33
-rw-r--r--net/bridge/br_private.h13
-rw-r--r--net/bridge/br_stp_if.c2
-rw-r--r--net/bridge/br_vlan.c27
-rw-r--r--net/caif/caif_dev.c1
-rw-r--r--net/caif/cfsrvl.c1
-rw-r--r--net/can/af_can.c3
-rw-r--r--net/can/bcm.c4
-rw-r--r--net/can/raw.c27
-rw-r--r--net/ceph/messenger.c8
-rw-r--r--net/ceph/osd_client.c84
-rw-r--r--net/core/dev.c28
-rw-r--r--net/core/fib_rules.c7
-rw-r--r--net/core/flow_dissector.c20
-rw-r--r--net/core/neighbour.c8
-rw-r--r--net/core/netpoll.c6
-rw-r--r--net/core/rtnetlink.c31
-rw-r--r--net/core/skbuff.c103
-rw-r--r--net/core/sock.c11
-rw-r--r--net/dccp/ccids/lib/tfrc.c2
-rw-r--r--net/dccp/ccids/lib/tfrc.h1
-rw-r--r--net/decnet/af_decnet.c5
-rw-r--r--net/hsr/hsr_framereg.c2
-rw-r--r--net/ieee802154/6lowpan.c23
-rw-r--r--net/ipv4/af_inet.c7
-rw-r--r--net/ipv4/devinet.c3
-rw-r--r--net/ipv4/inet_fragment.c5
-rw-r--r--net/ipv4/ip_forward.c71
-rw-r--r--net/ipv4/ip_output.c3
-rw-r--r--net/ipv4/ip_tunnel.c82
-rw-r--r--net/ipv4/ip_tunnel_core.c47
-rw-r--r--net/ipv4/ipconfig.c2
-rw-r--r--net/ipv4/ipmr.c13
-rw-r--r--net/ipv4/netfilter/Kconfig5
-rw-r--r--net/ipv4/netfilter/Makefile1
-rw-r--r--net/ipv4/netfilter/nf_nat_h323.c5
-rw-r--r--net/ipv4/netfilter/nf_nat_snmp_basic.c4
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c75
-rw-r--r--net/ipv4/route.c13
-rw-r--r--net/ipv4/tcp.c10
-rw-r--r--net/ipv4/tcp_cong.c3
-rw-r--r--net/ipv4/tcp_input.c21
-rw-r--r--net/ipv4/tcp_output.c48
-rw-r--r--net/ipv4/udp_offload.c17
-rw-r--r--net/ipv6/Kconfig1
-rw-r--r--net/ipv6/addrconf.c7
-rw-r--r--net/ipv6/exthdrs_core.c2
-rw-r--r--net/ipv6/exthdrs_offload.c4
-rw-r--r--net/ipv6/icmp.c2
-rw-r--r--net/ipv6/ip6_offload.c20
-rw-r--r--net/ipv6/ip6_output.c34
-rw-r--r--net/ipv6/ip6mr.c13
-rw-r--r--net/ipv6/netfilter/Kconfig5
-rw-r--r--net/ipv6/netfilter/Makefile1
-rw-r--r--net/ipv6/netfilter/nft_reject_ipv6.c76
-rw-r--r--net/ipv6/ping.c1
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/sit.c19
-rw-r--r--net/ipv6/udp_offload.c2
-rw-r--r--net/ipx/af_ipx.c22
-rw-r--r--net/ipx/ipx_route.c4
-rw-r--r--net/key/af_key.c19
-rw-r--r--net/l2tp/l2tp_core.c4
-rw-r--r--net/l2tp/l2tp_core.h1
-rw-r--r--net/l2tp/l2tp_netlink.c4
-rw-r--r--net/l2tp/l2tp_ppp.c13
-rw-r--r--net/mac80211/cfg.c44
-rw-r--r--net/mac80211/chan.c6
-rw-r--r--net/mac80211/ht.c4
-rw-r--r--net/mac80211/ibss.c5
-rw-r--r--net/mac80211/ieee80211_i.h10
-rw-r--r--net/mac80211/iface.c33
-rw-r--r--net/mac80211/mesh_ps.c1
-rw-r--r--net/mac80211/mlme.c24
-rw-r--r--net/mac80211/rx.c7
-rw-r--r--net/mac80211/sta_info.c67
-rw-r--r--net/mac80211/sta_info.h7
-rw-r--r--net/mac80211/tx.c17
-rw-r--r--net/mac80211/util.c48
-rw-r--r--net/mac80211/wme.c5
-rw-r--r--net/netfilter/Kconfig6
-rw-r--r--net/netfilter/Makefile1
-rw-r--r--net/netfilter/ipvs/ip_vs_conn.c8
-rw-r--r--net/netfilter/nf_conntrack_core.c55
-rw-r--r--net/netfilter/nf_conntrack_netlink.c35
-rw-r--r--net/netfilter/nf_nat_core.c56
-rw-r--r--net/netfilter/nf_synproxy_core.c5
-rw-r--r--net/netfilter/nf_tables_api.c82
-rw-r--r--net/netfilter/nf_tables_core.c6
-rw-r--r--net/netfilter/nft_ct.c16
-rw-r--r--net/netfilter/nft_log.c5
-rw-r--r--net/netfilter/nft_lookup.c1
-rw-r--r--net/netfilter/nft_meta.c4
-rw-r--r--net/netfilter/nft_payload.c3
-rw-r--r--net/netfilter/nft_queue.c4
-rw-r--r--net/netfilter/nft_rbtree.c16
-rw-r--r--net/netfilter/nft_reject.c89
-rw-r--r--net/netfilter/nft_reject_inet.c63
-rw-r--r--net/netfilter/xt_CT.c7
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/nfc/nci/core.c2
-rw-r--r--net/openvswitch/datapath.c32
-rw-r--r--net/openvswitch/flow.c3
-rw-r--r--net/openvswitch/flow_table.c88
-rw-r--r--net/openvswitch/flow_table.h2
-rw-r--r--net/packet/af_packet.c26
-rw-r--r--net/sched/sch_api.c7
-rw-r--r--net/sched/sch_fq.c21
-rw-r--r--net/sched/sch_pie.c21
-rw-r--r--net/sched/sch_tbf.c24
-rw-r--r--net/sctp/associola.c211
-rw-r--r--net/sctp/ipv6.c2
-rw-r--r--net/sctp/sm_make_chunk.c4
-rw-r--r--net/sctp/sm_sideeffect.c7
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c47
-rw-r--r--net/sctp/sysctl.c18
-rw-r--r--net/sctp/ulpevent.c8
-rw-r--r--net/socket.c17
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c19
-rw-r--r--net/sunrpc/backchannel_rqst.c6
-rw-r--r--net/sunrpc/svc_xprt.c6
-rw-r--r--net/sunrpc/xprtsock.c6
-rw-r--r--net/tipc/bearer.c7
-rw-r--r--net/tipc/config.c11
-rw-r--r--net/tipc/core.c109
-rw-r--r--net/tipc/core.h2
-rw-r--r--net/tipc/handler.c1
-rw-r--r--net/tipc/link.c7
-rw-r--r--net/tipc/name_table.c40
-rw-r--r--net/tipc/netlink.c8
-rw-r--r--net/tipc/ref.c3
-rw-r--r--net/tipc/server.c19
-rw-r--r--net/tipc/server.h2
-rw-r--r--net/tipc/socket.c12
-rw-r--r--net/tipc/subscr.c48
-rw-r--r--net/unix/af_unix.c3
-rw-r--r--net/wireless/core.c19
-rw-r--r--net/wireless/core.h4
-rw-r--r--net/wireless/nl80211.c32
-rw-r--r--net/wireless/nl80211.h8
-rw-r--r--net/wireless/reg.c12
-rw-r--r--net/wireless/scan.c40
-rw-r--r--net/wireless/sme.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--net/xfrm/xfrm_state.c23
-rw-r--r--net/xfrm/xfrm_user.c11
-rw-r--r--scripts/Makefile.lib1
-rwxr-xr-xscripts/checkpatch.pl4
-rw-r--r--scripts/gen_initramfs_list.sh2
-rwxr-xr-xscripts/get_maintainer.pl2
-rw-r--r--scripts/mod/file2alias.c4
-rw-r--r--scripts/mod/modpost.c13
-rw-r--r--security/Kconfig2
-rw-r--r--security/capability.c3
-rw-r--r--security/keys/keyring.c6
-rw-r--r--security/security.c6
-rw-r--r--security/selinux/hooks.c13
-rw-r--r--security/selinux/include/security.h2
-rw-r--r--security/selinux/include/xfrm.h3
-rw-r--r--security/selinux/nlmsgtab.c2
-rw-r--r--security/selinux/selinuxfs.c28
-rw-r--r--security/selinux/ss/policydb.c8
-rw-r--r--security/selinux/ss/services.c10
-rw-r--r--security/selinux/xfrm.c14
-rw-r--r--sound/core/compress_offload.c2
-rw-r--r--sound/pci/hda/hda_codec.c34
-rw-r--r--sound/pci/hda/hda_generic.c8
-rw-r--r--sound/pci/hda/hda_generic.h1
-rw-r--r--sound/pci/hda/hda_intel.c2
-rw-r--r--sound/pci/hda/patch_analog.c31
-rw-r--r--sound/pci/hda/patch_ca0132.c68
-rw-r--r--sound/pci/hda/patch_conexant.c3
-rw-r--r--sound/pci/hda/patch_realtek.c92
-rw-r--r--sound/pci/hda/patch_sigmatel.c60
-rw-r--r--sound/pci/hda/thinkpad_helper.c1
-rw-r--r--sound/pci/oxygen/xonar_dg.c30
-rw-r--r--sound/soc/blackfin/Kconfig11
-rw-r--r--sound/soc/codecs/88pm860x-codec.c3
-rw-r--r--sound/soc/codecs/ad1980.c4
-rw-r--r--sound/soc/codecs/da732x.c12
-rw-r--r--sound/soc/codecs/da9055.c11
-rw-r--r--sound/soc/codecs/isabelle.c52
-rw-r--r--sound/soc/codecs/max98090.c21
-rw-r--r--sound/soc/codecs/rt5640.c1
-rw-r--r--sound/soc/codecs/si476x.c2
-rw-r--r--sound/soc/codecs/sta32x.c76
-rw-r--r--sound/soc/codecs/wm8400.c34
-rw-r--r--sound/soc/codecs/wm8770.c4
-rw-r--r--sound/soc/codecs/wm8900.c44
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c2
-rw-r--r--sound/soc/codecs/wm8993.c1
-rw-r--r--sound/soc/codecs/wm8994.c135
-rw-r--r--sound/soc/davinci/davinci-evm.c1
-rw-r--r--sound/soc/davinci/davinci-mcasp.c83
-rw-r--r--sound/soc/fsl/fsl_esai.c4
-rw-r--r--sound/soc/fsl/fsl_esai.h2
-rw-r--r--sound/soc/fsl/imx-mc13783.c1
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c10
-rw-r--r--sound/soc/fsl/imx-wm8962.c11
-rw-r--r--sound/soc/omap/n810.c4
-rw-r--r--sound/soc/samsung/Kconfig6
-rw-r--r--sound/soc/soc-dapm.c139
-rw-r--r--sound/soc/soc-pcm.c3
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c8
-rw-r--r--sound/usb/Kconfig1
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--sound/usb/mixer_maps.c9
-rw-r--r--tools/lib/lockdep/Makefile6
-rw-r--r--tools/lib/lockdep/preload.c2
-rwxr-xr-x[-rw-r--r--]tools/lib/lockdep/run_tests.sh0
-rw-r--r--tools/lib/lockdep/uinclude/asm/hash.h6
-rw-r--r--tools/lib/lockdep/uinclude/linux/rcu.h5
-rw-r--r--tools/net/Makefile2
-rw-r--r--tools/perf/bench/numa.c1
-rw-r--r--tools/perf/builtin-bench.c2
-rw-r--r--tools/perf/builtin-buildid-cache.c33
-rw-r--r--tools/perf/builtin-record.c10
-rw-r--r--tools/perf/builtin-report.c40
-rw-r--r--tools/perf/builtin-top.c6
-rw-r--r--tools/perf/builtin-trace.c32
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/config/feature-checks/Makefile2
-rw-r--r--tools/perf/design.txt1
-rw-r--r--tools/perf/perf.h4
-rw-r--r--tools/perf/tests/vmlinux-kallsyms.c10
-rw-r--r--tools/perf/util/annotate.c9
-rw-r--r--tools/perf/util/annotate.h2
-rw-r--r--tools/perf/util/event.c36
-rw-r--r--tools/perf/util/event.h6
-rw-r--r--tools/perf/util/include/asm/hash.h6
-rw-r--r--tools/perf/util/include/linux/bitops.h4
-rw-r--r--tools/perf/util/machine.c44
-rw-r--r--tools/perf/util/machine.h2
-rw-r--r--tools/perf/util/map.c5
-rw-r--r--tools/perf/util/map.h1
-rw-r--r--tools/perf/util/parse-events.c17
-rw-r--r--tools/perf/util/probe-event.c2
-rw-r--r--tools/perf/util/session.c6
-rw-r--r--tools/perf/util/symbol-elf.c10
-rw-r--r--tools/perf/util/symbol.c67
-rw-r--r--tools/testing/selftests/ipc/msgque.c1
-rw-r--r--virt/kvm/arm/vgic.c1
-rw-r--r--virt/kvm/coalesced_mmio.c8
1679 files changed, 22013 insertions, 12424 deletions
diff --git a/.gitignore b/.gitignore
index 7e9932e55475..42fa0d5626a9 100644
--- a/.gitignore
+++ b/.gitignore
@@ -92,3 +92,6 @@ extra_certificates
signing_key.priv
signing_key.x509
x509.genkey
+
+# Kconfig presets
+all.config
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 38f8444bdd0e..07de7e19b4ce 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -29,6 +29,8 @@ DMA-ISA-LPC.txt
- How to do DMA with ISA (and LPC) devices.
DMA-attributes.txt
- listing of the various possible attributes a DMA region can have
+dmatest.txt
+ - how to compile, configure and use the dmatest system.
DocBook/
- directory with DocBook templates etc. for kernel documentation.
EDID/
@@ -77,6 +79,8 @@ arm/
- directory with info about Linux on the ARM architecture.
arm64/
- directory with info about Linux on the 64 bit ARM architecture.
+assoc_array.txt
+ - generic associative array intro.
atomic_ops.txt
- semantics and behavior of atomic and bitmask operations.
auxdisplay/
@@ -87,6 +91,8 @@ bad_memory.txt
- how to use kernel parameters to exclude bad RAM regions.
basic_profiling.txt
- basic instructions for those who wants to profile Linux kernel.
+bcache.txt
+ - Block-layer cache on fast SSDs to improve slow (raid) I/O performance.
binfmt_misc.txt
- info on the kernel support for extra binary formats.
blackfin/
@@ -171,6 +177,8 @@ early-userspace/
- info about initramfs, klibc, and userspace early during boot.
edac.txt
- information on EDAC - Error Detection And Correction
+efi-stub.txt
+ - How to use the EFI boot stub to bypass GRUB or elilo on EFI systems.
eisa.txt
- info on EISA bus support.
email-clients.txt
@@ -195,8 +203,8 @@ futex-requeue-pi.txt
- info on requeueing of tasks from a non-PI futex to a PI futex
gcov.txt
- use of GCC's coverage testing tool "gcov" with the Linux kernel
-gpio.txt
- - overview of GPIO (General Purpose Input/Output) access conventions.
+gpio/
+ - gpio related documentation
hid/
- directory with information on human interface devices
highuid.txt
@@ -255,6 +263,8 @@ kernel-docs.txt
- listing of various WWW + books that document kernel internals.
kernel-parameters.txt
- summary listing of command line / boot prompt args for the kernel.
+kernel-per-CPU-kthreads.txt
+ - List of all per-CPU kthreads and how they introduce jitter.
kmemcheck.txt
- info on dynamic checker that detects uses of uninitialized memory.
kmemleak.txt
@@ -299,8 +309,6 @@ memory-devices/
- directory with info on parts like the Texas Instruments EMIF driver
memory-hotplug.txt
- Hotpluggable memory support, how to use and current status.
-memory.txt
- - info on typical Linux memory problems.
metag/
- directory with info about Linux on Meta architecture.
mips/
@@ -311,6 +319,8 @@ mmc/
- directory with info about the MMC subsystem
mn10300/
- directory with info about the mn10300 architecture port
+module-signing.txt
+ - Kernel module signing for increased security when loading modules.
mtd/
- directory with info about memory technology devices (flash)
mono.txt
@@ -343,6 +353,8 @@ pcmcia/
- info on the Linux PCMCIA driver.
percpu-rw-semaphore.txt
- RCU based read-write semaphore optimized for locking for reading
+phy.txt
+ - Description of the generic PHY framework.
pi-futex.txt
- documentation on lightweight priority inheritance futexes.
pinctrl.txt
@@ -431,6 +443,8 @@ sysrq.txt
- info on the magic SysRq key.
target/
- directory with info on generating TCM v4 fabric .ko modules
+this_cpu_ops.txt
+ - Rationale behind and how to use this_cpu operations.
thermal/
- directory with information on managing thermal issues (CPU/temp)
trace/
@@ -469,6 +483,8 @@ wimax/
- directory with info about Intel Wireless Wimax Connections
workqueue.txt
- information on the Concurrency Managed Workqueue implementation
+ww-mutex-design.txt
+ - Intro to Mutex wait/wound deadlock handling.
x86/x86_64/
- directory with info on Linux support for AMD x86-64 (Hammer) machines.
xtensa/
diff --git a/Documentation/PCI/MSI-HOWTO.txt b/Documentation/PCI/MSI-HOWTO.txt
index a8d01005f480..10a93696e55a 100644
--- a/Documentation/PCI/MSI-HOWTO.txt
+++ b/Documentation/PCI/MSI-HOWTO.txt
@@ -82,7 +82,19 @@ Most of the hard work is done for the driver in the PCI layer. It simply
has to request that the PCI layer set up the MSI capability for this
device.
-4.2.1 pci_enable_msi_range
+4.2.1 pci_enable_msi
+
+int pci_enable_msi(struct pci_dev *dev)
+
+A successful call allocates ONE interrupt to the device, regardless
+of how many MSIs the device supports. The device is switched from
+pin-based interrupt mode to MSI mode. The dev->irq number is changed
+to a new number which represents the message signaled interrupt;
+consequently, this function should be called before the driver calls
+request_irq(), because an MSI is delivered via a vector that is
+different from the vector of a pin-based interrupt.
+
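+A minimal sketch of this call order, using hypothetical foo_* names rather
+than code from an existing driver, might look like:
+
+static int foo_driver_setup_irq(struct foo_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int rc;
+
+	/* switch the device from pin-based interrupts to a single MSI */
+	rc = pci_enable_msi(pdev);
+	if (rc)
+		return rc;
+
+	/* pdev->irq now refers to the MSI vector, so only request it now;
+	 * foo_interrupt() is a placeholder handler, not a real function */
+	return request_irq(pdev->irq, foo_interrupt, 0, "foo", adapter);
+}
+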
+4.2.2 pci_enable_msi_range
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec)
@@ -147,6 +159,11 @@ static int foo_driver_enable_msi(struct pci_dev *pdev, int nvec)
return pci_enable_msi_range(pdev, nvec, nvec);
}
+Note, unlike the pci_enable_msi_exact() function, which could also be used to
+enable a particular number of MSI interrupts, pci_enable_msi_range()
+returns either a negative errno or 'nvec' (not negative errno or 0 - as
+pci_enable_msi_exact() does).
+
4.2.1.3 Single MSI mode
The most notorious example of the request type described above is
@@ -158,7 +175,27 @@ static int foo_driver_enable_single_msi(struct pci_dev *pdev)
return pci_enable_msi_range(pdev, 1, 1);
}
-4.2.2 pci_disable_msi
+Note, unlike the pci_enable_msi() function, which could also be used to
+enable the single MSI mode, pci_enable_msi_range() returns either a
+negative errno or 1 (not negative errno or 0 - as pci_enable_msi()
+does).
+
+4.2.3 pci_enable_msi_exact
+
+int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+
+This variation on the pci_enable_msi_range() call allows a device driver to
+request exactly 'nvec' MSIs.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to request any more MSI interrupts for
+this device.
+
+By contrast with the pci_enable_msi_range() function, pci_enable_msi_exact()
+returns zero in case of success, which indicates MSI interrupts have been
+successfully allocated.
+
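+A minimal sketch of an exact-count request, again using hypothetical foo_*
+names, might look like:
+
+static int foo_driver_enable_msi_exact(struct pci_dev *pdev, int nvec)
+{
+	int rc;
+
+	/* either exactly 'nvec' MSIs are allocated, or the call fails */
+	rc = pci_enable_msi_exact(pdev, nvec);
+	if (rc)
+		return rc;	/* do not retry with other vector counts */
+
+	/* success: pci_enable_msi_exact() returned zero */
+	return 0;
+}
+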
+4.2.4 pci_disable_msi
void pci_disable_msi(struct pci_dev *dev)
@@ -172,7 +209,7 @@ on any interrupt for which it previously called request_irq().
Failure to do so results in a BUG_ON(), leaving the device with
MSI enabled and thus leaking its vector.
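+
+A teardown sketch matching this ordering, with hypothetical foo_* names,
+might look like:
+
+static void foo_driver_teardown_irq(struct foo_adapter *adapter)
+{
+	/* release the MSI vector first ... */
+	free_irq(adapter->pdev->irq, adapter);
+
+	/* ... then switch the device back to pin-based interrupts */
+	pci_disable_msi(adapter->pdev);
+}
+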
-4.2.3 pci_msi_vec_count
+4.2.5 pci_msi_vec_count
int pci_msi_vec_count(struct pci_dev *dev)
@@ -257,8 +294,8 @@ possible, likely up to the limit returned by pci_msix_vec_count() function:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
- return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
- 1, nvec);
+ return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ 1, nvec);
}
Note the value of 'minvec' parameter is 1. As 'minvec' is inclusive,
@@ -269,8 +306,8 @@ In this case the function could look like this:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
- return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
- FOO_DRIVER_MINIMUM_NVEC, nvec);
+ return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ FOO_DRIVER_MINIMUM_NVEC, nvec);
}
4.3.1.2 Exact number of MSI-X interrupts
@@ -282,10 +319,15 @@ parameters:
static int foo_driver_enable_msix(struct foo_adapter *adapter, int nvec)
{
- return pci_enable_msi_range(adapter->pdev, adapter->msix_entries,
- nvec, nvec);
+ return pci_enable_msix_range(adapter->pdev, adapter->msix_entries,
+ nvec, nvec);
}
+Note, unlike the pci_enable_msix_exact() function, which could also be used to
+enable a particular number of MSI-X interrupts, pci_enable_msix_range()
+returns either a negative errno or 'nvec' (not negative errno or 0 - as
+pci_enable_msix_exact() does).
+
4.3.1.3 Specific requirements to the number of MSI-X interrupts
As noted above, there could be devices that can not operate with just any
@@ -332,7 +374,64 @@ Note how pci_enable_msix_range() return value is analyzed for a fallback -
any error code other than -ENOSPC indicates a fatal error and should not
be retried.
-4.3.2 pci_disable_msix
+4.3.2 pci_enable_msix_exact
+
+int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+
+This variation on the pci_enable_msix_range() call allows a device driver to
+request exactly 'nvec' MSI-Xs.
+
+If this function returns a negative number, it indicates an error and
+the driver should not attempt to allocate any more MSI-X interrupts for
+this device.
+
+By contrast with the pci_enable_msix_range() function, pci_enable_msix_exact()
+returns zero in case of success, which indicates MSI-X interrupts have been
+successfully allocated.
+
+Another version of a routine that enables MSI-X mode for a device with
+specific requirements described in chapter 4.3.1.3 might look like this:
+
+/*
+ * Assume 'minvec' and 'maxvec' are non-zero
+ */
+static int foo_driver_enable_msix(struct foo_adapter *adapter,
+ int minvec, int maxvec)
+{
+ int rc;
+
+ minvec = roundup_pow_of_two(minvec);
+ maxvec = rounddown_pow_of_two(maxvec);
+
+ if (minvec > maxvec)
+ return -ERANGE;
+
+retry:
+ rc = pci_enable_msix_exact(adapter->pdev,
+ adapter->msix_entries, maxvec);
+
+ /*
+ * -ENOSPC is the only error code allowed to be analyzed
+ */
+ if (rc == -ENOSPC) {
+ if (maxvec == 1)
+ return -ENOSPC;
+
+ maxvec /= 2;
+
+ if (minvec > maxvec)
+ return -ENOSPC;
+
+ goto retry;
+ } else if (rc < 0) {
+ return rc;
+ }
+
+ return maxvec;
+}
+
+4.3.3 pci_disable_msix
void pci_disable_msix(struct pci_dev *dev)
diff --git a/Documentation/RCU/00-INDEX b/Documentation/RCU/00-INDEX
index 1d7a885761f5..fa57139f50bf 100644
--- a/Documentation/RCU/00-INDEX
+++ b/Documentation/RCU/00-INDEX
@@ -8,6 +8,8 @@ listRCU.txt
- Using RCU to Protect Read-Mostly Linked Lists
lockdep.txt
- RCU and lockdep checking
+lockdep-splat.txt
+ - RCU Lockdep splats explained.
NMI-RCU.txt
- Using RCU to Protect Dynamic NMI Handlers
rcubarrier.txt
diff --git a/Documentation/arm/00-INDEX b/Documentation/arm/00-INDEX
index 36420e116c90..a94090cc785d 100644
--- a/Documentation/arm/00-INDEX
+++ b/Documentation/arm/00-INDEX
@@ -4,6 +4,8 @@ Booting
- requirements for booting
Interrupts
- ARM Interrupt subsystem documentation
+IXP4xx
+ - Intel IXP4xx Network processor.
msm
- MSM specific documentation
Netwinder
@@ -24,8 +26,16 @@ SPEAr
- ST SPEAr platform Linux Overview
VFP/
- Release notes for Linux Kernel Vector Floating Point support code
+cluster-pm-race-avoidance.txt
+ - Algorithm for CPU and Cluster setup/teardown
empeg/
- Ltd's Empeg MP3 Car Audio Player
+firmware.txt
+ - Secure firmware registration and calling.
+kernel_mode_neon.txt
+ - How to use NEON instructions in kernel mode
+kernel_user_helpers.txt
+ - Helper functions in kernel space made available for userspace.
mem_alignment
- alignment abort handler documentation
memory.txt
@@ -34,3 +44,7 @@ nwfpe/
- NWFPE floating point emulator documentation
swp_emulation
- SWP/SWPB emulation handler/logging description
+tcm.txt
+ - ARM Tightly Coupled Memory
+vlocks.txt
+ - Voting locks, low-level mechanism relying on memory system atomic writes.
diff --git a/Documentation/blackfin/00-INDEX b/Documentation/blackfin/00-INDEX
index 2df0365f2dff..c54fcdd4ae9f 100644
--- a/Documentation/blackfin/00-INDEX
+++ b/Documentation/blackfin/00-INDEX
@@ -1,8 +1,10 @@
00-INDEX
- This file
-
+Makefile
+ - Makefile for gptimers example file.
bfin-gpio-notes.txt
- Notes in developing/using bfin-gpio driver.
-
bfin-spi-notes.txt
- Notes for using bfin spi bus driver.
+gptimers-example.c
+ - gptimers example
diff --git a/Documentation/block/00-INDEX b/Documentation/block/00-INDEX
index 929d9904f74b..e840b47613f7 100644
--- a/Documentation/block/00-INDEX
+++ b/Documentation/block/00-INDEX
@@ -14,6 +14,8 @@ deadline-iosched.txt
- Deadline IO scheduler tunables
ioprio.txt
- Block io priorities (in CFQ scheduler)
+null_blk.txt
+ - Null block for block-layer benchmarking.
queue-sysfs.txt
- Queue's sysfs entries
request.txt
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e6b72d355151..68c0f517c60e 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -124,12 +124,11 @@ the default being 204800 sectors (or 100MB).
Updating on-disk metadata
-------------------------
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written. If no such requests are made then commits will occur every
-second. This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target). If
-power is lost you may lose some recent writes. The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the cache behaves like a physical disk that has a volatile write
+cache. If power is lost you may lose some recent writes. The metadata
+should always be consistent in spite of any crash.
The 'dirty' state for a cache block changes far too frequently for us
to keep updating it on the fly. So we treat it as a hint. In normal
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 8a7a3d46e0da..05a27e9442bd 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -116,6 +116,35 @@ Resuming a device with a new table itself triggers an event so the
userspace daemon can use this to detect a situation where a new table
already exceeds the threshold.
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second. This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache. If power is lost you may lose some recent
+writes. The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space). If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation. Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed. It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted. Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
Thin provisioning
-----------------
@@ -258,10 +287,9 @@ ii) Status
should register for the event and then check the target's status.
held metadata root:
- The location, in sectors, of the metadata root that has been
+ The location, in blocks, of the metadata root that has been
'held' for userspace read access. '-' indicates there is no
- held root. This feature is not yet implemented so '-' is
- always returned.
+ held root.
discard_passdown|no_discard_passdown
Whether or not discards are actually being passed down to the
diff --git a/Documentation/devicetree/00-INDEX b/Documentation/devicetree/00-INDEX
index b78f691fd847..8c4102c6a5e7 100644
--- a/Documentation/devicetree/00-INDEX
+++ b/Documentation/devicetree/00-INDEX
@@ -8,3 +8,5 @@ https://lists.ozlabs.org/listinfo/devicetree-discuss
- this file
booting-without-of.txt
- Booting Linux without Open Firmware, describes history and format of device trees.
+usage-model.txt
+ - How Linux uses DT and what DT aims to solve.
\ No newline at end of file
diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt
index 34dc40cffdfd..af9b4a0d902b 100644
--- a/Documentation/devicetree/bindings/arm/omap/omap.txt
+++ b/Documentation/devicetree/bindings/arm/omap/omap.txt
@@ -91,7 +91,7 @@ Boards:
compatible = "ti,omap3-beagle", "ti,omap3"
- OMAP3 Tobi with Overo : Commercial expansion board with daughter board
- compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3"
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3"
- OMAP4 SDP : Software Development Board
compatible = "ti,omap4-sdp", "ti,omap4430"
diff --git a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
index a6a352c2771e..5992dceec7af 100644
--- a/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
+++ b/Documentation/devicetree/bindings/clock/renesas,cpg-mstp-clocks.txt
@@ -21,9 +21,9 @@ Required Properties:
must appear in the same order as the output clocks.
- #clock-cells: Must be 1
- clock-output-names: The name of the clocks as free-form strings
- - renesas,indices: Indices of the gate clocks into the group (0 to 31)
+ - renesas,clock-indices: Indices of the gate clocks into the group (0 to 31)
-The clocks, clock-output-names and renesas,indices properties contain one
+The clocks, clock-output-names and renesas,clock-indices properties contain one
entry per gate clock. The MSTP groups are sparsely populated. Unimplemented
gate clocks must not be declared.
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 68b83ecc3850..ee9be9961524 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -1,12 +1,16 @@
* Freescale Smart Direct Memory Access (SDMA) Controller for i.MX
Required properties:
-- compatible : Should be "fsl,imx31-sdma", "fsl,imx31-to1-sdma",
- "fsl,imx31-to2-sdma", "fsl,imx35-sdma", "fsl,imx35-to1-sdma",
- "fsl,imx35-to2-sdma", "fsl,imx51-sdma", "fsl,imx53-sdma" or
- "fsl,imx6q-sdma". The -to variants should be preferred since they
- allow to determnine the correct ROM script addresses needed for
- the driver to work without additional firmware.
+- compatible : Should be one of
+ "fsl,imx25-sdma"
+ "fsl,imx31-sdma", "fsl,imx31-to1-sdma", "fsl,imx31-to2-sdma"
+ "fsl,imx35-sdma", "fsl,imx35-to1-sdma", "fsl,imx35-to2-sdma"
+ "fsl,imx51-sdma"
+ "fsl,imx53-sdma"
+ "fsl,imx6q-sdma"
+ The -to variants should be preferred since they allow determining the
+ correct ROM script addresses needed for the driver to work without additional
+ firmware.
- reg : Should contain SDMA registers location and length
- interrupts : Should contain SDMA interrupt
- #dma-cells : Must be <3>.
diff --git a/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
new file mode 100644
index 000000000000..aee38e7c13e7
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/lsi,zevio-intc.txt
@@ -0,0 +1,18 @@
+TI-NSPIRE interrupt controller
+
+Required properties:
+- compatible: Compatible property value should be "lsi,zevio-intc".
+
+- reg: Physical base address of the controller and length of memory mapped
+ region.
+
+- interrupt-controller : Identifies the node as an interrupt controller
+
+Example:
+
+interrupt-controller {
+ compatible = "lsi,zevio-intc";
+ interrupt-controller;
+ reg = <0xDC000000 0x1000>;
+ #interrupt-cells = <1>;
+};
diff --git a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
index 0a85c70cd30a..07ad02075a93 100644
--- a/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
+++ b/Documentation/devicetree/bindings/mmc/atmel-hsmci.txt
@@ -13,6 +13,9 @@ Required properties:
- #address-cells: should be one. The cell is the slot id.
- #size-cells: should be zero.
- at least one slot node
+- clock-names: tuple listing input clock names.
+ Required elements: "mci_clk"
+- clocks: phandles to input clocks.
The node contains child nodes for each slot that the platform uses
@@ -24,6 +27,8 @@ mmc0: mmc@f0008000 {
interrupts = <12 4>;
#address-cells = <1>;
#size-cells = <0>;
+ clock-names = "mci_clk";
+ clocks = <&mci0_clk>;
[ child node definitions...]
};
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
index b90bfcd138ff..863d5b8155c7 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-emac.txt
@@ -1,7 +1,8 @@
* Allwinner EMAC ethernet controller
Required properties:
-- compatible: should be "allwinner,sun4i-emac".
+- compatible: should be "allwinner,sun4i-a10-emac" (Deprecated:
+ "allwinner,sun4i-emac")
- reg: address and length of the register set for the device.
- interrupts: interrupt for the device
- phy: A phandle to a phy node defining the PHY address (as the reg
@@ -14,7 +15,7 @@ Optional properties:
Example:
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
clocks = <&ahb_gates 17>;
diff --git a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
index 00b9f9a3ec1d..4ec56413779d 100644
--- a/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
+++ b/Documentation/devicetree/bindings/net/allwinner,sun4i-mdio.txt
@@ -1,7 +1,8 @@
* Allwinner A10 MDIO Ethernet Controller interface
Required properties:
-- compatible: should be "allwinner,sun4i-mdio".
+- compatible: should be "allwinner,sun4i-a10-mdio"
+ (Deprecated: "allwinner,sun4i-mdio").
- reg: address and length of the register set for the device.
Optional properties:
@@ -9,7 +10,7 @@ Optional properties:
Example at the SoC level:
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/micrel-ks8851.txt b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
index 11ace3c3d805..4fc392763611 100644
--- a/Documentation/devicetree/bindings/net/micrel-ks8851.txt
+++ b/Documentation/devicetree/bindings/net/micrel-ks8851.txt
@@ -7,3 +7,4 @@ Required properties:
Optional properties:
- local-mac-address : Ethernet mac address to use
+- vdd-supply: supply for Ethernet mac
diff --git a/Documentation/devicetree/bindings/net/opencores-ethoc.txt b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
new file mode 100644
index 000000000000..2dc127c30d9b
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/opencores-ethoc.txt
@@ -0,0 +1,22 @@
+* OpenCores MAC 10/100 Mbps
+
+Required properties:
+- compatible: Should be "opencores,ethoc".
+- reg: two memory regions (address and length),
+ first region is for the device registers and descriptor rings,
+ second is for the device packet memory.
+- interrupts: interrupt for the device.
+
+Optional properties:
+- clocks: phandle to refer to the clk used as per
+ Documentation/devicetree/bindings/clock/clock-bindings.txt
+
+Examples:
+
+ enet0: ethoc@fd030000 {
+ compatible = "opencores,ethoc";
+ reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
+ interrupts = <1>;
+ local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
+ };
diff --git a/Documentation/devicetree/bindings/net/sti-dwmac.txt b/Documentation/devicetree/bindings/net/sti-dwmac.txt
new file mode 100644
index 000000000000..3dd3d0bf112f
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/sti-dwmac.txt
@@ -0,0 +1,58 @@
+STMicroelectronics SoC DWMAC glue layer controller
+
+The device node has following properties.
+
+Required properties:
+ - compatible : Can be "st,stih415-dwmac", "st,stih416-dwmac" or
+ "st,stid127-dwmac".
+ - reg : Offset and size of the glue configuration register map in the system
+ configuration regmap pointed to by the st,syscon property.
+
+ - reg-names : Should be "sti-ethconf".
+
+ - st,syscon : Should be a phandle to the system configuration node which
+ encompasses these glue registers.
+
+ - st,tx-retime-src: On STi parts, for Gigabit speeds, the 125MHz clock can be
+ wired up from different sources: one via the TXCLK pin and the other via the
+ CLK_125 pin. This wiring is totally board dependent. However the retiming
+ glue logic should be configured accordingly. Possible values for this
+ property:
+
+ "txclk" - if the 125MHz clock is wired up via the txclk line.
+ "clk_125" - if the 125MHz clock is wired up via the clk_125 line.
+
+ This property is only valid for Gigabit setups (GMII, RGMII), and it is
+ unused for non-Gigabit (MII and RMII) setups. Also note that the internal
+ clockgen cannot generate a stable 125MHz clock.
+
+ - st,ext-phyclk: This boolean property indicates who generates the clock
+ for tx and rx. This property is only valid for the RMII case, where the
+ clock can be generated by the MAC or the PHY.
+
+ - clock-names: should be "sti-ethclk".
+ - clocks: Should point to ethernet clockgen which can generate phyclk.
+
+
+Example:
+
+ethernet0: dwmac@fe810000 {
+ device_type = "network";
+ compatible = "st,stih416-dwmac", "snps,dwmac", "snps,dwmac-3.710";
+ reg = <0xfe810000 0x8000>, <0x8bc 0x4>;
+ reg-names = "stmmaceth", "sti-ethconf";
+ interrupts = <0 133 0>, <0 134 0>, <0 135 0>;
+ interrupt-names = "macirq", "eth_wake_irq", "eth_lpi";
+ phy-mode = "mii";
+
+ st,syscon = <&syscfg_rear>;
+
+ snps,pbl = <32>;
+ snps,mixed-burst;
+
+ resets = <&softreset STIH416_ETH0_SOFTRESET>;
+ reset-names = "stmmaceth";
+ pinctrl-0 = <&pinctrl_mii0>;
+ pinctrl-names = "default";
+ clocks = <&CLK_S_GMAC0_PHY>;
+ clock-names = "stmmaceth";
+};
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
index 9e9e9ef9f852..c119debe6bab 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller
This is a pin controller for the Broadcom BCM281xx SoC family, which includes
BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@ BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
Required Properties:
-- compatible: Must be "brcm,capri-pinctrl".
+- compatible: Must be "brcm,bcm11351-pinctrl"
- reg: Base address of the PAD Controller register block and the size
of the block.
For example, the following is the bare minimum node:
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
@@ -119,7 +119,7 @@ Optional Properties (for HDMI pins):
Example:
// pin controller node
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcmbcm11351-pinctrl";
reg = <0x35004800 0x430>;
// pin configuration node
diff --git a/Documentation/devicetree/bindings/power/bq2415x.txt b/Documentation/devicetree/bindings/power/bq2415x.txt
new file mode 100644
index 000000000000..d0327f0b59ad
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/bq2415x.txt
@@ -0,0 +1,47 @@
+Binding for TI bq2415x Li-Ion Charger
+
+Required properties:
+- compatible: Should contain one of the following:
+ * "ti,bq24150"
+ * "ti,bq24150"
+ * "ti,bq24150a"
+ * "ti,bq24151"
+ * "ti,bq24151a"
+ * "ti,bq24152"
+ * "ti,bq24153"
+ * "ti,bq24153a"
+ * "ti,bq24155"
+ * "ti,bq24156"
+ * "ti,bq24156a"
+ * "ti,bq24158"
+- reg: integer, i2c address of the device.
+- ti,current-limit: integer, initial maximum current charger can pull
+ from power supply in mA.
+- ti,weak-battery-voltage: integer, weak battery voltage threshold in mV.
+ The chip will use slow precharge if battery voltage
+ is below this value.
+- ti,battery-regulation-voltage: integer, maximum charging voltage in mV.
+- ti,charge-current: integer, maximum charging current in mA.
+- ti,termination-current: integer, charge will be terminated when current in
+ constant-voltage phase drops below this value (in mA).
+- ti,resistor-sense: integer, value of sensing resistor in milliohm.
+
+Optional properties:
+- ti,usb-charger-detection: phandle to usb charger detection device.
+ (required for auto mode)
+
+Example from Nokia N900:
+
+bq24150a {
+ compatible = "ti,bq24150a";
+ reg = <0x6b>;
+
+ ti,current-limit = <100>;
+ ti,weak-battery-voltage = <3400>;
+ ti,battery-regulation-voltage = <4200>;
+ ti,charge-current = <650>;
+ ti,termination-current = <100>;
+ ti,resistor-sense = <68>;
+
+ ti,usb-charger-detection = <&isp1704>;
+};
diff --git a/Documentation/devicetree/bindings/spi/efm32-spi.txt b/Documentation/devicetree/bindings/spi/efm32-spi.txt
index a590ca51be75..8f081c96a4fa 100644
--- a/Documentation/devicetree/bindings/spi/efm32-spi.txt
+++ b/Documentation/devicetree/bindings/spi/efm32-spi.txt
@@ -3,24 +3,24 @@
Required properties:
- #address-cells: see spi-bus.txt
- #size-cells: see spi-bus.txt
-- compatible: should be "efm32,spi"
+- compatible: should be "energymicro,efm32-spi"
- reg: Offset and length of the register set for the controller
- interrupts: pair specifying rx and tx irq
- clocks: phandle to the spi clock
- cs-gpios: see spi-bus.txt
-- location: Value to write to the ROUTE register's LOCATION bitfield to configure the pinmux for the device, see datasheet for values.
+- efm32,location: Value to write to the ROUTE register's LOCATION bitfield to configure the pinmux for the device, see datasheet for values.
Example:
spi1: spi@0x4000c400 { /* USART1 */
#address-cells = <1>;
#size-cells = <0>;
- compatible = "efm32,spi";
+ compatible = "energymicro,efm32-spi";
reg = <0x4000c400 0x400>;
interrupts = <15 16>;
clocks = <&cmu 20>;
cs-gpios = <&gpio 51 1>; // D3
- location = <1>;
+ efm32,location = <1>;
status = "ok";
ks8851@0 {
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
new file mode 100644
index 000000000000..b82a268f1bd4
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
@@ -0,0 +1,85 @@
+Qualcomm Universal Peripheral (QUP) Serial Peripheral Interface (SPI)
+
+The QUP core is an AHB slave that provides a common data path (an output FIFO
+and an input FIFO) for serial peripheral interface (SPI) mini-core.
+
+SPI in master mode supports up to 50MHz, up to four chip selects, programmable
+data path from 4 bits to 32 bits and numerous protocol variants.
+
+Required properties:
+- compatible: Should contain "qcom,spi-qup-v2.1.1" or "qcom,spi-qup-v2.2.1"
+- reg: Should contain base register location and length
+- interrupts: Interrupt number used by this controller
+
+- clocks: Should contain the core clock and the AHB clock.
+- clock-names: Should be "core" for the core clock and "iface" for the
+ AHB clock.
+
+- #address-cells: Number of cells required to define a chip select
+ address on the SPI bus. Should be set to 1.
+- #size-cells: Should be zero.
+
+Optional properties:
+- spi-max-frequency: Specifies maximum SPI clock frequency,
+ Units - Hz. Definition as per
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+SPI slave nodes must be children of the SPI master node and can contain
+properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
+
+Example:
+
+ spi_8: spi@f9964000 { /* BLSP2 QUP2 */
+
+ compatible = "qcom,spi-qup-v2";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0xf9964000 0x1000>;
+ interrupts = <0 102 0>;
+ spi-max-frequency = <19200000>;
+
+ clocks = <&gcc GCC_BLSP2_QUP2_SPI_APPS_CLK>, <&gcc GCC_BLSP2_AHB_CLK>;
+ clock-names = "core", "iface";
+
+ pinctrl-names = "default";
+ pinctrl-0 = <&spi8_default>;
+
+ device@0 {
+ compatible = "arm,pl022-dummy";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <0>; /* Chip select 0 */
+ spi-max-frequency = <19200000>;
+ spi-cpol;
+ };
+
+ device@1 {
+ compatible = "arm,pl022-dummy";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <1>; /* Chip select 1 */
+ spi-max-frequency = <9600000>;
+ spi-cpha;
+ };
+
+ device@2 {
+ compatible = "arm,pl022-dummy";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <2>; /* Chip select 2 */
+ spi-max-frequency = <19200000>;
+ spi-cpol;
+ spi-cpha;
+ };
+
+ device@3 {
+ compatible = "arm,pl022-dummy";
+ #address-cells = <1>;
+ #size-cells = <1>;
+ reg = <3>; /* Chip select 3 */
+ spi-max-frequency = <19200000>;
+ spi-cpol;
+ spi-cpha;
+ spi-cs-high;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/spi/sh-hspi.txt b/Documentation/devicetree/bindings/spi/sh-hspi.txt
index 30b57b1c8a13..319bad4af875 100644
--- a/Documentation/devicetree/bindings/spi/sh-hspi.txt
+++ b/Documentation/devicetree/bindings/spi/sh-hspi.txt
@@ -1,7 +1,29 @@
Renesas HSPI.
Required properties:
-- compatible : "renesas,hspi"
-- reg : Offset and length of the register set for the device
-- interrupts : interrupt line used by HSPI
+- compatible : "renesas,hspi-<soctype>", "renesas,hspi" as fallback.
+ Examples with soctypes are:
+ - "renesas,hspi-r8a7778" (R-Car M1)
+ - "renesas,hspi-r8a7779" (R-Car H1)
+- reg : Offset and length of the register set for the device
+- interrupt-parent : The phandle for the interrupt controller that
+ services interrupts for this device
+- interrupts : Interrupt specifier
+- #address-cells : Must be <1>
+- #size-cells : Must be <0>
+
+Pinctrl properties might be needed, too. See
+Documentation/devicetree/bindings/pinctrl/renesas,*.
+
+Example:
+
+ hspi0: spi@fffc7000 {
+ compatible = "renesas,hspi-r8a7778", "renesas,hspi";
+ reg = <0xfffc7000 0x18>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 63 IRQ_TYPE_LEVEL_HIGH>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index e6222106ca36..f24baf3b6cc1 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,12 +1,40 @@
Renesas MSIOF spi controller
Required properties:
-- compatible : "renesas,sh-msiof" for SuperH or
- "renesas,sh-mobile-msiof" for SH Mobile series
-- reg : Offset and length of the register set for the device
-- interrupts : interrupt line used by MSIOF
+- compatible : "renesas,msiof-<soctype>" for SoCs,
+ "renesas,sh-msiof" for SuperH, or
+ "renesas,sh-mobile-msiof" for SH Mobile series.
+ Examples with soctypes are:
+ "renesas,msiof-r8a7790" (R-Car H2)
+ "renesas,msiof-r8a7791" (R-Car M2)
+- reg : Offset and length of the register set for the device
+- interrupt-parent : The phandle for the interrupt controller that
+ services interrupts for this device
+- interrupts : Interrupt specifier
+- #address-cells : Must be <1>
+- #size-cells : Must be <0>
Optional properties:
-- num-cs : total number of chip-selects
-- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
-- renesas,rx-fifo-size : Overrides the default rx fifo size given in words
+- clocks : Must contain a reference to the functional clock.
+- num-cs : Total number of chip-selects (default is 1)
+
+Optional properties, deprecated for soctype-specific bindings:
+- renesas,tx-fifo-size : Overrides the default tx fifo size given in words
+ (default is 64)
+- renesas,rx-fifo-size : Overrides the default rx fifo size given in words
+ (default is 64, or 256 on R-Car H2 and M2)
+
+Pinctrl properties might be needed, too. See
+Documentation/devicetree/bindings/pinctrl/renesas,*.
+
+Example:
+
+ msiof0: spi@e6e20000 {
+ compatible = "renesas,msiof-r8a7791";
+ reg = <0 0xe6e20000 0 0x0064>;
+ interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
index a1fb3035a42b..5376de40f10b 100644
--- a/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
+++ b/Documentation/devicetree/bindings/spi/spi-fsl-dspi.txt
@@ -10,6 +10,7 @@ Required properties:
- pinctrl-names: must contain a "default" entry.
- spi-num-chipselects : the number of the chipselect signals.
- bus-num : the slave chip chipselect signal number.
+- big-endian : if the DSPI module is big endian, set this boolean property in the node.
Example:
dspi0@4002c000 {
@@ -24,6 +25,7 @@ dspi0@4002c000 {
bus-num = <0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_dspi0_1>;
+ big-endian;
status = "okay";
sflash: at26df081a@0 {
diff --git a/Documentation/devicetree/bindings/spi/spi-rspi.txt b/Documentation/devicetree/bindings/spi/spi-rspi.txt
new file mode 100644
index 000000000000..d57d82a74054
--- /dev/null
+++ b/Documentation/devicetree/bindings/spi/spi-rspi.txt
@@ -0,0 +1,61 @@
+Device tree configuration for Renesas RSPI/QSPI driver
+
+Required properties:
+- compatible : For Renesas Serial Peripheral Interface on legacy SH:
+ "renesas,rspi-<soctype>", "renesas,rspi" as fallback.
+ For Renesas Serial Peripheral Interface on RZ/A1H:
+ "renesas,rspi-<soctype>", "renesas,rspi-rz" as fallback.
+ For Quad Serial Peripheral Interface on R-Car Gen2:
+ "renesas,qspi-<soctype>", "renesas,qspi" as fallback.
+ Examples with soctypes are:
+ - "renesas,rspi-sh7757" (SH)
+ - "renesas,rspi-r7s72100" (RZ/A1H)
+ - "renesas,qspi-r8a7790" (R-Car H2)
+ - "renesas,qspi-r8a7791" (R-Car M2)
+- reg : Address start and address range size of the device
+- interrupts : A list of interrupt-specifiers, one for each entry in
+ interrupt-names.
+ If interrupt-names is not present, an interrupt specifier
+ for a single muxed interrupt.
+- interrupt-names : A list of interrupt names. Should contain (if present):
+ - "error" for SPEI,
+ - "rx" for SPRI,
+ - "tx" to SPTI,
+ - "mux" for a single muxed interrupt.
+- interrupt-parent : The phandle for the interrupt controller that
+ services interrupts for this device.
+- num-cs : Number of chip selects. Some RSPI cores have more than 1.
+- #address-cells : Must be <1>
+- #size-cells : Must be <0>
+
+Optional properties:
+- clocks : Must contain a reference to the functional clock.
+
+Pinctrl properties might be needed, too. See
+Documentation/devicetree/bindings/pinctrl/renesas,*.
+
+Examples:
+
+ spi0: spi@e800c800 {
+ compatible = "renesas,rspi-r7s72100", "renesas,rspi-rz";
+ reg = <0xe800c800 0x24>;
+ interrupts = <0 238 IRQ_TYPE_LEVEL_HIGH>,
+ <0 239 IRQ_TYPE_LEVEL_HIGH>,
+ <0 240 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "error", "rx", "tx";
+ interrupt-parent = <&gic>;
+ num-cs = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ spi: spi@e6b10000 {
+ compatible = "renesas,qspi-r8a7791", "renesas,qspi";
+ reg = <0 0xe6b10000 0 0x2c>;
+ interrupt-parent = <&gic>;
+ interrupts = <0 184 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp9_clks R8A7791_CLK_QSPI_MOD>;
+ num-cs = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
diff --git a/Documentation/devicetree/bindings/spi/spi_atmel.txt b/Documentation/devicetree/bindings/spi/spi_atmel.txt
index 07e04cdc0c9e..4f8184d069cb 100644
--- a/Documentation/devicetree/bindings/spi/spi_atmel.txt
+++ b/Documentation/devicetree/bindings/spi/spi_atmel.txt
@@ -5,6 +5,9 @@ Required properties:
- reg: Address and length of the register set for the device
- interrupts: Should contain spi interrupt
- cs-gpios: chipselects
+- clock-names: tuple listing input clock names.
+ Required elements: "spi_clk"
+- clocks: phandles to input clocks.
Example:
@@ -14,6 +17,8 @@ spi1: spi@fffcc000 {
interrupts = <13 4 5>;
#address-cells = <1>;
#size-cells = <0>;
+ clocks = <&spi1_clk>;
+ clock-names = "spi_clk";
cs-gpios = <&pioB 3 0>;
status = "okay";
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 3f900cd51bf0..40ce2df0e0e9 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -8,6 +8,7 @@ ad Avionic Design GmbH
adi Analog Devices, Inc.
aeroflexgaisler Aeroflex Gaisler AB
ak Asahi Kasei Corp.
+allwinner Allwinner Technology Co., Ltd.
altr Altera Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
amstaos AMS-Taos Inc.
@@ -40,6 +41,7 @@ gmt Global Mixed-mode Technology, Inc.
gumstix Gumstix, Inc.
haoyu Haoyu Microelectronic Co. Ltd.
hisilicon Hisilicon Limited.
+honeywell Honeywell
hp Hewlett Packard
ibm International Business Machines (IBM)
idt Integrated Device Technologies, Inc.
@@ -55,6 +57,7 @@ maxim Maxim Integrated Products
microchip Microchip Technology Inc.
mosaixtech Mosaix Technologies, Inc.
national National Semiconductor
+neonode Neonode Inc.
nintendo Nintendo
nvidia NVIDIA
nxp NXP Semiconductors
@@ -64,7 +67,7 @@ phytec PHYTEC Messtechnik GmbH
picochip Picochip Ltd
powervr PowerVR (deprecated, use img)
qca Qualcomm Atheros, Inc.
-qcom Qualcomm, Inc.
+qcom Qualcomm Technologies, Inc
ralink Mediatek/Ralink Technology Corp.
ramtron Ramtron International
realtek Realtek Semiconductor Corp.
@@ -78,6 +81,7 @@ silabs Silicon Laboratories
simtek
sirf SiRF Technology, Inc.
snps Synopsys, Inc.
+spansion Spansion Inc.
st STMicroelectronics
ste ST-Ericsson
stericsson ST-Ericsson
diff --git a/Documentation/dvb/contributors.txt b/Documentation/dvb/contributors.txt
index 47c30098dab6..731a009723c7 100644
--- a/Documentation/dvb/contributors.txt
+++ b/Documentation/dvb/contributors.txt
@@ -78,7 +78,7 @@ Peter Beutner <p.beutner@gmx.net>
Wilson Michaels <wilsonmichaels@earthlink.net>
for the lgdt330x frontend driver, and various bugfixes
-Michael Krufky <mkrufky@m1k.net>
+Michael Krufky <mkrufky@linuxtv.org>
for maintaining v4l/dvb inter-tree dependencies
Taylor Jacob <rtjacob@earthlink.net>
diff --git a/Documentation/fb/00-INDEX b/Documentation/fb/00-INDEX
index 30a70542e823..fe85e7c5907a 100644
--- a/Documentation/fb/00-INDEX
+++ b/Documentation/fb/00-INDEX
@@ -5,6 +5,8 @@ please mail me.
00-INDEX
- this file.
+api.txt
+ - The frame buffer API between applications and buffer devices.
arkfb.txt
- info on the fbdev driver for ARK Logic chips.
aty128fb.txt
@@ -51,12 +53,16 @@ sh7760fb.txt
- info on the SH7760/SH7763 integrated LCDC Framebuffer driver.
sisfb.txt
- info on the framebuffer device driver for various SiS chips.
+sm501.txt
+ - info on the framebuffer device driver for sm501 videoframebuffer.
sstfb.txt
- info on the frame buffer driver for 3dfx' Voodoo Graphics boards.
tgafb.txt
- info on the TGA (DECChip 21030) frame buffer driver.
tridentfb.txt
info on the framebuffer driver for some Trident chip based cards.
+udlfb.txt
+ - Driver for DisplayLink USB 2.0 chips.
uvesafb.txt
- info on the userspace VESA (VBE2+ compliant) frame buffer device.
vesafb.txt
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 632211cbdd56..ac28149aede4 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -2,6 +2,8 @@
- this file (info on some of the filesystems supported by linux).
Locking
- info on locking rules as they pertain to Linux VFS.
+Makefile
+ - Makefile for building the filesystems part of DocBook.
9p.txt
- 9p (v9fs) is an implementation of the Plan 9 remote fs protocol.
adfs.txt
diff --git a/Documentation/filesystems/nfs/00-INDEX b/Documentation/filesystems/nfs/00-INDEX
index 66eb6c8c5334..53f3b596ac0d 100644
--- a/Documentation/filesystems/nfs/00-INDEX
+++ b/Documentation/filesystems/nfs/00-INDEX
@@ -12,6 +12,8 @@ nfs41-server.txt
- info on the Linux server implementation of NFSv4 minor version 1.
nfs-rdma.txt
- how to install and setup the Linux NFS/RDMA client and server software
+nfsd-admin-interfaces.txt
+ - Administrative interfaces for nfsd.
nfsroot.txt
- short guide on setting up a diskless box with NFS root filesystem.
pnfs.txt
@@ -20,5 +22,5 @@ rpc-cache.txt
- introduction to the caching mechanisms in the sunrpc layer.
idmapper.txt
- information for configuring request-keys to be used by idmapper
-knfsd-rpcgss.txt
+rpc-server-gss.txt
- Information on GSS authentication support in the NFS Server
diff --git a/Documentation/i2c/instantiating-devices b/Documentation/i2c/instantiating-devices
index c70e7a7638d1..0d85ac1935b7 100644
--- a/Documentation/i2c/instantiating-devices
+++ b/Documentation/i2c/instantiating-devices
@@ -8,8 +8,8 @@ reason, the kernel code must instantiate I2C devices explicitly. There are
several ways to achieve this, depending on the context and requirements.
-Method 1: Declare the I2C devices by bus number
------------------------------------------------
+Method 1a: Declare the I2C devices by bus number
+------------------------------------------------
This method is appropriate when the I2C bus is a system bus as is the case
for many embedded systems. On such systems, each I2C bus has a number
@@ -51,6 +51,43 @@ The devices will be automatically unbound and destroyed when the I2C bus
they sit on goes away (if ever.)
+Method 1b: Declare the I2C devices via devicetree
+-------------------------------------------------
+
+This method has the same implications as method 1a. The declaration of I2C
+devices is here done via devicetree as subnodes of the master controller.
+
+Example:
+
+ i2c1: i2c@400a0000 {
+ /* ... master properties skipped ... */
+ clock-frequency = <100000>;
+
+ flash@50 {
+ compatible = "atmel,24c256";
+ reg = <0x50>;
+ };
+
+ pca9532: gpio@60 {
+ compatible = "nxp,pca9532";
+ gpio-controller;
+ #gpio-cells = <2>;
+ reg = <0x60>;
+ };
+ };
+
+Here, two devices are attached to the bus using a speed of 100kHz. For
+additional properties which might be needed to set up the device, please refer
+to its devicetree documentation in Documentation/devicetree/bindings/.
+
+
+Method 1c: Declare the I2C devices via ACPI
+-------------------------------------------
+
+ACPI can also describe I2C devices. There is special documentation for this
+which is currently located at Documentation/acpi/enumeration.txt.
+
+
Method 2: Instantiate the devices explicitly
--------------------------------------------
diff --git a/Documentation/ide/00-INDEX b/Documentation/ide/00-INDEX
index d6b778842b75..22f98ca79539 100644
--- a/Documentation/ide/00-INDEX
+++ b/Documentation/ide/00-INDEX
@@ -10,3 +10,5 @@ ide-tape.txt
- info on the IDE ATAPI streaming tape driver
ide.txt
- important info for users of ATA devices (IDE/EIDE disks and CD-ROMS).
+warm-plug-howto.txt
+ - using sysfs to remove and add IDE devices. \ No newline at end of file
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 8f441dab0396..7116fda7077f 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1726,16 +1726,16 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
option description.
memmap=nn[KMG]@ss[KMG]
- [KNL] Force usage of a specific region of memory
- Region of memory to be used, from ss to ss+nn.
+ [KNL] Force usage of a specific region of memory.
+ Region of memory to be used is from ss to ss+nn.
memmap=nn[KMG]#ss[KMG]
[KNL,ACPI] Mark specific memory as ACPI data.
- Region of memory to be used, from ss to ss+nn.
+ Region of memory to be marked is from ss to ss+nn.
memmap=nn[KMG]$ss[KMG]
[KNL,ACPI] Mark specific memory as reserved.
- Region of memory to be used, from ss to ss+nn.
+ Region of memory to be reserved is from ss to ss+nn.
Example: Exclude memory from 0x18690000-0x1869ffff
memmap=64K$0x18690000
or
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index fa688538e757..d13b9a9a9e00 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -1,13 +1,15 @@
00-INDEX
- This file
-acer-wmi.txt
- - information on the Acer Laptop WMI Extras driver.
+Makefile
+ - Makefile for building dslm example program.
asus-laptop.txt
- information on the Asus Laptop Extras driver.
disk-shock-protection.txt
- information on hard disk shock protection.
dslm.c
- Simple Disk Sleep Monitor program
+hpfall.c
+ - (HP) laptop accelerometer program for disk protection.
laptop-mode.txt
- how to conserve battery power using laptop-mode.
sony-laptop.txt
diff --git a/Documentation/leds/00-INDEX b/Documentation/leds/00-INDEX
index 1ecd1596633e..b4ef1f34e25f 100644
--- a/Documentation/leds/00-INDEX
+++ b/Documentation/leds/00-INDEX
@@ -1,3 +1,7 @@
+00-INDEX
+ - This file
+leds-blinkm.txt
+ - Driver for BlinkM LED-devices.
leds-class.txt
- documents LED handling under Linux.
leds-lp3944.txt
@@ -12,3 +16,7 @@ leds-lp55xx.txt
- description about lp55xx common driver.
leds-lm3556.txt
- notes on how to use the leds-lm3556 driver.
+ledtrig-oneshot.txt
+ - One-shot LED trigger for both sporadic and dense events.
+ledtrig-transient.txt
+ - LED Transient Trigger, one shot timer activation.
diff --git a/Documentation/m68k/00-INDEX b/Documentation/m68k/00-INDEX
index a014e9f00765..2be8c6b00e74 100644
--- a/Documentation/m68k/00-INDEX
+++ b/Documentation/m68k/00-INDEX
@@ -1,5 +1,7 @@
00-INDEX
- this file
+README.buddha
+ - Amiga Buddha and Catweasel IDE Driver
kernel-options.txt
- command line options for Linux/m68k
diff --git a/Documentation/networking/00-INDEX b/Documentation/networking/00-INDEX
index f11580f8719a..557b6ef70c26 100644
--- a/Documentation/networking/00-INDEX
+++ b/Documentation/networking/00-INDEX
@@ -6,8 +6,14 @@
- information on the 3Com Etherlink III Series Ethernet cards.
6pack.txt
- info on the 6pack protocol, an alternative to KISS for AX.25
-DLINK.txt
- - info on the D-Link DE-600/DE-620 parallel port pocket adapters
+LICENSE.qla3xxx
+ - GPLv2 for QLogic Linux Networking HBA Driver
+LICENSE.qlge
+ - GPLv2 for QLogic Linux qlge NIC Driver
+LICENSE.qlcnic
+ - GPLv2 for QLogic Linux qlcnic NIC Driver
+Makefile
+ - Makefile for docsrc.
PLIP.txt
- PLIP: The Parallel Line Internet Protocol device driver
README.ipw2100
@@ -17,7 +23,7 @@ README.ipw2200
README.sb1000
- info on General Instrument/NextLevel SURFboard1000 cable modem.
alias.txt
- - info on using alias network devices
+ - info on using alias network devices.
arcnet-hardware.txt
- tons of info on ARCnet, hubs, jumper settings for ARCnet cards, etc.
arcnet.txt
@@ -80,7 +86,7 @@ framerelay.txt
- info on using Frame Relay/Data Link Connection Identifier (DLCI).
gen_stats.txt
- Generic networking statistics for netlink users.
-generic_hdlc.txt
+generic-hdlc.txt
- The generic High Level Data Link Control (HDLC) layer.
generic_netlink.txt
- info on Generic Netlink
@@ -88,6 +94,8 @@ gianfar.txt
- Gianfar Ethernet Driver.
i40e.txt
- README for the Intel Ethernet Controller XL710 Driver (i40e).
+i40evf.txt
+ - Short note on the Driver for the Intel(R) XL710 X710 Virtual Function
ieee802154.txt
- Linux IEEE 802.15.4 implementation, API and drivers
igb.txt
@@ -102,6 +110,8 @@ ipddp.txt
- AppleTalk-IP Decapsulation and AppleTalk-IP Encapsulation
iphase.txt
- Interphase PCI ATM (i)Chip IA Linux driver info.
+ipsec.txt
+ - Note on not compressing IPSec payload and resulting failed policy check.
ipv6.txt
- Options to the ipv6 kernel module.
ipvs-sysctl.txt
@@ -120,6 +130,8 @@ lapb-module.txt
- programming information of the LAPB module.
ltpc.txt
- the Apple or Farallon LocalTalk PC card driver
+mac80211-auth-assoc-deauth.txt
+ - authentication and association / deauth-disassoc with mac80211
mac80211-injection.txt
- HOWTO use packet injection with mac80211
multiqueue.txt
@@ -134,6 +146,10 @@ netdevices.txt
- info on network device driver functions exported to the kernel.
netif-msg.txt
- Design of the network interface message level setting (NETIF_MSG_*).
+netlink_mmap.txt
+ - memory mapped I/O with netlink
+nf_conntrack-sysctl.txt
+ - list of netfilter-sysctl knobs.
nfc.txt
- The Linux Near Field Communication (NFC) subsystem.
openvswitch.txt
@@ -176,7 +192,7 @@ skfp.txt
- SysKonnect FDDI (SK-5xxx, Compaq Netelligent) driver info.
smc9.txt
- the driver for SMC's 9000 series of Ethernet cards
-spider-net.txt
+spider_net.txt
- README for the Spidernet Driver (as found in PS3 / Cell BE).
stmmac.txt
- README for the STMicro Synopsys Ethernet driver.
@@ -188,6 +204,8 @@ tcp.txt
- short blurb on how TCP output takes place.
tcp-thin.txt
- kernel tuning options for low rate 'thin' TCP streams.
+team.txt
+ - pointer to information for ethernet teaming devices.
tlan.txt
- ThunderLAN (Compaq Netelligent 10/100, Olicom OC-2xxx) driver info.
tproxy.txt
@@ -200,6 +218,8 @@ vortex.txt
- info on using 3Com Vortex (3c590, 3c592, 3c595, 3c597) Ethernet cards.
vxge.txt
- README for the Neterion X3100 PCIe Server Adapter.
+vxlan.txt
+ - Virtual extensible LAN overview
x25.txt
- general info on X.25 development.
x25-iface.txt
diff --git a/Documentation/networking/3c505.txt b/Documentation/networking/3c505.txt
deleted file mode 100644
index 72f38b13101d..000000000000
--- a/Documentation/networking/3c505.txt
+++ /dev/null
@@ -1,45 +0,0 @@
-The 3Com Etherlink Plus (3c505) driver.
-
-This driver now uses DMA. There is currently no support for PIO operation.
-The default DMA channel is 6; this is _not_ autoprobed, so you must
-make sure you configure it correctly. If loading the driver as a
-module, you can do this with "modprobe 3c505 dma=n". If the driver is
-linked statically into the kernel, you must either use an "ether="
-statement on the command line, or change the definition of ELP_DMA in 3c505.h.
-
-The driver will warn you if it has to fall back on the compiled in
-default DMA channel.
-
-If no base address is given at boot time, the driver will autoprobe
-ports 0x300, 0x280 and 0x310 (in that order). If no IRQ is given, the driver
-will try to probe for it.
-
-The driver can be used as a loadable module.
-
-Theoretically, one instance of the driver can now run multiple cards,
-in the standard way (when loading a module, say "modprobe 3c505
-io=0x300,0x340 irq=10,11 dma=6,7" or whatever). I have not tested
-this, though.
-
-The driver may now support revision 2 hardware; the dependency on
-being able to read the host control register has been removed. This
-is also untested, since I don't have a suitable card.
-
-Known problems:
- I still see "DMA upload timed out" messages from time to time. These
-seem to be fairly non-fatal though.
- The card is old and slow.
-
-To do:
- Improve probe/setup code
- Test multicast and promiscuous operation
-
-Authors:
- The driver is mainly written by Craig Southeren, email
- <craigs@ineluki.apana.org.au>.
- Parts of the driver (adapting the driver to 1.1.4+ kernels,
- IRQ/address detection, some changes) and this README by
- Juha Laiho <jlaiho@ichaos.nullnet.fi>.
- DMA mode, more fixes, etc, by Philip Blundell <pjb27@cam.ac.uk>
- Multicard support, Software configurable DMA, etc., by
- Christopher Collins <ccollins@pcug.org.au>
diff --git a/Documentation/networking/can.txt b/Documentation/networking/can.txt
index f3089d423515..0cbe6ec22d6f 100644
--- a/Documentation/networking/can.txt
+++ b/Documentation/networking/can.txt
@@ -554,12 +554,6 @@ solution for a couple of reasons:
not specified in the struct can_frame and therefore it is only valid in
CANFD_MTU sized CAN FD frames.
- As long as the payload length is <=8 the received CAN frames from CAN FD
- capable CAN devices can be received and read by legacy sockets too. When
- user-generated CAN FD frames have a payload length <=8 these can be send
- by legacy CAN network interfaces too. Sending CAN FD frames with payload
- length > 8 to a legacy CAN network interface returns an -EMSGSIZE error.
-
Implementation hint for new CAN applications:
To build a CAN FD aware application use struct canfd_frame as basic CAN
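
The struct canfd_frame named here is the one declared in include/uapi/linux/can.h;
for orientation, its layout is roughly as follows (comments paraphrased, not taken
from this patch):

	struct canfd_frame {
		canid_t	can_id;		/* 32 bit CAN_ID + EFF/RTR/ERR flags */
		__u8	len;		/* frame payload length (0 .. CANFD_MAX_DLEN) */
		__u8	flags;		/* additional flags for CAN FD */
		__u8	__res0;		/* reserved / padding */
		__u8	__res1;		/* reserved / padding */
		__u8	data[CANFD_MAX_DLEN] __attribute__((aligned(8)));
	};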
diff --git a/Documentation/networking/netlink_mmap.txt b/Documentation/networking/netlink_mmap.txt
index b26122973525..c6af4bac5aa8 100644
--- a/Documentation/networking/netlink_mmap.txt
+++ b/Documentation/networking/netlink_mmap.txt
@@ -226,9 +226,9 @@ Ring setup:
void *rx_ring, *tx_ring;
/* Configure ring parameters */
- if (setsockopt(fd, NETLINK_RX_RING, &req, sizeof(req)) < 0)
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_RX_RING, &req, sizeof(req)) < 0)
exit(1);
- if (setsockopt(fd, NETLINK_TX_RING, &req, sizeof(req)) < 0)
+ if (setsockopt(fd, SOL_NETLINK, NETLINK_TX_RING, &req, sizeof(req)) < 0)
exit(1)
/* Calculate size of each individual ring */
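
As a hedged aside (not part of this patch), the req passed to these calls is the
struct nl_mmap_req described earlier in netlink_mmap.txt; a minimal initialization
consistent with its constraints might look like:

	/* One page per block, two 2048-byte frames per block, 64 blocks;
	 * nm_frame_nr must equal nm_block_nr * (nm_block_size / nm_frame_size). */
	struct nl_mmap_req req = {
		.nm_block_size	= 4096,
		.nm_block_nr	= 64,
		.nm_frame_size	= 2048,
		.nm_frame_nr	= 64 * (4096 / 2048),
	};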
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1404674c0a02..6fea79efb4cb 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -453,7 +453,7 @@ TP_STATUS_COPY : This flag indicates that the frame (and associated
enabled previously with setsockopt() and
the PACKET_COPY_THRESH option.
- The number of frames than can be buffered to
+ The number of frames that can be buffered to
be read with recvfrom is limited like a normal socket.
See the SO_RCVBUF option in the socket (7) man page.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 661d3c316a17..048c92b487f6 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -21,26 +21,38 @@ has such a feature).
SO_TIMESTAMPING:
-Instructs the socket layer which kind of information is wanted. The
-parameter is an integer with some of the following bits set. Setting
-other bits is an error and doesn't change the current state.
-
-SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamp in hardware
-SOF_TIMESTAMPING_TX_SOFTWARE: if SOF_TIMESTAMPING_TX_HARDWARE is off or
- fails, then do it in software
-SOF_TIMESTAMPING_RX_HARDWARE: return the original, unmodified time stamp
- as generated by the hardware
-SOF_TIMESTAMPING_RX_SOFTWARE: if SOF_TIMESTAMPING_RX_HARDWARE is off or
- fails, then do it in software
-SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp
-SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to
- the system time base
-SOF_TIMESTAMPING_SOFTWARE: return system time stamp generated in
- software
-
-SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
-SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
-following control message:
+Instructs the socket layer which kind of information should be collected
+and/or reported. The parameter is an integer with some of the following
+bits set. Setting other bits is an error and doesn't change the current
+state.
+
+Four of the bits are requests to the stack to try to generate
+timestamps. Any combination of them is valid.
+
+SOF_TIMESTAMPING_TX_HARDWARE: try to obtain send time stamps in hardware
+SOF_TIMESTAMPING_TX_SOFTWARE: try to obtain send time stamps in software
+SOF_TIMESTAMPING_RX_HARDWARE: try to obtain receive time stamps in hardware
+SOF_TIMESTAMPING_RX_SOFTWARE: try to obtain receive time stamps in software
+
+The other three bits control which timestamps will be reported in a
+generated control message. If none of these bits are set or if none of
+the set bits correspond to data that is available, then the control
+message will not be generated:
+
+SOF_TIMESTAMPING_SOFTWARE: report systime if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
+
+It is worth noting that timestamps may be collected for reasons other
+than being requested by a particular socket with
+SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE. For example, most drivers that
+can generate hardware receive timestamps ignore
+SOF_TIMESTAMPING_RX_HARDWARE. It is still a good idea to set that flag
+in case future drivers pay attention.
+
+If timestamps are reported, they will appear in a control message with
+cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like
+this:
struct scm_timestamping {
struct timespec systime;
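
A hedged illustration of the request/report split described above (not part of the
patch; the socket descriptor is assumed to exist, and older libcs may need
SO_TIMESTAMPING defined by hand):

	#include <stdio.h>
	#include <sys/socket.h>
	#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */

	/* Ask the stack to generate software TX/RX timestamps and to report
	 * them as systime in the SO_TIMESTAMPING control message. */
	static int enable_sw_timestamps(int sock)
	{
		int flags = SOF_TIMESTAMPING_TX_SOFTWARE |
			    SOF_TIMESTAMPING_RX_SOFTWARE |
			    SOF_TIMESTAMPING_SOFTWARE;

		if (setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING,
			       &flags, sizeof(flags)) < 0) {
			perror("setsockopt SO_TIMESTAMPING");
			return -1;
		}
		return 0;
	}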
diff --git a/Documentation/phy.txt b/Documentation/phy.txt
index 0103e4b15b0e..ebff6ee52441 100644
--- a/Documentation/phy.txt
+++ b/Documentation/phy.txt
@@ -75,14 +75,26 @@ Before the controller can make use of the PHY, it has to get a reference to
it. This framework provides the following APIs to get a reference to the PHY.
struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
-
-phy_get and devm_phy_get can be used to get the PHY. In the case of dt boot,
-the string arguments should contain the phy name as given in the dt data and
-in the case of non-dt boot, it should contain the label of the PHY.
-The only difference between the two APIs is that devm_phy_get associates the
-device with the PHY using devres on successful PHY get. On driver detach,
-release function is invoked on the the devres data and devres data is freed.
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
+
+phy_get, phy_optional_get, devm_phy_get and devm_phy_optional_get can
+be used to get the PHY. In the case of dt boot, the string arguments
+should contain the phy name as given in the dt data and in the case of
+non-dt boot, it should contain the label of the PHY. The two
+devm_* variants additionally associate the device with the PHY using
+devres on a successful PHY get. On driver detach, the release function
+is invoked on the devres data and the devres data is freed.
+phy_optional_get and devm_phy_optional_get should be used when the
+phy is optional. These two functions never return -ENODEV; instead
+they return NULL when the phy cannot be found.
+
+It should be noted that NULL is a valid phy reference. All phy
+consumer calls on the NULL phy become NOPs. That is, the release calls,
+the phy_init() and phy_exit() calls, and the phy_power_on() and
+phy_power_off() calls are all NOPs when applied to a NULL phy. The
+NULL phy is useful in drivers for handling optional phy devices.
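
A short hedged sketch of a consumer following these rules (the "usb-phy" name and
the surrounding driver are illustrative only, not taken from this patch):

	struct phy *phy;

	phy = devm_phy_optional_get(dev, "usb-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy);	/* a real error, not merely a missing phy */

	/* If no phy was described, phy is NULL and these calls are NOPs. */
	phy_init(phy);
	phy_power_on(phy);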
5. Releasing a reference to the PHY
diff --git a/Documentation/power/00-INDEX b/Documentation/power/00-INDEX
index a4d682f54231..ad04cc8097ed 100644
--- a/Documentation/power/00-INDEX
+++ b/Documentation/power/00-INDEX
@@ -4,6 +4,8 @@ apm-acpi.txt
- basic info about the APM and ACPI support.
basic-pm-debugging.txt
- Debugging suspend and resume
+charger-manager.txt
+ - Battery charger management.
devices.txt
- How drivers interact with system-wide power management
drivers-testing.txt
@@ -22,6 +24,8 @@ pm_qos_interface.txt
- info on Linux PM Quality of Service interface
power_supply_class.txt
- Tells userspace about battery, UPS, AC or DC power supply properties
+runtime_pm.txt
+ - Power management framework for I/O devices.
s2ram.txt
- How to get suspend to ram working (and debug it when it isn't)
states.txt
@@ -38,7 +42,5 @@ tricks.txt
- How to trick software suspend (to disk) into working when it isn't
userland-swsusp.txt
- Experimental implementation of software suspend in userspace
-video_extension.txt
- - ACPI video extensions
video.txt
- Video issues during resume from suspend
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index a74d0a84d329..4aba0436da65 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -117,6 +117,7 @@ static void usage(char *progname)
" -f val adjust the ptp clock frequency by 'val' ppb\n"
" -g get the ptp clock time\n"
" -h prints this message\n"
+ " -i val index for event/trigger\n"
" -k val measure the time offset between system and phc clock\n"
" for 'val' times (Maximum 25)\n"
" -p val enable output with a period of 'val' nanoseconds\n"
@@ -154,6 +155,7 @@ int main(int argc, char *argv[])
int capabilities = 0;
int extts = 0;
int gettime = 0;
+ int index = 0;
int oneshot = 0;
int pct_offset = 0;
int n_samples = 0;
@@ -167,7 +169,7 @@ int main(int argc, char *argv[])
progname = strrchr(argv[0], '/');
progname = progname ? 1+progname : argv[0];
- while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghk:p:P:sSt:v"))) {
+ while (EOF != (c = getopt(argc, argv, "a:A:cd:e:f:ghi:k:p:P:sSt:v"))) {
switch (c) {
case 'a':
oneshot = atoi(optarg);
@@ -190,6 +192,9 @@ int main(int argc, char *argv[])
case 'g':
gettime = 1;
break;
+ case 'i':
+ index = atoi(optarg);
+ break;
case 'k':
pct_offset = 1;
n_samples = atoi(optarg);
@@ -301,7 +306,7 @@ int main(int argc, char *argv[])
if (extts) {
memset(&extts_request, 0, sizeof(extts_request));
- extts_request.index = 0;
+ extts_request.index = index;
extts_request.flags = PTP_ENABLE_FEATURE;
if (ioctl(fd, PTP_EXTTS_REQUEST, &extts_request)) {
perror("PTP_EXTTS_REQUEST");
@@ -375,7 +380,7 @@ int main(int argc, char *argv[])
return -1;
}
memset(&perout_request, 0, sizeof(perout_request));
- perout_request.index = 0;
+ perout_request.index = index;
perout_request.start.sec = ts.tv_sec + 2;
perout_request.start.nsec = 0;
perout_request.period.sec = 0;
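
As a usage illustration (device path assumed), running testptp -d /dev/ptp0 -e 5 -i 1
would now request five external time stamp events on channel index 1 rather than
always on index 0.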
diff --git a/Documentation/s390/00-INDEX b/Documentation/s390/00-INDEX
index 3a2b96302ecc..10c874ebdfe5 100644
--- a/Documentation/s390/00-INDEX
+++ b/Documentation/s390/00-INDEX
@@ -16,11 +16,13 @@ Debugging390.txt
- hints for debugging on s390 systems.
driver-model.txt
- information on s390 devices and the driver model.
+kvm.txt
+ - ioctl calls to /dev/kvm on s390.
monreader.txt
- information on accessing the z/VM monitor stream from Linux.
+qeth.txt
+ - HiperSockets Bridge Port Support.
s390dbf.txt
- information on using the s390 debug feature.
-TAPE
- - information on the driver for channel-attached tapes.
-zfcpdump
+zfcpdump.txt
- information on the s390 SCSI dump tool.
diff --git a/Documentation/scheduler/00-INDEX b/Documentation/scheduler/00-INDEX
index 46702e4f89c9..eccf7ad2e7f9 100644
--- a/Documentation/scheduler/00-INDEX
+++ b/Documentation/scheduler/00-INDEX
@@ -2,6 +2,8 @@
- this file.
sched-arch.txt
- CPU Scheduler implementation hints for architecture specific code.
+sched-bwc.txt
+ - CFS bandwidth control overview.
sched-design-CFS.txt
- goals, design and implementation of the Completely Fair Scheduler.
sched-domains.txt
diff --git a/Documentation/scsi/00-INDEX b/Documentation/scsi/00-INDEX
index 2044be565d93..c4b978a72f78 100644
--- a/Documentation/scsi/00-INDEX
+++ b/Documentation/scsi/00-INDEX
@@ -36,6 +36,8 @@ NinjaSCSI.txt
- info on WorkBiT NinjaSCSI-32/32Bi driver
aacraid.txt
- Driver supporting Adaptec RAID controllers
+advansys.txt
+ - List of Advansys Host Adapters
aha152x.txt
- info on driver for Adaptec AHA152x based adapters
aic79xx.txt
@@ -44,6 +46,12 @@ aic7xxx.txt
- info on driver for Adaptec controllers
arcmsr_spec.txt
- ARECA FIRMWARE SPEC (for IOP331 adapter)
+bfa.txt
+ - Brocade FC/FCOE adapter driver.
+bnx2fc.txt
+ - FCoE hardware offload for Broadcom network interfaces.
+cxgb3i.txt
+ - Chelsio iSCSI Linux Driver
dc395x.txt
- README file for the dc395x SCSI driver
dpti.txt
@@ -52,18 +60,24 @@ dtc3x80.txt
- info on driver for DTC 2x80 based adapters
g_NCR5380.txt
- info on driver for NCR5380 and NCR53c400 based adapters
+hpsa.txt
+ - HP Smart Array Controller SCSI driver.
hptiop.txt
- HIGHPOINT ROCKETRAID 3xxx RAID DRIVER
in2000.txt
- info on in2000 driver
libsas.txt
- Serial Attached SCSI management layer.
+link_power_management_policy.txt
+ - Link power management options.
lpfc.txt
- LPFC driver release notes
megaraid.txt
- Common Management Module, shared code handling ioctls for LSI drivers
ncr53c8xx.txt
- info on driver for NCR53c8xx based adapters
+osd.txt
+ - Object-Based Storage Device, command set introduction.
osst.txt
- info on driver for OnStream SC-x0 SCSI tape
ppa.txt
@@ -74,6 +88,8 @@ scsi-changer.txt
- README for the SCSI media changer driver
scsi-generic.txt
- info on the sg driver for generic (non-disk/CD/tape) SCSI devices.
+scsi-parameters.txt
+ - List of SCSI parameters to pass to the kernel at module load time.
scsi.txt
- short blurb on using SCSI support as a module.
scsi_mid_low_api.txt
diff --git a/Documentation/serial/00-INDEX b/Documentation/serial/00-INDEX
index 1f1b22fbd739..f9c6b5ed03e7 100644
--- a/Documentation/serial/00-INDEX
+++ b/Documentation/serial/00-INDEX
@@ -4,10 +4,12 @@ README.cycladesZ
- info on Cyclades-Z firmware loading.
digiepca.txt
- info on Digi Intl. {PC,PCI,EISA}Xx and Xem series cards.
-hayes-esp.txt
- - info on using the Hayes ESP serial driver.
+driver
+ - intro to the low level serial driver.
moxa-smartio
- file with info on installing/using Moxa multiport serial driver.
+n_gsm.txt
+ - GSM 0710 tty multiplexer howto.
riscom8.txt
- notes on using the RISCom/8 multi-port serial driver.
rocket.txt
diff --git a/Documentation/spi/00-INDEX b/Documentation/spi/00-INDEX
new file mode 100644
index 000000000000..a128fa835512
--- /dev/null
+++ b/Documentation/spi/00-INDEX
@@ -0,0 +1,22 @@
+00-INDEX
+ - this file.
+Makefile
+ - Makefile for the example sourcefiles.
+butterfly
+ - AVR Butterfly SPI driver overview and pin configuration.
+ep93xx_spi
+ - Basic EP93xx SPI driver configuration.
+pxa2xx
+ - PXA2xx SPI master controller driver, built around an spi_message FIFO workqueue.
+spidev
+ - Intro to the userspace API for spi devices
+spidev_fdx.c
+ - spidev example file
+spi-lm70llp
+ - Connecting an LM70-LLP sensor to the kernel via the SPI subsys.
+spi-sc18is602
+ - NXP SC18IS602/603 I2C-bus to SPI bridge
+spi-summary
+ - (Linux) SPI overview. If unsure about SPI or SPI in Linux, start here.
+spidev_test.c
+ - SPI testing utility.
diff --git a/Documentation/spi/spi-summary b/Documentation/spi/spi-summary
index f72e0d1e0da8..7982bcc4d151 100644
--- a/Documentation/spi/spi-summary
+++ b/Documentation/spi/spi-summary
@@ -543,7 +543,22 @@ SPI MASTER METHODS
queuing transfers that arrive in the meantime. When the driver is
finished with this message, it must call
spi_finalize_current_message() so the subsystem can issue the next
- transfer. This may sleep.
+ message. This may sleep.
+
+ master->transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer)
+ The subsystem calls the driver to transfer a single transfer while
+ queuing transfers that arrive in the meantime. When the driver is
+ finished with this transfer, it must call
+ spi_finalize_current_transfer() so the subsystem can issue the next
+ transfer. This may sleep. Note: transfer_one and transfer_one_message
+ are mutually exclusive; when both are set, the generic subsystem does
+ not call your transfer_one callback.
+
+ Return values:
+ negative errno: error
+ 0: transfer is finished
+ 1: transfer is still in progress
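
	A hedged sketch of a driver-side transfer_one implementation matching
	this contract (the foo_* hardware helpers are hypothetical):

	static int foo_transfer_one(struct spi_master *master,
				    struct spi_device *spi,
				    struct spi_transfer *xfer)
	{
		/* Start the transfer in hardware (DMA or FIFO fill). */
		foo_hw_start(master, xfer);

		/* Still in progress: the completion interrupt handler is
		 * expected to call spi_finalize_current_transfer(master). */
		return 1;
	}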
DEPRECATED METHODS
diff --git a/Documentation/spi/spidev b/Documentation/spi/spidev
index ed2da5e5b28a..3d14035b1766 100644
--- a/Documentation/spi/spidev
+++ b/Documentation/spi/spidev
@@ -85,6 +85,12 @@ settings for data transfer parameters:
SPI_MODE_0..SPI_MODE_3; or if you prefer you can combine SPI_CPOL
(clock polarity, idle high iff this is set) or SPI_CPHA (clock phase,
sample on trailing edge iff this is set) flags.
+ Note that this request is limited to SPI mode flags that fit in a
+ single byte.
+
+ SPI_IOC_RD_MODE32, SPI_IOC_WR_MODE32 ... pass a pointer to a uint32_t
+ which will return (RD) or assign (WR) the full SPI transfer mode,
+ not limited to the bits that fit in one byte.
SPI_IOC_RD_LSB_FIRST, SPI_IOC_WR_LSB_FIRST ... pass a pointer to a byte
which will return (RD) or assign (WR) the bit justification used to
diff --git a/Documentation/spi/spidev_fdx.c b/Documentation/spi/spidev_fdx.c
index 36ec0774ca0b..0ea3e51292fc 100644
--- a/Documentation/spi/spidev_fdx.c
+++ b/Documentation/spi/spidev_fdx.c
@@ -78,10 +78,10 @@ static void do_msg(int fd, int len)
static void dumpstat(const char *name, int fd)
{
- __u8 mode, lsb, bits;
- __u32 speed;
+ __u8 lsb, bits;
+ __u32 mode, speed;
- if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0) {
+ if (ioctl(fd, SPI_IOC_RD_MODE32, &mode) < 0) {
perror("SPI rd_mode");
return;
}
@@ -98,7 +98,7 @@ static void dumpstat(const char *name, int fd)
return;
}
- printf("%s: spi mode %d, %d bits %sper word, %d Hz max\n",
+ printf("%s: spi mode 0x%x, %d bits %sper word, %d Hz max\n",
name, mode, bits, lsb ? "(lsb first) " : "", speed);
}
diff --git a/Documentation/spi/spidev_test.c b/Documentation/spi/spidev_test.c
index 16feda901469..3a2f9d59edab 100644
--- a/Documentation/spi/spidev_test.c
+++ b/Documentation/spi/spidev_test.c
@@ -30,7 +30,7 @@ static void pabort(const char *s)
}
static const char *device = "/dev/spidev1.1";
-static uint8_t mode;
+static uint32_t mode;
static uint8_t bits = 8;
static uint32_t speed = 500000;
static uint16_t delay;
@@ -57,6 +57,21 @@ static void transfer(int fd)
.bits_per_word = bits,
};
+ if (mode & SPI_TX_QUAD)
+ tr.tx_nbits = 4;
+ else if (mode & SPI_TX_DUAL)
+ tr.tx_nbits = 2;
+ if (mode & SPI_RX_QUAD)
+ tr.rx_nbits = 4;
+ else if (mode & SPI_RX_DUAL)
+ tr.rx_nbits = 2;
+ if (!(mode & SPI_LOOP)) {
+ if (mode & (SPI_TX_QUAD | SPI_TX_DUAL))
+ tr.rx_buf = 0;
+ else if (mode & (SPI_RX_QUAD | SPI_RX_DUAL))
+ tr.tx_buf = 0;
+ }
+
ret = ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
if (ret < 1)
pabort("can't send spi message");
@@ -81,7 +96,11 @@ static void print_usage(const char *prog)
" -O --cpol clock polarity\n"
" -L --lsb least significant bit first\n"
" -C --cs-high chip select active high\n"
- " -3 --3wire SI/SO signals shared\n");
+ " -3 --3wire SI/SO signals shared\n"
+ " -N --no-cs no chip select\n"
+ " -R --ready slave pulls low to pause\n"
+ " -2 --dual dual transfer\n"
+ " -4 --quad quad transfer\n");
exit(1);
}
@@ -101,11 +120,13 @@ static void parse_opts(int argc, char *argv[])
{ "3wire", 0, 0, '3' },
{ "no-cs", 0, 0, 'N' },
{ "ready", 0, 0, 'R' },
+ { "dual", 0, 0, '2' },
+ { "quad", 0, 0, '4' },
{ NULL, 0, 0, 0 },
};
int c;
- c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR", lopts, NULL);
+ c = getopt_long(argc, argv, "D:s:d:b:lHOLC3NR24", lopts, NULL);
if (c == -1)
break;
@@ -147,11 +168,23 @@ static void parse_opts(int argc, char *argv[])
case 'R':
mode |= SPI_READY;
break;
+ case '2':
+ mode |= SPI_TX_DUAL;
+ break;
+ case '4':
+ mode |= SPI_TX_QUAD;
+ break;
default:
print_usage(argv[0]);
break;
}
}
+ if (mode & SPI_LOOP) {
+ if (mode & SPI_TX_DUAL)
+ mode |= SPI_RX_DUAL;
+ if (mode & SPI_TX_QUAD)
+ mode |= SPI_RX_QUAD;
+ }
}
int main(int argc, char *argv[])
@@ -168,11 +201,11 @@ int main(int argc, char *argv[])
/*
* spi mode
*/
- ret = ioctl(fd, SPI_IOC_WR_MODE, &mode);
+ ret = ioctl(fd, SPI_IOC_WR_MODE32, &mode);
if (ret == -1)
pabort("can't set spi mode");
- ret = ioctl(fd, SPI_IOC_RD_MODE, &mode);
+ ret = ioctl(fd, SPI_IOC_RD_MODE32, &mode);
if (ret == -1)
pabort("can't get spi mode");
@@ -198,7 +231,7 @@ int main(int argc, char *argv[])
if (ret == -1)
pabort("can't get max speed hz");
- printf("spi mode: %d\n", mode);
+ printf("spi mode: 0x%x\n", mode);
printf("bits per word: %d\n", bits);
printf("max speed: %d Hz (%d KHz)\n", speed, speed/1000);
diff --git a/Documentation/timers/00-INDEX b/Documentation/timers/00-INDEX
index ef2ccbf77fa2..6d042dc1cce0 100644
--- a/Documentation/timers/00-INDEX
+++ b/Documentation/timers/00-INDEX
@@ -8,6 +8,8 @@ hpet_example.c
- sample hpet timer test program
hrtimers.txt
- subsystem for high-resolution kernel timers
+Makefile
+ - Build and link hpet_example
NO_HZ.txt
- Summary of the different methods for the scheduler clock-interrupts management.
timers-howto.txt
diff --git a/Documentation/virtual/kvm/00-INDEX b/Documentation/virtual/kvm/00-INDEX
index 641ec9220179..fee9f2bf9c64 100644
--- a/Documentation/virtual/kvm/00-INDEX
+++ b/Documentation/virtual/kvm/00-INDEX
@@ -20,5 +20,7 @@ ppc-pv.txt
- the paravirtualization interface on PowerPC.
review-checklist.txt
- review checklist for KVM patches.
+s390-diag.txt
+ - Diagnose hypercall description (for IBM S/390)
timekeeping.txt
- timekeeping virtualization for x86-based architectures.
diff --git a/Documentation/vm/00-INDEX b/Documentation/vm/00-INDEX
index a39d06680e1c..081c49777abb 100644
--- a/Documentation/vm/00-INDEX
+++ b/Documentation/vm/00-INDEX
@@ -16,8 +16,6 @@ hwpoison.txt
- explains what hwpoison is
ksm.txt
- how to use the Kernel Samepage Merging feature.
-locking
- - info on how locking and synchronization is done in the Linux vm code.
numa
- information about NUMA specific code in the Linux vm.
numa_memory_policy.txt
@@ -32,6 +30,8 @@ slub.txt
- a short users guide for SLUB.
soft-dirty.txt
- short explanation for soft-dirty PTEs
+split_page_table_lock
+ - Separate per-table lock to improve scalability of the old page_table_lock.
transhuge.txt
- Transparent Hugepage Support, alternative way of using hugepages.
unevictable-lru.txt
diff --git a/Documentation/w1/masters/00-INDEX b/Documentation/w1/masters/00-INDEX
index d63fa024ac05..8330cf9325f0 100644
--- a/Documentation/w1/masters/00-INDEX
+++ b/Documentation/w1/masters/00-INDEX
@@ -4,7 +4,9 @@ ds2482
- The Maxim/Dallas Semiconductor DS2482 provides 1-wire busses.
ds2490
- The Maxim/Dallas Semiconductor DS2490 builds USB <-> W1 bridges.
-mxc_w1
+mxc-w1
- W1 master controller driver found on Freescale MX2/MX3 SoCs
+omap-hdq
+ - HDQ/1-wire module of TI OMAP 2430/3430.
w1-gpio
- GPIO 1-wire bus master driver.
diff --git a/Documentation/w1/slaves/00-INDEX b/Documentation/w1/slaves/00-INDEX
index 75613c9ac4db..6e18c70c3474 100644
--- a/Documentation/w1/slaves/00-INDEX
+++ b/Documentation/w1/slaves/00-INDEX
@@ -4,3 +4,5 @@ w1_therm
- The Maxim/Dallas Semiconductor ds18*20 temperature sensor.
w1_ds2423
- The Maxim/Dallas Semiconductor ds2423 counter device.
+w1_ds28e04
+ - The Maxim/Dallas Semiconductor ds28e04 eeprom.
diff --git a/Documentation/x86/00-INDEX b/Documentation/x86/00-INDEX
index f37b46d34861..692264456f0f 100644
--- a/Documentation/x86/00-INDEX
+++ b/Documentation/x86/00-INDEX
@@ -1,6 +1,20 @@
00-INDEX
- this file
-mtrr.txt
- - how to use x86 Memory Type Range Registers to increase performance
+boot.txt
+ - List of boot protocol versions
+early-microcode.txt
+ - How to load microcode from an initrd-CPIO archive early to fix CPU issues.
+earlyprintk.txt
+ - Using earlyprintk with a USB2 debug port key.
+entry_64.txt
+ - Describe (some of the) kernel entry points for x86.
exception-tables.txt
- why and how Linux kernel uses exception tables on x86
+mtrr.txt
+ - how to use x86 Memory Type Range Registers to increase performance
+pat.txt
+ - Page Attribute Table intro and API
+usb-legacy-support.txt
+ - how to fix/avoid quirks when using emulated PS/2 mouse/keyboard.
+zero-page.txt
+ - layout of the first page of memory.
diff --git a/Documentation/zh_CN/arm64/booting.txt b/Documentation/zh_CN/arm64/booting.txt
index 28fa325b7461..6f6d956ac1c9 100644
--- a/Documentation/zh_CN/arm64/booting.txt
+++ b/Documentation/zh_CN/arm64/booting.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
Maintainer: Will Deacon <will.deacon@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
---------------------------------------------------------------------
Documentation/arm64/booting.txt 的中文翻译
@@ -16,9 +16,9 @@ Documentation/arm64/booting.txt 的中文翻译
译存在问题,请联系中文版维护者。
英文版维护者: Will Deacon <will.deacon@arm.com>
-中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
以下为正文
---------------------------------------------------------------------
@@ -64,8 +64,8 @@ RAM,或可能使用对这个设备已知的 RAM 信息，还可能使用任何
必要性: 强制
-设备树数据块(dtb)大小必须不大于 2 MB,且位于从内核映像起始算起第一个
-512MB 内的 2MB 边界上。这使得内核可以通过初始页表中的单个节描述符来
+设备树数据块(dtb)必须 8 字节对齐，并位于从内核映像起始算起第一个 512MB
+内,且不得跨越 2MB 对齐边界。这使得内核可以通过初始页表中的单个节描述符来
映射此数据块。
@@ -84,13 +84,23 @@ AArch64 内核当前没有提供自解压代码，因此如果使用了压缩内
必要性: 强制
-已解压的内核映像包含一个 32 字节的头,内容如下:
+已解压的内核映像包含一个 64 字节的头,内容如下:
- u32 magic = 0x14000008; /* 跳转到 stext, 小端 */
- u32 res0 = 0; /* 保留 */
+ u32 code0; /* 可执行代码 */
+ u32 code1; /* 可执行代码 */
u64 text_offset; /* 映像装载偏移 */
+ u64 res0 = 0; /* 保留 */
u64 res1 = 0; /* 保留 */
u64 res2 = 0; /* 保留 */
+ u64 res3 = 0; /* 保留 */
+ u64 res4 = 0; /* 保留 */
+ u32 magic = 0x644d5241; /* 魔数, 小端, "ARM\x64" */
+ u32 res5 = 0; /* 保留 */
+
+
+映像头注释：
+
+- code0/code1 负责跳转到 stext.
映像必须位于系统 RAM 起始处的特定偏移(当前是 0x80000)。系统 RAM
的起始地址必须是以 2MB 对齐的。
@@ -118,9 +128,9 @@ AArch64 内核当前没有提供自解压代码，因此如果使用了压缩内
外部高速缓存(如果存在)必须配置并禁用。
- 架构计时器
- CNTFRQ 必须设定为计时器的频率。
- 如果在 EL1 模式下进入内核,则 CNTHCTL_EL2 中的 EL1PCTEN (bit 0)
- 必须置位。
+ CNTFRQ 必须设定为计时器的频率,且 CNTVOFF 必须设定为对所有 CPU
+ 都一致的值。如果在 EL1 模式下进入内核,则 CNTHCTL_EL2 中的
+ EL1PCTEN (bit 0) 必须置位。
- 一致性
通过内核启动的所有 CPU 在内核入口地址上必须处于相同的一致性域中。
@@ -131,23 +141,40 @@ AArch64 内核当前没有提供自解压代码，因此如果使用了压缩内
在进入内核映像的异常级中,所有构架中可写的系统寄存器必须通过软件
在一个更高的异常级别下初始化,以防止在 未知 状态下运行。
+以上对于 CPU 模式、高速缓存、MMU、架构计时器、一致性、系统寄存器的
+必要条件描述适用于所有 CPU。所有 CPU 必须在同一异常级别跳入内核。
+
引导装载程序必须在每个 CPU 处于以下状态时跳入内核入口：
- 主 CPU 必须直接跳入内核映像的第一条指令。通过此 CPU 传递的设备树
- 数据块必须在每个 CPU 节点中包含以下内容:
-
- 1、‘enable-method’属性。目前，此字段支持的值仅为字符串“spin-table”。
-
- 2、‘cpu-release-addr’标识一个 64-bit、初始化为零的内存位置。
+ 数据块必须在每个 CPU 节点中包含一个 ‘enable-method’ 属性,所
+ 支持的 enable-method 请见下文。
引导装载程序必须生成这些设备树属性,并在跳入内核入口之前将其插入
数据块。
-- 任何辅助 CPU 必须在内存保留区（通过设备树中的 /memreserve/ 域传递
+- enable-method 为 “spin-table” 的 CPU 必须在它们的 CPU
+ 节点中包含一个 ‘cpu-release-addr’ 属性。这个属性标识了一个
+ 64 位自然对齐且初始化为零的内存位置。
+
+ 这些 CPU 必须在内存保留区（通过设备树中的 /memreserve/ 域传递
给内核)中自旋于内核之外,轮询它们的 cpu-release-addr 位置（必须
包含在保留区中）。可通过插入 wfe 指令来降低忙循环开销，而主 CPU 将
发出 sev 指令。当对 cpu-release-addr 所指位置的读取操作返回非零值
- 时,CPU 必须直接跳入此值所指向的地址。
+ 时,CPU 必须跳入此值所指向的地址。此值为一个单独的 64 位小端值，
+ 因此 CPU 须在跳转前将所读取的值转换为其本身的端模式。
+
+- enable-method 为 “psci” 的 CPU 保持在内核外(比如,在
+ memory 节点中描述为内核空间的内存区外,或在通过设备树 /memreserve/
+ 域中描述为内核保留区的空间中)。内核将会发起在 ARM 文档(编号
+ ARM DEN 0022A:用于 ARM 上的电源状态协调接口系统软件）中描述的
+ CPU_ON 调用来将 CPU 带入内核。
+
+ *译者注：到文档翻译时，此文档已更新为 ARM DEN 0022B。
+
+ 设备树必须包含一个 ‘psci’ 节点,请参考以下文档:
+ Documentation/devicetree/bindings/arm/psci.txt
+
- 辅助 CPU 通用寄存器设置
x0 = 0 (保留，将来可能使用)
diff --git a/Documentation/zh_CN/arm64/memory.txt b/Documentation/zh_CN/arm64/memory.txt
index a5f6283829f9..a782704c1cb5 100644
--- a/Documentation/zh_CN/arm64/memory.txt
+++ b/Documentation/zh_CN/arm64/memory.txt
@@ -7,7 +7,7 @@ help. Contact the Chinese maintainer if this translation is outdated
or if there is a problem with the translation.
Maintainer: Catalin Marinas <catalin.marinas@arm.com>
-Chinese maintainer: Fu Wei <tekkamanninja@gmail.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
---------------------------------------------------------------------
Documentation/arm64/memory.txt 的中文翻译
@@ -16,9 +16,9 @@ Documentation/arm64/memory.txt 的中文翻译
译存在问题,请联系中文版维护者。
英文版维护者: Catalin Marinas <catalin.marinas@arm.com>
-中文版维护者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版翻译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
-中文版校译者: 傅炜 Fu Wei <tekkamanninja@gmail.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
以下为正文
---------------------------------------------------------------------
@@ -41,7 +41,7 @@ AArch64 Linux 使用页大小为 4KB 的 3 级转换表配置,对于用户和内
TTBR1 中,且从不写入 TTBR0。
-AArch64 Linux 内存布局:
+AArch64 Linux 在页大小为 4KB 时的内存布局:
起始地址 结束地址 大小 用途
-----------------------------------------------------------------------
@@ -55,15 +55,42 @@ ffffffbc00000000 ffffffbdffffffff 8GB vmemmap
ffffffbe00000000 ffffffbffbbfffff ~8GB [防护页,未来用于 vmmemap]
+ffffffbffbc00000 ffffffbffbdfffff 2MB earlyprintk 设备
+
ffffffbffbe00000 ffffffbffbe0ffff 64KB PCI I/O 空间
-ffffffbbffff0000 ffffffbcffffffff ~2MB [防护页]
+ffffffbffbe10000 ffffffbcffffffff ~2MB [防护页]
ffffffbffc000000 ffffffbfffffffff 64MB 模å—
ffffffc000000000 ffffffffffffffff 256GB 内核逻辑内存映射
+AArch64 Linux 在页大å°ä¸º 64KB 时的内存布局:
+
+èµ·å§‹åœ°å€ ç»“æŸåœ°å€ å¤§å° ç”¨é€”
+-----------------------------------------------------------------------
+0000000000000000 000003ffffffffff 4TB 用户空间
+
+fffffc0000000000 fffffdfbfffeffff ~2TB vmalloc
+
+fffffdfbffff0000 fffffdfbffffffff 64KB [防护页]
+
+fffffdfc00000000 fffffdfdffffffff 8GB vmemmap
+
+fffffdfe00000000 fffffdfffbbfffff ~8GB [防护页,未来用于 vmmemap]
+
+fffffdfffbc00000 fffffdfffbdfffff 2MB earlyprintk 设备
+
+fffffdfffbe00000 fffffdfffbe0ffff 64KB PCI I/O 空间
+
+fffffdfffbe10000 fffffdfffbffffff ~2MB [防护页]
+
+fffffdfffc000000 fffffdffffffffff 64MB 模块
+
+fffffe0000000000 ffffffffffffffff 2TB 内核逻辑内存映射
+
+
4KB 页大小的转换表查找：
+--------+--------+--------+--------+--------+--------+--------+--------+
@@ -91,3 +118,10 @@ ffffffc000000000 ffffffffffffffff 256GB 内核逻辑内存映射
| | +--------------------------> [41:29] L2 索引 (仅使用 38:29 )
| +-------------------------------> [47:42] L1 索引 (未使用)
+-------------------------------------------------> [63] TTBR0/1
+
+当使用 KVM 时, 管理程序（hypervisor)在 EL2 中通过相对内核虚拟地址的
+一个固定偏移来映射内核页(内核虚拟地址的高 24 位设为零）:
+
+起始地址 结束地址 大小 用途
+-----------------------------------------------------------------------
+0000004000000000 0000007fffffffff 256GB 在 HYP 中映射的内核对象
diff --git a/Documentation/zh_CN/arm64/tagged-pointers.txt b/Documentation/zh_CN/arm64/tagged-pointers.txt
new file mode 100644
index 000000000000..2664d1bd5a1c
--- /dev/null
+++ b/Documentation/zh_CN/arm64/tagged-pointers.txt
@@ -0,0 +1,52 @@
+Chinese translated version of Documentation/arm64/tagged-pointers.txt
+
+If you have any comment or update to the content, please contact the
+original document maintainer directly. However, if you have a problem
+communicating in English you can also ask the Chinese maintainer for
+help. Contact the Chinese maintainer if this translation is outdated
+or if there is a problem with the translation.
+
+Maintainer: Will Deacon <will.deacon@arm.com>
+Chinese maintainer: Fu Wei <wefu@redhat.com>
+---------------------------------------------------------------------
+Documentation/arm64/tagged-pointers.txt 的中文翻译
+
+如果想评论或更新本文的内容,请直接联系原文档的维护者。如果你使用英文
+交流有困难的话，也可以向中文版维护者求助。如果本翻译更新不及时或者翻
+译存在问题,请联系中文版维护者。
+
+英文版维护者: Will Deacon <will.deacon@arm.com>
+中文版维护者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版翻译者: 傅炜 Fu Wei <wefu@redhat.com>
+中文版校译者: 傅炜 Fu Wei <wefu@redhat.com>
+
+以下为正文
+---------------------------------------------------------------------
+ Linux 在 AArch64 中带标记的虚拟地址
+ =================================
+
+作者: Will Deacon <will.deacon@arm.com>
+日期: 2013 年 06 月 12 日
+
+本文档简述了在 AArch64 地址转换系统中提供的带标记的虚拟地址及其在
+AArch64 Linux 中的潜在用途。
+
+内核提供的地址转换表配置使通过 TTBR0 完成的虚拟地址转换（即用户空间
+映射),其虚拟地址的最高 8 位（63:56)会被转换硬件所忽略。这种机制
+让这些位可供应用程序自由使用，其注意事项如下:
+
+ (1) 内核要求所有传递到 EL1 的用户空间地址带有 0x00 标记。
+ 这意味着任何携带用户空间虚拟地址的系统调用(syscall)
+ 参数 *必须* 在陷入内核前使它们的最高字节被清零。
+
+ (2) 非零标记在传递信号时不被保存。这意味着在应用程序中利用了
+ 标记的信号处理函数无法依赖 siginfo_t 的用户空间虚拟
+ 地址所携带的包含其内部域信息的标记。此规则的一个例外是
+ 当信号是在调试观察点的异常处理程序中产生的,此时标记的
+ 信息将被保存。
+
+ (3) 当使用带标记的指针时需特别留心,因为仅对两个虚拟地址
+ 的高字节,C 编译器很可能无法判断它们是不同的。
+
+此构架会阻止对带标记的 PC 指针的利用,因此在异常返回时,其高字节
+将被设置成一个为 “55” 的扩展符。
diff --git a/MAINTAINERS b/MAINTAINERS
index b2cf5cfb4d29..82640e640f36 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -73,7 +73,8 @@ Descriptions of section entries:
L: Mailing list that is relevant to this area
W: Web-page with status/info
Q: Patchwork web based patch tracking system site
- T: SCM tree type and location. Type is one of: git, hg, quilt, stgit, topgit.
+ T: SCM tree type and location.
+ Type is one of: git, hg, quilt, stgit, topgit
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -473,7 +474,7 @@ F: net/rxrpc/af_rxrpc.c
AGPGART DRIVER
M: David Airlie <airlied@linux.ie>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T: git git://people.freedesktop.org/~airlied/linux (part of drm maint)
S: Maintained
F: drivers/char/agp/
F: include/linux/agp*
@@ -538,7 +539,7 @@ F: arch/alpha/
ALTERA UART/JTAG UART SERIAL DRIVERS
M: Tobias Klauser <tklauser@distanz.ch>
L: linux-serial@vger.kernel.org
-L: nios2-dev@sopc.et.ntust.edu.tw (moderated for non-subscribers)
+L: nios2-dev@lists.rocketboards.org (moderated for non-subscribers)
S: Maintained
F: drivers/tty/serial/altera_uart.c
F: drivers/tty/serial/altera_jtaguart.c
@@ -910,11 +911,11 @@ F: arch/arm/include/asm/hardware/dec21285.h
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
-M: Shawn Guo <shawn.guo@linaro.org>
+M: Shawn Guo <shawn.guo@freescale.com>
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
F: arch/arm/mach-imx/
F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
@@ -1612,11 +1613,11 @@ S: Maintained
F: drivers/net/wireless/atmel*
ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
-M: Bradley Grove <linuxdrivers@attotech.com>
-L: linux-scsi@vger.kernel.org
-W: http://www.attotech.com
-S: Supported
-F: drivers/scsi/esas2r
+M: Bradley Grove <linuxdrivers@attotech.com>
+L: linux-scsi@vger.kernel.org
+W: http://www.attotech.com
+S: Supported
+F: drivers/scsi/esas2r
AUDIT SUBSYSTEM
M: Eric Paris <eparis@redhat.com>
@@ -1737,6 +1738,7 @@ F: include/uapi/linux/bfs_fs.h
BLACKFIN ARCHITECTURE
M: Steven Miao <realmz6@gmail.com>
L: adi-buildroot-devel@lists.sourceforge.net
+T: git git://git.code.sf.net/p/adi-linux/code
W: http://blackfin.uclinux.org
S: Supported
F: arch/blackfin/
@@ -1860,6 +1862,7 @@ F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BCM281XX/BCM11XXX ARM ARCHITECTURE
M: Christian Daudt <bcm@fixthebug.org>
+M: Matt Porter <mporter@linaro.org>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://git.github.com/broadcom/bcm11351
S: Maintained
@@ -2158,7 +2161,7 @@ F: Documentation/zh_CN/
CHIPIDEA USB HIGH SPEED DUAL ROLE CONTROLLER
M: Peter Chen <Peter.Chen@freescale.com>
-T: git://github.com/hzpeterchen/linux-usb.git
+T: git git://github.com/hzpeterchen/linux-usb.git
L: linux-usb@vger.kernel.org
S: Maintained
F: drivers/usb/chipidea/
@@ -2178,9 +2181,9 @@ S: Supported
F: drivers/net/ethernet/cisco/enic/
CISCO VIC LOW LATENCY NIC DRIVER
-M: Upinder Malhi <umalhi@cisco.com>
-S: Supported
-F: drivers/infiniband/hw/usnic
+M: Upinder Malhi <umalhi@cisco.com>
+S: Supported
+F: drivers/infiniband/hw/usnic
CIRRUS LOGIC EP93XX ETHERNET DRIVER
M: Hartley Sweeten <hsweeten@visionengravers.com>
@@ -2367,7 +2370,7 @@ F: include/linux/cpufreq.h
CPU FREQUENCY DRIVERS - ARM BIG LITTLE
M: Viresh Kumar <viresh.kumar@linaro.org>
-M: Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
+M: Sudeep Holla <sudeep.holla@arm.com>
L: cpufreq@vger.kernel.org
L: linux-pm@vger.kernel.org
W: http://www.arm.com/products/processors/technologies/biglittleprocessing.php
@@ -2377,20 +2380,20 @@ F: drivers/cpufreq/arm_big_little.c
F: drivers/cpufreq/arm_big_little_dt.c
CPUIDLE DRIVER - ARM BIG LITTLE
-M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
-M: Daniel Lezcano <daniel.lezcano@linaro.org>
-L: linux-pm@vger.kernel.org
-L: linux-arm-kernel@lists.infradead.org
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
-S: Maintained
-F: drivers/cpuidle/cpuidle-big_little.c
+M: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+M: Daniel Lezcano <daniel.lezcano@linaro.org>
+L: linux-pm@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+S: Maintained
+F: drivers/cpuidle/cpuidle-big_little.c
CPUIDLE DRIVERS
M: Rafael J. Wysocki <rjw@rjwysocki.net>
M: Daniel Lezcano <daniel.lezcano@linaro.org>
L: linux-pm@vger.kernel.org
S: Maintained
-T: git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rafael/linux-pm.git
F: drivers/cpuidle/*
F: include/linux/cpuidle.h
@@ -2408,8 +2411,10 @@ F: tools/power/cpupower/
CPUSETS
M: Li Zefan <lizefan@huawei.com>
+L: cgroups@vger.kernel.org
W: http://www.bullopensource.org/cpuset/
W: http://oss.sgi.com/projects/cpusets/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git
S: Maintained
F: Documentation/cgroups/cpusets.txt
F: include/linux/cpuset.h
@@ -2455,9 +2460,9 @@ S: Maintained
F: sound/pci/cs5535audio/
CW1200 WLAN driver
-M: Solomon Peachy <pizza@shaftnet.org>
-S: Maintained
-F: drivers/net/wireless/cw1200/
+M: Solomon Peachy <pizza@shaftnet.org>
+S: Maintained
+F: drivers/net/wireless/cw1200/
CX18 VIDEO4LINUX DRIVER
M: Andy Walls <awalls@md.metrocast.net>
@@ -2608,9 +2613,9 @@ DC395x SCSI driver
M: Oliver Neukum <oliver@neukum.org>
M: Ali Akcaagac <aliakc@web.de>
M: Jamie Lenehan <lenehan@twibble.org>
-W: http://twibble.org/dist/dc395x/
L: dc395x@twibble.org
-L: http://lists.twibble.org/mailman/listinfo/dc395x/
+W: http://twibble.org/dist/dc395x/
+W: http://lists.twibble.org/mailman/listinfo/dc395x/
S: Maintained
F: Documentation/scsi/dc395x.txt
F: drivers/scsi/dc395x.*
@@ -2845,19 +2850,29 @@ F: lib/kobj*
DRM DRIVERS
M: David Airlie <airlied@linux.ie>
L: dri-devel@lists.freedesktop.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6.git
+T: git git://people.freedesktop.org/~airlied/linux
S: Maintained
F: drivers/gpu/drm/
F: include/drm/
F: include/uapi/drm/
+RADEON DRM DRIVERS
+M: Alex Deucher <alexander.deucher@amd.com>
+M: Christian König <christian.koenig@amd.com>
+L: dri-devel@lists.freedesktop.org
+T: git git://people.freedesktop.org/~agd5f/linux
+S: Supported
+F: drivers/gpu/drm/radeon/
+F: include/drm/radeon*
+F: include/uapi/drm/radeon*
+
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
Q: http://patchwork.freedesktop.org/project/intel-gfx/
-T: git git://people.freedesktop.org/~danvet/drm-intel
+T: git git://anongit.freedesktop.org/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
F: include/drm/i915*
@@ -3082,6 +3097,8 @@ F: fs/ecryptfs/
EDAC-CORE
M: Doug Thompson <dougthompson@xmission.com>
+M: Borislav Petkov <bp@alien8.de>
+M: Mauro Carvalho Chehab <m.chehab@samsung.com>
L: linux-edac@vger.kernel.org
W: bluesmoke.sourceforge.net
S: Supported
@@ -3324,6 +3341,17 @@ S: Maintained
F: include/linux/netfilter_bridge/
F: net/bridge/
+ETHERNET PHY LIBRARY
+M: Florian Fainelli <f.fainelli@gmail.com>
+L: netdev@vger.kernel.org
+S: Maintained
+F: include/linux/phy.h
+F: include/linux/phy_fixed.h
+F: drivers/net/phy/
+F: Documentation/networking/phy.txt
+F: drivers/of/of_mdio.c
+F: drivers/of/of_net.c
+
EXT2 FILE SYSTEM
M: Jan Kara <jack@suse.cz>
L: linux-ext4@vger.kernel.org
@@ -4517,6 +4545,7 @@ M: Greg Rose <gregory.v.rose@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
M: Mitch Williams <mitch.a.williams@intel.com>
+M: Linux NICS <linux.nics@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
@@ -4534,6 +4563,7 @@ F: Documentation/networking/ixgbevf.txt
F: Documentation/networking/i40e.txt
F: Documentation/networking/i40evf.txt
F: drivers/net/ethernet/intel/
+F: drivers/net/ethernet/intel/*/
INTEL-MID GPIO DRIVER
M: David Cohen <david.a.cohen@linux.intel.com>
@@ -4890,7 +4920,7 @@ F: drivers/staging/ktap/
KCONFIG
M: "Yann E. MORIN" <yann.morin.1998@free.fr>
L: linux-kbuild@vger.kernel.org
-T: git://gitorious.org/linux-kconfig/linux-kconfig
+T: git git://gitorious.org/linux-kconfig/linux-kconfig
S: Maintained
F: Documentation/kbuild/kconfig-language.txt
F: scripts/kconfig/
@@ -5447,11 +5477,11 @@ S: Maintained
F: drivers/media/tuners/m88ts2022*
MA901 MASTERKIT USB FM RADIO DRIVER
-M: Alexey Klimov <klimov.linux@gmail.com>
-L: linux-media@vger.kernel.org
-T: git git://linuxtv.org/media_tree.git
-S: Maintained
-F: drivers/media/radio/radio-ma901.c
+M: Alexey Klimov <klimov.linux@gmail.com>
+L: linux-media@vger.kernel.org
+T: git git://linuxtv.org/media_tree.git
+S: Maintained
+F: drivers/media/radio/radio-ma901.c
MAC80211
M: Johannes Berg <johannes@sipsolutions.net>
@@ -5487,6 +5517,11 @@ W: http://www.kernel.org/doc/man-pages
L: linux-man@vger.kernel.org
S: Maintained
+MARVELL ARMADA DRM SUPPORT
+M: Russell King <rmk+kernel@arm.linux.org.uk>
+S: Maintained
+F: drivers/gpu/drm/armada/
+
MARVELL GIGABIT ETHERNET DRIVERS (skge/sky2)
M: Mirko Lindner <mlindner@marvell.com>
M: Stephen Hemminger <stephen@networkplumber.org>
@@ -5607,7 +5642,7 @@ F: drivers/scsi/megaraid/
MELLANOX ETHERNET DRIVER (mlx4_en)
M: Amir Vadai <amirv@mellanox.com>
-L: netdev@vger.kernel.org
+L: netdev@vger.kernel.org
S: Supported
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
@@ -5648,7 +5683,7 @@ F: include/linux/mtd/
F: include/uapi/mtd/
MEN A21 WATCHDOG DRIVER
-M: Johannes Thumshirn <johannes.thumshirn@men.de>
+M: Johannes Thumshirn <johannes.thumshirn@men.de>
L: linux-watchdog@vger.kernel.org
S: Supported
F: drivers/watchdog/mena21_wdt.c
@@ -5704,20 +5739,20 @@ L: linux-rdma@vger.kernel.org
W: http://www.mellanox.com
Q: http://patchwork.ozlabs.org/project/netdev/list/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
+T: git git://openfabrics.org/~eli/connect-ib.git
S: Supported
F: drivers/net/ethernet/mellanox/mlx5/core/
F: include/linux/mlx5/
Mellanox MLX5 IB driver
-M: Eli Cohen <eli@mellanox.com>
-L: linux-rdma@vger.kernel.org
-W: http://www.mellanox.com
-Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git://openfabrics.org/~eli/connect-ib.git
-S: Supported
-F: include/linux/mlx5/
-F: drivers/infiniband/hw/mlx5/
+M: Eli Cohen <eli@mellanox.com>
+L: linux-rdma@vger.kernel.org
+W: http://www.mellanox.com
+Q: http://patchwork.kernel.org/project/linux-rdma/list/
+T: git git://openfabrics.org/~eli/connect-ib.git
+S: Supported
+F: include/linux/mlx5/
+F: drivers/infiniband/hw/mlx5/
MODULE SUPPORT
M: Rusty Russell <rusty@rustcorp.com.au>
@@ -5969,6 +6004,8 @@ F: include/linux/netdevice.h
F: include/uapi/linux/in.h
F: include/uapi/linux/net.h
F: include/uapi/linux/netdevice.h
+F: tools/net/
+F: tools/testing/selftests/net/
NETWORKING [IPv4/IPv6]
M: "David S. Miller" <davem@davemloft.net>
@@ -6142,6 +6179,12 @@ S: Supported
F: drivers/block/nvme*
F: include/linux/nvme.h
+NXP TDA998X DRM DRIVER
+M: Russell King <rmk+kernel@arm.linux.org.uk>
+S: Supported
+F: drivers/gpu/drm/i2c/tda998x_drv.c
+F: include/drm/i2c/tda998x.h
+
OMAP SUPPORT
M: Tony Lindgren <tony@atomide.com>
L: linux-omap@vger.kernel.org
@@ -7196,7 +7239,7 @@ S: Maintained
F: drivers/net/ethernet/rdc/r6040.c
RDS - RELIABLE DATAGRAM SOCKETS
-M: Venkat Venkatsubra <venkat.x.venkatsubra@oracle.com>
+M: Chien Yen <chien.yen@oracle.com>
L: rds-devel@oss.oracle.com (moderated for non-subscribers)
S: Supported
F: net/rds/
@@ -8429,8 +8472,8 @@ TARGET SUBSYSTEM
M: Nicholas A. Bellinger <nab@linux-iscsi.org>
L: linux-scsi@vger.kernel.org
L: target-devel@vger.kernel.org
-L: http://groups.google.com/group/linux-iscsi-target-dev
W: http://www.linux-iscsi.org
+W: http://groups.google.com/group/linux-iscsi-target-dev
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
S: Supported
F: drivers/target/
@@ -8671,17 +8714,17 @@ S: Maintained
F: drivers/media/radio/radio-raremono.c
THERMAL
-M: Zhang Rui <rui.zhang@intel.com>
-M: Eduardo Valentin <eduardo.valentin@ti.com>
-L: linux-pm@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
-Q: https://patchwork.kernel.org/project/linux-pm/list/
-S: Supported
-F: drivers/thermal/
-F: include/linux/thermal.h
-F: include/linux/cpu_cooling.h
-F: Documentation/devicetree/bindings/thermal/
+M: Zhang Rui <rui.zhang@intel.com>
+M: Eduardo Valentin <eduardo.valentin@ti.com>
+L: linux-pm@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
+Q: https://patchwork.kernel.org/project/linux-pm/list/
+S: Supported
+F: drivers/thermal/
+F: include/linux/thermal.h
+F: include/linux/cpu_cooling.h
+F: Documentation/devicetree/bindings/thermal/
THINGM BLINK(1) USB RGB LED DRIVER
M: Vivien Didelot <vivien.didelot@savoirfairelinux.com>
@@ -9715,7 +9758,6 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
P: Silicon Graphics Inc
M: Dave Chinner <david@fromorbit.com>
-M: Ben Myers <bpm@sgi.com>
M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
W: http://oss.sgi.com/projects/xfs
@@ -9784,7 +9826,7 @@ ZR36067 VIDEO FOR LINUX DRIVER
L: mjpeg-users@lists.sourceforge.net
L: linux-media@vger.kernel.org
W: http://mjpeg.sourceforge.net/driver-zoran/
-T: Mercurial http://linuxtv.org/hg/v4l-dvb
+T: hg http://linuxtv.org/hg/v4l-dvb
S: Odd Fixes
F: drivers/media/pci/zoran/
diff --git a/Makefile b/Makefile
index 606ef7c4a544..c10b734339db 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 14
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc8
NAME = Shuffling Zombie Juror
# *DOCUMENTATION*
@@ -605,10 +605,11 @@ endif
ifdef CONFIG_CC_STACKPROTECTOR_REGULAR
stackp-flag := -fstack-protector
ifeq ($(call cc-option, $(stackp-flag)),)
- $(warning Cannot use CONFIG_CC_STACKPROTECTOR: \
- -fstack-protector not supported by compiler))
+ $(warning Cannot use CONFIG_CC_STACKPROTECTOR_REGULAR: \
+ -fstack-protector not supported by compiler)
endif
-else ifdef CONFIG_CC_STACKPROTECTOR_STRONG
+else
+ifdef CONFIG_CC_STACKPROTECTOR_STRONG
stackp-flag := -fstack-protector-strong
ifeq ($(call cc-option, $(stackp-flag)),)
$(warning Cannot use CONFIG_CC_STACKPROTECTOR_STRONG: \
@@ -618,6 +619,7 @@ else
# Force off for distro compilers that enable stack protector by default.
stackp-flag := $(call cc-option, -fno-stack-protector)
endif
+endif
KBUILD_CFLAGS += $(stackp-flag)
# This warning generated too much noise in a regular build.
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 6b58c1de7577..400c663b21c2 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
#else
/* if V-P const for loop, PTAG can be written once outside loop */
if (full_page_op)
- write_aux_reg(ARC_REG_DC_PTAG, paddr);
+ write_aux_reg(aux_tag, paddr);
#endif
while (num_lines-- > 0) {
@@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
write_aux_reg(aux_cmd, vaddr);
vaddr += L1_CACHE_BYTES;
#else
- write_aux_reg(aux, paddr);
+ write_aux_reg(aux_cmd, paddr);
paddr += L1_CACHE_BYTES;
#endif
}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index e25419817791..15949459611f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF
choice
prompt "Memory split"
+ depends on MMU
default VMSPLIT_3G
help
Select the desired split between kernel and user memory.
@@ -1595,6 +1596,7 @@ endchoice
config PAGE_OFFSET
hex
+ default PHYS_OFFSET if !MMU
default 0x40000000 if VMSPLIT_1G
default 0x80000000 if VMSPLIT_2G
default 0xC0000000
@@ -1903,6 +1905,7 @@ config XEN
depends on ARM && AEABI && OF
depends on CPU_V7 && !CPU_V6
depends on !GENERIC_ATOMIC64
+ depends on MMU
select ARM_PSCI
select SWIOTLB_XEN
select ARCH_DMA_ADDR_T_64BIT
diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore
index 47279aa96a6a..0714e0334e33 100644
--- a/arch/arm/boot/compressed/.gitignore
+++ b/arch/arm/boot/compressed/.gitignore
@@ -1,4 +1,5 @@
ashldi3.S
+bswapsdi2.S
font.c
lib1funcs.S
hyp-stub.S
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index b9d6a8b485e0..032030361bef 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -38,6 +38,7 @@ dtb-$(CONFIG_ARCH_AT91) += at91sam9g35ek.dtb
dtb-$(CONFIG_ARCH_AT91) += at91sam9x25ek.dtb
dtb-$(CONFIG_ARCH_AT91) += at91sam9x35ek.dtb
# sama5d3
+dtb-$(CONFIG_ARCH_AT91) += at91-sama5d3_xplained.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d31ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d33ek.dtb
dtb-$(CONFIG_ARCH_AT91) += sama5d34ek.dtb
@@ -208,7 +209,8 @@ dtb-$(CONFIG_ARCH_OMAP2PLUS) += omap2420-h4.dtb \
omap3-n900.dtb \
omap3-n9.dtb \
omap3-n950.dtb \
- omap3-tobi.dtb \
+ omap3-overo-tobi.dtb \
+ omap3-overo-storm-tobi.dtb \
omap3-gta04.dtb \
omap3-igep0020.dtb \
omap3-igep0030.dtb \
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index 4718ec4a4dbf..486880b74831 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -121,7 +121,7 @@
ti,model = "AM335x-EVMSK";
ti,audio-codec = <&tlv320aic3106>;
ti,mcasp-controller = <&mcasp1>;
- ti,codec-clock-rate = <24576000>;
+ ti,codec-clock-rate = <24000000>;
ti,audio-routing =
"Headphone Jack", "HPLOUT",
"Headphone Jack", "HPROUT";
@@ -256,6 +256,12 @@
>;
};
+ mmc1_pins: pinmux_mmc1_pins {
+ pinctrl-single,pins = <
+ 0x160 (PIN_INPUT | MUX_MODE7) /* spi0_cs1.gpio0_6 */
+ >;
+ };
+
mcasp1_pins: mcasp1_pins {
pinctrl-single,pins = <
0x10c (PIN_INPUT_PULLDOWN | MUX_MODE4) /* mii1_crs.mcasp1_aclkx */
@@ -456,6 +462,9 @@
status = "okay";
vmmc-supply = <&vmmc_reg>;
bus-width = <4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&mmc1_pins>;
+ cd-gpios = <&gpio0 6 GPIO_ACTIVE_HIGH>;
};
&sham {
diff --git a/arch/arm/boot/dts/armada-xp-mv78260.dtsi b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
index 66609684d41b..9480cf891f8c 100644
--- a/arch/arm/boot/dts/armada-xp-mv78260.dtsi
+++ b/arch/arm/boot/dts/armada-xp-mv78260.dtsi
@@ -23,6 +23,7 @@
gpio0 = &gpio0;
gpio1 = &gpio1;
gpio2 = &gpio2;
+ eth3 = &eth3;
};
cpus {
@@ -291,7 +292,7 @@
interrupts = <91>;
};
- ethernet@34000 {
+ eth3: ethernet@34000 {
compatible = "marvell,armada-370-neta";
reg = <0x34000 0x4000>;
interrupts = <14>;
diff --git a/arch/arm/boot/dts/at91-sama5d3_xplained.dts b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
new file mode 100644
index 000000000000..ce1375595e5f
--- /dev/null
+++ b/arch/arm/boot/dts/at91-sama5d3_xplained.dts
@@ -0,0 +1,229 @@
+/*
+ * at91-sama5d3_xplained.dts - Device Tree file for the SAMA5D3 Xplained board
+ *
+ * Copyright (C) 2014 Atmel,
+ * 2014 Nicolas Ferre <nicolas.ferre@atmel.com>
+ *
+ * Licensed under GPLv2 or later.
+ */
+/dts-v1/;
+#include "sama5d36.dtsi"
+
+/ {
+ model = "SAMA5D3 Xplained";
+ compatible = "atmel,sama5d3-xplained", "atmel,sama5d3", "atmel,sama5";
+
+ chosen {
+ bootargs = "console=ttyS0,115200";
+ };
+
+ memory {
+ reg = <0x20000000 0x10000000>;
+ };
+
+ ahb {
+ apb {
+ mmc0: mmc@f0000000 {
+ pinctrl-0 = <&pinctrl_mmc0_clk_cmd_dat0 &pinctrl_mmc0_dat1_3 &pinctrl_mmc0_dat4_7 &pinctrl_mmc0_cd>;
+ status = "okay";
+ slot@0 {
+ reg = <0>;
+ bus-width = <8>;
+ cd-gpios = <&pioE 0 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ spi0: spi@f0004000 {
+ cs-gpios = <&pioD 13 0>;
+ status = "okay";
+ };
+
+ can0: can@f000c000 {
+ status = "okay";
+ };
+
+ i2c0: i2c@f0014000 {
+ status = "okay";
+ };
+
+ i2c1: i2c@f0018000 {
+ status = "okay";
+ };
+
+ macb0: ethernet@f0028000 {
+ phy-mode = "rgmii";
+ status = "okay";
+ };
+
+ usart0: serial@f001c000 {
+ status = "okay";
+ };
+
+ usart1: serial@f0020000 {
+ pinctrl-0 = <&pinctrl_usart1 &pinctrl_usart1_rts_cts>;
+ status = "okay";
+ };
+
+ uart0: serial@f0024000 {
+ status = "okay";
+ };
+
+ mmc1: mmc@f8000000 {
+ pinctrl-0 = <&pinctrl_mmc1_clk_cmd_dat0 &pinctrl_mmc1_dat1_3 &pinctrl_mmc1_cd>;
+ status = "okay";
+ slot@0 {
+ reg = <0>;
+ bus-width = <4>;
+ cd-gpios = <&pioE 1 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ spi1: spi@f8008000 {
+ cs-gpios = <&pioC 25 0>, <0>, <0>, <&pioD 16 0>;
+ status = "okay";
+ };
+
+ adc0: adc@f8018000 {
+ pinctrl-0 = <
+ &pinctrl_adc0_adtrg
+ &pinctrl_adc0_ad0
+ &pinctrl_adc0_ad1
+ &pinctrl_adc0_ad2
+ &pinctrl_adc0_ad3
+ &pinctrl_adc0_ad4
+ &pinctrl_adc0_ad5
+ &pinctrl_adc0_ad6
+ &pinctrl_adc0_ad7
+ &pinctrl_adc0_ad8
+ &pinctrl_adc0_ad9
+ >;
+ status = "okay";
+ };
+
+ i2c2: i2c@f801c000 {
+ dmas = <0>, <0>; /* Do not use DMA for i2c2 */
+ status = "okay";
+ };
+
+ macb1: ethernet@f802c000 {
+ phy-mode = "rmii";
+ status = "okay";
+ };
+
+ dbgu: serial@ffffee00 {
+ status = "okay";
+ };
+
+ pinctrl@fffff200 {
+ board {
+ pinctrl_mmc0_cd: mmc0_cd {
+ atmel,pins =
+ <AT91_PIOE 0 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+
+ pinctrl_mmc1_cd: mmc1_cd {
+ atmel,pins =
+ <AT91_PIOE 1 AT91_PERIPH_GPIO AT91_PINCTRL_PULL_UP_DEGLITCH>;
+ };
+
+ pinctrl_usba_vbus: usba_vbus {
+ atmel,pins =
+ <AT91_PIOE 9 AT91_PERIPH_GPIO AT91_PINCTRL_DEGLITCH>; /* PE9, conflicts with A9 */
+ };
+ };
+ };
+
+ pmc: pmc@fffffc00 {
+ main: mainck {
+ clock-frequency = <12000000>;
+ };
+ };
+ };
+
+ nand0: nand@60000000 {
+ nand-bus-width = <8>;
+ nand-ecc-mode = "hw";
+ atmel,has-pmecc;
+ atmel,pmecc-cap = <4>;
+ atmel,pmecc-sector-size = <512>;
+ nand-on-flash-bbt;
+ status = "okay";
+
+ at91bootstrap@0 {
+ label = "at91bootstrap";
+ reg = <0x0 0x40000>;
+ };
+
+ bootloader@40000 {
+ label = "bootloader";
+ reg = <0x40000 0x80000>;
+ };
+
+ bootloaderenv@c0000 {
+ label = "bootloader env";
+ reg = <0xc0000 0xc0000>;
+ };
+
+ dtb@180000 {
+ label = "device tree";
+ reg = <0x180000 0x80000>;
+ };
+
+ kernel@200000 {
+ label = "kernel";
+ reg = <0x200000 0x600000>;
+ };
+
+ rootfs@800000 {
+ label = "rootfs";
+ reg = <0x800000 0x0f800000>;
+ };
+ };
+
+ usb0: gadget@00500000 {
+ atmel,vbus-gpio = <&pioE 9 GPIO_ACTIVE_HIGH>; /* PE9, conflicts with A9 */
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_usba_vbus>;
+ status = "okay";
+ };
+
+ usb1: ohci@00600000 {
+ num-ports = <3>;
+ atmel,vbus-gpio = <0
+ &pioE 3 GPIO_ACTIVE_LOW
+ &pioE 4 GPIO_ACTIVE_LOW
+ >;
+ status = "okay";
+ };
+
+ usb2: ehci@00700000 {
+ status = "okay";
+ };
+ };
+
+ gpio_keys {
+ compatible = "gpio-keys";
+
+ bp3 {
+ label = "PB_USER";
+ gpios = <&pioE 29 GPIO_ACTIVE_LOW>;
+ linux,code = <0x104>;
+ gpio-key,wakeup;
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ d2 {
+ label = "d2";
+ gpios = <&pioE 23 GPIO_ACTIVE_LOW>; /* PE23, conflicts with A23, CTS2 */
+ linux,default-trigger = "heartbeat";
+ };
+
+ d3 {
+ label = "d3";
+ gpios = <&pioE 24 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 0042f73068b0..fece8665fb63 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -523,7 +523,7 @@
};
i2c0: i2c@fff88000 {
- compatible = "atmel,at91sam9263-i2c";
+ compatible = "atmel,at91sam9260-i2c";
reg = <0xfff88000 0x100>;
interrupts = <13 IRQ_TYPE_LEVEL_HIGH 6>;
#address-cells = <1>;
diff --git a/arch/arm/boot/dts/at91sam9n12ek.dts b/arch/arm/boot/dts/at91sam9n12ek.dts
index e9487f6f0166..924a6a6ffd0f 100644
--- a/arch/arm/boot/dts/at91sam9n12ek.dts
+++ b/arch/arm/boot/dts/at91sam9n12ek.dts
@@ -124,6 +124,10 @@
nand-on-flash-bbt;
status = "okay";
};
+
+ usb0: ohci@00500000 {
+ status = "okay";
+ };
};
leds {
diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi
index e491b82f8d67..792fde1b7f75 100644
--- a/arch/arm/boot/dts/bcm11351.dtsi
+++ b/arch/arm/boot/dts/bcm11351.dtsi
@@ -147,7 +147,7 @@
};
pinctrl@35004800 {
- compatible = "brcm,capri-pinctrl";
+ compatible = "brcm,bcm11351-pinctrl";
reg = <0x35004800 0x430>;
};
diff --git a/arch/arm/boot/dts/dove.dtsi b/arch/arm/boot/dts/dove.dtsi
index 2b76524f4aa7..187fd46b7b5e 100644
--- a/arch/arm/boot/dts/dove.dtsi
+++ b/arch/arm/boot/dts/dove.dtsi
@@ -379,15 +379,6 @@
#clock-cells = <1>;
};
- pmu_intc: pmu-interrupt-ctrl@d0050 {
- compatible = "marvell,dove-pmu-intc";
- interrupt-controller;
- #interrupt-cells = <1>;
- reg = <0xd0050 0x8>;
- interrupts = <33>;
- marvell,#interrupts = <7>;
- };
-
pinctrl: pin-ctrl@d0200 {
compatible = "marvell,dove-pinctrl";
reg = <0xd0200 0x10>;
@@ -610,8 +601,6 @@
rtc: real-time-clock@d8500 {
compatible = "marvell,orion-rtc";
reg = <0xd8500 0x20>;
- interrupt-parent = <&pmu_intc>;
- interrupts = <5>;
};
gpio2: gpio-ctrl@e8400 {
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index fd8fc7cd53f3..5bfae54fb780 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -52,12 +52,6 @@
};
};
- codec: spdif-transmitter {
- compatible = "linux,spdif-dit";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_hummingboard_spdif>;
- };
-
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@@ -111,7 +105,7 @@
};
pinctrl_hummingboard_spdif: hummingboard-spdif {
- fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
+ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_hummingboard_usbh1_vbus: hummingboard-usbh1-vbus {
@@ -142,6 +136,8 @@
};
&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_hummingboard_spdif>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 64daa3b311f6..c2a24888a276 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -46,12 +46,6 @@
};
};
- codec: spdif-transmitter {
- compatible = "linux,spdif-dit";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_cubox_i_spdif>;
- };
-
sound-spdif {
compatible = "fsl,imx-audio-spdif";
model = "imx-spdif";
@@ -89,7 +83,7 @@
};
pinctrl_cubox_i_spdif: cubox-i-spdif {
- fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x1b0b0>;
+ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
};
pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus {
@@ -121,6 +115,8 @@
};
&spdif {
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_cubox_i_spdif>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/keystone-clocks.dtsi b/arch/arm/boot/dts/keystone-clocks.dtsi
index 2363593e1050..ef58d1c24313 100644
--- a/arch/arm/boot/dts/keystone-clocks.dtsi
+++ b/arch/arm/boot/dts/keystone-clocks.dtsi
@@ -612,7 +612,7 @@ clocks {
compatible = "ti,keystone,psc-clock";
clocks = <&chipclk13>;
clock-output-names = "vcp-3";
- reg = <0x0235000a8 0xb00>, <0x02350060 0x400>;
+ reg = <0x023500a8 0xb00>, <0x02350060 0x400>;
reg-names = "control", "domain";
domain-id = <24>;
};
diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts
index b9b55c95a566..d3b253bbc885 100644
--- a/arch/arm/boot/dts/omap3-gta04.dts
+++ b/arch/arm/boot/dts/omap3-gta04.dts
@@ -13,7 +13,7 @@
/ {
model = "OMAP3 GTA04";
- compatible = "ti,omap3-gta04", "ti,omap3";
+ compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3";
cpus {
cpu@0 {
@@ -32,7 +32,7 @@
aux-button {
label = "aux";
linux,code = <169>;
- gpios = <&gpio1 7 GPIO_ACTIVE_LOW>;
+ gpios = <&gpio1 7 GPIO_ACTIVE_HIGH>;
gpio-key,wakeup;
};
};
@@ -92,6 +92,8 @@
bmp085@77 {
compatible = "bosch,bmp085";
reg = <0x77>;
+ interrupt-parent = <&gpio4>;
+ interrupts = <17 IRQ_TYPE_EDGE_RISING>;
};
/* leds */
@@ -141,8 +143,8 @@
pinctrl-names = "default";
pinctrl-0 = <&mmc1_pins>;
vmmc-supply = <&vmmc1>;
- vmmc_aux-supply = <&vsim>;
bus-width = <4>;
+ ti,non-removable;
};
&mmc2 {
diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts
index 25a2b5f652fd..f2779ac75872 100644
--- a/arch/arm/boot/dts/omap3-igep0020.dts
+++ b/arch/arm/boot/dts/omap3-igep0020.dts
@@ -14,7 +14,7 @@
/ {
model = "IGEPv2 (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0020", "ti,omap3";
+ compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3";
leds {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts
index 145c58cfc8ac..2793749eb1ba 100644
--- a/arch/arm/boot/dts/omap3-igep0030.dts
+++ b/arch/arm/boot/dts/omap3-igep0030.dts
@@ -13,7 +13,7 @@
/ {
model = "IGEP COM MODULE (TI OMAP AM/DM37x)";
- compatible = "isee,omap3-igep0030", "ti,omap3";
+ compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3";
leds {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/omap3-n9.dts b/arch/arm/boot/dts/omap3-n9.dts
index 39828ce464ee..9938b5dc1909 100644
--- a/arch/arm/boot/dts/omap3-n9.dts
+++ b/arch/arm/boot/dts/omap3-n9.dts
@@ -14,5 +14,5 @@
/ {
model = "Nokia N9";
- compatible = "nokia,omap3-n9", "ti,omap3";
+ compatible = "nokia,omap3-n9", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 6fc85f963530..0bf40c90faba 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -1,6 +1,6 @@
/*
* Copyright (C) 2013 Pavel Machek <pavel@ucw.cz>
- * Copyright 2013 Aaro Koskinen <aaro.koskinen@iki.fi>
+ * Copyright (C) 2013-2014 Aaro Koskinen <aaro.koskinen@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 (or later) as
@@ -13,7 +13,7 @@
/ {
model = "Nokia N900";
- compatible = "nokia,omap3-n900", "ti,omap3";
+ compatible = "nokia,omap3-n900", "ti,omap3430", "ti,omap3";
cpus {
cpu@0 {
diff --git a/arch/arm/boot/dts/omap3-n950.dts b/arch/arm/boot/dts/omap3-n950.dts
index b076a526b999..261c5589bfa3 100644
--- a/arch/arm/boot/dts/omap3-n950.dts
+++ b/arch/arm/boot/dts/omap3-n950.dts
@@ -14,5 +14,5 @@
/ {
model = "Nokia N950";
- compatible = "nokia,omap3-n950", "ti,omap3";
+ compatible = "nokia,omap3-n950", "ti,omap36xx", "ti,omap3";
};
diff --git a/arch/arm/boot/dts/omap3-overo-storm-tobi.dts b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
new file mode 100644
index 000000000000..966b5c9cd96a
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-storm-tobi.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Tobi expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap36xx.dtsi"
+#include "omap3-overo-tobi-common.dtsi"
+
+/ {
+ model = "OMAP36xx/AM37xx/DM37xx Gumstix Overo on Tobi";
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap36xx", "ti,omap3";
+};
+
diff --git a/arch/arm/boot/dts/omap3-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
index 7e4ad2aec37a..4edc013a91c1 100644
--- a/arch/arm/boot/dts/omap3-tobi.dts
+++ b/arch/arm/boot/dts/omap3-overo-tobi-common.dtsi
@@ -13,9 +13,6 @@
#include "omap3-overo.dtsi"
/ {
- model = "TI OMAP3 Gumstix Overo on Tobi";
- compatible = "ti,omap3-tobi", "ti,omap3-overo", "ti,omap3";
-
leds {
compatible = "gpio-leds";
heartbeat {
diff --git a/arch/arm/boot/dts/omap3-overo-tobi.dts b/arch/arm/boot/dts/omap3-overo-tobi.dts
new file mode 100644
index 000000000000..de5653e1b5ca
--- /dev/null
+++ b/arch/arm/boot/dts/omap3-overo-tobi.dts
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2012 Florian Vaussard, EPFL Mobots group
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Tobi expansion board is manufactured by Gumstix Inc.
+ */
+
+/dts-v1/;
+
+#include "omap34xx.dtsi"
+#include "omap3-overo-tobi-common.dtsi"
+
+/ {
+ model = "OMAP35xx Gumstix Overo on Tobi";
+ compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3";
+};
+
diff --git a/arch/arm/boot/dts/omap3-overo.dtsi b/arch/arm/boot/dts/omap3-overo.dtsi
index a461d2fd1fb0..597099907f8e 100644
--- a/arch/arm/boot/dts/omap3-overo.dtsi
+++ b/arch/arm/boot/dts/omap3-overo.dtsi
@@ -9,9 +9,6 @@
/*
* The Gumstix Overo must be combined with an expansion board.
*/
-/dts-v1/;
-
-#include "omap34xx.dtsi"
/ {
pwmleds {
diff --git a/arch/arm/boot/dts/sama5d3.dtsi b/arch/arm/boot/dts/sama5d3.dtsi
index 52447c17537a..3d5faf85f51b 100644
--- a/arch/arm/boot/dts/sama5d3.dtsi
+++ b/arch/arm/boot/dts/sama5d3.dtsi
@@ -1228,7 +1228,7 @@
compatible = "atmel,at91rm9200-ohci", "usb-ohci";
reg = <0x00600000 0x100000>;
interrupts = <32 IRQ_TYPE_LEVEL_HIGH 2>;
- clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+ clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
<&uhpck>;
clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
status = "disabled";
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi
index 6c31c26e6cc0..db58cad6acd3 100644
--- a/arch/arm/boot/dts/sama5d36.dtsi
+++ b/arch/arm/boot/dts/sama5d36.dtsi
@@ -8,8 +8,8 @@
*/
#include "sama5d3.dtsi"
#include "sama5d3_can.dtsi"
-#include "sama5d3_emac.dtsi"
#include "sama5d3_gmac.dtsi"
+#include "sama5d3_emac.dtsi"
#include "sama5d3_lcd.dtsi"
#include "sama5d3_mci2.dtsi"
#include "sama5d3_tcb1.dtsi"
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index 0c1e8d871ed1..6cb9b68e2188 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -188,7 +188,6 @@
msp2: msp@80117000 {
pinctrl-names = "default";
pinctrl-0 = <&msp2_default_mode>;
- status = "okay";
};
msp3: msp@80125000 {
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 040bb0eba152..d4d2763f4794 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -315,7 +315,7 @@
ranges;
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
clocks = <&ahb_gates 17>;
@@ -323,7 +323,7 @@
};
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
status = "disabled";
#address-cells = <1>;
@@ -426,7 +426,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index ea16054857a4..79fd412005b0 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -278,7 +278,7 @@
ranges;
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <55>;
clocks = <&ahb_gates 17>;
@@ -286,7 +286,7 @@
};
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
status = "disabled";
#address-cells = <1>;
@@ -383,7 +383,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index 320335abfccd..c463fd730c91 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -346,7 +346,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <29>;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index 119f066f0d98..6f25cf559ad0 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -340,7 +340,7 @@
ranges;
emac: ethernet@01c0b000 {
- compatible = "allwinner,sun4i-emac";
+ compatible = "allwinner,sun4i-a10-emac";
reg = <0x01c0b000 0x1000>;
interrupts = <0 55 4>;
clocks = <&ahb_gates 17>;
@@ -348,7 +348,7 @@
};
mdio@01c0b080 {
- compatible = "allwinner,sun4i-mdio";
+ compatible = "allwinner,sun4i-a10-mdio";
reg = <0x01c0b080 0x14>;
status = "disabled";
#address-cells = <1>;
@@ -454,7 +454,7 @@
rtc: rtc@01c20d00 {
compatible = "allwinner,sun7i-a20-rtc";
reg = <0x01c20d00 0x20>;
- interrupts = <0 24 1>;
+ interrupts = <0 24 4>;
};
sid: eeprom@01c23800 {
@@ -463,7 +463,7 @@
};
rtp: rtp@01c25000 {
- compatible = "allwinner,sun4i-ts";
+ compatible = "allwinner,sun4i-a10-ts";
reg = <0x01c25000 0x100>;
interrupts = <0 29 4>;
};
@@ -596,10 +596,10 @@
hstimer@01c60000 {
compatible = "allwinner,sun7i-a20-hstimer";
reg = <0x01c60000 0x1000>;
- interrupts = <0 81 1>,
- <0 82 1>,
- <0 83 1>,
- <0 84 1>;
+ interrupts = <0 81 4>,
+ <0 82 4>,
+ <0 83 4>,
+ <0 84 4>;
clocks = <&ahb_gates 28>;
};
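Editor's note: the interrupt-flag hunks above in sun7i-a20.dtsi change the third cell of several GIC interrupt specifiers from 1 to 4. For reference only (the values below come from include/dt-bindings/interrupt-controller/irq.h), this switches the RTC and high-speed timer interrupts from edge- to level-triggered:

#define IRQ_TYPE_EDGE_RISING	1	/* old third cell in the hunks above */
#define IRQ_TYPE_LEVEL_HIGH	4	/* new third cell */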
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 389e987ec281..44ec401ec366 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -57,6 +57,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ nvidia,head = <0>;
+
rgb {
status = "disabled";
};
@@ -72,6 +74,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ nvidia,head = <1>;
+
rgb {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 480ecda3416b..48d2a7f4d0c0 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -94,6 +94,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ nvidia,head = <0>;
+
rgb {
status = "disabled";
};
@@ -109,6 +111,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ nvidia,head = <1>;
+
rgb {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 9104224124ee..1e156d9d0506 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -28,7 +28,7 @@
compatible = "nvidia,cardhu", "nvidia,tegra30";
aliases {
- rtc0 = "/i2c@7000d000/tps6586x@34";
+ rtc0 = "/i2c@7000d000/tps65911@2d";
rtc1 = "/rtc@7000e000";
};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index ed8e7700b46d..19a84e933f4e 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -170,6 +170,8 @@
resets = <&tegra_car 27>;
reset-names = "dc";
+ nvidia,head = <0>;
+
rgb {
status = "disabled";
};
@@ -185,6 +187,8 @@
resets = <&tegra_car 26>;
reset-names = "dc";
+ nvidia,head = <1>;
+
rgb {
status = "disabled";
};
diff --git a/arch/arm/boot/dts/testcases/tests.dtsi b/arch/arm/boot/dts/testcases/tests.dtsi
deleted file mode 100644
index 3f123ecc9dd7..000000000000
--- a/arch/arm/boot/dts/testcases/tests.dtsi
+++ /dev/null
@@ -1,2 +0,0 @@
-/include/ "tests-phandle.dtsi"
-/include/ "tests-interrupts.dtsi"
diff --git a/arch/arm/boot/dts/versatile-pb.dts b/arch/arm/boot/dts/versatile-pb.dts
index f43907c40c93..65f657711323 100644
--- a/arch/arm/boot/dts/versatile-pb.dts
+++ b/arch/arm/boot/dts/versatile-pb.dts
@@ -1,4 +1,4 @@
-/include/ "versatile-ab.dts"
+#include <versatile-ab.dts>
/ {
model = "ARM Versatile PB";
@@ -47,4 +47,4 @@
};
};
-/include/ "testcases/tests.dtsi"
+#include <testcases.dtsi>
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 845bc745706b..ee6982976d66 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -29,6 +29,7 @@ CONFIG_ARCH_OMAP3=y
CONFIG_ARCH_OMAP4=y
CONFIG_SOC_OMAP5=y
CONFIG_SOC_AM33XX=y
+CONFIG_SOC_DRA7XX=y
CONFIG_SOC_AM43XX=y
CONFIG_ARCH_ROCKCHIP=y
CONFIG_ARCH_SOCFPGA=y
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 00fe9e9710fd..27d69b558c5d 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_TEGRA=y
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_ONESHOT=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index e9a49fe0284e..8b8b61685a34 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -212,6 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
static inline void __flush_icache_all(void)
{
__flush_icache_preferred();
+ dsb();
}
/*
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 8756e4bcdba0..4afb376d9c7c 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -30,14 +30,15 @@
*/
#define UL(x) _AC(x, UL)
+/* PAGE_OFFSET - the virtual address of the start of the kernel image */
+#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
+
#ifdef CONFIG_MMU
/*
- * PAGE_OFFSET - the virtual address of the start of the kernel image
* TASK_SIZE - the maximum size of a user space task.
* TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
*/
-#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET)
#define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
#define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M)
@@ -104,10 +105,6 @@
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
-#ifndef PAGE_OFFSET
-#define PAGE_OFFSET PLAT_PHYS_OFFSET
-#endif
-
/*
* The module can be at any place in ram in nommu mode.
*/
diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
index 03243f7eeddf..85c60adc8b60 100644
--- a/arch/arm/include/asm/pgtable-3level.h
+++ b/arch/arm/include/asm/pgtable-3level.h
@@ -120,13 +120,16 @@
/*
* 2nd stage PTE definitions for LPAE.
*/
-#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x5) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* MemAttr[3:0] */
-#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
-#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_MT_UNCACHED (_AT(pteval_t, 0x0) << 2) /* strongly ordered */
+#define L_PTE_S2_MT_WRITETHROUGH (_AT(pteval_t, 0xa) << 2) /* normal inner write-through */
+#define L_PTE_S2_MT_WRITEBACK (_AT(pteval_t, 0xf) << 2) /* normal inner write-back */
+#define L_PTE_S2_MT_DEV_SHARED (_AT(pteval_t, 0x1) << 2) /* device */
+#define L_PTE_S2_MT_MASK (_AT(pteval_t, 0xf) << 2)
-#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
+#define L_PTE_S2_RDONLY (_AT(pteval_t, 1) << 6) /* HAP[1] */
+#define L_PTE_S2_RDWR (_AT(pteval_t, 3) << 6) /* HAP[2:1] */
+
+#define L_PMD_S2_RDWR (_AT(pmdval_t, 3) << 6) /* HAP[2:1] */
/*
* Hyp-mode PL2 PTE definitions for LPAE.
diff --git a/arch/arm/include/asm/spinlock.h b/arch/arm/include/asm/spinlock.h
index ef3c6072aa45..ac4bfae26702 100644
--- a/arch/arm/include/asm/spinlock.h
+++ b/arch/arm/include/asm/spinlock.h
@@ -37,18 +37,9 @@
static inline void dsb_sev(void)
{
-#if __LINUX_ARM_ARCH__ >= 7
- __asm__ __volatile__ (
- "dsb ishst\n"
- SEV
- );
-#else
- __asm__ __volatile__ (
- "mcr p15, 0, %0, c7, c10, 4\n"
- SEV
- : : "r" (0)
- );
-#endif
+
+ dsb(ishst);
+ __asm__(SEV);
}
/*
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 47cd974e57ea..c96ecacb2021 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -177,6 +177,18 @@ __lookup_processor_type_data:
.long __proc_info_end
.size __lookup_processor_type_data, . - __lookup_processor_type_data
+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+ adr r0, str_lpae
+ bl printascii
+ b __error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+ b __error
+#endif
+ .align
+ENDPROC(__error_lpae)
+
__error_p:
#ifdef CONFIG_DEBUG_LL
adr r0, str_p1
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 914616e0bdcd..f5f381d91556 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,7 +102,7 @@ ENTRY(stext)
and r3, r3, #0xf @ extract VMSA support
cmp r3, #5 @ long-descriptor translation table format?
THUMB( it lo ) @ force fixup-able long branch encoding
- blo __error_p @ only classic page table format
+ blo __error_lpae @ only classic page table format
#endif
#ifndef CONFIG_XIP_KERNEL
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index b0df9761de6d..1e8b030dbefd 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -731,7 +731,7 @@ static void __init request_standard_resources(const struct machine_desc *mdesc)
kernel_data.end = virt_to_phys(_end - 1);
for_each_memblock(memory, region) {
- res = memblock_virt_alloc_low(sizeof(*res), 0);
+ res = memblock_virt_alloc(sizeof(*res), 0);
res->name = "System RAM";
res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 1d8248ea5669..bd18bb8b2770 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -878,7 +878,8 @@ static int hyp_init_cpu_pm_notifier(struct notifier_block *self,
unsigned long cmd,
void *v)
{
- if (cmd == CPU_PM_EXIT) {
+ if (cmd == CPU_PM_EXIT &&
+ __hyp_get_vectors() == hyp_default_vectors) {
cpu_init_hyp_mode(NULL);
return NOTIFY_OK;
}
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index ddc15539bad2..0d68d4073068 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -220,6 +220,10 @@ after_vfp_restore:
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
* passed in r0 and r1.
*
+ * A function pointer with a value of 0xffffffff has a special meaning,
+ * and is used to implement __hyp_get_vectors in the same way as in
+ * arch/arm/kernel/hyp_stub.S.
+ *
* The calling convention follows the standard AAPCS:
* r0 - r3: caller save
* r12: caller save
@@ -363,6 +367,11 @@ hyp_hvc:
host_switch_to_hyp:
pop {r0, r1, r2}
+ /* Check for __hyp_get_vectors */
+ cmp r0, #-1
+ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
+ beq 1f
+
push {lr}
mrs lr, SPSR
push {lr}
@@ -378,7 +387,7 @@ THUMB( orr lr, #1)
pop {lr}
msr SPSR_csxf, lr
pop {lr}
- eret
+1: eret
guest_trap:
load_vcpu @ Load VCPU pointer to r0
diff --git a/arch/arm/mach-hisi/Kconfig b/arch/arm/mach-hisi/Kconfig
index 8f4649b301b2..1abae5f6a418 100644
--- a/arch/arm/mach-hisi/Kconfig
+++ b/arch/arm/mach-hisi/Kconfig
@@ -8,7 +8,7 @@ config ARCH_HI3xxx
select CLKSRC_OF
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU
- select HAVE_ARM_TWD
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select PINCTRL
select PINCTRL_SINGLE
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index befcaf5d0574..ec419649320f 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -101,11 +101,9 @@ obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
obj-$(CONFIG_SOC_IMX6SL) += clk-imx6sl.o mach-imx6sl.o
-ifeq ($(CONFIG_PM),y)
obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o headsmp.o
# i.MX6SL reuses i.MX6Q code
obj-$(CONFIG_SOC_IMX6SL) += pm-imx6q.o headsmp.o
-endif
# i.MX5 based machines
obj-$(CONFIG_MACH_MX51_BABBAGE) += mach-mx51_babbage.o
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index af2e582d2b74..4d677f442539 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -482,6 +482,9 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
if (IS_ENABLED(CONFIG_PCI_IMX6))
clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
base = of_iomap(np, 0);
WARN_ON(!base);
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 3781a1853998..4c86f3035205 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -266,6 +266,9 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
/* Audio-related clocks configuration */
clk_set_parent(clks[IMX6SL_CLK_SPDIF0_SEL], clks[IMX6SL_CLK_PLL3_PFD3]);
+ /* Set initial power mode */
+ imx6q_set_lpm(WAIT_CLOCKED);
+
np = of_find_compatible_node(NULL, NULL, "fsl,imx6sl-gpt");
base = of_iomap(np, 0);
WARN_ON(!base);
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 59c3b9b26bb4..baf439dc22d8 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -144,13 +144,11 @@ void imx6q_set_chicken_bit(void);
void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);
-#ifdef CONFIG_PM
void imx6q_pm_init(void);
void imx6q_pm_set_ccm_base(void __iomem *base);
+#ifdef CONFIG_PM
void imx5_pm_init(void);
#else
-static inline void imx6q_pm_init(void) {}
-static inline void imx6q_pm_set_ccm_base(void __iomem *base) {}
static inline void imx5_pm_init(void) {}
#endif
diff --git a/arch/arm/mach-imx/pm-imx6q.c b/arch/arm/mach-imx/pm-imx6q.c
index 9d47adc078aa..7a9b98589db7 100644
--- a/arch/arm/mach-imx/pm-imx6q.c
+++ b/arch/arm/mach-imx/pm-imx6q.c
@@ -236,8 +236,6 @@ void __init imx6q_pm_init(void)
regmap_update_bits(gpr, IOMUXC_GPR1, IMX6Q_GPR1_GINT,
IMX6Q_GPR1_GINT);
- /* Set initial power mode */
- imx6q_set_lpm(WAIT_CLOCKED);
suspend_set_ops(&imx6q_pm_ops);
}
diff --git a/arch/arm/mach-moxart/Kconfig b/arch/arm/mach-moxart/Kconfig
index ba470d64493b..3795ae28a613 100644
--- a/arch/arm/mach-moxart/Kconfig
+++ b/arch/arm/mach-moxart/Kconfig
@@ -2,7 +2,6 @@ config ARCH_MOXART
bool "MOXA ART SoC" if ARCH_MULTI_V4T
select CPU_FA526
select ARM_DMA_MEM_BUFFERABLE
- select DMA_OF
select USE_OF
select CLKSRC_OF
select CLKSRC_MMIO
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index 91449c5cb70f..85089d821982 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -156,6 +156,7 @@ static struct omap_usb_config nokia770_usb_config __initdata = {
.register_dev = 1,
.hmc_mode = 16,
.pins[0] = 6,
+ .extcon = "tahvo-usb",
};
#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 653b489479e0..0af7ca02314d 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -50,11 +50,12 @@ config SOC_OMAP5
bool "TI OMAP5"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select CPU_V7
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select HAVE_ARM_ARCH_TIMER
select ARM_ERRATA_798181 if SMP
@@ -63,6 +64,7 @@ config SOC_AM33XX
bool "TI AM33XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select CPU_V7
select MULTI_IRQ_HANDLER
@@ -72,6 +74,7 @@ config SOC_AM43XX
depends on ARCH_MULTI_V7
select CPU_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select MULTI_IRQ_HANDLER
select ARM_GIC
select MACH_OMAP_GENERIC
@@ -80,6 +83,7 @@ config SOC_DRA7XX
bool "TI DRA7XX"
depends on ARCH_MULTI_V7
select ARCH_OMAP2PLUS
+ select ARCH_HAS_OPP
select ARM_CPU_SUSPEND if PM
select ARM_GIC
select CPU_V7
@@ -268,9 +272,6 @@ config MACH_OMAP_3430SDP
default y
select OMAP_PACKAGE_CBB
-config MACH_NOKIA_N800
- bool
-
config MACH_NOKIA_N810
bool
@@ -281,7 +282,6 @@ config MACH_NOKIA_N8X0
bool "Nokia N800/N810"
depends on SOC_OMAP2420
default y
- select MACH_NOKIA_N800
select MACH_NOKIA_N810
select MACH_NOKIA_N810_WIMAX
select OMAP_PACKAGE_ZAC
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 3b05aea56d1f..11ed9152e665 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = {
.enable = &omap2_dflt_clk_enable,
.disable = &omap2_dflt_clk_disable,
.is_enabled = &omap2_dflt_clk_is_enabled,
+ .set_rate = &omap3_clkoutx2_set_rate,
.recalc_rate = &omap3_clkoutx2_recalc,
+ .round_rate = &omap3_clkoutx2_round_rate,
};
static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c158c838d40..01fc710c8181 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -23,6 +23,8 @@
#include "prm.h"
#include "clockdomain.h"
+#define MAX_CPUS 2
+
/* Machine specific information */
struct idle_statedata {
u32 cpu_state;
@@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = {
},
};
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];
static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
static struct idle_statedata *state_ptr = &omap4_idle_data[0];
/* Private functions */
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 3185ced807c9..3c418ea54bbe 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -623,6 +623,32 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
/* Clock control for DPLL outputs */
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
+{
+ struct clk_hw_omap *pclk = NULL;
+ struct clk *parent;
+
+ /* Walk up the parents of clk, looking for a DPLL */
+ do {
+ do {
+ parent = __clk_get_parent(hw->clk);
+ hw = __clk_get_hw(parent);
+ } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
+ if (!hw)
+ break;
+ pclk = to_clk_hw_omap(hw);
+ } while (pclk && !pclk->dpll_data);
+
+ /* clk does not have a DPLL as a parent? error in the clock data */
+ if (!pclk) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return pclk;
+}
+
/**
* omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
* @clk: DPLL output struct clk
@@ -637,27 +663,14 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long rate;
u32 v;
struct clk_hw_omap *pclk = NULL;
- struct clk *parent;
if (!parent_rate)
return 0;
- /* Walk up the parents of clk, looking for a DPLL */
- do {
- do {
- parent = __clk_get_parent(hw->clk);
- hw = __clk_get_hw(parent);
- } while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
- if (!hw)
- break;
- pclk = to_clk_hw_omap(hw);
- } while (pclk && !pclk->dpll_data);
+ pclk = omap3_find_clkoutx2_dpll(hw);
- /* clk does not have a DPLL as a parent? error in the clock data */
- if (!pclk) {
- WARN_ON(1);
+ if (!pclk)
return 0;
- }
dd = pclk->dpll_data;
@@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
return rate;
}
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ return 0;
+}
+
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate)
+{
+ const struct dpll_data *dd;
+ u32 v;
+ struct clk_hw_omap *pclk = NULL;
+
+ if (!*prate)
+ return 0;
+
+ pclk = omap3_find_clkoutx2_dpll(hw);
+
+ if (!pclk)
+ return 0;
+
+ dd = pclk->dpll_data;
+
+ /* TYPE J does not have a clkoutx2 */
+ if (dd->flags & DPLL_J_TYPE) {
+ *prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
+ return *prate;
+ }
+
+ WARN_ON(!dd->enable_mask);
+
+ v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
+ v >>= __ffs(dd->enable_mask);
+
+ /* If in bypass, the rate is fixed to the bypass rate*/
+ if (v != OMAP3XXX_EN_DPLL_LOCKED)
+ return *prate;
+
+ if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+ unsigned long best_parent;
+
+ best_parent = (rate / 2);
+ *prate = __clk_round_rate(__clk_get_parent(hw->clk),
+ best_parent);
+ }
+
+ return *prate * 2;
+}
+
/* OMAP3/4 non-CORE DPLL clkops */
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
.allow_idle = omap3_dpll_allow_idle,
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index d24926e6340f..ab43755364f5 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1339,7 +1339,7 @@ static void __maybe_unused gpmc_read_timings_dt(struct device_node *np,
of_property_read_bool(np, "gpmc,time-para-granularity");
}
-#ifdef CONFIG_MTD_NAND
+#if IS_ENABLED(CONFIG_MTD_NAND)
static const char * const nand_xfer_types[] = {
[NAND_OMAP_PREFETCH_POLLED] = "prefetch-polled",
@@ -1429,7 +1429,7 @@ static int gpmc_probe_nand_child(struct platform_device *pdev,
}
#endif
-#ifdef CONFIG_MTD_ONENAND
+#if IS_ENABLED(CONFIG_MTD_ONENAND)
static int gpmc_probe_onenand_child(struct platform_device *pdev,
struct device_node *child)
{
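Editor's note: the two gpmc.c hunks above replace plain #ifdef guards with IS_ENABLED() so the NAND/OneNAND child-probing code is also built when the MTD drivers are modular. A minimal sketch of the difference, assuming the usual <linux/kconfig.h> semantics; CONFIG_EXAMPLE_DRIVER is a made-up symbol used only for illustration:

#include <linux/kconfig.h>

#ifdef CONFIG_EXAMPLE_DRIVER		/* defined only for =y (built-in) */
/* compiled out when the driver is =m */
#endif

#if IS_ENABLED(CONFIG_EXAMPLE_DRIVER)	/* true for =y or =m */
/* compiled for built-in and modular configurations alike */
#endif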
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index d408b15b4fbf..af432b191255 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -179,15 +179,6 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
.length = L4_EMU_34XX_SIZE,
.type = MT_DEVICE
},
-#if defined(CONFIG_DEBUG_LL) && \
- (defined(CONFIG_MACH_OMAP_ZOOM2) || defined(CONFIG_MACH_OMAP_ZOOM3))
- {
- .virtual = ZOOM_UART_VIRT,
- .pfn = __phys_to_pfn(ZOOM_UART_BASE),
- .length = SZ_1M,
- .type = MT_DEVICE
- },
-#endif
};
#endif
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 42d81885c700..1f33f5db10d5 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh)
goto dis_opt_clks;
_write_sysconfig(v, oh);
- ret = _clear_softreset(oh, &v);
- if (ret)
- goto dis_opt_clks;
-
- _write_sysconfig(v, oh);
if (oh->class->sysc->srst_udelay)
udelay(oh->class->sysc->srst_udelay);
c = _wait_softreset_complete(oh);
- if (c == MAX_MODULE_SOFTRESET_WAIT)
+ if (c == MAX_MODULE_SOFTRESET_WAIT) {
pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
oh->name, MAX_MODULE_SOFTRESET_WAIT);
- else
+ ret = -ETIMEDOUT;
+ goto dis_opt_clks;
+ } else {
pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+ }
+
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto dis_opt_clks;
+
+ _write_sysconfig(v, oh);
/*
* XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
* _wait_target_ready() or _reset()
*/
- ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
dis_opt_clks:
if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
_disable_optional_clocks(oh);
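Editor's note: the omap_hwmod.c hunk above reorders _ocp_softreset() so the SOFTRESET bit is cleared only after the reset has been observed to complete, and a timeout now aborts with -ETIMEDOUT before touching that bit. A hedged sketch of that ordering; the register layout, bit positions and poll loop below are illustrative, not the hwmod API:

#include <linux/io.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/bitops.h>

static int example_ocp_softreset(void __iomem *sysc, void __iomem *syss)
{
	int t = 10000;

	writel(readl(sysc) | BIT(0), sysc);	/* assert SOFTRESET */
	while (!(readl(syss) & BIT(0)) && --t)	/* poll RESETDONE */
		udelay(1);
	if (!t)
		return -ETIMEDOUT;		/* timed out: bail before clearing the bit */
	writel(readl(sysc) & ~BIT(0), sysc);	/* clear SOFTRESET only on success */
	return 0;
}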
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c440db..810c205d668b 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
- .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
- SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
- .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
- SIDLE_SMART_WKUP),
+ .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSS_HAS_RESET_STATUS),
+ .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
};
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d5b24dcd9a4..c33e07e2f0d4 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -22,6 +22,8 @@
#include "common-board-devices.h"
#include "dss-common.h"
#include "control.h"
+#include "omap-secure.h"
+#include "soc.h"
struct pdata_init {
const char *compatible;
@@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void)
omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
}
+
+static void __init nokia_n900_legacy_init(void)
+{
+ hsmmc2_internal_input_clk();
+
+ if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+ if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
+ pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+ /* set IBE to 1 */
+ rx51_secure_update_aux_cr(BIT(6), 0);
+ } else {
+ pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
+ pr_warning("Thumb binaries may crash randomly without this workaround\n");
+ }
+ }
+}
#endif /* CONFIG_ARCH_OMAP3 */
#ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
#endif
#ifdef CONFIG_ARCH_OMAP3
OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+ OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
/* Only on am3517 */
OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
static struct pdata_init pdata_quirks[] __initdata = {
#ifdef CONFIG_ARCH_OMAP3
{ "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
- { "nokia,omap3-n900", hsmmc2_internal_input_clk, },
+ { "nokia,omap3-n900", nokia_n900_legacy_init, },
{ "nokia,omap3-n9", hsmmc2_internal_input_clk, },
{ "nokia,omap3-n950", hsmmc2_internal_input_clk, },
{ "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 6334b96b4097..280f3c58abe5 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void)
OMAP4_PRM_RSTCTRL_OFFSET);
v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
- OMAP4430_PRM_DEVICE_INST,
+ dev_inst,
OMAP4_PRM_RSTCTRL_OFFSET);
/* OCP barrier */
v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
- OMAP4430_PRM_DEVICE_INST,
+ dev_inst,
OMAP4_PRM_RSTCTRL_OFFSET);
}
diff --git a/arch/arm/mach-pxa/am300epd.c b/arch/arm/mach-pxa/am300epd.c
index c9f309ae88c5..8b90c4f2d430 100644
--- a/arch/arm/mach-pxa/am300epd.c
+++ b/arch/arm/mach-pxa/am300epd.c
@@ -30,6 +30,7 @@
#include <mach/gumstix.h>
#include <mach/mfp-pxa25x.h>
+#include <mach/irqs.h>
#include <linux/platform_data/video-pxafb.h>
#include "generic.h"
diff --git a/arch/arm/mach-pxa/include/mach/balloon3.h b/arch/arm/mach-pxa/include/mach/balloon3.h
index 954641e6c8b1..1b0825911e62 100644
--- a/arch/arm/mach-pxa/include/mach/balloon3.h
+++ b/arch/arm/mach-pxa/include/mach/balloon3.h
@@ -14,6 +14,8 @@
#ifndef ASM_ARCH_BALLOON3_H
#define ASM_ARCH_BALLOON3_H
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
enum balloon3_features {
BALLOON3_FEATURE_OHCI,
BALLOON3_FEATURE_MMC,
diff --git a/arch/arm/mach-pxa/include/mach/corgi.h b/arch/arm/mach-pxa/include/mach/corgi.h
index f3c3493b468d..c030d955bbd7 100644
--- a/arch/arm/mach-pxa/include/mach/corgi.h
+++ b/arch/arm/mach-pxa/include/mach/corgi.h
@@ -13,6 +13,7 @@
#ifndef __ASM_ARCH_CORGI_H
#define __ASM_ARCH_CORGI_H 1
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
/*
* Corgi (Non Standard) GPIO Definitions
diff --git a/arch/arm/mach-pxa/include/mach/csb726.h b/arch/arm/mach-pxa/include/mach/csb726.h
index 2628e7b72116..00cfbbbf73f7 100644
--- a/arch/arm/mach-pxa/include/mach/csb726.h
+++ b/arch/arm/mach-pxa/include/mach/csb726.h
@@ -11,6 +11,8 @@
#ifndef CSB726_H
#define CSB726_H
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
#define CSB726_GPIO_IRQ_LAN 52
#define CSB726_GPIO_IRQ_SM501 53
#define CSB726_GPIO_MMC_DETECT 100
diff --git a/arch/arm/mach-pxa/include/mach/gumstix.h b/arch/arm/mach-pxa/include/mach/gumstix.h
index dba14b6503ad..f7df27bbb42e 100644
--- a/arch/arm/mach-pxa/include/mach/gumstix.h
+++ b/arch/arm/mach-pxa/include/mach/gumstix.h
@@ -6,6 +6,7 @@
* published by the Free Software Foundation.
*/
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/* BTRESET - Reset line to Bluetooth module, active low signal. */
#define GPIO_GUMSTIX_BTRESET 7
diff --git a/arch/arm/mach-pxa/include/mach/idp.h b/arch/arm/mach-pxa/include/mach/idp.h
index 22a96f87232b..7e63f4680271 100644
--- a/arch/arm/mach-pxa/include/mach/idp.h
+++ b/arch/arm/mach-pxa/include/mach/idp.h
@@ -23,6 +23,7 @@
* IDP hardware.
*/
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
#define IDP_FLASH_PHYS (PXA_CS0_PHYS)
#define IDP_ALT_FLASH_PHYS (PXA_CS1_PHYS)
diff --git a/arch/arm/mach-pxa/include/mach/palmld.h b/arch/arm/mach-pxa/include/mach/palmld.h
index 2c4471336570..b184f296023b 100644
--- a/arch/arm/mach-pxa/include/mach/palmld.h
+++ b/arch/arm/mach-pxa/include/mach/palmld.h
@@ -13,6 +13,8 @@
#ifndef _INCLUDE_PALMLD_H_
#define _INCLUDE_PALMLD_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmt5.h b/arch/arm/mach-pxa/include/mach/palmt5.h
index 0bd4f036c72f..e342c5921405 100644
--- a/arch/arm/mach-pxa/include/mach/palmt5.h
+++ b/arch/arm/mach-pxa/include/mach/palmt5.h
@@ -15,6 +15,8 @@
#ifndef _INCLUDE_PALMT5_H_
#define _INCLUDE_PALMT5_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtc.h b/arch/arm/mach-pxa/include/mach/palmtc.h
index c383a21680b6..81c727b3cfd2 100644
--- a/arch/arm/mach-pxa/include/mach/palmtc.h
+++ b/arch/arm/mach-pxa/include/mach/palmtc.h
@@ -16,6 +16,8 @@
#ifndef _INCLUDE_PALMTC_H_
#define _INCLUDE_PALMTC_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/palmtx.h b/arch/arm/mach-pxa/include/mach/palmtx.h
index f2e530380253..92bc1f05300d 100644
--- a/arch/arm/mach-pxa/include/mach/palmtx.h
+++ b/arch/arm/mach-pxa/include/mach/palmtx.h
@@ -16,6 +16,8 @@
#ifndef _INCLUDE_PALMTX_H_
#define _INCLUDE_PALMTX_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/** HERE ARE GPIOs **/
/* GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/pcm027.h b/arch/arm/mach-pxa/include/mach/pcm027.h
index 6bf28de228bd..86ebd7b6c960 100644
--- a/arch/arm/mach-pxa/include/mach/pcm027.h
+++ b/arch/arm/mach-pxa/include/mach/pcm027.h
@@ -23,6 +23,8 @@
* Definitions of CPU card resources only
*/
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/* phyCORE-PXA270 (PCM027) Interrupts */
#define PCM027_IRQ(x) (IRQ_BOARD_START + (x))
#define PCM027_BTDET_IRQ PCM027_IRQ(0)
diff --git a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
index 0260aaa2fc17..7e544c14967e 100644
--- a/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
+++ b/arch/arm/mach-pxa/include/mach/pcm990_baseboard.h
@@ -20,6 +20,7 @@
*/
#include <mach/pcm027.h>
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
/*
* definitions relevant only when the PCM-990
diff --git a/arch/arm/mach-pxa/include/mach/poodle.h b/arch/arm/mach-pxa/include/mach/poodle.h
index f32ff75dcca8..b56b19351a03 100644
--- a/arch/arm/mach-pxa/include/mach/poodle.h
+++ b/arch/arm/mach-pxa/include/mach/poodle.h
@@ -15,6 +15,8 @@
#ifndef __ASM_ARCH_POODLE_H
#define __ASM_ARCH_POODLE_H 1
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/*
* GPIOs
*/
diff --git a/arch/arm/mach-pxa/include/mach/spitz.h b/arch/arm/mach-pxa/include/mach/spitz.h
index 0bfe6507c95d..25c9f62e46aa 100644
--- a/arch/arm/mach-pxa/include/mach/spitz.h
+++ b/arch/arm/mach-pxa/include/mach/spitz.h
@@ -15,8 +15,8 @@
#define __ASM_ARCH_SPITZ_H 1
#endif
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO, PXA_GPIO_TO_IRQ */
#include <linux/fb.h>
-#include <linux/gpio.h>
/* Spitz/Akita GPIOs */
diff --git a/arch/arm/mach-pxa/include/mach/tosa.h b/arch/arm/mach-pxa/include/mach/tosa.h
index 2bb0e862598c..0497d95cef25 100644
--- a/arch/arm/mach-pxa/include/mach/tosa.h
+++ b/arch/arm/mach-pxa/include/mach/tosa.h
@@ -13,6 +13,8 @@
#ifndef _ASM_ARCH_TOSA_H_
#define _ASM_ARCH_TOSA_H_ 1
+#include "irqs.h" /* PXA_NR_BUILTIN_GPIO */
+
/* TOSA Chip selects */
#define TOSA_LCDC_PHYS PXA_CS4_PHYS
/* Internel Scoop */
diff --git a/arch/arm/mach-pxa/include/mach/trizeps4.h b/arch/arm/mach-pxa/include/mach/trizeps4.h
index d2ca01053f69..ae3ca013afab 100644
--- a/arch/arm/mach-pxa/include/mach/trizeps4.h
+++ b/arch/arm/mach-pxa/include/mach/trizeps4.h
@@ -10,6 +10,8 @@
#ifndef _TRIPEPS4_H_
#define _TRIPEPS4_H_
+#include "irqs.h" /* PXA_GPIO_TO_IRQ */
+
/* physical memory regions */
#define TRIZEPS4_FLASH_PHYS (PXA_CS0_PHYS) /* Flash region */
#define TRIZEPS4_DISK_PHYS (PXA_CS1_PHYS) /* Disk On Chip region */
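Editor's note: each mach-pxa board header touched above gains an explicit #include "irqs.h" because it uses PXA_GPIO_TO_IRQ()/PXA_NR_BUILTIN_GPIO at definition time and previously relied on whichever file included it first. A small hedged illustration of the pattern; the macro names and GPIO number below are invented for the example:

#include "irqs.h"			/* provides PXA_GPIO_TO_IRQ() */

#define EXAMPLE_ETH_GPIO	52	/* hypothetical board wiring */
#define EXAMPLE_ETH_IRQ		PXA_GPIO_TO_IRQ(EXAMPLE_ETH_GPIO)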
diff --git a/arch/arm/mach-pxa/mioa701.c b/arch/arm/mach-pxa/mioa701.c
index f70583fee59f..29997bde277d 100644
--- a/arch/arm/mach-pxa/mioa701.c
+++ b/arch/arm/mach-pxa/mioa701.c
@@ -38,6 +38,7 @@
#include <linux/mtd/physmap.h>
#include <linux/usb/gpio_vbus.h>
#include <linux/reboot.h>
+#include <linux/regulator/fixed.h>
#include <linux/regulator/max1586.h>
#include <linux/slab.h>
#include <linux/i2c/pxa-i2c.h>
@@ -714,6 +715,10 @@ static struct gpio global_gpios[] = {
{ GPIO56_MT9M111_nOE, GPIOF_OUT_INIT_LOW, "Camera nOE" },
};
+static struct regulator_consumer_supply fixed_5v0_consumers[] = {
+ REGULATOR_SUPPLY("power", "pwm-backlight"),
+};
+
static void __init mioa701_machine_init(void)
{
int rc;
@@ -753,6 +758,10 @@ static void __init mioa701_machine_init(void)
pxa_set_i2c_info(&i2c_pdata);
pxa27x_set_i2c_power_info(NULL);
pxa_set_camera_info(&mioa701_pxacamera_platform_data);
+
+ regulator_register_always_on(0, "fixed-5.0V", fixed_5v0_consumers,
+ ARRAY_SIZE(fixed_5v0_consumers),
+ 5000000);
}
static void mioa701_machine_exit(void)
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d2d3ee..50e1d850ee2e 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
#ifndef __ASM_ARCH_COLLIE_H
#define __ASM_ARCH_COLLIE_H
+#include "hardware.h" /* Gives GPIO_MAX */
+
extern void locomolcd_power(int on);
#define COLLIE_SCOOP_GPIO_BASE (GPIO_MAX + 1)
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index 338640631e08..05fa505df585 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -8,7 +8,7 @@ config ARCH_SHMOBILE_MULTI
select CPU_V7
select GENERIC_CLOCKEVENTS
select HAVE_ARM_SCU if SMP
- select HAVE_ARM_TWD if LOCAL_TIMERS
+ select HAVE_ARM_TWD if SMP
select HAVE_SMP
select ARM_GIC
select MIGHT_HAVE_CACHE_L2X0
diff --git a/arch/arm/mach-tegra/pm.c b/arch/arm/mach-tegra/pm.c
index 4ae0286b468d..f55b05a29b55 100644
--- a/arch/arm/mach-tegra/pm.c
+++ b/arch/arm/mach-tegra/pm.c
@@ -24,6 +24,7 @@
#include <linux/cpu_pm.h>
#include <linux/suspend.h>
#include <linux/err.h>
+#include <linux/slab.h>
#include <linux/clk/tegra.h>
#include <asm/smp_plat.h>
diff --git a/arch/arm/mach-tegra/tegra.c b/arch/arm/mach-tegra/tegra.c
index 303a285d80fd..6191603379e1 100644
--- a/arch/arm/mach-tegra/tegra.c
+++ b/arch/arm/mach-tegra/tegra.c
@@ -73,10 +73,20 @@ u32 tegra_uart_config[3] = {
static void __init tegra_init_cache(void)
{
#ifdef CONFIG_CACHE_L2X0
+ static const struct of_device_id pl310_ids[] __initconst = {
+ { .compatible = "arm,pl310-cache", },
+ {}
+ };
+
+ struct device_node *np;
int ret;
void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000;
u32 aux_ctrl, cache_type;
+ np = of_find_matching_node(NULL, pl310_ids);
+ if (!np)
+ return;
+
cache_type = readl(p + L2X0_CACHE_TYPE);
aux_ctrl = (cache_type & 0x700) << (17-8);
aux_ctrl |= 0x7C400001;
diff --git a/arch/arm/mach-zynq/common.c b/arch/arm/mach-zynq/common.c
index 1db2a5ca9ab8..8c09a8393fb6 100644
--- a/arch/arm/mach-zynq/common.c
+++ b/arch/arm/mach-zynq/common.c
@@ -25,6 +25,7 @@
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of.h>
+#include <linux/memblock.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic.h>
@@ -41,6 +42,18 @@
void __iomem *zynq_scu_base;
+/**
+ * zynq_memory_init - Initialize special memory
+ *
+ * We need to stop things allocating the low memory as DMA can't work in
+ * the 1st 512K of memory.
+ */
+static void __init zynq_memory_init(void)
+{
+ if (!__pa(PAGE_OFFSET))
+ memblock_reserve(__pa(PAGE_OFFSET), __pa(swapper_pg_dir));
+}
+
static struct platform_device zynq_cpuidle_device = {
.name = "cpuidle-zynq",
};
@@ -117,5 +130,6 @@ DT_MACHINE_START(XILINX_EP107, "Xilinx Zynq Platform")
.init_machine = zynq_init_machine,
.init_time = zynq_timer_init,
.dt_compat = zynq_dt_match,
+ .reserve = zynq_memory_init,
.restart = zynq_system_reset,
MACHINE_END
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 1a77450e728a..11b3914660d2 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1358,7 +1358,7 @@ static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
*handle = DMA_ERROR_CODE;
size = PAGE_ALIGN(size);
- if (gfp & GFP_ATOMIC)
+ if (!(gfp & __GFP_WAIT))
return __iommu_alloc_atomic(dev, size, handle);
/*
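The hunk above swaps a test on GFP_ATOMIC for a test on the absence of __GFP_WAIT. In kernels of this vintage GFP_ATOMIC is just __GFP_HIGH, so it says nothing about whether the caller may sleep; __GFP_WAIT is the flag that actually permits blocking, and a GFP_NOWAIT (0) allocation would have taken the sleeping path under the old check. A minimal sketch of the distinction, using a hypothetical helper name:

	#include <linux/gfp.h>

	/* Hypothetical helper: "may this allocation sleep?" is decided by
	 * __GFP_WAIT, not by comparison with GFP_ATOMIC (== __GFP_HIGH here).
	 */
	static bool allocation_can_sleep(gfp_t gfp)
	{
		return gfp & __GFP_WAIT;	/* set in GFP_KERNEL, clear in GFP_ATOMIC and GFP_NOWAIT */
	}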
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 2b3a56414271..ef69152f9b52 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
note_page(st, addr, 3, pmd_val(*pmd));
else
walk_pte(st, pmd, addr);
+
+ if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+ note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
}
}
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index d5a982d15a88..7ea641b7aa7d 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -38,6 +38,7 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
struct mem_type {
pteval_t prot_pte;
+ pteval_t prot_pte_s2;
pmdval_t prot_l1;
pmdval_t prot_sect;
unsigned int domain;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4f08c133cc25..a623cb3ad012 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -232,12 +232,16 @@ __setup("noalign", noalign_setup);
#endif /* ifdef CONFIG_CPU_CP15 / else */
#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_S2_DEVICE PROT_PTE_DEVICE
#define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
static struct mem_type mem_types[] = {
[MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
.prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
L_PTE_SHARED,
+ .prot_pte_s2 = s2_policy(PROT_PTE_S2_DEVICE) |
+ s2_policy(L_PTE_S2_MT_DEV_SHARED) |
+ L_PTE_SHARED,
.prot_l1 = PMD_TYPE_TABLE,
.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
.domain = DOMAIN_IO,
@@ -508,7 +512,8 @@ static void __init build_mem_type_table(void)
cp = &cache_policies[cachepolicy];
vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;
s2_pgprot = cp->pte_s2;
- hyp_device_pgprot = s2_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
+ s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
/*
* ARMv6 and above have extended page tables.
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 45dc29f85d56..32b3558321c4 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -208,7 +208,6 @@ __v6_setup:
mcr p15, 0, r0, c7, c14, 0 @ clean+invalidate D cache
mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
mcr p15, 0, r0, c7, c15, 0 @ clean+invalidate cache
- mcr p15, 0, r0, c7, c10, 4 @ drain write buffer
#ifdef CONFIG_MMU
mcr p15, 0, r0, c8, c7, 0 @ invalidate I + D TLBs
mcr p15, 0, r0, c2, c0, 2 @ TTB control register
@@ -218,6 +217,8 @@ __v6_setup:
ALT_UP(orr r8, r8, #TTB_FLAGS_UP)
mcr p15, 0, r8, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
+ mcr p15, 0, r0, c7, c10, 4 @ drain write buffer and
+ @ complete invalidations
adr r5, v6_crval
ldmia r5, {r5, r6}
ARM_BE8(orr r6, r6, #1 << 25) @ big-endian page tables
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index bd1781979a39..74f6033e76dd 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -351,7 +351,6 @@ __v7_setup:
4: mov r10, #0
mcr p15, 0, r10, c7, c5, 0 @ I+BTB cache invalidate
- dsb
#ifdef CONFIG_MMU
mcr p15, 0, r10, c8, c7, 0 @ invalidate I + D TLBs
v7_ttb_setup r10, r4, r8, r5 @ TTBCR, TTBRx setup
@@ -360,6 +359,7 @@ __v7_setup:
mcr p15, 0, r5, c10, c2, 0 @ write PRRR
mcr p15, 0, r6, c10, c2, 1 @ write NMRR
#endif
+ dsb @ Complete invalidations
#ifndef CONFIG_ARM_THUMBEE
mrc p15, 0, r0, c0, c1, 0 @ read ID_PFR0 for ThumbEE
and r0, r0, #(0xf << 12) @ ThumbEE enabled field
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index dd4327f09ba4..27bbcfc7202a 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -36,6 +36,7 @@ config ARM64
select HAVE_GENERIC_DMA_COHERENT
select HAVE_HW_BREAKPOINT if PERF_EVENTS
select HAVE_MEMBLOCK
+ select HAVE_PATA_PLATFORM
select HAVE_PERF_EVENTS
select IRQ_DOMAIN
select MODULES_USE_ELF_RELA
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 84139be62ae6..7959dd0ca5d5 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,4 +1,3 @@
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
@@ -19,6 +18,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_KALLSYMS_ALL=y
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
+CONFIG_JUMP_LABEL=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
@@ -27,6 +27,7 @@ CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_XGENE=y
CONFIG_SMP=y
CONFIG_PREEMPT=y
+CONFIG_CMA=y
CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -42,14 +43,17 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_WIRELESS is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
-CONFIG_BLK_DEV=y
+CONFIG_DMA_CMA=y
CONFIG_SCSI=y
# CONFIG_SCSI_PROC_FS is not set
CONFIG_BLK_DEV_SD=y
# CONFIG_SCSI_LOWLEVEL is not set
+CONFIG_ATA=y
+CONFIG_PATA_PLATFORM=y
+CONFIG_PATA_OF_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_MII=y
CONFIG_SMC91X=y
+CONFIG_SMSC911X=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
# CONFIG_SERIO_I8042 is not set
@@ -62,13 +66,19 @@ CONFIG_SERIAL_AMBA_PL011=y
CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
+CONFIG_REGULATOR=y
+CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_FB=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
-# CONFIG_USB_SUPPORT is not set
+CONFIG_USB=y
+CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_STORAGE=y
+CONFIG_MMC=y
+CONFIG_MMC_ARMMMCI=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
diff --git a/arch/arm64/include/asm/atomic.h b/arch/arm64/include/asm/atomic.h
index 01de5aaa3edc..0237f0867e37 100644
--- a/arch/arm64/include/asm/atomic.h
+++ b/arch/arm64/include/asm/atomic.h
@@ -54,8 +54,7 @@ static inline void atomic_add(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_add_return(int i, atomic_t *v)
@@ -64,14 +63,15 @@ static inline int atomic_add_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_add_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" add %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -86,8 +86,7 @@ static inline void atomic_sub(int i, atomic_t *v)
" stxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline int atomic_sub_return(int i, atomic_t *v)
@@ -96,14 +95,15 @@ static inline int atomic_sub_return(int i, atomic_t *v)
int result;
asm volatile("// atomic_sub_return\n"
-"1: ldaxr %w0, %2\n"
+"1: ldxr %w0, %2\n"
" sub %w0, %w0, %w3\n"
" stlxr %w1, %w0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -112,17 +112,20 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
unsigned long tmp;
int oldval;
+ smp_mb();
+
asm volatile("// atomic_cmpxchg\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" cmp %w1, %w3\n"
" b.ne 2f\n"
-" stlxr %w0, %w4, %2\n"
+" stxr %w0, %w4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (tmp), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -173,8 +176,7 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_add_return(long i, atomic64_t *v)
@@ -183,14 +185,15 @@ static inline long atomic64_add_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_add_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" add %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -205,8 +208,7 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
" stxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
- : "Ir" (i)
- : "cc");
+ : "Ir" (i));
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
@@ -215,14 +217,15 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_sub_return\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" sub %0, %0, %3\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
: "Ir" (i)
- : "cc", "memory");
+ : "memory");
+ smp_mb();
return result;
}
@@ -231,17 +234,20 @@ static inline long atomic64_cmpxchg(atomic64_t *ptr, long old, long new)
long oldval;
unsigned long res;
+ smp_mb();
+
asm volatile("// atomic64_cmpxchg\n"
-"1: ldaxr %1, %2\n"
+"1: ldxr %1, %2\n"
" cmp %1, %3\n"
" b.ne 2f\n"
-" stlxr %w0, %4, %2\n"
+" stxr %w0, %4, %2\n"
" cbnz %w0, 1b\n"
"2:"
: "=&r" (res), "=&r" (oldval), "+Q" (ptr->counter)
: "Ir" (old), "r" (new)
- : "cc", "memory");
+ : "cc");
+ smp_mb();
return oldval;
}
@@ -253,11 +259,12 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
unsigned long tmp;
asm volatile("// atomic64_dec_if_positive\n"
-"1: ldaxr %0, %2\n"
+"1: ldxr %0, %2\n"
" subs %0, %0, #1\n"
" b.mi 2f\n"
" stlxr %w1, %0, %2\n"
" cbnz %w1, 1b\n"
+" dmb ish\n"
"2:"
: "=&r" (result), "=&r" (tmp), "+Q" (v->counter)
:
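Throughout these atomic.h hunks the acquire load (ldaxr) becomes a plain exclusive load (ldxr) and an explicit smp_mb() is placed around the loop, so the value-returning and cmpxchg operations give full-barrier ordering rather than only acquire/release halves. A rough sketch of the guarantee the patched primitives provide to callers, stated against the generic atomic API rather than this file's asm:

	#include <linux/atomic.h>

	/* Hypothetical illustration: with full-barrier semantics the store to
	 * *flag cannot be reordered past the atomic, and nothing after the
	 * atomic can be reordered before it, as if smp_mb() bracketed it. */
	static int counter_bump(atomic_t *v, int *flag)
	{
		*flag = 1;			/* ordered before the atomic op */
		return atomic_add_return(1, v);	/* acts as a full barrier on both sides */
	}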
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 78e20ba8806b..409ca370cfe2 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -25,7 +25,7 @@
#define wfi() asm volatile("wfi" : : : "memory")
#define isb() asm volatile("isb" : : : "memory")
-#define dsb() asm volatile("dsb sy" : : : "memory")
+#define dsb(opt) asm volatile("dsb sy" : : : "memory")
#define mb() dsb()
#define rmb() asm volatile("dsb ld" : : : "memory")
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index fea9ee327206..889324981aa4 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -116,6 +116,7 @@ extern void flush_dcache_page(struct page *);
static inline void __flush_icache_all(void)
{
asm("ic ialluis");
+ dsb();
}
#define flush_dcache_mmap_lock(mapping) \
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 56166d7f4a25..57c0fa7bf711 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -29,44 +29,45 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
switch (size) {
case 1:
asm volatile("// __xchg1\n"
- "1: ldaxrb %w0, %2\n"
+ "1: ldxrb %w0, %2\n"
" stlxrb %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u8 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 2:
asm volatile("// __xchg2\n"
- "1: ldaxrh %w0, %2\n"
+ "1: ldxrh %w0, %2\n"
" stlxrh %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u16 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 4:
asm volatile("// __xchg4\n"
- "1: ldaxr %w0, %2\n"
+ "1: ldxr %w0, %2\n"
" stlxr %w1, %w3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u32 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
case 8:
asm volatile("// __xchg8\n"
- "1: ldaxr %0, %2\n"
+ "1: ldxr %0, %2\n"
" stlxr %w1, %3, %2\n"
" cbnz %w1, 1b\n"
: "=&r" (ret), "=&r" (tmp), "+Q" (*(u64 *)ptr)
: "r" (x)
- : "cc", "memory");
+ : "memory");
break;
default:
BUILD_BUG();
}
+ smp_mb();
return ret;
}
diff --git a/arch/arm64/include/asm/esr.h b/arch/arm64/include/asm/esr.h
index 78834123a32e..c4a7f940b387 100644
--- a/arch/arm64/include/asm/esr.h
+++ b/arch/arm64/include/asm/esr.h
@@ -42,7 +42,7 @@
#define ESR_EL1_EC_SP_ALIGN (0x26)
#define ESR_EL1_EC_FP_EXC32 (0x28)
#define ESR_EL1_EC_FP_EXC64 (0x2C)
-#define ESR_EL1_EC_SERRROR (0x2F)
+#define ESR_EL1_EC_SERROR (0x2F)
#define ESR_EL1_EC_BREAKPT_EL0 (0x30)
#define ESR_EL1_EC_BREAKPT_EL1 (0x31)
#define ESR_EL1_EC_SOFTSTP_EL0 (0x32)
diff --git a/arch/arm64/include/asm/futex.h b/arch/arm64/include/asm/futex.h
index 78cc3aba5d69..5f750dc96e0f 100644
--- a/arch/arm64/include/asm/futex.h
+++ b/arch/arm64/include/asm/futex.h
@@ -24,10 +24,11 @@
#define __futex_atomic_op(insn, ret, oldval, uaddr, tmp, oparg) \
asm volatile( \
-"1: ldaxr %w1, %2\n" \
+"1: ldxr %w1, %2\n" \
insn "\n" \
"2: stlxr %w3, %w0, %2\n" \
" cbnz %w3, 1b\n" \
+" dmb ish\n" \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
@@ -40,7 +41,7 @@
" .popsection\n" \
: "=&r" (ret), "=&r" (oldval), "+Q" (*uaddr), "=&r" (tmp) \
: "r" (oparg), "Ir" (-EFAULT) \
- : "cc", "memory")
+ : "memory")
static inline int
futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
@@ -111,11 +112,12 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
return -EFAULT;
asm volatile("// futex_atomic_cmpxchg_inatomic\n"
-"1: ldaxr %w1, %2\n"
+"1: ldxr %w1, %2\n"
" sub %w3, %w1, %w4\n"
" cbnz %w3, 3f\n"
"2: stlxr %w3, %w5, %2\n"
" cbnz %w3, 1b\n"
+" dmb ish\n"
"3:\n"
" .pushsection .fixup,\"ax\"\n"
"4: mov %w0, %w6\n"
@@ -127,7 +129,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
" .popsection\n"
: "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp)
: "r" (oldval), "r" (newval), "Ir" (-EFAULT)
- : "cc", "memory");
+ : "memory");
*uval = val;
return ret;
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index c98ef4771c73..0eb398655378 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -231,7 +231,7 @@
#define ESR_EL2_EC_SP_ALIGN (0x26)
#define ESR_EL2_EC_FP_EXC32 (0x28)
#define ESR_EL2_EC_FP_EXC64 (0x2C)
-#define ESR_EL2_EC_SERRROR (0x2F)
+#define ESR_EL2_EC_SERROR (0x2F)
#define ESR_EL2_EC_BREAKPT (0x30)
#define ESR_EL2_EC_BREAKPT_HYP (0x31)
#define ESR_EL2_EC_SOFTSTP (0x32)
diff --git a/arch/arm64/include/asm/percpu.h b/arch/arm64/include/asm/percpu.h
index 13fb0b3efc5f..453a179469a3 100644
--- a/arch/arm64/include/asm/percpu.h
+++ b/arch/arm64/include/asm/percpu.h
@@ -16,6 +16,8 @@
#ifndef __ASM_PERCPU_H
#define __ASM_PERCPU_H
+#ifdef CONFIG_SMP
+
static inline void set_my_cpu_offset(unsigned long off)
{
asm volatile("msr tpidr_el1, %0" :: "r" (off) : "memory");
@@ -36,6 +38,12 @@ static inline unsigned long __my_cpu_offset(void)
}
#define __my_cpu_offset __my_cpu_offset()
+#else /* !CONFIG_SMP */
+
+#define set_my_cpu_offset(x) do { } while (0)
+
+#endif /* CONFIG_SMP */
+
#include <asm-generic/percpu.h>
#endif /* __ASM_PERCPU_H */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index b524dcd17243..aa3917c8b623 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -136,11 +136,11 @@ extern struct page *empty_zero_page;
/*
* The following only work if pte_present(). Undefined behaviour otherwise.
*/
-#define pte_present(pte) (pte_val(pte) & (PTE_VALID | PTE_PROT_NONE))
-#define pte_dirty(pte) (pte_val(pte) & PTE_DIRTY)
-#define pte_young(pte) (pte_val(pte) & PTE_AF)
-#define pte_special(pte) (pte_val(pte) & PTE_SPECIAL)
-#define pte_write(pte) (pte_val(pte) & PTE_WRITE)
+#define pte_present(pte) (!!(pte_val(pte) & (PTE_VALID | PTE_PROT_NONE)))
+#define pte_dirty(pte) (!!(pte_val(pte) & PTE_DIRTY))
+#define pte_young(pte) (!!(pte_val(pte) & PTE_AF))
+#define pte_special(pte) (!!(pte_val(pte) & PTE_SPECIAL))
+#define pte_write(pte) (!!(pte_val(pte) & PTE_WRITE))
#define pte_exec(pte) (!(pte_val(pte) & PTE_UXN))
#define pte_valid_user(pte) \
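Wrapping the pte_*() tests in !! makes them return 0 or 1 instead of the raw bit. Several of these bits sit in the upper half of the 64-bit PTE value, so a caller that stores the old result in an int would silently truncate it to zero. A small userspace sketch of the hazard, with a hypothetical bit position standing in for PTE_WRITE:

	#include <stdint.h>
	#include <stdio.h>

	#define PTE_FAKE_WRITE	(1ULL << 57)	/* hypothetical high bit, like PTE_WRITE */

	#define raw_write(pte)	((pte) & PTE_FAKE_WRITE)	/* old style: returns the bit itself */
	#define norm_write(pte)	(!!((pte) & PTE_FAKE_WRITE))	/* new style: returns 0 or 1 */

	int main(void)
	{
		uint64_t pte = PTE_FAKE_WRITE;
		int a = raw_write(pte);		/* bit 57 does not fit in an int; on a typical ABI this becomes 0 */
		int b = norm_write(pte);	/* 1 */

		printf("%d %d\n", a, b);	/* prints "0 1" */
		return 0;
	}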
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 3d5cf064d7a1..c45b7b1b7197 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -132,7 +132,7 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
" cbnz %w0, 2b\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
}
static inline int arch_write_trylock(arch_rwlock_t *rw)
@@ -146,7 +146,7 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+Q" (rw->lock)
: "r" (0x80000000)
- : "cc", "memory");
+ : "memory");
return !tmp;
}
@@ -187,7 +187,7 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
" cbnz %w1, 2b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline void arch_read_unlock(arch_rwlock_t *rw)
@@ -201,7 +201,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
" cbnz %w1, 1b\n"
: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
}
static inline int arch_read_trylock(arch_rwlock_t *rw)
@@ -216,7 +216,7 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
"1:\n"
: "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
:
- : "cc", "memory");
+ : "memory");
return !tmp2;
}
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 58125bf008d3..bb8eb8a78e67 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -399,7 +399,10 @@ __SYSCALL(374, compat_sys_sendmmsg)
__SYSCALL(375, sys_setns)
__SYSCALL(376, compat_sys_process_vm_readv)
__SYSCALL(377, compat_sys_process_vm_writev)
-__SYSCALL(378, sys_ni_syscall) /* 378 for kcmp */
+__SYSCALL(378, sys_kcmp)
+__SYSCALL(379, sys_finit_module)
+__SYSCALL(380, sys_sched_setattr)
+__SYSCALL(381, sys_sched_getattr)
#define __NR_compat_syscalls 379
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 495ab6f84a61..eaf54a30bedc 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -148,6 +148,15 @@ struct kvm_arch_memory_slot {
#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+/* Device Control API: ARM VGIC */
+#define KVM_DEV_ARM_VGIC_GRP_ADDR 0
+#define KVM_DEV_ARM_VGIC_GRP_DIST_REGS 1
+#define KVM_DEV_ARM_VGIC_GRP_CPU_REGS 2
+#define KVM_DEV_ARM_VGIC_CPUID_SHIFT 32
+#define KVM_DEV_ARM_VGIC_CPUID_MASK (0xffULL << KVM_DEV_ARM_VGIC_CPUID_SHIFT)
+#define KVM_DEV_ARM_VGIC_OFFSET_SHIFT 0
+#define KVM_DEV_ARM_VGIC_OFFSET_MASK (0xffffffffULL << KVM_DEV_ARM_VGIC_OFFSET_SHIFT)
+
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
diff --git a/arch/arm64/kernel/kuser32.S b/arch/arm64/kernel/kuser32.S
index 63c48ffdf230..7787208e8cc6 100644
--- a/arch/arm64/kernel/kuser32.S
+++ b/arch/arm64/kernel/kuser32.S
@@ -38,12 +38,13 @@ __kuser_cmpxchg64: // 0xffff0f60
.inst 0xe92d00f0 // push {r4, r5, r6, r7}
.inst 0xe1c040d0 // ldrd r4, r5, [r0]
.inst 0xe1c160d0 // ldrd r6, r7, [r1]
- .inst 0xe1b20e9f // 1: ldaexd r0, r1, [r2]
+ .inst 0xe1b20f9f // 1: ldrexd r0, r1, [r2]
.inst 0xe0303004 // eors r3, r0, r4
.inst 0x00313005 // eoreqs r3, r1, r5
.inst 0x01a23e96 // stlexdeq r3, r6, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffff9 // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe8bd00f0 // pop {r4, r5, r6, r7}
.inst 0xe12fff1e // bx lr
@@ -55,11 +56,12 @@ __kuser_memory_barrier: // 0xffff0fa0
.align 5
__kuser_cmpxchg: // 0xffff0fc0
- .inst 0xe1923e9f // 1: ldaex r3, [r2]
+ .inst 0xe1923f9f // 1: ldrex r3, [r2]
.inst 0xe0533000 // subs r3, r3, r0
.inst 0x01823e91 // stlexeq r3, r1, [r2]
.inst 0x03330001 // teqeq r3, #1
.inst 0x0afffffa // beq 1b
+ .inst 0xf57ff05b // dmb ish
.inst 0xe2730000 // rsbs r0, r3, #0
.inst 0xe12fff1e // bx lr
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index c3b6c63ea5fb..38f0558f0c0a 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *frame)
frame->sp = fp + 0x10;
frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ /*
+ * -4 here because we care about the PC at time of bl,
+ * not where the return will go.
+ */
+ frame->pc = *(unsigned long *)(fp + 8) - 4;
return 0;
}
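The value saved at fp + 8 is the return address, i.e. the instruction after the bl, so reporting it directly would blame the statement following the call. Subtracting one 4-byte AArch64 instruction points the unwinder back at the call site. A tiny worked example with hypothetical addresses:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical frame record: a bl at 0x81234 saves lr = bl + 4. */
		unsigned long saved_lr = 0x81238UL;
		unsigned long call_site = saved_lr - 4;	/* back to the bl itself */

		printf("lr=0x%lx call site=0x%lx\n", saved_lr, call_site);
		return 0;
	}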
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index 65d40cf6945a..a7149cae1615 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -238,6 +238,8 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->use_syscall = use_syscall;
vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec;
vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec;
+ vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
+ vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
if (!use_syscall) {
vdso_data->cs_cycle_last = tk->clock->cycle_last;
@@ -245,8 +247,6 @@ void update_vsyscall(struct timekeeper *tk)
vdso_data->xtime_clock_nsec = tk->xtime_nsec;
vdso_data->cs_mult = tk->mult;
vdso_data->cs_shift = tk->shift;
- vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
- vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
}
smp_wmb();
diff --git a/arch/arm64/kernel/vdso/Makefile b/arch/arm64/kernel/vdso/Makefile
index d8064af42e62..6d20b7d162d8 100644
--- a/arch/arm64/kernel/vdso/Makefile
+++ b/arch/arm64/kernel/vdso/Makefile
@@ -48,7 +48,7 @@ $(obj-vdso): %.o: %.S
# Actual build commands
quiet_cmd_vdsold = VDSOL $@
- cmd_vdsold = $(CC) $(c_flags) -Wl,-T $^ -o $@
+ cmd_vdsold = $(CC) $(c_flags) -Wl,-n -Wl,-T $^ -o $@
quiet_cmd_vdsoas = VDSOA $@
cmd_vdsoas = $(CC) $(a_flags) -c -o $@ $<
diff --git a/arch/arm64/kernel/vdso/gettimeofday.S b/arch/arm64/kernel/vdso/gettimeofday.S
index f0a6d10b5211..fe652ffd34c2 100644
--- a/arch/arm64/kernel/vdso/gettimeofday.S
+++ b/arch/arm64/kernel/vdso/gettimeofday.S
@@ -103,6 +103,8 @@ ENTRY(__kernel_clock_gettime)
bl __do_get_tspec
seqcnt_check w9, 1b
+ mov x30, x2
+
cmp w0, #CLOCK_MONOTONIC
b.ne 6f
@@ -118,6 +120,9 @@ ENTRY(__kernel_clock_gettime)
ccmp w0, #CLOCK_MONOTONIC_COARSE, #0x4, ne
b.ne 8f
+ /* xtime_coarse_nsec is already right-shifted */
+ mov x12, #0
+
/* Get coarse timespec. */
adr vdso_data, _vdso_data
3: seqcnt_acquire
@@ -156,7 +161,7 @@ ENTRY(__kernel_clock_gettime)
lsr x11, x11, x12
stp x10, x11, [x1, #TSPEC_TV_SEC]
mov x0, xzr
- ret x2
+ ret
7:
mov x30, x2
8: /* Syscall fallback. */
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 3b47c36e10ff..2c56012cb2d2 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -694,6 +694,24 @@ __hyp_panic_str:
.align 2
+/*
+ * u64 kvm_call_hyp(void *hypfn, ...);
+ *
+ * This is not really a variadic function in the classic C-way and care must
+ * be taken when calling this to ensure parameters are passed in registers
+ * only, since the stack will change between the caller and the callee.
+ *
+ * Call the function with the first argument containing a pointer to the
+ * function you wish to call in Hyp mode, and subsequent arguments will be
+ * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
+ * function pointer can be passed). The function being called must be mapped
+ * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
+ * passed in r0 and r1.
+ *
+ * A function pointer with a value of 0 has a special meaning, and is
+ * used to implement __hyp_get_vectors in the same way as in
+ * arch/arm64/kernel/hyp_stub.S.
+ */
ENTRY(kvm_call_hyp)
hvc #0
ret
@@ -737,7 +755,12 @@ el1_sync: // Guest trapped into EL2
pop x2, x3
pop x0, x1
- push lr, xzr
+ /* Check for __hyp_get_vectors */
+ cbnz x0, 1f
+ mrs x0, vbar_el2
+ b 2f
+
+1: push lr, xzr
/*
* Compute the function address in EL2, and shuffle the parameters.
@@ -750,7 +773,7 @@ el1_sync: // Guest trapped into EL2
blr lr
pop lr, xzr
- eret
+2: eret
el1_trap:
/*
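The new comment block documents the kvm_call_hyp() calling convention; below is a hedged sketch of a call site. The function name, and the assumption that it is already mapped into Hyp mode by init_hyp_mode(), are hypothetical, and the required headers (asm/kvm_host.h and friends) are omitted:

	/* Hypothetical hyp-mode function; at most three arguments plus the
	 * function pointer fit in registers, so nothing may spill to the stack. */
	extern void example_hyp_fn(struct kvm_vcpu *vcpu);

	static u64 example_call(struct kvm_vcpu *vcpu)
	{
		return kvm_call_hyp(example_hyp_fn, vcpu);	/* hvc #0, runs the function at EL2 */
	}

	/* A NULL function pointer is special-cased by el1_sync above and returns
	 * the current EL2 vector base (the __hyp_get_vectors behaviour). */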
diff --git a/arch/arm64/lib/bitops.S b/arch/arm64/lib/bitops.S
index e5db797790d3..7dac371cc9a2 100644
--- a/arch/arm64/lib/bitops.S
+++ b/arch/arm64/lib/bitops.S
@@ -46,11 +46,12 @@ ENTRY( \name )
mov x2, #1
add x1, x1, x0, lsr #3 // Get word offset
lsl x4, x2, x3 // Create mask
-1: ldaxr x2, [x1]
+1: ldxr x2, [x1]
lsr x0, x2, x3 // Save old value of bit
\instr x2, x2, x4 // toggle bit
stlxr w5, x2, [x1]
cbnz w5, 1b
+ dmb ish
and x0, x0, #1
3: ret
ENDPROC(\name )
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 45b5ab54c9ee..fbd76785c5db 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -45,6 +45,7 @@ static void *arm64_swiotlb_alloc_coherent(struct device *dev, size_t size,
if (IS_ENABLED(CONFIG_DMA_CMA)) {
struct page *page;
+ size = PAGE_ALIGN(size);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
if (!page)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index f557ebbe7013..f8dc7e8fce6f 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -203,10 +203,18 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
do {
next = pmd_addr_end(addr, end);
/* try section mapping first */
- if (((addr | next | phys) & ~SECTION_MASK) == 0)
+ if (((addr | next | phys) & ~SECTION_MASK) == 0) {
+ pmd_t old_pmd = *pmd;
set_pmd(pmd, __pmd(phys | prot_sect_kernel));
- else
+ /*
+ * Check for previous table entries created during
+ * boot (__create_page_tables) and flush them.
+ */
+ if (!pmd_none(old_pmd))
+ flush_tlb_all();
+ } else {
alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys));
+ }
phys += next - addr;
} while (pmd++, addr = next, addr != end);
}
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 7083cdada657..62c6101df260 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -32,17 +32,10 @@
pgd_t *pgd_alloc(struct mm_struct *mm)
{
- pgd_t *new_pgd;
-
if (PGD_SIZE == PAGE_SIZE)
- new_pgd = (pgd_t *)get_zeroed_page(GFP_KERNEL);
+ return (pgd_t *)get_zeroed_page(GFP_KERNEL);
else
- new_pgd = kzalloc(PGD_SIZE, GFP_KERNEL);
-
- if (!new_pgd)
- return NULL;
-
- return new_pgd;
+ return kzalloc(PGD_SIZE, GFP_KERNEL);
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
diff --git a/arch/avr32/Makefile b/arch/avr32/Makefile
index 22fb66590dcd..dba48a5d5bb9 100644
--- a/arch/avr32/Makefile
+++ b/arch/avr32/Makefile
@@ -11,7 +11,7 @@ all: uImage vmlinux.elf
KBUILD_DEFCONFIG := atstk1002_defconfig
-KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic
+KBUILD_CFLAGS += -pipe -fno-builtin -mno-pic -D__linux__
KBUILD_AFLAGS += -mrelax -mno-pic
KBUILD_CFLAGS_MODULE += -mno-relax
LDFLAGS_vmlinux += --relax
diff --git a/arch/avr32/boards/mimc200/fram.c b/arch/avr32/boards/mimc200/fram.c
index 9764a1a1073e..c1466a872b9c 100644
--- a/arch/avr32/boards/mimc200/fram.c
+++ b/arch/avr32/boards/mimc200/fram.c
@@ -11,6 +11,7 @@
#define FRAM_VERSION "1.0"
#include <linux/miscdevice.h>
+#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/mm.h>
#include <linux/io.h>
diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild
index cfb9fe1b8df9..c7c64a63c29f 100644
--- a/arch/avr32/include/asm/Kbuild
+++ b/arch/avr32/include/asm/Kbuild
@@ -17,5 +17,6 @@ generic-y += scatterlist.h
generic-y += sections.h
generic-y += topology.h
generic-y += trace_clock.h
+generic-y += vga.h
generic-y += xor.h
generic-y += hash.h
diff --git a/arch/avr32/include/asm/io.h b/arch/avr32/include/asm/io.h
index fc6483f83ccc..4f5ec2bb7172 100644
--- a/arch/avr32/include/asm/io.h
+++ b/arch/avr32/include/asm/io.h
@@ -295,6 +295,8 @@ extern void __iounmap(void __iomem *addr);
#define iounmap(addr) \
__iounmap(addr)
+#define ioremap_wc ioremap_nocache
+
#define cached(addr) P1SEGADDR(addr)
#define uncached(addr) P2SEGADDR(addr)
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 09c5a0f5f4d1..86648c083bb4 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -12,6 +12,7 @@
#define _ASM_C6X_CACHE_H
#include <linux/irqflags.h>
+#include <linux/init.h>
/*
* Cache line size
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066ceb1f6..053c17b36559 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
* definition, which doesn't have the same semantics. We don't want to
* use -fno-builtin, so just hide the name ffs.
*/
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/__fls.h>
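The object-like form "#define ffs kernel_ffs" rewrites every occurrence of the token ffs, including declarations that merely reuse the name; the function-like form only rewrites actual calls. A minimal sketch of the difference, with hypothetical structures:

	#define ffs kernel_ffs		/* old, object-like: every "ffs" token is replaced */

	struct old_ops {
		int (*ffs)(int);	/* silently becomes a member named kernel_ffs */
	};

	#undef ffs
	#define ffs(x) kernel_ffs(x)	/* new, function-like: only ffs(...) calls change */

	struct new_ops {
		int (*ffs)(int);	/* member name left intact */
	};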
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index afd45e0d552e..ae763d8bf55a 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 312 /* length of syscall table */
+#define NR_syscalls 314 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 34fd6fe46da1..715e85f858de 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -325,5 +325,7 @@
#define __NR_process_vm_writev 1333
#define __NR_accept4 1334
#define __NR_finit_module 1335
+#define __NR_sched_setattr 1336
+#define __NR_sched_getattr 1337
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index ddea607f948a..fa8d61a312a7 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1773,6 +1773,8 @@ sys_call_table:
data8 sys_process_vm_writev
data8 sys_accept4
data8 sys_finit_module // 1335
+ data8 sys_sched_setattr
+ data8 sys_sched_getattr
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf83a735..20e8a9b21d75 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
/* attempt to allocate a granule's worth of cached memory pages */
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
IA64_GRANULE_SHIFT-PAGE_SHIFT);
if (!page) {
mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 7cc8c364924d..6fb9e813a910 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,4 +1,4 @@
-
+generic-y += barrier.h
generic-y += bitsperlong.h
generic-y += clkdev.h
generic-y += cputime.h
@@ -6,6 +6,7 @@ generic-y += device.h
generic-y += emergency-restart.h
generic-y += errno.h
generic-y += exec.h
+generic-y += hash.h
generic-y += hw_irq.h
generic-y += ioctl.h
generic-y += ipcbuf.h
@@ -18,6 +19,7 @@ generic-y += local.h
generic-y += mman.h
generic-y += mutex.h
generic-y += percpu.h
+generic-y += preempt.h
generic-y += resource.h
generic-y += scatterlist.h
generic-y += sections.h
@@ -31,5 +33,3 @@ generic-y += trace_clock.h
generic-y += types.h
generic-y += word-at-a-time.h
generic-y += xor.h
-generic-y += preempt.h
-generic-y += hash.h
diff --git a/arch/m68k/include/asm/barrier.h b/arch/m68k/include/asm/barrier.h
deleted file mode 100644
index 15c5f77c1614..000000000000
--- a/arch/m68k/include/asm/barrier.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef _M68K_BARRIER_H
-#define _M68K_BARRIER_H
-
-#define nop() do { asm volatile ("nop"); barrier(); } while (0)
-
-#include <asm-generic/barrier.h>
-
-#endif /* _M68K_BARRIER_H */
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 014f288fc813..9d38b73989eb 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 349
+#define NR_syscalls 351
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 625f321001dc..b932dd470041 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -354,5 +354,7 @@
#define __NR_process_vm_writev 346
#define __NR_kcmp 347
#define __NR_finit_module 348
+#define __NR_sched_setattr 349
+#define __NR_sched_getattr 350
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 3f04ea0ab802..b6223dc41d82 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -369,4 +369,6 @@ ENTRY(sys_call_table)
.long sys_process_vm_writev
.long sys_kcmp
.long sys_finit_module
+ .long sys_sched_setattr
+ .long sys_sched_getattr /* 350 */
diff --git a/arch/microblaze/include/asm/delay.h b/arch/microblaze/include/asm/delay.h
index 05b7d39e4391..66fc24c24238 100644
--- a/arch/microblaze/include/asm/delay.h
+++ b/arch/microblaze/include/asm/delay.h
@@ -13,6 +13,8 @@
#ifndef _ASM_MICROBLAZE_DELAY_H
#define _ASM_MICROBLAZE_DELAY_H
+#include <linux/param.h>
+
extern inline void __delay(unsigned long loops)
{
asm volatile ("# __delay \n\t" \
diff --git a/arch/microblaze/include/asm/io.h b/arch/microblaze/include/asm/io.h
index a2cea7206077..3fbb7f1db3bc 100644
--- a/arch/microblaze/include/asm/io.h
+++ b/arch/microblaze/include/asm/io.h
@@ -89,6 +89,11 @@ static inline unsigned int readl(const volatile void __iomem *addr)
{
return le32_to_cpu(*(volatile unsigned int __force *)addr);
}
+#define readq readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+ return le64_to_cpu(__raw_readq(addr));
+}
static inline void writeb(unsigned char v, volatile void __iomem *addr)
{
*(volatile unsigned char __force *)addr = v;
@@ -101,6 +106,7 @@ static inline void writel(unsigned int v, volatile void __iomem *addr)
{
*(volatile unsigned int __force *)addr = cpu_to_le32(v);
}
+#define writeq(b, addr) __raw_writeq(cpu_to_le64(b), addr)
/* ioread and iowrite variants. these are for now same as __raw_
* variants of accessors. we might check for endianness in the future
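readq()/writeq() give microblaze drivers 64-bit little-endian MMIO accessors to match the existing 8/16/32-bit ones. A hedged usage sketch; the register offset and bit are invented for illustration:

	#define DEV_CTRL	0x08		/* hypothetical register offset */
	#define DEV_CTRL_EN	(1ULL << 0)	/* hypothetical enable bit */

	static void example_enable(void __iomem *regs)
	{
		u64 ctrl = readq(regs + DEV_CTRL);		/* le64_to_cpu(__raw_readq(...)) */

		writeq(ctrl | DEV_CTRL_EN, regs + DEV_CTRL);	/* cpu_to_le64() then __raw_writeq() */
	}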
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index b7fb0438458c..17645b2e2f07 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -66,7 +66,7 @@ real_start:
mts rmsr, r0
/* Disable stack protection from bootloader */
mts rslr, r0
- addi r8, r0, 0xFFFFFFF
+ addi r8, r0, 0xFFFFFFFF
mts rshr, r8
/*
* According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index dcae3a7035db..95fa1f1d5c8b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1776,12 +1776,12 @@ endchoice
config FORCE_MAX_ZONEORDER
int "Maximum zone order"
- range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
- default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
- range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
- default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
- range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
- default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
+ range 14 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ default "14" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_64KB
+ range 13 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ default "13" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_32KB
+ range 12 64 if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
+ default "12" if MIPS_HUGE_TLB_SUPPORT && PAGE_SIZE_16KB
range 11 64
default "11"
help
@@ -2353,9 +2353,8 @@ config SECCOMP
If unsure, say Y. Only embedded should say N here.
config MIPS_O32_FP64_SUPPORT
- bool "Support for O32 binaries using 64-bit FP"
+ bool "Support for O32 binaries using 64-bit FP (EXPERIMENTAL)"
depends on 32BIT || MIPS32_O32
- default y
help
When this is enabled, the kernel will support use of 64-bit floating
point registers with binaries using the O32 ABI along with the
@@ -2367,7 +2366,14 @@ config MIPS_O32_FP64_SUPPORT
of your kernel & potentially improve FP emulation performance by
saying N here.
- If unsure, say Y.
+ Although binutils currently supports use of this flag, the details
+ concerning its effect upon the O32 ABI in userland are still being
+ worked on. In order to avoid userland becoming dependent upon current
+ behaviour before the details have been finalised, this option should
+ be considered experimental and only enabled by those working upon
+ said details.
+
+ If unsure, say N.
config USE_OF
bool
diff --git a/arch/mips/alchemy/board-gpr.c b/arch/mips/alchemy/board-gpr.c
index 9edc35ff8cf1..acf9a2a37f5a 100644
--- a/arch/mips/alchemy/board-gpr.c
+++ b/arch/mips/alchemy/board-gpr.c
@@ -53,10 +53,8 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
- if (!memsize_str)
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
- else
- strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/board-mtx1.c b/arch/mips/alchemy/board-mtx1.c
index 9969dbab19e3..25a59a23547e 100644
--- a/arch/mips/alchemy/board-mtx1.c
+++ b/arch/mips/alchemy/board-mtx1.c
@@ -52,10 +52,8 @@ void __init prom_init(void)
prom_init_cmdline();
memsize_str = prom_getenv("memsize");
- if (!memsize_str)
+ if (!memsize_str || kstrtoul(memsize_str, 0, &memsize))
memsize = 0x04000000;
- else
- strict_strtoul(memsize_str, 0, &memsize);
add_memory_region(0, memsize, BOOT_MEM_RAM);
}
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index 11f3ad20321c..5483906e0f86 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -534,13 +534,10 @@ static int __init db1000_dev_init(void)
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
+ gpio_request(19, "sd0_cd");
+ gpio_request(20, "sd1_cd");
gpio_direction_input(19); /* sd0 cd# */
gpio_direction_input(20); /* sd1 cd# */
- gpio_direction_input(21); /* touch pendown# */
- gpio_direction_input(207); /* SPI MISO */
- gpio_direction_output(208, 0); /* SPI MOSI */
- gpio_direction_output(209, 1); /* SPI SCK */
- gpio_direction_output(210, 1); /* SPI CS# */
/* spi_gpio on SSI0 pins */
pfc = __raw_readl((void __iomem *)SYS_PINFUNC);
diff --git a/arch/mips/bcm47xx/board.c b/arch/mips/bcm47xx/board.c
index 6d612e2b949b..cdd8246f92b3 100644
--- a/arch/mips/bcm47xx/board.c
+++ b/arch/mips/bcm47xx/board.c
@@ -1,3 +1,4 @@
+#include <linux/errno.h>
#include <linux/export.h>
#include <linux/string.h>
#include <bcm47xx_board.h>
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 6decb27cf48b..2bed73a684ae 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -196,7 +196,7 @@ int bcm47xx_nvram_gpio_pin(const char *name)
char nvram_var[10];
char buf[30];
- for (i = 0; i < 16; i++) {
+ for (i = 0; i < 32; i++) {
err = snprintf(nvram_var, sizeof(nvram_var), "gpio%i", i);
if (err <= 0)
continue;
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 25fbfae06c1f..c2bb4f896ce7 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -975,10 +975,6 @@ static int octeon_irq_ciu_xlat(struct irq_domain *d,
if (ciu > 1 || bit > 63)
return -EINVAL;
- /* These are the GPIO lines */
- if (ciu == 0 && bit >= 16 && bit < 32)
- return -EINVAL;
-
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@@ -1007,6 +1003,10 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
+ /* Don't map irq if it is reserved for GPIO. */
+ if (line == 0 && bit >= 16 && bit < 32)
+ return 0;
+
if (line > 1 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1525,10 +1525,6 @@ static int octeon_irq_ciu2_xlat(struct irq_domain *d,
ciu = intspec[0];
bit = intspec[1];
- /* Line 7 are the GPIO lines */
- if (ciu > 6 || bit > 63)
- return -EINVAL;
-
*out_hwirq = (ciu << 6) | bit;
*out_type = 0;
@@ -1570,8 +1566,14 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
if (!octeon_irq_virq_in_range(virq))
return -EINVAL;
- /* Line 7 are the GPIO lines */
- if (line > 6 || octeon_irq_ciu_to_irq[line][bit] != 0)
+ /*
+ * Don't map irq if it is reserved for GPIO.
+ * (Line 7 are the GPIO lines.)
+ */
+ if (line == 7)
+ return 0;
+
+ if (line > 7 || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
if (octeon_irq_ciu2_is_edge(line, bit))
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 3220c93ea981..4225e99bd7bf 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -9,6 +9,7 @@
#define _ASM_ASMMACRO_H
#include <asm/hazards.h>
+#include <asm/asm-offsets.h>
#ifdef CONFIG_32BIT
#include <asm/asmmacro-32.h>
@@ -54,11 +55,21 @@
.endm
.macro local_irq_disable reg=t0
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, 1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
mfc0 \reg, CP0_STATUS
ori \reg, \reg, 1
xori \reg, \reg, 1
mtc0 \reg, CP0_STATUS
irq_disable_hazard
+#ifdef CONFIG_PREEMPT
+ lw \reg, TI_PRE_COUNT($28)
+ addi \reg, \reg, -1
+ sw \reg, TI_PRE_COUNT($28)
+#endif
.endm
#endif /* CONFIG_MIPS_MT_SMTC */
@@ -106,7 +117,7 @@
.endm
.macro fpu_save_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f
fpu_save_16odd \thread
@@ -159,7 +170,7 @@
.endm
.macro fpu_restore_double thread status tmp
-#if defined(CONFIG_MIPS64) || defined(CONFIG_CPU_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
sll \tmp, \status, 5
bgez \tmp, 10f # 16 register mode?
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index cfe092fc720d..58e50cbdb1a6 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -57,7 +57,7 @@ static inline int __enable_fpu(enum fpu_mode mode)
return 0;
case FPU_64BIT:
-#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_MIPS64))
+#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
/* we only have a 32-bit FPU */
return SIGFPE;
#endif
@@ -74,6 +74,8 @@ static inline int __enable_fpu(enum fpu_mode mode)
default:
BUG();
}
+
+ return SIGFPE;
}
#define __disable_fpu() \
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index ce35c9af0c28..992aaba603b5 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -22,12 +22,12 @@ extern void _mcount(void);
#define safe_load(load, src, dst, error) \
do { \
asm volatile ( \
- "1: " load " %[" STR(dst) "], 0(%[" STR(src) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -35,8 +35,8 @@ do { \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [dst] "=&r" (dst), [error] "=r" (error)\
- : [src] "r" (src) \
+ : [tmp_dst] "=&r" (dst), [tmp_err] "=r" (error)\
+ : [tmp_src] "r" (src) \
: "memory" \
); \
} while (0)
@@ -44,12 +44,12 @@ do { \
#define safe_store(store, src, dst, error) \
do { \
asm volatile ( \
- "1: " store " %[" STR(src) "], 0(%[" STR(dst) "])\n"\
- " li %[" STR(error) "], 0\n" \
+ "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
+ " li %[tmp_err], 0\n" \
"2:\n" \
\
".section .fixup, \"ax\"\n" \
- "3: li %[" STR(error) "], 1\n" \
+ "3: li %[tmp_err], 1\n" \
" j 2b\n" \
".previous\n" \
\
@@ -57,8 +57,8 @@ do { \
STR(PTR) "\t1b, 3b\n\t" \
".previous\n" \
\
- : [error] "=r" (error) \
- : [dst] "r" (dst), [src] "r" (src)\
+ : [tmp_err] "=r" (error) \
+ : [tmp_dst] "r" (dst), [tmp_src] "r" (src)\
: "memory" \
); \
} while (0)
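The old macros built their asm operand names from the caller's argument text via STR(), which only assembles when that argument is a plain identifier; fixed names such as tmp_dst keep the constraints independent of whatever expression is passed in. A small sketch of GCC named inline-asm operands, using a trivial MIPS add as a hypothetical helper:

	/* The bracketed operand names are local to the asm statement and do not
	 * depend on how the C expressions bound to them are spelled. */
	static inline int add_words(int a, int b)
	{
		int out;

		asm("addu %[sum], %[x], %[y]"
		    : [sum] "=r" (out)
		    : [x] "r" (a), [y] "r" (b));

		return out;
	}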
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 33e8dbfc1b63..f35b131977e6 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -13,6 +13,7 @@
#ifndef __ASM_MIPS_SYSCALL_H
#define __ASM_MIPS_SYSCALL_H
+#include <linux/compiler.h>
#include <linux/audit.h>
#include <linux/elf-em.h>
#include <linux/kernel.h>
@@ -39,14 +40,14 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
#ifdef CONFIG_32BIT
case 4: case 5: case 6: case 7:
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
#endif
#ifdef CONFIG_64BIT
case 4: case 5: case 6: case 7:
#ifdef CONFIG_MIPS32_O32
if (test_thread_flag(TIF_32BIT_REGS))
- return get_user(*arg, (int *)usp + 4 * n);
+ return get_user(*arg, (int *)usp + n);
else
#endif
*arg = regs->regs[4 + n];
@@ -57,6 +58,8 @@ static inline unsigned long mips_get_syscall_arg(unsigned long *arg,
default:
BUG();
}
+
+ unreachable();
}
static inline long syscall_get_return_value(struct task_struct *task,
@@ -83,11 +86,10 @@ static inline void syscall_get_arguments(struct task_struct *task,
unsigned int i, unsigned int n,
unsigned long *args)
{
- unsigned long arg;
int ret;
while (n--)
- ret |= mips_get_syscall_arg(&arg, task, regs, i++);
+ ret |= mips_get_syscall_arg(args++, task, regs, i++);
/*
* No way to communicate an error because this is a void function.
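For O32, syscall arguments 4-7 live on the user stack at byte offsets 16, 20, 24 and 28. Since usp is cast to int *, pointer arithmetic already scales by sizeof(int), so "+ n" is the correct index; the old "+ 4 * n" stepped 16 bytes per argument and read the wrong slots. A tiny worked example of the two offsets:

	#include <stdio.h>

	int main(void)
	{
		unsigned n = 5;			/* syscall argument index */
		unsigned word = sizeof(int);	/* int * arithmetic scales by this */

		printf("new: usp + %u bytes\n", n * word);	/* 20, the expected stack slot */
		printf("old: usp + %u bytes\n", 4 * n * word);	/* 80, far past the argument area */
		return 0;
	}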
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index b39ba25b41cc..f25181b19941 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -163,8 +163,8 @@ enum cop1_sdw_func {
*/
enum cop1x_func {
lwxc1_op = 0x00, ldxc1_op = 0x01,
- pfetch_op = 0x07, swxc1_op = 0x08,
- sdxc1_op = 0x09, madd_s_op = 0x20,
+ swxc1_op = 0x08, sdxc1_op = 0x09,
+ pfetch_op = 0x0f, madd_s_op = 0x20,
madd_d_op = 0x21, madd_e_op = 0x22,
msub_s_op = 0x28, msub_d_op = 0x29,
msub_e_op = 0x2a, nmadd_s_op = 0x30,
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 1dee279f9665..d6e154a9e6a5 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -369,16 +369,18 @@
#define __NR_process_vm_writev (__NR_Linux + 346)
#define __NR_kcmp (__NR_Linux + 347)
#define __NR_finit_module (__NR_Linux + 348)
+#define __NR_sched_setattr (__NR_Linux + 349)
+#define __NR_sched_getattr (__NR_Linux + 350)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 348
+#define __NR_Linux_syscalls 350
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 348
+#define __NR_O32_Linux_syscalls 350
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -695,16 +697,18 @@
#define __NR_kcmp (__NR_Linux + 306)
#define __NR_finit_module (__NR_Linux + 307)
#define __NR_getdents64 (__NR_Linux + 308)
+#define __NR_sched_setattr (__NR_Linux + 309)
+#define __NR_sched_getattr (__NR_Linux + 310)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 308
+#define __NR_Linux_syscalls 310
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 308
+#define __NR_64_Linux_syscalls 310
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1025,15 +1029,17 @@
#define __NR_process_vm_writev (__NR_Linux + 310)
#define __NR_kcmp (__NR_Linux + 311)
#define __NR_finit_module (__NR_Linux + 312)
+#define __NR_sched_setattr (__NR_Linux + 313)
+#define __NR_sched_getattr (__NR_Linux + 314)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 312
+#define __NR_Linux_syscalls 314
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 312
+#define __NR_N32_Linux_syscalls 314
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
index 185ba258361b..374ed74cd516 100644
--- a/arch/mips/kernel/ftrace.c
+++ b/arch/mips/kernel/ftrace.c
@@ -111,11 +111,10 @@ static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
safe_store_code(new_code1, ip, faulted);
if (unlikely(faulted))
return -EFAULT;
- ip += 4;
- safe_store_code(new_code2, ip, faulted);
+ safe_store_code(new_code2, ip + 4, faulted);
if (unlikely(faulted))
return -EFAULT;
- flush_icache_range(ip, ip + 8); /* original ip + 12 */
+ flush_icache_range(ip, ip + 8);
return 0;
}
#endif
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 253b2fb52026..73b0ddf910d4 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -35,9 +35,9 @@
LEAF(_save_fp_context)
cfc1 t1, fcr31
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -146,11 +146,11 @@ LEAF(_save_fp_context32)
* - cp1 status/control register
*/
LEAF(_restore_fp_context)
- EX lw t0, SC_FPC_CSR(a0)
+ EX lw t1, SC_FPC_CSR(a0)
-#if defined(CONFIG_64BIT) || defined(CONFIG_MIPS32_R2)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
.set push
-#ifdef CONFIG_MIPS32_R2
+#ifdef CONFIG_CPU_MIPS32_R2
.set mips64r2
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -191,7 +191,7 @@ LEAF(_restore_fp_context)
EX ldc1 $f26, SC_FPREGS+208(a0)
EX ldc1 $f28, SC_FPREGS+224(a0)
EX ldc1 $f30, SC_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context)
@@ -199,7 +199,7 @@ LEAF(_restore_fp_context)
#ifdef CONFIG_MIPS32_COMPAT
LEAF(_restore_fp_context32)
/* Restore an o32 sigcontext. */
- EX lw t0, SC32_FPC_CSR(a0)
+ EX lw t1, SC32_FPC_CSR(a0)
mfc0 t0, CP0_STATUS
sll t0, t0, 5
@@ -239,7 +239,7 @@ LEAF(_restore_fp_context32)
EX ldc1 $f26, SC32_FPREGS+208(a0)
EX ldc1 $f28, SC32_FPREGS+224(a0)
EX ldc1 $f30, SC32_FPREGS+240(a0)
- ctc1 t0, fcr31
+ ctc1 t1, fcr31
jr ra
li v0, 0 # success
END(_restore_fp_context32)
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
index 56dc69635153..758fb3cd2326 100644
--- a/arch/mips/kernel/rtlx-cmp.c
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -112,5 +112,8 @@ void __exit rtlx_module_exit(void)
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
index 91d61ba422b4..9c1aca00fd54 100644
--- a/arch/mips/kernel/rtlx-mt.c
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -144,5 +144,8 @@ void __exit rtlx_module_exit(void)
for (i = 0; i < RTLX_CHANNELS; i++)
device_destroy(mt_class, MKDEV(major, i));
+
unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index e8e541b40d86..a5b14f48e1af 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -563,3 +563,5 @@ EXPORT(sys_call_table)
PTR sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 4350 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 57e3742fec59..b56e254beb15 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,6 @@ EXPORT(sys_call_table)
PTR sys_kcmp
PTR sys_finit_module
PTR sys_getdents64
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 5310 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2f48f5934399..f7e5b72cf481 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -418,4 +418,6 @@ EXPORT(sysn32_call_table)
PTR compat_sys_process_vm_writev /* 6310 */
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index f1acdb429f4f..6788727d91af 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -541,4 +541,6 @@ EXPORT(sys32_call_table)
PTR compat_sys_process_vm_writev
PTR sys_kcmp
PTR sys_finit_module
+ PTR sys_sched_setattr
+ PTR sys_sched_getattr /* 4350 */
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 506925b2c3f3..0b4e2e38294b 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -1538,10 +1538,10 @@ static int fpux_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
break;
}
- case 0x7: /* 7 */
- if (MIPSInst_FUNC(ir) != pfetch_op) {
+ case 0x3:
+ if (MIPSInst_FUNC(ir) != pfetch_op)
return SIGILL;
- }
+
/* ignore prefx operation */
break;
diff --git a/arch/mips/mti-malta/malta-amon.c b/arch/mips/mti-malta/malta-amon.c
index 592ac0427426..84ac523b0ce0 100644
--- a/arch/mips/mti-malta/malta-amon.c
+++ b/arch/mips/mti-malta/malta-amon.c
@@ -72,7 +72,7 @@ int amon_cpu_start(int cpu,
return 0;
}
-#ifdef CONFIG_MIPS_VPE_LOADER
+#ifdef CONFIG_MIPS_VPE_LOADER_CMP
int vpe_run(struct vpe *v)
{
struct vpe_notifications *n;
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index ca3e3a46a42f..2242181a6284 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -119,7 +119,7 @@ static void malta_hw0_irqdispatch(void)
do_IRQ(MALTA_INT_BASE + irq);
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_MT
if (aprp_hook)
aprp_hook();
#endif
@@ -310,7 +310,7 @@ static void ipi_call_dispatch(void)
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
-#ifdef MIPS_VPE_APSP_API
+#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
if (aprp_hook)
aprp_hook();
#endif
diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c
index d37be36dc659..2b91b0e61566 100644
--- a/arch/mips/pci/msi-octeon.c
+++ b/arch/mips/pci/msi-octeon.c
@@ -150,6 +150,7 @@ msi_irq_allocated:
msg.address_lo =
((128ul << 20) + CVMX_PCI_MSI_RCV) & 0xffffffff;
msg.address_hi = ((128ul << 20) + CVMX_PCI_MSI_RCV) >> 32;
+ break;
case OCTEON_DMA_BAR_TYPE_BIG:
/* When using big bar, Bar 0 is based at 0 */
msg.address_lo = (0 + CVMX_PCI_MSI_RCV) & 0xffffffff;
diff --git a/arch/parisc/hpux/fs.c b/arch/parisc/hpux/fs.c
index 88d0962de65a..2bedafea3d94 100644
--- a/arch/parisc/hpux/fs.c
+++ b/arch/parisc/hpux/fs.c
@@ -33,22 +33,9 @@
int hpux_execve(struct pt_regs *regs)
{
- int error;
- struct filename *filename;
-
- filename = getname((const char __user *) regs->gr[26]);
- error = PTR_ERR(filename);
- if (IS_ERR(filename))
- goto out;
-
- error = do_execve(filename->name,
+ return do_execve(getname((const char __user *) regs->gr[26]),
(const char __user *const __user *) regs->gr[25],
(const char __user *const __user *) regs->gr[24]);
-
- putname(filename);
-
-out:
- return error;
}
struct hpux_dirent {
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 637fe031aa84..60d5d174dfe4 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -32,17 +32,6 @@ void copy_page_asm(void *to, void *from);
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
struct page *pg);
-/* #define CONFIG_PARISC_TMPALIAS */
-
-#ifdef CONFIG_PARISC_TMPALIAS
-void clear_user_highpage(struct page *page, unsigned long vaddr);
-#define clear_user_highpage clear_user_highpage
-struct vm_area_struct;
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma);
-#define __HAVE_ARCH_COPY_USER_HIGHPAGE
-#endif
-
/*
* These are used to make use of C type-checking..
*/
diff --git a/arch/parisc/include/asm/spinlock.h b/arch/parisc/include/asm/spinlock.h
index 3516e0b27044..64f2992e439f 100644
--- a/arch/parisc/include/asm/spinlock.h
+++ b/arch/parisc/include/asm/spinlock.h
@@ -191,8 +191,4 @@ static __inline__ int arch_write_can_lock(arch_rwlock_t *rw)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
#endif /* __ASM_SPINLOCK_H */
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 42706794a36f..265ae5190b0a 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -828,13 +828,13 @@
#define __NR_finit_module (__NR_Linux + 333)
#define __NR_sched_setattr (__NR_Linux + 334)
#define __NR_sched_getattr (__NR_Linux + 335)
+#define __NR_utimes (__NR_Linux + 336)
-#define __NR_Linux_syscalls (__NR_sched_getattr + 1)
+#define __NR_Linux_syscalls (__NR_utimes + 1)
#define __IGNORE_select /* newselect */
#define __IGNORE_fadvise64 /* fadvise64_64 */
-#define __IGNORE_utimes /* utime */
#define HPUX_GATEWAY_ADDR 0xC0000004
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ac87a40502e6..a6ffc775a9f8 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -581,67 +581,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
-
-#ifdef CONFIG_PARISC_TMPALIAS
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *vto;
- unsigned long flags;
-
- /* Clear using TMPALIAS region. The page doesn't need to
- be flushed but the kernel mapping needs to be purged. */
-
- vto = kmap_atomic(page);
-
- /* The PA-RISC 2.0 Architecture book states on page F-6:
- "Before a write-capable translation is enabled, *all*
- non-equivalently-aliased translations must be removed
- from the page table and purged from the TLB. (Note
- that the caches are not required to be flushed at this
- time.) Before any non-equivalent aliased translation
- is re-enabled, the virtual address range for the writeable
- page (the entire page) must be flushed from the cache,
- and the write-capable translation removed from the page
- table and purged from the TLB." */
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- purge_tlb_end(flags);
- preempt_disable();
- clear_user_page_asm(vto, vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- void *vfrom, *vto;
- unsigned long flags;
-
- /* Copy using TMPALIAS region. This has the advantage
- that the `from' page doesn't need to be flushed. However,
- the `to' page must be flushed in copy_user_page_asm since
- it can be used to bring in executable code. */
-
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- pdtlb_kernel(vfrom);
- purge_tlb_end(flags);
- preempt_disable();
- copy_user_page_asm(vto, vfrom, vaddr);
- flush_dcache_page_asm(__pa(vto), vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-#endif /* CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 8fa3fbb3e4d3..80e5dd248934 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -431,6 +431,7 @@
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
+ ENTRY_COMP(utimes)
/* Nothing yet */
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index 84fdf6857c31..a613d2c82fd9 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -200,10 +200,11 @@ static inline void __user *arch_compat_alloc_user_space(long len)
/*
* We can't access below the stack pointer in the 32bit ABI and
- * can access 288 bytes in the 64bit ABI
+ * can access 288 bytes in the 64bit big-endian ABI,
+ * or 512 bytes with the new ELFv2 little-endian ABI.
*/
if (!is_32bit_task())
- usp -= 288;
+ usp -= USER_REDZONE_SIZE;
return (void __user *) (usp - len);
}
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index e27e9ad6818e..150866b2a3fe 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -134,6 +134,7 @@ static inline int dma_supported(struct device *dev, u64 mask)
}
extern int dma_set_mask(struct device *dev, u64 dma_mask);
+extern int __dma_set_mask(struct device *dev, u64 dma_mask);
#define dma_alloc_coherent(d,s,h,f) dma_alloc_attrs(d,s,h,f,NULL)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 9e39ceb1d19f..d4dd41fb951b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -172,10 +172,20 @@ struct eeh_ops {
};
extern struct eeh_ops *eeh_ops;
-extern int eeh_subsystem_enabled;
+extern bool eeh_subsystem_enabled;
extern raw_spinlock_t confirm_error_lock;
extern int eeh_probe_mode;
+static inline bool eeh_enabled(void)
+{
+ return eeh_subsystem_enabled;
+}
+
+static inline void eeh_set_enable(bool mode)
+{
+ eeh_subsystem_enabled = mode;
+}
+
#define EEH_PROBE_MODE_DEV (1<<0) /* From PCI device */
#define EEH_PROBE_MODE_DEVTREE (1<<1) /* From device tree */
@@ -246,7 +256,7 @@ void eeh_remove_device(struct pci_dev *);
* If this macro yields TRUE, the caller relays to eeh_check_failure()
* which does further tests out of line.
*/
-#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_subsystem_enabled)
+#define EEH_POSSIBLE_ERROR(val, type) ((val) == (type)~0 && eeh_enabled())
/*
* Reads from a device which has been isolated by EEH will return
@@ -257,6 +267,13 @@ void eeh_remove_device(struct pci_dev *);
#else /* !CONFIG_EEH */
+static inline bool eeh_enabled(void)
+{
+ return false;
+}
+
+static inline void eeh_set_enable(bool mode) { }
+
static inline int eeh_init(void)
{
return 0;
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index d750336b171d..623f2971ce0e 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -127,7 +127,7 @@ static inline pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
#ifdef CONFIG_PPC64
- return __pte(pte_update(mm, addr, ptep, ~0UL, 1));
+ return __pte(pte_update(mm, addr, ptep, ~0UL, 0, 1));
#else
return __pte(pte_update(ptep, ~0UL, 0));
#endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f7a8036579b5..42632c7a2a4e 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -77,6 +77,7 @@ struct iommu_table {
#ifdef CONFIG_IOMMU_API
struct iommu_group *it_group;
#endif
+ void (*set_bypass)(struct iommu_table *tbl, bool enable);
};
/* Pure 2^n version of get_order */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 40157e2ca691..ed82142a3251 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -816,8 +816,8 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
-int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
+int64_t opal_xscom_read(uint32_t gcid, uint64_t pcb_addr, __be64 *val);
+int64_t opal_xscom_write(uint32_t gcid, uint64_t pcb_addr, uint64_t val);
int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t data, uint32_t sz);
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index bc141c950b1e..eb9261024f51 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -195,6 +195,7 @@ extern void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
static inline unsigned long pte_update(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep, unsigned long clr,
+ unsigned long set,
int huge)
{
#ifdef PTE_ATOMIC_UPDATES
@@ -205,14 +206,15 @@ static inline unsigned long pte_update(struct mm_struct *mm,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*ptep)
- : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY)
+ : "r" (ptep), "r" (clr), "m" (*ptep), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
unsigned long old = pte_val(*ptep);
- *ptep = __pte(old & ~clr);
+ *ptep = __pte((old & ~clr) | set);
#endif
/* huge pages use the old page table lock */
if (!huge)
@@ -231,9 +233,9 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm,
{
unsigned long old;
- if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
+ if ((pte_val(*ptep) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0);
+ old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);
return (old & _PAGE_ACCESSED) != 0;
}
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
@@ -252,7 +254,7 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 0);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 0);
}
static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
@@ -261,7 +263,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
if ((pte_val(*ptep) & _PAGE_RW) == 0)
return;
- pte_update(mm, addr, ptep, _PAGE_RW, 1);
+ pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
}
/*
@@ -284,14 +286,14 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0);
+ unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0);
return __pte(old);
}
static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
pte_t * ptep)
{
- pte_update(mm, addr, ptep, ~0UL, 0);
+ pte_update(mm, addr, ptep, ~0UL, 0, 0);
}
@@ -506,7 +508,9 @@ extern int pmdp_set_access_flags(struct vm_area_struct *vma,
extern unsigned long pmd_hugepage_update(struct mm_struct *mm,
unsigned long addr,
- pmd_t *pmdp, unsigned long clr);
+ pmd_t *pmdp,
+ unsigned long clr,
+ unsigned long set);
static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
unsigned long addr, pmd_t *pmdp)
@@ -515,7 +519,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm,
if ((pmd_val(*pmdp) & (_PAGE_ACCESSED | _PAGE_HASHPTE)) == 0)
return 0;
- old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED);
+ old = pmd_hugepage_update(mm, addr, pmdp, _PAGE_ACCESSED, 0);
return ((old & _PAGE_ACCESSED) != 0);
}
@@ -542,7 +546,7 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr,
if ((pmd_val(*pmdp) & _PAGE_RW) == 0)
return;
- pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW);
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_RW, 0);
}
#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
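(Illustrative note, not part of the patch: the extra "set" argument lets a caller clear and set PTE bits within the same atomic ldarx/stdcx. sequence. A minimal sketch of the two call styles, using only the flag names from the hunks above:)

	/* plain clear, as before: pass 0 for "set" */
	old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0);

	/* clear one bit and set another in a single atomic update,
	 * which is what the ptep_set_numa() helper added below relies on */
	pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);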
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index f83b6f3e1b39..3ebb188c3ff5 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -75,12 +75,34 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
+#define ptep_set_numa ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ if ((pte_val(*ptep) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pte_update(mm, addr, ptep, _PAGE_PRESENT, _PAGE_NUMA, 0);
+ return;
+}
+
#define pmd_numa pmd_numa
static inline int pmd_numa(pmd_t pmd)
{
return pte_numa(pmd_pte(pmd));
}
+#define pmdp_set_numa pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ if ((pmd_val(*pmdp) & _PAGE_PRESENT) == 0)
+ VM_BUG_ON(1);
+
+ pmd_hugepage_update(mm, addr, pmdp, _PAGE_PRESENT, _PAGE_NUMA);
+ return;
+}
+
#define pmd_mknonnuma pmd_mknonnuma
static inline pmd_t pmd_mknonnuma(pmd_t pmd)
{
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index becc08e6a65c..279b80f3bb29 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -28,11 +28,23 @@
#ifdef __powerpc64__
+/*
+ * Size of redzone that userspace is allowed to use below the stack
+ * pointer. This is 288 in the 64-bit big-endian ELF ABI, and 512 in
+ * the new ELFv2 little-endian ABI, so we allow the larger amount.
+ *
+ * For kernel code we allow a 288-byte redzone, in order to conserve
+ * kernel stack space; gcc currently only uses 288 bytes, and will
+ * hopefully allow explicit control of the redzone size in future.
+ */
+#define USER_REDZONE_SIZE 512
+#define KERNEL_REDZONE_SIZE 288
+
#define STACK_FRAME_OVERHEAD 112 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 2 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x7265677368657265)
#define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + \
- STACK_FRAME_OVERHEAD + 288)
+ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE)
#define STACK_FRAME_MARKER 12
/* Size of dummy stack frame allocated when calling signal handler. */
@@ -41,6 +53,8 @@
#else /* __powerpc64__ */
+#define USER_REDZONE_SIZE 0
+#define KERNEL_REDZONE_SIZE 0
#define STACK_FRAME_OVERHEAD 16 /* size of minimum stack frame */
#define STACK_FRAME_LR_SAVE 1 /* Location of LR in stack frame */
#define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773)
diff --git a/arch/powerpc/include/asm/sections.h b/arch/powerpc/include/asm/sections.h
index 4ee06fe15de4..d0e784e0ff48 100644
--- a/arch/powerpc/include/asm/sections.h
+++ b/arch/powerpc/include/asm/sections.h
@@ -8,6 +8,7 @@
#ifdef __powerpc64__
+extern char __start_interrupts[];
extern char __end_interrupts[];
extern char __prom_init_toc_start[];
@@ -21,6 +22,17 @@ static inline int in_kernel_text(unsigned long addr)
return 0;
}
+static inline int overlaps_interrupt_vector_text(unsigned long start,
+ unsigned long end)
+{
+ unsigned long real_start, real_end;
+ real_start = __start_interrupts - _stext;
+ real_end = __end_interrupts - _stext;
+
+ return start < (unsigned long)__va(real_end) &&
+ (unsigned long)__va(real_start) < end;
+}
+
static inline int overlaps_kernel_text(unsigned long start, unsigned long end)
{
return start < (unsigned long)__init_end &&
diff --git a/arch/powerpc/include/asm/vdso.h b/arch/powerpc/include/asm/vdso.h
index 0d9cecddf8a4..c53f5f6d1761 100644
--- a/arch/powerpc/include/asm/vdso.h
+++ b/arch/powerpc/include/asm/vdso.h
@@ -4,11 +4,11 @@
#ifdef __KERNEL__
/* Default link addresses for the vDSOs */
-#define VDSO32_LBASE 0x100000
-#define VDSO64_LBASE 0x100000
+#define VDSO32_LBASE 0x0
+#define VDSO64_LBASE 0x0
/* Default map addresses for 32bit vDSO */
-#define VDSO32_MBASE VDSO32_LBASE
+#define VDSO32_MBASE 0x100000
#define VDSO_VERSION_STRING LINUX_2.6.15
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 11c1d069d920..7a13f378ca2c 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -98,17 +98,19 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
size_t csize, unsigned long offset, int userbuf)
{
void *vaddr;
+ phys_addr_t paddr;
if (!csize)
return 0;
csize = min_t(size_t, csize, PAGE_SIZE);
+ paddr = pfn << PAGE_SHIFT;
- if ((min_low_pfn < pfn) && (pfn < max_pfn)) {
- vaddr = __va(pfn << PAGE_SHIFT);
+ if (memblock_is_region_memory(paddr, csize)) {
+ vaddr = __va(paddr);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
} else {
- vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0);
+ vaddr = __ioremap(paddr, PAGE_SIZE, 0);
csize = copy_oldmem_vaddr(vaddr, buf, csize, offset, userbuf);
iounmap(vaddr);
}
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 8032b97ccdcb..ee78f6e49d64 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -191,12 +191,10 @@ EXPORT_SYMBOL(dma_direct_ops);
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
-int dma_set_mask(struct device *dev, u64 dma_mask)
+int __dma_set_mask(struct device *dev, u64 dma_mask)
{
struct dma_map_ops *dma_ops = get_dma_ops(dev);
- if (ppc_md.dma_set_mask)
- return ppc_md.dma_set_mask(dev, dma_mask);
if ((dma_ops != NULL) && (dma_ops->set_dma_mask != NULL))
return dma_ops->set_dma_mask(dev, dma_mask);
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -204,6 +202,12 @@ int dma_set_mask(struct device *dev, u64 dma_mask)
*dev->dma_mask = dma_mask;
return 0;
}
+int dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (ppc_md.dma_set_mask)
+ return ppc_md.dma_set_mask(dev, dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
EXPORT_SYMBOL(dma_set_mask);
u64 dma_get_required_mask(struct device *dev)
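(Sketch for illustration, not part of the patch: with __dma_set_mask() split out and exported, a platform ppc_md.dma_set_mask hook can handle its own devices and still fall back to the generic path. The pnv_dma_set_mask() and pnv_pci_dma_set_mask() names below are hypothetical:)

static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
{
	/* hypothetical platform hook: special-case PCI devices ... */
	if (dev_is_pci(dev))
		return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);

	/* ... and reuse the generic mask handling for everything else */
	return __dma_set_mask(dev, dma_mask);
}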
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 148db72a8c43..e7b76a6bf150 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -28,6 +28,7 @@
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
+#include <linux/reboot.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/export.h>
@@ -89,7 +90,7 @@
/* Platform dependent EEH operations */
struct eeh_ops *eeh_ops = NULL;
-int eeh_subsystem_enabled;
+bool eeh_subsystem_enabled = false;
EXPORT_SYMBOL(eeh_subsystem_enabled);
/*
@@ -364,7 +365,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.total_mmio_ffs++;
- if (!eeh_subsystem_enabled)
+ if (!eeh_enabled())
return 0;
if (!edev) {
@@ -747,6 +748,17 @@ int __exit eeh_ops_unregister(const char *name)
return -EEXIST;
}
+static int eeh_reboot_notifier(struct notifier_block *nb,
+ unsigned long action, void *unused)
+{
+ eeh_set_enable(false);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block eeh_reboot_nb = {
+ .notifier_call = eeh_reboot_notifier,
+};
+
/**
* eeh_init - EEH initialization
*
@@ -778,6 +790,14 @@ int eeh_init(void)
if (machine_is(powernv) && cnt++ <= 0)
return ret;
+ /* Register reboot notifier */
+ ret = register_reboot_notifier(&eeh_reboot_nb);
+ if (ret) {
+ pr_warn("%s: Failed to register notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
/* call platform initialization function */
if (!eeh_ops) {
pr_warning("%s: Platform EEH operation not found\n",
@@ -822,7 +842,7 @@ int eeh_init(void)
return ret;
}
- if (eeh_subsystem_enabled)
+ if (eeh_enabled())
pr_info("EEH: PCI Enhanced I/O Error Handling Enabled\n");
else
pr_warning("EEH: No capable adapters found\n");
@@ -897,7 +917,7 @@ void eeh_add_device_late(struct pci_dev *dev)
struct device_node *dn;
struct eeh_dev *edev;
- if (!dev || !eeh_subsystem_enabled)
+ if (!dev || !eeh_enabled())
return;
pr_debug("EEH: Adding device %s\n", pci_name(dev));
@@ -1005,7 +1025,7 @@ void eeh_remove_device(struct pci_dev *dev)
{
struct eeh_dev *edev;
- if (!dev || !eeh_subsystem_enabled)
+ if (!dev || !eeh_enabled())
return;
edev = pci_dev_to_eeh_dev(dev);
@@ -1045,7 +1065,7 @@ void eeh_remove_device(struct pci_dev *dev)
static int proc_eeh_show(struct seq_file *m, void *v)
{
- if (0 == eeh_subsystem_enabled) {
+ if (!eeh_enabled()) {
seq_printf(m, "EEH Subsystem is globally disabled\n");
seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
} else {
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7bb30dca4e19..fdc679d309ec 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -362,9 +362,13 @@ static void *eeh_rmv_device(void *data, void *userdata)
*/
if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
return NULL;
+
driver = eeh_pcid_get(dev);
- if (driver && driver->err_handler)
- return NULL;
+ if (driver) {
+ eeh_pcid_put(dev);
+ if (driver->err_handler)
+ return NULL;
+ }
/* Remove it from PCI subsystem */
pr_debug("EEH: Removing %s without EEH sensitive driver\n",
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 9b27b293a922..b0ded97ee4e1 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -74,6 +74,7 @@ ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
*/
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
+ addr = ppc_function_entry((void *)addr);
/* use the create_branch to verify that this offset can be branched */
return create_branch((unsigned int *)ip, addr, 0);
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index d773dd440a45..88e3ec6e1d96 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -1088,6 +1088,14 @@ int iommu_take_ownership(struct iommu_table *tbl)
memset(tbl->it_map, 0xff, sz);
iommu_clear_tces_and_put_pages(tbl, tbl->it_offset, tbl->it_size);
+ /*
+ * Disable iommu bypass, otherwise the user can DMA to all of
+ * our physical memory via the bypass window instead of just
+ * the pages that have been explicitly mapped into the iommu
+ */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, false);
+
return 0;
}
EXPORT_SYMBOL_GPL(iommu_take_ownership);
@@ -1102,6 +1110,10 @@ void iommu_release_ownership(struct iommu_table *tbl)
/* Restore bit#0 set by iommu_init_table() */
if (tbl->it_offset == 0)
set_bit(0, tbl->it_map);
+
+ /* The kernel owns the device now, we can restore the iommu bypass */
+ if (tbl->set_bypass)
+ tbl->set_bypass(tbl, true);
}
EXPORT_SYMBOL_GPL(iommu_release_ownership);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 9729b23bfb0a..1d0848bba049 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -559,8 +559,13 @@ void exc_lvl_ctx_init(void)
#ifdef CONFIG_PPC64
cpu_nr = i;
#else
+#ifdef CONFIG_SMP
cpu_nr = get_hard_smp_processor_id(i);
+#else
+ cpu_nr = 0;
#endif
+#endif
+
memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE);
tp = critirq_ctx[cpu_nr];
tp->cpu = cpu_nr;
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 75d4f7340da8..015ae55c1868 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -196,7 +196,9 @@ int overlaps_crashkernel(unsigned long start, unsigned long size)
/* Values we need to export to the second kernel via the device tree. */
static phys_addr_t kernel_end;
+static phys_addr_t crashk_base;
static phys_addr_t crashk_size;
+static unsigned long long mem_limit;
static struct property kernel_end_prop = {
.name = "linux,kernel-end",
@@ -207,7 +209,7 @@ static struct property kernel_end_prop = {
static struct property crashk_base_prop = {
.name = "linux,crashkernel-base",
.length = sizeof(phys_addr_t),
- .value = &crashk_res.start,
+ .value = &crashk_base
};
static struct property crashk_size_prop = {
@@ -219,9 +221,11 @@ static struct property crashk_size_prop = {
static struct property memory_limit_prop = {
.name = "linux,memory-limit",
.length = sizeof(unsigned long long),
- .value = &memory_limit,
+ .value = &mem_limit,
};
+#define cpu_to_be_ulong __PASTE(cpu_to_be, BITS_PER_LONG)
+
static void __init export_crashk_values(struct device_node *node)
{
struct property *prop;
@@ -237,8 +241,9 @@ static void __init export_crashk_values(struct device_node *node)
of_remove_property(node, prop);
if (crashk_res.start != 0) {
+ crashk_base = cpu_to_be_ulong(crashk_res.start);
of_add_property(node, &crashk_base_prop);
- crashk_size = resource_size(&crashk_res);
+ crashk_size = cpu_to_be_ulong(resource_size(&crashk_res));
of_add_property(node, &crashk_size_prop);
}
@@ -246,6 +251,7 @@ static void __init export_crashk_values(struct device_node *node)
* memory_limit is required by the kexec-tools to limit the
* crash regions to the actual memory used.
*/
+ mem_limit = cpu_to_be_ulong(memory_limit);
of_update_property(node, &memory_limit_prop);
}
@@ -264,7 +270,7 @@ static int __init kexec_setup(void)
of_remove_property(node, prop);
/* information needed by userspace when using default_machine_kexec */
- kernel_end = __pa(_end);
+ kernel_end = cpu_to_be_ulong(__pa(_end));
of_add_property(node, &kernel_end_prop);
export_crashk_values(node);
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index be4e6d648f60..59d229a2a3e0 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -369,6 +369,7 @@ void default_machine_kexec(struct kimage *image)
/* Values we need to export to the second kernel via the device tree. */
static unsigned long htab_base;
+static unsigned long htab_size;
static struct property htab_base_prop = {
.name = "linux,htab-base",
@@ -379,7 +380,7 @@ static struct property htab_base_prop = {
static struct property htab_size_prop = {
.name = "linux,htab-size",
.length = sizeof(unsigned long),
- .value = &htab_size_bytes,
+ .value = &htab_size,
};
static int __init export_htab_values(void)
@@ -403,8 +404,9 @@ static int __init export_htab_values(void)
if (prop)
of_remove_property(node, prop);
- htab_base = __pa(htab_address);
+ htab_base = cpu_to_be64(__pa(htab_address));
of_add_property(node, &htab_base_prop);
+ htab_size = cpu_to_be64(htab_size_bytes);
of_add_property(node, &htab_size_prop);
of_node_put(node);
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 879f09620f83..7c6bb4b17b49 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -57,11 +57,14 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
+/*
+ * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
+ */
_GLOBAL(call_do_irq)
mflr r0
stw r0,4(r1)
lwz r10,THREAD+KSP_LIMIT(r2)
- addi r11,r3,THREAD_INFO_GAP
+ addi r11,r4,THREAD_INFO_GAP
stwu r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
mr r1,r4
stw r10,8(r1)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d4c247f1738..af064d28b365 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
flush_altivec_to_thread(src);
flush_vsx_to_thread(src);
flush_spe_to_thread(src);
+ /*
+ * Flush TM state out so we can copy it. __switch_to_tm() does this
+ * flush but it removes the checkpointed state from the current CPU and
+ * transitions the CPU out of TM mode. Hence we need to call
+ * tm_recheckpoint_new_task() (on the same task) to restore the
+ * checkpointed state back and the TM mode.
+ */
+ __switch_to_tm(src);
+ tm_recheckpoint_new_task(src);
*dst = *src;
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index b47a0e1ab001..d88736fbece6 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -69,8 +69,8 @@ _GLOBAL(relocate)
* R_PPC64_RELATIVE ones.
*/
mtctr r8
-5: lwz r0,12(9) /* ELF64_R_TYPE(reloc->r_info) */
- cmpwi r0,R_PPC64_RELATIVE
+5: ld r0,8(9) /* ELF64_R_TYPE(reloc->r_info) */
+ cmpdi r0,R_PPC64_RELATIVE
bne 6f
ld r6,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
6: blr
+.balign 8
p_dyn: .llong __dynamic_start - 0b
p_rela: .llong __rela_dyn_start - 0b
p_st: .llong _stext - 0b
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 2b0da27eaee4..04cc4fcca78b 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -247,7 +247,12 @@ static void __init exc_lvl_early_init(void)
/* interrupt stacks must be in lowmem, we get that for free on ppc32
* as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */
for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
hw_cpu = get_hard_smp_processor_id(i);
+#else
+ hw_cpu = 0;
+#endif
+
critirq_ctx[hw_cpu] = (struct thread_info *)
__va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
#ifdef CONFIG_BOOKE
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index e35bf773df7a..8d253c29649b 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -65,8 +65,8 @@ struct rt_sigframe {
struct siginfo __user *pinfo;
void __user *puc;
struct siginfo info;
- /* 64 bit ABI allows for 288 bytes below sp before decrementing it. */
- char abigap[288];
+ /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
+ char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));
static const char fmt32[] = KERN_INFO \
diff --git a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
index 79683d0393f5..6ac107ac402a 100644
--- a/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
+++ b/arch/powerpc/kernel/vdso32/vdso32_wrapper.S
@@ -6,7 +6,7 @@
.globl vdso32_start, vdso32_end
.balign PAGE_SIZE
vdso32_start:
- .incbin "arch/powerpc/kernel/vdso32/vdso32.so"
+ .incbin "arch/powerpc/kernel/vdso32/vdso32.so.dbg"
.balign PAGE_SIZE
vdso32_end:
diff --git a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
index 8df9e2463007..df60fca6a13d 100644
--- a/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
+++ b/arch/powerpc/kernel/vdso64/vdso64_wrapper.S
@@ -6,7 +6,7 @@
.globl vdso64_start, vdso64_end
.balign PAGE_SIZE
vdso64_start:
- .incbin "arch/powerpc/kernel/vdso64/vdso64.so"
+ .incbin "arch/powerpc/kernel/vdso64/vdso64.so.dbg"
.balign PAGE_SIZE
vdso64_end:
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index e66d4ec04d95..818dce344e82 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -1504,73 +1504,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1: addi r8,r8,16
.endr
- /* Save DEC */
- mfspr r5,SPRN_DEC
- mftb r6
- extsw r5,r5
- add r5,r5,r6
- std r5,VCPU_DEC_EXPIRES(r9)
-
-BEGIN_FTR_SECTION
- b 8f
-END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
- /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
- mfmsr r8
- li r0, 1
- rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r8
-
- /* Save POWER8-specific registers */
- mfspr r5, SPRN_IAMR
- mfspr r6, SPRN_PSPB
- mfspr r7, SPRN_FSCR
- std r5, VCPU_IAMR(r9)
- stw r6, VCPU_PSPB(r9)
- std r7, VCPU_FSCR(r9)
- mfspr r5, SPRN_IC
- mfspr r6, SPRN_VTB
- mfspr r7, SPRN_TAR
- std r5, VCPU_IC(r9)
- std r6, VCPU_VTB(r9)
- std r7, VCPU_TAR(r9)
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- mfspr r5, SPRN_TFHAR
- mfspr r6, SPRN_TFIAR
- mfspr r7, SPRN_TEXASR
- std r5, VCPU_TFHAR(r9)
- std r6, VCPU_TFIAR(r9)
- std r7, VCPU_TEXASR(r9)
-#endif
- mfspr r8, SPRN_EBBHR
- std r8, VCPU_EBBHR(r9)
- mfspr r5, SPRN_EBBRR
- mfspr r6, SPRN_BESCR
- mfspr r7, SPRN_CSIGR
- mfspr r8, SPRN_TACR
- std r5, VCPU_EBBRR(r9)
- std r6, VCPU_BESCR(r9)
- std r7, VCPU_CSIGR(r9)
- std r8, VCPU_TACR(r9)
- mfspr r5, SPRN_TCSCR
- mfspr r6, SPRN_ACOP
- mfspr r7, SPRN_PID
- mfspr r8, SPRN_WORT
- std r5, VCPU_TCSCR(r9)
- std r6, VCPU_ACOP(r9)
- stw r7, VCPU_GUEST_PID(r9)
- std r8, VCPU_WORT(r9)
-8:
-
- /* Save and reset AMR and UAMOR before turning on the MMU */
-BEGIN_FTR_SECTION
- mfspr r5,SPRN_AMR
- mfspr r6,SPRN_UAMOR
- std r5,VCPU_AMR(r9)
- std r6,VCPU_UAMOR(r9)
- li r6,0
- mtspr SPRN_AMR,r6
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
-
/* Unset guest mode */
li r0, KVM_GUEST_MODE_NONE
stb r0, HSTATE_IN_GUEST(r13)
@@ -2203,7 +2136,7 @@ BEGIN_FTR_SECTION
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
mfspr r6,SPRN_VRSAVE
- stw r6,VCPU_VRSAVE(r3)
+ stw r6,VCPU_VRSAVE(r31)
mtlr r30
mtmsrd r5
isync
@@ -2240,7 +2173,7 @@ BEGIN_FTR_SECTION
bl .load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
- lwz r7,VCPU_VRSAVE(r4)
+ lwz r7,VCPU_VRSAVE(r31)
mtspr SPRN_VRSAVE,r7
mtlr r30
mr r4,r31
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index de6881259aef..d766d6ee33fe 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -207,6 +207,20 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N;
+ /*
+ * If relocatable, check if it overlaps interrupt vectors that
+ * are copied down to real 0. For a relocatable kernel
+ * (e.g. the kdump case) we copy interrupt vectors down to real
+ * address 0. Mark that region as executable. This is
+ * because on a p8 system with the relocation-on-exception
+ * feature enabled, exceptions are raised with the MMU (IR=DR=1)
+ * on. Hence, in order to execute the interrupt handlers in
+ * virtual mode, the vector region needs to be marked executable.
+ */
+ if ((PHYSICAL_START > MEMORY_START) &&
+ overlaps_interrupt_vector_text(vaddr, vaddr + step))
+ tprot &= ~HPTE_R_N;
+
hash = hpt_hash(vpn, shift, ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 65b7b65e8708..62bf5e8e78da 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -510,7 +510,8 @@ int pmdp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
}
unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
- pmd_t *pmdp, unsigned long clr)
+ pmd_t *pmdp, unsigned long clr,
+ unsigned long set)
{
unsigned long old, tmp;
@@ -526,14 +527,15 @@ unsigned long pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
andi. %1,%0,%6\n\
bne- 1b \n\
andc %1,%0,%4 \n\
+ or %1,%1,%7\n\
stdcx. %1,0,%3 \n\
bne- 1b"
: "=&r" (old), "=&r" (tmp), "=m" (*pmdp)
- : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY)
+ : "r" (pmdp), "r" (clr), "m" (*pmdp), "i" (_PAGE_BUSY), "r" (set)
: "cc" );
#else
old = pmd_val(*pmdp);
- *pmdp = __pmd(old & ~clr);
+ *pmdp = __pmd((old & ~clr) | set);
#endif
if (old & _PAGE_HASHPTE)
hpte_do_hugepage_flush(mm, addr, pmdp);
@@ -708,7 +710,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
- pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT);
+ pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
}
/*
@@ -835,7 +837,7 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
unsigned long old;
pgtable_t *pgtable_slot;
- old = pmd_hugepage_update(mm, addr, pmdp, ~0UL);
+ old = pmd_hugepage_update(mm, addr, pmdp, ~0UL, 0);
old_pmd = __pmd(old);
/*
* We have pmd == none and we are holding page_table_lock.
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index a770df2dae70..6c0b1f5f8d2c 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -78,7 +78,7 @@ static void hpte_flush_range(struct mm_struct *mm, unsigned long addr,
pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
arch_enter_lazy_mmu_mode();
for (; npages > 0; --npages) {
- pte_update(mm, addr, pte, 0, 0);
+ pte_update(mm, addr, pte, 0, 0, 0);
addr += PAGE_SIZE;
++pte;
}
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 29b89e863d7c..67cf22083f4c 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1147,6 +1147,9 @@ static void power_pmu_enable(struct pmu *pmu)
mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
mb();
+ if (cpuhw->bhrb_users)
+ ppmu->config_bhrb(cpuhw->bhrb_filter);
+
write_mmcr0(cpuhw, mmcr0);
/*
@@ -1158,8 +1161,6 @@ static void power_pmu_enable(struct pmu *pmu)
}
out:
- if (cpuhw->bhrb_users)
- ppmu->config_bhrb(cpuhw->bhrb_filter);
local_irq_restore(flags);
}
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index a3f7abd2f13f..96cee20dcd34 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -25,6 +25,37 @@
#define PM_BRU_FIN 0x10068
#define PM_BR_MPRED_CMPL 0x400f6
+/* All L1 D cache load references counted at finish, gated by reject */
+#define PM_LD_REF_L1 0x100ee
+/* Load Missed L1 */
+#define PM_LD_MISS_L1 0x3e054
+/* Store Missed L1 */
+#define PM_ST_MISS_L1 0x300f0
+/* L1 cache data prefetches */
+#define PM_L1_PREF 0x0d8b8
+/* Instruction fetches from L1 */
+#define PM_INST_FROM_L1 0x04080
+/* Demand iCache Miss */
+#define PM_L1_ICACHE_MISS 0x200fd
+/* Instruction Demand sectors written into IL1 */
+#define PM_L1_DEMAND_WRITE 0x0408c
+/* Instruction prefetch written into IL1 */
+#define PM_IC_PREF_WRITE 0x0408e
+/* The data cache was reloaded from local core's L3 due to a demand load */
+#define PM_DATA_FROM_L3 0x4c042
+/* Demand LD - L3 Miss (not L2 hit and not L3 hit) */
+#define PM_DATA_FROM_L3MISS 0x300fe
+/* All successful D-side store dispatches for this thread */
+#define PM_L2_ST 0x17080
+/* All successful D-side store dispatches for this thread that were L2 Miss */
+#define PM_L2_ST_MISS 0x17082
+/* Total HW L3 prefetches (Load+store) */
+#define PM_L3_PREF_ALL 0x4e052
+/* Data PTEG reload */
+#define PM_DTLB_MISS 0x300fc
+/* ITLB Reloaded */
+#define PM_ITLB_MISS 0x400fc
+
/*
* Raw event encoding for POWER8:
@@ -557,6 +588,8 @@ static int power8_generic_events[] = {
[PERF_COUNT_HW_INSTRUCTIONS] = PM_INST_CMPL,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = PM_BRU_FIN,
[PERF_COUNT_HW_BRANCH_MISSES] = PM_BR_MPRED_CMPL,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = PM_LD_REF_L1,
+ [PERF_COUNT_HW_CACHE_MISSES] = PM_LD_MISS_L1,
};
static u64 power8_bhrb_filter_map(u64 branch_sample_type)
@@ -596,6 +629,116 @@ static void power8_config_bhrb(u64 pmu_bhrb_filter)
mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [ C(L1D) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
+ [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_PREF,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(L1I) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
+ [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(LL) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
+ [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L2_ST,
+ [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
+ [ C(RESULT_MISS) ] = 0,
+ },
+ },
+ [ C(DTLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_DTLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(ITLB) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = 0,
+ [ C(RESULT_MISS) ] = PM_ITLB_MISS,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(BPU) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
+ [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+ [ C(NODE) ] = {
+ [ C(OP_READ) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_WRITE) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ [ C(OP_PREFETCH) ] = {
+ [ C(RESULT_ACCESS) ] = -1,
+ [ C(RESULT_MISS) ] = -1,
+ },
+ },
+};
+
+#undef C
+
static struct power_pmu power8_pmu = {
.name = "POWER8",
.n_counter = 6,
@@ -611,6 +754,7 @@ static struct power_pmu power8_pmu = {
.flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
.n_generic = ARRAY_SIZE(power8_generic_events),
.generic_events = power8_generic_events,
+ .cache_events = &power8_cache_events,
.attr_groups = power8_pmu_attr_groups,
.bhrb_nr = 32,
};
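(Illustration only, not part of the patch: with .cache_events wired up, the generic perf hw-cache encodings resolve to the raw POWER8 codes defined above, roughly as sketched here:)

	/* config = PERF_COUNT_HW_CACHE_L1D |
	 *          (PERF_COUNT_HW_CACHE_OP_READ << 8) |
	 *          (PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
	 * resolves via power8_cache_events[C(L1D)][C(OP_READ)][C(RESULT_MISS)]
	 * to PM_LD_MISS_L1 (0x3e054), so e.g.
	 * "perf stat -e L1-dcache-load-misses" now works on POWER8.
	 */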
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47a0d77..e865d748179b 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
area->nid = nid;
area->order = order;
- area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+ area->pages = alloc_pages_exact_node(area->nid,
+ GFP_KERNEL|__GFP_THISNODE,
area->order);
if (!area->pages) {
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index e1e71618b70c..253fefe3d1a0 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -44,7 +44,8 @@ static int ioda_eeh_event(struct notifier_block *nb,
/* We simply send special EEH event */
if ((changed_evts & OPAL_EVENT_PCI_ERROR) &&
- (events & OPAL_EVENT_PCI_ERROR))
+ (events & OPAL_EVENT_PCI_ERROR) &&
+ eeh_enabled())
eeh_send_failure_event(NULL);
return 0;
@@ -113,6 +114,7 @@ DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
#endif /* CONFIG_DEBUG_FS */
+
/**
* ioda_eeh_post_init - Chip dependent post initialization
* @hose: PCI controller
@@ -220,6 +222,22 @@ static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
return ret;
}
+static void ioda_eeh_phb_diag(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+ long rc;
+
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
+ if (rc != OPAL_SUCCESS) {
+ pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
+ __func__, hose->global_number, rc);
+ return;
+ }
+
+ pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
+}
+
/**
* ioda_eeh_get_state - Retrieve the state of PE
* @pe: EEH PE
@@ -271,6 +289,9 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
result |= EEH_STATE_DMA_ACTIVE;
result |= EEH_STATE_MMIO_ENABLED;
result |= EEH_STATE_DMA_ENABLED;
+ } else if (!(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ ioda_eeh_phb_diag(hose);
}
return result;
@@ -314,6 +335,15 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
__func__, fstate, hose->global_number, pe_no);
}
+ /* Dump PHB diag-data for frozen PE */
+ if (result != EEH_STATE_NOT_SUPPORT &&
+ (result & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) !=
+ (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE) &&
+ !(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ ioda_eeh_phb_diag(hose);
+ }
+
return result;
}
@@ -489,8 +519,7 @@ static int ioda_eeh_bridge_reset(struct pci_controller *hose,
static int ioda_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
- struct eeh_dev *edev;
- struct pci_dev *dev;
+ struct pci_bus *bus;
int ret;
/*
@@ -519,73 +548,17 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
if (pe->type & EEH_PE_PHB) {
ret = ioda_eeh_phb_reset(hose, option);
} else {
- if (pe->type & EEH_PE_DEVICE) {
- /*
- * If it's device PE, we didn't refer to the parent
- * PCI bus yet. So we have to figure it out indirectly.
- */
- edev = list_first_entry(&pe->edevs,
- struct eeh_dev, list);
- dev = eeh_dev_to_pci_dev(edev);
- dev = dev->bus->self;
- } else {
- /*
- * If it's bus PE, the parent PCI bus is already there
- * and just pick it up.
- */
- dev = pe->bus->self;
- }
-
- /*
- * Do reset based on the fact that the direct upstream bridge
- * is root bridge (port) or not.
- */
- if (dev->bus->number == 0)
+ bus = eeh_pe_bus_get(pe);
+ if (pci_is_root_bus(bus))
ret = ioda_eeh_root_reset(hose, option);
else
- ret = ioda_eeh_bridge_reset(hose, dev, option);
+ ret = ioda_eeh_bridge_reset(hose, bus->self, option);
}
return ret;
}
/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: EEH PE
- * @severity: Severity level of the log
- * @drv_log: buffer to store the log
- * @len: space of the log buffer
- *
- * The function is used to retrieve error log from P7IOC.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
-{
- s64 ret;
- unsigned long flags;
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
-
- spin_lock_irqsave(&phb->lock, flags);
-
- ret = opal_pci_get_phb_diag_data2(phb->opal_id,
- phb->diag.blob, PNV_PCI_DIAG_BUF_SIZE);
- if (ret) {
- spin_unlock_irqrestore(&phb->lock, flags);
- pr_warning("%s: Can't get log for PHB#%x-PE#%x (%lld)\n",
- __func__, hose->global_number, pe->addr, ret);
- return -EIO;
- }
-
- /* The PHB diag-data is always indicative */
- pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-
- spin_unlock_irqrestore(&phb->lock, flags);
-
- return 0;
-}
-
-/**
* ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
* @pe: EEH PE
*
@@ -666,22 +639,6 @@ static void ioda_eeh_hub_diag(struct pci_controller *hose)
}
}
-static void ioda_eeh_phb_diag(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- long rc;
-
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
- PNV_PCI_DIAG_BUF_SIZE);
- if (rc != OPAL_SUCCESS) {
- pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
- __func__, hose->global_number, rc);
- return;
- }
-
- pnv_pci_dump_phb_diag_data(hose, phb->diag.blob);
-}
-
static int ioda_eeh_get_phb_pe(struct pci_controller *hose,
struct eeh_pe **pe)
{
@@ -855,6 +812,20 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
}
/*
+ * EEH core will try to recover from a fenced PHB or a
+ * frozen PE. For a frozen PE, EEH core enables the IO
+ * path before collecting logs, but that ruins the site.
+ * So we have to dump the log in advance here.
+ */
+ if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+ ret == EEH_NEXT_ERR_FENCED_PHB) &&
+ !((*pe)->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ ioda_eeh_phb_diag(hose);
+ }
+
+ /*
* If we have no errors on the specific PHB or only
* informative error there, we continue poking it.
* Otherwise, we need actions to be taken by upper
@@ -872,7 +843,6 @@ struct pnv_eeh_ops ioda_eeh_ops = {
.set_option = ioda_eeh_set_option,
.get_state = ioda_eeh_get_state,
.reset = ioda_eeh_reset,
- .get_log = ioda_eeh_get_log,
.configure_bridge = ioda_eeh_configure_bridge,
.next_error = ioda_eeh_next_error
};
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index a79fddc5e74e..a59788e83b8b 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -145,7 +145,7 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Enable EEH explicitly so that we will do EEH check
* while accessing I/O stuff
*/
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
/* Save memory bars */
eeh_save_bars(edev);
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4fbf276ac99e..4cd2ea6c0dbe 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -71,11 +71,11 @@ static int opal_xscom_err_xlate(int64_t rc)
}
}
-static u64 opal_scom_unmangle(u64 reg)
+static u64 opal_scom_unmangle(u64 addr)
{
/*
* XSCOM indirect addresses have the top bit set. Additionally
- * the reset of the top 3 nibbles is always 0.
+ * the rest of the top 3 nibbles is always 0.
*
* Because the debugfs interface uses signed offsets and shifts
* the address left by 3, we basically cannot use the top 4 bits
@@ -86,10 +86,13 @@ static u64 opal_scom_unmangle(u64 reg)
* conversion here. To leave room for further xscom address
* expansion, we only clear out the top byte
*
+ * For in-kernel use, we also support the real indirect bit, so
+ * we test for any of the top 5 bits
+ *
*/
- if (reg & (1ull << 59))
- reg = (reg & ~(0xffull << 56)) | (1ull << 63);
- return reg;
+ if (addr & (0x1full << 59))
+ addr = (addr & ~(0xffull << 56)) | (1ull << 63);
+ return addr;
}
static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
@@ -98,8 +101,8 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
int64_t rc;
__be64 v;
- reg = opal_scom_unmangle(reg);
- rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+ reg = opal_scom_unmangle(m->addr + reg);
+ rc = opal_xscom_read(m->chip, reg, (__be64 *)__pa(&v));
*value = be64_to_cpu(v);
return opal_xscom_err_xlate(rc);
}
@@ -109,8 +112,8 @@ static int opal_scom_write(scom_map_t map, u64 reg, u64 value)
struct opal_scom_map *m = map;
int64_t rc;
- reg = opal_scom_unmangle(reg);
- rc = opal_xscom_write(m->chip, m->addr + reg, value);
+ reg = opal_scom_unmangle(m->addr + reg);
+ rc = opal_xscom_write(m->chip, reg, value);
return opal_xscom_err_xlate(rc);
}
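(Worked example, not from the patch: with the widened test, any of bits 59-63 set in the combined chip address triggers the rewrite, e.g.:)

	u64 addr = 0x0800000012345678ull;	/* indirect bit 59 set */
	addr = (addr & ~(0xffull << 56)) | (1ull << 63);
	/* addr is now 0x8000000012345678ull */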
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 7d6dcc6d5fa9..3b2b4fb3585b 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -21,6 +21,7 @@
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/msi.h>
+#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/io.h>
@@ -460,9 +461,39 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
return;
pe = &phb->ioda.pe_array[pdn->pe_number];
+ WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
}
+static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
+ struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ struct pnv_ioda_pe *pe;
+ uint64_t top;
+ bool bypass = false;
+
+ if (WARN_ON(!pdn || pdn->pe_number == IODA_INVALID_PE))
+ return -ENODEV;
+
+ pe = &phb->ioda.pe_array[pdn->pe_number];
+ if (pe->tce_bypass_enabled) {
+ top = pe->tce_bypass_base + memblock_end_of_DRAM() - 1;
+ bypass = (dma_mask >= top);
+ }
+
+ if (bypass) {
+ dev_info(&pdev->dev, "Using 64-bit DMA iommu bypass\n");
+ set_dma_ops(&pdev->dev, &dma_direct_ops);
+ set_dma_offset(&pdev->dev, pe->tce_bypass_base);
+ } else {
+ dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
+ set_dma_ops(&pdev->dev, &dma_iommu_ops);
+ set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ }
+ return 0;
+}
+
static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, struct pci_bus *bus)
{
struct pci_dev *dev;
@@ -657,6 +688,56 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
__free_pages(tce_mem, get_order(TCE32_TABLE_SIZE * segs));
}
+static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
+{
+ struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
+ tce32_table);
+ uint16_t window_id = (pe->pe_number << 1) + 1;
+ int64_t rc;
+
+ pe_info(pe, "%sabling 64-bit DMA bypass\n", enable ? "En" : "Dis");
+ if (enable) {
+ phys_addr_t top = memblock_end_of_DRAM();
+
+ top = roundup_pow_of_two(top);
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ top);
+ } else {
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ window_id,
+ pe->tce_bypass_base,
+ 0);
+
+ /*
+ * We might want to reset the DMA ops of all devices on
+ * this PE. However, in theory that shouldn't be necessary
+ * as this is used for VFIO/KVM pass-through and the device
+ * hasn't yet been returned to its kernel driver.
+ */
+ }
+ if (rc)
+ pe_err(pe, "OPAL error %lld configuring bypass window\n", rc);
+ else
+ pe->tce_bypass_enabled = enable;
+}
+
+static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe)
+{
+ /* TVE #1 is selected by PCI address bit 59 */
+ pe->tce_bypass_base = 1ull << 59;
+
+ /* Install set_bypass callback for VFIO */
+ pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+
+ /* Enable bypass by default */
+ pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+}
+
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
struct pnv_ioda_pe *pe)
{
@@ -727,6 +808,8 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
else
pnv_ioda_setup_bus_dma(pe, pe->pbus);
+ /* Also create a bypass window */
+ pnv_pci_ioda2_setup_bypass_pe(phb, pe);
return;
fail:
if (pe->tce32_seg >= 0)
@@ -1286,6 +1369,7 @@ void __init pnv_pci_init_ioda_phb(struct device_node *np,
/* Setup TCEs */
phb->dma_dev_setup = pnv_pci_ioda_dma_dev_setup;
+ phb->dma_set_mask = pnv_pci_ioda_dma_set_mask;
/* Setup shutdown function for kexec */
phb->shutdown = pnv_pci_ioda_shutdown;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b555ebc57ef5..8518817dcdfd 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -134,57 +134,72 @@ static void pnv_pci_dump_p7ioc_diag_data(struct pci_controller *hose,
pr_info("P7IOC PHB#%d Diag-data (Version: %d)\n\n",
hose->global_number, common->version);
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
- pr_info(" p7iocPlssr: %016llx\n", data->p7iocPlssr);
- pr_info(" p7iocCsr: %016llx\n", data->p7iocCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+ if (data->brdgCtl)
+ pr_info(" brdgCtl: %08x\n",
+ data->brdgCtl);
+ if (data->portStatusReg || data->rootCmplxStatus ||
+ data->busAgentStatus)
+ pr_info(" UtlSts: %08x %08x %08x\n",
+ data->portStatusReg, data->rootCmplxStatus,
+ data->busAgentStatus);
+ if (data->deviceStatus || data->slotStatus ||
+ data->linkStatus || data->devCmdStatus ||
+ data->devSecStatus)
+ pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
+ data->deviceStatus, data->slotStatus,
+ data->linkStatus, data->devCmdStatus,
+ data->devSecStatus);
+ if (data->rootErrorStatus || data->uncorrErrorStatus ||
+ data->corrErrorStatus)
+ pr_info(" RootErrSts: %08x %08x %08x\n",
+ data->rootErrorStatus, data->uncorrErrorStatus,
+ data->corrErrorStatus);
+ if (data->tlpHdr1 || data->tlpHdr2 ||
+ data->tlpHdr3 || data->tlpHdr4)
+ pr_info(" RootErrLog: %08x %08x %08x %08x\n",
+ data->tlpHdr1, data->tlpHdr2,
+ data->tlpHdr3, data->tlpHdr4);
+ if (data->sourceId || data->errorClass ||
+ data->correlator)
+ pr_info(" RootErrLog1: %08x %016llx %016llx\n",
+ data->sourceId, data->errorClass,
+ data->correlator);
+ if (data->p7iocPlssr || data->p7iocCsr)
+ pr_info(" PhbSts: %016llx %016llx\n",
+ data->p7iocPlssr, data->p7iocCsr);
+ if (data->lemFir || data->lemErrorMask ||
+ data->lemWOF)
+ pr_info(" Lem: %016llx %016llx %016llx\n",
+ data->lemFir, data->lemErrorMask,
+ data->lemWOF);
+ if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+ data->phbErrorLog0 || data->phbErrorLog1)
+ pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
+ data->phbErrorStatus, data->phbFirstErrorStatus,
+ data->phbErrorLog0, data->phbErrorLog1);
+ if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+ data->mmioErrorLog0 || data->mmioErrorLog1)
+ pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
+ data->mmioErrorStatus, data->mmioFirstErrorStatus,
+ data->mmioErrorLog0, data->mmioErrorLog1);
+ if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+ data->dma0ErrorLog0 || data->dma0ErrorLog1)
+ pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
+ data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+ data->dma0ErrorLog0, data->dma0ErrorLog1);
+ if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+ data->dma1ErrorLog0 || data->dma1ErrorLog1)
+ pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
+ data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+ data->dma1ErrorLog0, data->dma1ErrorLog1);
for (i = 0; i < OPAL_P7IOC_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ pr_info(" PE[%3d] A/B: %016llx %016llx\n",
+ i, data->pestA[i], data->pestB[i]);
}
}
@@ -197,62 +212,77 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
data = (struct OpalIoPhb3ErrorData*)common;
pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n\n",
hose->global_number, common->version);
-
- pr_info(" brdgCtl: %08x\n", data->brdgCtl);
-
- pr_info(" portStatusReg: %08x\n", data->portStatusReg);
- pr_info(" rootCmplxStatus: %08x\n", data->rootCmplxStatus);
- pr_info(" busAgentStatus: %08x\n", data->busAgentStatus);
-
- pr_info(" deviceStatus: %08x\n", data->deviceStatus);
- pr_info(" slotStatus: %08x\n", data->slotStatus);
- pr_info(" linkStatus: %08x\n", data->linkStatus);
- pr_info(" devCmdStatus: %08x\n", data->devCmdStatus);
- pr_info(" devSecStatus: %08x\n", data->devSecStatus);
-
- pr_info(" rootErrorStatus: %08x\n", data->rootErrorStatus);
- pr_info(" uncorrErrorStatus: %08x\n", data->uncorrErrorStatus);
- pr_info(" corrErrorStatus: %08x\n", data->corrErrorStatus);
- pr_info(" tlpHdr1: %08x\n", data->tlpHdr1);
- pr_info(" tlpHdr2: %08x\n", data->tlpHdr2);
- pr_info(" tlpHdr3: %08x\n", data->tlpHdr3);
- pr_info(" tlpHdr4: %08x\n", data->tlpHdr4);
- pr_info(" sourceId: %08x\n", data->sourceId);
- pr_info(" errorClass: %016llx\n", data->errorClass);
- pr_info(" correlator: %016llx\n", data->correlator);
-
- pr_info(" nFir: %016llx\n", data->nFir);
- pr_info(" nFirMask: %016llx\n", data->nFirMask);
- pr_info(" nFirWOF: %016llx\n", data->nFirWOF);
- pr_info(" PhbPlssr: %016llx\n", data->phbPlssr);
- pr_info(" PhbCsr: %016llx\n", data->phbCsr);
- pr_info(" lemFir: %016llx\n", data->lemFir);
- pr_info(" lemErrorMask: %016llx\n", data->lemErrorMask);
- pr_info(" lemWOF: %016llx\n", data->lemWOF);
- pr_info(" phbErrorStatus: %016llx\n", data->phbErrorStatus);
- pr_info(" phbFirstErrorStatus: %016llx\n", data->phbFirstErrorStatus);
- pr_info(" phbErrorLog0: %016llx\n", data->phbErrorLog0);
- pr_info(" phbErrorLog1: %016llx\n", data->phbErrorLog1);
- pr_info(" mmioErrorStatus: %016llx\n", data->mmioErrorStatus);
- pr_info(" mmioFirstErrorStatus: %016llx\n", data->mmioFirstErrorStatus);
- pr_info(" mmioErrorLog0: %016llx\n", data->mmioErrorLog0);
- pr_info(" mmioErrorLog1: %016llx\n", data->mmioErrorLog1);
- pr_info(" dma0ErrorStatus: %016llx\n", data->dma0ErrorStatus);
- pr_info(" dma0FirstErrorStatus: %016llx\n", data->dma0FirstErrorStatus);
- pr_info(" dma0ErrorLog0: %016llx\n", data->dma0ErrorLog0);
- pr_info(" dma0ErrorLog1: %016llx\n", data->dma0ErrorLog1);
- pr_info(" dma1ErrorStatus: %016llx\n", data->dma1ErrorStatus);
- pr_info(" dma1FirstErrorStatus: %016llx\n", data->dma1FirstErrorStatus);
- pr_info(" dma1ErrorLog0: %016llx\n", data->dma1ErrorLog0);
- pr_info(" dma1ErrorLog1: %016llx\n", data->dma1ErrorLog1);
+ if (data->brdgCtl)
+ pr_info(" brdgCtl: %08x\n",
+ data->brdgCtl);
+ if (data->portStatusReg || data->rootCmplxStatus ||
+ data->busAgentStatus)
+ pr_info(" UtlSts: %08x %08x %08x\n",
+ data->portStatusReg, data->rootCmplxStatus,
+ data->busAgentStatus);
+ if (data->deviceStatus || data->slotStatus ||
+ data->linkStatus || data->devCmdStatus ||
+ data->devSecStatus)
+ pr_info(" RootSts: %08x %08x %08x %08x %08x\n",
+ data->deviceStatus, data->slotStatus,
+ data->linkStatus, data->devCmdStatus,
+ data->devSecStatus);
+ if (data->rootErrorStatus || data->uncorrErrorStatus ||
+ data->corrErrorStatus)
+ pr_info(" RootErrSts: %08x %08x %08x\n",
+ data->rootErrorStatus, data->uncorrErrorStatus,
+ data->corrErrorStatus);
+ if (data->tlpHdr1 || data->tlpHdr2 ||
+ data->tlpHdr3 || data->tlpHdr4)
+ pr_info(" RootErrLog: %08x %08x %08x %08x\n",
+ data->tlpHdr1, data->tlpHdr2,
+ data->tlpHdr3, data->tlpHdr4);
+ if (data->sourceId || data->errorClass ||
+ data->correlator)
+ pr_info(" RootErrLog1: %08x %016llx %016llx\n",
+ data->sourceId, data->errorClass,
+ data->correlator);
+ if (data->nFir || data->nFirMask ||
+ data->nFirWOF)
+ pr_info(" nFir: %016llx %016llx %016llx\n",
+ data->nFir, data->nFirMask,
+ data->nFirWOF);
+ if (data->phbPlssr || data->phbCsr)
+ pr_info(" PhbSts: %016llx %016llx\n",
+ data->phbPlssr, data->phbCsr);
+ if (data->lemFir || data->lemErrorMask ||
+ data->lemWOF)
+ pr_info(" Lem: %016llx %016llx %016llx\n",
+ data->lemFir, data->lemErrorMask,
+ data->lemWOF);
+ if (data->phbErrorStatus || data->phbFirstErrorStatus ||
+ data->phbErrorLog0 || data->phbErrorLog1)
+ pr_info(" PhbErr: %016llx %016llx %016llx %016llx\n",
+ data->phbErrorStatus, data->phbFirstErrorStatus,
+ data->phbErrorLog0, data->phbErrorLog1);
+ if (data->mmioErrorStatus || data->mmioFirstErrorStatus ||
+ data->mmioErrorLog0 || data->mmioErrorLog1)
+ pr_info(" OutErr: %016llx %016llx %016llx %016llx\n",
+ data->mmioErrorStatus, data->mmioFirstErrorStatus,
+ data->mmioErrorLog0, data->mmioErrorLog1);
+ if (data->dma0ErrorStatus || data->dma0FirstErrorStatus ||
+ data->dma0ErrorLog0 || data->dma0ErrorLog1)
+ pr_info(" InAErr: %016llx %016llx %016llx %016llx\n",
+ data->dma0ErrorStatus, data->dma0FirstErrorStatus,
+ data->dma0ErrorLog0, data->dma0ErrorLog1);
+ if (data->dma1ErrorStatus || data->dma1FirstErrorStatus ||
+ data->dma1ErrorLog0 || data->dma1ErrorLog1)
+ pr_info(" InBErr: %016llx %016llx %016llx %016llx\n",
+ data->dma1ErrorStatus, data->dma1FirstErrorStatus,
+ data->dma1ErrorLog0, data->dma1ErrorLog1);
for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
if ((data->pestA[i] >> 63) == 0 &&
(data->pestB[i] >> 63) == 0)
continue;
- pr_info(" PE[%3d] PESTA: %016llx\n", i, data->pestA[i]);
- pr_info(" PESTB: %016llx\n", data->pestB[i]);
+ pr_info(" PE[%3d] A/B: %016llx %016llx\n",
+ i, data->pestA[i], data->pestB[i]);
}
}
@@ -634,6 +664,16 @@ static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
pnv_pci_dma_fallback_setup(hose, pdev);
}
+int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ struct pci_controller *hose = pci_bus_to_host(pdev->bus);
+ struct pnv_phb *phb = hose->private_data;
+
+ if (phb && phb->dma_set_mask)
+ return phb->dma_set_mask(phb, pdev, dma_mask);
+ return __dma_set_mask(&pdev->dev, dma_mask);
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 13f1942a9a5f..cde169442775 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -54,7 +54,9 @@ struct pnv_ioda_pe {
struct iommu_table tce32_table;
phys_addr_t tce_inval_reg_phys;
- /* XXX TODO: Add support for additional 64-bit iommus */
+ /* 64-bit TCE bypass region */
+ bool tce_bypass_enabled;
+ uint64_t tce_bypass_base;
 /* MSIs. MVE index is identical for 32 and 64 bit MSI
* and -1 if not supported. (It's actually identical to the
@@ -113,6 +115,8 @@ struct pnv_phb {
unsigned int hwirq, unsigned int virq,
unsigned int is_64, struct msi_msg *msg);
void (*dma_dev_setup)(struct pnv_phb *phb, struct pci_dev *pdev);
+ int (*dma_set_mask)(struct pnv_phb *phb, struct pci_dev *pdev,
+ u64 dma_mask);
void (*fixup_phb)(struct pci_controller *hose);
u32 (*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
void (*shutdown)(struct pnv_phb *phb);
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index de6819be1f95..0051e108ef0f 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -7,12 +7,20 @@ extern void pnv_smp_init(void);
static inline void pnv_smp_init(void) { }
#endif
+struct pci_dev;
+
#ifdef CONFIG_PCI
extern void pnv_pci_init(void);
extern void pnv_pci_shutdown(void);
+extern int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask);
#else
static inline void pnv_pci_init(void) { }
static inline void pnv_pci_shutdown(void) { }
+
+static inline int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
+{
+ return -ENODEV;
+}
#endif
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 21166f65c97c..110f4fbd319f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -27,6 +27,7 @@
#include <linux/interrupt.h>
#include <linux/bug.h>
#include <linux/cpuidle.h>
+#include <linux/pci.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
@@ -141,6 +142,13 @@ static void pnv_progress(char *s, unsigned short hex)
{
}
+static int pnv_dma_set_mask(struct device *dev, u64 dma_mask)
+{
+ if (dev_is_pci(dev))
+ return pnv_pci_dma_set_mask(to_pci_dev(dev), dma_mask);
+ return __dma_set_mask(dev, dma_mask);
+}
+
static void pnv_shutdown(void)
{
/* Let the PCI code clear up IODA tables */
@@ -238,6 +246,7 @@ define_machine(powernv) {
.machine_shutdown = pnv_shutdown,
.power_save = powernv_idle,
.calibrate_decr = generic_calibrate_decr,
+ .dma_set_mask = pnv_dma_set_mask,
#ifdef CONFIG_KEXEC
.kexec_cpu_down = pnv_kexec_cpu_down,
#endif
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 37300f6ee244..80b1d57c306a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -20,6 +20,7 @@ config PPC_PSERIES
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
+ select ARCH_RANDOM
default y
config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index 9ef3cc8ebc11..8a8f0472d98f 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -265,7 +265,7 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
enable = 1;
if (enable) {
- eeh_subsystem_enabled = 1;
+ eeh_set_enable(true);
eeh_add_to_parent_pe(edev);
pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index 82789e79e539..0ea99e3d4815 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -35,12 +35,7 @@
#include "offline_states.h"
/* This version can't take the spinlock, because it never returns */
-static struct rtas_args rtas_stop_self_args = {
- .token = RTAS_UNKNOWN_SERVICE,
- .nargs = 0,
- .nret = 1,
- .rets = &rtas_stop_self_args.args[0],
-};
+static int rtas_stop_self_token = RTAS_UNKNOWN_SERVICE;
static DEFINE_PER_CPU(enum cpu_state_vals, preferred_offline_state) =
CPU_STATE_OFFLINE;
@@ -93,15 +88,20 @@ void set_default_offline_state(int cpu)
static void rtas_stop_self(void)
{
- struct rtas_args *args = &rtas_stop_self_args;
+ struct rtas_args args = {
+ .token = cpu_to_be32(rtas_stop_self_token),
+ .nargs = 0,
+ .nret = 1,
+ .rets = &args.args[0],
+ };
local_irq_disable();
- BUG_ON(args->token == RTAS_UNKNOWN_SERVICE);
+ BUG_ON(rtas_stop_self_token == RTAS_UNKNOWN_SERVICE);
printk("cpu %u (hwid %u) Ready to die...\n",
smp_processor_id(), hard_smp_processor_id());
- enter_rtas(__pa(args));
+ enter_rtas(__pa(&args));
panic("Alas, I survived.\n");
}
@@ -392,10 +392,10 @@ static int __init pseries_cpu_hotplug_init(void)
}
}
- rtas_stop_self_args.token = rtas_token("stop-self");
+ rtas_stop_self_token = rtas_token("stop-self");
qcss_tok = rtas_token("query-cpu-stopped-state");
- if (rtas_stop_self_args.token == RTAS_UNKNOWN_SERVICE ||
+ if (rtas_stop_self_token == RTAS_UNKNOWN_SERVICE ||
qcss_tok == RTAS_UNKNOWN_SERVICE) {
printk(KERN_INFO "CPU Hotplug not supported by firmware "
"- disabling.\n");
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 70670a2d9cf2..c413ec158ff5 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,8 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
- const __be32 *pcie_link_speed_stats;
+ u32 pcie_link_speed_stats[2];
+ int rc;
bus = bridge->bus;
@@ -122,38 +123,45 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
- pcie_link_speed_stats = of_get_property(pdn,
- "ibm,pcie-link-speed-stats", NULL);
- if (pcie_link_speed_stats)
+ rc = of_property_read_u32_array(pdn,
+ "ibm,pcie-link-speed-stats",
+ &pcie_link_speed_stats[0], 2);
+ if (!rc)
break;
}
of_node_put(pdn);
- if (!pcie_link_speed_stats) {
+ if (rc) {
pr_err("no ibm,pcie-link-speed-stats property\n");
return 0;
}
- switch (be32_to_cpup(pcie_link_speed_stats)) {
+ switch (pcie_link_speed_stats[0]) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->max_bus_speed = PCIE_SPEED_5_0GT;
break;
+ case 0x04:
+ bus->max_bus_speed = PCIE_SPEED_8_0GT;
+ break;
default:
bus->max_bus_speed = PCI_SPEED_UNKNOWN;
break;
}
- switch (be32_to_cpup(pcie_link_speed_stats)) {
+ switch (pcie_link_speed_stats[1]) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
case 0x02:
bus->cur_bus_speed = PCIE_SPEED_5_0GT;
break;
+ case 0x04:
+ bus->cur_bus_speed = PCIE_SPEED_8_0GT;
+ break;
default:
bus->cur_bus_speed = PCI_SPEED_UNKNOWN;
break;
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 8e639d7cbda7..972df0ffd4dc 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -430,8 +430,7 @@ static void pSeries_machine_kexec(struct kimage *image)
{
long rc;
- if (firmware_has_feature(FW_FEATURE_SET_MODE) &&
- (image->type != KEXEC_TYPE_CRASH)) {
+ if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
rc = pSeries_disable_reloc_on_exc();
if (rc != H_SUCCESS)
pr_warning("Warning: Failed to disable relocation on "
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 0e166ed4cd16..8209744b2829 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -886,25 +886,25 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
/* Default: read HW settings */
if (flow_type == IRQ_TYPE_DEFAULT) {
- switch(vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
- MPIC_INFO(VECPRI_SENSE_MASK))) {
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_EDGE_RISING;
- break;
- case MPIC_INFO(VECPRI_SENSE_EDGE) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_EDGE_FALLING;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_POSITIVE):
- flow_type = IRQ_TYPE_LEVEL_HIGH;
- break;
- case MPIC_INFO(VECPRI_SENSE_LEVEL) |
- MPIC_INFO(VECPRI_POLARITY_NEGATIVE):
- flow_type = IRQ_TYPE_LEVEL_LOW;
- break;
- }
+ int vold_ps;
+
+ vold_ps = vold & (MPIC_INFO(VECPRI_POLARITY_MASK) |
+ MPIC_INFO(VECPRI_SENSE_MASK));
+
+ if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_EDGE_RISING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_EDGE) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_EDGE_FALLING;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_POSITIVE)))
+ flow_type = IRQ_TYPE_LEVEL_HIGH;
+ else if (vold_ps == (MPIC_INFO(VECPRI_SENSE_LEVEL) |
+ MPIC_INFO(VECPRI_POLARITY_NEGATIVE)))
+ flow_type = IRQ_TYPE_LEVEL_LOW;
+ else
+ WARN_ONCE(1, "mpic: unknown IRQ type %d\n", vold);
}
/* Apply to irq desc */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index a90731b3d44a..b07909850f77 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -309,16 +309,23 @@ static void get_output_lock(void)
if (xmon_speaker == me)
return;
+
for (;;) {
- if (xmon_speaker == 0) {
- last_speaker = cmpxchg(&xmon_speaker, 0, me);
- if (last_speaker == 0)
- return;
- }
- timeout = 10000000;
+ last_speaker = cmpxchg(&xmon_speaker, 0, me);
+ if (last_speaker == 0)
+ return;
+
+ /*
+ * Wait a full second for the lock, we might be on a slow
+ * console, but check every 100us.
+ */
+ timeout = 10000;
while (xmon_speaker == last_speaker) {
- if (--timeout > 0)
+ if (--timeout > 0) {
+ udelay(100);
continue;
+ }
+
/* hostile takeover */
prev = cmpxchg(&xmon_speaker, last_speaker, me);
if (prev == last_speaker)
@@ -397,7 +404,6 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
}
xmon_fault_jmp[cpu] = recurse_jmp;
- cpumask_set_cpu(cpu, &cpus_in_xmon);
bp = NULL;
if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT))
@@ -419,6 +425,8 @@ static int xmon_core(struct pt_regs *regs, int fromipi)
release_output_lock();
}
+ cpumask_set_cpu(cpu, &cpus_in_xmon);
+
waiting:
secondary = 1;
while (secondary && !xmon_gate) {
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 4c4a1cef5208..47c8630c93cd 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -529,6 +529,7 @@ static int __init appldata_init(void)
{
int rc;
+ init_virt_timer(&appldata_timer);
appldata_timer.function = appldata_timer_function;
appldata_timer.data = (unsigned long) &appldata_work;
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index b3feabd39f31..cf3c0089bef2 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/spinlock.h>
#include "crypt_s390.h"
#define AES_KEYLEN_128 1
@@ -32,6 +33,7 @@
#define AES_KEYLEN_256 4
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
static char keylen_flag;
struct s390_aes_ctx {
@@ -758,43 +760,67 @@ static int ctr_aes_set_key(struct crypto_tfm *tfm, const u8 *in_key,
return aes_set_key(tfm, in_key, key_len);
}
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* only use complete blocks, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(AES_BLOCK_SIZE - 1);
+ for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - AES_BLOCK_SIZE,
+ AES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, AES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
struct s390_aes_ctx *sctx, struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[AES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[AES_BLOCK_SIZE], ctrbuf[AES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
if (!walk->nbytes)
return ret;
- memcpy(ctrblk, walk->iv, AES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, AES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= AES_BLOCK_SIZE) {
- /* only use complete blocks, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(AES_BLOCK_SIZE - 1);
- for (i = AES_BLOCK_SIZE; i < n; i += AES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - AES_BLOCK_SIZE,
- AES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, AES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, sctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = AES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, sctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > AES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - AES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - AES_BLOCK_SIZE,
AES_BLOCK_SIZE);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrptr, AES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, AES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, AES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ }
/*
* final block may be < AES_BLOCK_SIZE, copy only nbytes
*/
@@ -802,14 +828,15 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, long func,
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, sctx->key, buf, in,
- AES_BLOCK_SIZE, ctrblk);
+ AES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != AES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, AES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, AES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, AES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, AES_BLOCK_SIZE);
+
return ret;
}
diff --git a/arch/s390/crypto/des_s390.c b/arch/s390/crypto/des_s390.c
index 200f2a1b599d..0a5aac8a9412 100644
--- a/arch/s390/crypto/des_s390.c
+++ b/arch/s390/crypto/des_s390.c
@@ -25,6 +25,7 @@
#define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
static u8 *ctrblk;
+static DEFINE_SPINLOCK(ctrblk_lock);
struct s390_des_ctx {
u8 iv[DES_BLOCK_SIZE];
@@ -105,29 +106,35 @@ static int ecb_desall_crypt(struct blkcipher_desc *desc, long func,
}
static int cbc_desall_crypt(struct blkcipher_desc *desc, long func,
- u8 *iv, struct blkcipher_walk *walk)
+ struct blkcipher_walk *walk)
{
+ struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
int ret = blkcipher_walk_virt(desc, walk);
unsigned int nbytes = walk->nbytes;
+ struct {
+ u8 iv[DES_BLOCK_SIZE];
+ u8 key[DES3_KEY_SIZE];
+ } param;
if (!nbytes)
goto out;
- memcpy(iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.iv, walk->iv, DES_BLOCK_SIZE);
+ memcpy(param.key, ctx->key, DES3_KEY_SIZE);
do {
/* only use complete blocks */
unsigned int n = nbytes & ~(DES_BLOCK_SIZE - 1);
u8 *out = walk->dst.virt.addr;
u8 *in = walk->src.virt.addr;
- ret = crypt_s390_kmc(func, iv, out, in, n);
+ ret = crypt_s390_kmc(func, &param, out, in, n);
if (ret < 0 || ret != n)
return -EIO;
nbytes &= DES_BLOCK_SIZE - 1;
ret = blkcipher_walk_done(desc, walk, nbytes);
} while ((nbytes = walk->nbytes));
- memcpy(walk->iv, iv, DES_BLOCK_SIZE);
+ memcpy(walk->iv, param.iv, DES_BLOCK_SIZE);
out:
return ret;
@@ -179,22 +186,20 @@ static int cbc_des_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_ENCRYPT, &walk);
}
static int cbc_des_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_DEA_DECRYPT, &walk);
}
static struct crypto_alg cbc_des_alg = {
@@ -327,22 +332,20 @@ static int cbc_des3_encrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_ENCRYPT, &walk);
}
static int cbc_des3_decrypt(struct blkcipher_desc *desc,
struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
- struct s390_des_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
struct blkcipher_walk walk;
blkcipher_walk_init(&walk, dst, src, nbytes);
- return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, ctx->iv, &walk);
+ return cbc_desall_crypt(desc, KMC_TDEA_192_DECRYPT, &walk);
}
static struct crypto_alg cbc_des3_alg = {
@@ -366,54 +369,80 @@ static struct crypto_alg cbc_des3_alg = {
}
};
+static unsigned int __ctrblk_init(u8 *ctrptr, unsigned int nbytes)
+{
+ unsigned int i, n;
+
+ /* align to block size, max. PAGE_SIZE */
+ n = (nbytes > PAGE_SIZE) ? PAGE_SIZE : nbytes & ~(DES_BLOCK_SIZE - 1);
+ for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
+ memcpy(ctrptr + i, ctrptr + i - DES_BLOCK_SIZE, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr + i, DES_BLOCK_SIZE);
+ }
+ return n;
+}
+
static int ctr_desall_crypt(struct blkcipher_desc *desc, long func,
- struct s390_des_ctx *ctx, struct blkcipher_walk *walk)
+ struct s390_des_ctx *ctx,
+ struct blkcipher_walk *walk)
{
int ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
- unsigned int i, n, nbytes;
- u8 buf[DES_BLOCK_SIZE];
- u8 *out, *in;
+ unsigned int n, nbytes;
+ u8 buf[DES_BLOCK_SIZE], ctrbuf[DES_BLOCK_SIZE];
+ u8 *out, *in, *ctrptr = ctrbuf;
+
+ if (!walk->nbytes)
+ return ret;
- memcpy(ctrblk, walk->iv, DES_BLOCK_SIZE);
+ if (spin_trylock(&ctrblk_lock))
+ ctrptr = ctrblk;
+
+ memcpy(ctrptr, walk->iv, DES_BLOCK_SIZE);
while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
while (nbytes >= DES_BLOCK_SIZE) {
- /* align to block size, max. PAGE_SIZE */
- n = (nbytes > PAGE_SIZE) ? PAGE_SIZE :
- nbytes & ~(DES_BLOCK_SIZE - 1);
- for (i = DES_BLOCK_SIZE; i < n; i += DES_BLOCK_SIZE) {
- memcpy(ctrblk + i, ctrblk + i - DES_BLOCK_SIZE,
- DES_BLOCK_SIZE);
- crypto_inc(ctrblk + i, DES_BLOCK_SIZE);
- }
- ret = crypt_s390_kmctr(func, ctx->key, out, in, n, ctrblk);
- if (ret < 0 || ret != n)
+ if (ctrptr == ctrblk)
+ n = __ctrblk_init(ctrptr, nbytes);
+ else
+ n = DES_BLOCK_SIZE;
+ ret = crypt_s390_kmctr(func, ctx->key, out, in,
+ n, ctrptr);
+ if (ret < 0 || ret != n) {
+ if (ctrptr == ctrblk)
+ spin_unlock(&ctrblk_lock);
return -EIO;
+ }
if (n > DES_BLOCK_SIZE)
- memcpy(ctrblk, ctrblk + n - DES_BLOCK_SIZE,
+ memcpy(ctrptr, ctrptr + n - DES_BLOCK_SIZE,
DES_BLOCK_SIZE);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrptr, DES_BLOCK_SIZE);
out += n;
in += n;
nbytes -= n;
}
ret = blkcipher_walk_done(desc, walk, nbytes);
}
-
+ if (ctrptr == ctrblk) {
+ if (nbytes)
+ memcpy(ctrbuf, ctrptr, DES_BLOCK_SIZE);
+ else
+ memcpy(walk->iv, ctrptr, DES_BLOCK_SIZE);
+ spin_unlock(&ctrblk_lock);
+ }
/* final block may be < DES_BLOCK_SIZE, copy only nbytes */
if (nbytes) {
out = walk->dst.virt.addr;
in = walk->src.virt.addr;
ret = crypt_s390_kmctr(func, ctx->key, buf, in,
- DES_BLOCK_SIZE, ctrblk);
+ DES_BLOCK_SIZE, ctrbuf);
if (ret < 0 || ret != DES_BLOCK_SIZE)
return -EIO;
memcpy(out, buf, nbytes);
- crypto_inc(ctrblk, DES_BLOCK_SIZE);
+ crypto_inc(ctrbuf, DES_BLOCK_SIZE);
ret = blkcipher_walk_done(desc, walk, 0);
+ memcpy(walk->iv, ctrbuf, DES_BLOCK_SIZE);
}
- memcpy(walk->iv, ctrblk, DES_BLOCK_SIZE);
return ret;
}
diff --git a/arch/s390/kernel/compat_wrapper.S b/arch/s390/kernel/compat_wrapper.S
index 59c8efce1b99..0248949a756d 100644
--- a/arch/s390/kernel/compat_wrapper.S
+++ b/arch/s390/kernel/compat_wrapper.S
@@ -1421,5 +1421,5 @@ ENTRY(sys_sched_setattr_wrapper)
ENTRY(sys_sched_getattr_wrapper)
lgfr %r2,%r2 # pid_t
llgtr %r3,%r3 # const char __user *
- llgfr %r3,%r3 # unsigned int
+ llgfr %r4,%r4 # unsigned int
jg sys_sched_getattr
diff --git a/arch/s390/kernel/head64.S b/arch/s390/kernel/head64.S
index b9e25ae2579c..d7c00507568a 100644
--- a/arch/s390/kernel/head64.S
+++ b/arch/s390/kernel/head64.S
@@ -59,7 +59,7 @@ ENTRY(startup_continue)
.quad 0 # cr12: tracing off
.quad 0 # cr13: home space segment table
.quad 0xc0000000 # cr14: machine check handling off
- .quad 0 # cr15: linkage stack operations
+ .quad .Llinkage_stack # cr15: linkage stack operations
.Lpcmsk:.quad 0x0000000180000000
.L4malign:.quad 0xffffffffffc00000
.Lscan2g:.quad 0x80000000 + 0x20000 - 8 # 2GB + 128K - 8
@@ -67,12 +67,15 @@ ENTRY(startup_continue)
.Lparmaddr:
.quad PARMAREA
.align 64
-.Lduct: .long 0,0,0,0,.Lduald,0,0,0
+.Lduct: .long 0,.Laste,.Laste,0,.Lduald,0,0,0
.long 0,0,0,0,0,0,0,0
+.Laste: .quad 0,0xffffffffffffffff,0,0,0,0,0,0
.align 128
.Lduald:.rept 8
.long 0x80000000,0,0,0 # invalid access-list entries
.endr
+.Llinkage_stack:
+ .long 0,0,0x89000000,0,0,0,0x8a000000,0
ENTRY(_ehead)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index a90d45e9dfb0..27c50f4d90cb 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -12,6 +12,8 @@
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
+#include <asm/setup.h>
+#include <asm/ipl.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
@@ -41,6 +43,14 @@ void __init cmma_init(void)
if (!cmma_flag)
return;
+ /*
+ * Disable CMM for dump, otherwise the tprot based memory
+ * detection can fail because of unstable pages.
+ */
+ if (OLDMEM_BASE || ipl_info.type == IPL_TYPE_FCP_DUMP) {
+ cmma_flag = 0;
+ return;
+ }
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 60c11a629d96..f91c03119804 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -206,11 +206,13 @@ static void dma_cleanup_tables(struct zpci_dev *zdev)
zdev->dma_table = NULL;
}
-static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev, unsigned long start,
- int size)
+static unsigned long __dma_alloc_iommu(struct zpci_dev *zdev,
+ unsigned long start, int size)
{
- unsigned long boundary_size = 0x1000000;
+ unsigned long boundary_size;
+ boundary_size = ALIGN(dma_get_seg_boundary(&zdev->pdev->dev) + 1,
+ PAGE_SIZE) >> PAGE_SHIFT;
return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
start, size, 0, boundary_size, 0);
}
diff --git a/arch/sh/include/cpu-sh2/cpu/cache.h b/arch/sh/include/cpu-sh2/cpu/cache.h
index 673515bc4135..aa1b2b9088a7 100644
--- a/arch/sh/include/cpu-sh2/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2/cpu/cache.h
@@ -18,7 +18,7 @@
#define SH_CACHE_ASSOC 8
#if defined(CONFIG_CPU_SUBTYPE_SH7619)
-#define CCR 0xffffffec
+#define SH_CCR 0xffffffec
#define CCR_CACHE_CE 0x01 /* Cache enable */
#define CCR_CACHE_WT 0x02 /* CCR[bit1=1,bit2=1] */
diff --git a/arch/sh/include/cpu-sh2a/cpu/cache.h b/arch/sh/include/cpu-sh2a/cpu/cache.h
index defb0baa5a06..b27ce92cb600 100644
--- a/arch/sh/include/cpu-sh2a/cpu/cache.h
+++ b/arch/sh/include/cpu-sh2a/cpu/cache.h
@@ -17,8 +17,8 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xfffc1000 /* CCR1 */
-#define CCR2 0xfffc1004
+#define SH_CCR 0xfffc1000 /* CCR1 */
+#define SH_CCR2 0xfffc1004
/*
* Most of the SH-2A CCR1 definitions resemble the SH-4 ones. All others not
diff --git a/arch/sh/include/cpu-sh3/cpu/cache.h b/arch/sh/include/cpu-sh3/cpu/cache.h
index bee2d81c56bf..29700fd88c75 100644
--- a/arch/sh/include/cpu-sh3/cpu/cache.h
+++ b/arch/sh/include/cpu-sh3/cpu/cache.h
@@ -17,7 +17,7 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xffffffec /* Address of Cache Control Register */
+#define SH_CCR 0xffffffec /* Address of Cache Control Register */
#define CCR_CACHE_CE 0x01 /* Cache Enable */
#define CCR_CACHE_WT 0x02 /* Write-Through (for P0,U0,P3) (else writeback) */
diff --git a/arch/sh/include/cpu-sh4/cpu/cache.h b/arch/sh/include/cpu-sh4/cpu/cache.h
index 7bfb9e8b069c..92c4cd119b66 100644
--- a/arch/sh/include/cpu-sh4/cpu/cache.h
+++ b/arch/sh/include/cpu-sh4/cpu/cache.h
@@ -17,7 +17,7 @@
#define SH_CACHE_COMBINED 4
#define SH_CACHE_ASSOC 8
-#define CCR 0xff00001c /* Address of Cache Control Register */
+#define SH_CCR 0xff00001c /* Address of Cache Control Register */
#define CCR_CACHE_OCE 0x0001 /* Operand Cache Enable */
#define CCR_CACHE_WT 0x0002 /* Write-Through (for P0,U0,P3) (else writeback)*/
#define CCR_CACHE_CB 0x0004 /* Copy-Back (for P1) (else writethrough) */
diff --git a/arch/sh/kernel/cpu/init.c b/arch/sh/kernel/cpu/init.c
index ecf83cd158dc..0d7360d549c1 100644
--- a/arch/sh/kernel/cpu/init.c
+++ b/arch/sh/kernel/cpu/init.c
@@ -112,7 +112,7 @@ static void cache_init(void)
unsigned long ccr, flags;
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* At this point we don't know whether the cache is enabled or not - a
@@ -189,7 +189,7 @@ static void cache_init(void)
l2_cache_init();
- __raw_writel(flags, CCR);
+ __raw_writel(flags, SH_CCR);
back_to_cached();
}
#else
diff --git a/arch/sh/mm/cache-debugfs.c b/arch/sh/mm/cache-debugfs.c
index 115725198038..777e50f33c00 100644
--- a/arch/sh/mm/cache-debugfs.c
+++ b/arch/sh/mm/cache-debugfs.c
@@ -36,7 +36,7 @@ static int cache_seq_show(struct seq_file *file, void *iter)
*/
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
if ((ccr & CCR_CACHE_ENABLE) == 0) {
back_to_cached();
diff --git a/arch/sh/mm/cache-sh2.c b/arch/sh/mm/cache-sh2.c
index defcf719f2e8..a74259f2f981 100644
--- a/arch/sh/mm/cache-sh2.c
+++ b/arch/sh/mm/cache-sh2.c
@@ -63,9 +63,9 @@ static void sh2__flush_invalidate_region(void *start, int size)
local_irq_save(flags);
jump_to_uncached();
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
ccr |= CCR_CACHE_INVALIDATE;
- __raw_writel(ccr, CCR);
+ __raw_writel(ccr, SH_CCR);
back_to_cached();
local_irq_restore(flags);
diff --git a/arch/sh/mm/cache-sh2a.c b/arch/sh/mm/cache-sh2a.c
index 949e2d3138a0..ee87d081259b 100644
--- a/arch/sh/mm/cache-sh2a.c
+++ b/arch/sh/mm/cache-sh2a.c
@@ -134,7 +134,8 @@ static void sh2a__flush_invalidate_region(void *start, int size)
/* If there are too many pages then just blow the cache */
if (((end - begin) >> PAGE_SHIFT) >= MAX_OCACHE_PAGES) {
- __raw_writel(__raw_readl(CCR) | CCR_OCACHE_INVALIDATE, CCR);
+ __raw_writel(__raw_readl(SH_CCR) | CCR_OCACHE_INVALIDATE,
+ SH_CCR);
} else {
for (v = begin; v < end; v += L1_CACHE_BYTES)
sh2a_invalidate_line(CACHE_OC_ADDRESS_ARRAY, v);
@@ -167,7 +168,8 @@ static void sh2a_flush_icache_range(void *args)
/* I-Cache invalidate */
/* If there are too many pages then just blow the cache */
if (((end - start) >> PAGE_SHIFT) >= MAX_ICACHE_PAGES) {
- __raw_writel(__raw_readl(CCR) | CCR_ICACHE_INVALIDATE, CCR);
+ __raw_writel(__raw_readl(SH_CCR) | CCR_ICACHE_INVALIDATE,
+ SH_CCR);
} else {
for (v = start; v < end; v += L1_CACHE_BYTES)
sh2a_invalidate_line(CACHE_IC_ADDRESS_ARRAY, v);
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 0e529285b28d..51d8f7f31d1d 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -133,9 +133,9 @@ static void flush_icache_all(void)
jump_to_uncached();
/* Flush I-cache */
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
ccr |= CCR_CACHE_ICI;
- __raw_writel(ccr, CCR);
+ __raw_writel(ccr, SH_CCR);
/*
* back_to_cached() will take care of the barrier for us, don't add
diff --git a/arch/sh/mm/cache-shx3.c b/arch/sh/mm/cache-shx3.c
index c0adbee97b5f..24c58b7dc022 100644
--- a/arch/sh/mm/cache-shx3.c
+++ b/arch/sh/mm/cache-shx3.c
@@ -19,7 +19,7 @@ void __init shx3_cache_init(void)
{
unsigned int ccr;
- ccr = __raw_readl(CCR);
+ ccr = __raw_readl(SH_CCR);
/*
* If we've got cache aliases, resolve them in hardware.
@@ -40,5 +40,5 @@ void __init shx3_cache_init(void)
ccr |= CCR_CACHE_IBE;
#endif
- writel_uncached(ccr, CCR);
+ writel_uncached(ccr, SH_CCR);
}
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 616966a96cba..097c2cdd117f 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -285,8 +285,8 @@ void __init cpu_cache_init(void)
{
unsigned int cache_disabled = 0;
-#ifdef CCR
- cache_disabled = !(__raw_readl(CCR) & CCR_CACHE_ENABLE);
+#ifdef SH_CCR
+ cache_disabled = !(__raw_readl(SH_CCR) & CCR_CACHE_ENABLE);
#endif
compute_alias(&boot_cpu_data.icache);
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c51efdcd07a2..7d8b7e94b93b 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -27,7 +27,7 @@ config SPARC
select RTC_DRV_M48T59
select HAVE_DMA_ATTRS
select HAVE_DMA_API_DEBUG
- select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL if SPARC64
select GENERIC_IRQ_SHOW
select ARCH_WANT_IPC_PARSE_VERSION
select GENERIC_PCI_IOMAP
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 32a280ec38c1..d7b4967f8fa6 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -58,9 +58,12 @@ void arch_cpu_idle(void)
{
if (tlb_type != hypervisor) {
touch_nmi_watchdog();
+ local_irq_enable();
} else {
unsigned long pstate;
+ local_irq_enable();
+
/* The sun4v sleeping code requires that we have PSTATE.IE cleared over
* the cpu sleep hypervisor call.
*/
@@ -82,7 +85,6 @@ void arch_cpu_idle(void)
: "=&r" (pstate)
: "i" (PSTATE_IE));
}
- local_irq_enable();
}
#ifdef CONFIG_HOTPLUG_CPU
diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
index 87729fff13b9..33a17e7b3ccd 100644
--- a/arch/sparc/kernel/syscalls.S
+++ b/arch/sparc/kernel/syscalls.S
@@ -189,7 +189,8 @@ linux_sparc_syscall32:
mov %i0, %l5 ! IEU1
5: call %l7 ! CTI Group brk forced
srl %i5, 0, %o5 ! IEU1
- ba,a,pt %xcc, 3f
+ ba,pt %xcc, 3f
+ sra %o0, 0, %o0
/* Linux native system calls enter here... */
.align 32
@@ -217,7 +218,6 @@ linux_sparc_syscall:
3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0]
ret_sys_call:
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3
- sra %o0, 0, %o0
mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2
sllx %g2, 32, %g2
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 869023abe5a4..cfbe53c17b0d 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -14,6 +14,7 @@
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
+#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
@@ -62,6 +63,7 @@ extern unsigned long last_valid_pfn;
static pgd_t *srmmu_swapper_pg_dir;
const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+EXPORT_SYMBOL(sparc32_cachetlb_ops);
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 3b3a360b429a..f5d506fdddad 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -273,7 +273,7 @@ void __init pgtable_cache_init(void)
prom_halt();
}
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < ARRAY_SIZE(tsb_cache_names); i++) {
unsigned long size = 8192 << i;
const char *name = tsb_cache_names[i];
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 940e50ebfafa..0af5250d914f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -444,6 +444,7 @@ config X86_INTEL_MID
bool "Intel MID platform support"
depends on X86_32
depends on X86_EXTENDED_PLATFORM
+ depends on X86_PLATFORM_DEVICES
depends on PCI
depends on PCI_GOANY
depends on X86_IO_APIC
@@ -1051,9 +1052,9 @@ config MICROCODE_INTEL
This options enables microcode patch loading support for Intel
processors.
- For latest news and information on obtaining all the required
- Intel ingredients for this driver, check:
- <http://www.urbanmyth.org/microcode/>.
+ For the current Intel microcode data package go to
+ <https://downloadcenter.intel.com> and search for
+ 'Linux Processor Microcode Data File'.
config MICROCODE_AMD
bool "AMD microcode loading support"
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca5602c..f3aaf231b4e5 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@ config X86_USE_3DNOW
def_bool y
depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
-config X86_OOSTORE
- def_bool y
- depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
-
#
# P6_NOPs are a relatively minor optimization that require a family >=
# 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
index 0f3621ed1db6..321a52ccf63a 100644
--- a/arch/x86/Kconfig.debug
+++ b/arch/x86/Kconfig.debug
@@ -184,6 +184,7 @@ config HAVE_MMIOTRACE_SUPPORT
config X86_DECODER_SELFTEST
bool "x86 instruction decoder selftest"
depends on DEBUG_KERNEL && KPROBES
+ depends on !COMPILE_TEST
---help---
Perform x86 instruction decoder selftests at build time.
This option is useful for checking the sanity of x86 instruction
diff --git a/arch/x86/boot/compressed/aslr.c b/arch/x86/boot/compressed/aslr.c
index 90a21f430117..4dbf967da50d 100644
--- a/arch/x86/boot/compressed/aslr.c
+++ b/arch/x86/boot/compressed/aslr.c
@@ -111,7 +111,7 @@ struct mem_vector {
};
#define MEM_AVOID_MAX 5
-struct mem_vector mem_avoid[MEM_AVOID_MAX];
+static struct mem_vector mem_avoid[MEM_AVOID_MAX];
static bool mem_contains(struct mem_vector *region, struct mem_vector *item)
{
@@ -180,7 +180,7 @@ static void mem_avoid_init(unsigned long input, unsigned long input_size,
}
/* Does this memory vector overlap a known avoided area? */
-bool mem_avoid_overlap(struct mem_vector *img)
+static bool mem_avoid_overlap(struct mem_vector *img)
{
int i;
@@ -192,8 +192,9 @@ bool mem_avoid_overlap(struct mem_vector *img)
return false;
}
-unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET / CONFIG_PHYSICAL_ALIGN];
-unsigned long slot_max = 0;
+static unsigned long slots[CONFIG_RANDOMIZE_BASE_MAX_OFFSET /
+ CONFIG_PHYSICAL_ALIGN];
+static unsigned long slot_max;
static void slots_append(unsigned long addr)
{
diff --git a/arch/x86/include/asm/amd_nb.h b/arch/x86/include/asm/amd_nb.h
index a54ee1d054d9..aaac3b2fb746 100644
--- a/arch/x86/include/asm/amd_nb.h
+++ b/arch/x86/include/asm/amd_nb.h
@@ -19,7 +19,7 @@ extern int amd_cache_northbridges(void);
extern void amd_flush_garts(void);
extern int amd_numa_init(void);
extern int amd_get_subcaches(int);
-extern int amd_set_subcaches(int, int);
+extern int amd_set_subcaches(int, unsigned long);
struct amd_l3_cache {
unsigned indices;
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a48903b2eb..69bbb4845020 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
#else
# define smp_rmb() barrier()
#endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb() wmb()
-#else
-# define smp_wmb() barrier()
-#endif
+#define smp_wmb() barrier()
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
#else /* !SMP */
@@ -100,7 +96,7 @@
#define set_mb(var, value) do { var = value; barrier(); } while (0)
#endif /* SMP */
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
/*
* For either of these options x86 doesn't have a strong TSO memory
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3b978c472d08..acd86c850414 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -132,6 +132,9 @@ extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
extern void efi_sync_low_kernel_mappings(void);
extern void efi_setup_page_tables(void);
extern void __init old_map_region(efi_memory_desc_t *md);
+extern void __init runtime_code_page_mkexec(void);
+extern void __init efi_runtime_mkexec(void);
+extern void __init efi_apply_memmap_quirks(void);
struct efi_setup_data {
u64 fw_vendor;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb9350a..91d9c69a629e 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
static inline void flush_write_buffers(void)
{
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
asm volatile("lock; addl $0,0(%%esp)": : :"memory");
#endif
}
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index bbc8b12fa443..5ad38ad07890 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -445,10 +445,20 @@ static inline int pte_same(pte_t a, pte_t b)
return a.pte == b.pte;
}
+static inline int pteval_present(pteval_t pteval)
+{
+ /*
+ * Yes Linus, _PAGE_PROTNONE == _PAGE_NUMA. Expressing it this
+ * way clearly states that the intent is that protnone and numa
+ * hinting ptes are considered present for the purposes of
+ * pagetable operations like zapping, protection changes, gup etc.
+ */
+ return pteval & (_PAGE_PRESENT | _PAGE_PROTNONE | _PAGE_NUMA);
+}
+
static inline int pte_present(pte_t a)
{
- return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE |
- _PAGE_NUMA);
+ return pteval_present(pte_flags(a));
}
#define pte_accessible pte_accessible
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156ded74b5..0f62f5482d91 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
# define LOCK_PTR_REG "D"
#endif
-#if defined(CONFIG_X86_32) && \
- (defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
/*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
* (PPro errata 66, 92)
*/
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
index e6d90babc245..04905bfc508b 100644
--- a/arch/x86/include/asm/tlbflush.h
+++ b/arch/x86/include/asm/tlbflush.h
@@ -62,7 +62,7 @@ static inline void __flush_tlb_all(void)
static inline void __flush_tlb_one(unsigned long addr)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -93,13 +93,13 @@ static inline void __flush_tlb_one(unsigned long addr)
*/
static inline void __flush_tlb_up(void)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
}
static inline void flush_tlb_all(void)
{
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb_all();
}
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 57ae63cd6ee2..94605c0e9cee 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -66,6 +66,6 @@ extern void tsc_save_sched_clock_state(void);
extern void tsc_restore_sched_clock_state(void);
/* MSR based TSC calibration for Intel Atom SoC platforms */
-int try_msr_calibrate_tsc(unsigned long *fast_calibrate);
+unsigned long try_msr_calibrate_tsc(void);
#endif /* _ASM_X86_TSC_H */
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 787e1bb5aafc..3e276eb23d1b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -52,8 +52,7 @@ extern unsigned long set_phys_range_identity(unsigned long pfn_s,
extern int m2p_add_override(unsigned long mfn, struct page *page,
struct gnttab_map_grant_ref *kmap_op);
extern int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn);
+ struct gnttab_map_grant_ref *kmap_op);
extern struct page *m2p_find_override(unsigned long mfn);
extern unsigned long m2p_find_override_pfn(unsigned long mfn, unsigned long pfn);
@@ -122,7 +121,7 @@ static inline unsigned long mfn_to_pfn(unsigned long mfn)
pfn = m2p_find_override_pfn(mfn, ~0);
}
- /*
+ /*
* pfn is ~0 if there are no entries in the m2p for mfn or if the
* entry doesn't map back to the mfn and m2p_override doesn't have a
* valid entry for it.
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index 59554dca96ec..dec8de4e1663 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -179,7 +179,7 @@ int amd_get_subcaches(int cpu)
return (mask >> (4 * cuid)) & 0xf;
}
-int amd_set_subcaches(int cpu, int mask)
+int amd_set_subcaches(int cpu, unsigned long mask)
{
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index fd972a3e4cbb..9fa8aa051f54 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -18,7 +18,6 @@
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include <linux/bitops.h>
-#include <linux/ioport.h>
#include <linux/suspend.h>
#include <asm/e820.h>
#include <asm/io.h>
@@ -54,18 +53,6 @@ int fallback_aper_force __initdata;
int fix_aperture __initdata = 1;
-static struct resource gart_resource = {
- .name = "GART",
- .flags = IORESOURCE_MEM,
-};
-
-static void __init insert_aperture_resource(u32 aper_base, u32 aper_size)
-{
- gart_resource.start = aper_base;
- gart_resource.end = aper_base + aper_size - 1;
- insert_resource(&iomem_resource, &gart_resource);
-}
-
/* This code runs before the PCI subsystem is initialized, so just
access the northbridge directly. */
@@ -96,7 +83,6 @@ static u32 __init allocate_aperture(void)
memblock_reserve(addr, aper_size);
printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
aper_size >> 10, addr);
- insert_aperture_resource((u32)addr, aper_size);
register_nosave_region(addr >> PAGE_SHIFT,
(addr+aper_size) >> PAGE_SHIFT);
@@ -444,12 +430,8 @@ int __init gart_iommu_hole_init(void)
out:
if (!fix && !fallback_aper_force) {
- if (last_aper_base) {
- unsigned long n = (32 * 1024 * 1024) << last_aper_order;
-
- insert_aperture_resource((u32)last_aper_base, n);
+ if (last_aper_base)
return 1;
- }
return 0;
}
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index d3153e281d72..c67ffa686064 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -767,10 +767,7 @@ static unsigned int amd_size_cache(struct cpuinfo_x86 *c, unsigned int size)
static void cpu_set_tlb_flushall_shift(struct cpuinfo_x86 *c)
{
- tlb_flushall_shift = 5;
-
- if (c->x86 <= 0x11)
- tlb_flushall_shift = 4;
+ tlb_flushall_shift = 6;
}
static void cpu_detect_tlb_amd(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779edab684e..d8fba5c15fbd 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
#include "cpu.h"
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 power2(u32 x)
-{
- u32 s = 1;
-
- while (s <= x)
- s <<= 1;
-
- return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
- u32 lo, hi;
-
- hi = base & ~0xFFF;
- lo = ~(size-1); /* Size is a power of 2 so this makes a mask */
- lo &= ~0xFFF; /* Remove the ctrl value bits */
- lo |= key; /* Attribute we wish to set */
- wrmsr(reg+MSR_IDT_MCR0, lo, hi);
- mtrr_centaur_report_mcr(reg, lo, hi); /* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 ramtop(void)
-{
- u32 clip = 0xFFFFFFFFUL;
- u32 top = 0;
- int i;
-
- for (i = 0; i < e820.nr_map; i++) {
- unsigned long start, end;
-
- if (e820.map[i].addr > 0xFFFFFFFFUL)
- continue;
- /*
- * Don't MCR over reserved space. Ignore the ISA hole
- * we frob around that catastrophe already
- */
- if (e820.map[i].type == E820_RESERVED) {
- if (e820.map[i].addr >= 0x100000UL &&
- e820.map[i].addr < clip)
- clip = e820.map[i].addr;
- continue;
- }
- start = e820.map[i].addr;
- end = e820.map[i].addr + e820.map[i].size;
- if (start >= end)
- continue;
- if (end > top)
- top = end;
- }
- /*
- * Everything below 'top' should be RAM except for the ISA hole.
- * Because of the limited MCR's we want to map NV/ACPI into our
- * MCR range for gunk in RAM
- *
- * Clip might cause us to MCR insufficient RAM but that is an
- * acceptable failure mode and should only bite obscure boxes with
- * a VESA hole at 15Mb
- *
- * The second case Clip sometimes kicks in is when the EBDA is marked
- * as reserved. Again we fail safe with reasonable results
- */
- if (top > clip)
- top = clip;
-
- return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int centaur_mcr_compute(int nr, int key)
-{
- u32 mem = ramtop();
- u32 root = power2(mem);
- u32 base = root;
- u32 top = root;
- u32 floor = 0;
- int ct = 0;
-
- while (ct < nr) {
- u32 fspace = 0;
- u32 high;
- u32 low;
-
- /*
- * Find the largest block we will fill going upwards
- */
- high = power2(mem-top);
-
- /*
- * Find the largest block we will fill going downwards
- */
- low = base/2;
-
- /*
- * Don't fill below 1Mb going downwards as there
- * is an ISA hole in the way.
- */
- if (base <= 1024*1024)
- low = 0;
-
- /*
- * See how much space we could cover by filling below
- * the ISA hole
- */
-
- if (floor == 0)
- fspace = 512*1024;
- else if (floor == 512*1024)
- fspace = 128*1024;
-
- /* And forget ROM space */
-
- /*
- * Now install the largest coverage we get
- */
- if (fspace > high && fspace > low) {
- centaur_mcr_insert(ct, floor, fspace, key);
- floor += fspace;
- } else if (high > low) {
- centaur_mcr_insert(ct, top, high, key);
- top += high;
- } else if (low > 0) {
- base -= low;
- centaur_mcr_insert(ct, base, low, key);
- } else
- break;
- ct++;
- }
- /*
- * We loaded ct values. We now need to set the mask. The caller
- * must do this bit.
- */
- return ct;
-}
-
-static void centaur_create_optimal_mcr(void)
-{
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining and weak write ordered.
- *
- * To experiment with: Linux never uses stack operations for
- * mmio spaces so we could globally enable stack operation wc
- *
- * Load the registers with type 31 - full write combining, all
- * writes weakly ordered.
- */
- used = centaur_mcr_compute(6, 31);
-
- /*
- * Wipe unused MCRs
- */
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void winchip2_create_optimal_mcr(void)
-{
- u32 lo, hi;
- int used;
- int i;
-
- /*
- * Allocate up to 6 mcrs to mark as much of ram as possible
- * as write combining, weak store ordered.
- *
- * Load the registers with type 25
- * 8 - weak write ordering
- * 16 - weak read ordering
- * 1 - write combining
- */
- used = centaur_mcr_compute(6, 25);
-
- /*
- * Mark the registers we are using.
- */
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- for (i = 0; i < used; i++)
- lo |= 1<<(9+i);
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
- /*
- * Wipe unused MCRs
- */
-
- for (i = used; i < 8; i++)
- wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void winchip2_unprotect_mcr(void)
-{
- u32 lo, hi;
- u32 key;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- key = (lo>>17) & 7;
- lo |= key<<6; /* replace with unlock key */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void winchip2_protect_mcr(void)
-{
- u32 lo, hi;
-
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- lo &= ~0x1C0; /* blank bits 8-6 */
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
#define ACE_PRESENT (1 << 6)
#define ACE_ENABLED (1 << 7)
#define ACE_FCR (1 << 28) /* MSR_VIA_FCR */
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c)
fcr_clr = DPDC;
printk(KERN_NOTICE "Disabling bugged TSC.\n");
clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
- centaur_create_optimal_mcr();
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- *
- * The C6 original lacks weak read order
- *
- * Note 0x120 is write only on Winchip 1
- */
- wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
break;
case 8:
switch (c->x86_mask) {
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c)
fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
E2MMX|EAMD3D;
fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
break;
case 9:
name = "3";
fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
E2MMX|EAMD3D;
fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
- winchip2_unprotect_mcr();
- winchip2_create_optimal_mcr();
- rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
- /*
- * Enable:
- * write combining on non-stack, non-string
- * write combining on string, all types
- * weak write ordering
- */
- lo |= 31;
- wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
- winchip2_protect_mcr();
-#endif
break;
default:
name = "??";
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 24b6fd10625a..8e28bf2fc3ef 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -284,8 +284,13 @@ static __always_inline void setup_smap(struct cpuinfo_x86 *c)
raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);
- if (cpu_has(c, X86_FEATURE_SMAP))
+ if (cpu_has(c, X86_FEATURE_SMAP)) {
+#ifdef CONFIG_X86_SMAP
set_in_cr4(X86_CR4_SMAP);
+#else
+ clear_in_cr4(X86_CR4_SMAP);
+#endif
+ }
}
/*
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 3db61c644e44..5cd9bfabd645 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -640,21 +640,17 @@ static void intel_tlb_flushall_shift_set(struct cpuinfo_x86 *c)
case 0x61d: /* six-core 45 nm xeon "Dunnington" */
tlb_flushall_shift = -1;
break;
+ case 0x63a: /* Ivybridge */
+ tlb_flushall_shift = 2;
+ break;
case 0x61a: /* 45 nm nehalem, "Bloomfield" */
case 0x61e: /* 45 nm nehalem, "Lynnfield" */
case 0x625: /* 32 nm nehalem, "Clarkdale" */
case 0x62c: /* 32 nm nehalem, "Gulftown" */
case 0x62e: /* 45 nm nehalem-ex, "Beckton" */
case 0x62f: /* 32 nm Xeon E7 */
- tlb_flushall_shift = 6;
- break;
case 0x62a: /* SandyBridge */
case 0x62d: /* SandyBridge, "Romely-EP" */
- tlb_flushall_shift = 5;
- break;
- case 0x63a: /* Ivybridge */
- tlb_flushall_shift = 1;
- break;
default:
tlb_flushall_shift = 6;
}
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 8384c0fa206f..617a9e284245 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -285,6 +285,15 @@ static void __init collect_cpu_sig_on_bsp(void *arg)
uci->cpu_sig.sig = cpuid_eax(0x00000001);
}
+
+static void __init get_bsp_sig(void)
+{
+ unsigned int bsp = boot_cpu_data.cpu_index;
+ struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
+
+ if (!uci->cpu_sig.sig)
+ smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+}
#else
void load_ucode_amd_ap(void)
{
@@ -337,31 +346,37 @@ void load_ucode_amd_ap(void)
int __init save_microcode_in_initrd_amd(void)
{
+ unsigned long cont;
enum ucode_state ret;
u32 eax;
-#ifdef CONFIG_X86_32
- unsigned int bsp = boot_cpu_data.cpu_index;
- struct ucode_cpu_info *uci = ucode_cpu_info + bsp;
-
- if (!uci->cpu_sig.sig)
- smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
+ if (!container)
+ return -EINVAL;
+#ifdef CONFIG_X86_32
+ get_bsp_sig();
+ cont = (unsigned long)container;
+#else
/*
- * Take into account the fact that the ramdisk might get relocated
- * and therefore we need to recompute the container's position in
- * virtual memory space.
+ * We need the physical address of the container for both bitness since
+ * boot_params.hdr.ramdisk_image is a physical address.
*/
- container = (u8 *)(__va((u32)relocated_ramdisk) +
- ((u32)container - boot_params.hdr.ramdisk_image));
+ cont = __pa(container);
#endif
+
+ /*
+ * Take into account the fact that the ramdisk might get relocated and
+ * therefore we need to recompute the container's position in virtual
+ * memory space.
+ */
+ if (relocated_ramdisk)
+ container = (u8 *)(__va(relocated_ramdisk) +
+ (cont - boot_params.hdr.ramdisk_image));
+
if (ucode_new_rev)
pr_info("microcode: updated early to new patch_level=0x%08x\n",
ucode_new_rev);
- if (!container)
- return -EINVAL;
-
eax = cpuid_eax(0x00000001);
eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);
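Editor's note: the hunk above recomputes the microcode container pointer after a possible ramdisk relocation by keeping the container's offset within the old image and re-applying it on top of the new virtual base. A minimal user-space sketch of that re-basing arithmetic; the buffers and names are invented for illustration and are not kernel code:

        /* Re-base a pointer into a buffer after the buffer has moved, keeping
         * the pointer's offset constant - the same arithmetic applied to the
         * microcode container when the ramdisk is relocated. */
        #include <stdio.h>
        #include <string.h>

        static char old_ramdisk[64] = "....microcode-container....";
        static char new_ramdisk[64];

        int main(void)
        {
                char *container = old_ramdisk + 4;        /* found inside the old image */
                unsigned long offset = container - old_ramdisk;

                memcpy(new_ramdisk, old_ramdisk, sizeof(new_ramdisk)); /* "relocation" */

                container = new_ramdisk + offset;         /* new base + preserved offset */
                printf("container at offset %lu: %.19s\n", offset, container);
                return 0;
        }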
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index ce2d0a2c3e4f..0e25a1bc5ab5 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -683,7 +683,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
}
/* Flush all TLBs via a mov %cr3, %reg; mov %reg, %cr3 */
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Save MTRR state */
@@ -697,7 +697,7 @@ static void prepare_set(void) __acquires(set_atomicity_lock)
static void post_set(void) __releases(set_atomicity_lock)
{
/* Flush TLBs (no need to flush caches - they are disabled) */
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
__flush_tlb();
/* Intel (P6) standard MTRRs */
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index b88645191fe5..79f9f848bee4 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1192,6 +1192,9 @@ static void x86_pmu_del(struct perf_event *event, int flags)
for (i = 0; i < cpuc->n_events; i++) {
if (event == cpuc->event_list[i]) {
+ if (i >= cpuc->n_events - cpuc->n_added)
+ --cpuc->n_added;
+
if (x86_pmu.put_event_constraints)
x86_pmu.put_event_constraints(cpuc, event);
@@ -1521,6 +1524,8 @@ static int __init init_hw_perf_events(void)
pr_cont("%s PMU driver.\n", x86_pmu.name);
+ x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
+
for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
quirk->func();
@@ -1534,7 +1539,6 @@ static int __init init_hw_perf_events(void)
__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
0, x86_pmu.num_counters, 0, 0);
- x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
x86_pmu_format_group.attrs = x86_pmu.format_attrs;
if (x86_pmu.event_attrs)
@@ -1820,9 +1824,12 @@ static ssize_t set_attr_rdpmc(struct device *cdev,
if (ret)
return ret;
+ if (x86_pmu.attr_rdpmc_broken)
+ return -ENOTSUPP;
+
if (!!val != !!x86_pmu.attr_rdpmc) {
x86_pmu.attr_rdpmc = !!val;
- smp_call_function(change_rdpmc, (void *)val, 1);
+ on_each_cpu(change_rdpmc, (void *)val, 1);
}
return count;
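Editor's note: in the set_attr_rdpmc() hunk above, smp_call_function() is replaced by on_each_cpu() so the CPU writing the sysfs attribute also updates its own RDPMC enable state, not just the other CPUs. A user-space sketch of that semantic difference, with loops standing in for cross-CPU calls and every name invented for the example:

        #include <stdio.h>

        #define NR_CPUS 4

        static void change_rdpmc(int cpu, int enable)
        {
                printf("cpu%d: rdpmc %s\n", cpu, enable ? "on" : "off");
        }

        /* old behaviour: the calling CPU is skipped */
        static void smp_call_function_sketch(int self, int enable)
        {
                for (int cpu = 0; cpu < NR_CPUS; cpu++)
                        if (cpu != self)
                                change_rdpmc(cpu, enable);
        }

        /* new behaviour: the calling CPU is included */
        static void on_each_cpu_sketch(int self, int enable)
        {
                for (int cpu = 0; cpu < NR_CPUS; cpu++)
                        change_rdpmc(cpu, enable);
        }

        int main(void)
        {
                puts("old:"); smp_call_function_sketch(1, 1);
                puts("new:"); on_each_cpu_sketch(1, 1);
                return 0;
        }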
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index c1a861829d81..4972c244d0bc 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -409,6 +409,7 @@ struct x86_pmu {
/*
* sysfs attrs
*/
+ int attr_rdpmc_broken;
int attr_rdpmc;
struct attribute **format_attrs;
struct attribute **event_attrs;
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 0fa4f242f050..aa333d966886 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1361,10 +1361,8 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
intel_pmu_disable_all();
handled = intel_pmu_drain_bts_buffer();
status = intel_pmu_get_status();
- if (!status) {
- intel_pmu_enable_all(0);
- return handled;
- }
+ if (!status)
+ goto done;
loops = 0;
again:
@@ -2310,10 +2308,7 @@ __init int intel_pmu_init(void)
if (version > 1)
x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
- /*
- * v2 and above have a perf capabilities MSR
- */
- if (version > 1) {
+ if (boot_cpu_has(X86_FEATURE_PDCM)) {
u64 capabilities;
rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 29c248799ced..047f540cf3f7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -501,8 +501,11 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
@@ -1178,10 +1181,15 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0334, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4334, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0534, 0xffff, 0x4),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4534, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0934, 0xffff, 0x4),
- SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
+ SNBEP_CBO_EVENT_EXTRA_REG(0x4934, 0xffff, 0xc),
SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x10),
SNBEP_CBO_EVENT_EXTRA_REG(0x2135, 0xffff, 0x10),
@@ -3326,6 +3334,8 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
if (!pmus)
return -ENOMEM;
+ type->pmus = pmus;
+
type->unconstrainted = (struct event_constraint)
__EVENT_CONSTRAINT(0, (1ULL << type->num_counters) - 1,
0, type->num_counters, 0, 0);
@@ -3361,7 +3371,6 @@ static int __init uncore_type_init(struct intel_uncore_type *type)
}
type->pmu_group = &uncore_pmu_attr_group;
- type->pmus = pmus;
return 0;
fail:
uncore_type_exit(type);
diff --git a/arch/x86/kernel/cpu/perf_event_p6.c b/arch/x86/kernel/cpu/perf_event_p6.c
index b1e2fe115323..7c1a0c07b607 100644
--- a/arch/x86/kernel/cpu/perf_event_p6.c
+++ b/arch/x86/kernel/cpu/perf_event_p6.c
@@ -231,31 +231,49 @@ static __initconst const struct x86_pmu p6_pmu = {
};
+static __init void p6_pmu_rdpmc_quirk(void)
+{
+ if (boot_cpu_data.x86_mask < 9) {
+ /*
+ * PPro erratum 26; fixed in stepping 9 and above.
+ */
+ pr_warn("Userspace RDPMC support disabled due to a CPU erratum\n");
+ x86_pmu.attr_rdpmc_broken = 1;
+ x86_pmu.attr_rdpmc = 0;
+ }
+}
+
__init int p6_pmu_init(void)
{
+ x86_pmu = p6_pmu;
+
switch (boot_cpu_data.x86_model) {
- case 1:
- case 3: /* Pentium Pro */
- case 5:
- case 6: /* Pentium II */
- case 7:
- case 8:
- case 11: /* Pentium III */
- case 9:
- case 13:
- /* Pentium M */
+ case 1: /* Pentium Pro */
+ x86_add_quirk(p6_pmu_rdpmc_quirk);
+ break;
+
+ case 3: /* Pentium II - Klamath */
+ case 5: /* Pentium II - Deschutes */
+ case 6: /* Pentium II - Mendocino */
break;
+
+ case 7: /* Pentium III - Katmai */
+ case 8: /* Pentium III - Coppermine */
+ case 10: /* Pentium III Xeon */
+ case 11: /* Pentium III - Tualatin */
+ break;
+
+ case 9: /* Pentium M - Banias */
+ case 13: /* Pentium M - Dothan */
+ break;
+
default:
- pr_cont("unsupported p6 CPU model %d ",
- boot_cpu_data.x86_model);
+ pr_cont("unsupported p6 CPU model %d ", boot_cpu_data.x86_model);
return -ENODEV;
}
- x86_pmu = p6_pmu;
-
memcpy(hw_cache_event_ids, p6_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
-
return 0;
}
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index d4bdd253fea7..e6253195a301 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -77,8 +77,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
return addr >= start && addr < end;
}
-static int
-do_ftrace_mod_code(unsigned long ip, const void *new_code)
+static unsigned long text_ip_addr(unsigned long ip)
{
/*
* On x86_64, kernel text mappings are mapped read-only with
@@ -91,7 +90,7 @@ do_ftrace_mod_code(unsigned long ip, const void *new_code)
if (within(ip, (unsigned long)_text, (unsigned long)_etext))
ip = (unsigned long)__va(__pa_symbol(ip));
- return probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE);
+ return ip;
}
static const unsigned char *ftrace_nop_replace(void)
@@ -123,8 +122,10 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0)
return -EINVAL;
+ ip = text_ip_addr(ip);
+
/* replace the text with the new text */
- if (do_ftrace_mod_code(ip, new_code))
+ if (probe_kernel_write((void *)ip, new_code, MCOUNT_INSN_SIZE))
return -EPERM;
sync_core();
@@ -221,37 +222,51 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
return -EINVAL;
}
-int ftrace_update_ftrace_func(ftrace_func_t func)
+static unsigned long ftrace_update_func;
+
+static int update_ftrace_func(unsigned long ip, void *new)
{
- unsigned long ip = (unsigned long)(&ftrace_call);
- unsigned char old[MCOUNT_INSN_SIZE], *new;
+ unsigned char old[MCOUNT_INSN_SIZE];
int ret;
- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
- new = ftrace_call_replace(ip, (unsigned long)func);
+ memcpy(old, (void *)ip, MCOUNT_INSN_SIZE);
+
+ ftrace_update_func = ip;
+ /* Make sure the breakpoints see the ftrace_update_func update */
+ smp_wmb();
/* See comment above by declaration of modifying_ftrace_code */
atomic_inc(&modifying_ftrace_code);
ret = ftrace_modify_code(ip, old, new);
+ atomic_dec(&modifying_ftrace_code);
+
+ return ret;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned char *new;
+ int ret;
+
+ new = ftrace_call_replace(ip, (unsigned long)func);
+ ret = update_ftrace_func(ip, new);
+
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
- ret = ftrace_modify_code(ip, old, new);
+ ret = update_ftrace_func(ip, new);
}
- atomic_dec(&modifying_ftrace_code);
-
return ret;
}
static int is_ftrace_caller(unsigned long ip)
{
- if (ip == (unsigned long)(&ftrace_call) ||
- ip == (unsigned long)(&ftrace_regs_call))
+ if (ip == ftrace_update_func)
return 1;
return 0;
@@ -677,45 +692,41 @@ int __init ftrace_dyn_arch_init(void *data)
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
-static int ftrace_mod_jmp(unsigned long ip,
- int old_offset, int new_offset)
+static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{
- unsigned char code[MCOUNT_INSN_SIZE];
+ static union ftrace_code_union calc;
- if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
- return -EFAULT;
+ /* Jmp not a call (ignore the .e8) */
+ calc.e8 = 0xe9;
+ calc.offset = ftrace_calc_offset(ip + MCOUNT_INSN_SIZE, addr);
- if (code[0] != 0xe9 || old_offset != *(int *)(&code[1]))
- return -EINVAL;
+ /*
+ * ftrace external locks synchronize the access to the static variable.
+ */
+ return calc.code;
+}
- *(int *)(&code[1]) = new_offset;
+static int ftrace_mod_jmp(unsigned long ip, void *func)
+{
+ unsigned char *new;
- if (do_ftrace_mod_code(ip, &code))
- return -EPERM;
+ new = ftrace_jmp_replace(ip, (unsigned long)func);
- return 0;
+ return update_ftrace_func(ip, new);
}
int ftrace_enable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
- old_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
-
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_graph_caller);
}
int ftrace_disable_ftrace_graph_caller(void)
{
unsigned long ip = (unsigned long)(&ftrace_graph_call);
- int old_offset, new_offset;
-
- old_offset = (unsigned long)(&ftrace_graph_caller) - (ip + MCOUNT_INSN_SIZE);
- new_offset = (unsigned long)(&ftrace_stub) - (ip + MCOUNT_INSN_SIZE);
- return ftrace_mod_jmp(ip, old_offset, new_offset);
+ return ftrace_mod_jmp(ip, &ftrace_stub);
}
#endif /* !CONFIG_DYNAMIC_FTRACE */
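Editor's note: ftrace_jmp_replace() above builds a 5-byte relative jmp, opcode 0xe9 followed by a 32-bit displacement measured from the end of the instruction. A small stand-alone sketch of that encoding; the addresses are made up and the code is illustrative only:

        #include <stdio.h>
        #include <stdint.h>

        int main(void)
        {
                uint64_t ip   = 0xffffffff81000010ULL;    /* site of the jmp instruction */
                uint64_t dest = 0xffffffff81000400ULL;    /* jump target */
                int32_t rel = (int32_t)(dest - (ip + 5)); /* relative to the next insn */

                unsigned char insn[5];
                insn[0] = 0xe9;                           /* jmp rel32 */
                insn[1] = rel & 0xff;
                insn[2] = (rel >> 8) & 0xff;
                insn[3] = (rel >> 16) & 0xff;
                insn[4] = (rel >> 24) & 0xff;

                printf("jmp rel32 = %d, bytes: %02x %02x %02x %02x %02x\n",
                       rel, insn[0], insn[1], insn[2], insn[3], insn[4]);
                return 0;
        }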
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 81ba27679f18..f36bd42d6f0c 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
/* This is global to keep gas from relaxing the jumps */
ENTRY(early_idt_handler)
cld
+
+ cmpl $2,(%esp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,%ss:early_recursion_flag
je hlt_loop
incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@ ex_entry:
pop %edx
pop %ecx
pop %eax
- addl $8,%esp /* drop vector number and error code */
decl %ss:early_recursion_flag
+is_nmi:
+ addl $8,%esp /* drop vector number and error code */
iret
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e1aabdb314c8..a468c0a65c42 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -343,6 +343,9 @@ early_idt_handlers:
ENTRY(early_idt_handler)
cld
+ cmpl $2,(%rsp) # X86_TRAP_NMI
+ je is_nmi # Ignore NMI
+
cmpl $2,early_recursion_flag(%rip)
jz 1f
incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
popq %rdx
popq %rcx
popq %rax
- addq $16,%rsp # drop vector number and error code
decl early_recursion_flag(%rip)
+is_nmi:
+ addq $16,%rsp # drop vector number and error code
INTERRUPT_RETURN
ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e8368c6dd2a2..d5dd80814419 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
void __kernel_fpu_end(void)
{
- if (use_eager_fpu())
- math_state_restore();
- else
+ if (use_eager_fpu()) {
+ /*
+ * For eager fpu, most of the time tsk_used_math() is true.
+ * Restore the user math as we are done with the kernel usage.
+ * In a few instances, such as during thread exit and signal
+ * handling, tsk_used_math() is false; those places take the
+ * proper actions, so we don't need to restore the math here.
+ */
+ if (likely(tsk_used_math(current)))
+ math_state_restore();
+ } else {
stts();
+ }
}
EXPORT_SYMBOL(__kernel_fpu_end);
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index dbb60878b744..d99f31d9a750 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -266,6 +266,14 @@ __visible void smp_trace_x86_platform_ipi(struct pt_regs *regs)
EXPORT_SYMBOL_GPL(vector_used_by_percpu_irq);
#ifdef CONFIG_HOTPLUG_CPU
+
+/* These two declarations are only used in check_irq_vectors_for_cpu_disable()
+ * below, which is protected by stop_machine(). Putting them on the stack
+ * results in a stack frame overflow. Dynamically allocating could result in a
+ * failure so declare these two cpumasks as global.
+ */
+static struct cpumask affinity_new, online_new;
+
/*
* This cpu is going to be removed and its vectors migrated to the remaining
* online cpus. Check to see if there are enough vectors in the remaining cpus.
@@ -277,7 +285,6 @@ int check_irq_vectors_for_cpu_disable(void)
unsigned int this_cpu, vector, this_count, count;
struct irq_desc *desc;
struct irq_data *data;
- struct cpumask affinity_new, online_new;
this_cpu = smp_processor_id();
cpumask_copy(&online_new, cpu_online_mask);
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 4eabc160696f..679cef0791cd 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -279,5 +279,7 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(node_data);
VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
#endif
+ vmcoreinfo_append_str("KERNELOFFSET=%lx\n",
+ (unsigned long)&_text - __START_KERNEL);
}
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 872079a67e4d..f7d0672481fd 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,8 +100,10 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
flag |= __GFP_ZERO;
again:
page = NULL;
- if (!(flag & GFP_ATOMIC))
+ /* CMA can be used only in the context which permits sleeping */
+ if (flag & __GFP_WAIT)
page = dma_alloc_from_contiguous(dev, count, get_order(size));
+ /* fallback */
if (!page)
page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
if (!page)
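Editor's note: the dma_generic_alloc_coherent() hunk above only tries CMA when __GFP_WAIT is set, i.e. when the caller may sleep, and otherwise (or on CMA failure) falls back to the normal page allocator. A user-space sketch of that try-then-fall-back pattern; the flag and both allocators are hypothetical stand-ins:

        #include <stdio.h>
        #include <stdlib.h>

        #define MAY_SLEEP 0x01   /* stand-in for __GFP_WAIT */

        static void *alloc_from_cma(size_t size)   { return malloc(size); }
        static void *alloc_from_buddy(size_t size) { return malloc(size); }

        static void *dma_alloc(size_t size, unsigned flags)
        {
                void *page = NULL;

                if (flags & MAY_SLEEP)   /* the CMA path may sleep, so gate it */
                        page = alloc_from_cma(size);
                if (!page)               /* atomic context or CMA failure: fallback */
                        page = alloc_from_buddy(size);
                return page;
        }

        int main(void)
        {
                void *p = dma_alloc(4096, 0);   /* atomic context: skips CMA */
                printf("%p\n", p);
                free(p);
                return 0;
        }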
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 04ee1e2e4c02..ff898bbf579d 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
return;
pci_read_config_dword(nb_ht, 0x60, &val);
- node = val & 7;
+ node = pcibus_to_node(dev->bus) | (val & 7);
/*
* Some hardware may return an invalid node ID,
* so check it first:
@@ -571,3 +571,40 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F5,
quirk_amd_nb_node);
#endif
+
+#ifdef CONFIG_PCI
+/*
+ * Processor does not ensure DRAM scrub read/write sequence
+ * is atomic wrt accesses to CC6 save state area. Therefore
+ * if a concurrent scrub read/write access is to the same address,
+ * the entry may appear as if it is not written. This quirk
+ * applies to Fam16h models 00h-0Fh.
+ *
+ * See "Revision Guide" for AMD F16h models 00h-0fh,
+ * document 51810 rev. 3.04, Nov 2013
+ */
+static void amd_disable_seq_and_redirect_scrub(struct pci_dev *dev)
+{
+ u32 val;
+
+ /*
+ * Suggested workaround:
+ * set D18F3x58[4:0] = 00h and set D18F3x5C[0] = 0b
+ */
+ pci_read_config_dword(dev, 0x58, &val);
+ if (val & 0x1F) {
+ val &= ~(0x1F);
+ pci_write_config_dword(dev, 0x58, val);
+ }
+
+ pci_read_config_dword(dev, 0x5C, &val);
+ if (val & BIT(0)) {
+ val &= ~BIT(0);
+ pci_write_config_dword(dev, 0x5c, val);
+ }
+}
+
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3,
+ amd_disable_seq_and_redirect_scrub);
+
+#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 06853e670354..ce72964b2f46 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p)
register_refined_jiffies(CLOCK_TICK_RATE);
#ifdef CONFIG_EFI
- /* Once setup is done above, unmap the EFI memory map on
- * mismatched firmware/kernel archtectures since there is no
- * support for runtime services.
- */
- if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
- pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
- efi_unmap_memmap();
- }
+ if (efi_enabled(EFI_BOOT))
+ efi_apply_memmap_quirks();
#endif
}
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 19e5adb49a27..cfbe99f88830 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -209,7 +209,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
* dance when its actually needed.
*/
- preempt_disable();
+ preempt_disable_notrace();
data = this_cpu_read(cyc2ns.head);
tail = this_cpu_read(cyc2ns.tail);
@@ -229,7 +229,7 @@ static inline unsigned long long cycles_2_ns(unsigned long long cyc)
if (!--data->__count)
this_cpu_write(cyc2ns.tail, data);
}
- preempt_enable();
+ preempt_enable_notrace();
return ns;
}
@@ -653,13 +653,10 @@ unsigned long native_calibrate_tsc(void)
/* Calibrate TSC using MSR for Intel Atom SoCs */
local_irq_save(flags);
- i = try_msr_calibrate_tsc(&fast_calibrate);
+ fast_calibrate = try_msr_calibrate_tsc();
local_irq_restore(flags);
- if (i >= 0) {
- if (i == 0)
- pr_warn("Fast TSC calibration using MSR failed\n");
+ if (fast_calibrate)
return fast_calibrate;
- }
local_irq_save(flags);
fast_calibrate = quick_pit_calibrate();
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 8b5434f4389f..92ae6acac8a7 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -53,7 +53,7 @@ static struct freq_desc freq_desc_tables[] = {
/* TNG */
{ 6, 0x4a, 1, { 0, FREQ_100, FREQ_133, 0, 0, 0, 0, 0 } },
/* VLV2 */
- { 6, 0x37, 1, { 0, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
+ { 6, 0x37, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_166, 0, 0, 0, 0 } },
/* ANN */
{ 6, 0x5a, 1, { FREQ_83, FREQ_100, FREQ_133, FREQ_100, 0, 0, 0, 0 } },
};
@@ -77,21 +77,18 @@ static int match_cpu(u8 family, u8 model)
/*
* Do MSR calibration only for known/supported CPUs.
- * Return values:
- * -1: CPU is unknown/unsupported for MSR based calibration
- * 0: CPU is known/supported, but calibration failed
- * 1: CPU is known/supported, and calibration succeeded
+ *
+ * Returns the calibration value or 0 if MSR calibration failed.
*/
-int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
+unsigned long try_msr_calibrate_tsc(void)
{
- int cpu_index;
u32 lo, hi, ratio, freq_id, freq;
+ unsigned long res;
+ int cpu_index;
cpu_index = match_cpu(boot_cpu_data.x86, boot_cpu_data.x86_model);
if (cpu_index < 0)
- return -1;
-
- *fast_calibrate = 0;
+ return 0;
if (freq_desc_tables[cpu_index].msr_plat) {
rdmsr(MSR_PLATFORM_INFO, lo, hi);
@@ -103,7 +100,7 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
pr_info("Maximum core-clock to bus-clock ratio: 0x%x\n", ratio);
if (!ratio)
- return 0;
+ goto fail;
/* Get FSB FREQ ID */
rdmsr(MSR_FSB_FREQ, lo, hi);
@@ -112,16 +109,19 @@ int try_msr_calibrate_tsc(unsigned long *fast_calibrate)
pr_info("Resolved frequency ID: %u, frequency: %u KHz\n",
freq_id, freq);
if (!freq)
- return 0;
+ goto fail;
/* TSC frequency = maximum resolved freq * maximum resolved bus ratio */
- *fast_calibrate = freq * ratio;
- pr_info("TSC runs at %lu KHz\n", *fast_calibrate);
+ res = freq * ratio;
+ pr_info("TSC runs at %lu KHz\n", res);
#ifdef CONFIG_X86_LOCAL_APIC
lapic_timer_frequency = (freq * 1000) / HZ;
pr_info("lapic_timer_frequency = %d\n", lapic_timer_frequency);
#endif
+ return res;
- return 1;
+fail:
+ pr_warn("Fast TSC calibration using MSR failed\n");
+ return 0;
}
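Editor's note: try_msr_calibrate_tsc() above now returns the calibrated TSC frequency in kHz directly, with 0 meaning failure, instead of the old -1/0/1 convention. A small sketch of the underlying arithmetic (TSC kHz = resolved bus frequency x maximum bus ratio); the table, ratio and frequency values below are placeholders, not the real MSR-derived numbers:

        #include <stdio.h>

        /* placeholder bus frequencies in kHz, indexed by a fictitious freq id */
        static const unsigned int bus_freq_khz[8] = {
                83300, 100000, 133300, 166700, 0, 0, 0, 0
        };

        static unsigned long calibrate(unsigned int ratio, unsigned int freq_id)
        {
                unsigned int freq = freq_id < 8 ? bus_freq_khz[freq_id] : 0;

                if (!ratio || !freq)
                        return 0;                       /* calibration failed */
                return (unsigned long)freq * ratio;     /* TSC frequency in kHz */
        }

        int main(void)
        {
                printf("ratio 20, id 1 -> %lu kHz\n", calibrate(20, 1));
                printf("ratio 20, id 6 -> %lu kHz (0 = failed)\n", calibrate(20, 6));
                return 0;
        }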
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index e50425d0f5f7..9b531351a587 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2672,6 +2672,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t v, int write,
break;
}
+ drop_large_spte(vcpu, iterator.sptep);
if (!is_shadow_present_pte(*iterator.sptep)) {
u64 base_addr = iterator.addr;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8fce027..2de1bc09a8d4 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
/* instruction emulation calls kvm_set_cr8() */
r = cr_interception(svm);
- if (irqchip_in_kernel(svm->vcpu.kvm)) {
- clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+ if (irqchip_in_kernel(svm->vcpu.kvm))
return r;
- }
if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
return r;
kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
return;
+ clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
if (irr == -1)
return;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a06f101ef64b..392752834751 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6688,7 +6688,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
else if (is_page_fault(intr_info))
return enable_ept;
else if (is_no_device(intr_info) &&
- !(nested_read_cr0(vmcs12) & X86_CR0_TS))
+ !(vmcs12->guest_cr0 & X86_CR0_TS))
return 0;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39c28f09dfd5..2b8578432d5b 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6186,7 +6186,7 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
frag->len -= len;
}
- if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
+ if (vcpu->mmio_cur_fragment >= vcpu->mmio_nr_fragments) {
vcpu->mmio_needed = 0;
/* FIXME: return into emulator if single-stepping. */
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 9d591c895803..a10c8c792161 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1001,6 +1001,12 @@ static int fault_in_kernel_space(unsigned long address)
static inline bool smap_violation(int error_code, struct pt_regs *regs)
{
+ if (!IS_ENABLED(CONFIG_X86_SMAP))
+ return false;
+
+ if (!static_cpu_has(X86_FEATURE_SMAP))
+ return false;
+
if (error_code & PF_USER)
return false;
@@ -1014,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
+ *
+ * This function must be marked noinline because both callers,
+ * {,trace_}do_page_fault(), are notrace. Keeping it an actual function
+ * guarantees there's a function trace entry.
*/
-static void __kprobes
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+static void __kprobes noinline
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+ unsigned long address)
{
struct vm_area_struct *vma;
struct task_struct *tsk;
- unsigned long address;
struct mm_struct *mm;
int fault;
unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1028,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
tsk = current;
mm = tsk->mm;
- /* Get the faulting address: */
- address = read_cr2();
-
/*
* Detect and handle instructions that would cause a page fault for
* both a tracked kernel page and a userspace page.
@@ -1087,11 +1094,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
if (unlikely(error_code & PF_RSVD))
pgtable_bad(regs, error_code, address);
- if (static_cpu_has(X86_FEATURE_SMAP)) {
- if (unlikely(smap_violation(error_code, regs))) {
- bad_area_nosemaphore(regs, error_code, address);
- return;
- }
+ if (unlikely(smap_violation(error_code, regs))) {
+ bad_area_nosemaphore(regs, error_code, address);
+ return;
}
/*
@@ -1244,32 +1249,50 @@ good_area:
up_read(&mm->mmap_sem);
}
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
+ unsigned long address = read_cr2(); /* Get the faulting address */
enum ctx_state prev_state;
+ /*
+ * We must have this function tagged with __kprobes and notrace, and it
+ * must call read_cr2() before calling anything else, to avoid invoking
+ * any kind of tracing machinery before we've observed the CR2 value.
+ *
+ * exception_{enter,exit}() contain all sorts of tracepoints.
+ */
+
prev_state = exception_enter();
- __do_page_fault(regs, error_code);
+ __do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
unsigned long error_code)
{
if (user_mode(regs))
- trace_page_fault_user(read_cr2(), regs, error_code);
+ trace_page_fault_user(address, regs, error_code);
else
- trace_page_fault_kernel(read_cr2(), regs, error_code);
+ trace_page_fault_kernel(address, regs, error_code);
}
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
+ /*
+ * The exception_enter and tracepoint processing could
+ * trigger another page faults (user space callchain
+ * reading) and destroy the original cr2 value, so read
+ * the faulting address now.
+ */
+ unsigned long address = read_cr2();
enum ctx_state prev_state;
prev_state = exception_enter();
- trace_page_fault_entries(regs, error_code);
- __do_page_fault(regs, error_code);
+ trace_page_fault_entries(address, regs, error_code);
+ __do_page_fault(regs, error_code, address);
exception_exit(prev_state);
}
+#endif /* CONFIG_TRACING */
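Editor's note: the page-fault hunks above read CR2 once, up front, because exception_enter() and the tracepoints can themselves fault (for example while unwinding a user call chain) and clobber CR2. A toy user-space sketch of why the address has to be latched first; the global variable merely stands in for the register and every name here is invented:

        #include <stdio.h>

        static unsigned long cr2;                 /* clobbered by any new "fault" */

        static void nested_fault(unsigned long addr) { cr2 = addr; }

        static void tracepoint(void)
        {
                /* e.g. user stack unwinding inside a tracepoint faults again */
                nested_fault(0xdeadbeef);
        }

        static void do_page_fault(void)
        {
                unsigned long address = cr2;      /* read first, as in the hunk above */
                tracepoint();                     /* may overwrite cr2 ... */
                printf("handled fault at %#lx\n", address); /* ... but we kept our copy */
        }

        int main(void)
        {
                nested_fault(0x1000);             /* the original fault */
                do_page_fault();
                return 0;
        }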
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 81b2750f3666..27aa0455fab3 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -493,14 +493,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
struct numa_memblk *mb = &mi->blk[i];
memblock_set_node(mb->start, mb->end - mb->start,
&memblock.memory, mb->nid);
-
- /*
- * At this time, all memory regions reserved by memblock are
- * used by the kernel. Set the nid in memblock.reserved will
- * mark out all the nodes the kernel resides in.
- */
- memblock_set_node(mb->start, mb->end - mb->start,
- &memblock.reserved, mb->nid);
}
/*
@@ -565,10 +557,21 @@ static void __init numa_init_array(void)
static void __init numa_clear_kernel_node_hotplug(void)
{
int i, nid;
- nodemask_t numa_kernel_nodes;
+ nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
unsigned long start, end;
struct memblock_type *type = &memblock.reserved;
+ /*
+ * At this time, all memory regions reserved by memblock are
+ * used by the kernel. Setting the nid in memblock.reserved
+ * marks out all the nodes the kernel resides in.
+ */
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+ memblock_set_node(mb->start, mb->end - mb->start,
+ &memblock.reserved, mb->nid);
+ }
+
/* Mark all kernel nodes. */
for (i = 0; i < type->cnt; i++)
node_set(type->regions[i].nid, numa_kernel_nodes);
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 0342d27ca798..47b6436e41c2 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -52,6 +52,8 @@ void memory_present(int nid, unsigned long start, unsigned long end)
nid, start, end);
printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
printk(KERN_DEBUG " ");
+ start = round_down(start, PAGES_PER_SECTION);
+ end = round_up(end, PAGES_PER_SECTION);
for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
physnode_map[pfn / PAGES_PER_SECTION] = nid;
printk(KERN_CONT "%lx ", pfn);
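Editor's note: the memory_present() hunk above rounds the start pfn down and the end pfn up to PAGES_PER_SECTION so that partial sections at either end are still covered by the physnode_map loop. A minimal sketch of that alignment arithmetic, assuming a power-of-two section size; the value used here is only an example:

        #include <stdio.h>

        #define PAGES_PER_SECTION 4096UL

        #define round_down(x, y) ((x) & ~((y) - 1))
        #define round_up(x, y)   (((x) + (y) - 1) & ~((y) - 1))

        int main(void)
        {
                unsigned long start = 4097, end = 12288;

                printf("start %lu -> %lu, end %lu -> %lu\n",
                       start, round_down(start, PAGES_PER_SECTION),
                       end,   round_up(end, PAGES_PER_SECTION));
                return 0;
        }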
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 1a25187e151e..1953e9c9391a 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -42,15 +42,25 @@ static __init inline int srat_disabled(void)
return acpi_numa < 0;
}
-/* Callback for SLIT parsing */
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
int i, j;
- for (i = 0; i < slit->locality_count; i++)
- for (j = 0; j < slit->locality_count; j++)
+ for (i = 0; i < slit->locality_count; i++) {
+ if (pxm_to_node(i) == NUMA_NO_NODE)
+ continue;
+ for (j = 0; j < slit->locality_count; j++) {
+ if (pxm_to_node(j) == NUMA_NO_NODE)
+ continue;
numa_set_distance(pxm_to_node(i), pxm_to_node(j),
slit->entry[slit->locality_count * i + j]);
+ }
+ }
}
/* Callback for Proximity Domain -> x2APIC mapping */
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index ae699b3bbac8..dd8dda167a24 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -103,7 +103,7 @@ static void flush_tlb_func(void *info)
if (f->flush_mm != this_cpu_read(cpu_tlbstate.active_mm))
return;
- count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK) {
if (f->flush_end == TLB_FLUSH_ALL)
local_flush_tlb();
@@ -131,7 +131,7 @@ void native_flush_tlb_others(const struct cpumask *cpumask,
info.flush_start = start;
info.flush_end = end;
- count_vm_event(NR_TLB_REMOTE_FLUSH);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
if (is_uv_system()) {
unsigned int cpu;
@@ -151,44 +151,19 @@ void flush_tlb_current_task(void)
preempt_disable();
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
if (cpumask_any_but(mm_cpumask(mm), smp_processor_id()) < nr_cpu_ids)
flush_tlb_others(mm_cpumask(mm), mm, 0UL, TLB_FLUSH_ALL);
preempt_enable();
}
-/*
- * It can find out the THP large page, or
- * HUGETLB page in tlb_flush when THP disabled
- */
-static inline unsigned long has_large_page(struct mm_struct *mm,
- unsigned long start, unsigned long end)
-{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- unsigned long addr = ALIGN(start, HPAGE_SIZE);
- for (; addr < end; addr += HPAGE_SIZE) {
- pgd = pgd_offset(mm, addr);
- if (likely(!pgd_none(*pgd))) {
- pud = pud_offset(pgd, addr);
- if (likely(!pud_none(*pud))) {
- pmd = pmd_offset(pud, addr);
- if (likely(!pmd_none(*pmd)))
- if (pmd_large(*pmd))
- return addr;
- }
- }
- }
- return 0;
-}
-
void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long vmflag)
{
unsigned long addr;
unsigned act_entries, tlb_entries = 0;
+ unsigned long nr_base_pages;
preempt_disable();
if (current->active_mm != mm)
@@ -210,21 +185,20 @@ void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
tlb_entries = tlb_lli_4k[ENTRIES];
else
tlb_entries = tlb_lld_4k[ENTRIES];
+
/* Assume all of TLB entries was occupied by this task */
- act_entries = mm->total_vm > tlb_entries ? tlb_entries : mm->total_vm;
+ act_entries = tlb_entries >> tlb_flushall_shift;
+ act_entries = mm->total_vm > act_entries ? act_entries : mm->total_vm;
+ nr_base_pages = (end - start) >> PAGE_SHIFT;
/* tlb_flushall_shift is on balance point, details in commit log */
- if ((end - start) >> PAGE_SHIFT > act_entries >> tlb_flushall_shift) {
- count_vm_event(NR_TLB_LOCAL_FLUSH_ALL);
+ if (nr_base_pages > act_entries) {
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ALL);
local_flush_tlb();
} else {
- if (has_large_page(mm, start, end)) {
- local_flush_tlb();
- goto flush_all;
- }
/* flush range by one by one 'invlpg' */
for (addr = start; addr < end; addr += PAGE_SIZE) {
- count_vm_event(NR_TLB_LOCAL_FLUSH_ONE);
+ count_vm_tlb_event(NR_TLB_LOCAL_FLUSH_ONE);
__flush_tlb_single(addr);
}
@@ -262,7 +236,7 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long start)
static void do_flush_tlb_all(void *info)
{
- count_vm_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH_RECEIVED);
__flush_tlb_all();
if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_LAZY)
leave_mm(smp_processor_id());
@@ -270,7 +244,7 @@ static void do_flush_tlb_all(void *info)
void flush_tlb_all(void)
{
- count_vm_event(NR_TLB_REMOTE_FLUSH);
+ count_vm_tlb_event(NR_TLB_REMOTE_FLUSH);
on_each_cpu(do_flush_tlb_all, NULL, 1);
}
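Editor's note: the flush_tlb_mm_range() hunk above now derives the threshold from the TLB entry count shifted by tlb_flushall_shift, clamps it by the task's total_vm, and does a full flush only when the range contains more base pages than that. A user-space sketch of the decision, using made-up numbers and a hypothetical helper rather than the kernel code:

        #include <stdio.h>

        static int full_flush(unsigned long tlb_entries, int flushall_shift,
                              unsigned long total_vm, unsigned long nr_base_pages)
        {
                unsigned long act_entries = tlb_entries >> flushall_shift;

                if (total_vm < act_entries)
                        act_entries = total_vm;
                return nr_base_pages > act_entries; /* 1: flush all, 0: invlpg loop */
        }

        int main(void)
        {
                /* e.g. a 512-entry DTLB with shift 2 gives a 128-page threshold */
                printf("flush 64 pages:  %s\n",
                       full_flush(512, 2, 100000, 64)  ? "full" : "per-page");
                printf("flush 300 pages: %s\n",
                       full_flush(512, 2, 100000, 300) ? "full" : "per-page");
                return 0;
        }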
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1b2152..01495755701b 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
push %r9; \
push SKBDATA; \
/* rsi already has offset */ \
- mov $SIZE,%ecx; /* size */ \
+ mov $SIZE,%edx; /* size */ \
call bpf_internal_load_pointer_neg_helper; \
test %rax,%rax; \
pop SKBDATA; \
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index 7145ec63c520..f15103dff4b4 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -42,14 +42,15 @@ void __init efi_bgrt_init(void)
if (bgrt_tab->header.length < sizeof(*bgrt_tab))
return;
- if (bgrt_tab->version != 1)
+ if (bgrt_tab->version != 1 || bgrt_tab->status != 1)
return;
if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address)
return;
image = efi_lookup_mapped_addr(bgrt_tab->image_address);
if (!image) {
- image = ioremap(bgrt_tab->image_address, sizeof(bmp_header));
+ image = early_memremap(bgrt_tab->image_address,
+ sizeof(bmp_header));
ioremapped = true;
if (!image)
return;
@@ -57,7 +58,7 @@ void __init efi_bgrt_init(void)
memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
if (ioremapped)
- iounmap(image);
+ early_iounmap(image, sizeof(bmp_header));
bgrt_image_size = bmp_header.size;
bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL);
@@ -65,7 +66,8 @@ void __init efi_bgrt_init(void)
return;
if (ioremapped) {
- image = ioremap(bgrt_tab->image_address, bmp_header.size);
+ image = early_memremap(bgrt_tab->image_address,
+ bmp_header.size);
if (!image) {
kfree(bgrt_image);
bgrt_image = NULL;
@@ -75,5 +77,5 @@ void __init efi_bgrt_init(void)
memcpy_fromio(bgrt_image, image, bgrt_image_size);
if (ioremapped)
- iounmap(image);
+ early_iounmap(image, bmp_header.size);
}
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index d62ec87a2b26..b97acecf3fd9 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -52,6 +52,7 @@
#include <asm/tlbflush.h>
#include <asm/x86_init.h>
#include <asm/rtc.h>
+#include <asm/uv/uv.h>
#define EFI_DEBUG
@@ -792,7 +793,7 @@ void __init efi_set_executable(efi_memory_desc_t *md, bool executable)
set_memory_nx(addr, npages);
}
-static void __init runtime_code_page_mkexec(void)
+void __init runtime_code_page_mkexec(void)
{
efi_memory_desc_t *md;
void *p;
@@ -1069,8 +1070,7 @@ void __init efi_enter_virtual_mode(void)
efi.update_capsule = virt_efi_update_capsule;
efi.query_capsule_caps = virt_efi_query_capsule_caps;
- if (efi_enabled(EFI_OLD_MEMMAP) && (__supported_pte_mask & _PAGE_NX))
- runtime_code_page_mkexec();
+ efi_runtime_mkexec();
kfree(new_memmap);
@@ -1211,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str)
return 0;
}
early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+ /*
+ * Once setup is done earlier, unmap the EFI memory map on mismatched
+ * firmware/kernel architectures since there is no support for runtime
+ * services.
+ */
+ if (!efi_is_native()) {
+ pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+ efi_unmap_memmap();
+ }
+
+ /*
+ * UV doesn't support the new EFI pagetable mapping yet.
+ */
+ if (is_uv_system())
+ set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+}
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 249b183cf417..0b74cdf7f816 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -77,3 +77,9 @@ void efi_call_phys_epilog(void)
local_irq_restore(efi_rt_eflags);
}
+
+void __init efi_runtime_mkexec(void)
+{
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
+}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 6284f158a47d..0c2a234fef1e 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -233,3 +233,12 @@ void __init parse_efi_setup(u64 phys_addr, u32 data_len)
{
efi_setup = phys_addr + sizeof(struct setup_data);
}
+
+void __init efi_runtime_mkexec(void)
+{
+ if (!efi_enabled(EFI_OLD_MEMMAP))
+ return;
+
+ if (__supported_pte_mask & _PAGE_NX)
+ runtime_code_page_mkexec();
+}
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c56c00..cc04e67bfd05 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
#define smp_rmb() barrier()
#endif /* CONFIG_X86_PPRO_FENCE */
-#ifdef CONFIG_X86_OOSTORE
-#define smp_wmb() wmb()
-#else /* CONFIG_X86_OOSTORE */
#define smp_wmb() barrier()
-#endif /* CONFIG_X86_OOSTORE */
#define smp_read_barrier_depends() read_barrier_depends()
#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index a4d7b647867f..201d09a7c46b 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1473,6 +1473,18 @@ static void xen_pvh_set_cr_flags(int cpu)
* X86_CR0_TS, X86_CR0_PE, X86_CR0_ET are set by Xen for HVM guests
* (which PVH shared codepaths), while X86_CR0_PG is for PVH. */
write_cr0(read_cr0() | X86_CR0_MP | X86_CR0_NE | X86_CR0_WP | X86_CR0_AM);
+
+ if (!cpu)
+ return;
+ /*
+ * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
+ * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
+ */
+ if (cpu_has_pse)
+ set_in_cr4(X86_CR4_PSE);
+
+ if (cpu_has_pge)
+ set_in_cr4(X86_CR4_PGE);
}
/*
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 2423ef04ffea..256282e7888b 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -365,7 +365,7 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
- if (val & _PAGE_PRESENT) {
+ if (pteval_present(val)) {
unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
unsigned long pfn = mfn_to_pfn(mfn);
@@ -381,7 +381,7 @@ static pteval_t pte_mfn_to_pfn(pteval_t val)
static pteval_t pte_pfn_to_mfn(pteval_t val)
{
- if (val & _PAGE_PRESENT) {
+ if (pteval_present(val)) {
unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
pteval_t flags = val & PTE_FLAGS_MASK;
unsigned long mfn;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 8009acbe41e4..696c694986d0 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -899,6 +899,13 @@ int m2p_add_override(unsigned long mfn, struct page *page,
"m2p_add_override: pfn %lx not mapped", pfn))
return -EINVAL;
}
+ WARN_ON(PagePrivate(page));
+ SetPagePrivate(page);
+ set_page_private(page, mfn);
+ page->index = pfn_to_mfn(pfn);
+
+ if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn))))
+ return -ENOMEM;
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
@@ -937,16 +944,19 @@ int m2p_add_override(unsigned long mfn, struct page *page,
}
EXPORT_SYMBOL_GPL(m2p_add_override);
int m2p_remove_override(struct page *page,
- struct gnttab_map_grant_ref *kmap_op,
- unsigned long mfn)
+ struct gnttab_map_grant_ref *kmap_op)
{
unsigned long flags;
+ unsigned long mfn;
unsigned long pfn;
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
pfn = page_to_pfn(page);
+ mfn = get_phys_to_machine(pfn);
+ if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT))
+ return -EINVAL;
if (!PageHighMem(page)) {
address = (unsigned long)__va(pfn << PAGE_SHIFT);
@@ -960,7 +970,10 @@ int m2p_remove_override(struct page *page,
spin_lock_irqsave(&m2p_override_lock, flags);
list_del(&page->lru);
spin_unlock_irqrestore(&m2p_override_lock, flags);
+ WARN_ON(!PagePrivate(page));
+ ClearPagePrivate(page);
+ set_phys_to_machine(pfn, page->index);
if (kmap_op != NULL) {
if (!PageHighMem(page)) {
struct multicall_space mcs;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index ba56e11cbf77..c87ae7c6e5f9 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -20,6 +20,7 @@ config XTENSA
select HAVE_FUNCTION_TRACER
select HAVE_IRQ_TIME_ACCOUNTING
select HAVE_PERF_EVENTS
+ select COMMON_CLK
help
Xtensa processors are 32-bit RISC machines designed by Tensilica
primarily for embedded systems. These processors are both
@@ -80,7 +81,6 @@ choice
config XTENSA_VARIANT_FSF
bool "fsf - default (not generic) configuration"
select MMU
- select HAVE_XTENSA_GPIO32
config XTENSA_VARIANT_DC232B
bool "dc232b - Diamond 232L Standard Core Rev.B (LE)"
@@ -135,7 +135,6 @@ config HAVE_SMP
config SMP
bool "Enable Symmetric multi-processing support"
depends on HAVE_SMP
- select USE_GENERIC_SMP_HELPERS
select GENERIC_SMP_IDLE_THREAD
help
Enabled SMP Software; allows more than one CPU/CORE
diff --git a/arch/xtensa/boot/dts/xtfpga.dtsi b/arch/xtensa/boot/dts/xtfpga.dtsi
index 46b4f5eab421..e7370b11348e 100644
--- a/arch/xtensa/boot/dts/xtfpga.dtsi
+++ b/arch/xtensa/boot/dts/xtfpga.dtsi
@@ -35,6 +35,13 @@
interrupt-controller;
};
+ clocks {
+ osc: main-oscillator {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ };
+ };
+
serial0: serial@fd050020 {
device_type = "serial";
compatible = "ns16550a";
@@ -42,9 +49,7 @@
reg = <0xfd050020 0x20>;
reg-shift = <2>;
interrupts = <0 1>; /* external irq 0 */
- /* Filled in by platform_setup from FPGA register
- * clock-frequency = <100000000>;
- */
+ clocks = <&osc>;
};
enet0: ethoc@fd030000 {
@@ -52,5 +57,6 @@
reg = <0xfd030000 0x4000 0xfd800000 0x4000>;
interrupts = <1 1>; /* external irq 1 */
local-mac-address = [00 50 c2 13 6f 00];
+ clocks = <&osc>;
};
};
diff --git a/arch/xtensa/include/asm/io.h b/arch/xtensa/include/asm/io.h
index 2a042d430c25..74944207167e 100644
--- a/arch/xtensa/include/asm/io.h
+++ b/arch/xtensa/include/asm/io.h
@@ -25,7 +25,7 @@
#ifdef CONFIG_MMU
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
extern unsigned long xtensa_kio_paddr;
static inline unsigned long xtensa_get_kio_paddr(void)
diff --git a/arch/xtensa/include/asm/traps.h b/arch/xtensa/include/asm/traps.h
index 8c194f6af45e..677bfcf4ee5d 100644
--- a/arch/xtensa/include/asm/traps.h
+++ b/arch/xtensa/include/asm/traps.h
@@ -23,25 +23,37 @@ void secondary_trap_init(void);
static inline void spill_registers(void)
{
-
+#if XCHAL_NUM_AREGS > 16
__asm__ __volatile__ (
- "movi a14, "__stringify((1 << PS_EXCM_BIT) | LOCKLEVEL)"\n\t"
- "mov a12, a0\n\t"
- "rsr a13, sar\n\t"
- "xsr a14, ps\n\t"
- "movi a0, _spill_registers\n\t"
- "rsync\n\t"
- "callx0 a0\n\t"
- "mov a0, a12\n\t"
- "wsr a13, sar\n\t"
- "wsr a14, ps\n\t"
- : :
-#if defined(CONFIG_FRAME_POINTER)
- : "a2", "a3", "a4", "a11", "a12", "a13", "a14", "a15",
+ " call12 1f\n"
+ " _j 2f\n"
+ " retw\n"
+ " .align 4\n"
+ "1:\n"
+ " _entry a1, 48\n"
+ " addi a12, a0, 3\n"
+#if XCHAL_NUM_AREGS > 32
+ " .rept (" __stringify(XCHAL_NUM_AREGS) " - 32) / 12\n"
+ " _entry a1, 48\n"
+ " mov a12, a0\n"
+ " .endr\n"
+#endif
+ " _entry a1, 48\n"
+#if XCHAL_NUM_AREGS % 12 == 0
+ " mov a8, a8\n"
+#elif XCHAL_NUM_AREGS % 12 == 4
+ " mov a12, a12\n"
+#elif XCHAL_NUM_AREGS % 12 == 8
+ " mov a4, a4\n"
+#endif
+ " retw\n"
+ "2:\n"
+ : : : "a12", "a13", "memory");
#else
- : "a2", "a3", "a4", "a7", "a11", "a12", "a13", "a14", "a15",
+ __asm__ __volatile__ (
+ " mov a12, a12\n"
+ : : : "memory");
#endif
- "memory");
}
#endif /* _XTENSA_TRAPS_H */
diff --git a/arch/xtensa/include/asm/vectors.h b/arch/xtensa/include/asm/vectors.h
index 5791b45d5a5d..f74ddfbb92ef 100644
--- a/arch/xtensa/include/asm/vectors.h
+++ b/arch/xtensa/include/asm/vectors.h
@@ -25,7 +25,7 @@
#define XCHAL_KIO_DEFAULT_PADDR 0xf0000000
#define XCHAL_KIO_SIZE 0x10000000
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
#define XCHAL_KIO_PADDR xtensa_get_kio_paddr()
#else
#define XCHAL_KIO_PADDR XCHAL_KIO_DEFAULT_PADDR
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 51940fec6990..b9395529f02d 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -734,7 +734,12 @@ __SYSCALL(332, sys_finit_module, 3)
#define __NR_accept4 333
__SYSCALL(333, sys_accept4, 4)
-#define __NR_syscall_count 334
+#define __NR_sched_setattr 334
+__SYSCALL(334, sys_sched_setattr, 2)
+#define __NR_sched_getattr 335
+__SYSCALL(335, sys_sched_getattr, 3)
+
+#define __NR_syscall_count 336
/*
* sysxtensa syscall handler
diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S
index 21dbe6bdb8ed..ef7f4990722b 100644
--- a/arch/xtensa/kernel/entry.S
+++ b/arch/xtensa/kernel/entry.S
@@ -1081,196 +1081,53 @@ ENTRY(fast_syscall_spill_registers)
rsr a0, sar
s32i a3, a2, PT_AREG3
- s32i a4, a2, PT_AREG4
- s32i a0, a2, PT_AREG5 # store SAR to PT_AREG5
+ s32i a0, a2, PT_SAR
- /* The spill routine might clobber a7, a11, and a15. */
+ /* The spill routine might clobber a4, a7, a8, a11, a12, and a15. */
+ s32i a4, a2, PT_AREG4
s32i a7, a2, PT_AREG7
+ s32i a8, a2, PT_AREG8
s32i a11, a2, PT_AREG11
+ s32i a12, a2, PT_AREG12
s32i a15, a2, PT_AREG15
- call0 _spill_registers # destroys a3, a4, and SAR
-
- /* Advance PC, restore registers and SAR, and return from exception. */
-
- l32i a3, a2, PT_AREG5
- l32i a4, a2, PT_AREG4
- l32i a0, a2, PT_AREG0
- wsr a3, sar
- l32i a3, a2, PT_AREG3
-
- /* Restore clobbered registers. */
-
- l32i a7, a2, PT_AREG7
- l32i a11, a2, PT_AREG11
- l32i a15, a2, PT_AREG15
-
- movi a2, 0
- rfe
-
-ENDPROC(fast_syscall_spill_registers)
-
-/* Fixup handler.
- *
- * We get here if the spill routine causes an exception, e.g. tlb miss.
- * We basically restore WINDOWBASE and WINDOWSTART to the condition when
- * we entered the spill routine and jump to the user exception handler.
- *
- * a0: value of depc, original value in depc
- * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
- * a3: exctable, original value in excsave1
- */
-
-ENTRY(fast_syscall_spill_registers_fixup)
-
- rsr a2, windowbase # get current windowbase (a2 is saved)
- xsr a0, depc # restore depc and a0
- ssl a2 # set shift (32 - WB)
-
- /* We need to make sure the current registers (a0-a3) are preserved.
- * To do this, we simply set the bit for the current window frame
- * in WS, so that the exception handlers save them to the task stack.
- */
-
- xsr a3, excsave1 # get spill-mask
- slli a3, a3, 1 # shift left by one
-
- slli a2, a3, 32-WSBITS
- src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
- wsr a2, windowstart # set corrected windowstart
-
- srli a3, a3, 1
- rsr a2, excsave1
- l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
- xsr a2, excsave1
- s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
- l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
- xsr a2, excsave1
-
- /* Return to the original (user task) WINDOWBASE.
- * We leave the following frame behind:
- * a0, a1, a2 same
- * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
- * depc: depc (we have to return to that address)
- * excsave_1: exctable
- */
-
- wsr a3, windowbase
- rsync
-
- /* We are now in the original frame when we entered _spill_registers:
- * a0: return address
- * a1: used, stack pointer
- * a2: kernel stack pointer
- * a3: available
- * depc: exception address
- * excsave: exctable
- * Note: This frame might be the same as above.
- */
-
- /* Setup stack pointer. */
-
- addi a2, a2, -PT_USER_SIZE
- s32i a0, a2, PT_AREG0
-
- /* Make sure we return to this fixup handler. */
-
- movi a3, fast_syscall_spill_registers_fixup_return
- s32i a3, a2, PT_DEPC # setup depc
-
- /* Jump to the exception handler. */
-
- rsr a3, excsave1
- rsr a0, exccause
- addx4 a0, a0, a3 # find entry in table
- l32i a0, a0, EXC_TABLE_FAST_USER # load handler
- l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
- jx a0
-
-ENDPROC(fast_syscall_spill_registers_fixup)
-
-ENTRY(fast_syscall_spill_registers_fixup_return)
-
- /* When we return here, all registers have been restored (a2: DEPC) */
-
- wsr a2, depc # exception address
-
- /* Restore fixup handler. */
-
- rsr a2, excsave1
- s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
- movi a3, fast_syscall_spill_registers_fixup
- s32i a3, a2, EXC_TABLE_FIXUP
- rsr a3, windowbase
- s32i a3, a2, EXC_TABLE_PARAM
- l32i a2, a2, EXC_TABLE_KSTK
-
- /* Load WB at the time the exception occurred. */
-
- rsr a3, sar # WB is still in SAR
- neg a3, a3
- wsr a3, windowbase
- rsync
-
- rsr a3, excsave1
- l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
-
- rfde
-
-ENDPROC(fast_syscall_spill_registers_fixup_return)
-
-/*
- * spill all registers.
- *
- * This is not a real function. The following conditions must be met:
- *
- * - must be called with call0.
- * - uses a3, a4 and SAR.
- * - the last 'valid' register of each frame are clobbered.
- * - the caller must have registered a fixup handler
- * (or be inside a critical section)
- * - PS_EXCM must be set (PS_WOE cleared?)
- */
-
-ENTRY(_spill_registers)
-
/*
* Rotate ws so that the current windowbase is at bit 0.
* Assume ws = xxxwww1yy (www1 current window frame).
* Rotate ws right so that a4 = yyxxxwww1.
*/
- rsr a4, windowbase
+ rsr a0, windowbase
rsr a3, windowstart # a3 = xxxwww1yy
- ssr a4 # holds WB
- slli a4, a3, WSBITS
- or a3, a3, a4 # a3 = xxxwww1yyxxxwww1yy
+ ssr a0 # holds WB
+ slli a0, a3, WSBITS
+ or a3, a3, a0 # a3 = xxxwww1yyxxxwww1yy
srl a3, a3 # a3 = 00xxxwww1yyxxxwww1
/* We are done if there are no more than the current register frame. */
extui a3, a3, 1, WSBITS-1 # a3 = 0yyxxxwww
- movi a4, (1 << (WSBITS-1))
+ movi a0, (1 << (WSBITS-1))
_beqz a3, .Lnospill # only one active frame? jump
/* We want 1 at the top, so that we return to the current windowbase */
- or a3, a3, a4 # 1yyxxxwww
+ or a3, a3, a0 # 1yyxxxwww
/* Skip empty frames - get 'oldest' WINDOWSTART-bit. */
wsr a3, windowstart # save shifted windowstart
- neg a4, a3
- and a3, a4, a3 # first bit set from right: 000010000
+ neg a0, a3
+ and a3, a0, a3 # first bit set from right: 000010000
- ffs_ws a4, a3 # a4: shifts to skip empty frames
+ ffs_ws a0, a3 # a0: shifts to skip empty frames
movi a3, WSBITS
- sub a4, a3, a4 # WSBITS-a4:number of 0-bits from right
- ssr a4 # save in SAR for later.
+ sub a0, a3, a0 # WSBITS-a0:number of 0-bits from right
+ ssr a0 # save in SAR for later.
rsr a3, windowbase
- add a3, a3, a4
+ add a3, a3, a0
wsr a3, windowbase
rsync
@@ -1285,22 +1142,6 @@ ENTRY(_spill_registers)
* we have to save 4,8. or 12 registers.
*/
- _bbsi.l a3, 1, .Lc4
- _bbsi.l a3, 2, .Lc8
-
- /* Special case: we have a call12-frame starting at a4. */
-
- _bbci.l a3, 3, .Lc12 # bit 3 shouldn't be zero! (Jump to Lc12 first)
-
- s32e a4, a1, -16 # a1 is valid with an empty spill area
- l32e a4, a5, -12
- s32e a8, a4, -48
- mov a8, a4
- l32e a4, a1, -16
- j .Lc12c
-
-.Lnospill:
- ret
.Lloop: _bbsi.l a3, 1, .Lc4
_bbci.l a3, 2, .Lc12
@@ -1314,20 +1155,10 @@ ENTRY(_spill_registers)
s32e a9, a4, -28
s32e a10, a4, -24
s32e a11, a4, -20
-
srli a11, a3, 2 # shift windowbase by 2
rotw 2
_bnei a3, 1, .Lloop
-
-.Lexit: /* Done. Do the final rotation, set WS, and return. */
-
- rotw 1
- rsr a3, windowbase
- ssl a3
- movi a3, 1
- sll a3, a3
- wsr a3, windowstart
- ret
+ j .Lexit
.Lc4: s32e a4, a9, -16
s32e a5, a9, -12
@@ -1343,11 +1174,11 @@ ENTRY(_spill_registers)
/* 12-register frame (call12) */
- l32e a2, a5, -12
- s32e a8, a2, -48
- mov a8, a2
+ l32e a0, a5, -12
+ s32e a8, a0, -48
+ mov a8, a0
-.Lc12c: s32e a9, a8, -44
+ s32e a9, a8, -44
s32e a10, a8, -40
s32e a11, a8, -36
s32e a12, a8, -32
@@ -1367,30 +1198,54 @@ ENTRY(_spill_registers)
*/
rotw 1
- mov a5, a13
+ mov a4, a13
rotw -1
- s32e a4, a9, -16
- s32e a5, a9, -12
- s32e a6, a9, -8
- s32e a7, a9, -4
+ s32e a4, a8, -16
+ s32e a5, a8, -12
+ s32e a6, a8, -8
+ s32e a7, a8, -4
rotw 3
_beqi a3, 1, .Lexit
j .Lloop
-.Linvalid_mask:
+.Lexit:
- /* We get here because of an unrecoverable error in the window
- * registers. If we are in user space, we kill the application,
- * however, this condition is unrecoverable in kernel space.
- */
+ /* Done. Do the final rotation and set WS */
+
+ rotw 1
+ rsr a3, windowbase
+ ssl a3
+ movi a3, 1
+ sll a3, a3
+ wsr a3, windowstart
+.Lnospill:
+
+ /* Advance PC, restore registers and SAR, and return from exception. */
+
+ l32i a3, a2, PT_SAR
+ l32i a0, a2, PT_AREG0
+ wsr a3, sar
+ l32i a3, a2, PT_AREG3
- rsr a0, ps
- _bbci.l a0, PS_UM_BIT, 1f
+ /* Restore clobbered registers. */
- /* User space: Setup a dummy frame and kill application.
+ l32i a4, a2, PT_AREG4
+ l32i a7, a2, PT_AREG7
+ l32i a8, a2, PT_AREG8
+ l32i a11, a2, PT_AREG11
+ l32i a12, a2, PT_AREG12
+ l32i a15, a2, PT_AREG15
+
+ movi a2, 0
+ rfe
+
+.Linvalid_mask:
+
+ /* We get here because of an unrecoverable error in the window
+ * registers, so set up a dummy frame and kill the user application.
* Note: We assume EXC_TABLE_KSTK contains a valid stack pointer.
*/
@@ -1414,14 +1269,136 @@ ENTRY(_spill_registers)
movi a4, do_exit
callx4 a4
-1: /* Kernel space: PANIC! */
+ /* shouldn't return, so panic */
wsr a0, excsave1
movi a0, unrecoverable_exception
callx0 a0 # should not return
1: j 1b
-ENDPROC(_spill_registers)
+
+ENDPROC(fast_syscall_spill_registers)
+
+/* Fixup handler.
+ *
+ * We get here if the spill routine causes an exception, e.g. tlb miss.
+ * We basically restore WINDOWBASE and WINDOWSTART to the condition when
+ * we entered the spill routine and jump to the user exception handler.
+ *
+ * Note that we only need to restore the bits in windowstart that have not
+ * been spilled yet by the _spill_register routine. Luckily, a3 contains a
+ * rotated windowstart with only those bits set for frames that haven't been
+ * spilled yet. Because a3 is rotated such that bit 0 represents the register
+ * frame for the current windowbase - 1, we need to rotate a3 left by the
+ * value of the current windowbase + 1 and move it to windowstart.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(fast_syscall_spill_registers_fixup)
+
+ rsr a2, windowbase # get current windowbase (a2 is saved)
+ xsr a0, depc # restore depc and a0
+ ssl a2 # set shift (32 - WB)
+
+ /* We need to make sure the current registers (a0-a3) are preserved.
+ * To do this, we simply set the bit for the current window frame
+ * in WS, so that the exception handlers save them to the task stack.
+ *
+ * Note: we use a3 to set the windowbase, so we take a special care
+ * of it, saving it in the original _spill_registers frame across
+ * the exception handler call.
+ */
+
+ xsr a3, excsave1 # get spill-mask
+ slli a3, a3, 1 # shift left by one
+ addi a3, a3, 1 # set the bit for the current window frame
+
+ slli a2, a3, 32-WSBITS
+ src a2, a3, a2 # a2 = xxwww1yyxxxwww1yy......
+ wsr a2, windowstart # set corrected windowstart
+
+ srli a3, a3, 1
+ rsr a2, excsave1
+ l32i a2, a2, EXC_TABLE_DOUBLE_SAVE # restore a2
+ xsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE # save a3
+ l32i a3, a2, EXC_TABLE_PARAM # original WB (in user task)
+ xsr a2, excsave1
+
+ /* Return to the original (user task) WINDOWBASE.
+ * We leave the following frame behind:
+ * a0, a1, a2 same
+ * a3: trashed (saved in EXC_TABLE_DOUBLE_SAVE)
+ * depc: depc (we have to return to that address)
+ * excsave_1: exctable
+ */
+
+ wsr a3, windowbase
+ rsync
+
+ /* We are now in the original frame when we entered _spill_registers:
+ * a0: return address
+ * a1: used, stack pointer
+ * a2: kernel stack pointer
+ * a3: available
+ * depc: exception address
+ * excsave: exctable
+ * Note: This frame might be the same as above.
+ */
+
+ /* Setup stack pointer. */
+
+ addi a2, a2, -PT_USER_SIZE
+ s32i a0, a2, PT_AREG0
+
+ /* Make sure we return to this fixup handler. */
+
+ movi a3, fast_syscall_spill_registers_fixup_return
+ s32i a3, a2, PT_DEPC # setup depc
+
+ /* Jump to the exception handler. */
+
+ rsr a3, excsave1
+ rsr a0, exccause
+ addx4 a0, a0, a3 # find entry in table
+ l32i a0, a0, EXC_TABLE_FAST_USER # load handler
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+ jx a0
+
+ENDPROC(fast_syscall_spill_registers_fixup)
+
+ENTRY(fast_syscall_spill_registers_fixup_return)
+
+ /* When we return here, all registers have been restored (a2: DEPC) */
+
+ wsr a2, depc # exception address
+
+ /* Restore fixup handler. */
+
+ rsr a2, excsave1
+ s32i a3, a2, EXC_TABLE_DOUBLE_SAVE
+ movi a3, fast_syscall_spill_registers_fixup
+ s32i a3, a2, EXC_TABLE_FIXUP
+ rsr a3, windowbase
+ s32i a3, a2, EXC_TABLE_PARAM
+ l32i a2, a2, EXC_TABLE_KSTK
+
+ /* Load WB at the time the exception occurred. */
+
+ rsr a3, sar # WB is still in SAR
+ neg a3, a3
+ wsr a3, windowbase
+ rsync
+
+ rsr a3, excsave1
+ l32i a3, a3, EXC_TABLE_DOUBLE_SAVE
+
+ rfde
+
+ENDPROC(fast_syscall_spill_registers_fixup_return)
#ifdef CONFIG_MMU
/*
@@ -1794,6 +1771,43 @@ ENTRY(system_call)
ENDPROC(system_call)
+/*
+ * Spill live registers on the kernel stack macro.
+ *
+ * Entry condition: ps.woe is set, ps.excm is cleared
+ * Exit condition: windowstart has single bit set
+ * May clobber: a12, a13
+ */
+ .macro spill_registers_kernel
+
+#if XCHAL_NUM_AREGS > 16
+ call12 1f
+ _j 2f
+ retw
+ .align 4
+1:
+ _entry a1, 48
+ addi a12, a0, 3
+#if XCHAL_NUM_AREGS > 32
+ .rept (XCHAL_NUM_AREGS - 32) / 12
+ _entry a1, 48
+ mov a12, a0
+ .endr
+#endif
+ _entry a1, 48
+#if XCHAL_NUM_AREGS % 12 == 0
+ mov a8, a8
+#elif XCHAL_NUM_AREGS % 12 == 4
+ mov a12, a12
+#elif XCHAL_NUM_AREGS % 12 == 8
+ mov a4, a4
+#endif
+ retw
+2:
+#else
+ mov a12, a12
+#endif
+ .endm
/*
* Task switch.
@@ -1806,21 +1820,20 @@ ENTRY(_switch_to)
entry a1, 16
- mov a12, a2 # preserve 'prev' (a2)
- mov a13, a3 # and 'next' (a3)
+ mov a10, a2 # preserve 'prev' (a2)
+ mov a11, a3 # and 'next' (a3)
l32i a4, a2, TASK_THREAD_INFO
l32i a5, a3, TASK_THREAD_INFO
- save_xtregs_user a4 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
- s32i a0, a12, THREAD_RA # save return address
- s32i a1, a12, THREAD_SP # save stack pointer
+ s32i a0, a10, THREAD_RA # save return address
+ s32i a1, a10, THREAD_SP # save stack pointer
/* Disable ints while we manipulate the stack pointer. */
- movi a14, (1 << PS_EXCM_BIT) | LOCKLEVEL
- xsr a14, ps
+ rsil a14, LOCKLEVEL
rsr a3, excsave1
rsync
s32i a3, a3, EXC_TABLE_FIXUP /* enter critical section */
@@ -1835,7 +1848,7 @@ ENTRY(_switch_to)
/* Flush register file. */
- call0 _spill_registers # destroys a3, a4, and SAR
+ spill_registers_kernel
/* Set kernel stack (and leave critical section)
* Note: It's safe to set it here. The stack will not be overwritten
@@ -1851,13 +1864,13 @@ ENTRY(_switch_to)
/* restore context of the task 'next' */
- l32i a0, a13, THREAD_RA # restore return address
- l32i a1, a13, THREAD_SP # restore stack pointer
+ l32i a0, a11, THREAD_RA # restore return address
+ l32i a1, a11, THREAD_SP # restore stack pointer
- load_xtregs_user a5 a6 a8 a9 a10 a11 THREAD_XTREGS_USER
+ load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER
wsr a14, ps
- mov a2, a12 # return 'prev'
+ mov a2, a10 # return 'prev'
rsync
retw
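
The WINDOWSTART arithmetic at the top of fast_syscall_spill_registers is easier to follow in C. A rough model of the rotation and the oldest-frame search, for illustration only (the WSBITS value and the helper names are assumptions, not kernel code):

	#define WSBITS 16	/* e.g. a 64-areg core; 8 on a 32-areg core */

	/* Rotate WINDOWSTART right by WINDOWBASE so that the current frame's
	 * start bit lands at bit 0, as the slli/or/srl sequence above does. */
	static unsigned rotate_ws_to_current(unsigned ws, unsigned wb)
	{
		unsigned dup = ws | (ws << WSBITS);
		return (dup >> wb) & ((1u << WSBITS) - 1);
	}

	/* The lowest set bit of the rotated mask marks the oldest live frame,
	 * which is where spilling starts (the neg/and pair above). */
	static unsigned oldest_live_frame(unsigned rotated_ws)
	{
		return rotated_ws & -rotated_ws;
	}
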
diff --git a/arch/xtensa/kernel/setup.c b/arch/xtensa/kernel/setup.c
index 7d12af1317f1..84fe931bb60e 100644
--- a/arch/xtensa/kernel/setup.c
+++ b/arch/xtensa/kernel/setup.c
@@ -22,6 +22,7 @@
#include <linux/bootmem.h>
#include <linux/kernel.h>
#include <linux/percpu.h>
+#include <linux/clk-provider.h>
#include <linux/cpu.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
@@ -276,6 +277,7 @@ void __init early_init_devtree(void *params)
static int __init xtensa_device_probe(void)
{
+ of_clk_init(NULL);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
return 0;
}
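
of_clk_init(NULL) walks the table built by CLK_OF_DECLARE() and registers every matching clock provider, so calling it before of_platform_populate() lets devices resolve their clocks at probe time. A minimal, hypothetical provider that such a call would pick up (the compatible string and rate are invented for illustration):

	#include <linux/clk-provider.h>
	#include <linux/of.h>

	static void __init example_osc_setup(struct device_node *np)
	{
		struct clk *clk;

		/* Fixed-rate root clock described by this device tree node. */
		clk = clk_register_fixed_rate(NULL, np->name, NULL,
					      CLK_IS_ROOT, 10000000);
		of_clk_add_provider(np, of_clk_src_simple_get, clk);
	}
	CLK_OF_DECLARE(example_osc, "example,fixed-osc", example_osc_setup);
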
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index 08b769d3b3a1..2a1823de69cc 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -30,6 +30,7 @@
#include <asm/platform.h>
unsigned long ccount_freq; /* ccount Hz */
+EXPORT_SYMBOL(ccount_freq);
static cycle_t ccount_read(struct clocksource *cs)
{
diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S
index cb8fd44caabc..f9e1ec346e35 100644
--- a/arch/xtensa/kernel/vectors.S
+++ b/arch/xtensa/kernel/vectors.S
@@ -235,7 +235,7 @@ ENTRY(_DoubleExceptionVector)
/* Check for overflow/underflow exception, jump if overflow. */
- _bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
+ bbci.l a0, 6, _DoubleExceptionVector_WindowOverflow
/*
* Restart window underflow exception.
diff --git a/arch/xtensa/kernel/xtensa_ksyms.c b/arch/xtensa/kernel/xtensa_ksyms.c
index 74a60c7e085e..80b33ed51f31 100644
--- a/arch/xtensa/kernel/xtensa_ksyms.c
+++ b/arch/xtensa/kernel/xtensa_ksyms.c
@@ -122,9 +122,7 @@ EXPORT_SYMBOL(insw);
EXPORT_SYMBOL(insl);
extern long common_exception_return;
-extern long _spill_registers;
EXPORT_SYMBOL(common_exception_return);
-EXPORT_SYMBOL(_spill_registers);
#ifdef CONFIG_FUNCTION_TRACER
EXPORT_SYMBOL(_mcount);
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index 479d7537a32a..aff108df92d3 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -90,7 +90,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
/*
- * Initialize the bootmem system and give it all the memory we have available.
+ * Initialize the bootmem system and give it all low memory we have available.
*/
void __init bootmem_init(void)
@@ -142,9 +142,14 @@ void __init bootmem_init(void)
/* Add all remaining memory pieces into the bootmem map */
- for (i=0; i<sysmem.nr_banks; i++)
- free_bootmem(sysmem.bank[i].start,
- sysmem.bank[i].end - sysmem.bank[i].start);
+ for (i = 0; i < sysmem.nr_banks; i++) {
+ if (sysmem.bank[i].start >> PAGE_SHIFT < max_low_pfn) {
+ unsigned long end = min(max_low_pfn << PAGE_SHIFT,
+ sysmem.bank[i].end);
+ free_bootmem(sysmem.bank[i].start,
+ end - sysmem.bank[i].start);
+ }
+ }
}
diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c
index 36ec171698b8..861203e958da 100644
--- a/arch/xtensa/mm/mmu.c
+++ b/arch/xtensa/mm/mmu.c
@@ -39,7 +39,7 @@ void init_mmu(void)
set_itlbcfg_register(0);
set_dtlbcfg_register(0);
#endif
-#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && CONFIG_OF
+#if XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY && defined(CONFIG_OF)
/*
* Update the IO area mapping in case xtensa_kio_paddr has changed
*/
diff --git a/arch/xtensa/platforms/xtfpga/setup.c b/arch/xtensa/platforms/xtfpga/setup.c
index 800227862fe8..57fd08b36f51 100644
--- a/arch/xtensa/platforms/xtfpga/setup.c
+++ b/arch/xtensa/platforms/xtfpga/setup.c
@@ -135,11 +135,11 @@ static void __init update_local_mac(struct device_node *node)
static int __init machine_setup(void)
{
- struct device_node *serial;
+ struct device_node *clock;
struct device_node *eth = NULL;
- for_each_compatible_node(serial, NULL, "ns16550a")
- update_clock_frequency(serial);
+ for_each_node_by_name(clock, "main-oscillator")
+ update_clock_frequency(clock);
if ((eth = of_find_compatible_node(eth, NULL, "opencores,ethoc")))
update_local_mac(eth);
@@ -290,6 +290,7 @@ static int __init xtavnet_init(void)
* knows whether they set it correctly on the DIP switches.
*/
pr_info("XTFPGA: Ethernet MAC %pM\n", ethoc_pdata.hwaddr);
+ ethoc_pdata.eth_clkfreq = *(long *)XTFPGA_CLKFRQ_VADDR;
return 0;
}
diff --git a/arch/xtensa/variants/fsf/include/variant/tie.h b/arch/xtensa/variants/fsf/include/variant/tie.h
index bf4020116df5..244cdea4dee5 100644
--- a/arch/xtensa/variants/fsf/include/variant/tie.h
+++ b/arch/xtensa/variants/fsf/include/variant/tie.h
@@ -18,13 +18,6 @@
#define XCHAL_CP_MASK 0x00 /* bitmask of all CPs by ID */
#define XCHAL_CP_PORT_MASK 0x00 /* bitmask of only port CPs */
-/* Basic parameters of each coprocessor: */
-#define XCHAL_CP7_NAME "XTIOP"
-#define XCHAL_CP7_IDENT XTIOP
-#define XCHAL_CP7_SA_SIZE 0 /* size of state save area */
-#define XCHAL_CP7_SA_ALIGN 1 /* min alignment of save area */
-#define XCHAL_CP_ID_XTIOP 7 /* coprocessor ID (0..7) */
-
/* Filler info for unassigned coprocessors, to simplify arrays etc: */
#define XCHAL_NCP_SA_SIZE 0
#define XCHAL_NCP_SA_ALIGN 1
@@ -42,6 +35,8 @@
#define XCHAL_CP5_SA_ALIGN 1
#define XCHAL_CP6_SA_SIZE 0
#define XCHAL_CP6_SA_ALIGN 1
+#define XCHAL_CP7_SA_SIZE 0
+#define XCHAL_CP7_SA_ALIGN 1
/* Save area for non-coprocessor optional and custom (TIE) state: */
#define XCHAL_NCP_SA_SIZE 0
diff --git a/block/blk-core.c b/block/blk-core.c
index c00e0bdeab4a..bfe16d5af9f9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -708,9 +708,13 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
if (!q)
return NULL;
- if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ q->flush_rq = kzalloc(sizeof(struct request), GFP_KERNEL);
+ if (!q->flush_rq)
return NULL;
+ if (blk_init_rl(&q->root_rl, q, GFP_KERNEL))
+ goto fail;
+
q->request_fn = rfn;
q->prep_rq_fn = NULL;
q->unprep_rq_fn = NULL;
@@ -733,12 +737,16 @@ blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
/* init elevator */
if (elevator_init(q, NULL)) {
mutex_unlock(&q->sysfs_lock);
- return NULL;
+ goto fail;
}
mutex_unlock(&q->sysfs_lock);
return q;
+
+fail:
+ kfree(q->flush_rq);
+ return NULL;
}
EXPORT_SYMBOL(blk_init_allocated_queue);
@@ -1127,7 +1135,7 @@ static struct request *blk_old_get_request(struct request_queue *q, int rw,
struct request *blk_get_request(struct request_queue *q, int rw, gfp_t gfp_mask)
{
if (q->mq_ops)
- return blk_mq_alloc_request(q, rw, gfp_mask, false);
+ return blk_mq_alloc_request(q, rw, gfp_mask);
else
return blk_old_get_request(q, rw, gfp_mask);
}
@@ -1278,6 +1286,11 @@ void __blk_put_request(struct request_queue *q, struct request *req)
if (unlikely(!q))
return;
+ if (q->mq_ops) {
+ blk_mq_free_request(req);
+ return;
+ }
+
blk_pm_put_request(req);
elv_completed_request(q, req);
diff --git a/block/blk-exec.c b/block/blk-exec.c
index bbfc072a79c2..dbf4502b1d67 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
* be reused after dying flag is set
*/
if (q->mq_ops) {
- blk_mq_insert_request(q, rq, true);
+ blk_mq_insert_request(rq, at_head, true, false);
return;
}
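
The reworked blk_mq_insert_request() takes three flags whose order is easy to misread at call sites like the one above. A hedged reading of the new call, with the parameter names from the updated prototype spelled out as comments (the comments are editorial, not part of the patch):

	blk_mq_insert_request(rq,
			      at_head,	/* insert at the head of the queue?       */
			      true,	/* run_queue: kick the hardware queue now */
			      false);	/* async: don't defer the run to a worker */
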
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 9288aaf35c21..43e6b4755e9a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -130,20 +130,29 @@ static void blk_flush_restore_request(struct request *rq)
blk_clear_rq_complete(rq);
}
-static void mq_flush_data_run(struct work_struct *work)
+static void mq_flush_run(struct work_struct *work)
{
struct request *rq;
- rq = container_of(work, struct request, mq_flush_data);
+ rq = container_of(work, struct request, mq_flush_work);
memset(&rq->csd, 0, sizeof(rq->csd));
- blk_mq_run_request(rq, true, false);
+ blk_mq_insert_request(rq, false, true, false);
}
-static void blk_mq_flush_data_insert(struct request *rq)
+static bool blk_flush_queue_rq(struct request *rq, bool add_front)
{
- INIT_WORK(&rq->mq_flush_data, mq_flush_data_run);
- kblockd_schedule_work(rq->q, &rq->mq_flush_data);
+ if (rq->q->mq_ops) {
+ INIT_WORK(&rq->mq_flush_work, mq_flush_run);
+ kblockd_schedule_work(rq->q, &rq->mq_flush_work);
+ return false;
+ } else {
+ if (add_front)
+ list_add(&rq->queuelist, &rq->q->queue_head);
+ else
+ list_add_tail(&rq->queuelist, &rq->q->queue_head);
+ return true;
+ }
}
/**
@@ -187,12 +196,7 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
case REQ_FSEQ_DATA:
list_move_tail(&rq->flush.list, &q->flush_data_in_flight);
- if (q->mq_ops)
- blk_mq_flush_data_insert(rq);
- else {
- list_add(&rq->queuelist, &q->queue_head);
- queued = true;
- }
+ queued = blk_flush_queue_rq(rq, true);
break;
case REQ_FSEQ_DONE:
@@ -216,9 +220,6 @@ static bool blk_flush_complete_seq(struct request *rq, unsigned int seq,
}
kicked = blk_kick_flush(q);
- /* blk_mq_run_flush will run queue */
- if (q->mq_ops)
- return queued;
return kicked | queued;
}
@@ -230,10 +231,9 @@ static void flush_end_io(struct request *flush_rq, int error)
struct request *rq, *n;
unsigned long flags = 0;
- if (q->mq_ops) {
- blk_mq_free_request(flush_rq);
+ if (q->mq_ops)
spin_lock_irqsave(&q->mq_flush_lock, flags);
- }
+
running = &q->flush_queue[q->flush_running_idx];
BUG_ON(q->flush_pending_idx == q->flush_running_idx);
@@ -263,49 +263,14 @@ static void flush_end_io(struct request *flush_rq, int error)
* kblockd.
*/
if (queued || q->flush_queue_delayed) {
- if (!q->mq_ops)
- blk_run_queue_async(q);
- else
- /*
- * This can be optimized to only run queues with requests
- * queued if necessary.
- */
- blk_mq_run_queues(q, true);
+ WARN_ON(q->mq_ops);
+ blk_run_queue_async(q);
}
q->flush_queue_delayed = 0;
if (q->mq_ops)
spin_unlock_irqrestore(&q->mq_flush_lock, flags);
}
-static void mq_flush_work(struct work_struct *work)
-{
- struct request_queue *q;
- struct request *rq;
-
- q = container_of(work, struct request_queue, mq_flush_work);
-
- /* We don't need set REQ_FLUSH_SEQ, it's for consistency */
- rq = blk_mq_alloc_request(q, WRITE_FLUSH|REQ_FLUSH_SEQ,
- __GFP_WAIT|GFP_ATOMIC, true);
- rq->cmd_type = REQ_TYPE_FS;
- rq->end_io = flush_end_io;
-
- blk_mq_run_request(rq, true, false);
-}
-
-/*
- * We can't directly use q->flush_rq, because it doesn't have tag and is not in
- * hctx->rqs[]. so we must allocate a new request, since we can't sleep here,
- * so offload the work to workqueue.
- *
- * Note: we assume a flush request finished in any hardware queue will flush
- * the whole disk cache.
- */
-static void mq_run_flush(struct request_queue *q)
-{
- kblockd_schedule_work(q, &q->mq_flush_work);
-}
-
/**
* blk_kick_flush - consider issuing flush request
* @q: request_queue being kicked
@@ -340,19 +305,31 @@ static bool blk_kick_flush(struct request_queue *q)
* different from running_idx, which means flush is in flight.
*/
q->flush_pending_idx ^= 1;
+
if (q->mq_ops) {
- mq_run_flush(q);
- return true;
+ struct blk_mq_ctx *ctx = first_rq->mq_ctx;
+ struct blk_mq_hw_ctx *hctx = q->mq_ops->map_queue(q, ctx->cpu);
+
+ blk_mq_rq_init(hctx, q->flush_rq);
+ q->flush_rq->mq_ctx = ctx;
+
+ /*
+ * Reuse the tag value from the first waiting request,
+ * with blk-mq the tag is generated during request
+ * allocation and drivers can rely on it being inside
+ * the range they asked for.
+ */
+ q->flush_rq->tag = first_rq->tag;
+ } else {
+ blk_rq_init(q, q->flush_rq);
}
- blk_rq_init(q, &q->flush_rq);
- q->flush_rq.cmd_type = REQ_TYPE_FS;
- q->flush_rq.cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
- q->flush_rq.rq_disk = first_rq->rq_disk;
- q->flush_rq.end_io = flush_end_io;
+ q->flush_rq->cmd_type = REQ_TYPE_FS;
+ q->flush_rq->cmd_flags = WRITE_FLUSH | REQ_FLUSH_SEQ;
+ q->flush_rq->rq_disk = first_rq->rq_disk;
+ q->flush_rq->end_io = flush_end_io;
- list_add_tail(&q->flush_rq.queuelist, &q->queue_head);
- return true;
+ return blk_flush_queue_rq(q->flush_rq, false);
}
static void flush_data_end_io(struct request *rq, int error)
@@ -437,7 +414,7 @@ void blk_insert_flush(struct request *rq)
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
if (q->mq_ops) {
- blk_mq_run_request(rq, false, true);
+ blk_mq_insert_request(rq, false, false, true);
} else
list_add_tail(&rq->queuelist, &q->queue_head);
return;
@@ -558,5 +535,4 @@ EXPORT_SYMBOL(blkdev_issue_flush);
void blk_mq_init_flush(struct request_queue *q)
{
spin_lock_init(&q->mq_flush_lock);
- INIT_WORK(&q->mq_flush_work, mq_flush_work);
}
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 2da76c999ef3..97a733cf3d5f 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -119,6 +119,14 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
atomic_inc(&bb.done);
submit_bio(type, bio);
+
+ /*
+ * We can loop for a long time in here, if someone does
+ * full device discards (like mkfs). Be nice and allow
+ * us to schedule out to avoid softlocking if preempt
+ * is disabled.
+ */
+ cond_resched();
}
blk_finish_plug(&plug);
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 8f8adaa95466..6c583f9c5b65 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -21,6 +21,16 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
if (!bio)
return 0;
+ /*
+ * This should probably be returning 0, but blk_add_request_payload()
+ * (Christoph!!!!)
+ */
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
fbio = bio;
cluster = blk_queue_cluster(q);
seg_size = 0;
@@ -161,30 +171,60 @@ new_segment:
*bvprv = *bvec;
}
-/*
- * map a request to scatterlist, return number of sg entries setup. Caller
- * must make sure sg can hold rq->nr_phys_segments entries
- */
-int blk_rq_map_sg(struct request_queue *q, struct request *rq,
- struct scatterlist *sglist)
+static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
+ struct scatterlist *sglist,
+ struct scatterlist **sg)
{
struct bio_vec bvec, bvprv = { NULL };
- struct req_iterator iter;
- struct scatterlist *sg;
+ struct bvec_iter iter;
int nsegs, cluster;
nsegs = 0;
cluster = blk_queue_cluster(q);
- /*
- * for each bio in rq
- */
- sg = NULL;
- rq_for_each_segment(bvec, rq, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in rq */
+ if (bio->bi_rw & REQ_DISCARD) {
+ /*
+ * This is a hack - drivers should be neither modifying the
+ * biovec, nor relying on bi_vcnt - but because of
+ * blk_add_request_payload(), a discard bio may or may not have
+ * a payload we need to set up here (thank you Christoph) and
+ * bi_vcnt is really the only way of telling if we need to.
+ */
+
+ if (bio->bi_vcnt)
+ goto single_segment;
+
+ return 0;
+ }
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+single_segment:
+ *sg = sglist;
+ bvec = bio_iovec(bio);
+ sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
+ return 1;
+ }
+
+ for_each_bio(bio)
+ bio_for_each_segment(bvec, bio, iter)
+ __blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
+ &nsegs, &cluster);
+ return nsegs;
+}
+
+/*
+ * map a request to scatterlist, return number of sg entries setup. Caller
+ * must make sure sg can hold rq->nr_phys_segments entries
+ */
+int blk_rq_map_sg(struct request_queue *q, struct request *rq,
+ struct scatterlist *sglist)
+{
+ struct scatterlist *sg = NULL;
+ int nsegs = 0;
+
+ if (rq->bio)
+ nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);
if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
(blk_rq_bytes(rq) & q->dma_pad_mask)) {
@@ -230,20 +270,13 @@ EXPORT_SYMBOL(blk_rq_map_sg);
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
struct scatterlist *sglist)
{
- struct bio_vec bvec, bvprv = { NULL };
- struct scatterlist *sg;
- int nsegs, cluster;
- struct bvec_iter iter;
-
- nsegs = 0;
- cluster = blk_queue_cluster(q);
-
- sg = NULL;
- bio_for_each_segment(bvec, bio, iter) {
- __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
- &nsegs, &cluster);
- } /* segments in bio */
+ struct scatterlist *sg = NULL;
+ int nsegs;
+ struct bio *next = bio->bi_next;
+ bio->bi_next = NULL;
+ nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
+ bio->bi_next = next;
if (sg)
sg_mark_end(sg);
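
The refactoring funnels both blk_rq_map_sg() and blk_bio_map_sg() through __blk_bios_map_sg(), but the contract seen by drivers is unchanged: supply a scatterlist with room for rq->nr_phys_segments entries and get back the number actually used. A minimal, hypothetical caller (names are illustrative):

	/* Map a request for DMA; sgl must hold rq->nr_phys_segments entries. */
	static int example_map_request(struct request *rq,
				       struct scatterlist *sgl, int max_ents)
	{
		if (rq->nr_phys_segments > max_ents)
			return -EINVAL;

		sg_init_table(sgl, rq->nr_phys_segments);
		return blk_rq_map_sg(rq->q, rq, sgl);	/* entries actually used */
	}
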
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146befb56aa..136ef8643bba 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
#include "blk-mq.h"
static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
unsigned int cpu = (unsigned long) hcpu;
struct blk_mq_cpu_notifier *notify;
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
notify->notify(notify->data, action, cpu);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
return NOTIFY_OK;
}
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
BUG_ON(!notifier->notify);
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
{
- spin_lock(&blk_mq_cpu_notify_lock);
+ raw_spin_lock(&blk_mq_cpu_notify_lock);
list_del(&notifier->list);
- spin_unlock(&blk_mq_cpu_notify_lock);
+ raw_spin_unlock(&blk_mq_cpu_notify_lock);
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 5d70edc9855f..83ae96c51a27 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -184,7 +184,7 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
{
char *orig_page = page;
- int cpu;
+ unsigned int cpu;
if (!tags)
return 0;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 57039fcd9c93..883f72089015 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
set_bit(ctx->index_hw, hctx->ctx_map);
}
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
- bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+ gfp_t gfp, bool reserved)
{
struct request *rq;
unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
}
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
- gfp_t gfp, bool reserved)
-{
- return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
int rw, gfp_t gfp,
bool reserved)
@@ -226,15 +220,14 @@ static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
return rq;
}
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
- gfp_t gfp, bool reserved)
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp)
{
struct request *rq;
if (blk_mq_queue_enter(q))
return NULL;
- rq = blk_mq_alloc_request_pinned(q, rw, gfp, reserved);
+ rq = blk_mq_alloc_request_pinned(q, rw, gfp, false);
if (rq)
blk_mq_put_ctx(rq->mq_ctx);
return rq;
@@ -258,7 +251,7 @@ EXPORT_SYMBOL(blk_mq_alloc_reserved_request);
/*
* Re-init and set pdu, if we have it
*/
-static void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
blk_rq_init(hctx->queue, rq);
@@ -290,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
__blk_mq_free_request(hctx, ctx, rq);
}
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
- if (error)
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
- error = -EIO;
-
- if (unlikely(rq->cmd_flags & REQ_QUIET))
- set_bit(BIO_QUIET, &bio->bi_flags);
-
- /* don't actually finish bio if it's part of flush sequence */
- if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
- bio_endio(bio, error);
-}
-
-void blk_mq_complete_request(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
{
- struct bio *bio = rq->bio;
- unsigned int bytes = 0;
-
- trace_block_rq_complete(rq->q, rq);
-
- while (bio) {
- struct bio *next = bio->bi_next;
-
- bio->bi_next = NULL;
- bytes += bio->bi_iter.bi_size;
- blk_mq_bio_endio(rq, bio, error);
- bio = next;
- }
-
- blk_account_io_completion(rq, bytes);
+ if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+ return true;
blk_account_io_done(rq);
@@ -329,49 +294,57 @@ void blk_mq_complete_request(struct request *rq, int error)
rq->end_io(rq, error);
else
blk_mq_free_request(rq);
+ return false;
}
+EXPORT_SYMBOL(blk_mq_end_io_partial);
-void __blk_mq_end_io(struct request *rq, int error)
-{
- if (!blk_mark_rq_complete(rq))
- blk_mq_complete_request(rq, error);
-}
-
-static void blk_mq_end_io_remote(void *data)
+static void __blk_mq_complete_request_remote(void *data)
{
struct request *rq = data;
- __blk_mq_end_io(rq, rq->errors);
+ rq->q->softirq_done_fn(rq);
}
-/*
- * End IO on this request on a multiqueue enabled driver. We'll either do
- * it directly inline, or punt to a local IPI handler on the matching
- * remote CPU.
- */
-void blk_mq_end_io(struct request *rq, int error)
+void __blk_mq_complete_request(struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
int cpu;
- if (!ctx->ipi_redirect)
- return __blk_mq_end_io(rq, error);
+ if (!ctx->ipi_redirect) {
+ rq->q->softirq_done_fn(rq);
+ return;
+ }
cpu = get_cpu();
if (cpu != ctx->cpu && cpu_online(ctx->cpu)) {
- rq->errors = error;
- rq->csd.func = blk_mq_end_io_remote;
+ rq->csd.func = __blk_mq_complete_request_remote;
rq->csd.info = rq;
rq->csd.flags = 0;
__smp_call_function_single(ctx->cpu, &rq->csd, 0);
} else {
- __blk_mq_end_io(rq, error);
+ rq->q->softirq_done_fn(rq);
}
put_cpu();
}
-EXPORT_SYMBOL(blk_mq_end_io);
-static void blk_mq_start_request(struct request *rq)
+/**
+ * blk_mq_complete_request - end I/O on a request
+ * @rq: the request being processed
+ *
+ * Description:
+ * Ends all I/O on a request. It does not handle partial completions.
+ * The actual completion happens out-of-order, through an IPI handler.
+ **/
+void blk_mq_complete_request(struct request *rq)
+{
+ if (unlikely(blk_should_fake_timeout(rq->q)))
+ return;
+ if (!blk_mark_rq_complete(rq))
+ __blk_mq_complete_request(rq);
+}
+EXPORT_SYMBOL(blk_mq_complete_request);
+
+static void blk_mq_start_request(struct request *rq, bool last)
{
struct request_queue *q = rq->q;
@@ -384,6 +357,25 @@ static void blk_mq_start_request(struct request *rq)
*/
rq->deadline = jiffies + q->rq_timeout;
set_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ if (q->dma_drain_size && blk_rq_bytes(rq)) {
+ /*
+ * Make sure space for the drain appears. We know we can do
+ * this because max_hw_segments has been adjusted to be one
+ * fewer than the device can handle.
+ */
+ rq->nr_phys_segments++;
+ }
+
+ /*
+ * Flag the last request in the series so that drivers know when IO
+ * should be kicked off, if they don't do it on a per-request basis.
+ *
+ * Note: the flag isn't the only condition on which drivers should kick off I/O.
+ * If the drive is busy, the last request might not have the bit set.
+ */
+ if (last)
+ rq->cmd_flags |= REQ_END;
}
static void blk_mq_requeue_request(struct request *rq)
@@ -392,6 +384,11 @@ static void blk_mq_requeue_request(struct request *rq)
trace_block_rq_requeue(q, rq);
clear_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+
+ rq->cmd_flags &= ~REQ_END;
+
+ if (q->dma_drain_size && blk_rq_bytes(rq))
+ rq->nr_phys_segments--;
}
struct blk_mq_timeout_data {
@@ -559,19 +556,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
rq = list_first_entry(&rq_list, struct request, queuelist);
list_del_init(&rq->queuelist);
- blk_mq_start_request(rq);
- /*
- * Last request in the series. Flag it as such, this
- * enables drivers to know when IO should be kicked off,
- * if they don't do it on a per-request basis.
- *
- * Note: the flag isn't the only condition drivers
- * should do kick off. If drive is busy, the last
- * request might not have the bit set.
- */
- if (list_empty(&rq_list))
- rq->cmd_flags |= REQ_END;
+ blk_mq_start_request(rq, list_empty(&rq_list));
ret = q->mq_ops->queue_rq(hctx, rq);
switch (ret) {
@@ -589,8 +575,8 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
break;
default:
pr_err("blk-mq: bad return on queue: %d\n", ret);
- rq->errors = -EIO;
case BLK_MQ_RQ_QUEUE_ERROR:
+ rq->errors = -EIO;
blk_mq_end_io(rq, rq->errors);
break;
}
@@ -693,13 +679,16 @@ static void blk_mq_work_fn(struct work_struct *work)
}
static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
- struct request *rq)
+ struct request *rq, bool at_head)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
trace_block_rq_insert(hctx->queue, rq);
- list_add_tail(&rq->queuelist, &ctx->rq_list);
+ if (at_head)
+ list_add(&rq->queuelist, &ctx->rq_list);
+ else
+ list_add_tail(&rq->queuelist, &ctx->rq_list);
blk_mq_hctx_mark_pending(hctx, ctx);
/*
@@ -708,61 +697,28 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
blk_mq_add_timer(rq);
}
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
- bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+ bool async)
{
+ struct request_queue *q = rq->q;
struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
+ struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+ current_ctx = blk_mq_get_ctx(q);
+ if (!cpu_online(ctx->cpu))
+ rq->mq_ctx = ctx = current_ctx;
- ctx = rq->mq_ctx;
hctx = q->mq_ops->map_queue(q, ctx->cpu);
- if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+ if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+ !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
blk_insert_flush(rq);
} else {
- current_ctx = blk_mq_get_ctx(q);
-
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
- rq->mq_ctx = ctx;
- }
spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, at_head);
spin_unlock(&ctx->lock);
-
- blk_mq_put_ctx(current_ctx);
}
- if (run_queue)
- __blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async)
-{
- struct request_queue *q = rq->q;
- struct blk_mq_hw_ctx *hctx;
- struct blk_mq_ctx *ctx, *current_ctx;
-
- current_ctx = blk_mq_get_ctx(q);
-
- ctx = rq->mq_ctx;
- if (!cpu_online(ctx->cpu)) {
- ctx = current_ctx;
- rq->mq_ctx = ctx;
- }
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
-
- /* ctx->cpu might be offline */
- spin_lock(&ctx->lock);
- __blk_mq_insert_request(hctx, rq);
- spin_unlock(&ctx->lock);
-
blk_mq_put_ctx(current_ctx);
if (run_queue)
@@ -798,7 +754,7 @@ static void blk_mq_insert_requests(struct request_queue *q,
rq = list_first_entry(list, struct request, queuelist);
list_del_init(&rq->queuelist);
rq->mq_ctx = ctx;
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -888,6 +844,11 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
blk_queue_bounce(q, &bio);
+ if (bio_integrity_enabled(bio) && bio_integrity_prep(bio)) {
+ bio_endio(bio, -EIO);
+ return;
+ }
+
if (use_plug && blk_attempt_plug_merge(q, bio, &request_count))
return;
@@ -899,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
ctx = blk_mq_get_ctx(q);
hctx = q->mq_ops->map_queue(q, ctx->cpu);
+ if (is_sync)
+ rw |= REQ_SYNC;
trace_block_getrq(q, bio, rw);
rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false);
if (likely(rq))
@@ -950,7 +913,7 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio)
__blk_mq_free_request(hctx, ctx, rq);
else {
blk_mq_bio_to_request(rq, bio);
- __blk_mq_insert_request(hctx, rq);
+ __blk_mq_insert_request(hctx, rq, false);
}
spin_unlock(&ctx->lock);
@@ -1309,15 +1272,6 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
reg->queue_depth = BLK_MQ_MAX_DEPTH;
}
- /*
- * Set aside a tag for flush requests. It will only be used while
- * another flush request is in progress but outside the driver.
- *
- * TODO: only allocate if flushes are supported
- */
- reg->queue_depth++;
- reg->reserved_tags++;
-
if (reg->queue_depth < (reg->reserved_tags + BLK_MQ_TAG_MIN))
return ERR_PTR(-EINVAL);
@@ -1360,17 +1314,27 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
q->mq_ops = reg->ops;
q->queue_flags |= QUEUE_FLAG_MQ_DEFAULT;
+ q->sg_reserved_size = INT_MAX;
+
blk_queue_make_request(q, blk_mq_make_request);
blk_queue_rq_timed_out(q, reg->ops->timeout);
if (reg->timeout)
blk_queue_rq_timeout(q, reg->timeout);
+ if (reg->ops->complete)
+ blk_queue_softirq_done(q, reg->ops->complete);
+
blk_mq_init_flush(q);
blk_mq_init_cpu_queues(q, reg->nr_hw_queues);
- if (blk_mq_init_hw_queues(q, reg, driver_data))
+ q->flush_rq = kzalloc(round_up(sizeof(struct request) + reg->cmd_size,
+ cache_line_size()), GFP_KERNEL);
+ if (!q->flush_rq)
goto err_hw;
+ if (blk_mq_init_hw_queues(q, reg, driver_data))
+ goto err_flush_rq;
+
blk_mq_map_swqueue(q);
mutex_lock(&all_q_mutex);
@@ -1378,6 +1342,9 @@ struct request_queue *blk_mq_init_queue(struct blk_mq_reg *reg,
mutex_unlock(&all_q_mutex);
return q;
+
+err_flush_rq:
+ kfree(q->flush_rq);
err_hw:
kfree(q->mq_map);
err_map:
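
With the completion rework above, a driver no longer passes an error to the completion entry point: it calls blk_mq_complete_request() from its interrupt path, blk-mq bounces the work to the submitting CPU when ipi_redirect is set, and the driver's ->complete hook (installed through blk_queue_softirq_done() in blk_mq_init_queue()) finishes the request. A hedged sketch of the driver side, with hypothetical names:

	/* ->complete hook in struct blk_mq_ops; runs on the submitting CPU. */
	static void example_softirq_done(struct request *rq)
	{
		blk_mq_end_io(rq, rq->errors);
	}

	static irqreturn_t example_isr(int irq, void *data)
	{
		struct request *rq = example_pop_completed(data);	/* hypothetical */

		blk_mq_complete_request(rq);	/* ends up in example_softirq_done() */
		return IRQ_HANDLED;
	}
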
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 5c3917984b00..72beba1f9d55 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -22,13 +22,12 @@ struct blk_mq_ctx {
struct kobject kobj;
};
-void __blk_mq_end_io(struct request *rq, int error);
-void blk_mq_complete_request(struct request *rq, int error);
-void blk_mq_run_request(struct request *rq, bool run_queue, bool async);
+void __blk_mq_complete_request(struct request *rq);
void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_init_flush(struct request_queue *q);
void blk_mq_drain_queue(struct request_queue *q);
void blk_mq_free_queue(struct request_queue *q);
+void blk_mq_rq_init(struct blk_mq_hw_ctx *hctx, struct request *rq);
/*
* CPU hotplug helpers
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 8095c4a21fc0..7500f876dae4 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -549,6 +549,8 @@ static void blk_release_queue(struct kobject *kobj)
if (q->mq_ops)
blk_mq_free_queue(q);
+ kfree(q->flush_rq);
+
blk_trace_shutdown(q);
bdi_destroy(&q->backing_dev_info);
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index bba81c9348e1..d96f7061c6fd 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -91,7 +91,7 @@ static void blk_rq_timed_out(struct request *req)
case BLK_EH_HANDLED:
/* Can we use req->errors here? */
if (q->mq_ops)
- blk_mq_complete_request(req, req->errors);
+ __blk_mq_complete_request(req);
else
__blk_complete_request(req);
break;
diff --git a/block/blk.h b/block/blk.h
index c90e1d8f7a2b..d23b415b8a28 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -113,7 +113,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
q->flush_queue_delayed = 1;
return NULL;
}
- if (unlikely(blk_queue_dying(q)) ||
+ if (unlikely(blk_queue_bypass(q)) ||
!q->elevator->type->ops.elevator_dispatch_fn(q, 0))
return NULL;
}
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index e7515aa43d6b..6f190bc2b8b7 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -243,6 +243,8 @@ static int acpi_ac_resume(struct device *dev)
kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE);
return 0;
}
+#else
+#define acpi_ac_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_ac_pm_ops, NULL, acpi_ac_resume);
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 470e7542bf31..797a6938d051 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -549,7 +549,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm = x/1000;
if (acpi_battery_present(battery))
acpi_battery_set_alarm(battery);
@@ -841,6 +841,8 @@ static int acpi_battery_resume(struct device *dev)
acpi_battery_update(battery);
return 0;
}
+#else
+#define acpi_battery_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_battery_pm, NULL, acpi_battery_resume);
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 10e4964d051a..afec4526c48a 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -260,14 +260,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
},
{
.callback = dmi_disable_osi_win8,
- .ident = "Dell Inspiron 15R SE",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 7520"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
.ident = "ThinkPad Edge E530",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
@@ -322,56 +314,6 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_VERSION, "2349D15"),
},
},
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ProBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP EliteBook 2013 models",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
- DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 14",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 15",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP ZBook 17",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
- },
- },
- {
- .callback = dmi_disable_osi_win8,
- .ident = "HP EliteBook 8780w",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
- },
- },
/*
* BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/button.c b/drivers/acpi/button.c
index 11c11f6b8fa1..714e957a871a 100644
--- a/drivers/acpi/button.c
+++ b/drivers/acpi/button.c
@@ -80,6 +80,8 @@ static void acpi_button_notify(struct acpi_device *device, u32 event);
#ifdef CONFIG_PM_SLEEP
static int acpi_button_resume(struct device *dev);
+#else
+#define acpi_button_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_button_pm, NULL, acpi_button_resume);
diff --git a/drivers/acpi/container.c b/drivers/acpi/container.c
index 0b6ae6eb5c4a..368f9ddb8480 100644
--- a/drivers/acpi/container.c
+++ b/drivers/acpi/container.c
@@ -79,9 +79,10 @@ static int container_device_attach(struct acpi_device *adev,
ACPI_COMPANION_SET(dev, adev);
dev->release = acpi_container_release;
ret = device_register(dev);
- if (ret)
+ if (ret) {
+ put_device(dev);
return ret;
-
+ }
adev->driver_data = dev;
return 1;
}
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index c431c88faaff..5bfd769fc91f 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -609,7 +609,7 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
static void dock_notify(struct dock_station *ds, u32 event)
{
acpi_handle handle = ds->handle;
- struct acpi_device *ad;
+ struct acpi_device *adev = NULL;
int surprise_removal = 0;
/*
@@ -632,7 +632,8 @@ static void dock_notify(struct dock_station *ds, u32 event)
switch (event) {
case ACPI_NOTIFY_BUS_CHECK:
case ACPI_NOTIFY_DEVICE_CHECK:
- if (!dock_in_progress(ds) && acpi_bus_get_device(handle, &ad)) {
+ acpi_bus_get_device(handle, &adev);
+ if (!dock_in_progress(ds) && !acpi_device_enumerated(adev)) {
begin_dock(ds);
dock(ds);
if (!dock_present(ds)) {
@@ -712,13 +713,11 @@ static acpi_status __init find_dock_devices(acpi_handle handle, u32 lvl,
static ssize_t show_docked(struct device *dev,
struct device_attribute *attr, char *buf)
{
- struct acpi_device *tmp;
-
struct dock_station *dock_station = dev->platform_data;
+ struct acpi_device *adev = NULL;
- if (!acpi_bus_get_device(dock_station->handle, &tmp))
- return snprintf(buf, PAGE_SIZE, "1\n");
- return snprintf(buf, PAGE_SIZE, "0\n");
+ acpi_bus_get_device(dock_station->handle, &adev);
+ return snprintf(buf, PAGE_SIZE, "%u\n", acpi_device_enumerated(adev));
}
static DEVICE_ATTR(docked, S_IRUGO, show_docked, NULL);
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 959d41acc108..d7d32c28829b 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -67,6 +67,8 @@ enum ec_command {
#define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */
#define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */
#define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */
+#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query
+ * when trying to clear the EC */
enum {
EC_FLAGS_QUERY_PENDING, /* Query is pending */
@@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec);
static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
+static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
/* --------------------------------------------------------------------------
Transaction Management
@@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void)
EXPORT_SYMBOL(ec_get_handle);
+static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data);
+
+/*
+ * Clears stale _Q events that might have accumulated in the EC.
+ * Run with locked ec mutex.
+ */
+static void acpi_ec_clear(struct acpi_ec *ec)
+{
+ int i, status;
+ u8 value = 0;
+
+ for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) {
+ status = acpi_ec_query_unlocked(ec, &value);
+ if (status || !value)
+ break;
+ }
+
+ if (unlikely(i == ACPI_EC_CLEAR_MAX))
+ pr_warn("Warning: Maximum of %d stale EC events cleared\n", i);
+ else
+ pr_info("%d stale EC events cleared\n", i);
+}
+
void acpi_ec_block_transactions(void)
{
struct acpi_ec *ec = first_ec;
@@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void)
mutex_lock(&ec->mutex);
/* Allow transactions to be carried out again */
clear_bit(EC_FLAGS_BLOCKED, &ec->flags);
+
+ if (EC_FLAGS_CLEAR_ON_RESUME)
+ acpi_ec_clear(ec);
+
mutex_unlock(&ec->mutex);
}
@@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device)
/* EC is fully operational, allow queries */
clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
+
+ /* Clear stale _Q events if hardware might require that */
+ if (EC_FLAGS_CLEAR_ON_RESUME) {
+ mutex_lock(&ec->mutex);
+ acpi_ec_clear(ec);
+ mutex_unlock(&ec->mutex);
+ }
return ret;
}
@@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
return 0;
}
+/*
+ * On some hardware it is necessary to clear events accumulated by the EC during
+ * sleep. If too many events accumulate, these ECs stop reporting GPEs until
+ * they are manually polled (e.g. Samsung Series 5/9 notebooks).
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=44161
+ *
+ * Ideally, the EC should also be instructed NOT to accumulate events during
+ * sleep (which Windows seems to do somehow), but the interface to control this
+ * behaviour is not known at this time.
+ *
+ * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx;
+ * however, it is very likely that other Samsung models are affected.
+ *
+ * On systems which don't accumulate _Q events during sleep, this extra check
+ * should be harmless.
+ */
+static int ec_clear_on_resume(const struct dmi_system_id *id)
+{
+ pr_debug("Detected system needing EC poll on resume.\n");
+ EC_FLAGS_CLEAR_ON_RESUME = 1;
+ return 0;
+}
+
static struct dmi_system_id ec_dmi_table[] __initdata = {
{
ec_skip_dsdt_scan, "Compal JFL92", {
@@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
ec_validate_ecdt, "ASUS hardware", {
DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL},
+ {
+ ec_clear_on_resume, "Samsung hardware", {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
{},
};
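
Each ec_dmi_table entry pairs a quirk callback with DMI match strings, so extending the clear-on-resume workaround to another machine only takes one more entry next to the Samsung one. A purely hypothetical example (the vendor and product strings are invented):

	{
	ec_clear_on_resume, "Example vendor hardware", {
	DMI_MATCH(DMI_SYS_VENDOR, "EXAMPLE VENDOR"),
	DMI_MATCH(DMI_PRODUCT_NAME, "Example Notebook")}, NULL},
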
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 1fb62900f32a..09e423f3d8ad 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -55,6 +55,9 @@ MODULE_DEVICE_TABLE(acpi, fan_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_fan_suspend(struct device *dev);
static int acpi_fan_resume(struct device *dev);
+#else
+#define acpi_fan_suspend NULL
+#define acpi_fan_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_fan_pm, acpi_fan_suspend, acpi_fan_resume);
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 52d45ea2bc4f..361b40c10c3f 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -430,6 +430,7 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
pin_name(pin));
}
+ kfree(entry);
return 0;
}
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index 50fe34ffe932..75c28eae8860 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -60,7 +60,7 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset)
seq_printf(seq, "%c%-8s %s:%s\n",
dev->wakeup.flags.run_wake ? '*' : ' ',
(device_may_wakeup(&dev->dev) ||
- (ldev && device_may_wakeup(ldev))) ?
+ device_may_wakeup(ldev)) ?
"enabled" : "disabled",
ldev->bus ? ldev->bus->name :
"no-bus", dev_name(ldev));
diff --git a/drivers/acpi/processor_throttling.c b/drivers/acpi/processor_throttling.c
index 28baa05b8018..84243c32e29c 100644
--- a/drivers/acpi/processor_throttling.c
+++ b/drivers/acpi/processor_throttling.c
@@ -56,6 +56,12 @@ struct throttling_tstate {
int target_state; /* target T-state */
};
+struct acpi_processor_throttling_arg {
+ struct acpi_processor *pr;
+ int target_state;
+ bool force;
+};
+
#define THROTTLING_PRECHANGE (1)
#define THROTTLING_POSTCHANGE (2)
@@ -1060,16 +1066,24 @@ static int acpi_processor_set_throttling_ptc(struct acpi_processor *pr,
return 0;
}
+static long acpi_processor_throttling_fn(void *data)
+{
+ struct acpi_processor_throttling_arg *arg = data;
+ struct acpi_processor *pr = arg->pr;
+
+ return pr->throttling.acpi_processor_set_throttling(pr,
+ arg->target_state, arg->force);
+}
+
int acpi_processor_set_throttling(struct acpi_processor *pr,
int state, bool force)
{
- cpumask_var_t saved_mask;
int ret = 0;
unsigned int i;
struct acpi_processor *match_pr;
struct acpi_processor_throttling *p_throttling;
+ struct acpi_processor_throttling_arg arg;
struct throttling_tstate t_state;
- cpumask_var_t online_throttling_cpus;
if (!pr)
return -EINVAL;
@@ -1080,14 +1094,6 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
if ((state < 0) || (state > (pr->throttling.state_count - 1)))
return -EINVAL;
- if (!alloc_cpumask_var(&saved_mask, GFP_KERNEL))
- return -ENOMEM;
-
- if (!alloc_cpumask_var(&online_throttling_cpus, GFP_KERNEL)) {
- free_cpumask_var(saved_mask);
- return -ENOMEM;
- }
-
if (cpu_is_offline(pr->id)) {
/*
* the cpu pointed by pr->id is offline. Unnecessary to change
@@ -1096,17 +1102,15 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
return -ENODEV;
}
- cpumask_copy(saved_mask, &current->cpus_allowed);
t_state.target_state = state;
p_throttling = &(pr->throttling);
- cpumask_and(online_throttling_cpus, cpu_online_mask,
- p_throttling->shared_cpu_map);
+
/*
* The throttling notifier will be called for every
* affected cpu in order to get one proper T-state.
* The notifier event is THROTTLING_PRECHANGE.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_PRECHANGE,
&t_state);
@@ -1118,21 +1122,18 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* it can be called only for the cpu pointed by pr.
*/
if (p_throttling->shared_type == DOMAIN_COORD_TYPE_SW_ANY) {
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(pr->id))) {
- /* Can't migrate to the pr->id CPU. Exit */
- ret = -ENODEV;
- goto exit;
- }
- ret = p_throttling->acpi_processor_set_throttling(pr,
- t_state.target_state, force);
+ arg.pr = pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn, &arg);
} else {
/*
* When the T-state coordination is SW_ALL or HW_ALL,
* it is necessary to set T-state for every affected
* cpus.
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask,
+ p_throttling->shared_cpu_map) {
match_pr = per_cpu(processors, i);
/*
* If the pointer is invalid, we will report the
@@ -1153,13 +1154,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
"on CPU %d\n", i));
continue;
}
- t_state.cpu = i;
- /* FIXME: use work_on_cpu() */
- if (set_cpus_allowed_ptr(current, cpumask_of(i)))
- continue;
- ret = match_pr->throttling.
- acpi_processor_set_throttling(
- match_pr, t_state.target_state, force);
+
+ arg.pr = match_pr;
+ arg.target_state = state;
+ arg.force = force;
+ ret = work_on_cpu(pr->id, acpi_processor_throttling_fn,
+ &arg);
}
}
/*
@@ -1168,17 +1168,12 @@ int acpi_processor_set_throttling(struct acpi_processor *pr,
* affected cpu to update the T-states.
* The notifier event is THROTTLING_POSTCHANGE
*/
- for_each_cpu(i, online_throttling_cpus) {
+ for_each_cpu_and(i, cpu_online_mask, p_throttling->shared_cpu_map) {
t_state.cpu = i;
acpi_processor_throttling_notifier(THROTTLING_POSTCHANGE,
&t_state);
}
- /* restore the previous state */
- /* FIXME: use work_on_cpu() */
- set_cpus_allowed_ptr(current, saved_mask);
-exit:
- free_cpumask_var(online_throttling_cpus);
- free_cpumask_var(saved_mask);
+
return ret;
}
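
work_on_cpu() queues a function on the target CPU's workqueue, waits for it to finish, and returns its long result, which is what lets the code above drop the set_cpus_allowed_ptr() juggling and the saved cpumask. A minimal sketch of the same pattern outside ACPI (the helper names are invented):

	struct example_arg {
		int target_state;
	};

	static long example_apply_state(void *data)
	{
		struct example_arg *arg = data;

		/* Runs on the CPU handed to work_on_cpu(), in process context. */
		return example_write_state(arg->target_state);	/* hypothetical */
	}

	static int example_set_state(unsigned int cpu, int state)
	{
		struct example_arg arg = { .target_state = state };

		return work_on_cpu(cpu, example_apply_state, &arg);
	}
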
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index b7201fc6f1e1..0bdacc5e26a3 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_MEMORY24:
memory24 = &ares->data.memory24;
+ if (!memory24->address_length)
+ return false;
acpi_dev_get_memresource(res, memory24->minimum,
memory24->address_length,
memory24->write_protect);
break;
case ACPI_RESOURCE_TYPE_MEMORY32:
memory32 = &ares->data.memory32;
+ if (!memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, memory32->minimum,
memory32->address_length,
memory32->write_protect);
break;
case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
fixed_memory32 = &ares->data.fixed_memory32;
+ if (!fixed_memory32->address_length)
+ return false;
acpi_dev_get_memresource(res, fixed_memory32->address,
fixed_memory32->address_length,
fixed_memory32->write_protect);
@@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
switch (ares->type) {
case ACPI_RESOURCE_TYPE_IO:
io = &ares->data.io;
+ if (!io->address_length)
+ return false;
acpi_dev_get_ioresource(res, io->minimum,
io->address_length,
io->io_decode);
break;
case ACPI_RESOURCE_TYPE_FIXED_IO:
fixed_io = &ares->data.fixed_io;
+ if (!fixed_io->address_length)
+ return false;
acpi_dev_get_ioresource(res, fixed_io->address,
fixed_io->address_length,
ACPI_DECODE_10);
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index d465ae6cdd00..dbd48498b938 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -450,7 +450,7 @@ static ssize_t acpi_battery_alarm_store(struct device *dev,
{
unsigned long x;
struct acpi_battery *battery = to_acpi_battery(dev_get_drvdata(dev));
- if (sscanf(buf, "%ld\n", &x) == 1)
+ if (sscanf(buf, "%lu\n", &x) == 1)
battery->alarm_capacity = x /
(1000 * acpi_battery_scale(battery));
if (battery->present)
@@ -668,6 +668,8 @@ static int acpi_sbs_resume(struct device *dev)
acpi_sbs_callback(sbs);
return 0;
}
+#else
+#define acpi_sbs_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_sbs_pm, NULL, acpi_sbs_resume);
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 7384158c7f87..57b053f424d1 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -484,7 +484,6 @@ static void acpi_device_hotplug(void *data, u32 src)
static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
{
u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
- struct acpi_scan_handler *handler = data;
struct acpi_device *adev;
acpi_status status;
@@ -500,7 +499,10 @@ static void acpi_hotplug_notify_cb(acpi_handle handle, u32 type, void *data)
break;
case ACPI_NOTIFY_EJECT_REQUEST:
acpi_handle_debug(handle, "ACPI_NOTIFY_EJECT_REQUEST event\n");
- if (!handler->hotplug.enabled) {
+ if (!adev->handler)
+ goto err_out;
+
+ if (!adev->handler->hotplug.enabled) {
acpi_handle_err(handle, "Eject disabled\n");
ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
goto err_out;
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index b718806657cd..c40fb2e81bbc 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -71,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state)
return 0;
}
+static bool acpi_sleep_state_supported(u8 sleep_state)
+{
+ acpi_status status;
+ u8 type_a, type_b;
+
+ status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b);
+ return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware
+ || (acpi_gbl_FADT.sleep_control.address
+ && acpi_gbl_FADT.sleep_status.address));
+}
+
#ifdef CONFIG_ACPI_SLEEP
static u32 acpi_target_sleep_state = ACPI_STATE_S0;
@@ -604,15 +615,9 @@ static void acpi_sleep_suspend_setup(void)
{
int i;
- for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) {
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(i, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++)
+ if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- }
- }
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
@@ -740,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
static void acpi_sleep_hibernate_setup(void)
{
- acpi_status status;
- u8 type_a, type_b;
-
- status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b);
- if (ACPI_FAILURE(status))
+ if (!acpi_sleep_state_supported(ACPI_STATE_S4))
return;
hibernation_set_ops(old_suspend_ordering ?
@@ -793,8 +794,6 @@ static void acpi_power_off(void)
int __init acpi_sleep_init(void)
{
- acpi_status status;
- u8 type_a, type_b;
char supported[ACPI_S_STATE_COUNT * 3 + 1];
char *pos = supported;
int i;
@@ -806,8 +805,7 @@ int __init acpi_sleep_init(void)
acpi_sleep_suspend_setup();
acpi_sleep_hibernate_setup();
- status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b);
- if (ACPI_SUCCESS(status)) {
+ if (acpi_sleep_state_supported(ACPI_STATE_S5)) {
sleep_states[ACPI_STATE_S5] = 1;
pm_power_off_prepare = acpi_power_off_prepare;
pm_power_off = acpi_power_off;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 8349a555b92b..08626c851be7 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -102,6 +102,8 @@ MODULE_DEVICE_TABLE(acpi, thermal_device_ids);
#ifdef CONFIG_PM_SLEEP
static int acpi_thermal_resume(struct device *dev);
+#else
+#define acpi_thermal_resume NULL
#endif
static SIMPLE_DEV_PM_OPS(acpi_thermal_pm, NULL, acpi_thermal_resume);
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 0347a37eb438..85e3b612bdc0 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -99,10 +99,6 @@ acpi_extract_package(union acpi_object *package,
union acpi_object *element = &(package->package.elements[i]);
- if (!element) {
- return AE_BAD_DATA;
- }
-
switch (element->type) {
case ACPI_TYPE_INTEGER:
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index b727d105046d..b6ba88ed31ae 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -81,11 +81,12 @@ static bool allow_duplicates;
module_param(allow_duplicates, bool, 0644);
/*
- * For Windows 8 systems: if set ture and the GPU driver has
- * registered a backlight interface, skip registering ACPI video's.
+ * For Windows 8 systems: used to decide whether the video module
+ * should skip registering its own backlight interface.
*/
-static bool use_native_backlight = false;
-module_param(use_native_backlight, bool, 0644);
+static int use_native_backlight_param = -1;
+module_param_named(use_native_backlight, use_native_backlight_param, int, 0444);
+static bool use_native_backlight_dmi = false;
static int register_count;
static struct mutex video_list_lock;
@@ -231,9 +232,17 @@ static int acpi_video_get_next_level(struct acpi_video_device *device,
static int acpi_video_switch_brightness(struct acpi_video_device *device,
int event);
+static bool acpi_video_use_native_backlight(void)
+{
+ if (use_native_backlight_param != -1)
+ return use_native_backlight_param;
+ else
+ return use_native_backlight_dmi;
+}
+
static bool acpi_video_verify_backlight_support(void)
{
- if (acpi_osi_is_win8() && use_native_backlight &&
+ if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
backlight_device_registered(BACKLIGHT_RAW))
return false;
return acpi_video_backlight_support();
@@ -398,6 +407,12 @@ static int __init video_set_bqc_offset(const struct dmi_system_id *d)
return 0;
}
+static int __init video_set_use_native_backlight(const struct dmi_system_id *d)
+{
+ use_native_backlight_dmi = true;
+ return 0;
+}
+
static struct dmi_system_id video_dmi_table[] __initdata = {
/*
* Broken _BQC workaround http://bugzilla.kernel.org/show_bug.cgi?id=13121
@@ -442,6 +457,120 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7720"),
},
},
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad T430s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T430s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X230",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X230"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "ThinkPad X1 Carbon",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X1 Carbon"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Lenovo Yoga 13",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Dell Inspiron 7520",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Inspiron 7520"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire 5733Z",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5733Z"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "Acer Aspire V5-431",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-431"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 4340s",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4340s"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ProBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ProBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 2013 models",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook "),
+ DMI_MATCH(DMI_PRODUCT_NAME, " G1"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 14",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 14"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 15",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 15"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP ZBook 17",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP ZBook 17"),
+ },
+ },
+ {
+ .callback = video_set_use_native_backlight,
+ .ident = "HP EliteBook 8780w",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "HP EliteBook 8780w"),
+ },
+ },
{}
};
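The entries added above extend video_dmi_table with machines whose vendor backlight interface is known to misbehave with Windows 8 firmware; a match simply sets use_native_backlight_dmi, and the use_native_backlight module parameter (default -1, meaning "follow DMI") can override it either way. As a rough userspace analogue -- reading the usual sysfs DMI files rather than using any ACPI code, and with an abbreviated match list -- the same vendor/version matching could look like the sketch below; the paths and quirk list are illustrative assumptions, and like DMI_MATCH the comparison is a substring match:

#include <stdio.h>
#include <string.h>

static int read_dmi(const char *path, char *buf, size_t len)
{
	FILE *f = fopen(path, "r");

	if (!f || !fgets(buf, len, f)) {
		if (f)
			fclose(f);
		return -1;
	}
	buf[strcspn(buf, "\n")] = '\0';
	fclose(f);
	return 0;
}

int main(void)
{
	static const struct { const char *vendor, *version; } quirks[] = {
		{ "LENOVO", "ThinkPad X230" },
		{ "LENOVO", "ThinkPad X1 Carbon" },
		{ "Dell Inc.", "Inspiron 7520" },
	};
	char vendor[64] = "", version[64] = "";
	unsigned int i;

	read_dmi("/sys/class/dmi/id/sys_vendor", vendor, sizeof(vendor));
	read_dmi("/sys/class/dmi/id/product_version", version, sizeof(version));

	for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++) {
		if (strstr(vendor, quirks[i].vendor) &&
		    strstr(version, quirks[i].version)) {
			printf("native backlight preferred for %s %s\n",
			       vendor, version);
			return 0;
		}
	}
	printf("no quirk match; fall back to default policy\n");
	return 0;
}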
@@ -685,6 +814,7 @@ acpi_video_init_brightness(struct acpi_video_device *device)
union acpi_object *o;
struct acpi_video_device_brightness *br = NULL;
int result = -EINVAL;
+ u32 value;
if (!ACPI_SUCCESS(acpi_video_device_lcd_query_levels(device, &obj))) {
ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Could not query available "
@@ -715,7 +845,12 @@ acpi_video_init_brightness(struct acpi_video_device *device)
printk(KERN_ERR PREFIX "Invalid data\n");
continue;
}
- br->levels[count] = (u32) o->integer.value;
+ value = (u32) o->integer.value;
+ /* Skip duplicate entries */
+ if (count > 2 && br->levels[count - 1] == value)
+ continue;
+
+ br->levels[count] = value;
if (br->levels[count] > max_level)
max_level = br->levels[count];
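The brightness hunk above drops consecutive duplicate _BCL entries while copying them; the count > 2 guard keeps the de-duplication away from the first two slots, which hold the AC/battery default levels. A small standalone sketch of the same pass over invented level data:

#include <stdio.h>

int main(void)
{
	const unsigned int raw[] = { 80, 50, 0, 10, 10, 20, 30, 30, 30, 100 };
	unsigned int levels[16];
	unsigned int count = 0, i, max_level = 0;

	for (i = 0; i < sizeof(raw) / sizeof(raw[0]); i++) {
		unsigned int value = raw[i];

		/* Skip duplicate entries after the two default slots */
		if (count > 2 && levels[count - 1] == value)
			continue;

		levels[count] = value;
		if (levels[count] > max_level)
			max_level = levels[count];
		count++;
	}

	printf("kept %u of %zu levels, max %u\n",
	       count, sizeof(raw) / sizeof(raw[0]), max_level);
	for (i = 0; i < count; i++)
		printf(" %u", levels[i]);
	printf("\n");
	return 0;
}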
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index f0447d3daf2c..19080c8e2f2a 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -168,14 +168,6 @@ static struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
},
},
- {
- .callback = video_detect_force_vendor,
- .ident = "Lenovo Yoga 13",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
- DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo IdeaPad Yoga 13"),
- },
- },
{ },
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index 4e737728aee2..868429a47be4 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -247,6 +247,7 @@ config SATA_HIGHBANK
config SATA_MV
tristate "Marvell SATA support"
+ select GENERIC_PHY
help
This option enables support for the Marvell Serial ATA family.
Currently supports 88SX[56]0[48][01] PCI(-X) chips,
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index dc2756fb6f33..c81d809c111b 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -61,6 +61,7 @@ enum board_ids {
/* board IDs by feature in alphabetical order */
board_ahci,
board_ahci_ign_iferr,
+ board_ahci_noncq,
board_ahci_nosntf,
board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
.udma_mask = ATA_UDMA6,
.port_ops = &ahci_ops,
},
+ [board_ahci_noncq] = {
+ AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
+ .flags = AHCI_FLAG_COMMON,
+ .pio_mask = ATA_PIO4,
+ .udma_mask = ATA_UDMA6,
+ .port_ops = &ahci_ops,
+ },
[board_ahci_nosntf] = {
AHCI_HFLAGS (AHCI_HFLAG_NO_SNTF),
.flags = AHCI_FLAG_COMMON,
@@ -452,6 +460,12 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(ASMEDIA, 0x0611), board_ahci }, /* ASM1061 */
{ PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
+ /*
+ * Samsung SSDs found on some macbooks. NCQ times out.
+ * https://bugzilla.kernel.org/show_bug.cgi?id=60731
+ */
+ { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq },
+
/* Enmotus */
{ PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -1170,8 +1184,10 @@ static int ahci_init_interrupts(struct pci_dev *pdev, unsigned int n_ports,
nvec = rc;
rc = pci_enable_msi_block(pdev, nvec);
- if (rc)
+ if (rc < 0)
goto intx;
+ else if (rc > 0)
+ goto single_msi;
return nvec;
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 1a3dbd1b196e..8cb2522d592a 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
{ "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+ { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
@@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
/* devices that don't properly handle queued TRIM commands */
{ "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 20fd337a5731..7ccc084bf1df 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -447,8 +447,11 @@ static void sata_pmp_quirks(struct ata_port *ap)
* otherwise. Don't try hard to recover it.
*/
ap->pmp_link[ap->nr_pmp_links - 1].flags |= ATA_LFLAG_NO_RETRY;
- } else if (vendor == 0x197b && devid == 0x2352) {
- /* chip found in Thermaltake BlackX Duet, jmicron JMB350? */
+ } else if (vendor == 0x197b && (devid == 0x2352 || devid == 0x0325)) {
+ /*
+ * 0x2352: found in Thermaltake BlackX Duet, jmicron JMB350?
+ * 0x0325: jmicron JMB394.
+ */
ata_for_each_link(link, ap, EDGE) {
/* SRST breaks detection and disks get misclassified
* LPM disabled to avoid potential problems
diff --git a/drivers/ata/pata_imx.c b/drivers/ata/pata_imx.c
index 26386f0b89a8..b0b18ec5465f 100644
--- a/drivers/ata/pata_imx.c
+++ b/drivers/ata/pata_imx.c
@@ -119,7 +119,9 @@ static int pata_imx_probe(struct platform_device *pdev)
return PTR_ERR(priv->clk);
}
- clk_prepare_enable(priv->clk);
+ ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
@@ -212,7 +214,9 @@ static int pata_imx_resume(struct device *dev)
struct ata_host *host = dev_get_drvdata(dev);
struct pata_imx_priv *priv = host->private_data;
- clk_prepare_enable(priv->clk);
+ int ret = clk_prepare_enable(priv->clk);
+ if (ret)
+ return ret;
__raw_writel(priv->ata_ctl, priv->host_regs + PATA_IMX_ATA_CONTROL);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 20a7517bd339..05c8a44adf8e 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -4104,7 +4104,6 @@ static int mv_platform_probe(struct platform_device *pdev)
if (!hpriv->port_phys)
return -ENOMEM;
host->private_data = hpriv;
- hpriv->n_ports = n_ports;
hpriv->board_idx = chip_soc;
host->iomap = NULL;
@@ -4126,17 +4125,24 @@ static int mv_platform_probe(struct platform_device *pdev)
clk_prepare_enable(hpriv->port_clks[port]);
sprintf(port_number, "port%d", port);
- hpriv->port_phys[port] = devm_phy_get(&pdev->dev, port_number);
+ hpriv->port_phys[port] = devm_phy_optional_get(&pdev->dev,
+ port_number);
if (IS_ERR(hpriv->port_phys[port])) {
rc = PTR_ERR(hpriv->port_phys[port]);
hpriv->port_phys[port] = NULL;
- if ((rc != -EPROBE_DEFER) && (rc != -ENODEV))
- dev_warn(&pdev->dev, "error getting phy");
+ if (rc != -EPROBE_DEFER)
+ dev_warn(&pdev->dev, "error getting phy %d", rc);
+
+ /* Cleanup only the initialized ports */
+ hpriv->n_ports = port;
goto err;
} else
phy_power_on(hpriv->port_phys[port]);
}
+ /* All the ports have been initialized */
+ hpriv->n_ports = n_ports;
+
/*
* (Re-)program MBUS remapping windows if we are asked to.
*/
@@ -4174,7 +4180,7 @@ err:
clk_disable_unprepare(hpriv->clk);
clk_put(hpriv->clk);
}
- for (port = 0; port < n_ports; port++) {
+ for (port = 0; port < hpriv->n_ports; port++) {
if (!IS_ERR(hpriv->port_clks[port])) {
clk_disable_unprepare(hpriv->port_clks[port]);
clk_put(hpriv->port_clks[port]);
diff --git a/drivers/ata/sata_sil.c b/drivers/ata/sata_sil.c
index d67fc351343c..b7695e804635 100644
--- a/drivers/ata/sata_sil.c
+++ b/drivers/ata/sata_sil.c
@@ -157,6 +157,7 @@ static const struct sil_drivelist {
{ "ST380011ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3120022ASL", SIL_QUIRK_MOD15WRITE },
{ "ST3160021ASL", SIL_QUIRK_MOD15WRITE },
+ { "TOSHIBA MK2561GSYN", SIL_QUIRK_MOD15WRITE },
{ "Maxtor 4D060H3", SIL_QUIRK_UDMA5MAX },
{ }
};
diff --git a/drivers/base/component.c b/drivers/base/component.c
index c53efe6c6d8e..c4778995cd72 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -133,9 +133,16 @@ static int try_to_bring_up_master(struct master *master,
goto out;
}
+ if (!devres_open_group(master->dev, NULL, GFP_KERNEL)) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
/* Found all components */
ret = master->ops->bind(master->dev);
if (ret < 0) {
+ devres_release_group(master->dev, NULL);
+ dev_info(master->dev, "master bind failed: %d\n", ret);
master_remove_components(master);
goto out;
}
@@ -166,6 +173,7 @@ static void take_down_master(struct master *master)
{
if (master->bound) {
master->ops->unbind(master->dev);
+ devres_release_group(master->dev, NULL);
master->bound = false;
}
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 1e16cbd61da2..61d6d62cc0d3 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -616,36 +616,35 @@ static int dma_buf_describe(struct seq_file *s)
if (ret)
return ret;
- seq_printf(s, "\nDma-buf Objects:\n");
- seq_printf(s, "\texp_name\tsize\tflags\tmode\tcount\n");
+ seq_puts(s, "\nDma-buf Objects:\n");
+ seq_puts(s, "size\tflags\tmode\tcount\texp_name\n");
list_for_each_entry(buf_obj, &db_list.head, list_node) {
ret = mutex_lock_interruptible(&buf_obj->lock);
if (ret) {
- seq_printf(s,
- "\tERROR locking buffer object: skipping\n");
+ seq_puts(s,
+ "\tERROR locking buffer object: skipping\n");
continue;
}
- seq_printf(s, "\t");
-
- seq_printf(s, "\t%s\t%08zu\t%08x\t%08x\t%08ld\n",
- buf_obj->exp_name, buf_obj->size,
+ seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\n",
+ buf_obj->size,
buf_obj->file->f_flags, buf_obj->file->f_mode,
- (long)(buf_obj->file->f_count.counter));
+ (long)(buf_obj->file->f_count.counter),
+ buf_obj->exp_name);
- seq_printf(s, "\t\tAttached Devices:\n");
+ seq_puts(s, "\tAttached Devices:\n");
attach_count = 0;
list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
- seq_printf(s, "\t\t");
+ seq_puts(s, "\t");
- seq_printf(s, "%s\n", attach_obj->dev->init_name);
+ seq_printf(s, "%s\n", dev_name(attach_obj->dev));
attach_count++;
}
- seq_printf(s, "\n\t\tTotal %d devices attached\n",
+ seq_printf(s, "Total %d devices attached\n\n",
attach_count);
count++;
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 8a97ddfa6122..c30df50e4440 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -1580,6 +1580,7 @@ static int fw_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
kill_requests_without_uevent();
device_cache_fw_images();
break;
diff --git a/drivers/base/power/Makefile b/drivers/base/power/Makefile
index 2e58ebb1f6c0..1cb8544598d5 100644
--- a/drivers/base/power/Makefile
+++ b/drivers/base/power/Makefile
@@ -1,6 +1,5 @@
-obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o
+obj-$(CONFIG_PM) += sysfs.o generic_ops.o common.o qos.o runtime.o
obj-$(CONFIG_PM_SLEEP) += main.o wakeup.o
-obj-$(CONFIG_PM_RUNTIME) += runtime.o
obj-$(CONFIG_PM_TRACE_RTC) += trace.o
obj-$(CONFIG_PM_OPP) += opp.o
obj-$(CONFIG_PM_GENERIC_DOMAINS) += domain.o domain_governor.o
diff --git a/drivers/base/power/runtime.c b/drivers/base/power/runtime.c
index 72e00e66ecc5..4776cf528d08 100644
--- a/drivers/base/power/runtime.c
+++ b/drivers/base/power/runtime.c
@@ -13,6 +13,43 @@
#include <trace/events/rpm.h>
#include "power.h"
+#define RPM_GET_CALLBACK(dev, cb) \
+({ \
+ int (*__rpm_cb)(struct device *__d); \
+ \
+ if (dev->pm_domain) \
+ __rpm_cb = dev->pm_domain->ops.cb; \
+ else if (dev->type && dev->type->pm) \
+ __rpm_cb = dev->type->pm->cb; \
+ else if (dev->class && dev->class->pm) \
+ __rpm_cb = dev->class->pm->cb; \
+ else if (dev->bus && dev->bus->pm) \
+ __rpm_cb = dev->bus->pm->cb; \
+ else \
+ __rpm_cb = NULL; \
+ \
+ if (!__rpm_cb && dev->driver && dev->driver->pm) \
+ __rpm_cb = dev->driver->pm->cb; \
+ \
+ __rpm_cb; \
+})
+
+static int (*rpm_get_suspend_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_suspend);
+}
+
+static int (*rpm_get_resume_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_resume);
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int (*rpm_get_idle_cb(struct device *dev))(struct device *)
+{
+ return RPM_GET_CALLBACK(dev, runtime_idle);
+}
+
static int rpm_resume(struct device *dev, int rpmflags);
static int rpm_suspend(struct device *dev, int rpmflags);
@@ -310,19 +347,7 @@ static int rpm_idle(struct device *dev, int rpmflags)
dev->power.idle_notification = true;
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_idle;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_idle;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_idle;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_idle;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_idle;
+ callback = rpm_get_idle_cb(dev);
if (callback)
retval = __rpm_callback(callback, dev);
@@ -492,19 +517,7 @@ static int rpm_suspend(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_SUSPENDING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_suspend;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_suspend;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_suspend;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_suspend;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_suspend;
+ callback = rpm_get_suspend_cb(dev);
retval = rpm_callback(callback, dev);
if (retval)
@@ -724,19 +737,7 @@ static int rpm_resume(struct device *dev, int rpmflags)
__update_runtime_status(dev, RPM_RESUMING);
- if (dev->pm_domain)
- callback = dev->pm_domain->ops.runtime_resume;
- else if (dev->type && dev->type->pm)
- callback = dev->type->pm->runtime_resume;
- else if (dev->class && dev->class->pm)
- callback = dev->class->pm->runtime_resume;
- else if (dev->bus && dev->bus->pm)
- callback = dev->bus->pm->runtime_resume;
- else
- callback = NULL;
-
- if (!callback && dev->driver && dev->driver->pm)
- callback = dev->driver->pm->runtime_resume;
+ callback = rpm_get_resume_cb(dev);
retval = rpm_callback(callback, dev);
if (retval) {
@@ -1401,3 +1402,86 @@ void pm_runtime_remove(struct device *dev)
if (dev->power.irq_safe && dev->parent)
pm_runtime_put(dev->parent);
}
+#endif
+
+/**
+ * pm_runtime_force_suspend - Force a device into suspend state if needed.
+ * @dev: Device to suspend.
+ *
+ * Disable runtime PM so we can safely check the device's runtime PM status
+ * and, if it is active, invoke its .runtime_suspend callback to bring it into
+ * suspend state. Keep runtime PM disabled to preserve the state unless we
+ * encounter errors.
+ *
+ * Typically this function may be invoked from a system suspend callback to make
+ * sure the device is put into low power state.
+ */
+int pm_runtime_force_suspend(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ pm_runtime_disable(dev);
+
+ /*
+	 * Note that pm_runtime_status_suspended() returns false when
+	 * CONFIG_PM_RUNTIME is unset, which means the device will be put
+	 * into low power state in that case.
+ */
+ if (pm_runtime_status_suspended(dev))
+ return 0;
+
+ callback = rpm_get_suspend_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto err;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto err;
+
+ pm_runtime_set_suspended(dev);
+ return 0;
+err:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_suspend);
+
+/**
+ * pm_runtime_force_resume - Force a device into resume state.
+ * @dev: Device to resume.
+ *
+ * Prior to invoking this function we expect the user to have brought the
+ * device into low power state by a call to pm_runtime_force_suspend(). Here we
+ * reverse those actions and bring the device into full power. We update the
+ * runtime PM status and re-enable runtime PM.
+ *
+ * Typically this function may be invoked from a system resume callback to make
+ * sure the device is put into full power state.
+ */
+int pm_runtime_force_resume(struct device *dev)
+{
+ int (*callback)(struct device *);
+ int ret = 0;
+
+ callback = rpm_get_resume_cb(dev);
+
+ if (!callback) {
+ ret = -ENOSYS;
+ goto out;
+ }
+
+ ret = callback(dev);
+ if (ret)
+ goto out;
+
+ pm_runtime_set_active(dev);
+ pm_runtime_mark_last_busy(dev);
+out:
+ pm_runtime_enable(dev);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pm_runtime_force_resume);
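The RPM_GET_CALLBACK() macro above centralizes the if/else ladder that rpm_idle(), rpm_suspend() and rpm_resume() used to repeat: a PM domain takes precedence, then the type, class and bus PM ops, and the driver's own ops are only a fallback when none of those layers supplied a callback. The wrappers use C's "function returning a pointer to a function" declarator, e.g. int (*rpm_get_suspend_cb(struct device *dev))(struct device *). A compact userspace sketch of the same priority lookup, simplified so each layer is represented just by an optional callback pointer (all types here are invented stand-ins, not kernel structures):

#include <stdio.h>
#include <stddef.h>

struct fake_dev;
typedef int (*rpm_cb)(struct fake_dev *);

struct fake_dev {
	rpm_cb domain_suspend;
	rpm_cb type_suspend;
	rpm_cb class_suspend;
	rpm_cb bus_suspend;
	rpm_cb driver_suspend;
};

/* Returns a pointer to the highest-priority suspend callback, if any. */
static rpm_cb get_suspend_cb(struct fake_dev *dev)
{
	rpm_cb cb;

	if (dev->domain_suspend)
		cb = dev->domain_suspend;
	else if (dev->type_suspend)
		cb = dev->type_suspend;
	else if (dev->class_suspend)
		cb = dev->class_suspend;
	else if (dev->bus_suspend)
		cb = dev->bus_suspend;
	else
		cb = NULL;

	/* Driver callback is only a fallback. */
	if (!cb)
		cb = dev->driver_suspend;
	return cb;
}

static int bus_suspend(struct fake_dev *dev)    { (void)dev; puts("bus suspend");    return 0; }
static int driver_suspend(struct fake_dev *dev) { (void)dev; puts("driver suspend"); return 0; }

int main(void)
{
	struct fake_dev dev = { .bus_suspend = bus_suspend,
				.driver_suspend = driver_suspend };
	rpm_cb cb = get_suspend_cb(&dev);

	return cb ? cb(&dev) : -1;	/* prints "bus suspend" */
}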
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 8184451b57c0..422b7d84f686 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -874,7 +874,7 @@ bio_pageinc(struct bio *bio)
/* Non-zero page count for non-head members of
* compound pages is no longer allowed by the kernel.
*/
- page = compound_trans_head(bv.bv_page);
+ page = compound_head(bv.bv_page);
atomic_inc(&page->_count);
}
}
@@ -887,7 +887,7 @@ bio_pagedec(struct bio *bio)
struct bvec_iter iter;
bio_for_each_segment(bv, bio, iter) {
- page = compound_trans_head(bv.bv_page);
+ page = compound_head(bv.bv_page);
atomic_dec(&page->_count);
}
}
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 516026954be6..d777bb7cea93 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -4498,7 +4498,7 @@ static int mtip_pci_probe(struct pci_dev *pdev,
}
dev_info(&pdev->dev, "NUMA node %d (closest: %d,%d, probe on %d:%d)\n",
my_node, pcibus_to_node(pdev->bus), dev_to_node(&pdev->dev),
- cpu_to_node(smp_processor_id()), smp_processor_id());
+ cpu_to_node(raw_smp_processor_id()), raw_smp_processor_id());
dd = kzalloc_node(sizeof(struct driver_data), GFP_KERNEL, my_node);
if (dd == NULL) {
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index b52e9a6d6aad..54174cb32feb 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -53,7 +53,7 @@
#define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000
/* unaligned IO handling */
-#define MTIP_MAX_UNALIGNED_SLOTS 8
+#define MTIP_MAX_UNALIGNED_SLOTS 2
/* Macro to extract the tag bit number from a tag value. */
#define MTIP_TAG_BIT(tag) (tag & 0x1F)
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 3107282a9741..091b9ea14feb 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -60,7 +60,9 @@ enum {
NULL_IRQ_NONE = 0,
NULL_IRQ_SOFTIRQ = 1,
NULL_IRQ_TIMER = 2,
+};
+enum {
NULL_Q_BIO = 0,
NULL_Q_RQ = 1,
NULL_Q_MQ = 2,
@@ -172,18 +174,20 @@ static struct nullb_cmd *alloc_cmd(struct nullb_queue *nq, int can_wait)
static void end_cmd(struct nullb_cmd *cmd)
{
- if (cmd->rq) {
- if (queue_mode == NULL_Q_MQ)
- blk_mq_end_io(cmd->rq, 0);
- else {
- INIT_LIST_HEAD(&cmd->rq->queuelist);
- blk_end_request_all(cmd->rq, 0);
- }
- } else if (cmd->bio)
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_end_io(cmd->rq, 0);
+ return;
+ case NULL_Q_RQ:
+ INIT_LIST_HEAD(&cmd->rq->queuelist);
+ blk_end_request_all(cmd->rq, 0);
+ break;
+ case NULL_Q_BIO:
bio_endio(cmd->bio, 0);
+ break;
+ }
- if (queue_mode != NULL_Q_MQ)
- free_cmd(cmd);
+ free_cmd(cmd);
}
static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
@@ -195,6 +199,7 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
cq = &per_cpu(completion_queues, smp_processor_id());
while ((entry = llist_del_all(&cq->list)) != NULL) {
+ entry = llist_reverse_order(entry);
do {
cmd = container_of(entry, struct nullb_cmd, ll_list);
end_cmd(cmd);
@@ -221,61 +226,31 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
static void null_softirq_done_fn(struct request *rq)
{
- blk_end_request_all(rq, 0);
-}
-
-#ifdef CONFIG_SMP
-
-static void null_ipi_cmd_end_io(void *data)
-{
- struct completion_queue *cq;
- struct llist_node *entry, *next;
- struct nullb_cmd *cmd;
-
- cq = &per_cpu(completion_queues, smp_processor_id());
-
- entry = llist_del_all(&cq->list);
-
- while (entry) {
- next = entry->next;
- cmd = llist_entry(entry, struct nullb_cmd, ll_list);
- end_cmd(cmd);
- entry = next;
- }
-}
-
-static void null_cmd_end_ipi(struct nullb_cmd *cmd)
-{
- struct call_single_data *data = &cmd->csd;
- int cpu = get_cpu();
- struct completion_queue *cq = &per_cpu(completion_queues, cpu);
-
- cmd->ll_list.next = NULL;
-
- if (llist_add(&cmd->ll_list, &cq->list)) {
- data->func = null_ipi_cmd_end_io;
- data->flags = 0;
- __smp_call_function_single(cpu, data, 0);
- }
-
- put_cpu();
+ end_cmd(rq->special);
}
-#endif /* CONFIG_SMP */
-
static inline void null_handle_cmd(struct nullb_cmd *cmd)
{
/* Complete IO by inline, softirq or timer */
switch (irqmode) {
- case NULL_IRQ_NONE:
- end_cmd(cmd);
- break;
case NULL_IRQ_SOFTIRQ:
-#ifdef CONFIG_SMP
- null_cmd_end_ipi(cmd);
-#else
+ switch (queue_mode) {
+ case NULL_Q_MQ:
+ blk_mq_complete_request(cmd->rq);
+ break;
+ case NULL_Q_RQ:
+ blk_complete_request(cmd->rq);
+ break;
+ case NULL_Q_BIO:
+ /*
+ * XXX: no proper submitting cpu information available.
+ */
+ end_cmd(cmd);
+ break;
+ }
+ break;
+ case NULL_IRQ_NONE:
end_cmd(cmd);
-#endif
break;
case NULL_IRQ_TIMER:
null_cmd_end_timer(cmd);
@@ -411,6 +386,7 @@ static struct blk_mq_ops null_mq_ops = {
.queue_rq = null_queue_rq,
.map_queue = blk_mq_map_queue,
.init_hctx = null_init_hctx,
+ .complete = null_softirq_done_fn,
};
static struct blk_mq_reg null_mq_reg = {
@@ -609,13 +585,6 @@ static int __init null_init(void)
{
unsigned int i;
-#if !defined(CONFIG_SMP)
- if (irqmode == NULL_IRQ_SOFTIRQ) {
- pr_warn("null_blk: softirq completions not available.\n");
- pr_warn("null_blk: using direct completions.\n");
- irqmode = NULL_IRQ_NONE;
- }
-#endif
if (bs > PAGE_SIZE) {
pr_warn("null_blk: invalid block size\n");
pr_warn("null_blk: defaults block size to %lu\n", PAGE_SIZE);
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 1f14ac403945..51824d1f23ea 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -46,7 +46,6 @@
#define NVME_Q_DEPTH 1024
#define SQ_SIZE(depth) (depth * sizeof(struct nvme_command))
#define CQ_SIZE(depth) (depth * sizeof(struct nvme_completion))
-#define NVME_MINORS 64
#define ADMIN_TIMEOUT (60 * HZ)
static int nvme_major;
@@ -58,6 +57,17 @@ module_param(use_threaded_interrupts, int, 0);
static DEFINE_SPINLOCK(dev_list_lock);
static LIST_HEAD(dev_list);
static struct task_struct *nvme_thread;
+static struct workqueue_struct *nvme_workq;
+
+static void nvme_reset_failed_dev(struct work_struct *ws);
+
+struct async_cmd_info {
+ struct kthread_work work;
+ struct kthread_worker *worker;
+ u32 result;
+ int status;
+ void *ctx;
+};
/*
* An NVM Express queue. Each device has at least two (one for admin
@@ -66,6 +76,7 @@ static struct task_struct *nvme_thread;
struct nvme_queue {
struct device *q_dmadev;
struct nvme_dev *dev;
+	char irqname[24];	/* nvme4294967295q65535\0 */
spinlock_t q_lock;
struct nvme_command *sq_cmds;
volatile struct nvme_completion *cqes;
@@ -80,9 +91,11 @@ struct nvme_queue {
u16 sq_head;
u16 sq_tail;
u16 cq_head;
+ u16 qid;
u8 cq_phase;
u8 cqe_seen;
u8 q_suspended;
+ struct async_cmd_info cmdinfo;
unsigned long cmdid_data[];
};
@@ -97,6 +110,7 @@ static inline void _nvme_check_size(void)
BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+ BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@ -111,6 +125,7 @@ struct nvme_cmd_info {
nvme_completion_fn fn;
void *ctx;
unsigned long timeout;
+ int aborted;
};
static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@ -154,6 +169,7 @@ static int alloc_cmdid(struct nvme_queue *nvmeq, void *ctx,
info[cmdid].fn = handler;
info[cmdid].ctx = ctx;
info[cmdid].timeout = jiffies + timeout;
+ info[cmdid].aborted = 0;
return cmdid;
}
@@ -172,6 +188,7 @@ static int alloc_cmdid_killable(struct nvme_queue *nvmeq, void *ctx,
#define CMD_CTX_COMPLETED (0x310 + CMD_CTX_BASE)
#define CMD_CTX_INVALID (0x314 + CMD_CTX_BASE)
#define CMD_CTX_FLUSH (0x318 + CMD_CTX_BASE)
+#define CMD_CTX_ABORT (0x31C + CMD_CTX_BASE)
static void special_completion(struct nvme_dev *dev, void *ctx,
struct nvme_completion *cqe)
@@ -180,6 +197,10 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
return;
if (ctx == CMD_CTX_FLUSH)
return;
+ if (ctx == CMD_CTX_ABORT) {
+ ++dev->abort_limit;
+ return;
+ }
if (ctx == CMD_CTX_COMPLETED) {
dev_warn(&dev->pci_dev->dev,
"completed id %d twice on queue %d\n",
@@ -196,6 +217,15 @@ static void special_completion(struct nvme_dev *dev, void *ctx,
dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
}
+static void async_completion(struct nvme_dev *dev, void *ctx,
+ struct nvme_completion *cqe)
+{
+ struct async_cmd_info *cmdinfo = ctx;
+ cmdinfo->result = le32_to_cpup(&cqe->result);
+ cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+ queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+}
+
/*
* Called with local interrupts disabled and the q_lock held. May not sleep.
*/
@@ -693,7 +723,7 @@ static int nvme_process_cq(struct nvme_queue *nvmeq)
if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
return 0;
- writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+ writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
nvmeq->cq_head = head;
nvmeq->cq_phase = phase;
@@ -804,12 +834,34 @@ int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
return cmdinfo.status;
}
+static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+ struct nvme_command *cmd,
+ struct async_cmd_info *cmdinfo, unsigned timeout)
+{
+ int cmdid;
+
+ cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+ if (cmdid < 0)
+ return cmdid;
+ cmdinfo->status = -EINTR;
+ cmd->common.command_id = cmdid;
+ nvme_submit_cmd(nvmeq, cmd);
+ return 0;
+}
+
int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
u32 *result)
{
return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
}
+static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+ struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+{
+ return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+ ADMIN_TIMEOUT);
+}
+
static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
{
int status;
@@ -920,6 +972,56 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
}
/**
+ * nvme_abort_cmd - Attempt aborting a command
+ * @cmdid: Command id of a timed out IO
+ * @nvmeq: The queue with timed out IO
+ *
+ * Schedule controller reset if the command was already aborted once before and
+ * still hasn't been returned to the driver, or if this is the admin queue.
+ */
+static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+{
+ int a_cmdid;
+ struct nvme_command cmd;
+ struct nvme_dev *dev = nvmeq->dev;
+ struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+
+ if (!nvmeq->qid || info[cmdid].aborted) {
+ if (work_busy(&dev->reset_work))
+ return;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "I/O %d QID %d timeout, reset controller\n", cmdid,
+ nvmeq->qid);
+ PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ return;
+ }
+
+ if (!dev->abort_limit)
+ return;
+
+ a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+ ADMIN_TIMEOUT);
+ if (a_cmdid < 0)
+ return;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.abort.opcode = nvme_admin_abort_cmd;
+ cmd.abort.cid = cmdid;
+ cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+ cmd.abort.command_id = a_cmdid;
+
+ --dev->abort_limit;
+ info[cmdid].aborted = 1;
+ info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+
+ dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
+ nvme_submit_cmd(dev->queues[0], &cmd);
+}
+
+/**
* nvme_cancel_ios - Cancel outstanding I/Os
* @queue: The queue to cancel I/Os on
* @timeout: True to only cancel I/Os which have timed out
@@ -942,7 +1044,12 @@ static void nvme_cancel_ios(struct nvme_queue *nvmeq, bool timeout)
continue;
if (info[cmdid].ctx == CMD_CTX_CANCELLED)
continue;
- dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+ if (timeout && nvmeq->dev->initialized) {
+ nvme_abort_cmd(cmdid, nvmeq);
+ continue;
+ }
+ dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+ nvmeq->qid);
ctx = cancel_cmdid(nvmeq, cmdid, &fn);
fn(nvmeq->dev, ctx, &cqe);
}
@@ -964,26 +1071,31 @@ static void nvme_free_queue(struct nvme_queue *nvmeq)
kfree(nvmeq);
}
-static void nvme_free_queues(struct nvme_dev *dev)
+static void nvme_free_queues(struct nvme_dev *dev, int lowest)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--) {
+ for (i = dev->queue_count - 1; i >= lowest; i--) {
nvme_free_queue(dev->queues[i]);
dev->queue_count--;
dev->queues[i] = NULL;
}
}
-static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+/**
+ * nvme_suspend_queue - put queue into suspended state
+ * @nvmeq - queue to suspend
+ *
+ * Returns 1 if already suspended, 0 otherwise.
+ */
+static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- struct nvme_queue *nvmeq = dev->queues[qid];
- int vector = dev->entry[nvmeq->cq_vector].vector;
+ int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
spin_lock_irq(&nvmeq->q_lock);
if (nvmeq->q_suspended) {
spin_unlock_irq(&nvmeq->q_lock);
- return;
+ return 1;
}
nvmeq->q_suspended = 1;
spin_unlock_irq(&nvmeq->q_lock);
@@ -991,18 +1103,35 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
irq_set_affinity_hint(vector, NULL);
free_irq(vector, nvmeq);
- /* Don't tell the adapter to delete the admin queue */
- if (qid) {
- adapter_delete_sq(dev, qid);
- adapter_delete_cq(dev, qid);
- }
+ return 0;
+}
+static void nvme_clear_queue(struct nvme_queue *nvmeq)
+{
spin_lock_irq(&nvmeq->q_lock);
nvme_process_cq(nvmeq);
nvme_cancel_ios(nvmeq, false);
spin_unlock_irq(&nvmeq->q_lock);
}
+static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+{
+ struct nvme_queue *nvmeq = dev->queues[qid];
+
+ if (!nvmeq)
+ return;
+ if (nvme_suspend_queue(nvmeq))
+ return;
+
+ /* Don't tell the adapter to delete the admin queue.
+ * Don't tell a removed adapter to delete IO queues. */
+ if (qid && readl(&dev->bar->csts) != -1) {
+ adapter_delete_sq(dev, qid);
+ adapter_delete_cq(dev, qid);
+ }
+ nvme_clear_queue(nvmeq);
+}
+
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
int depth, int vector)
{
@@ -1025,15 +1154,18 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->q_dmadev = dmadev;
nvmeq->dev = dev;
+ snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+ dev->instance, qid);
spin_lock_init(&nvmeq->q_lock);
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
init_waitqueue_head(&nvmeq->sq_full);
init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
bio_list_init(&nvmeq->sq_cong);
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
nvmeq->cq_vector = vector;
+ nvmeq->qid = qid;
nvmeq->q_suspended = 1;
dev->queue_count++;
@@ -1052,11 +1184,10 @@ static int queue_request_irq(struct nvme_dev *dev, struct nvme_queue *nvmeq,
{
if (use_threaded_interrupts)
return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
- nvme_irq_check, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED,
+ nvme_irq_check, nvme_irq, IRQF_SHARED,
name, nvmeq);
return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
- IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+ IRQF_SHARED, name, nvmeq);
}
static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
@@ -1067,7 +1198,7 @@ static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
nvmeq->sq_tail = 0;
nvmeq->cq_head = 0;
nvmeq->cq_phase = 1;
- nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+ nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
memset(nvmeq->cmdid_data, 0, extra);
memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
nvme_cancel_ios(nvmeq, false);
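The doorbell arithmetic changes in nvme-core.c all follow from one representation change: db_stride now holds the stride in dwords (1 << CAP.DSTRD) instead of the raw CAP.DSTRD exponent, so the old shift expressions become plain multiplications. A quick standalone check that the old and new formulas agree (queue counts and stride values are made up for the demo):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned int dstrd, qid, nr_io_queues = 31;

	for (dstrd = 0; dstrd <= 4; dstrd++) {
		unsigned int db_stride = 1u << dstrd;	/* new representation */

		for (qid = 0; qid <= 16; qid++) {
			unsigned int old_idx = qid << (dstrd + 1);
			unsigned int new_idx = qid * 2 * db_stride;

			assert(old_idx == new_idx);
		}
		/* db_bar_size(): old shift form vs. new multiplication form */
		assert((4096u + ((nr_io_queues + 1) << (dstrd + 3))) ==
		       (4096u + (nr_io_queues + 1) * 8 * db_stride));
	}
	printf("old and new doorbell offset formulas agree\n");
	return 0;
}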
@@ -1087,13 +1218,13 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
if (result < 0)
goto release_cq;
- result = queue_request_irq(dev, nvmeq, "nvme");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result < 0)
goto release_sq;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, qid);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
@@ -1205,13 +1336,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
return result;
- result = queue_request_irq(dev, nvmeq, "nvme admin");
+ result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
return result;
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_init_queue(nvmeq, 0);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
return result;
}
@@ -1487,10 +1618,47 @@ static int nvme_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd,
}
}
+#ifdef CONFIG_COMPAT
+static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+
+ switch (cmd) {
+ case SG_IO:
+ return nvme_sg_io32(ns, arg);
+ }
+ return nvme_ioctl(bdev, mode, cmd, arg);
+}
+#else
+#define nvme_compat_ioctl NULL
+#endif
+
+static int nvme_open(struct block_device *bdev, fmode_t mode)
+{
+ struct nvme_ns *ns = bdev->bd_disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_get(&dev->kref);
+ return 0;
+}
+
+static void nvme_free_dev(struct kref *kref);
+
+static void nvme_release(struct gendisk *disk, fmode_t mode)
+{
+ struct nvme_ns *ns = disk->private_data;
+ struct nvme_dev *dev = ns->dev;
+
+ kref_put(&dev->kref, nvme_free_dev);
+}
+
static const struct block_device_operations nvme_fops = {
.owner = THIS_MODULE,
.ioctl = nvme_ioctl,
- .compat_ioctl = nvme_ioctl,
+ .compat_ioctl = nvme_compat_ioctl,
+ .open = nvme_open,
+ .release = nvme_release,
};
static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
@@ -1514,13 +1682,25 @@ static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
static int nvme_kthread(void *data)
{
- struct nvme_dev *dev;
+ struct nvme_dev *dev, *next;
while (!kthread_should_stop()) {
set_current_state(TASK_INTERRUPTIBLE);
spin_lock(&dev_list_lock);
- list_for_each_entry(dev, &dev_list, node) {
+ list_for_each_entry_safe(dev, next, &dev_list, node) {
int i;
+ if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+ dev->initialized) {
+ if (work_busy(&dev->reset_work))
+ continue;
+ list_del_init(&dev->node);
+ dev_warn(&dev->pci_dev->dev,
+ "Failed status, reset controller\n");
+ PREPARE_WORK(&dev->reset_work,
+ nvme_reset_failed_dev);
+ queue_work(nvme_workq, &dev->reset_work);
+ continue;
+ }
for (i = 0; i < dev->queue_count; i++) {
struct nvme_queue *nvmeq = dev->queues[i];
if (!nvmeq)
@@ -1541,33 +1721,6 @@ static int nvme_kthread(void *data)
return 0;
}
-static DEFINE_IDA(nvme_index_ida);
-
-static int nvme_get_ns_idx(void)
-{
- int index, error;
-
- do {
- if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
- return -1;
-
- spin_lock(&dev_list_lock);
- error = ida_get_new(&nvme_index_ida, &index);
- spin_unlock(&dev_list_lock);
- } while (error == -EAGAIN);
-
- if (error)
- index = -1;
- return index;
-}
-
-static void nvme_put_ns_idx(int index)
-{
- spin_lock(&dev_list_lock);
- ida_remove(&nvme_index_ida, index);
- spin_unlock(&dev_list_lock);
-}
-
static void nvme_config_discard(struct nvme_ns *ns)
{
u32 logical_block_size = queue_logical_block_size(ns->queue);
@@ -1601,7 +1754,7 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
ns->dev = dev;
ns->queue->queuedata = ns;
- disk = alloc_disk(NVME_MINORS);
+ disk = alloc_disk(0);
if (!disk)
goto out_free_queue;
ns->ns_id = nsid;
@@ -1614,12 +1767,12 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
disk->major = nvme_major;
- disk->minors = NVME_MINORS;
- disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+ disk->first_minor = 0;
disk->fops = &nvme_fops;
disk->private_data = ns;
disk->queue = ns->queue;
disk->driverfs_dev = &dev->pci_dev->dev;
+ disk->flags = GENHD_FL_EXT_DEVT;
sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
@@ -1635,15 +1788,6 @@ static struct nvme_ns *nvme_alloc_ns(struct nvme_dev *dev, unsigned nsid,
return NULL;
}
-static void nvme_ns_free(struct nvme_ns *ns)
-{
- int index = ns->disk->first_minor / NVME_MINORS;
- put_disk(ns->disk);
- nvme_put_ns_idx(index);
- blk_cleanup_queue(ns->queue);
- kfree(ns);
-}
-
static int set_queue_count(struct nvme_dev *dev, int count)
{
int status;
@@ -1659,11 +1803,12 @@ static int set_queue_count(struct nvme_dev *dev, int count)
static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
{
- return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+ return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
}
static int nvme_setup_io_queues(struct nvme_dev *dev)
{
+ struct nvme_queue *adminq = dev->queues[0];
struct pci_dev *pdev = dev->pci_dev;
int result, cpu, i, vecs, nr_io_queues, size, q_depth;
@@ -1690,7 +1835,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
}
/* Deregister the admin queue's interrupt */
- free_irq(dev->entry[0].vector, dev->queues[0]);
+ free_irq(dev->entry[0].vector, adminq);
vecs = nr_io_queues;
for (i = 0; i < vecs; i++)
@@ -1728,9 +1873,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
nr_io_queues = vecs;
- result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+ result = queue_request_irq(dev, adminq, adminq->irqname);
if (result) {
- dev->queues[0]->q_suspended = 1;
+ adminq->q_suspended = 1;
goto free_queues;
}
@@ -1739,9 +1884,9 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
- spin_lock(&nvmeq->q_lock);
+ spin_lock_irq(&nvmeq->q_lock);
nvme_cancel_ios(nvmeq, false);
- spin_unlock(&nvmeq->q_lock);
+ spin_unlock_irq(&nvmeq->q_lock);
nvme_free_queue(nvmeq);
dev->queue_count--;
@@ -1782,7 +1927,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
return 0;
free_queues:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 1);
return result;
}
@@ -1794,6 +1939,7 @@ static int nvme_setup_io_queues(struct nvme_dev *dev)
*/
static int nvme_dev_add(struct nvme_dev *dev)
{
+ struct pci_dev *pdev = dev->pci_dev;
int res;
unsigned nn, i;
struct nvme_ns *ns;
@@ -1803,8 +1949,7 @@ static int nvme_dev_add(struct nvme_dev *dev)
dma_addr_t dma_addr;
int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
- mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
- GFP_KERNEL);
+ mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
if (!mem)
return -ENOMEM;
@@ -1817,13 +1962,14 @@ static int nvme_dev_add(struct nvme_dev *dev)
ctrl = mem;
nn = le32_to_cpup(&ctrl->nn);
dev->oncs = le16_to_cpup(&ctrl->oncs);
+ dev->abort_limit = ctrl->acl + 1;
memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
if (ctrl->mdts)
dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
- if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
- (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+ if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+ (pdev->device == 0x0953) && ctrl->vs[3])
dev->stripe_size = 1 << (ctrl->vs[3] + shift);
id_ns = mem;
@@ -1871,16 +2017,21 @@ static int nvme_dev_map(struct nvme_dev *dev)
dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
goto disable;
- pci_set_drvdata(pdev, dev);
dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
if (!dev->bar)
goto disable;
-
- dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+ if (readl(&dev->bar->csts) == -1) {
+ result = -ENODEV;
+ goto unmap;
+ }
+ dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
dev->dbs = ((void __iomem *)dev->bar) + 4096;
return 0;
+ unmap:
+ iounmap(dev->bar);
+ dev->bar = NULL;
disable:
pci_release_regions(pdev);
disable_pci:
@@ -1898,37 +2049,183 @@ static void nvme_dev_unmap(struct nvme_dev *dev)
if (dev->bar) {
iounmap(dev->bar);
dev->bar = NULL;
+ pci_release_regions(dev->pci_dev);
}
- pci_release_regions(dev->pci_dev);
if (pci_is_enabled(dev->pci_dev))
pci_disable_device(dev->pci_dev);
}
+struct nvme_delq_ctx {
+ struct task_struct *waiter;
+ struct kthread_worker *worker;
+ atomic_t refcount;
+};
+
+static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+{
+ dq->waiter = current;
+ mb();
+
+ for (;;) {
+ set_current_state(TASK_KILLABLE);
+ if (!atomic_read(&dq->refcount))
+ break;
+ if (!schedule_timeout(ADMIN_TIMEOUT) ||
+ fatal_signal_pending(current)) {
+ set_current_state(TASK_RUNNING);
+
+ nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+ nvme_disable_queue(dev, 0);
+
+ send_sig(SIGKILL, dq->worker->task, 1);
+ flush_kthread_worker(dq->worker);
+ return;
+ }
+ }
+ set_current_state(TASK_RUNNING);
+}
+
+static void nvme_put_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_dec(&dq->refcount);
+ if (dq->waiter)
+ wake_up_process(dq->waiter);
+}
+
+static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+{
+ atomic_inc(&dq->refcount);
+ return dq;
+}
+
+static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+{
+ struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+
+ nvme_clear_queue(nvmeq);
+ nvme_put_dq(dq);
+}
+
+static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+ kthread_work_func_t fn)
+{
+ struct nvme_command c;
+
+ memset(&c, 0, sizeof(c));
+ c.delete_queue.opcode = opcode;
+ c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+
+ init_kthread_work(&nvmeq->cmdinfo.work, fn);
+ return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+}
+
+static void nvme_del_cq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_cq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+ nvme_del_cq_work_handler);
+}
+
+static void nvme_del_sq_work_handler(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ int status = nvmeq->cmdinfo.status;
+
+ if (!status)
+ status = nvme_delete_cq(nvmeq);
+ if (status)
+ nvme_del_queue_end(nvmeq);
+}
+
+static int nvme_delete_sq(struct nvme_queue *nvmeq)
+{
+ return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+ nvme_del_sq_work_handler);
+}
+
+static void nvme_del_queue_start(struct kthread_work *work)
+{
+ struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+ cmdinfo.work);
+ allow_signal(SIGKILL);
+ if (nvme_delete_sq(nvmeq))
+ nvme_del_queue_end(nvmeq);
+}
+
+static void nvme_disable_io_queues(struct nvme_dev *dev)
+{
+ int i;
+ DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+ struct nvme_delq_ctx dq;
+ struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+ &worker, "nvme%d", dev->instance);
+
+ if (IS_ERR(kworker_task)) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to create queue del task\n");
+ for (i = dev->queue_count - 1; i > 0; i--)
+ nvme_disable_queue(dev, i);
+ return;
+ }
+
+ dq.waiter = NULL;
+ atomic_set(&dq.refcount, 0);
+ dq.worker = &worker;
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+
+ if (nvme_suspend_queue(nvmeq))
+ continue;
+ nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+ nvmeq->cmdinfo.worker = dq.worker;
+ init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+ queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+ }
+ nvme_wait_dq(&dq, dev);
+ kthread_stop(kworker_task);
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
- for (i = dev->queue_count - 1; i >= 0; i--)
- nvme_disable_queue(dev, i);
+ dev->initialized = 0;
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
- if (dev->bar)
+ if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+ for (i = dev->queue_count - 1; i >= 0; i--) {
+ struct nvme_queue *nvmeq = dev->queues[i];
+ nvme_suspend_queue(nvmeq);
+ nvme_clear_queue(nvmeq);
+ }
+ } else {
+ nvme_disable_io_queues(dev);
nvme_shutdown_ctrl(dev);
+ nvme_disable_queue(dev, 0);
+ }
nvme_dev_unmap(dev);
}
static void nvme_dev_remove(struct nvme_dev *dev)
{
- struct nvme_ns *ns, *next;
+ struct nvme_ns *ns;
- list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
- list_del(&ns->list);
- del_gendisk(ns->disk);
- nvme_ns_free(ns);
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ if (ns->disk->flags & GENHD_FL_UP)
+ del_gendisk(ns->disk);
+ if (!blk_queue_dying(ns->queue))
+ blk_cleanup_queue(ns->queue);
}
}
@@ -1985,14 +2282,22 @@ static void nvme_release_instance(struct nvme_dev *dev)
spin_unlock(&dev_list_lock);
}
+static void nvme_free_namespaces(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns, *next;
+
+ list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+ list_del(&ns->list);
+ put_disk(ns->disk);
+ kfree(ns);
+ }
+}
+
static void nvme_free_dev(struct kref *kref)
{
struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
- nvme_dev_remove(dev);
- nvme_dev_shutdown(dev);
- nvme_free_queues(dev);
- nvme_release_instance(dev);
- nvme_release_prp_pools(dev);
+
+ nvme_free_namespaces(dev);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2056,6 +2361,7 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
disable:
+ nvme_disable_queue(dev, 0);
spin_lock(&dev_list_lock);
list_del_init(&dev->node);
spin_unlock(&dev_list_lock);
@@ -2064,6 +2370,71 @@ static int nvme_dev_start(struct nvme_dev *dev)
return result;
}
+static int nvme_remove_dead_ctrl(void *arg)
+{
+ struct nvme_dev *dev = (struct nvme_dev *)arg;
+ struct pci_dev *pdev = dev->pci_dev;
+
+ if (pci_get_drvdata(pdev))
+ pci_stop_and_remove_bus_device(pdev);
+ kref_put(&dev->kref, nvme_free_dev);
+ return 0;
+}
+
+static void nvme_remove_disks(struct work_struct *ws)
+{
+ int i;
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+
+ nvme_dev_remove(dev);
+ spin_lock(&dev_list_lock);
+ for (i = dev->queue_count - 1; i > 0; i--) {
+ BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+ nvme_free_queue(dev->queues[i]);
+ dev->queue_count--;
+ dev->queues[i] = NULL;
+ }
+ spin_unlock(&dev_list_lock);
+}
+
+static int nvme_dev_resume(struct nvme_dev *dev)
+{
+ int ret;
+
+ ret = nvme_dev_start(dev);
+ if (ret && ret != -EBUSY)
+ return ret;
+ if (ret == -EBUSY) {
+ spin_lock(&dev_list_lock);
+ PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+ queue_work(nvme_workq, &dev->reset_work);
+ spin_unlock(&dev_list_lock);
+ }
+ dev->initialized = 1;
+ return 0;
+}
+
+static void nvme_dev_reset(struct nvme_dev *dev)
+{
+ nvme_dev_shutdown(dev);
+ if (nvme_dev_resume(dev)) {
+ dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+ kref_get(&dev->kref);
+ if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+ dev->instance))) {
+ dev_err(&dev->pci_dev->dev,
+ "Failed to start controller remove task\n");
+ kref_put(&dev->kref, nvme_free_dev);
+ }
+ }
+}
+
+static void nvme_reset_failed_dev(struct work_struct *ws)
+{
+ struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+ nvme_dev_reset(dev);
+}
+
static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
int result = -ENOMEM;
@@ -2082,8 +2453,9 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto free;
INIT_LIST_HEAD(&dev->namespaces);
+ INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
dev->pci_dev = pdev;
-
+ pci_set_drvdata(pdev, dev);
result = nvme_set_instance(dev);
if (result)
goto free;
@@ -2099,6 +2471,7 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
goto release_pools;
}
+ kref_init(&dev->kref);
result = nvme_dev_add(dev);
if (result)
goto shutdown;
@@ -2113,15 +2486,16 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (result)
goto remove;
- kref_init(&dev->kref);
+ dev->initialized = 1;
return 0;
remove:
nvme_dev_remove(dev);
+ nvme_free_namespaces(dev);
shutdown:
nvme_dev_shutdown(dev);
release_pools:
- nvme_free_queues(dev);
+ nvme_free_queues(dev, 0);
nvme_release_prp_pools(dev);
release:
nvme_release_instance(dev);
@@ -2132,10 +2506,28 @@ static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return result;
}
+static void nvme_shutdown(struct pci_dev *pdev)
+{
+ struct nvme_dev *dev = pci_get_drvdata(pdev);
+ nvme_dev_shutdown(dev);
+}
+
static void nvme_remove(struct pci_dev *pdev)
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
+
+ spin_lock(&dev_list_lock);
+ list_del_init(&dev->node);
+ spin_unlock(&dev_list_lock);
+
+ pci_set_drvdata(pdev, NULL);
+ flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
+ nvme_dev_remove(dev);
+ nvme_dev_shutdown(dev);
+ nvme_free_queues(dev, 0);
+ nvme_release_instance(dev);
+ nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
@@ -2159,13 +2551,12 @@ static int nvme_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct nvme_dev *ndev = pci_get_drvdata(pdev);
- int ret;
- ret = nvme_dev_start(ndev);
- /* XXX: should remove gendisks if resume fails */
- if (ret)
- nvme_free_queues(ndev);
- return ret;
+ if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+ PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+ queue_work(nvme_workq, &ndev->reset_work);
+ }
+ return 0;
}
static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@ -2192,6 +2583,7 @@ static struct pci_driver nvme_driver = {
.id_table = nvme_id_table,
.probe = nvme_probe,
.remove = nvme_remove,
+ .shutdown = nvme_shutdown,
.driver = {
.pm = &nvme_dev_pm_ops,
},
@@ -2206,9 +2598,14 @@ static int __init nvme_init(void)
if (IS_ERR(nvme_thread))
return PTR_ERR(nvme_thread);
+ result = -ENOMEM;
+ nvme_workq = create_singlethread_workqueue("nvme");
+ if (!nvme_workq)
+ goto kill_kthread;
+
result = register_blkdev(nvme_major, "nvme");
if (result < 0)
- goto kill_kthread;
+ goto kill_workq;
else if (result > 0)
nvme_major = result;
@@ -2219,6 +2616,8 @@ static int __init nvme_init(void)
unregister_blkdev:
unregister_blkdev(nvme_major, "nvme");
+ kill_workq:
+ destroy_workqueue(nvme_workq);
kill_kthread:
kthread_stop(nvme_thread);
return result;
@@ -2228,6 +2627,7 @@ static void __exit nvme_exit(void)
{
pci_unregister_driver(&nvme_driver);
unregister_blkdev(nvme_major, "nvme");
+ destroy_workqueue(nvme_workq);
kthread_stop(nvme_thread);
}
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 4a4ff4eb8e23..4a0ceb64e269 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -25,6 +25,7 @@
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
+#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
@@ -3038,6 +3039,152 @@ int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
return retcode;
}
+#ifdef CONFIG_COMPAT
+typedef struct sg_io_hdr32 {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+} sg_io_hdr32_t; /* 64 bytes long (on sparc32) */
+
+typedef struct sg_iovec32 {
+ compat_uint_t iov_base;
+ compat_uint_t iov_len;
+} sg_iovec32_t;
+
+static int sg_build_iovec(sg_io_hdr_t __user *sgio, void __user *dxferp, u16 iovec_count)
+{
+ sg_iovec_t __user *iov = (sg_iovec_t __user *) (sgio + 1);
+ sg_iovec32_t __user *iov32 = dxferp;
+ int i;
+
+ for (i = 0; i < iovec_count; i++) {
+ u32 base, len;
+
+ if (get_user(base, &iov32[i].iov_base) ||
+ get_user(len, &iov32[i].iov_len) ||
+ put_user(compat_ptr(base), &iov[i].iov_base) ||
+ put_user(len, &iov[i].iov_len))
+ return -EFAULT;
+ }
+
+ if (put_user(iov, &sgio->dxferp))
+ return -EFAULT;
+ return 0;
+}
+
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg)
+{
+ sg_io_hdr32_t __user *sgio32 = (sg_io_hdr32_t __user *)arg;
+ sg_io_hdr_t __user *sgio;
+ u16 iovec_count;
+ u32 data;
+ void __user *dxferp;
+ int err;
+ int interface_id;
+
+ if (get_user(interface_id, &sgio32->interface_id))
+ return -EFAULT;
+ if (interface_id != 'S')
+ return -EINVAL;
+
+ if (get_user(iovec_count, &sgio32->iovec_count))
+ return -EFAULT;
+
+ {
+ void __user *top = compat_alloc_user_space(0);
+ void __user *new = compat_alloc_user_space(sizeof(sg_io_hdr_t) +
+ (iovec_count * sizeof(sg_iovec_t)));
+ if (new > top)
+ return -EINVAL;
+
+ sgio = new;
+ }
+
+ /* Ok, now construct. */
+ if (copy_in_user(&sgio->interface_id, &sgio32->interface_id,
+ (2 * sizeof(int)) +
+ (2 * sizeof(unsigned char)) +
+ (1 * sizeof(unsigned short)) +
+ (1 * sizeof(unsigned int))))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->dxferp))
+ return -EFAULT;
+ dxferp = compat_ptr(data);
+ if (iovec_count) {
+ if (sg_build_iovec(sgio, dxferp, iovec_count))
+ return -EFAULT;
+ } else {
+ if (put_user(dxferp, &sgio->dxferp))
+ return -EFAULT;
+ }
+
+ {
+ unsigned char __user *cmdp;
+ unsigned char __user *sbp;
+
+ if (get_user(data, &sgio32->cmdp))
+ return -EFAULT;
+ cmdp = compat_ptr(data);
+
+ if (get_user(data, &sgio32->sbp))
+ return -EFAULT;
+ sbp = compat_ptr(data);
+
+ if (put_user(cmdp, &sgio->cmdp) ||
+ put_user(sbp, &sgio->sbp))
+ return -EFAULT;
+ }
+
+ if (copy_in_user(&sgio->timeout, &sgio32->timeout,
+ 3 * sizeof(int)))
+ return -EFAULT;
+
+ if (get_user(data, &sgio32->usr_ptr))
+ return -EFAULT;
+ if (put_user(compat_ptr(data), &sgio->usr_ptr))
+ return -EFAULT;
+
+ err = nvme_sg_io(ns, sgio);
+ if (err >= 0) {
+ void __user *datap;
+
+ if (copy_in_user(&sgio32->pack_id, &sgio->pack_id,
+ sizeof(int)) ||
+ get_user(datap, &sgio->usr_ptr) ||
+ put_user((u32)(unsigned long)datap,
+ &sgio32->usr_ptr) ||
+ copy_in_user(&sgio32->status, &sgio->status,
+ (4 * sizeof(unsigned char)) +
+ (2 * sizeof(unsigned short)) +
+ (3 * sizeof(int))))
+ err = -EFAULT;
+ }
+
+ return err;
+}
+#endif
+
int nvme_sg_get_version_num(int __user *ip)
{
return put_user(sg_version_num, ip);
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 6a680d4de7f1..b1cb3f4c4db4 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -110,9 +110,9 @@ static int __virtblk_add_req(struct virtqueue *vq,
return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
-static inline void virtblk_request_done(struct virtblk_req *vbr)
+static inline void virtblk_request_done(struct request *req)
{
- struct request *req = vbr->req;
+ struct virtblk_req *vbr = req->special;
int error = virtblk_result(vbr);
if (req->cmd_type == REQ_TYPE_BLOCK_PC) {
@@ -138,7 +138,7 @@ static void virtblk_done(struct virtqueue *vq)
do {
virtqueue_disable_cb(vq);
while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
- virtblk_request_done(vbr);
+ blk_mq_complete_request(vbr->req);
req_done = true;
}
if (unlikely(virtqueue_is_broken(vq)))
@@ -479,6 +479,7 @@ static struct blk_mq_ops virtio_mq_ops = {
.map_queue = blk_mq_map_queue,
.alloc_hctx = blk_mq_alloc_single_hw_queue,
.free_hctx = blk_mq_free_single_hw_queue,
+ .complete = virtblk_request_done,
};
static struct blk_mq_reg virtio_mq_reg = {
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index da18046d0e07..64c60edcdfbc 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -285,7 +285,8 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
!rb_next(&persistent_gnt->node)) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -298,7 +299,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
BUG_ON(num != 0);
}
-static void unmap_purged_grants(struct work_struct *work)
+void xen_blkbk_unmap_purged_grants(struct work_struct *work)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
@@ -320,7 +321,8 @@ static void unmap_purged_grants(struct work_struct *work)
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages,
+ segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -328,7 +330,7 @@ static void unmap_purged_grants(struct work_struct *work)
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
- ret = gnttab_unmap_refs(unmap, pages, segs_to_unmap);
+ ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
BUG_ON(ret);
put_free_pages(blkif, pages, segs_to_unmap);
}
@@ -373,7 +375,7 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);
- INIT_LIST_HEAD(&blkif->persistent_purge_list);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
root = &blkif->persistent_gnts;
purge_list:
foreach_grant_safe(persistent_gnt, n, root, node) {
@@ -418,7 +420,6 @@ finished:
blkif->vbd.overflow_max_grants = 0;
/* We can defer this work */
- INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
schedule_work(&blkif->persistent_purge_work);
pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
return;
@@ -623,9 +624,23 @@ purge_gnt_list:
print_stats(blkif);
}
- /* Since we are shutting down remove all pages from the buffer */
- shrink_free_pagepool(blkif, 0 /* All */);
+ /* Drain pending purge work */
+ flush_work(&blkif->persistent_purge_work);
+ if (log_stats)
+ print_stats(blkif);
+
+ blkif->xenblkd = NULL;
+ xen_blkif_put(blkif);
+
+ return 0;
+}
+
+/*
+ * Remove persistent grants and empty the pool of free pages
+ */
+void xen_blkbk_free_caches(struct xen_blkif *blkif)
+{
/* Free all persistent grant pages */
if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
free_persistent_gnts(blkif, &blkif->persistent_gnts,
@@ -634,13 +649,8 @@ purge_gnt_list:
BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
blkif->persistent_gnt_c = 0;
- if (log_stats)
- print_stats(blkif);
-
- blkif->xenblkd = NULL;
- xen_blkif_put(blkif);
-
- return 0;
+ /* Since we are shutting down remove all pages from the buffer */
+ shrink_free_pagepool(blkif, 0 /* All */);
}
/*
@@ -668,14 +678,15 @@ static void xen_blkbk_unmap(struct xen_blkif *blkif,
GNTMAP_host_map, pages[i]->handle);
pages[i]->handle = BLKBACK_INVALID_HANDLE;
if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
+ invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
invcount = 0;
}
}
if (invcount) {
- ret = gnttab_unmap_refs(unmap, unmap_pages, invcount);
+ ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
BUG_ON(ret);
put_free_pages(blkif, unmap_pages, invcount);
}
@@ -737,7 +748,7 @@ again:
}
if (segs_to_map) {
- ret = gnttab_map_refs(map, pages_to_gnt, segs_to_map);
+ ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
BUG_ON(ret);
}
@@ -835,7 +846,7 @@ static int xen_blkbk_parse_indirect(struct blkif_request *req,
struct grant_page **pages = pending_req->indirect_pages;
struct xen_blkif *blkif = pending_req->blkif;
int indirect_grefs, rc, n, nseg, i;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
nseg = pending_req->nr_pages;
indirect_grefs = INDIRECT_PAGES(nseg);
@@ -931,9 +942,7 @@ static void xen_blk_drain_io(struct xen_blkif *blkif)
{
atomic_set(&blkif->drain, 1);
do {
- /* The initial value is one, and one refcnt taken at the
- * start of the xen_blkif_schedule thread. */
- if (atomic_read(&blkif->refcnt) <= 2)
+ if (atomic_read(&blkif->inflight) == 0)
break;
wait_for_completion_interruptible_timeout(
&blkif->drain_complete, HZ);
@@ -973,17 +982,30 @@ static void __end_block_io_op(struct pending_req *pending_req, int error)
* the proper response on the ring.
*/
if (atomic_dec_and_test(&pending_req->pendcnt)) {
- xen_blkbk_unmap(pending_req->blkif,
+ struct xen_blkif *blkif = pending_req->blkif;
+
+ xen_blkbk_unmap(blkif,
pending_req->segments,
pending_req->nr_pages);
- make_response(pending_req->blkif, pending_req->id,
+ make_response(blkif, pending_req->id,
pending_req->operation, pending_req->status);
- xen_blkif_put(pending_req->blkif);
- if (atomic_read(&pending_req->blkif->refcnt) <= 2) {
- if (atomic_read(&pending_req->blkif->drain))
- complete(&pending_req->blkif->drain_complete);
+ free_req(blkif, pending_req);
+ /*
+ * Make sure the request is freed before releasing blkif,
+ * or there could be a race between free_req and the
+ * cleanup done in xen_blkif_free during shutdown.
+ *
+ * NB: The fact that we might try to wake up pending_free_wq
+ * before drain_complete (in case there's a drain going on)
+ * is not a problem with our current implementation
+ * because we can ensure there's no thread waiting on
+ * pending_free_wq if there's a drain going on, but it has
+ * to be taken into account if the current model is changed.
+ */
+ if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
+ complete(&blkif->drain_complete);
}
- free_req(pending_req->blkif, pending_req);
+ xen_blkif_put(blkif);
}
}
@@ -1237,6 +1259,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
* below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
*/
xen_blkif_get(blkif);
+ atomic_inc(&blkif->inflight);
for (i = 0; i < nseg; i++) {
while ((bio == NULL) ||
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index 8d8807563d99..be052773ad03 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -57,7 +57,7 @@
#define MAX_INDIRECT_SEGMENTS 256
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define MAX_INDIRECT_PAGES \
((MAX_INDIRECT_SEGMENTS + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
#define INDIRECT_PAGES(_segs) \
@@ -278,6 +278,7 @@ struct xen_blkif {
/* for barrier (drain) requests */
struct completion drain_complete;
atomic_t drain;
+ atomic_t inflight;
/* One thread per one blkif. */
struct task_struct *xenblkd;
unsigned int waiting_reqs;
@@ -376,6 +377,7 @@ int xen_blkif_xenbus_init(void);
irqreturn_t xen_blkif_be_int(int irq, void *dev_id);
int xen_blkif_schedule(void *arg);
int xen_blkif_purge_persistent(void *arg);
+void xen_blkbk_free_caches(struct xen_blkif *blkif);
int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
struct backend_info *be, int state);
@@ -383,6 +385,7 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state);
struct xenbus_device *xen_blkbk_xenbus(struct backend_info *be);
+void xen_blkbk_unmap_purged_grants(struct work_struct *work);
static inline void blkif_get_x86_32_req(struct blkif_request *dst,
struct blkif_x86_32_request *src)
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index c2014a0aa206..9a547e6b6ebf 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -125,8 +125,11 @@ static struct xen_blkif *xen_blkif_alloc(domid_t domid)
blkif->persistent_gnts.rb_node = NULL;
spin_lock_init(&blkif->free_pages_lock);
INIT_LIST_HEAD(&blkif->free_pages);
+ INIT_LIST_HEAD(&blkif->persistent_purge_list);
blkif->free_pages_num = 0;
atomic_set(&blkif->persistent_gnt_in_use, 0);
+ atomic_set(&blkif->inflight, 0);
+ INIT_WORK(&blkif->persistent_purge_work, xen_blkbk_unmap_purged_grants);
INIT_LIST_HEAD(&blkif->pending_free);
@@ -259,6 +262,17 @@ static void xen_blkif_free(struct xen_blkif *blkif)
if (!atomic_dec_and_test(&blkif->refcnt))
BUG();
+ /* Remove all persistent grants and the cache of ballooned pages. */
+ xen_blkbk_free_caches(blkif);
+
+ /* Make sure everything is drained before shutting down */
+ BUG_ON(blkif->persistent_gnt_c != 0);
+ BUG_ON(atomic_read(&blkif->persistent_gnt_in_use) != 0);
+ BUG_ON(blkif->free_pages_num != 0);
+ BUG_ON(!list_empty(&blkif->persistent_purge_list));
+ BUG_ON(!list_empty(&blkif->free_pages));
+ BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
+
/* Check that there is no request in use */
list_for_each_entry_safe(req, n, &blkif->pending_free, free_list) {
list_del(&req->free_list);
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8dcfb54f1603..efe1b4761735 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -162,7 +162,7 @@ static DEFINE_SPINLOCK(minor_lock);
#define DEV_NAME "xvd" /* name in /dev */
#define SEGS_PER_INDIRECT_FRAME \
- (PAGE_SIZE/sizeof(struct blkif_request_segment_aligned))
+ (PAGE_SIZE/sizeof(struct blkif_request_segment))
#define INDIRECT_GREFS(_segs) \
((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
@@ -393,7 +393,7 @@ static int blkif_queue_request(struct request *req)
unsigned long id;
unsigned int fsect, lsect;
int i, ref, n;
- struct blkif_request_segment_aligned *segments = NULL;
+ struct blkif_request_segment *segments = NULL;
/*
* Used to store if we are able to queue the request by just using
@@ -550,7 +550,7 @@ static int blkif_queue_request(struct request *req)
} else {
n = i % SEGS_PER_INDIRECT_FRAME;
segments[n] =
- (struct blkif_request_segment_aligned) {
+ (struct blkif_request_segment) {
.gref = ref,
.first_sect = fsect,
.last_sect = lsect };
@@ -1904,13 +1904,16 @@ static void blkback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateConnected:
blkfront_connect(info);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's Closing state -- fallthrough */
case XenbusStateClosing:
blkfront_closing(info);
break;
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 011e55d820b1..51c557cfd92b 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -612,6 +612,8 @@ static ssize_t disksize_store(struct device *dev,
disksize = PAGE_ALIGN(disksize);
meta = zram_meta_alloc(disksize);
+ if (!meta)
+ return -ENOMEM;
down_write(&zram->init_lock);
if (zram->init_done) {
up_write(&zram->init_lock);
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index fa3243d71c76..1386749b48ff 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -499,6 +499,7 @@ config RAW_DRIVER
config MAX_RAW_DEVS
int "Maximum number of RAW devices to support (1-65536)"
depends on RAW_DRIVER
+ range 1 65536
default "256"
help
The maximum number of RAW devices that are supported.
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index f3223aac4df1..6e8d65e9b1d3 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -190,7 +190,7 @@ static int bind_get(int number, dev_t *dev)
struct raw_device_data *rawdev;
struct block_device *bdev;
- if (number <= 0 || number >= MAX_RAW_MINORS)
+ if (number <= 0 || number >= max_raw_minors)
return -EINVAL;
rawdev = &raw_devices[number];
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index feea87cc6b8f..6928d094451d 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -890,12 +890,10 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
} else {
/* Failback to copying a page */
struct page *page = alloc_page(GFP_KERNEL);
- char *src = buf->ops->map(pipe, buf, 1);
- char *dst;
+ char *src;
if (!page)
return -ENOMEM;
- dst = kmap(page);
offset = sd->pos & ~PAGE_MASK;
@@ -903,9 +901,8 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
if (len + offset > PAGE_SIZE)
len = PAGE_SIZE - offset;
- memcpy(dst + offset, src + buf->offset, len);
-
- kunmap(page);
+ src = buf->ops->map(pipe, buf, 1);
+ memcpy(page_address(page) + offset, src + buf->offset, len);
buf->ops->unmap(pipe, buf, src);
sg_set_page(&(sgl->sg[sgl->n]), page, len, offset);
diff --git a/drivers/clk/at91/clk-master.c b/drivers/clk/at91/clk-master.c
index bd313f7816a8..c1af80bcdf20 100644
--- a/drivers/clk/at91/clk-master.c
+++ b/drivers/clk/at91/clk-master.c
@@ -242,7 +242,7 @@ of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc,
irq = irq_of_parse_and_map(np, 0);
if (!irq)
- return;
+ goto out_free_characteristics;
clk = at91_clk_register_master(pmc, irq, name, num_parents,
parent_names, layout,
diff --git a/drivers/clk/clk-nomadik.c b/drivers/clk/clk-nomadik.c
index 6a934a5296bd..05e04ce0f148 100644
--- a/drivers/clk/clk-nomadik.c
+++ b/drivers/clk/clk-nomadik.c
@@ -494,6 +494,9 @@ static const struct file_operations nomadik_src_clk_debugfs_ops = {
static int __init nomadik_src_clk_init_debugfs(void)
{
+ /* Vital for multiplatform */
+ if (!src_base)
+ return -ENODEV;
src_pcksr0_boot = readl(src_base + SRC_PCKSR0);
src_pcksr1_boot = readl(src_base + SRC_PCKSR1);
debugfs_create_file("nomadik-src-clk", S_IFREG | S_IRUGO,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 5517944495d8..c42e608af6bb 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -2226,24 +2226,25 @@ EXPORT_SYMBOL_GPL(devm_clk_unregister);
*/
int __clk_get(struct clk *clk)
{
- if (clk && !try_module_get(clk->owner))
- return 0;
+ if (clk) {
+ if (!try_module_get(clk->owner))
+ return 0;
- kref_get(&clk->ref);
+ kref_get(&clk->ref);
+ }
return 1;
}
void __clk_put(struct clk *clk)
{
- if (WARN_ON_ONCE(IS_ERR(clk)))
+ if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
return;
clk_prepare_lock();
kref_put(&clk->ref, __clk_release);
clk_prepare_unlock();
- if (clk)
- module_put(clk->owner);
+ module_put(clk->owner);
}
/*** clk rate change notifiers ***/
diff --git a/drivers/clk/keystone/gate.c b/drivers/clk/keystone/gate.c
index 17a598398a53..86f1e362eafb 100644
--- a/drivers/clk/keystone/gate.c
+++ b/drivers/clk/keystone/gate.c
@@ -179,6 +179,7 @@ static struct clk *clk_register_psc(struct device *dev,
init.name = name;
init.ops = &clk_psc_ops;
+ init.flags = 0;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/mvebu/armada-370.c b/drivers/clk/mvebu/armada-370.c
index 81a202d12a7a..bef198a83863 100644
--- a/drivers/clk/mvebu/armada-370.c
+++ b/drivers/clk/mvebu/armada-370.c
@@ -141,13 +141,6 @@ static const struct coreclk_soc_desc a370_coreclks = {
.num_ratios = ARRAY_SIZE(a370_coreclk_ratios),
};
-static void __init a370_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &a370_coreclks);
-}
-CLK_OF_DECLARE(a370_core_clk, "marvell,armada-370-core-clock",
- a370_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -168,9 +161,15 @@ static const struct clk_gating_soc_desc a370_gating_desc[] __initconst = {
{ }
};
-static void __init a370_clk_gating_init(struct device_node *np)
+static void __init a370_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, a370_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-370-gating-clock");
+
+ mvebu_coreclk_setup(np, &a370_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, a370_gating_desc);
}
-CLK_OF_DECLARE(a370_clk_gating, "marvell,armada-370-gating-clock",
- a370_clk_gating_init);
+CLK_OF_DECLARE(a370_clk, "marvell,armada-370-core-clock", a370_clk_init);
+
diff --git a/drivers/clk/mvebu/armada-xp.c b/drivers/clk/mvebu/armada-xp.c
index 9922c4475aa8..b3094315a3c0 100644
--- a/drivers/clk/mvebu/armada-xp.c
+++ b/drivers/clk/mvebu/armada-xp.c
@@ -158,13 +158,6 @@ static const struct coreclk_soc_desc axp_coreclks = {
.num_ratios = ARRAY_SIZE(axp_coreclk_ratios),
};
-static void __init axp_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &axp_coreclks);
-}
-CLK_OF_DECLARE(axp_core_clk, "marvell,armada-xp-core-clock",
- axp_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -202,9 +195,14 @@ static const struct clk_gating_soc_desc axp_gating_desc[] __initconst = {
{ }
};
-static void __init axp_clk_gating_init(struct device_node *np)
+static void __init axp_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, axp_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,armada-xp-gating-clock");
+
+ mvebu_coreclk_setup(np, &axp_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, axp_gating_desc);
}
-CLK_OF_DECLARE(axp_clk_gating, "marvell,armada-xp-gating-clock",
- axp_clk_gating_init);
+CLK_OF_DECLARE(axp_clk, "marvell,armada-xp-core-clock", axp_clk_init);
diff --git a/drivers/clk/mvebu/dove.c b/drivers/clk/mvebu/dove.c
index 38aee1e3f242..b8c2424ac926 100644
--- a/drivers/clk/mvebu/dove.c
+++ b/drivers/clk/mvebu/dove.c
@@ -154,12 +154,6 @@ static const struct coreclk_soc_desc dove_coreclks = {
.num_ratios = ARRAY_SIZE(dove_coreclk_ratios),
};
-static void __init dove_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &dove_coreclks);
-}
-CLK_OF_DECLARE(dove_core_clk, "marvell,dove-core-clock", dove_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -186,9 +180,14 @@ static const struct clk_gating_soc_desc dove_gating_desc[] __initconst = {
{ }
};
-static void __init dove_clk_gating_init(struct device_node *np)
+static void __init dove_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, dove_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,dove-gating-clock");
+
+ mvebu_coreclk_setup(np, &dove_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, dove_gating_desc);
}
-CLK_OF_DECLARE(dove_clk_gating, "marvell,dove-gating-clock",
- dove_clk_gating_init);
+CLK_OF_DECLARE(dove_clk, "marvell,dove-core-clock", dove_clk_init);
diff --git a/drivers/clk/mvebu/kirkwood.c b/drivers/clk/mvebu/kirkwood.c
index 2636a55f29f9..ddb666a86500 100644
--- a/drivers/clk/mvebu/kirkwood.c
+++ b/drivers/clk/mvebu/kirkwood.c
@@ -193,13 +193,6 @@ static const struct coreclk_soc_desc kirkwood_coreclks = {
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init kirkwood_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &kirkwood_coreclks);
-}
-CLK_OF_DECLARE(kirkwood_core_clk, "marvell,kirkwood-core-clock",
- kirkwood_coreclk_init);
-
static const struct coreclk_soc_desc mv88f6180_coreclks = {
.get_tclk_freq = kirkwood_get_tclk_freq,
.get_cpu_freq = mv88f6180_get_cpu_freq,
@@ -208,13 +201,6 @@ static const struct coreclk_soc_desc mv88f6180_coreclks = {
.num_ratios = ARRAY_SIZE(kirkwood_coreclk_ratios),
};
-static void __init mv88f6180_coreclk_init(struct device_node *np)
-{
- mvebu_coreclk_setup(np, &mv88f6180_coreclks);
-}
-CLK_OF_DECLARE(mv88f6180_core_clk, "marvell,mv88f6180-core-clock",
- mv88f6180_coreclk_init);
-
/*
* Clock Gating Control
*/
@@ -239,9 +225,21 @@ static const struct clk_gating_soc_desc kirkwood_gating_desc[] __initconst = {
{ }
};
-static void __init kirkwood_clk_gating_init(struct device_node *np)
+static void __init kirkwood_clk_init(struct device_node *np)
{
- mvebu_clk_gating_setup(np, kirkwood_gating_desc);
+ struct device_node *cgnp =
+ of_find_compatible_node(NULL, NULL, "marvell,kirkwood-gating-clock");
+
+
+ if (of_device_is_compatible(np, "marvell,mv88f6180-core-clock"))
+ mvebu_coreclk_setup(np, &mv88f6180_coreclks);
+ else
+ mvebu_coreclk_setup(np, &kirkwood_coreclks);
+
+ if (cgnp)
+ mvebu_clk_gating_setup(cgnp, kirkwood_gating_desc);
}
-CLK_OF_DECLARE(kirkwood_clk_gating, "marvell,kirkwood-gating-clock",
- kirkwood_clk_gating_init);
+CLK_OF_DECLARE(kirkwood_clk, "marvell,kirkwood-core-clock",
+ kirkwood_clk_init);
+CLK_OF_DECLARE(mv88f6180_clk, "marvell,mv88f6180-core-clock",
+ kirkwood_clk_init);
diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c
index a59ec217a124..99c27b1c625b 100644
--- a/drivers/clk/shmobile/clk-rcar-gen2.c
+++ b/drivers/clk/shmobile/clk-rcar-gen2.c
@@ -26,6 +26,8 @@ struct rcar_gen2_cpg {
void __iomem *reg;
};
+#define CPG_FRQCRB 0x00000004
+#define CPG_FRQCRB_KICK BIT(31)
#define CPG_SDCKCR 0x00000074
#define CPG_PLL0CR 0x000000d8
#define CPG_FRQCRC 0x000000e0
@@ -45,6 +47,7 @@ struct rcar_gen2_cpg {
struct cpg_z_clk {
struct clk_hw hw;
void __iomem *reg;
+ void __iomem *kick_reg;
};
#define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw)
@@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate,
{
struct cpg_z_clk *zclk = to_z_clk(hw);
unsigned int mult;
- u32 val;
+ u32 val, kick;
+ unsigned int i;
mult = div_u64((u64)rate * 32, parent_rate);
mult = clamp(mult, 1U, 32U);
+ if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)
+ return -EBUSY;
+
val = clk_readl(zclk->reg);
val &= ~CPG_FRQCRC_ZFC_MASK;
val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT;
clk_writel(val, zclk->reg);
- return 0;
+ /*
+ * Set KICK bit in FRQCRB to update hardware setting and wait for
+ * clock change completion.
+ */
+ kick = clk_readl(zclk->kick_reg);
+ kick |= CPG_FRQCRB_KICK;
+ clk_writel(kick, zclk->kick_reg);
+
+ /*
+ * Note: There is no HW information about the worst case latency.
+ *
+ * Using experimental measurements, it seems that no more than
+ * ~10 iterations are needed, independently of the CPU rate.
+ * Since this value might be dependent on the external xtal rate, pll1
+ * rate or even other emulation clock rates, use 1000 as a
+ * "super" safe value.
+ */
+ for (i = 1000; i; i--) {
+ if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK))
+ return 0;
+
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
}
static const struct clk_ops cpg_z_clk_ops = {
@@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg)
init.num_parents = 1;
zclk->reg = cpg->reg + CPG_FRQCRC;
+ zclk->kick_reg = cpg->reg + CPG_FRQCRB;
zclk->hw.init = &init;
clk = clk_register(NULL, &zclk->hw);
@@ -186,7 +218,7 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
const char *name)
{
const struct clk_div_table *table = NULL;
- const char *parent_name = "main";
+ const char *parent_name;
unsigned int shift;
unsigned int mult = 1;
unsigned int div = 1;
@@ -201,23 +233,31 @@ rcar_gen2_cpg_register_clock(struct device_node *np, struct rcar_gen2_cpg *cpg,
* the multiplier value.
*/
u32 value = clk_readl(cpg->reg + CPG_PLL0CR);
+ parent_name = "main";
mult = ((value >> 24) & ((1 << 7) - 1)) + 1;
} else if (!strcmp(name, "pll1")) {
+ parent_name = "main";
mult = config->pll1_mult / 2;
} else if (!strcmp(name, "pll3")) {
+ parent_name = "main";
mult = config->pll3_mult;
} else if (!strcmp(name, "lb")) {
+ parent_name = "pll1_div2";
div = cpg_mode & BIT(18) ? 36 : 24;
} else if (!strcmp(name, "qspi")) {
+ parent_name = "pll1_div2";
div = (cpg_mode & (BIT(3) | BIT(2) | BIT(1))) == BIT(2)
- ? 16 : 20;
+ ? 8 : 10;
} else if (!strcmp(name, "sdh")) {
+ parent_name = "pll1_div2";
table = cpg_sdh_div_table;
shift = 8;
} else if (!strcmp(name, "sd0")) {
+ parent_name = "pll1_div2";
table = cpg_sd01_div_table;
shift = 4;
} else if (!strcmp(name, "sd1")) {
+ parent_name = "pll1_div2";
table = cpg_sd01_div_table;
shift = 0;
} else if (!strcmp(name, "z")) {
diff --git a/drivers/clk/tegra/clk-divider.c b/drivers/clk/tegra/clk-divider.c
index 4d75b1f37e3a..290f9c1a3749 100644
--- a/drivers/clk/tegra/clk-divider.c
+++ b/drivers/clk/tegra/clk-divider.c
@@ -59,7 +59,7 @@ static int get_div(struct tegra_clk_frac_div *divider, unsigned long rate,
return 0;
if (divider_ux1 > get_max_div(divider))
- return -EINVAL;
+ return get_max_div(divider);
return divider_ux1;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index cf0c323f2c36..c39613c519af 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -180,9 +180,13 @@ enum clk_id {
tegra_clk_sbc6_8,
tegra_clk_sclk,
tegra_clk_sdmmc1,
+ tegra_clk_sdmmc1_8,
tegra_clk_sdmmc2,
+ tegra_clk_sdmmc2_8,
tegra_clk_sdmmc3,
+ tegra_clk_sdmmc3_8,
tegra_clk_sdmmc4,
+ tegra_clk_sdmmc4_8,
tegra_clk_se,
tegra_clk_soc_therm,
tegra_clk_sor0,
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 5c35885f4a7c..1fa5c3f33b20 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -371,9 +371,7 @@ static const char *mux_pllp3_pllc_clkm[] = {
static const char *mux_pllm_pllc_pllp_plla_pllc2_c3_clkm[] = {
"pll_m", "pll_c", "pll_p", "pll_a", "pll_c2", "pll_c3", "clk_m"
};
-static u32 mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx[] = {
- [0] = 0, [1] = 1, [2] = 2, [3] = 3, [4] = 4, [5] = 6,
-};
+#define mux_pllm_pllc_pllp_plla_pllc2_c3_clkm_idx NULL
static const char *mux_pllm_pllc2_c_c3_pllp_plla_pllc4[] = {
"pll_m", "pll_c2", "pll_c", "pll_c3", "pll_p", "pll_a_out0", "pll_c4",
@@ -465,6 +463,10 @@ static struct tegra_periph_init_data periph_clks[] = {
MUX("adx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_ADX1, 180, TEGRA_PERIPH_ON_APB, tegra_clk_adx1),
MUX("amx1", mux_plla_pllc_pllp_clkm, CLK_SOURCE_AMX1, 185, TEGRA_PERIPH_ON_APB, tegra_clk_amx1),
MUX("vi_sensor2", mux_pllm_pllc2_c_c3_pllp_plla, CLK_SOURCE_VI_SENSOR2, 20, TEGRA_PERIPH_NO_RESET, tegra_clk_vi_sensor2),
+ MUX8("sdmmc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC1, 14, 0, tegra_clk_sdmmc1_8),
+ MUX8("sdmmc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC2, 9, 0, tegra_clk_sdmmc2_8),
+ MUX8("sdmmc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC3, 69, 0, tegra_clk_sdmmc3_8),
+ MUX8("sdmmc4", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SDMMC4, 15, 0, tegra_clk_sdmmc4_8),
MUX8("sbc1", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC1, 41, TEGRA_PERIPH_ON_APB, tegra_clk_sbc1_8),
MUX8("sbc2", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC2, 44, TEGRA_PERIPH_ON_APB, tegra_clk_sbc2_8),
MUX8("sbc3", mux_pllp_pllc2_c_c3_pllm_clkm, CLK_SOURCE_SBC3, 46, TEGRA_PERIPH_ON_APB, tegra_clk_sbc3_8),
@@ -492,7 +494,7 @@ static struct tegra_periph_init_data periph_clks[] = {
UART("uartb", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTB, 7, tegra_clk_uartb),
UART("uartc", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTC, 55, tegra_clk_uartc),
UART("uartd", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTD, 65, tegra_clk_uartd),
- UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 65, tegra_clk_uarte),
+ UART("uarte", mux_pllp_pllc_pllm_clkm, CLK_SOURCE_UARTE, 66, tegra_clk_uarte),
XUSB("xusb_host_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_HOST_SRC, 143, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_host_src),
XUSB("xusb_falcon_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_FALCON_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_falcon_src),
XUSB("xusb_fs_src", mux_clkm_48M_pllp_480M, CLK_SOURCE_XUSB_FS_SRC, 143, TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_fs_src),
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 05dce4aa2c11..feb3201c85ce 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -120,7 +120,7 @@ void __init tegra_super_clk_gen4_init(void __iomem *clk_base,
ARRAY_SIZE(cclk_lp_parents),
CLK_SET_RATE_PARENT,
clk_base + CCLKLP_BURST_POLICY,
- 0, 4, 8, 9, NULL);
+ TEGRA_DIVIDER_2, 4, 8, 9, NULL);
*dt_clk = clk;
}
diff --git a/drivers/clk/tegra/clk-tegra114.c b/drivers/clk/tegra/clk-tegra114.c
index 90d9d25f2228..80431f0fb268 100644
--- a/drivers/clk/tegra/clk-tegra114.c
+++ b/drivers/clk/tegra/clk-tegra114.c
@@ -682,12 +682,12 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_timer] = { .dt_id = TEGRA114_CLK_TIMER, .present = true },
[tegra_clk_uarta] = { .dt_id = TEGRA114_CLK_UARTA, .present = true },
[tegra_clk_uartd] = { .dt_id = TEGRA114_CLK_UARTD, .present = true },
- [tegra_clk_sdmmc2] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA114_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA114_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA114_CLK_I2C1, .present = true },
[tegra_clk_ndflash] = { .dt_id = TEGRA114_CLK_NDFLASH, .present = true },
- [tegra_clk_sdmmc1] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA114_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA114_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA114_CLK_PWM, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA114_CLK_I2S0, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA114_CLK_I2S2, .present = true },
@@ -723,7 +723,7 @@ static struct tegra_clk tegra114_clks[tegra_clk_max] __initdata = {
[tegra_clk_bsev] = { .dt_id = TEGRA114_CLK_BSEV, .present = true },
[tegra_clk_i2c3] = { .dt_id = TEGRA114_CLK_I2C3, .present = true },
[tegra_clk_sbc4_8] = { .dt_id = TEGRA114_CLK_SBC4, .present = true },
- [tegra_clk_sdmmc3] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA114_CLK_SDMMC3, .present = true },
[tegra_clk_owr] = { .dt_id = TEGRA114_CLK_OWR, .present = true },
[tegra_clk_csite] = { .dt_id = TEGRA114_CLK_CSITE, .present = true },
[tegra_clk_la] = { .dt_id = TEGRA114_CLK_LA, .present = true },
diff --git a/drivers/clk/tegra/clk-tegra124.c b/drivers/clk/tegra/clk-tegra124.c
index aff86b5bc745..166e02f16c8a 100644
--- a/drivers/clk/tegra/clk-tegra124.c
+++ b/drivers/clk/tegra/clk-tegra124.c
@@ -516,11 +516,11 @@ static struct div_nmp pllp_nmp = {
};
static struct tegra_clk_pll_freq_table pll_p_freq_table[] = {
- {12000000, 216000000, 432, 12, 1, 8},
- {13000000, 216000000, 432, 13, 1, 8},
- {16800000, 216000000, 360, 14, 1, 8},
- {19200000, 216000000, 360, 16, 1, 8},
- {26000000, 216000000, 432, 26, 1, 8},
+ {12000000, 408000000, 408, 12, 0, 8},
+ {13000000, 408000000, 408, 13, 0, 8},
+ {16800000, 408000000, 340, 14, 0, 8},
+ {19200000, 408000000, 340, 16, 0, 8},
+ {26000000, 408000000, 408, 26, 0, 8},
{0, 0, 0, 0, 0, 0},
};
@@ -570,6 +570,15 @@ static struct tegra_clk_pll_params pll_a_params = {
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_USE_LOCK,
};
+static struct div_nmp plld_nmp = {
+ .divm_shift = 0,
+ .divm_width = 5,
+ .divn_shift = 8,
+ .divn_width = 11,
+ .divp_shift = 20,
+ .divp_width = 3,
+};
+
static struct tegra_clk_pll_freq_table pll_d_freq_table[] = {
{12000000, 216000000, 864, 12, 4, 12},
{13000000, 216000000, 864, 13, 4, 12},
@@ -603,19 +612,18 @@ static struct tegra_clk_pll_params pll_d_params = {
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLDU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
- .div_nmp = &pllp_nmp,
+ .div_nmp = &plld_nmp,
.freq_table = pll_d_freq_table,
.flags = TEGRA_PLL_HAS_CPCON | TEGRA_PLL_SET_LFCON |
TEGRA_PLL_USE_LOCK,
};
static struct tegra_clk_pll_freq_table tegra124_pll_d2_freq_table[] = {
- { 12000000, 148500000, 99, 1, 8},
- { 12000000, 594000000, 99, 1, 1},
- { 13000000, 594000000, 91, 1, 1}, /* actual: 591.5 MHz */
- { 16800000, 594000000, 71, 1, 1}, /* actual: 596.4 MHz */
- { 19200000, 594000000, 62, 1, 1}, /* actual: 595.2 MHz */
- { 26000000, 594000000, 91, 2, 1}, /* actual: 591.5 MHz */
+ { 12000000, 594000000, 99, 1, 2},
+ { 13000000, 594000000, 91, 1, 2}, /* actual: 591.5 MHz */
+ { 16800000, 594000000, 71, 1, 2}, /* actual: 596.4 MHz */
+ { 19200000, 594000000, 62, 1, 2}, /* actual: 595.2 MHz */
+ { 26000000, 594000000, 91, 2, 2}, /* actual: 591.5 MHz */
{ 0, 0, 0, 0, 0, 0 },
};
@@ -753,21 +761,19 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_rtc] = { .dt_id = TEGRA124_CLK_RTC, .present = true },
[tegra_clk_timer] = { .dt_id = TEGRA124_CLK_TIMER, .present = true },
[tegra_clk_uarta] = { .dt_id = TEGRA124_CLK_UARTA, .present = true },
- [tegra_clk_sdmmc2] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
+ [tegra_clk_sdmmc2_8] = { .dt_id = TEGRA124_CLK_SDMMC2, .present = true },
[tegra_clk_i2s1] = { .dt_id = TEGRA124_CLK_I2S1, .present = true },
[tegra_clk_i2c1] = { .dt_id = TEGRA124_CLK_I2C1, .present = true },
[tegra_clk_ndflash] = { .dt_id = TEGRA124_CLK_NDFLASH, .present = true },
- [tegra_clk_sdmmc1] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
- [tegra_clk_sdmmc4] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
+ [tegra_clk_sdmmc1_8] = { .dt_id = TEGRA124_CLK_SDMMC1, .present = true },
+ [tegra_clk_sdmmc4_8] = { .dt_id = TEGRA124_CLK_SDMMC4, .present = true },
[tegra_clk_pwm] = { .dt_id = TEGRA124_CLK_PWM, .present = true },
[tegra_clk_i2s2] = { .dt_id = TEGRA124_CLK_I2S2, .present = true },
- [tegra_clk_gr2d] = { .dt_id = TEGRA124_CLK_GR_2D, .present = true },
[tegra_clk_usbd] = { .dt_id = TEGRA124_CLK_USBD, .present = true },
[tegra_clk_isp_8] = { .dt_id = TEGRA124_CLK_ISP, .present = true },
- [tegra_clk_gr3d] = { .dt_id = TEGRA124_CLK_GR_3D, .present = true },
[tegra_clk_disp2] = { .dt_id = TEGRA124_CLK_DISP2, .present = true },
[tegra_clk_disp1] = { .dt_id = TEGRA124_CLK_DISP1, .present = true },
- [tegra_clk_host1x] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
+ [tegra_clk_host1x_8] = { .dt_id = TEGRA124_CLK_HOST1X, .present = true },
[tegra_clk_vcp] = { .dt_id = TEGRA124_CLK_VCP, .present = true },
[tegra_clk_i2s0] = { .dt_id = TEGRA124_CLK_I2S0, .present = true },
[tegra_clk_apbdma] = { .dt_id = TEGRA124_CLK_APBDMA, .present = true },
@@ -794,7 +800,7 @@ static struct tegra_clk tegra124_clks[tegra_clk_max] __initdata = {
[tegra_clk_uartd] = { .dt_id = TEGRA124_CLK_UARTD, .present = true },
[tegra_clk_i2c3] = { .dt_id = TEGRA124_CLK_I2C3, .present = true },
[tegra_clk_sbc4] = { .dt_id = TEGRA124_CLK_SBC4, .present = true },
- [tegra_clk_sdmmc3] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
+ [tegra_clk_sdmmc3_8] = { .dt_id = TEGRA124_CLK_SDMMC3, .present = true },
[tegra_clk_pcie] = { .dt_id = TEGRA124_CLK_PCIE, .present = true },
[tegra_clk_owr] = { .dt_id = TEGRA124_CLK_OWR, .present = true },
[tegra_clk_afi] = { .dt_id = TEGRA124_CLK_AFI, .present = true },
@@ -1286,9 +1292,9 @@ static void __init tegra124_pll_init(void __iomem *clk_base,
clk_register_clkdev(clk, "pll_d2", NULL);
clks[TEGRA124_CLK_PLL_D2] = clk;
- /* PLLD2_OUT0 ?? */
+ /* PLLD2_OUT0 */
clk = clk_register_fixed_factor(NULL, "pll_d2_out0", "pll_d2",
- CLK_SET_RATE_PARENT, 1, 2);
+ CLK_SET_RATE_PARENT, 1, 1);
clk_register_clkdev(clk, "pll_d2_out0", NULL);
clks[TEGRA124_CLK_PLL_D2_OUT0] = clk;
diff --git a/drivers/clk/tegra/clk-tegra20.c b/drivers/clk/tegra/clk-tegra20.c
index dbace152b2fa..dace2b1b5ae6 100644
--- a/drivers/clk/tegra/clk-tegra20.c
+++ b/drivers/clk/tegra/clk-tegra20.c
@@ -574,6 +574,8 @@ static struct tegra_clk tegra20_clks[tegra_clk_max] __initdata = {
[tegra_clk_tvdac] = { .dt_id = TEGRA20_CLK_TVDAC, .present = true },
[tegra_clk_vi_sensor] = { .dt_id = TEGRA20_CLK_VI_SENSOR, .present = true },
[tegra_clk_afi] = { .dt_id = TEGRA20_CLK_AFI, .present = true },
+ [tegra_clk_fuse] = { .dt_id = TEGRA20_CLK_FUSE, .present = true },
+ [tegra_clk_kfuse] = { .dt_id = TEGRA20_CLK_KFUSE, .present = true },
};
static unsigned long tegra20_clk_measure_input_freq(void)
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 974b2db2fe10..0595dc6c453e 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -99,31 +99,6 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
return;
}
-static void __init kona_timers_init(struct device_node *node)
-{
- u32 freq;
- struct clk *external_clk;
-
- external_clk = of_clk_get_by_name(node, NULL);
-
- if (!IS_ERR(external_clk)) {
- arch_timer_rate = clk_get_rate(external_clk);
- clk_prepare_enable(external_clk);
- } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
- arch_timer_rate = freq;
- } else {
- panic("unable to determine clock-frequency");
- }
-
- /* Setup IRQ numbers */
- timers.tmr_irq = irq_of_parse_and_map(node, 0);
-
- /* Setup IO addresses */
- timers.tmr_regs = of_iomap(node, 0);
-
- kona_timer_disable_and_clear(timers.tmr_regs);
-}
-
static int kona_timer_set_next_event(unsigned long clc,
struct clock_event_device *unused)
{
@@ -198,7 +173,34 @@ static struct irqaction kona_timer_irq = {
static void __init kona_timer_init(struct device_node *node)
{
- kona_timers_init(node);
+ u32 freq;
+ struct clk *external_clk;
+
+ if (!of_device_is_available(node)) {
+ pr_info("Kona Timer v1 marked as disabled in device tree\n");
+ return;
+ }
+
+ external_clk = of_clk_get_by_name(node, NULL);
+
+ if (!IS_ERR(external_clk)) {
+ arch_timer_rate = clk_get_rate(external_clk);
+ clk_prepare_enable(external_clk);
+ } else if (!of_property_read_u32(node, "clock-frequency", &freq)) {
+ arch_timer_rate = freq;
+ } else {
+ pr_err("Kona Timer v1 unable to determine clock-frequency");
+ return;
+ }
+
+ /* Setup IRQ numbers */
+ timers.tmr_irq = irq_of_parse_and_map(node, 0);
+
+ /* Setup IO addresses */
+ timers.tmr_regs = of_iomap(node, 0);
+
+ kona_timer_disable_and_clear(timers.tmr_regs);
+
kona_timer_clockevents_init();
setup_irq(timers.tmr_irq, &kona_timer_irq);
kona_timer_set_next_event((arch_timer_rate / HZ), NULL);
diff --git a/drivers/clocksource/vf_pit_timer.c b/drivers/clocksource/vf_pit_timer.c
index 02821b06a39e..a918bc481c52 100644
--- a/drivers/clocksource/vf_pit_timer.c
+++ b/drivers/clocksource/vf_pit_timer.c
@@ -54,7 +54,7 @@ static inline void pit_irq_acknowledge(void)
static u64 pit_read_sched_clock(void)
{
- return __raw_readl(clksrc_base + PITCVAL);
+ return ~__raw_readl(clksrc_base + PITCVAL);
}
static int __init pit_clocksource_init(unsigned long rate)
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 08ca8c9f41cd..199b52b7c3e1 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1109,12 +1109,27 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
goto err_set_policy_cpu;
}
+ /* related cpus should atleast have policy->cpus */
+ cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
+
+ /*
+ * affected cpus must always be the one, which are online. We aren't
+ * managing offline cpus here.
+ */
+ cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
+
+ if (!frozen) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
+
+ down_write(&policy->rwsem);
write_lock_irqsave(&cpufreq_driver_lock, flags);
for_each_cpu(j, policy->cpus)
per_cpu(cpufreq_cpu_data, j) = policy;
write_unlock_irqrestore(&cpufreq_driver_lock, flags);
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
policy->cur = cpufreq_driver->get(policy->cpu);
if (!policy->cur) {
pr_err("%s: ->get() failed\n", __func__);
@@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
}
}
- /* related cpus should atleast have policy->cpus */
- cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
-
- /*
- * affected cpus must always be the one, which are online. We aren't
- * managing offline cpus here.
- */
- cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
-
- if (!frozen) {
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
- }
-
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
policy->user_policy.policy = policy->policy;
policy->user_policy.governor = policy->governor;
}
+ up_write(&policy->rwsem);
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1323,8 +1325,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
up_read(&policy->rwsem);
if (cpu != policy->cpu) {
- if (!frozen)
- sysfs_remove_link(&dev->kobj, "cpufreq");
+ sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
@@ -1547,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu)
*/
unsigned int cpufreq_get(unsigned int cpu)
{
- struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
+ struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
unsigned int ret_freq = 0;
- if (cpufreq_disabled() || !cpufreq_driver)
- return -ENOENT;
-
- BUG_ON(!policy);
-
- if (!down_read_trylock(&cpufreq_rwsem))
- return 0;
-
- down_read(&policy->rwsem);
-
- ret_freq = __cpufreq_get(cpu);
+ if (policy) {
+ down_read(&policy->rwsem);
+ ret_freq = __cpufreq_get(cpu);
+ up_read(&policy->rwsem);
- up_read(&policy->rwsem);
- up_read(&cpufreq_rwsem);
+ cpufreq_cpu_put(policy);
+ }
return ret_freq;
}
@@ -2149,7 +2143,7 @@ int cpufreq_update_policy(unsigned int cpu)
* BIOS might change freq behind our back
* -> ask driver for current freq and notify governors about a change
*/
- if (cpufreq_driver->get) {
+ if (cpufreq_driver->get && !cpufreq_driver->setpolicy) {
new_policy.cur = cpufreq_driver->get(cpu);
if (!policy->cur) {
pr_debug("Driver did not initialize current freq");
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 7e257b233602..2cd36b9297f3 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,12 +34,15 @@
#define SAMPLE_COUNT 3
-#define BYT_RATIOS 0x66a
-#define BYT_VIDS 0x66b
+#define BYT_RATIOS 0x66a
+#define BYT_VIDS 0x66b
+#define BYT_TURBO_RATIOS 0x66c
-#define FRAC_BITS 8
+
+#define FRAC_BITS 6
#define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
#define fp_toint(X) ((X) >> FRAC_BITS)
+#define FP_ROUNDUP(X) ((X) += 1 << FRAC_BITS)
static inline int32_t mul_fp(int32_t x, int32_t y)
{
@@ -51,12 +54,11 @@ static inline int32_t div_fp(int32_t x, int32_t y)
return div_s64((int64_t)x << FRAC_BITS, (int64_t)y);
}
-static u64 energy_divisor;
-
struct sample {
int32_t core_pct_busy;
u64 aperf;
u64 mperf;
+ unsigned long long tsc;
int freq;
};
@@ -96,6 +98,7 @@ struct cpudata {
u64 prev_aperf;
u64 prev_mperf;
+ unsigned long long prev_tsc;
int sample_ptr;
struct sample samples[SAMPLE_COUNT];
};
@@ -357,7 +360,7 @@ static int byt_get_min_pstate(void)
{
u64 value;
rdmsrl(BYT_RATIOS, value);
- return value & 0xFF;
+ return (value >> 8) & 0xFF;
}
static int byt_get_max_pstate(void)
@@ -367,6 +370,13 @@ static int byt_get_max_pstate(void)
return (value >> 16) & 0xFF;
}
+static int byt_get_turbo_pstate(void)
+{
+ u64 value;
+ rdmsrl(BYT_TURBO_RATIOS, value);
+ return value & 0x3F;
+}
+
static void byt_set_pstate(struct cpudata *cpudata, int pstate)
{
u64 val;
@@ -469,7 +479,7 @@ static struct cpu_defaults byt_params = {
.funcs = {
.get_max = byt_get_max_pstate,
.get_min = byt_get_min_pstate,
- .get_turbo = byt_get_max_pstate,
+ .get_turbo = byt_get_turbo_pstate,
.set = byt_set_pstate,
.get_vid = byt_get_vid,
},
@@ -547,31 +557,48 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
static inline void intel_pstate_calc_busy(struct cpudata *cpu,
struct sample *sample)
{
- u64 core_pct;
- core_pct = div64_u64(int_tofp(sample->aperf * 100),
- sample->mperf);
- sample->freq = fp_toint(cpu->pstate.max_pstate * core_pct * 1000);
+ int32_t core_pct;
+ int32_t c0_pct;
+
+ core_pct = div_fp(int_tofp((sample->aperf)),
+ int_tofp((sample->mperf)));
+ core_pct = mul_fp(core_pct, int_tofp(100));
+ FP_ROUNDUP(core_pct);
+
+ c0_pct = div_fp(int_tofp(sample->mperf), int_tofp(sample->tsc));
- sample->core_pct_busy = core_pct;
+ sample->freq = fp_toint(
+ mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct));
+
+ sample->core_pct_busy = mul_fp(core_pct, c0_pct);
}
static inline void intel_pstate_sample(struct cpudata *cpu)
{
u64 aperf, mperf;
+ unsigned long long tsc;
rdmsrl(MSR_IA32_APERF, aperf);
rdmsrl(MSR_IA32_MPERF, mperf);
+ tsc = native_read_tsc();
+
+ aperf = aperf >> FRAC_BITS;
+ mperf = mperf >> FRAC_BITS;
+ tsc = tsc >> FRAC_BITS;
cpu->sample_ptr = (cpu->sample_ptr + 1) % SAMPLE_COUNT;
cpu->samples[cpu->sample_ptr].aperf = aperf;
cpu->samples[cpu->sample_ptr].mperf = mperf;
+ cpu->samples[cpu->sample_ptr].tsc = tsc;
cpu->samples[cpu->sample_ptr].aperf -= cpu->prev_aperf;
cpu->samples[cpu->sample_ptr].mperf -= cpu->prev_mperf;
+ cpu->samples[cpu->sample_ptr].tsc -= cpu->prev_tsc;
intel_pstate_calc_busy(cpu, &cpu->samples[cpu->sample_ptr]);
cpu->prev_aperf = aperf;
cpu->prev_mperf = mperf;
+ cpu->prev_tsc = tsc;
}
static inline void intel_pstate_set_sample_time(struct cpudata *cpu)
@@ -590,7 +617,8 @@ static inline int32_t intel_pstate_get_scaled_busy(struct cpudata *cpu)
core_busy = cpu->samples[cpu->sample_ptr].core_pct_busy;
max_pstate = int_tofp(cpu->pstate.max_pstate);
current_pstate = int_tofp(cpu->pstate.current_pstate);
- return mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ core_busy = mul_fp(core_busy, div_fp(max_pstate, current_pstate));
+ return FP_ROUNDUP(core_busy);
}
static inline void intel_pstate_adjust_busy_pstate(struct cpudata *cpu)
@@ -617,12 +645,10 @@ static void intel_pstate_timer_func(unsigned long __data)
{
struct cpudata *cpu = (struct cpudata *) __data;
struct sample *sample;
- u64 energy;
intel_pstate_sample(cpu);
sample = &cpu->samples[cpu->sample_ptr];
- rdmsrl(MSR_PKG_ENERGY_STATUS, energy);
intel_pstate_adjust_busy_pstate(cpu);
@@ -631,7 +657,6 @@ static void intel_pstate_timer_func(unsigned long __data)
cpu->pstate.current_pstate,
sample->mperf,
sample->aperf,
- div64_u64(energy, energy_divisor),
sample->freq);
intel_pstate_set_sample_time(cpu);
@@ -913,7 +938,6 @@ static int __init intel_pstate_init(void)
int cpu, rc = 0;
const struct x86_cpu_id *id;
struct cpu_defaults *cpu_info;
- u64 units;
if (no_load)
return -ENODEV;
@@ -947,9 +971,6 @@ static int __init intel_pstate_init(void)
if (rc)
goto out;
- rdmsrl(MSR_RAPL_POWER_UNIT, units);
- energy_divisor = 1 << ((units >> 8) & 0x1f); /* bits{12:8} */
-
intel_pstate_debug_expose_params();
intel_pstate_sysfs_expose_params();
diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c
index e10b646634d7..6684e0342792 100644
--- a/drivers/cpufreq/powernow-k8.c
+++ b/drivers/cpufreq/powernow-k8.c
@@ -1076,7 +1076,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data;
struct init_on_cpu init_on_cpu;
- int rc;
+ int rc, cpu;
smp_call_function_single(pol->cpu, check_supported_cpu, &rc, 1);
if (rc)
@@ -1140,7 +1140,9 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
pr_debug("cpu_init done, current fid 0x%x, vid 0x%x\n",
data->currfid, data->currvid);
- per_cpu(powernow_data, pol->cpu) = data;
+ /* Point all the CPUs in this policy to the same data */
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = data;
return 0;
@@ -1155,6 +1157,7 @@ err_out:
static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
{
struct powernow_k8_data *data = per_cpu(powernow_data, pol->cpu);
+ int cpu;
if (!data)
return -EINVAL;
@@ -1165,7 +1168,8 @@ static int powernowk8_cpu_exit(struct cpufreq_policy *pol)
kfree(data->powernow_table);
kfree(data);
- per_cpu(powernow_data, pol->cpu) = NULL;
+ for_each_cpu(cpu, pol->cpus)
+ per_cpu(powernow_data, cpu) = NULL;
return 0;
}
diff --git a/drivers/crypto/nx/nx-842.c b/drivers/crypto/nx/nx-842.c
index 6c4c000671c5..1e5481d88a26 100644
--- a/drivers/crypto/nx/nx-842.c
+++ b/drivers/crypto/nx/nx-842.c
@@ -158,6 +158,15 @@ static inline unsigned long nx842_get_scatterlist_size(
return sl->entry_nr * sizeof(struct nx842_slentry);
}
+static inline unsigned long nx842_get_pa(void *addr)
+{
+ if (is_vmalloc_addr(addr))
+ return page_to_phys(vmalloc_to_page(addr))
+ + offset_in_page(addr);
+ else
+ return __pa(addr);
+}
+
static int nx842_build_scatterlist(unsigned long buf, int len,
struct nx842_scatterlist *sl)
{
@@ -168,7 +177,7 @@ static int nx842_build_scatterlist(unsigned long buf, int len,
entry = sl->entries;
while (len) {
- entry->ptr = __pa(buf);
+ entry->ptr = nx842_get_pa((void *)buf);
nextpage = ALIGN(buf + 1, NX842_HW_PAGE_SIZE);
if (nextpage < buf + len) {
/* we aren't at the end yet */
@@ -370,8 +379,8 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_COMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
- op.out = __pa(slout.entries);
+ op.csbcpb = nx842_get_pa(csbcpb);
+ op.out = nx842_get_pa(slout.entries);
for (i = 0; i < hdr->blocks_nr; i++) {
/*
@@ -401,13 +410,13 @@ int nx842_compress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, max_sync_size, &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -565,7 +574,7 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
op.flags = NX842_OP_DECOMPRESS;
csbcpb = &workmem->csbcpb;
memset(csbcpb, 0, sizeof(*csbcpb));
- op.csbcpb = __pa(csbcpb);
+ op.csbcpb = nx842_get_pa(csbcpb);
/*
* max_sync_size may have changed since compression,
@@ -597,12 +606,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
if (likely((inbuf & NX842_HW_PAGE_MASK) ==
((inbuf + hdr->sizes[i] - 1) & NX842_HW_PAGE_MASK))) {
/* Create direct DDE */
- op.in = __pa(inbuf);
+ op.in = nx842_get_pa((void *)inbuf);
op.inlen = hdr->sizes[i];
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(inbuf, hdr->sizes[i] , &slin);
- op.in = __pa(slin.entries);
+ op.in = nx842_get_pa(slin.entries);
op.inlen = -nx842_get_scatterlist_size(&slin);
}
@@ -613,12 +622,12 @@ int nx842_decompress(const unsigned char *in, unsigned int inlen,
*/
if (likely(max_sync_size == NX842_HW_PAGE_SIZE)) {
/* Create direct DDE */
- op.out = __pa(outbuf);
+ op.out = nx842_get_pa((void *)outbuf);
op.outlen = max_sync_size;
} else {
/* Create indirect DDE (scatterlist) */
nx842_build_scatterlist(outbuf, max_sync_size, &slout);
- op.out = __pa(slout.entries);
+ op.out = nx842_get_pa(slout.entries);
op.outlen = -nx842_get_scatterlist_size(&slout);
}
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 9bed1a2a67a1..605b016bcea4 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -346,6 +346,7 @@ config MOXART_DMA
tristate "MOXART DMA support"
depends on ARCH_MOXART
select DMA_ENGINE
+ select DMA_OF
select DMA_VIRTUAL_CHANNELS
help
Enable support for the MOXA ART SoC DMA controller.
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 4e7918339b12..19041cefabb1 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -449,6 +449,7 @@ static const struct of_device_id sdma_dt_ids[] = {
{ .compatible = "fsl,imx51-sdma", .data = &sdma_imx51, },
{ .compatible = "fsl,imx35-sdma", .data = &sdma_imx35, },
{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
+ { .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, sdma_dt_ids);
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 87529181efcc..4e3549a16132 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -77,7 +77,8 @@ static irqreturn_t ioat_dma_do_interrupt(int irq, void *data)
attnstatus = readl(instance->reg_base + IOAT_ATTNSTATUS_OFFSET);
for_each_set_bit(bit, &attnstatus, BITS_PER_LONG) {
chan = ioat_chan_by_index(instance, bit);
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
}
writeb(intrctrl, instance->reg_base + IOAT_INTRCTRL_OFFSET);
@@ -93,7 +94,8 @@ static irqreturn_t ioat_dma_do_interrupt_msix(int irq, void *data)
{
struct ioat_chan_common *chan = data;
- tasklet_schedule(&chan->cleanup_task);
+ if (test_bit(IOAT_RUN, &chan->state))
+ tasklet_schedule(&chan->cleanup_task);
return IRQ_HANDLED;
}
@@ -116,7 +118,6 @@ void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *c
chan->timer.function = device->timer_fn;
chan->timer.data = data;
tasklet_init(&chan->cleanup_task, device->cleanup_fn, data);
- tasklet_disable(&chan->cleanup_task);
}
/**
@@ -354,13 +355,49 @@ static int ioat1_dma_alloc_chan_resources(struct dma_chan *c)
writel(((u64) chan->completion_dma) >> 32,
chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
- tasklet_enable(&chan->cleanup_task);
+ set_bit(IOAT_RUN, &chan->state);
ioat1_dma_start_null_desc(ioat); /* give chain to dma device */
dev_dbg(to_dev(chan), "%s: allocated %d descriptors\n",
__func__, ioat->desccount);
return ioat->desccount;
}
+void ioat_stop(struct ioat_chan_common *chan)
+{
+ struct ioatdma_device *device = chan->device;
+ struct pci_dev *pdev = device->pdev;
+ int chan_id = chan_num(chan);
+ struct msix_entry *msix;
+
+ /* 1/ stop irq from firing tasklets
+ * 2/ stop the tasklet from re-arming irqs
+ */
+ clear_bit(IOAT_RUN, &chan->state);
+
+ /* flush inflight interrupts */
+ switch (device->irq_mode) {
+ case IOAT_MSIX:
+ msix = &device->msix_entries[chan_id];
+ synchronize_irq(msix->vector);
+ break;
+ case IOAT_MSI:
+ case IOAT_INTX:
+ synchronize_irq(pdev->irq);
+ break;
+ default:
+ break;
+ }
+
+ /* flush inflight timers */
+ del_timer_sync(&chan->timer);
+
+ /* flush inflight tasklet runs */
+ tasklet_kill(&chan->cleanup_task);
+
+ /* final cleanup now that everything is quiesced and can't re-arm */
+ device->cleanup_fn((unsigned long) &chan->common);
+}
+
/**
* ioat1_dma_free_chan_resources - release all the descriptors
* @chan: the channel to be cleaned
@@ -379,9 +416,7 @@ static void ioat1_dma_free_chan_resources(struct dma_chan *c)
if (ioat->desccount == 0)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- ioat1_cleanup(ioat);
+ ioat_stop(chan);
/* Delay 100ms after reset to allow internal DMA logic to quiesce
* before removing DMA descriptor resources.
@@ -526,8 +561,11 @@ ioat1_dma_prep_memcpy(struct dma_chan *c, dma_addr_t dma_dest,
static void ioat1_cleanup_event(unsigned long data)
{
struct ioat_dma_chan *ioat = to_ioat_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat1_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
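
The new ioat_stop() spells out a teardown order that generalizes beyond ioat: first refuse new work, then quiesce every source that could re-arm it, and only then run the final cleanup. A hedged sketch of that order for a driver with one IRQ, one timer and one tasklet (my_chan and its fields are illustrative, not the ioat structures):

        static void my_chan_stop(struct my_chan *chan)
        {
                /* 1. refuse new work: the IRQ handler and tasklet both test this */
                clear_bit(MY_CHAN_RUN, &chan->state);

                /* 2. wait out any handler already running on another CPU */
                synchronize_irq(chan->irq);

                /* 3. kill deferred contexts that could still re-arm each other */
                del_timer_sync(&chan->timer);
                tasklet_kill(&chan->tasklet);

                /* 4. nothing can race with the final cleanup any more */
                my_chan_cleanup(chan);
        }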
diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h
index 11fb877ddca9..e982f00a9843 100644
--- a/drivers/dma/ioat/dma.h
+++ b/drivers/dma/ioat/dma.h
@@ -356,6 +356,7 @@ bool ioat_cleanup_preamble(struct ioat_chan_common *chan,
void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
void ioat_kobject_del(struct ioatdma_device *device);
int ioat_dma_setup_interrupts(struct ioatdma_device *device);
+void ioat_stop(struct ioat_chan_common *chan);
extern const struct sysfs_ops ioat_sysfs_ops;
extern struct ioat_sysfs_entry ioat_version_attr;
extern struct ioat_sysfs_entry ioat_cap_attr;
diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c
index 5d3affe7e976..8d1058085eeb 100644
--- a/drivers/dma/ioat/dma_v2.c
+++ b/drivers/dma/ioat/dma_v2.c
@@ -190,8 +190,11 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat)
void ioat2_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat2_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
@@ -553,10 +556,10 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
ioat->issued = 0;
ioat->tail = 0;
ioat->alloc_order = order;
+ set_bit(IOAT_RUN, &chan->state);
spin_unlock_bh(&ioat->prep_lock);
spin_unlock_bh(&chan->cleanup_lock);
- tasklet_enable(&chan->cleanup_task);
ioat2_start_null_desc(ioat);
/* check that we got off the ground */
@@ -566,7 +569,6 @@ int ioat2_alloc_chan_resources(struct dma_chan *c)
} while (i++ < 20 && !is_ioat_active(status) && !is_ioat_idle(status));
if (is_ioat_active(status) || is_ioat_idle(status)) {
- set_bit(IOAT_RUN, &chan->state);
return 1 << ioat->alloc_order;
} else {
u32 chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
@@ -809,11 +811,8 @@ void ioat2_free_chan_resources(struct dma_chan *c)
if (!ioat->ring)
return;
- tasklet_disable(&chan->cleanup_task);
- del_timer_sync(&chan->timer);
- device->cleanup_fn((unsigned long) c);
+ ioat_stop(chan);
device->reset_hw(chan);
- clear_bit(IOAT_RUN, &chan->state);
spin_lock_bh(&chan->cleanup_lock);
spin_lock_bh(&ioat->prep_lock);
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 820817e97e62..b9b38a1cf92f 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -464,8 +464,11 @@ static void ioat3_cleanup(struct ioat2_dma_chan *ioat)
static void ioat3_cleanup_event(unsigned long data)
{
struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data);
+ struct ioat_chan_common *chan = &ioat->base;
ioat3_cleanup(ioat);
+ if (!test_bit(IOAT_RUN, &chan->state))
+ return;
writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET);
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 53fb0c8365b0..766b68ed505c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -497,8 +497,8 @@ mv_xor_tx_submit(struct dma_async_tx_descriptor *tx)
if (!mv_can_chain(grp_start))
goto submit_done;
- dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %x\n",
- old_chain_tail->async_tx.phys);
+ dev_dbg(mv_chan_to_devp(mv_chan), "Append to last desc %pa\n",
+ &old_chain_tail->async_tx.phys);
/* fix up the hardware chain */
mv_desc_set_next_desc(old_chain_tail, grp_start->async_tx.phys);
@@ -527,7 +527,8 @@ submit_done:
/* returns the number of allocated descriptors */
static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
{
- char *hw_desc;
+ void *virt_desc;
+ dma_addr_t dma_desc;
int idx;
struct mv_xor_chan *mv_chan = to_mv_xor_chan(chan);
struct mv_xor_desc_slot *slot = NULL;
@@ -542,17 +543,16 @@ static int mv_xor_alloc_chan_resources(struct dma_chan *chan)
" %d descriptor slots", idx);
break;
}
- hw_desc = (char *) mv_chan->dma_desc_pool_virt;
- slot->hw_desc = (void *) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ virt_desc = mv_chan->dma_desc_pool_virt;
+ slot->hw_desc = virt_desc + idx * MV_XOR_SLOT_SIZE;
dma_async_tx_descriptor_init(&slot->async_tx, chan);
slot->async_tx.tx_submit = mv_xor_tx_submit;
INIT_LIST_HEAD(&slot->chain_node);
INIT_LIST_HEAD(&slot->slot_node);
INIT_LIST_HEAD(&slot->tx_list);
- hw_desc = (char *) mv_chan->dma_desc_pool;
- slot->async_tx.phys =
- (dma_addr_t) &hw_desc[idx * MV_XOR_SLOT_SIZE];
+ dma_desc = mv_chan->dma_desc_pool;
+ slot->async_tx.phys = dma_desc + idx * MV_XOR_SLOT_SIZE;
slot->idx = idx++;
spin_lock_bh(&mv_chan->lock);
@@ -582,8 +582,8 @@ mv_xor_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
int slot_cnt;
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s dest: %x src %x len: %u flags: %ld\n",
- __func__, dest, src, len, flags);
+ "%s dest: %pad src %pad len: %u flags: %ld\n",
+ __func__, &dest, &src, len, flags);
if (unlikely(len < MV_XOR_MIN_BYTE_COUNT))
return NULL;
@@ -626,8 +626,8 @@ mv_xor_prep_dma_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src,
BUG_ON(len > MV_XOR_MAX_BYTE_COUNT);
dev_dbg(mv_chan_to_devp(mv_chan),
- "%s src_cnt: %d len: dest %x %u flags: %ld\n",
- __func__, src_cnt, len, dest, flags);
+ "%s src_cnt: %d len: %u dest %pad flags: %ld\n",
+ __func__, src_cnt, len, &dest, flags);
spin_lock_bh(&mv_chan->lock);
slot_cnt = mv_chan_xor_slot_count(len, src_cnt);
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 00a2de957b23..bf18c786ed40 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1641,6 +1641,7 @@ static void dma_tasklet(unsigned long data)
struct d40_chan *d40c = (struct d40_chan *) data;
struct d40_desc *d40d;
unsigned long flags;
+ bool callback_active;
dma_async_tx_callback callback;
void *callback_param;
@@ -1668,6 +1669,7 @@ static void dma_tasklet(unsigned long data)
}
/* Callback to client */
+ callback_active = !!(d40d->txd.flags & DMA_PREP_INTERRUPT);
callback = d40d->txd.callback;
callback_param = d40d->txd.callback_param;
@@ -1690,7 +1692,7 @@ static void dma_tasklet(unsigned long data)
spin_unlock_irqrestore(&d40c->lock, flags);
- if (callback && (d40d->txd.flags & DMA_PREP_INTERRUPT))
+ if (callback_active && callback)
callback(callback_param);
return;
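
The ste_dma40 fix above is a use-after-free avoidance pattern: everything the callback decision needs is copied out of the descriptor while the channel lock is still held, because the descriptor may be recycled once the lock is dropped. Roughly, as a generic sketch with placeholder my_* names:

        static void my_chan_complete(struct my_chan *chan, struct my_desc *desc)
        {
                dma_async_tx_callback callback;
                void *callback_param;
                bool callback_active;
                unsigned long flags;

                spin_lock_irqsave(&chan->lock, flags);

                /* snapshot while the descriptor is still guaranteed to be ours */
                callback_active = !!(desc->txd.flags & DMA_PREP_INTERRUPT);
                callback        = desc->txd.callback;
                callback_param  = desc->txd.callback_param;

                my_desc_release(chan, desc);    /* desc may be recycled from here on */
                spin_unlock_irqrestore(&chan->lock, flags);

                if (callback_active && callback)        /* use only the saved copies */
                        callback(callback_param);
        }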
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index e8c9ef03495b..33edd6766344 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -559,7 +559,8 @@ static void edac_mc_workq_function(struct work_struct *work_req)
*
* called with the mem_ctls_mutex held
*/
-static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
+static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec,
+ bool init)
{
edac_dbg(0, "\n");
@@ -567,7 +568,9 @@ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec)
if (mci->op_state != OP_RUNNING_POLL)
return;
- INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+ if (init)
+ INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function);
+
mod_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec));
}
@@ -601,7 +604,7 @@ static void edac_mc_workq_teardown(struct mem_ctl_info *mci)
* user space has updated our poll period value, need to
* reset our workq delays
*/
-void edac_mc_reset_delay_period(int value)
+void edac_mc_reset_delay_period(unsigned long value)
{
struct mem_ctl_info *mci;
struct list_head *item;
@@ -611,7 +614,7 @@ void edac_mc_reset_delay_period(int value)
list_for_each(item, &mc_devices) {
mci = list_entry(item, struct mem_ctl_info, link);
- edac_mc_workq_setup(mci, (unsigned long) value);
+ edac_mc_workq_setup(mci, value, false);
}
mutex_unlock(&mem_ctls_mutex);
@@ -782,7 +785,7 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
/* This instance is NOW RUNNING */
mci->op_state = OP_RUNNING_POLL;
- edac_mc_workq_setup(mci, edac_mc_get_poll_msec());
+ edac_mc_workq_setup(mci, edac_mc_get_poll_msec(), true);
} else {
mci->op_state = OP_RUNNING_INTERRUPT;
}
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 51c0362acf5c..b335c6ab5efe 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -52,18 +52,20 @@ int edac_mc_get_poll_msec(void)
static int edac_set_poll_msec(const char *val, struct kernel_param *kp)
{
- long l;
+ unsigned long l;
int ret;
if (!val)
return -EINVAL;
- ret = kstrtol(val, 0, &l);
+ ret = kstrtoul(val, 0, &l);
if (ret)
return ret;
- if ((int)l != l)
+
+ if (l < 1000)
return -EINVAL;
- *((int *)kp->arg) = l;
+
+ *((unsigned long *)kp->arg) = l;
/* notify edac_mc engine to reset the poll period */
edac_mc_reset_delay_period(l);
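
The handler above now parses the poll period as unsigned and rejects anything below one second, instead of only catching int truncation. A small user-space analogue of the same validation (parse_poll_msec is a made-up helper, not a kernel interface):

        #include <errno.h>
        #include <stdlib.h>

        static int parse_poll_msec(const char *val, unsigned long *out)
        {
                char *end;
                unsigned long l;

                errno = 0;
                l = strtoul(val, &end, 0);
                if (errno || end == val || *end != '\0')
                        return -EINVAL;
                if (l < 1000)           /* same floor as the patch: >= 1000 ms */
                        return -EINVAL;
                *out = l;
                return 0;
        }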
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 3d139c6e7fe3..f2118bfcf8df 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -52,7 +52,7 @@ extern void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
extern void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev);
extern void edac_device_reset_delay_period(struct edac_device_ctl_info
*edac_dev, unsigned long value);
-extern void edac_mc_reset_delay_period(int value);
+extern void edac_mc_reset_delay_period(unsigned long value);
extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index d63f4798f7d0..57e96a3350f0 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -943,33 +943,35 @@ static int i7300_get_devices(struct mem_ctl_info *mci)
/* Attempt to 'get' the MCH register we want */
pdev = NULL;
- while (!pvt->pci_dev_16_1_fsb_addr_map ||
- !pvt->pci_dev_16_2_fsb_err_regs) {
- pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR, pdev);
- if (!pdev) {
- /* End of list, leave */
- i7300_printk(KERN_ERR,
- "'system address,Process Bus' "
- "device not found:"
- "vendor 0x%x device 0x%x ERR funcs "
- "(broken BIOS?)\n",
- PCI_VENDOR_ID_INTEL,
- PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
- goto error;
- }
-
+ while ((pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR,
+ pdev))) {
/* Store device 16 funcs 1 and 2 */
switch (PCI_FUNC(pdev->devfn)) {
case 1:
- pvt->pci_dev_16_1_fsb_addr_map = pdev;
+ if (!pvt->pci_dev_16_1_fsb_addr_map)
+ pvt->pci_dev_16_1_fsb_addr_map =
+ pci_dev_get(pdev);
break;
case 2:
- pvt->pci_dev_16_2_fsb_err_regs = pdev;
+ if (!pvt->pci_dev_16_2_fsb_err_regs)
+ pvt->pci_dev_16_2_fsb_err_regs =
+ pci_dev_get(pdev);
break;
}
}
+ if (!pvt->pci_dev_16_1_fsb_addr_map ||
+ !pvt->pci_dev_16_2_fsb_err_regs) {
+ /* At least one device was not found */
+ i7300_printk(KERN_ERR,
+ "'system address,Process Bus' device not found:"
+ "vendor 0x%x device 0x%x ERR funcs (broken BIOS?)\n",
+ PCI_VENDOR_ID_INTEL,
+ PCI_DEVICE_ID_INTEL_I7300_MCH_ERR);
+ goto error;
+ }
+
edac_dbg(1, "System Address, processor bus- PCI Bus ID: %s %x:%x\n",
pci_name(pvt->pci_dev_16_0_fsb_ctlr),
pvt->pci_dev_16_0_fsb_ctlr->vendor,
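
The i7300_edac rework matters because pci_get_device() drops the reference on the iterator device passed back in; any device the driver wants to keep beyond the loop must take its own reference. The general shape, hedged and with placeholder names:

        struct pci_dev *pdev = NULL, *saved = NULL;

        while ((pdev = pci_get_device(MY_VENDOR, MY_DEVICE, pdev))) {
                if (!saved && is_the_one_we_want(pdev))         /* hypothetical test */
                        saved = pci_dev_get(pdev);              /* take our own reference */
                /* pci_get_device() releases the reference on the previous
                 * pdev on every iteration, so only 'saved' stays pinned */
        }

        if (!saved)
                return -ENODEV;
        /* ... later, balance with pci_dev_put(saved) on teardown ... */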
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 87533ca7752e..d871275196f6 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1334,14 +1334,19 @@ static int i7core_get_onedevice(struct pci_dev **prev,
* is at addr 8086:2c40, instead of 8086:2c41. So, we need
* to probe for the alternate address in case of failure
*/
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_I7_NONCORE && !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_I7_NONCORE_ALT, *prev);
+ }
- if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE && !pdev)
+ if (dev_descr->dev_id == PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE &&
+ !pdev) {
+ pci_dev_get(*prev); /* pci_get_device will put it */
pdev = pci_get_device(PCI_VENDOR_ID_INTEL,
PCI_DEVICE_ID_INTEL_LYNNFIELD_NONCORE_ALT,
*prev);
+ }
if (!pdev) {
if (*prev) {
diff --git a/drivers/extcon/extcon-arizona.c b/drivers/extcon/extcon-arizona.c
index c20602f601ee..98a14f6143a7 100644
--- a/drivers/extcon/extcon-arizona.c
+++ b/drivers/extcon/extcon-arizona.c
@@ -222,27 +222,19 @@ static void arizona_extcon_pulse_micbias(struct arizona_extcon_info *info)
struct snd_soc_dapm_context *dapm = arizona->dapm;
int ret;
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_force_enable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to enable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (!arizona->pdata.micd_force_micbias) {
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, widget);
if (ret != 0)
dev_warn(arizona->dev, "Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
}
}
@@ -304,16 +296,12 @@ static void arizona_stop_mic(struct arizona_extcon_info *info)
ARIZONA_MICD_ENA, 0,
&change);
- mutex_lock(&dapm->card->dapm_mutex);
-
ret = snd_soc_dapm_disable_pin(dapm, widget);
if (ret != 0)
dev_warn(arizona->dev,
"Failed to disable %s: %d\n",
widget, ret);
- mutex_unlock(&dapm->card->dapm_mutex);
-
snd_soc_dapm_sync(dapm);
if (info->micd_reva) {
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index de4aa409abe2..2c6d5e118ac1 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data)
old->config_rom_retries = 0;
fw_notice(card, "rediscovered device %s\n", dev_name(dev));
- PREPARE_DELAYED_WORK(&old->work, fw_device_update);
+ old->workfn = fw_device_update;
fw_schedule_device_work(old, 0);
if (current_node == card->root_node)
@@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_INITIALIZING,
FW_DEVICE_RUNNING) == FW_DEVICE_GONE) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
} else {
fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n",
@@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work)
dev_name(&device->device), fw_rcode_string(ret));
gone:
atomic_set(&device->state, FW_DEVICE_GONE);
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device, SHUTDOWN_DELAY);
out:
if (node_id == card->root_node->node_id)
fw_schedule_bm_work(card, 0);
}
+static void fw_device_workfn(struct work_struct *work)
+{
+ struct fw_device *device = container_of(to_delayed_work(work),
+ struct fw_device, work);
+ device->workfn(work);
+}
+
void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
{
struct fw_device *device;
@@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
* power-up after getting plugged in. We schedule the
* first config rom scan half a second after bus reset.
*/
- INIT_DELAYED_WORK(&device->work, fw_device_init);
+ device->workfn = fw_device_init;
+ INIT_DELAYED_WORK(&device->work, fw_device_workfn);
fw_schedule_device_work(device, INITIAL_DELAY);
break;
@@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
if (atomic_cmpxchg(&device->state,
FW_DEVICE_RUNNING,
FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_refresh);
+ device->workfn = fw_device_refresh;
fw_schedule_device_work(device,
device->is_local ? 0 : INITIAL_DELAY);
}
@@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
smp_wmb(); /* update node_id before generation */
device->generation = card->generation;
if (atomic_read(&device->state) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_update);
+ device->workfn = fw_device_update;
fw_schedule_device_work(device, 0);
}
break;
@@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event)
device = node->data;
if (atomic_xchg(&device->state,
FW_DEVICE_GONE) == FW_DEVICE_RUNNING) {
- PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown);
+ device->workfn = fw_device_shutdown;
fw_schedule_device_work(device,
list_empty(&card->link) ? 0 : SHUTDOWN_DELAY);
}
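
The firewire conversion above replaces PREPARE_DELAYED_WORK() with a single fixed work handler that dispatches through a function pointer stored in the device. A sketch of that indirection pattern (my_device is illustrative):

        struct my_device {
                work_func_t workfn;             /* handler to run on the next pass */
                struct delayed_work work;
        };

        static void my_device_workfn(struct work_struct *work)
        {
                struct my_device *dev = container_of(to_delayed_work(work),
                                                     struct my_device, work);
                dev->workfn(work);              /* forward to the current target */
        }

        /* setup:    dev->workfn = first_handler;
         *           INIT_DELAYED_WORK(&dev->work, my_device_workfn);
         * retarget: assign dev->workfn instead of re-initializing the work item,
         *           which is what PREPARE_DELAYED_WORK used to do. */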
diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c
index 6b895986dc22..4af0a7bad7f2 100644
--- a/drivers/firewire/net.c
+++ b/drivers/firewire/net.c
@@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
if (rcode == RCODE_COMPLETE) {
fwnet_transmit_packet_done(ptask);
} else {
- fwnet_transmit_packet_failed(ptask);
-
if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) {
dev_err(&ptask->dev->netdev->dev,
"fwnet_write_complete failed: %x (skipped %d)\n",
@@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode,
errors_skipped = 0;
last_rcode = rcode;
- } else
+ } else {
errors_skipped++;
+ }
+ fwnet_transmit_packet_failed(ptask);
}
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 6f74d8d3f700..8db663219560 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
#define QUIRK_NO_MSI 0x10
#define QUIRK_TI_SLLZ059 0x20
#define QUIRK_IR_WAKE 0x40
-#define QUIRK_PHY_LCTRL_TIMEOUT 0x80
/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
@@ -303,10 +302,7 @@ static const struct {
QUIRK_BE_HEADERS},
{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
- QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},
-
- {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
- QUIRK_PHY_LCTRL_TIMEOUT},
+ QUIRK_NO_MSI},
{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
QUIRK_RESET_PACKET},
@@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
", disable MSI = " __stringify(QUIRK_NO_MSI)
", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059)
", IR wake unreliable = " __stringify(QUIRK_IR_WAKE)
- ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT)
")");
#define OHCI_PARAM_DEBUG_AT_AR 1
@@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card,
* TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
* cannot actually use the phy at that time. These need tens of
* milliseconds pause between LPS write and first phy access too.
- *
- * But do not wait for 50msec on Agere/LSI cards. Their phy
- * arbitration state machine may time out during such a long wait.
*/
reg_write(ohci, OHCI1394_HCControlSet,
@@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card,
OHCI1394_HCControl_postedWriteEnable);
flush_writes(ohci);
- if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
+ for (lps = 0, i = 0; !lps && i < 3; i++) {
msleep(50);
-
- for (lps = 0, i = 0; !lps && i < 150; i++) {
- msleep(1);
lps = reg_read(ohci, OHCI1394_HCControlSet) &
OHCI1394_HCControl_LPS;
}
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index 281029daf98c..7aef911fdc71 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -146,6 +146,7 @@ struct sbp2_logical_unit {
*/
int generation;
int retries;
+ work_func_t workfn;
struct delayed_work work;
bool has_sdev;
bool blocked;
@@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work)
/* set appropriate retry limit(s) in BUSY_TIMEOUT register */
sbp2_set_busy_timeout(lu);
- PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect);
+ lu->workfn = sbp2_reconnect;
sbp2_agent_reset(lu);
/* This was a re-login. */
@@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work)
* If a bus reset happened, sbp2_update will have requeued
* lu->work already. Reset the work from reconnect to login.
*/
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
static void sbp2_reconnect(struct work_struct *work)
@@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work)
lu->retries++ >= 5) {
dev_err(tgt_dev(tgt), "failed to reconnect\n");
lu->retries = 0;
- PREPARE_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
}
sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5));
@@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work)
sbp2_conditionally_unblock(lu);
}
+static void sbp2_lu_workfn(struct work_struct *work)
+{
+ struct sbp2_logical_unit *lu = container_of(to_delayed_work(work),
+ struct sbp2_logical_unit, work);
+ lu->workfn(work);
+}
+
static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
{
struct sbp2_logical_unit *lu;
@@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry)
lu->blocked = false;
++tgt->dont_block;
INIT_LIST_HEAD(&lu->orb_list);
- INIT_DELAYED_WORK(&lu->work, sbp2_login);
+ lu->workfn = sbp2_login;
+ INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn);
list_add_tail(&lu->link, &tgt->lu_list);
return 0;
diff --git a/drivers/fmc/fmc-write-eeprom.c b/drivers/fmc/fmc-write-eeprom.c
index ee5b47904130..9bb2cbd5c9f2 100644
--- a/drivers/fmc/fmc-write-eeprom.c
+++ b/drivers/fmc/fmc-write-eeprom.c
@@ -27,7 +27,7 @@ FMC_PARAM_BUSID(fwe_drv);
/* The "file=" is like the generic "gateware=" used elsewhere */
static char *fwe_file[FMC_MAX_CARDS];
static int fwe_file_n;
-module_param_array_named(file, fwe_file, charp, &fwe_file_n, 444);
+module_param_array_named(file, fwe_file, charp, &fwe_file_n, 0444);
static int fwe_run_tlv(struct fmc_device *fmc, const struct firmware *fw,
int write)
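
The fmc fix above is a classic octal-literal slip: module_param permissions are mode bits, and decimal 444 is not the r--r--r-- mode 0444. A short, runnable illustration:

        #include <stdio.h>

        int main(void)
        {
                printf("444 decimal is 0%o in octal\n", 444);    /* prints 0674 */
                printf("0444 octal   is %d in decimal\n", 0444); /* prints 292  */
                return 0;
        }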
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index 697338772b64..903f24d28ba0 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -403,6 +403,7 @@ config GPIO_GRGPIO
config GPIO_TB10X
bool
+ select GENERIC_IRQ_CHIP
select OF_GPIO
comment "I2C GPIO expanders:"
diff --git a/drivers/gpio/gpio-bcm-kona.c b/drivers/gpio/gpio-bcm-kona.c
index 233d088ac59f..f32357e2d78d 100644
--- a/drivers/gpio/gpio-bcm-kona.c
+++ b/drivers/gpio/gpio-bcm-kona.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012-2013 Broadcom Corporation
+ * Copyright (C) 2012-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -657,6 +657,6 @@ static struct platform_driver bcm_kona_gpio_driver = {
module_platform_driver(bcm_kona_gpio_driver);
-MODULE_AUTHOR("Broadcom");
+MODULE_AUTHOR("Broadcom Corporation <bcm-kernel-feedback-list@broadcom.com>");
MODULE_DESCRIPTION("Broadcom Kona GPIO Driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpio/gpio-clps711x.c b/drivers/gpio/gpio-clps711x.c
index d3550274b8f7..3c2ba2ad0ada 100644
--- a/drivers/gpio/gpio-clps711x.c
+++ b/drivers/gpio/gpio-clps711x.c
@@ -97,3 +97,4 @@ module_platform_driver(clps711x_gpio_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("CLPS711X GPIO driver");
+MODULE_ALIAS("platform:clps711x-gpio");
diff --git a/drivers/gpio/gpio-intel-mid.c b/drivers/gpio/gpio-intel-mid.c
index d1b50ef5fab8..e585163f1ad5 100644
--- a/drivers/gpio/gpio-intel-mid.c
+++ b/drivers/gpio/gpio-intel-mid.c
@@ -394,8 +394,8 @@ static const struct irq_domain_ops intel_gpio_irq_ops = {
static int intel_gpio_runtime_idle(struct device *dev)
{
- pm_schedule_suspend(dev, 500);
- return -EBUSY;
+ int err = pm_schedule_suspend(dev, 500);
+ return err ?: -EBUSY;
}
static const struct dev_pm_ops intel_gpio_pm_ops = {
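
The intel-mid runtime_idle change uses GCC's conditional operator with an omitted middle operand, so the pm_schedule_suspend() error is returned when there is one and -EBUSY otherwise. It is shorthand for the following, except that err is evaluated only once:

        int ret = err ? err : -EBUSY;   /* equivalent spelling of err ?: -EBUSY */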
diff --git a/drivers/gpio/gpio-xtensa.c b/drivers/gpio/gpio-xtensa.c
index 1d136eceda62..7081304d6797 100644
--- a/drivers/gpio/gpio-xtensa.c
+++ b/drivers/gpio/gpio-xtensa.c
@@ -40,6 +40,8 @@
#error GPIO32 option is not enabled for your xtensa core variant
#endif
+#if XCHAL_HAVE_CP
+
static inline unsigned long enable_cp(unsigned long *cpenable)
{
unsigned long flags;
@@ -57,6 +59,20 @@ static inline void disable_cp(unsigned long flags, unsigned long cpenable)
local_irq_restore(flags);
}
+#else
+
+static inline unsigned long enable_cp(unsigned long *cpenable)
+{
+ *cpenable = 0; /* avoid uninitialized value warning */
+ return 0;
+}
+
+static inline void disable_cp(unsigned long flags, unsigned long cpenable)
+{
+}
+
+#endif /* XCHAL_HAVE_CP */
+
static int xtensa_impwire_get_direction(struct gpio_chip *gc, unsigned offset)
{
return 1; /* input only */
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index acf3a36c9ebc..32982da82694 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev,
{
struct armada_private *priv = dev->dev_private;
- /*
- * Yes, we really must jump through these hoops just to store a
- * _pointer_ to something into the kfifo. This is utterly insane
- * and idiotic, because it kfifo requires the _data_ pointed to by
- * the pointer const, not the pointer itself. Not only that, but
- * you have to pass a pointer _to_ the pointer you want stored.
- */
- const struct drm_framebuffer *silly_api_alert = fb;
- WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert));
+ WARN_ON(!kfifo_put(&priv->fb_unref, fb));
schedule_work(&priv->fb_unref_work);
}
diff --git a/drivers/gpu/drm/ast/ast_fb.c b/drivers/gpu/drm/ast/ast_fb.c
index 3f65dd6676b2..a28640f47c27 100644
--- a/drivers/gpu/drm/ast/ast_fb.c
+++ b/drivers/gpu/drm/ast/ast_fb.c
@@ -65,7 +65,7 @@ static void ast_dirty_update(struct ast_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = ast_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig
index c8fcf12019f0..5f8b0c2b9a44 100644
--- a/drivers/gpu/drm/bochs/Kconfig
+++ b/drivers/gpu/drm/bochs/Kconfig
@@ -2,6 +2,7 @@ config DRM_BOCHS
tristate "DRM Support for bochs dispi vga interface (qemu stdvga)"
depends on DRM && PCI
select DRM_KMS_HELPER
+ select DRM_KMS_FB_HELPER
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index 2fd4a92162cb..32bbba0a787b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -39,7 +39,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = cirrus_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index dffc836144cc..f4dc9b7a3831 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -296,6 +296,18 @@ int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_priv)
case DRM_CAP_ASYNC_PAGE_FLIP:
req->value = dev->mode_config.async_page_flip;
break;
+ case DRM_CAP_CURSOR_WIDTH:
+ if (dev->mode_config.cursor_width)
+ req->value = dev->mode_config.cursor_width;
+ else
+ req->value = 64;
+ break;
+ case DRM_CAP_CURSOR_HEIGHT:
+ if (dev->mode_config.cursor_height)
+ req->value = dev->mode_config.cursor_height;
+ else
+ req->value = 64;
+ break;
default:
return -EINVAL;
}
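
With the two new capabilities above, a compositor can ask the kernel for the maximum hardware cursor size instead of assuming 64x64. A hedged user-space sketch, assuming a libdrm and uapi headers recent enough to define drmGetCap() and the DRM_CAP_CURSOR_* constants:

        #include <stdint.h>
        #include <stdio.h>
        #include <xf86drm.h>            /* assumes libdrm is available */

        static void query_cursor_size(int fd)
        {
                uint64_t w = 64, h = 64;

                /* fall back to the historical 64x64 if the cap is not known */
                if (drmGetCap(fd, DRM_CAP_CURSOR_WIDTH, &w))
                        w = 64;
                if (drmGetCap(fd, DRM_CAP_CURSOR_HEIGHT, &h))
                        h = 64;
                printf("cursor: %llux%llu\n",
                       (unsigned long long)w, (unsigned long long)h);
        }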
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index 5736aaa7e86c..f7af69bcf3f4 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -468,8 +468,8 @@ void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver)
} else {
list_for_each_entry_safe(dev, tmp, &driver->legacy_dev_list,
legacy_dev_list) {
- drm_put_dev(dev);
list_del(&dev->legacy_dev_list);
+ drm_put_dev(dev);
}
}
DRM_INFO("Module unloaded\n");
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index f227f544aa36..6e1a1a20cf6b 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -51,7 +51,7 @@ config DRM_EXYNOS_G2D
config DRM_EXYNOS_IPP
bool "Exynos DRM IPP"
- depends on DRM_EXYNOS && !ARCH_MULTIPLATFORM
+ depends on DRM_EXYNOS
help
Choose this option if you want to use IPP feature for DRM.
@@ -69,6 +69,6 @@ config DRM_EXYNOS_ROTATOR
config DRM_EXYNOS_GSC
bool "Exynos DRM GSC"
- depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5
+ depends on DRM_EXYNOS_IPP && ARCH_EXYNOS5 && !ARCH_MULTIPLATFORM
help
Choose this option if you want to use Exynos GSC for DRM.
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 9d096a0c5f8d..c204b4e3356e 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -171,22 +171,28 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
file->driver_priv = file_priv;
ret = exynos_drm_subdrv_open(dev, file);
- if (ret) {
- kfree(file_priv);
- file->driver_priv = NULL;
- }
+ if (ret)
+ goto err_file_priv_free;
anon_filp = anon_inode_getfile("exynos_gem", &exynos_drm_gem_fops,
NULL, 0);
if (IS_ERR(anon_filp)) {
- kfree(file_priv);
- return PTR_ERR(anon_filp);
+ ret = PTR_ERR(anon_filp);
+ goto err_subdrv_close;
}
anon_filp->f_mode = FMODE_READ | FMODE_WRITE;
file_priv->anon_filp = anon_filp;
return ret;
+
+err_subdrv_close:
+ exynos_drm_subdrv_close(dev, file);
+
+err_file_priv_free:
+ kfree(file_priv);
+ file->driver_priv = NULL;
+ return ret;
}
static void exynos_drm_preclose(struct drm_device *dev,
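
The exynos open() rework above moves to the usual goto-unwind shape: undo steps in reverse order of setup, with each label cleaning up exactly the work done before the failing step. A generic sketch of that shape (step_one/step_two and struct my_priv are hypothetical):

        static int my_open(struct drm_device *dev, struct drm_file *file)
        {
                int ret;

                file->driver_priv = kzalloc(sizeof(struct my_priv), GFP_KERNEL);
                if (!file->driver_priv)
                        return -ENOMEM;

                ret = step_one(dev, file);
                if (ret)
                        goto err_free;

                ret = step_two(dev, file);
                if (ret)
                        goto err_undo_one;

                return 0;

        err_undo_one:
                undo_step_one(dev, file);       /* reverse of step_one() */
        err_free:
                kfree(file->driver_priv);
                file->driver_priv = NULL;
                return ret;
        }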
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 380aec28840b..6c1885eedfdf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -607,7 +607,7 @@ static enum g2d_reg_type g2d_get_reg_type(int reg_offset)
reg_type = REG_TYPE_NONE;
DRM_ERROR("Unknown register offset![%d]\n", reg_offset);
break;
- };
+ }
return reg_type;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index d519a4e5fe40..09312b877470 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -16,7 +16,6 @@
#include <linux/types.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>
-#include <plat/map-base.h>
#include <drm/drmP.h>
#include <drm/exynos_drm.h>
@@ -826,7 +825,7 @@ static void ipp_put_event(struct drm_exynos_ipp_cmd_node *c_node,
DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
/*
- * quf == NULL condition means all event deletion.
+ * qbuf == NULL condition means all event deletion.
* stop operations want to delete all event list.
* another case delete only same buf id.
*/
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index a0e10aeb0e67..c021ddc1ffb4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -34,6 +34,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
+#include <linux/hdmi.h>
#include <drm/exynos_drm.h>
@@ -59,19 +60,6 @@
#define HDMI_AUI_VERSION 0x01
#define HDMI_AUI_LENGTH 0x0A
-/* HDMI infoframe to configure HDMI out packet header, AUI and AVI */
-enum HDMI_PACKET_TYPE {
- /* refer to Table 5-8 Packet Type in HDMI specification v1.4a */
- /* InfoFrame packet type */
- HDMI_PACKET_TYPE_INFOFRAME = 0x80,
- /* Vendor-Specific InfoFrame */
- HDMI_PACKET_TYPE_VSI = HDMI_PACKET_TYPE_INFOFRAME + 1,
- /* Auxiliary Video information InfoFrame */
- HDMI_PACKET_TYPE_AVI = HDMI_PACKET_TYPE_INFOFRAME + 2,
- /* Audio information InfoFrame */
- HDMI_PACKET_TYPE_AUI = HDMI_PACKET_TYPE_INFOFRAME + 4
-};
-
enum hdmi_type {
HDMI_TYPE13,
HDMI_TYPE14,
@@ -379,12 +367,6 @@ static const struct hdmiphy_config hdmiphy_v14_configs[] = {
},
};
-struct hdmi_infoframe {
- enum HDMI_PACKET_TYPE type;
- u8 ver;
- u8 len;
-};
-
static inline u32 hdmi_reg_read(struct hdmi_context *hdata, u32 reg_id)
{
return readl(hdata->regs + reg_id);
@@ -682,7 +664,7 @@ static u8 hdmi_chksum(struct hdmi_context *hdata,
}
static void hdmi_reg_infoframe(struct hdmi_context *hdata,
- struct hdmi_infoframe *infoframe)
+ union hdmi_infoframe *infoframe)
{
u32 hdr_sum;
u8 chksum;
@@ -700,13 +682,15 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
return;
}
- switch (infoframe->type) {
- case HDMI_PACKET_TYPE_AVI:
+ switch (infoframe->any.type) {
+ case HDMI_INFOFRAME_TYPE_AVI:
hdmi_reg_writeb(hdata, HDMI_AVI_CON, HDMI_AVI_CON_EVERY_VSYNC);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AVI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
/* Output format zero hardcoded, RGB YCbCr selection */
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(1), 0 << 5 |
@@ -722,18 +706,20 @@ static void hdmi_reg_infoframe(struct hdmi_context *hdata,
hdmi_reg_writeb(hdata, HDMI_AVI_BYTE(4), vic);
chksum = hdmi_chksum(hdata, HDMI_AVI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AVI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AVI_CHECK_SUM, chksum);
break;
- case HDMI_PACKET_TYPE_AUI:
+ case HDMI_INFOFRAME_TYPE_AUDIO:
hdmi_reg_writeb(hdata, HDMI_AUI_CON, 0x02);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->type);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1, infoframe->ver);
- hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->len);
- hdr_sum = infoframe->type + infoframe->ver + infoframe->len;
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER0, infoframe->any.type);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER1,
+ infoframe->any.version);
+ hdmi_reg_writeb(hdata, HDMI_AUI_HEADER2, infoframe->any.length);
+ hdr_sum = infoframe->any.type + infoframe->any.version +
+ infoframe->any.length;
chksum = hdmi_chksum(hdata, HDMI_AUI_BYTE(1),
- infoframe->len, hdr_sum);
+ infoframe->any.length, hdr_sum);
DRM_DEBUG_KMS("AUI checksum = 0x%x\n", chksum);
hdmi_reg_writeb(hdata, HDMI_AUI_CHECK_SUM, chksum);
break;
@@ -985,7 +971,7 @@ static void hdmi_conf_reset(struct hdmi_context *hdata)
static void hdmi_conf_init(struct hdmi_context *hdata)
{
- struct hdmi_infoframe infoframe;
+ union hdmi_infoframe infoframe;
/* disable HPD interrupts from HDMI IP block, use GPIO instead */
hdmi_reg_writemask(hdata, HDMI_INTC_CON, 0, HDMI_INTC_EN_GLOBAL |
@@ -1021,14 +1007,14 @@ static void hdmi_conf_init(struct hdmi_context *hdata)
hdmi_reg_writeb(hdata, HDMI_V13_AUI_CON, 0x02);
hdmi_reg_writeb(hdata, HDMI_V13_ACR_CON, 0x04);
} else {
- infoframe.type = HDMI_PACKET_TYPE_AVI;
- infoframe.ver = HDMI_AVI_VERSION;
- infoframe.len = HDMI_AVI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AVI;
+ infoframe.any.version = HDMI_AVI_VERSION;
+ infoframe.any.length = HDMI_AVI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
- infoframe.type = HDMI_PACKET_TYPE_AUI;
- infoframe.ver = HDMI_AUI_VERSION;
- infoframe.len = HDMI_AUI_LENGTH;
+ infoframe.any.type = HDMI_INFOFRAME_TYPE_AUDIO;
+ infoframe.any.version = HDMI_AUI_VERSION;
+ infoframe.any.length = HDMI_AUI_LENGTH;
hdmi_reg_infoframe(hdata, &infoframe);
/* enable AVI packet every vsync, fixes purple line problem */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 400b0c4a10fb..faa77f543a07 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -208,7 +208,7 @@ struct tda998x_priv {
# define PLL_SERIAL_1_SRL_IZ(x) (((x) & 3) << 1)
# define PLL_SERIAL_1_SRL_MAN_IZ (1 << 6)
#define REG_PLL_SERIAL_2 REG(0x02, 0x01) /* read/write */
-# define PLL_SERIAL_2_SRL_NOSC(x) (((x) & 3) << 0)
+# define PLL_SERIAL_2_SRL_NOSC(x) ((x) << 0)
# define PLL_SERIAL_2_SRL_PR(x) (((x) & 0xf) << 4)
#define REG_PLL_SERIAL_3 REG(0x02, 0x02) /* read/write */
# define PLL_SERIAL_3_SRL_CCIR (1 << 0)
@@ -528,10 +528,10 @@ tda998x_write_aif(struct drm_encoder *encoder, struct tda998x_encoder_params *p)
{
uint8_t buf[PB(5) + 1];
+ memset(buf, 0, sizeof(buf));
buf[HB(0)] = 0x84;
buf[HB(1)] = 0x01;
buf[HB(2)] = 10;
- buf[PB(0)] = 0;
buf[PB(1)] = p->audio_frame[1] & 0x07; /* CC */
buf[PB(2)] = p->audio_frame[2] & 0x1c; /* SF */
buf[PB(4)] = p->audio_frame[4];
@@ -824,6 +824,11 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
}
div = 148500 / mode->clock;
+ if (div != 0) {
+ div--;
+ if (div > 3)
+ div = 3;
+ }
/* mute the audio FIFO: */
reg_set(encoder, REG_AIP_CNTRL_0, AIP_CNTRL_0_RST_FIFO);
@@ -913,7 +918,7 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
if (priv->rev == TDA19988) {
/* let incoming pixels fill the active space (if any) */
- reg_write(encoder, REG_ENABLE_SPACE, 0x01);
+ reg_write(encoder, REG_ENABLE_SPACE, 0x00);
}
/* must be last register set: */
@@ -1094,6 +1099,8 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
{
struct tda998x_priv *priv = to_tda998x_priv(encoder);
drm_i2c_encoder_destroy(encoder);
+ if (priv->cec)
+ i2c_unregister_device(priv->cec);
kfree(priv);
}
@@ -1142,8 +1149,12 @@ tda998x_encoder_init(struct i2c_client *client,
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(4) | VIP_CNTRL_2_SWAP_F(5);
- priv->current_page = 0;
+ priv->current_page = 0xff;
priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ if (!priv->cec) {
+ kfree(priv);
+ return -ENODEV;
+ }
priv->dpms = DRM_MODE_DPMS_OFF;
encoder_slave->slave_priv = priv;
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 04f1f02c4019..ec7bb0fc71bc 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
void intel_detect_pch(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct pci_dev *pch;
+ struct pci_dev *pch = NULL;
/* In all current cases, num_pipes is equivalent to the PCH_NOP setting
* (which really amounts to a PCH but no South Display).
@@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev)
* all the ISA bridge devices and check for the first match, instead
* of only checking the first one.
*/
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
- while (pch) {
- struct pci_dev *curr = pch;
+ while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) {
if (pch->vendor == PCI_VENDOR_ID_INTEL) {
- unsigned short id;
- id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
+ unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK;
dev_priv->pch_id = id;
if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) {
@@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev)
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
WARN_ON(!IS_HASWELL(dev));
WARN_ON(!IS_ULT(dev));
- } else {
- goto check_next;
- }
- pci_dev_put(pch);
+ } else
+ continue;
+
break;
}
-check_next:
- pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr);
- pci_dev_put(curr);
}
if (!pch)
- DRM_DEBUG_KMS("No PCH found?\n");
+ DRM_DEBUG_KMS("No PCH found.\n");
+
+ pci_dev_put(pch);
}
bool i915_semaphore_is_enabled(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 4a2bf8e3f739..df77e20e3c3d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1831,6 +1831,14 @@ struct drm_i915_file_private {
/* Early gen2 have a totally busted CS tlb and require pinned batches. */
#define HAS_BROKEN_CS_TLB(dev) (IS_I830(dev) || IS_845G(dev))
+/*
+ * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
+ * even when in MSI mode. This results in spurious interrupt warnings if the
+ * legacy irq no. is shared with another device. The kernel then disables that
+ * interrupt source and so prevents the other device from working properly.
+ */
+#define HAS_AUX_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
+#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
* rows, which changed the alignment requirements and fence programming.
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 1a24e84f2315..28d24caa49f3 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size,
"Graphics Stolen Memory");
if (r == NULL) {
- DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
- base, base + (uint32_t)dev_priv->gtt.stolen_size);
- base = 0;
+ /*
+ * One more attempt but this time requesting region from
+ * base + 1, as we have seen that this resolves the region
+ * conflict with the PCI Bus.
+ * This is a BIOS w/a: Some BIOS wrap stolen in the root
+ * PCI bus, but have an off-by-one error. Hence retry the
+ * reservation starting from 1 instead of 0.
+ */
+ r = devm_request_mem_region(dev->dev, base + 1,
+ dev_priv->gtt.stolen_size - 1,
+ "Graphics Stolen Memory");
+ if (r == NULL) {
+ DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n",
+ base, base + (uint32_t)dev_priv->gtt.stolen_size);
+ base = 0;
+ }
}
return base;
@@ -201,6 +214,13 @@ int i915_gem_init_stolen(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int bios_reserved = 0;
+#ifdef CONFIG_INTEL_IOMMU
+ if (intel_iommu_gfx_mapped) {
+ DRM_INFO("DMAR active, disabling use of stolen memory\n");
+ return 0;
+ }
+#endif
+
if (dev_priv->gtt.stolen_size == 0)
return 0;
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index d7fd2fd2f0a5..990cf8f43efd 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -146,7 +146,10 @@ static void i915_error_vprintf(struct drm_i915_error_state_buf *e,
va_list tmp;
va_copy(tmp, args);
- if (!__i915_error_seek(e, vsnprintf(NULL, 0, f, tmp)))
+ len = vsnprintf(NULL, 0, f, tmp);
+ va_end(tmp);
+
+ if (!__i915_error_seek(e, len))
return;
}
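
The i915_error_vprintf fix above pairs the va_copy() with the va_end() it was missing; C requires every va_copy() to be matched before the copy goes out of scope. A minimal, runnable illustration of the measure-without-consuming idiom (vformat_len is a made-up helper):

        #include <stdarg.h>
        #include <stdio.h>

        static int vformat_len(const char *fmt, va_list args)
        {
                va_list tmp;
                int len;

                va_copy(tmp, args);                     /* keep the caller's list intact */
                len = vsnprintf(NULL, 0, fmt, tmp);     /* size only, writes nothing */
                va_end(tmp);                            /* must balance the va_copy */
                return len;
        }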
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 17d8fcb1b6f7..d554169ac592 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -567,8 +567,7 @@ static u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
vbl_start = mode->crtc_vblank_start * mode->crtc_htotal;
} else {
- enum transcoder cpu_transcoder =
- intel_pipe_to_cpu_transcoder(dev_priv, pipe);
+ enum transcoder cpu_transcoder = (enum transcoder) pipe;
u32 htotal;
htotal = ((I915_READ(HTOTAL(cpu_transcoder)) >> 16) & 0x1fff) + 1;
@@ -619,33 +618,25 @@ static u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
/* raw reads, only for fast reads of display block, no need for forcewake etc. */
#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
-#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
static bool ilk_pipe_in_vblank_locked(struct drm_device *dev, enum pipe pipe)
{
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t status;
-
- if (INTEL_INFO(dev)->gen < 7) {
- status = pipe == PIPE_A ?
- DE_PIPEA_VBLANK :
- DE_PIPEB_VBLANK;
+ int reg;
+
+ if (INTEL_INFO(dev)->gen >= 8) {
+ status = GEN8_PIPE_VBLANK;
+ reg = GEN8_DE_PIPE_ISR(pipe);
+ } else if (INTEL_INFO(dev)->gen >= 7) {
+ status = DE_PIPE_VBLANK_IVB(pipe);
+ reg = DEISR;
} else {
- switch (pipe) {
- default:
- case PIPE_A:
- status = DE_PIPEA_VBLANK_IVB;
- break;
- case PIPE_B:
- status = DE_PIPEB_VBLANK_IVB;
- break;
- case PIPE_C:
- status = DE_PIPEC_VBLANK_IVB;
- break;
- }
+ status = DE_PIPE_VBLANK(pipe);
+ reg = DEISR;
}
- return __raw_i915_read32(dev_priv, DEISR) & status;
+ return __raw_i915_read32(dev_priv, reg) & status;
}
static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
@@ -703,7 +694,28 @@ static int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
else
position = __raw_i915_read32(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
- if (HAS_PCH_SPLIT(dev)) {
+ if (HAS_DDI(dev)) {
+ /*
+ * On HSW HDMI outputs there seems to be a 2 line
+ * difference, whereas eDP has the normal 1 line
+ * difference that earlier platforms have. External
+ * DP is unknown. For now just check for the 2 line
+ * difference case on all output types on HSW+.
+ *
+ * This might misinterpret the scanline counter being
+ * one line too far along on eDP, but that's less
+ * dangerous than the alternative since that would lead
+ * the vblank timestamp code astray when it sees a
+ * scanline count before vblank_start during a vblank
+ * interrupt.
+ */
+ in_vbl = ilk_pipe_in_vblank_locked(dev, pipe);
+ if ((in_vbl && (position == vbl_start - 2 ||
+ position == vbl_start - 1)) ||
+ (!in_vbl && (position == vbl_end - 2 ||
+ position == vbl_end - 1)))
+ position = (position + 2) % vtotal;
+ } else if (HAS_PCH_SPLIT(dev)) {
/*
* The scanline counter increments at the leading edge
* of hsync, ie. it completely misses the active portion
@@ -2770,10 +2782,9 @@ static void ibx_irq_postinstall(struct drm_device *dev)
return;
if (HAS_PCH_IBX(dev)) {
- mask = SDE_GMBUS | SDE_AUX_MASK | SDE_TRANSB_FIFO_UNDER |
- SDE_TRANSA_FIFO_UNDER | SDE_POISON;
+ mask = SDE_GMBUS | SDE_AUX_MASK | SDE_POISON;
} else {
- mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT | SDE_ERROR_CPT;
+ mask = SDE_GMBUS_CPT | SDE_AUX_MASK_CPT;
I915_WRITE(SERR_INT, I915_READ(SERR_INT));
}
@@ -2833,20 +2844,19 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE_IVB |
DE_PCH_EVENT_IVB | DE_PLANEC_FLIP_DONE_IVB |
DE_PLANEB_FLIP_DONE_IVB |
- DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB |
- DE_ERR_INT_IVB);
+ DE_PLANEA_FLIP_DONE_IVB | DE_AUX_CHANNEL_A_IVB);
extra_mask = (DE_PIPEC_VBLANK_IVB | DE_PIPEB_VBLANK_IVB |
- DE_PIPEA_VBLANK_IVB);
+ DE_PIPEA_VBLANK_IVB | DE_ERR_INT_IVB);
I915_WRITE(GEN7_ERR_INT, I915_READ(GEN7_ERR_INT));
} else {
display_mask = (DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE |
DE_AUX_CHANNEL_A |
- DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN |
DE_PIPEB_CRC_DONE | DE_PIPEA_CRC_DONE |
DE_POISON);
- extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT;
+ extra_mask = DE_PIPEA_VBLANK | DE_PIPEB_VBLANK | DE_PCU_EVENT |
+ DE_PIPEB_FIFO_UNDERRUN | DE_PIPEA_FIFO_UNDERRUN;
}
dev_priv->irq_mask = ~display_mask;
@@ -2962,9 +2972,9 @@ static void gen8_de_irq_postinstall(struct drm_i915_private *dev_priv)
struct drm_device *dev = dev_priv->dev;
uint32_t de_pipe_masked = GEN8_PIPE_FLIP_DONE |
GEN8_PIPE_CDCLK_CRC_DONE |
- GEN8_PIPE_FIFO_UNDERRUN |
GEN8_DE_PIPE_IRQ_FAULT_ERRORS;
- uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK;
+ uint32_t de_pipe_enables = de_pipe_masked | GEN8_PIPE_VBLANK |
+ GEN8_PIPE_FIFO_UNDERRUN;
int pipe;
dev_priv->de_irq_mask[PIPE_A] = ~de_pipe_masked;
dev_priv->de_irq_mask[PIPE_B] = ~de_pipe_masked;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e06b9e017d6b..234ac5f7bc5a 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1244,6 +1244,7 @@ static void intel_ddi_post_disable(struct intel_encoder *intel_encoder)
if (type == INTEL_OUTPUT_DISPLAYPORT || type == INTEL_OUTPUT_EDP) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_panel_off(intel_dp);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 9fa24347963a..9b8a7c7ea7fc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv,
struct drm_device *dev = dev_priv->dev;
bool cur_state;
- if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
- cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
- else if (IS_845G(dev) || IS_I865G(dev))
+ if (IS_845G(dev) || IS_I865G(dev))
cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE;
- else
+ else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev))
cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE;
+ else
+ cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE;
WARN(cur_state != state,
"cursor on pipe %c assertion failure (expected %s, current %s)\n",
@@ -8586,6 +8586,20 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
if (ring->id == RCS)
len += 6;
+ /*
+ * BSpec MI_DISPLAY_FLIP for IVB:
+ * "The full packet must be contained within the same cache line."
+ *
+ * Currently the LRI+SRM+MI_DISPLAY_FLIP all fit within the same
+ * cacheline, if we ever start emitting more commands before
+ * the MI_DISPLAY_FLIP we may need to first emit everything else,
+ * then do the cacheline alignment, and finally emit the
+ * MI_DISPLAY_FLIP.
+ */
+ ret = intel_ring_cacheline_align(ring);
+ if (ret)
+ goto err_unpin;
+
ret = intel_ring_begin(ring, len);
if (ret)
goto err_unpin;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 5ede4e8e290d..2688f6d64bb9 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -404,7 +404,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
int i, ret, recv_bytes;
uint32_t status;
int try, precharge, clock = 0;
- bool has_aux_irq = true;
+ bool has_aux_irq = HAS_AUX_IRQ(dev);
uint32_t timeout;
/* dp aux is extremely sensitive to irq latency, hence request the
@@ -537,6 +537,7 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
uint8_t msg[20];
int msg_bytes;
uint8_t ack;
+ int retry;
if (WARN_ON(send_bytes > 16))
return -E2BIG;
@@ -548,19 +549,21 @@ intel_dp_aux_native_write(struct intel_dp *intel_dp,
msg[3] = send_bytes - 1;
memcpy(&msg[4], send, send_bytes);
msg_bytes = send_bytes + 4;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
if (ret < 0)
return ret;
ack >>= 4;
if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_ACK)
- break;
+ return send_bytes;
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ usleep_range(400, 500);
else
return -EIO;
}
- return send_bytes;
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
/* Write a single byte to the aux channel in native mode */
@@ -582,6 +585,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
int reply_bytes;
uint8_t ack;
int ret;
+ int retry;
if (WARN_ON(recv_bytes > 19))
return -E2BIG;
@@ -595,7 +599,7 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
msg_bytes = 4;
reply_bytes = recv_bytes + 1;
- for (;;) {
+ for (retry = 0; retry < 7; retry++) {
ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
reply, reply_bytes);
if (ret == 0)
@@ -608,10 +612,13 @@ intel_dp_aux_native_read(struct intel_dp *intel_dp,
return ret - 1;
}
else if ((ack & DP_AUX_NATIVE_REPLY_MASK) == DP_AUX_NATIVE_REPLY_DEFER)
- udelay(100);
+ usleep_range(400, 500);
else
return -EIO;
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EIO;
}
static int
@@ -1242,17 +1249,24 @@ void ironlake_edp_panel_off(struct intel_dp *intel_dp)
DRM_DEBUG_KMS("Turn eDP power off\n");
+ WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");
+
pp = ironlake_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
* panels get very unhappy and cease to work. */
- pp &= ~(POWER_TARGET_ON | PANEL_POWER_RESET | EDP_BLC_ENABLE);
+ pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
I915_WRITE(pp_ctrl_reg, pp);
POSTING_READ(pp_ctrl_reg);
+ intel_dp->want_panel_vdd = false;
+
ironlake_wait_panel_off(intel_dp);
+
+ /* We got a reference when we enabled the VDD. */
+ intel_runtime_pm_put(dev_priv);
}
void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
@@ -1632,7 +1646,7 @@ static void intel_edp_psr_enable_source(struct intel_dp *intel_dp)
val |= EDP_PSR_LINK_DISABLE;
I915_WRITE(EDP_PSR_CTL(dev), val |
- IS_BROADWELL(dev) ? 0 : link_entry_time |
+ (IS_BROADWELL(dev) ? 0 : link_entry_time) |
max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT |
idle_frames << EDP_PSR_IDLE_FRAME_SHIFT |
EDP_PSR_ENABLE);
@@ -1777,6 +1791,7 @@ static void intel_disable_dp(struct intel_encoder *encoder)
/* Make sure the panel is off before trying to change the mode. But also
* ensure that we have vdd while we switch off the panel. */
+ ironlake_edp_panel_vdd_on(intel_dp);
ironlake_edp_backlight_off(intel_dp);
intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_OFF);
ironlake_edp_panel_off(intel_dp);
@@ -1869,10 +1884,12 @@ static void vlv_pre_enable_dp(struct intel_encoder *encoder)
mutex_unlock(&dev_priv->dpio_lock);
- /* init power sequencer on this pipe and port */
- intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
- intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
- &power_seq);
+ if (is_edp(intel_dp)) {
+ /* init power sequencer on this pipe and port */
+ intel_dp_init_panel_power_sequencer(dev, intel_dp, &power_seq);
+ intel_dp_init_panel_power_sequencer_registers(dev, intel_dp,
+ &power_seq);
+ }
intel_enable_dp(encoder);
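
A standalone sketch (not driver code) of the bounded-retry shape this patch gives the native AUX read/write paths: at most 7 attempts, sleeping between DEFER replies instead of looping forever. do_aux_once() below is a stand-in for the real hardware transaction, and the sleep is only noted in a comment.

    #include <stdio.h>

    enum reply { REPLY_ACK, REPLY_DEFER, REPLY_NACK };

    /* stand-in for one AUX transaction; the driver talks to hardware here */
    static enum reply do_aux_once(int attempt)
    {
    	return attempt < 2 ? REPLY_DEFER : REPLY_ACK;
    }

    int main(void)
    {
    	int retry;

    	for (retry = 0; retry < 7; retry++) {
    		enum reply r = do_aux_once(retry);
    		if (r == REPLY_ACK) {
    			printf("ack after %d retries\n", retry);
    			return 0;
    		}
    		if (r != REPLY_DEFER)
    			break;
    		/* the driver sleeps here: usleep_range(400, 500) */
    	}
    	printf("too many retries, giving up\n");
    	return 1;
    }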
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 6db0d9d17f47..ee3181ebcc92 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
{
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
- if (IS_G4X(dev))
+ if (!hdmi->has_hdmi_sink || IS_G4X(dev))
return 165000;
else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8)
return 300000;
@@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
* outputs. We also need to check that the higher clock still fits
* within limits.
*/
- if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
- && HAS_PCH_SPLIT(dev)) {
+ if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink &&
+ clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) {
DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
desired_bpp = 12*3;
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index b1dc33f47899..d33b61d0dd33 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -258,13 +258,6 @@ intel_gpio_setup(struct intel_gmbus *bus, u32 pin)
algo->data = bus;
}
-/*
- * gmbus on gen4 seems to be able to generate legacy interrupts even when in MSI
- * mode. This results in spurious interrupt warnings if the legacy irq no. is
- * shared with another device. The kernel then disables that interrupt source
- * and so prevents the other device from working properly.
- */
-#define HAS_GMBUS_IRQ(dev) (INTEL_INFO(dev)->gen >= 5)
static int
gmbus_wait_hw_status(struct drm_i915_private *dev_priv,
u32 gmbus2_status,
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 4e960ec7419f..acde2945eb8a 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -226,6 +226,8 @@ struct opregion_asle {
#define ACPI_DIGITAL_OUTPUT (3<<8)
#define ACPI_LVDS_OUTPUT (4<<8)
+#define MAX_DSLP 1500
+
#ifdef CONFIG_ACPI
static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
{
@@ -260,10 +262,11 @@ static int swsci(struct drm_device *dev, u32 function, u32 parm, u32 *parm_out)
/* The spec says 2ms should be the default, but it's too small
* for some machines. */
dslp = 50;
- } else if (dslp > 500) {
+ } else if (dslp > MAX_DSLP) {
/* Hey bios, trust must be earned. */
- WARN_ONCE(1, "excessive driver sleep timeout (DSPL) %u\n", dslp);
- dslp = 500;
+ DRM_INFO_ONCE("ACPI BIOS requests an excessive sleep of %u ms, "
+ "using %u ms instead\n", dslp, MAX_DSLP);
+ dslp = MAX_DSLP;
}
/* The spec tells us to do this, but we are the only user... */
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 350de359123a..079ea38f14d9 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector)
freq /= 0xff;
ctl = freq << 17;
- if (IS_GEN2(dev) && panel->backlight.combination_mode)
+ if (panel->backlight.combination_mode)
ctl |= BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
@@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector)
ctl = I915_READ(BLC_PWM_CTL);
- if (IS_GEN2(dev))
+ if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
if (IS_PINEVIEW(dev))
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d77cc81900f9..e1fc35a72656 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev)
u32 pcbr;
int pctx_size = 24*1024;
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
pcbr = I915_READ(VLV_PCBR);
if (pcbr) {
/* BIOS set it up already, grab the pre-alloc'd space */
@@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev)
I915_WRITE(GTFIFODBG, gtfifodbg);
}
- valleyview_setup_pctx(dev);
-
/* If VLV, Forcewake all wells, else re-direct to regular path */
gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
@@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev)
ironlake_enable_rc6(dev);
intel_init_emon(dev);
} else if (IS_GEN6(dev) || IS_GEN7(dev)) {
+ if (IS_VALLEYVIEW(dev))
+ valleyview_setup_pctx(dev);
/*
* PCU communication is slow and this doesn't need to be
* done at any specific time, so do this out of our fast path
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b7f1742caf87..31b36c5ac894 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1653,6 +1653,27 @@ int intel_ring_begin(struct intel_ring_buffer *ring,
return 0;
}
+/* Align the ring tail to a cacheline boundary */
+int intel_ring_cacheline_align(struct intel_ring_buffer *ring)
+{
+ int num_dwords = (64 - (ring->tail & 63)) / sizeof(uint32_t);
+ int ret;
+
+ if (num_dwords == 0)
+ return 0;
+
+ ret = intel_ring_begin(ring, num_dwords);
+ if (ret)
+ return ret;
+
+ while (num_dwords--)
+ intel_ring_emit(ring, MI_NOOP);
+
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
struct drm_i915_private *dev_priv = ring->dev->dev_private;
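
As a standalone illustration of the padding arithmetic used by the new intel_ring_cacheline_align() above (this sketch is not part of the patch): with 64-byte cachelines and 4-byte ring dwords, the number of MI_NOOPs emitted is (64 - (tail & 63)) / 4. The pad_dwords() helper below is hypothetical and only mirrors that expression.

    #include <assert.h>
    #include <stdint.h>

    /* dwords needed to pad 'tail' up to the next 64-byte boundary,
     * using the same expression as intel_ring_cacheline_align() */
    static int pad_dwords(uint32_t tail)
    {
    	return (64 - (tail & 63)) / sizeof(uint32_t);
    }

    int main(void)
    {
    	assert(pad_dwords(0x104) == 15); /* 4 bytes into a line -> 60/4 */
    	assert(pad_dwords(0x13c) == 1);  /* 4 bytes short of the boundary */
    	assert(pad_dwords(0x140) == 16); /* an aligned tail still pads a full line */
    	return 0;
    }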
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 71a73f4fe252..0b243ce33714 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -233,6 +233,7 @@ intel_write_status_page(struct intel_ring_buffer *ring,
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_cacheline_align(struct intel_ring_buffer *ring);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
u32 data)
{
diff --git a/drivers/gpu/drm/mgag200/mgag200_fb.c b/drivers/gpu/drm/mgag200/mgag200_fb.c
index f9adc27ef32a..13b7dd83faa9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_fb.c
+++ b/drivers/gpu/drm/mgag200/mgag200_fb.c
@@ -41,7 +41,7 @@ static void mga_dirty_update(struct mga_fbdev *mfbdev,
* then the BO is being moved and we should
* store up the damage until later.
*/
- if (!drm_can_sleep())
+ if (drm_can_sleep())
ret = mgag200_bo_reserve(bo, true);
if (ret) {
if (ret != -EBUSY)
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index b8583f275e80..968374776db9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -1519,11 +1519,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (32700 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_EH &&
+ } else if (mdev->type == G200_EH &&
(mga_vga_calculate_mode_bandwidth(mode, bpp)
> (37500 * 1024))) {
return MODE_BANDWIDTH;
- } else if (mode->type == G200_ER &&
+ } else if (mdev->type == G200_ER &&
(mga_vga_calculate_mode_bandwidth(mode,
bpp) > (55000 * 1024))) {
return MODE_BANDWIDTH;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 1964f4f0d452..84c5b13b33c9 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -39,6 +39,7 @@ struct mdp4_crtc {
spinlock_t lock;
bool stale;
uint32_t width, height;
+ uint32_t x, y;
/* next cursor to scan-out: */
uint32_t next_iova;
@@ -57,9 +58,16 @@ struct mdp4_crtc {
#define PENDING_FLIP 0x2
atomic_t pending;
- /* the fb that we currently hold a scanout ref to: */
+ /* the fb that we logically (from the PoV of the KMS API) hold a
+ * ref to, which we may not yet be scanning out (we may still be
+ * scanning out the previous fb after a page_flip, while waiting
+ * for gpu rendering to complete):
+ */
struct drm_framebuffer *fb;
+ /* the fb that we currently hold a scanout ref to: */
+ struct drm_framebuffer *scanout_fb;
+
/* for unref'ing framebuffers after scanout completes: */
struct drm_flip_work unref_fb_work;
@@ -77,24 +85,73 @@ static struct mdp4_kms *get_kms(struct drm_crtc *crtc)
return to_mdp4_kms(to_mdp_kms(priv->kms));
}
-static void update_fb(struct drm_crtc *crtc, bool async,
- struct drm_framebuffer *new_fb)
+static void request_pending(struct drm_crtc *crtc, uint32_t pending)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_framebuffer *old_fb = mdp4_crtc->fb;
- if (old_fb)
- drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+ atomic_or(pending, &mdp4_crtc->pending);
+ mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
+}
+
+static void crtc_flush(struct drm_crtc *crtc)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
+ uint32_t i, flush = 0;
+
+ for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
+ struct drm_plane *plane = mdp4_crtc->planes[i];
+ if (plane) {
+ enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
+ flush |= pipe2flush(pipe_id);
+ }
+ }
+ flush |= ovlp2flush(mdp4_crtc->ovlp);
+
+ DBG("%s: flush=%08x", mdp4_crtc->name, flush);
+
+ mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
+}
+
+static void update_fb(struct drm_crtc *crtc, struct drm_framebuffer *new_fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct drm_framebuffer *old_fb = mdp4_crtc->fb;
/* grab reference to incoming scanout fb: */
drm_framebuffer_reference(new_fb);
mdp4_crtc->base.fb = new_fb;
mdp4_crtc->fb = new_fb;
- if (!async) {
- /* enable vblank to pick up the old_fb */
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
- }
+ if (old_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work, old_fb);
+}
+
+/* unlike update_fb(), take a ref to the new scanout fb *before* updating
+ * the plane, then call this. Needed to ensure we don't unref the buffer that
+ * is actually still being scanned out.
+ *
+ * Note that this whole thing goes away with atomic.. since we can defer
+ * calling into driver until rendering is done.
+ */
+static void update_scanout(struct drm_crtc *crtc, struct drm_framebuffer *fb)
+{
+ struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+
+ /* flush updates, to make sure hw is updated to new scanout fb,
+ * so that we can safely queue unref to current fb (ie. next
+ * vblank we know hw is done w/ previous scanout_fb).
+ */
+ crtc_flush(crtc);
+
+ if (mdp4_crtc->scanout_fb)
+ drm_flip_work_queue(&mdp4_crtc->unref_fb_work,
+ mdp4_crtc->scanout_fb);
+
+ mdp4_crtc->scanout_fb = fb;
+
+ /* enable vblank to complete flip: */
+ request_pending(crtc, PENDING_FLIP);
}
/* if file!=NULL, this is preclose potential cancel-flip path */
@@ -120,34 +177,6 @@ static void complete_flip(struct drm_crtc *crtc, struct drm_file *file)
spin_unlock_irqrestore(&dev->event_lock, flags);
}
-static void crtc_flush(struct drm_crtc *crtc)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- uint32_t i, flush = 0;
-
- for (i = 0; i < ARRAY_SIZE(mdp4_crtc->planes); i++) {
- struct drm_plane *plane = mdp4_crtc->planes[i];
- if (plane) {
- enum mdp4_pipe pipe_id = mdp4_plane_pipe(plane);
- flush |= pipe2flush(pipe_id);
- }
- }
- flush |= ovlp2flush(mdp4_crtc->ovlp);
-
- DBG("%s: flush=%08x", mdp4_crtc->name, flush);
-
- mdp4_write(mdp4_kms, REG_MDP4_OVERLAY_FLUSH, flush);
-}
-
-static void request_pending(struct drm_crtc *crtc, uint32_t pending)
-{
- struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
-
- atomic_or(pending, &mdp4_crtc->pending);
- mdp_irq_register(&get_kms(crtc)->base, &mdp4_crtc->vblank);
-}
-
static void pageflip_cb(struct msm_fence_cb *cb)
{
struct mdp4_crtc *mdp4_crtc =
@@ -158,11 +187,9 @@ static void pageflip_cb(struct msm_fence_cb *cb)
if (!fb)
return;
+ drm_framebuffer_reference(fb);
mdp4_plane_set_scanout(mdp4_crtc->plane, fb);
- crtc_flush(crtc);
-
- /* enable vblank to complete flip: */
- request_pending(crtc, PENDING_FLIP);
+ update_scanout(crtc, fb);
}
static void unref_fb_worker(struct drm_flip_work *work, void *val)
@@ -320,6 +347,20 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
+
+ ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
+ 0, 0, mode->hdisplay, mode->vdisplay,
+ x << 16, y << 16,
+ mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
+ mdp4_crtc->name, ret);
+ return ret;
+ }
+
mdp4_write(mdp4_kms, REG_MDP4_DMA_SRC_SIZE(dma),
MDP4_DMA_SRC_SIZE_WIDTH(mode->hdisplay) |
MDP4_DMA_SRC_SIZE_HEIGHT(mode->vdisplay));
@@ -341,24 +382,15 @@ static int mdp4_crtc_mode_set(struct drm_crtc *crtc,
mdp4_write(mdp4_kms, REG_MDP4_OVLP_CFG(ovlp), 1);
- update_fb(crtc, false, crtc->fb);
-
- ret = mdp4_plane_mode_set(mdp4_crtc->plane, crtc, crtc->fb,
- 0, 0, mode->hdisplay, mode->vdisplay,
- x << 16, y << 16,
- mode->hdisplay << 16, mode->vdisplay << 16);
- if (ret) {
- dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
- mdp4_crtc->name, ret);
- return ret;
- }
-
if (dma == DMA_E) {
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(0), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(1), 0x00ff0000);
mdp4_write(mdp4_kms, REG_MDP4_DMA_E_QUANT(2), 0x00ff0000);
}
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
return 0;
}
@@ -385,13 +417,24 @@ static int mdp4_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
struct drm_plane *plane = mdp4_crtc->plane;
struct drm_display_mode *mode = &crtc->mode;
+ int ret;
- update_fb(crtc, false, crtc->fb);
+ /* grab extra ref for update_scanout() */
+ drm_framebuffer_reference(crtc->fb);
- return mdp4_plane_mode_set(plane, crtc, crtc->fb,
+ ret = mdp4_plane_mode_set(plane, crtc, crtc->fb,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
+
+ update_fb(crtc, crtc->fb);
+ update_scanout(crtc, crtc->fb);
+
+ return 0;
}
static void mdp4_crtc_load_lut(struct drm_crtc *crtc)
@@ -419,7 +462,7 @@ static int mdp4_crtc_page_flip(struct drm_crtc *crtc,
mdp4_crtc->event = event;
spin_unlock_irqrestore(&dev->event_lock, flags);
- update_fb(crtc, true, new_fb);
+ update_fb(crtc, new_fb);
return msm_gem_queue_inactive_cb(obj, &mdp4_crtc->pageflip_cb);
}
@@ -442,12 +485,12 @@ static int mdp4_crtc_set_property(struct drm_crtc *crtc,
static void update_cursor(struct drm_crtc *crtc)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
+ struct mdp4_kms *mdp4_kms = get_kms(crtc);
enum mdp4_dma dma = mdp4_crtc->dma;
unsigned long flags;
spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
if (mdp4_crtc->cursor.stale) {
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
uint32_t iova = mdp4_crtc->cursor.next_iova;
@@ -479,6 +522,11 @@ static void update_cursor(struct drm_crtc *crtc)
mdp4_crtc->cursor.scanout_bo = next_bo;
mdp4_crtc->cursor.stale = false;
}
+
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
+ MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
+ MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));
+
spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
@@ -530,6 +578,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
drm_gem_object_unreference_unlocked(old_bo);
}
+ crtc_flush(crtc);
request_pending(crtc, PENDING_CURSOR);
return 0;
@@ -542,12 +591,15 @@ fail:
static int mdp4_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct mdp4_kms *mdp4_kms = get_kms(crtc);
- enum mdp4_dma dma = mdp4_crtc->dma;
+ unsigned long flags;
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
- MDP4_DMA_CURSOR_POS_X(x) |
- MDP4_DMA_CURSOR_POS_Y(y));
+ spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
+ mdp4_crtc->cursor.x = x;
+ mdp4_crtc->cursor.y = y;
+ spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
+
+ crtc_flush(crtc);
+ request_pending(crtc, PENDING_CURSOR);
return 0;
}
@@ -713,6 +765,7 @@ struct drm_crtc *mdp4_crtc_init(struct drm_device *dev,
crtc = &mdp4_crtc->base;
mdp4_crtc->plane = plane;
+ mdp4_crtc->id = id;
mdp4_crtc->ovlp = ovlp_id;
mdp4_crtc->dma = dma_id;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 2406027200ec..1e893dd13859 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -170,8 +170,8 @@ int mdp4_plane_mode_set(struct drm_plane *plane,
MDP4_PIPE_DST_SIZE_HEIGHT(crtc_h));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_DST_XY(pipe),
- MDP4_PIPE_SRC_XY_X(crtc_x) |
- MDP4_PIPE_SRC_XY_Y(crtc_y));
+ MDP4_PIPE_DST_XY_X(crtc_x) |
+ MDP4_PIPE_DST_XY_Y(crtc_y));
mdp4_plane_set_scanout(plane, fb);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 71a3b2345eb3..f2794021f086 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -296,6 +296,7 @@ static int mdp5_crtc_mode_set(struct drm_crtc *crtc,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
dev_err(crtc->dev->dev, "%s: failed to set mode on plane: %d\n",
mdp5_crtc->name, ret);
return ret;
@@ -343,11 +344,15 @@ static int mdp5_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
0, 0, mode->hdisplay, mode->vdisplay,
x << 16, y << 16,
mode->hdisplay << 16, mode->vdisplay << 16);
+ if (ret) {
+ drm_framebuffer_unreference(crtc->fb);
+ return ret;
+ }
update_fb(crtc, crtc->fb);
update_scanout(crtc, crtc->fb);
- return ret;
+ return 0;
}
static void mdp5_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8d60c969ac7..3da8264d3039 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -644,7 +644,7 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
fail:
if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference(obj);
return ERR_PTR(ret);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 5281d4bc37f7..5423e914e491 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -163,7 +163,7 @@ retry:
/* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova(&msm_obj->base,
+ ret = msm_gem_get_iova_locked(&msm_obj->base,
submit->gpu->id, &iova);
/* this would break the logic in the fail path.. there is no
@@ -247,7 +247,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr(&obj->base);
+ ptr = msm_gem_vaddr_locked(&obj->base);
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -307,14 +307,12 @@ static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
{
unsigned i;
- mutex_lock(&submit->dev->struct_mutex);
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
submit_unlock_unpin_bo(submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
- mutex_unlock(&submit->dev->struct_mutex);
ww_acquire_fini(&submit->ticket);
kfree(submit);
@@ -342,6 +340,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
+ mutex_lock(&dev->struct_mutex);
+
submit = submit_create(dev, gpu, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
@@ -410,5 +410,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
out:
if (submit)
submit_cleanup(submit, !!ret);
+ mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 4ebce8be489d..0cfe3f426ee4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -298,8 +298,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_drm_private *priv = dev->dev_private;
int i, ret;
- mutex_lock(&dev->struct_mutex);
-
submit->fence = ++priv->next_fence;
gpu->submitted_fence = submit->fence;
@@ -331,7 +329,6 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
hangcheck_timer_reset(gpu);
- mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index e88145ba1bf5..d310c195bdfe 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -141,6 +141,7 @@ nouveau-y += core/subdev/mc/base.o
nouveau-y += core/subdev/mc/nv04.o
nouveau-y += core/subdev/mc/nv40.o
nouveau-y += core/subdev/mc/nv44.o
+nouveau-y += core/subdev/mc/nv4c.o
nouveau-y += core/subdev/mc/nv50.o
nouveau-y += core/subdev/mc/nv94.o
nouveau-y += core/subdev/mc/nv98.o
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
index 1b653dd74a70..08b88591ed60 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nv40.c
@@ -311,7 +311,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -334,7 +334,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv4e_fb_oclass;
@@ -357,7 +357,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -380,7 +380,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
@@ -403,7 +403,7 @@ nv40_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_CLOCK ] = &nv40_clock_oclass;
device->oclass[NVDEV_SUBDEV_THERM ] = &nv40_therm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = nv1a_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nv44_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nv4c_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nv31_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nv46_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 940eaa5d8b9a..9ad722e4e087 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1142,7 +1142,7 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
if (conf != ~0) {
if (outp.location == 0 && outp.type == DCB_OUTPUT_DP) {
u32 soff = (ffs(outp.or) - 1) * 0x08;
- u32 ctrl = nv_rd32(priv, 0x610798 + soff);
+ u32 ctrl = nv_rd32(priv, 0x610794 + soff);
u32 datarate;
switch ((ctrl & 0x000f0000) >> 16) {
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index 9a850fe19515..54c1b5b471cd 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -112,7 +112,7 @@ nve0_fifo_runlist_update(struct nve0_fifo_priv *priv, u32 engine)
nv_wr32(priv, 0x002270, cur->addr >> 12);
nv_wr32(priv, 0x002274, (engine << 20) | (p >> 3));
- if (!nv_wait(priv, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
+ if (!nv_wait(priv, 0x002284 + (engine * 8), 0x00100000, 0x00000000))
nv_error(priv, "runlist %d update timeout\n", engine);
mutex_unlock(&nv_subdev(priv)->mutex);
}
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
index 30ed19c52e05..7a367c402978 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
@@ -539,7 +539,7 @@ nv50_priv_tp_trap(struct nv50_graph_priv *priv, int type, u32 ustatus_old,
ustatus &= ~0x04030000;
}
if (ustatus && display) {
- nv_error("%s - TP%d:", name, i);
+ nv_error(priv, "%s - TP%d:", name, i);
nouveau_bitfield_print(nv50_mpc_traps, ustatus);
pr_cont("\n");
ustatus = 0;
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index adc88b73d911..3c6738edd127 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -47,6 +47,7 @@ struct nouveau_mc_oclass {
extern struct nouveau_oclass *nv04_mc_oclass;
extern struct nouveau_oclass *nv40_mc_oclass;
extern struct nouveau_oclass *nv44_mc_oclass;
+extern struct nouveau_oclass *nv4c_mc_oclass;
extern struct nouveau_oclass *nv50_mc_oclass;
extern struct nouveau_oclass *nv94_mc_oclass;
extern struct nouveau_oclass *nv98_mc_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
index aa0fbbec7f08..ef0c9c4a8cc3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/base.c
@@ -130,6 +130,10 @@ nouveau_bios_shadow_prom(struct nouveau_bios *bios)
u16 pcir;
int i;
+ /* there is no prom on nv4x IGP's */
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ return;
+
/* enable access to rom */
if (device->card_type >= NV_50)
pcireg = 0x088050;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
index 9159a5ccee93..265d1253624a 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/nv1a.c
@@ -36,7 +36,7 @@ nv1a_fb_oclass = &(struct nv04_fb_impl) {
.fini = _nouveau_fb_fini,
},
.base.memtype = nv04_fb_memtype_valid,
- .base.ram = &nv10_ram_oclass,
+ .base.ram = &nv1a_ram_oclass,
.tile.regions = 8,
.tile.init = nv10_fb_tile_init,
.tile.fini = nv10_fb_tile_fini,
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
index b0d5c31606c1..81a408e7d034 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.h
@@ -14,6 +14,7 @@ int nv04_mc_ctor(struct nouveau_object *, struct nouveau_object *,
extern const struct nouveau_mc_intr nv04_mc_intr[];
int nv04_mc_init(struct nouveau_object *);
void nv40_mc_msi_rearm(struct nouveau_mc *);
+int nv44_mc_init(struct nouveau_object *object);
int nv50_mc_init(struct nouveau_object *);
extern const struct nouveau_mc_intr nv50_mc_intr[];
extern const struct nouveau_mc_intr nvc0_mc_intr[];
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
index 3bfee5c6c4f2..cc4d0d2d886e 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c
@@ -24,7 +24,7 @@
#include "nv04.h"
-static int
+int
nv44_mc_init(struct nouveau_object *object)
{
struct nv04_mc_priv *priv = (void *)object;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
new file mode 100644
index 000000000000..a75c35ccf25c
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright 2014 Ilia Mirkin
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ilia Mirkin
+ */
+
+#include "nv04.h"
+
+static void
+nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
+{
+ struct nv04_mc_priv *priv = (void *)pmc;
+ nv_wr08(priv, 0x088050, 0xff);
+}
+
+struct nouveau_oclass *
+nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
+ .base.handle = NV_SUBDEV(MC, 0x4c),
+ .base.ofuncs = &(struct nouveau_ofuncs) {
+ .ctor = nv04_mc_ctor,
+ .dtor = _nouveau_mc_dtor,
+ .init = nv44_mc_init,
+ .fini = _nouveau_mc_fini,
+ },
+ .intr = nv04_mc_intr,
+ .msi_rearm = nv4c_mc_msi_rearm,
+}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 4ef83df2b246..83face3f608f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -106,6 +106,29 @@ static int nouveau_optimus_dsm(acpi_handle handle, int func, int arg, uint32_t *
return 0;
}
+/*
+ * On some platforms, _DSM(nouveau_op_dsm_muid, func0) has special
+ * requirements on the fourth parameter, so we use a private
+ * implementation instead of acpi_check_dsm().
+ */
+static int nouveau_check_optimus_dsm(acpi_handle handle)
+{
+ int result;
+
+ /*
+ * Function 0 returns a Buffer containing available functions.
+ * The args parameter is ignored for function 0, so just put 0 in it
+ */
+ if (nouveau_optimus_dsm(handle, 0, 0, &result))
+ return 0;
+
+ /*
+ * ACPI Spec v4 9.14.1: if bit 0 is zero, no function is supported.
+ * If the n-th bit is enabled, function n is supported
+ */
+ return result & 1 && result & (1 << NOUVEAU_DSM_OPTIMUS_CAPS);
+}
+
static int nouveau_dsm(acpi_handle handle, int func, int arg)
{
int ret = 0;
@@ -207,8 +230,7 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
1 << NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
- if (acpi_check_dsm(dhandle, nouveau_op_dsm_muid, 0x00000100,
- 1 << NOUVEAU_DSM_OPTIMUS_CAPS))
+ if (nouveau_check_optimus_dsm(dhandle))
retval |= NOUVEAU_DSM_HAS_OPT;
if (retval & NOUVEAU_DSM_HAS_OPT) {
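
A standalone sketch of the capability test the new nouveau_check_optimus_dsm() applies to the buffer returned by _DSM function 0 (not driver code; the function index 0x1a and the dsm_supports() helper are illustrative assumptions): bit 0 must be set, and so must the bit for the Optimus caps function.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define OPTIMUS_CAPS_FUNC 0x1a /* assumed value of NOUVEAU_DSM_OPTIMUS_CAPS */

    /* same shape as the check in nouveau_check_optimus_dsm(): function 0
     * reports support and the requested function's bit is set */
    static bool dsm_supports(uint32_t funcs, int func)
    {
    	return (funcs & 1) && (funcs & (1u << func));
    }

    int main(void)
    {
    	printf("%d\n", dsm_supports(0x04000001, OPTIMUS_CAPS_FUNC)); /* 1 */
    	printf("%d\n", dsm_supports(0x04000000, OPTIMUS_CAPS_FUNC)); /* 0: bit 0 clear */
    	return 0;
    }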
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 488686d490c0..4aed1714b9ab 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1249,7 +1249,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- if (!node->memtype)
+ if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
/* untiled */
break;
/* fallthrough, tiled memory */
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 78c8e7146d56..89c484d8ac26 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -376,6 +376,8 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail_device;
+ dev->irq_enabled = true;
+
/* workaround an odd issue on nvc1 by disabling the device's
* nosnoop capability. hopefully won't cause issues until a
* better fix is found - assuming there is one...
@@ -475,6 +477,7 @@ nouveau_drm_remove(struct pci_dev *pdev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_object *device;
+ dev->irq_enabled = false;
device = drm->client.base.device;
drm_put_dev(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index 81638d7f2eff..471347edc27e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -14,7 +14,9 @@ nouveau_vga_set_decode(void *priv, bool state)
{
struct nouveau_device *device = nouveau_dev(priv);
- if (device->chipset >= 0x40)
+ if (device->card_type == NV_40 && device->chipset >= 0x4c)
+ nv_wr32(device, 0x088060, state);
+ else if (device->chipset >= 0x40)
nv_wr32(device, 0x088054, state);
else
nv_wr32(device, 0x001854, state);
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a9338c85630f..daa4dd375ab1 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -559,7 +559,7 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
u32 adjusted_clock = mode->clock;
int encoder_mode = atombios_get_encoder_mode(encoder);
u32 dp_clock = mode->clock;
- int bpc = radeon_get_monitor_bpc(connector);
+ int bpc = radeon_crtc->bpc;
bool is_duallink = radeon_dig_monitor_is_duallink(encoder, mode->clock);
/* reset the pll flags */
@@ -1176,7 +1176,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
/* Set NUM_BANKS. */
- if (rdev->family >= CHIP_BONAIRE) {
+ if (rdev->family >= CHIP_TAHITI) {
unsigned tileb, index, num_banks, tile_split_bytes;
/* Calculate the macrotile mode index. */
@@ -1194,13 +1194,14 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
return -EINVAL;
}
- num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ if (rdev->family >= CHIP_BONAIRE)
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ else
+ num_banks = (rdev->config.si.tile_mode_array[index] >> 20) & 0x3;
fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
} else {
- /* SI and older. */
- if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
+ /* NI and older. */
+ if (rdev->family >= CHIP_CAYMAN)
tmp = rdev->config.cayman.tile_config;
else
tmp = rdev->config.evergreen.tile_config;
@@ -1773,6 +1774,20 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return ATOM_PPLL1;
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
+ } else if (ASIC_IS_DCE41(rdev)) {
+ /* Don't share PLLs on DCE4.1 chips */
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(radeon_crtc->encoder))) {
+ if (rdev->clock.dp_extclk)
+ /* skip PPLL programming if using ext clock */
+ return ATOM_PPLL_INVALID;
+ }
+ pll_in_use = radeon_get_pll_use_mask(crtc);
+ if (!(pll_in_use & (1 << ATOM_PPLL1)))
+ return ATOM_PPLL1;
+ if (!(pll_in_use & (1 << ATOM_PPLL2)))
+ return ATOM_PPLL2;
+ DRM_ERROR("unable to allocate a PPLL\n");
+ return ATOM_PPLL_INVALID;
} else if (ASIC_IS_DCE4(rdev)) {
/* in DP mode, the DP ref clock can come from PPLL, DCPLL, or ext clock,
* depending on the asic:
@@ -1800,7 +1815,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
if (pll != ATOM_PPLL_INVALID)
return pll;
}
- } else if (!ASIC_IS_DCE41(rdev)) { /* Don't share PLLs on DCE4.1 chips */
+ } else {
/* use the same PPLL for all monitors with the same clock */
pll = radeon_get_shared_nondp_ppll(crtc);
if (pll != ATOM_PPLL_INVALID)
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index a42d61571f49..607dc14d195e 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -464,11 +464,12 @@ atombios_tv_setup(struct drm_encoder *encoder, int action)
static u8 radeon_atom_get_bpc(struct drm_encoder *encoder)
{
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int bpc = 8;
- if (connector)
- bpc = radeon_get_monitor_bpc(connector);
+ if (encoder->crtc) {
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
+ bpc = radeon_crtc->bpc;
+ }
switch (bpc) {
case 0:
@@ -1313,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
if (is_dp)
args.v5.ucLaneNum = dp_lane_count;
- else if (radeon_encoder->pixel_clock > 165000)
+ else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock))
args.v5.ucLaneNum = 8;
else
args.v5.ucLaneNum = 4;
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 0fbd36f3d4e9..ea103ccdf4bd 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -29,6 +29,7 @@
#include "cypress_dpm.h"
#include "btc_dpm.h"
#include "atom.h"
+#include <linux/seq_file.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -2756,6 +2757,37 @@ void btc_dpm_fini(struct radeon_device *rdev)
r600_free_extended_power_table(rdev);
}
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m)
+{
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
+ struct rv7xx_ps *ps = rv770_get_ps(rps);
+ struct rv7xx_pl *pl;
+ u32 current_index =
+ (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
+ CURRENT_PROFILE_INDEX_SHIFT;
+
+ if (current_index > 2) {
+ seq_printf(m, "invalid dpm profile %d\n", current_index);
+ } else {
+ if (current_index == 0)
+ pl = &ps->low;
+ else if (current_index == 1)
+ pl = &ps->medium;
+ else /* current_index == 2 */
+ pl = &ps->high;
+ seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
+ if (rdev->family >= CHIP_CEDAR) {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
+ } else {
+ seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
+ current_index, pl->sclk, pl->mclk, pl->vddc);
+ }
+ }
+}
+
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low)
{
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
diff --git a/drivers/gpu/drm/radeon/btcd.h b/drivers/gpu/drm/radeon/btcd.h
index 29e32de7e025..9c65be2d55a9 100644
--- a/drivers/gpu/drm/radeon/btcd.h
+++ b/drivers/gpu/drm/radeon/btcd.h
@@ -44,6 +44,10 @@
# define DYN_SPREAD_SPECTRUM_EN (1 << 23)
# define AC_DC_SW (1 << 24)
+#define TARGET_AND_CURRENT_PROFILE_INDEX 0x66c
+# define CURRENT_PROFILE_INDEX_MASK (0xf << 4)
+# define CURRENT_PROFILE_INDEX_SHIFT 4
+
#define CG_BIF_REQ_AND_RSP 0x7f4
#define CG_CLIENT_REQ(x) ((x) << 0)
#define CG_CLIENT_REQ_MASK (0xff << 0)
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index e6419ca7cd37..bbb17841a9e5 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width)
}
/**
- * cik_select_se_sh - select which SE, SH to address
+ * cik_get_rb_disabled - computes the mask of disabled RBs
*
* @rdev: radeon_device pointer
* @max_rb_num: max RBs (render backends) for the asic
@@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable)
{
if (enable)
WREG32(CP_MEC_CNTL, 0);
- else
+ else {
WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT));
+ rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
+ }
udelay(50);
}
@@ -7902,7 +7905,8 @@ int cik_resume(struct radeon_device *rdev)
/* init golden registers */
cik_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cik_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 1ecb3f1070e3..94626ea90fa5 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev)
WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl);
WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0);
}
+ rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false;
+ rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false;
}
/**
@@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable)
u32 me_cntl, reg_offset;
int i;
+ if (enable == false) {
+ cik_sdma_gfx_stop(rdev);
+ cik_sdma_rlc_stop(rdev);
+ }
+
for (i = 0; i < 2; i++) {
if (i == 0)
reg_offset = SDMA0_REGISTER_OFFSET;
@@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev)
if (!rdev->sdma_fw)
return -EINVAL;
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
-
/* halt the MEs */
cik_sdma_enable(rdev, false);
@@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev)
*/
void cik_sdma_fini(struct radeon_device *rdev)
{
- /* stop the gfx rings and rlc compute queues */
- cik_sdma_gfx_stop(rdev);
- cik_sdma_rlc_stop(rdev);
/* halt the MEs */
cik_sdma_enable(rdev, false);
radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]);
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 713a5d359901..94e858751994 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -278,13 +278,15 @@ static int dce6_audio_chipset_supported(struct radeon_device *rdev)
return !ASIC_IS_NODCE(rdev);
}
-static void dce6_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
+ if (!pin)
+ return;
+
WREG32_ENDPOINT(pin->offset, AZ_F0_CODEC_PIN_CONTROL_HOTPLUG_CONTROL,
- AUDIO_ENABLED);
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
+ enable ? AUDIO_ENABLED : 0);
}
static const u32 pin_offsets[7] =
@@ -323,7 +325,8 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.pin[i].connected = false;
rdev->audio.pin[i].offset = pin_offsets[i];
rdev->audio.pin[i].id = i;
- dce6_audio_enable(rdev, &rdev->audio.pin[i], true);
+ /* disable audio. it will be set up later */
+ dce6_audio_enable(rdev, &rdev->audio.pin[i], false);
}
return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index f2b9e21ce4da..27b0ff16082e 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1680,7 +1680,7 @@ bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
case RADEON_HPD_6:
if (RREG32(DC_HPD6_INT_STATUS) & DC_HPDx_SENSE)
connected = true;
- break;
+ break;
default:
break;
}
@@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev)
/* init golden registers */
evergreen_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = evergreen_startup(rdev);
@@ -5475,9 +5476,9 @@ void evergreen_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- evergreen_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ evergreen_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 0c6d5cef4cf1..05b0c95813fd 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -306,6 +306,15 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ if (ASIC_IS_DCE6(rdev)) {
+ dig->afmt->pin = dce6_audio_get_pin(rdev);
+ dce6_audio_enable(rdev, dig->afmt->pin, false);
+ } else {
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+ }
+
evergreen_audio_set_dto(encoder, mode->clock);
WREG32(HDMI_VBI_PACKET_CONTROL + offset,
@@ -409,12 +418,16 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
WREG32(AFMT_RAMP_CONTROL1 + offset, 0x007FFFFF);
WREG32(AFMT_RAMP_CONTROL2 + offset, 0x00000001);
WREG32(AFMT_RAMP_CONTROL3 + offset, 0x00000001);
+
+ /* enable audio after setting up hw */
+ if (ASIC_IS_DCE6(rdev))
+ dce6_audio_enable(rdev, dig->afmt->pin, true);
+ else
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -427,15 +440,6 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable) {
- if (ASIC_IS_DCE6(rdev))
- dig->afmt->pin = dce6_audio_get_pin(rdev);
- else
- dig->afmt->pin = r600_audio_get_pin(rdev);
- } else {
- dig->afmt->pin = NULL;
- }
-
dig->afmt->enabled = enable;
DRM_DEBUG("%sabling HDMI interface @ 0x%04X for encoder 0x%x\n",
diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h
index 76ada8cfe902..3a03ba37d043 100644
--- a/drivers/gpu/drm/radeon/evergreen_smc.h
+++ b/drivers/gpu/drm/radeon/evergreen_smc.h
@@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters;
#define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100
-#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0
+#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8
#define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC
#define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index b6e01d5d2cce..351db361239d 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -1223,7 +1223,7 @@ int kv_dpm_enable(struct radeon_device *rdev)
int kv_dpm_late_enable(struct radeon_device *rdev)
{
- int ret;
+ int ret = 0;
if (rdev->irq.installed &&
r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ea932ac66fc6..bf6300cfd62d 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev)
/* init golden registers */
ni_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = cayman_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index c351226ecb31..ca814276b075 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -2588,7 +2588,7 @@ static int ni_populate_sq_ramping_values(struct radeon_device *rdev,
if (NISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (NISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -3945,7 +3945,6 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
struct rv7xx_power_info *pi = rv770_get_pi(rdev);
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct ni_ps *ps = ni_get_ps(rps);
- u16 vddc;
struct rv7xx_pl *pl = &ps->performance_levels[index];
ps->performance_level_count = index + 1;
@@ -3961,8 +3960,8 @@ static void ni_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -4322,7 +4321,8 @@ void ni_dpm_print_power_state(struct radeon_device *rdev,
void ni_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index ef024ce3f7cc..3cc78bb66042 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r100_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 7c63ef840e86..0b658b34b33a 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r300_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index 3768aab2710b..802b19220a21 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r420_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index e209eb75024f..98d6053c36c6 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = r520_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 56140b4e5bb2..647ef4079217 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev)
/* post card */
atom_asic_init(rdev->mode_info.atom_context);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = r600_startup(rdev);
@@ -3991,6 +3992,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 176: /* CP_INT in ring buffer */
case 177: /* CP_INT in IB1 */
case 178: /* CP_INT in IB2 */
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 47fc2b886979..bffac10c4296 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -142,12 +142,15 @@ void r600_audio_update_hdmi(struct work_struct *work)
}
/* enable the audio stream */
-static void r600_audio_enable(struct radeon_device *rdev,
- struct r600_audio_pin *pin,
- bool enable)
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable)
{
u32 value = 0;
+ if (!pin)
+ return;
+
if (ASIC_IS_DCE4(rdev)) {
if (enable) {
value |= 0x81000000; /* Required to enable audio */
@@ -158,7 +161,6 @@ static void r600_audio_enable(struct radeon_device *rdev,
WREG32_P(R600_AUDIO_ENABLE,
enable ? 0x81000000 : 0x0, ~0x81000000);
}
- DRM_INFO("%s audio %d support\n", enable ? "Enabling" : "Disabling", pin->id);
}
/*
@@ -178,8 +180,8 @@ int r600_audio_init(struct radeon_device *rdev)
rdev->audio.pin[0].status_bits = 0;
rdev->audio.pin[0].category_code = 0;
rdev->audio.pin[0].id = 0;
-
- r600_audio_enable(rdev, &rdev->audio.pin[0], true);
+ /* disable audio. it will be set up later */
+ r600_audio_enable(rdev, &rdev->audio.pin[0], false);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 7b399dc5fd54..2812c7d1ae6f 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -1007,8 +1007,22 @@ static int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
case R_008C64_SQ_VSTMP_RING_SIZE:
case R_0288C8_SQ_GS_VERT_ITEMSIZE:
/* get value to populate the IB don't remove */
- tmp =radeon_get_ib_value(p, idx);
- ib[idx] = 0;
+ /*tmp =radeon_get_ib_value(p, idx);
+ ib[idx] = 0;*/
+ break;
+ case SQ_ESGS_RING_BASE:
+ case SQ_GSVS_RING_BASE:
+ case SQ_ESTMP_RING_BASE:
+ case SQ_GSTMP_RING_BASE:
+ case SQ_PSTMP_RING_BASE:
+ case SQ_VSTMP_RING_BASE:
+ r = radeon_cs_packet_next_reloc(p, &reloc, 0);
+ if (r) {
+ dev_warn(p->dev, "bad SET_CONTEXT_REG "
+ "0x%04X\n", reg);
+ return -EINVAL;
+ }
+ ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
break;
case SQ_CONFIG:
track->sq_config = radeon_get_ib_value(p, idx);
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 3016fc14f502..85a2bb28aed2 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -329,9 +329,6 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
u8 *sadb;
int sad_count;
- /* XXX: setting this register causes hangs on some asics */
- return;
-
list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
radeon_connector = to_radeon_connector(connector);
@@ -460,6 +457,10 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
return;
offset = dig->afmt->offset;
+ /* disable audio prior to setting up hw */
+ dig->afmt->pin = r600_audio_get_pin(rdev);
+ r600_audio_enable(rdev, dig->afmt->pin, false);
+
r600_audio_set_dto(encoder, mode->clock);
WREG32(HDMI0_VBI_PACKET_CONTROL + offset,
@@ -531,6 +532,9 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
WREG32(HDMI0_RAMP_CONTROL3 + offset, 0x00000001);
r600_hdmi_audio_workaround(encoder);
+
+ /* enable audio after setting up hw */
+ r600_audio_enable(rdev, dig->afmt->pin, true);
}
/*
@@ -651,11 +655,6 @@ void r600_hdmi_enable(struct drm_encoder *encoder, bool enable)
if (!enable && !dig->afmt->enabled)
return;
- if (enable)
- dig->afmt->pin = r600_audio_get_pin(rdev);
- else
- dig->afmt->pin = NULL;
-
/* Older chipsets require setting HDMI and routing manually */
if (!ASIC_IS_DCE3(rdev)) {
if (enable)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4a8ac1cd6b4c..e887d027b6d0 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -135,6 +135,9 @@ extern int radeon_hard_reset;
/* R600+ */
#define R600_RING_TYPE_UVD_INDEX 5
+/* number of hw syncs before falling back on blocking */
+#define RADEON_NUM_SYNCS 4
+
/* hardcode those limit for now */
#define RADEON_VA_IB_OFFSET (1 << 20)
#define RADEON_VA_RESERVED_SIZE (8 << 20)
@@ -554,7 +557,6 @@ int radeon_mode_dumb_mmap(struct drm_file *filp,
/*
* Semaphores.
*/
-/* everything here is constant */
struct radeon_semaphore {
struct radeon_sa_bo *sa_bo;
signed waiters;
@@ -2745,6 +2747,12 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
void r600_audio_update_hdmi(struct work_struct *work);
struct r600_audio_pin *r600_audio_get_pin(struct radeon_device *rdev);
struct r600_audio_pin *dce6_audio_get_pin(struct radeon_device *rdev);
+void r600_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
+void dce6_audio_enable(struct radeon_device *rdev,
+ struct r600_audio_pin *pin,
+ bool enable);
/*
* R600 vram scratch functions
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index f74db43346fd..dda02bfc10a4 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1555,7 +1555,7 @@ static struct radeon_asic btc_asic = {
.get_sclk = &btc_dpm_get_sclk,
.get_mclk = &btc_dpm_get_mclk,
.print_power_state = &rv770_dpm_print_power_state,
- .debugfs_print_current_performance_level = &rv770_dpm_debugfs_print_current_performance_level,
+ .debugfs_print_current_performance_level = &btc_dpm_debugfs_print_current_performance_level,
.force_performance_level = &rv770_dpm_force_performance_level,
.vblank_too_short = &btc_dpm_vblank_too_short,
},
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index b3bc433eed4c..ae637cfda783 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -551,6 +551,8 @@ void btc_dpm_fini(struct radeon_device *rdev);
u32 btc_dpm_get_sclk(struct radeon_device *rdev, bool low);
u32 btc_dpm_get_mclk(struct radeon_device *rdev, bool low);
bool btc_dpm_vblank_too_short(struct radeon_device *rdev);
+void btc_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
+ struct seq_file *m);
int sumo_dpm_init(struct radeon_device *rdev);
int sumo_dpm_enable(struct radeon_device *rdev);
int sumo_dpm_late_enable(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 485848f889f5..fa9a9c02751e 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -219,7 +219,8 @@ static int radeon_atpx_verify_interface(struct radeon_atpx *atpx)
memcpy(&output, info->buffer.pointer, size);
/* TODO: check version? */
- printk("ATPX version %u\n", output.version);
+ printk("ATPX version %u, functions 0x%08x\n",
+ output.version, output.function_bits);
radeon_atpx_parse_functions(&atpx->functions, output.function_bits);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index b012cbbc3ed5..044bc98fb459 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon)
if (r)
DRM_ERROR("ib ring test failed (%d).\n", r);
- if (rdev->pm.dpm_enabled) {
+ if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) {
/* do dpm late init */
r = radeon_pm_late_init(rdev);
if (r) {
rdev->pm.dpm_enabled = false;
DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n");
}
+ } else {
+ /* resume old pm late */
+ radeon_pm_resume(rdev);
}
radeon_restore_bios_scratch_regs(rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d680608f6f5b..fbd8b930f2be 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -571,6 +571,8 @@ static void radeon_crtc_init(struct drm_device *dev, int index)
radeon_crtc->max_cursor_width = CURSOR_WIDTH;
radeon_crtc->max_cursor_height = CURSOR_HEIGHT;
}
+ dev->mode_config.cursor_width = radeon_crtc->max_cursor_width;
+ dev->mode_config.cursor_height = radeon_crtc->max_cursor_height;
#if 0
radeon_crtc->mode_set.crtc = &radeon_crtc->base;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index ec8c388eec17..84a1bbb75f91 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -78,9 +78,10 @@
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
* 2.36.0 - Fix CIK DCE tiling setup
+ * 2.37.0 - allow GS ring setup on r6xx/r7xx
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 36
+#define KMS_DRIVER_MINOR 37
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 114d1672d616..66ed3ea71440 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -33,6 +33,13 @@
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
+
+#if defined(CONFIG_VGA_SWITCHEROO)
+bool radeon_is_px(void);
+#else
+static inline bool radeon_is_px(void) { return false; }
+#endif
+
/**
* radeon_driver_unload_kms - Main unload function for KMS.
*
@@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
"Error during ACPI methods call\n");
}
- if (radeon_runtime_pm != 0) {
+ if ((radeon_runtime_pm == 1) ||
+ ((radeon_runtime_pm == -1) && radeon_is_px())) {
pm_runtime_use_autosuspend(dev->dev);
pm_runtime_set_autosuspend_delay(dev->dev, 5000);
pm_runtime_set_active(dev->dev);
@@ -537,6 +545,10 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
radeon_vm_init(rdev, &fpriv->vm);
+ r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
+ if (r)
+ return r;
+
/* map the ib pool buffer read only into
* virtual address space */
bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
@@ -544,6 +556,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
RADEON_VM_PAGE_READABLE |
RADEON_VM_PAGE_SNOOPED);
+
+ radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
if (r) {
radeon_vm_fini(rdev, &fpriv->vm);
kfree(fpriv);
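The open-path hunk above brackets the IB-pool mapping with a reserve/unreserve pair because the VM helpers expect the buffer object to be reserved by the caller, and it drops the reservation before acting on the result so the lock is always released. A rough, self-contained mock of that idiom; the names are hypothetical and the locking is only simulated:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_bo { bool reserved; };

static int bo_reserve(struct mock_bo *bo)
{
	if (bo->reserved)
		return -EBUSY;
	bo->reserved = true;
	return 0;
}

static void bo_unreserve(struct mock_bo *bo)
{
	bo->reserved = false;
}

static int map_read_only(struct mock_bo *bo)
{
	/* Pretend to set up a read-only VM mapping; needs the reservation. */
	return bo->reserved ? 0 : -EINVAL;
}

/* Reserve, operate, always unreserve, then act on the operation's result. */
static int map_ib_pool(struct mock_bo *bo)
{
	int r = bo_reserve(bo);

	if (r)
		return r;
	r = map_read_only(bo);
	bo_unreserve(bo);
	return r;
}

int main(void)
{
	struct mock_bo bo = { .reserved = false };

	printf("map_ib_pool: %d\n", map_ib_pool(&bo));	/* 0 on success */
	return 0;
}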
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 1b783f0e6d3a..15e44a7281ab 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -139,7 +139,7 @@ int radeon_ib_schedule(struct radeon_device *rdev, struct radeon_ib *ib,
}
/* 64 dwords should be enough for fence too */
- r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_RINGS * 8);
+ r = radeon_ring_lock(rdev, ring, 64 + RADEON_NUM_SYNCS * 8);
if (r) {
dev_err(rdev->dev, "scheduling IB failed (%d).\n", r);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_semaphore.c b/drivers/gpu/drm/radeon/radeon_semaphore.c
index 2b42aa1914f2..9006b32d5eed 100644
--- a/drivers/gpu/drm/radeon/radeon_semaphore.c
+++ b/drivers/gpu/drm/radeon/radeon_semaphore.c
@@ -34,14 +34,15 @@
int radeon_semaphore_create(struct radeon_device *rdev,
struct radeon_semaphore **semaphore)
{
+ uint32_t *cpu_addr;
int i, r;
*semaphore = kmalloc(sizeof(struct radeon_semaphore), GFP_KERNEL);
if (*semaphore == NULL) {
return -ENOMEM;
}
- r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo,
- &(*semaphore)->sa_bo, 8, 8, true);
+ r = radeon_sa_bo_new(rdev, &rdev->ring_tmp_bo, &(*semaphore)->sa_bo,
+ 8 * RADEON_NUM_SYNCS, 8, true);
if (r) {
kfree(*semaphore);
*semaphore = NULL;
@@ -49,7 +50,10 @@ int radeon_semaphore_create(struct radeon_device *rdev,
}
(*semaphore)->waiters = 0;
(*semaphore)->gpu_addr = radeon_sa_bo_gpu_addr((*semaphore)->sa_bo);
- *((uint64_t*)radeon_sa_bo_cpu_addr((*semaphore)->sa_bo)) = 0;
+
+ cpu_addr = radeon_sa_bo_cpu_addr((*semaphore)->sa_bo);
+ for (i = 0; i < RADEON_NUM_SYNCS; ++i)
+ cpu_addr[i] = 0;
for (i = 0; i < RADEON_NUM_RINGS; ++i)
(*semaphore)->sync_to[i] = NULL;
@@ -125,6 +129,7 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
int ring)
{
+ unsigned count = 0;
int i, r;
for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -140,6 +145,12 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
return -EINVAL;
}
+ if (++count > RADEON_NUM_SYNCS) {
+ /* not enough room, wait manually */
+ radeon_fence_wait_locked(fence);
+ continue;
+ }
+
/* allocate enough space for sync command */
r = radeon_ring_alloc(rdev, &rdev->ring[i], 16);
if (r) {
@@ -164,6 +175,8 @@ int radeon_semaphore_sync_rings(struct radeon_device *rdev,
radeon_ring_commit(rdev, &rdev->ring[i]);
radeon_fence_note_sync(fence, ring);
+
+ semaphore->gpu_addr += 8;
}
return 0;
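Taken together, the semaphore hunks above let one semaphore object back up to RADEON_NUM_SYNCS hardware syncs, advancing gpu_addr by 8 bytes per sync and falling back to a blocking CPU wait once the slots run out. A standalone simulation of that policy, with stubbed-out sync and wait calls:

#include <stdio.h>

#define NUM_SYNCS 4	/* mirrors RADEON_NUM_SYNCS */

static void hw_semaphore_sync(int ring) { printf("hw sync with ring %d\n", ring); }
static void cpu_blocking_wait(int ring) { printf("cpu wait on ring %d\n", ring); }

static void sync_rings(const int *busy_rings, int n)
{
	unsigned int count = 0;
	unsigned long gpu_addr = 0;	/* offset of the next 8-byte slot */
	int i;

	for (i = 0; i < n; i++) {
		if (++count > NUM_SYNCS) {
			/* not enough room, wait manually */
			cpu_blocking_wait(busy_rings[i]);
			continue;
		}
		hw_semaphore_sync(busy_rings[i]);
		gpu_addr += 8;		/* move to the next semaphore slot */
	}
	printf("used %lu bytes of semaphore space\n", gpu_addr);
}

int main(void)
{
	int rings[6] = { 0, 1, 2, 3, 4, 5 };

	sync_rings(rings, 6);	/* 4 hardware syncs, then 2 blocking waits */
	return 0;
}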
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 77f5b0c3edb8..040a2a10ea17 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev)
DRM_ERROR("Failed initializing VRAM heap.\n");
return r;
}
+ /* Change the size here instead of the init above so only lpfn is affected */
+ radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
+
r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true,
RADEON_GEM_DOMAIN_VRAM,
NULL, &rdev->stollen_vga_memory);
@@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf,
while (size) {
loff_t p = *pos / PAGE_SIZE;
unsigned off = *pos & ~PAGE_MASK;
- ssize_t cur_size = min(size, PAGE_SIZE - off);
+ size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
struct page *page;
void *ptr;
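The debugfs read hunk above switches cur_size to an unsigned size_t and compares through min_t() so both operands are forced to one explicit type; mixing signed and unsigned operands lets the usual arithmetic conversions pick a surprising minimum. A standalone illustration of that pitfall, with a simplified min_t() rather than the kernel's:

#include <stddef.h>
#include <stdio.h>

/* Simplified kernel-style min_t(): cast both operands to one explicit type. */
#define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

int main(void)
{
	long   signed_len = -1;		/* e.g. a stray error value */
	size_t page_left  = 4096;

	/* Under the usual arithmetic conversions -1 becomes SIZE_MAX, so
	 * this "obviously true" comparison evaluates to 0. */
	printf("(-1 < 4096) with mixed types: %d\n", signed_len < page_left);

	/* Explicitly typed comparison, as used by the hunk above. */
	printf("min_t(size_t, 10000, page_left) = %zu\n",
	       min_t(size_t, (size_t)10000, page_left));
	return 0;
}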
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 6781fee1eaad..3e6804b2b2ef 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -171,6 +171,8 @@ void radeon_uvd_fini(struct radeon_device *rdev)
radeon_bo_unref(&rdev->uvd.vcpu_bo);
+ radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX]);
+
release_firmware(rdev->uvd_fw);
}
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 20bfbda7b3f1..ec0c6829c1dc 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -18,6 +18,7 @@ r600 0x9400
0x00028A3C VGT_GROUP_VECT_1_FMT_CNTL
0x00028A40 VGT_GS_MODE
0x00028A6C VGT_GS_OUT_PRIM_TYPE
+0x00028B38 VGT_GS_MAX_VERT_OUT
0x000088C8 VGT_GS_PER_ES
0x000088E8 VGT_GS_PER_VS
0x000088D4 VGT_GS_VERTEX_REUSE
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index b5c2369cda2f..130d5cc50d43 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs400_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index fdcde7693032..72d3616de08e 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs600_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 35950738bd5e..3462b64369bf 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rs690_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 98e8138ff779..237dd29d9f1c 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev)
/* Initialize surface registers */
radeon_surface_init(rdev);
- radeon_pm_resume(rdev);
-
rdev->accel_working = true;
r = rv515_startup(rdev);
if (r) {
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 6c772e58c784..fef310773aad 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev)
/* init golden registers */
rv770_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = rv770_startup(rdev);
@@ -1955,9 +1956,9 @@ void rv770_fini(struct radeon_device *rdev)
radeon_wb_fini(rdev);
radeon_ib_pool_fini(rdev);
radeon_irq_kms_fini(rdev);
- rv770_pcie_gart_fini(rdev);
uvd_v1_0_fini(rdev);
radeon_uvd_fini(rdev);
+ rv770_pcie_gart_fini(rdev);
r600_vram_scratch_fini(rdev);
radeon_gem_fini(rdev);
radeon_fence_driver_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 80c595aba359..b5f63f5e22a3 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2174,7 +2174,6 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
struct rv7xx_ps *ps = rv770_get_ps(rps);
u32 sclk, mclk;
- u16 vddc;
struct rv7xx_pl *pl;
switch (index) {
@@ -2214,8 +2213,8 @@ static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
/* patch up vddc if necessary */
if (pl->vddc == 0xff01) {
- if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc) == 0)
- pl->vddc = vddc;
+ if (pi->max_vddc)
+ pl->vddc = pi->max_vddc;
}
if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
@@ -2527,14 +2526,7 @@ u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
{
u32 vblank_time = r600_dpm_get_vblank_time(rdev);
- u32 switch_limit = 300;
-
- /* quirks */
- /* ASUS K70AF */
- if ((rdev->pdev->device == 0x9553) &&
- (rdev->pdev->subsystem_vendor == 0x1043) &&
- (rdev->pdev->subsystem_device == 0x1c42))
- switch_limit = 200;
+ u32 switch_limit = 200; /* was 300 */
/* RV770 */
/* mclk switching doesn't seem to work reliably on desktop RV770s */
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 09ec4f6c53bb..9a124d0608b3 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6338,6 +6338,10 @@ restart_ih:
break;
}
break;
+ case 124: /* UVD */
+ DRM_DEBUG("IH: UVD int: 0x%08x\n", src_data);
+ radeon_fence_process(rdev, R600_RING_TYPE_UVD_INDEX);
+ break;
case 146:
case 147:
addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
@@ -6614,7 +6618,8 @@ int si_resume(struct radeon_device *rdev)
/* init golden registers */
si_init_golden_registers(rdev);
- radeon_pm_resume(rdev);
+ if (rdev->pm.pm_method == PM_METHOD_DPM)
+ radeon_pm_resume(rdev);
rdev->accel_working = true;
r = si_startup(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 0471501338fb..0a2f5b4bca43 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2395,7 +2395,7 @@ static int si_populate_sq_ramping_values(struct radeon_device *rdev,
if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
enable_sq_ramping = false;
- if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO <= (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
+ if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
enable_sq_ramping = false;
for (i = 0; i < state->performance_level_count; i++) {
@@ -6472,7 +6472,8 @@ void si_dpm_fini(struct radeon_device *rdev)
void si_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
+ struct radeon_ps *rps = &eg_pi->current_rps;
struct ni_ps *ps = ni_get_ps(rps);
struct rv7xx_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index f121efe12dc5..8b47b3cd0357 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -1807,7 +1807,7 @@ void sumo_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev
struct seq_file *m)
{
struct sumo_power_info *pi = sumo_get_pi(rdev);
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct radeon_ps *rps = &pi->current_rps;
struct sumo_ps *ps = sumo_get_ps(rps);
struct sumo_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2d447192d6f7..2da0e17eb960 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1926,7 +1926,8 @@ void trinity_dpm_print_power_state(struct radeon_device *rdev,
void trinity_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
struct seq_file *m)
{
- struct radeon_ps *rps = rdev->pm.dpm.current_ps;
+ struct trinity_power_info *pi = trinity_get_pi(rdev);
+ struct radeon_ps *rps = &pi->current_rps;
struct trinity_ps *ps = trinity_get_ps(rps);
struct trinity_pl *pl;
u32 current_index =
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 824550db3fed..d1771004cb52 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -57,7 +57,6 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, PACKET0(UVD_GPCOM_VCPU_CMD, 0));
radeon_ring_write(ring, 2);
- return;
}
/**
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 88a529008ce0..c71594754f46 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -104,7 +104,7 @@ static void tegra_drm_context_free(struct tegra_drm_context *context)
static void tegra_drm_lastclose(struct drm_device *drm)
{
-#ifdef CONFIG_TEGRA_DRM_FBDEV
+#ifdef CONFIG_DRM_TEGRA_FBDEV
struct tegra_drm *tegra = drm->dev_private;
tegra_fbdev_restore_mode(tegra->fbdev);
diff --git a/drivers/gpu/drm/tegra/rgb.c b/drivers/gpu/drm/tegra/rgb.c
index 338f7f6561d7..0266fb40479e 100644
--- a/drivers/gpu/drm/tegra/rgb.c
+++ b/drivers/gpu/drm/tegra/rgb.c
@@ -15,6 +15,7 @@
struct tegra_rgb {
struct tegra_output output;
struct tegra_dc *dc;
+ bool enabled;
struct clk *clk_parent;
struct clk *clk;
@@ -89,6 +90,9 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (rgb->enabled)
+ return 0;
+
tegra_dc_write_regs(rgb->dc, rgb_enable, ARRAY_SIZE(rgb_enable));
value = DE_SELECT_ACTIVE | DE_CONTROL_NORMAL;
@@ -122,6 +126,8 @@ static int tegra_output_rgb_enable(struct tegra_output *output)
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(rgb->dc, GENERAL_ACT_REQ, DC_CMD_STATE_CONTROL);
+ rgb->enabled = true;
+
return 0;
}
@@ -130,6 +136,9 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
struct tegra_rgb *rgb = to_rgb(output);
unsigned long value;
+ if (!rgb->enabled)
+ return 0;
+
value = tegra_dc_readl(rgb->dc, DC_CMD_DISPLAY_POWER_CONTROL);
value &= ~(PW0_ENABLE | PW1_ENABLE | PW2_ENABLE | PW3_ENABLE |
PW4_ENABLE | PM0_ENABLE | PM1_ENABLE);
@@ -144,6 +153,8 @@ static int tegra_output_rgb_disable(struct tegra_output *output)
tegra_dc_write_regs(rgb->dc, rgb_disable, ARRAY_SIZE(rgb_disable));
+ rgb->enabled = false;
+
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
index 3302f99e7497..764be36397fd 100644
--- a/drivers/gpu/drm/ttm/ttm_agp_backend.c
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -126,6 +126,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_bo_device *bdev,
agp_be->ttm.func = &ttm_agp_func;
if (ttm_tt_init(&agp_be->ttm, bdev, size, page_flags, dummy_read_page)) {
+ kfree(agp_be);
return NULL;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index a06651309388..214b7992a3aa 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 801231c9ae48..0ce48e5a9cb4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma,
vma->vm_private_data = bo;
/*
- * PFNMAP is faster than MIXEDMAP due to reduced page
- * administration. So use MIXEDMAP only if private VMA, where
- * we need to support COW.
+ * We'd like to use VM_PFNMAP on shared mappings, where
+ * (vma->vm_flags & VM_SHARED) != 0, for performance reasons,
+ * but for some reason VM_PFNMAP + x86 PAT + write-combine is very
+ * bad for performance. Until that has been sorted out, use
+ * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719
*/
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
return 0;
out_unref:
@@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
vma->vm_ops = &ttm_bo_vm_ops;
vma->vm_private_data = ttm_bo_reference(bo);
- vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP;
+ vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags |= VM_IO | VM_DONTEXPAND;
return 0;
}
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index 37079859afc8..53b51c4e671a 100644
--- a/drivers/gpu/drm/ttm/ttm_object.c
+++ b/drivers/gpu/drm/ttm/ttm_object.c
@@ -292,7 +292,7 @@ int ttm_ref_object_add(struct ttm_object_file *tfile,
if (ret == 0) {
ref = drm_hash_entry(hash, struct ttm_ref_object, hash);
- if (!kref_get_unless_zero(&ref->kref)) {
+ if (kref_get_unless_zero(&ref->kref)) {
rcu_read_unlock();
break;
}
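The ttm_object fix above inverts the test because kref_get_unless_zero() returns nonzero exactly when it managed to take a reference, so only that case is a usable cache hit. A single-threaded, standalone model of the lookup; the real code does this with atomics under RCU:

#include <stdbool.h>
#include <stdio.h>

struct obj { int refcount; };

/* Take a reference only if the object is not already being destroyed. */
static bool get_unless_zero(struct obj *o)
{
	if (o->refcount == 0)
		return false;	/* refcount already hit zero: miss */
	o->refcount++;
	return true;		/* reference taken: hit */
}

static const char *lookup(struct obj *o)
{
	if (get_unless_zero(o))
		return "hit, reference taken";
	return "miss, fall back to the slow path";
}

int main(void)
{
	struct obj live  = { .refcount = 1 };
	struct obj dying = { .refcount = 0 };

	printf("live:  %s\n", lookup(&live));
	printf("dying: %s\n", lookup(&dying));
	return 0;
}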
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 9af99084b344..75f319090043 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -380,6 +380,9 @@ static void ttm_tt_clear_mapping(struct ttm_tt *ttm)
pgoff_t i;
struct page **page = ttm->pages;
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
+ return;
+
for (i = 0; i < ttm->num_pages; ++i) {
(*page)->mapping = NULL;
(*page++)->index = 0;
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_reg.h b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
index d95335cb90bd..f58dc7dd15c5 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_reg.h
@@ -261,12 +261,7 @@ typedef enum SVGA3dSurfaceFormat {
/* Planar video formats. */
SVGA3D_YV12 = 121,
- /* Shader constant formats. */
- SVGA3D_SURFACE_SHADERCONST_FLOAT = 122,
- SVGA3D_SURFACE_SHADERCONST_INT = 123,
- SVGA3D_SURFACE_SHADERCONST_BOOL = 124,
-
- SVGA3D_FORMAT_MAX = 125,
+ SVGA3D_FORMAT_MAX = 122,
} SVGA3dSurfaceFormat;
typedef uint32 SVGA3dColor; /* a, r, g, b */
@@ -1223,9 +1218,19 @@ typedef enum {
#define SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL 1129
#define SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE 1130
-
+#define SVGA_3D_CMD_GB_SCREEN_DMA 1131
+#define SVGA_3D_CMD_BIND_GB_SURFACE_WITH_PITCH 1132
+#define SVGA_3D_CMD_GB_MOB_FENCE 1133
+#define SVGA_3D_CMD_DEFINE_GB_SURFACE_V2 1134
#define SVGA_3D_CMD_DEFINE_GB_MOB64 1135
#define SVGA_3D_CMD_REDEFINE_GB_MOB64 1136
+#define SVGA_3D_CMD_NOP_ERROR 1137
+
+#define SVGA_3D_CMD_RESERVED1 1138
+#define SVGA_3D_CMD_RESERVED2 1139
+#define SVGA_3D_CMD_RESERVED3 1140
+#define SVGA_3D_CMD_RESERVED4 1141
+#define SVGA_3D_CMD_RESERVED5 1142
#define SVGA_3D_CMD_MAX 1142
#define SVGA_3D_CMD_FUTURE_MAX 3000
@@ -1973,8 +1978,7 @@ struct {
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdSetOTableBase; /* SVGA_3D_CMD_SET_OTABLE_BASE */
typedef
@@ -1984,15 +1988,13 @@ struct {
uint32 sizeInBytes;
uint32 validSizeInBytes;
SVGAMobFormat ptDepth;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdSetOTableBase64; /* SVGA_3D_CMD_SET_OTABLE_BASE64 */
typedef
struct {
SVGAOTableType type;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdReadbackOTable; /* SVGA_3D_CMD_READBACK_OTABLE */
/*
@@ -2005,8 +2007,7 @@ struct SVGA3dCmdDefineGBMob {
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
@@ -2017,8 +2018,7 @@ SVGA3dCmdDefineGBMob; /* SVGA_3D_CMD_DEFINE_GB_MOB */
typedef
struct SVGA3dCmdDestroyGBMob {
SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDestroyGBMob; /* SVGA_3D_CMD_DESTROY_GB_MOB */
/*
@@ -2031,8 +2031,7 @@ struct SVGA3dCmdRedefineGBMob {
SVGAMobFormat ptDepth;
PPN base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdRedefineGBMob; /* SVGA_3D_CMD_REDEFINE_GB_MOB */
/*
@@ -2045,8 +2044,7 @@ struct SVGA3dCmdDefineGBMob64 {
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdDefineGBMob64; /* SVGA_3D_CMD_DEFINE_GB_MOB64 */
/*
@@ -2059,8 +2057,7 @@ struct SVGA3dCmdRedefineGBMob64 {
SVGAMobFormat ptDepth;
PPN64 base;
uint32 sizeInBytes;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
/*
@@ -2070,8 +2067,7 @@ SVGA3dCmdRedefineGBMob64; /* SVGA_3D_CMD_REDEFINE_GB_MOB64 */
typedef
struct SVGA3dCmdUpdateGBMobMapping {
SVGAMobId mobid;
-}
-__attribute__((__packed__))
+} __packed
SVGA3dCmdUpdateGBMobMapping; /* SVGA_3D_CMD_UPDATE_GB_MOB_MAPPING */
/*
@@ -2087,7 +2083,8 @@ struct SVGA3dCmdDefineGBSurface {
uint32 multisampleCount;
SVGA3dTextureFilter autogenFilter;
SVGA3dSize size;
-} SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
+} __packed
+SVGA3dCmdDefineGBSurface; /* SVGA_3D_CMD_DEFINE_GB_SURFACE */
/*
* Destroy a guest-backed surface.
@@ -2096,7 +2093,8 @@ struct SVGA3dCmdDefineGBSurface {
typedef
struct SVGA3dCmdDestroyGBSurface {
uint32 sid;
-} SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
+} __packed
+SVGA3dCmdDestroyGBSurface; /* SVGA_3D_CMD_DESTROY_GB_SURFACE */
/*
* Bind a guest-backed surface to an object.
@@ -2106,7 +2104,8 @@ typedef
struct SVGA3dCmdBindGBSurface {
uint32 sid;
SVGAMobId mobid;
-} SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
+} __packed
+SVGA3dCmdBindGBSurface; /* SVGA_3D_CMD_BIND_GB_SURFACE */
/*
* Conditionally bind a mob to a guest backed surface if testMobid
@@ -2123,7 +2122,7 @@ struct{
SVGAMobId testMobid;
SVGAMobId mobid;
uint32 flags;
-}
+} __packed
SVGA3dCmdCondBindGBSurface; /* SVGA_3D_CMD_COND_BIND_GB_SURFACE */
/*
@@ -2135,7 +2134,8 @@ typedef
struct SVGA3dCmdUpdateGBImage {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
-} SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdUpdateGBImage; /* SVGA_3D_CMD_UPDATE_GB_IMAGE */
/*
* Update an entire guest-backed surface.
@@ -2145,7 +2145,8 @@ struct SVGA3dCmdUpdateGBImage {
typedef
struct SVGA3dCmdUpdateGBSurface {
uint32 sid;
-} SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdUpdateGBSurface; /* SVGA_3D_CMD_UPDATE_GB_SURFACE */
/*
* Readback an image in a guest-backed surface.
@@ -2155,7 +2156,8 @@ struct SVGA3dCmdUpdateGBSurface {
typedef
struct SVGA3dCmdReadbackGBImage {
SVGA3dSurfaceImageId image;
-} SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
+} __packed
+SVGA3dCmdReadbackGBImage; /* SVGA_3D_CMD_READBACK_GB_IMAGE*/
/*
* Readback an entire guest-backed surface.
@@ -2165,7 +2167,8 @@ struct SVGA3dCmdReadbackGBImage {
typedef
struct SVGA3dCmdReadbackGBSurface {
uint32 sid;
-} SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
+} __packed
+SVGA3dCmdReadbackGBSurface; /* SVGA_3D_CMD_READBACK_GB_SURFACE */
/*
* Readback a sub rect of an image in a guest-backed surface. After
@@ -2179,7 +2182,7 @@ struct SVGA3dCmdReadbackGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
-}
+} __packed
SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
/*
@@ -2190,7 +2193,8 @@ SVGA3dCmdReadbackGBImagePartial; /* SVGA_3D_CMD_READBACK_GB_IMAGE_PARTIAL */
typedef
struct SVGA3dCmdInvalidateGBImage {
SVGA3dSurfaceImageId image;
-} SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
+} __packed
+SVGA3dCmdInvalidateGBImage; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE */
/*
* Invalidate an entire guest-backed surface.
@@ -2200,7 +2204,8 @@ struct SVGA3dCmdInvalidateGBImage {
typedef
struct SVGA3dCmdInvalidateGBSurface {
uint32 sid;
-} SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
+} __packed
+SVGA3dCmdInvalidateGBSurface; /* SVGA_3D_CMD_INVALIDATE_GB_SURFACE */
/*
* Invalidate a sub rect of an image in a guest-backed surface. After
@@ -2214,7 +2219,7 @@ struct SVGA3dCmdInvalidateGBImagePartial {
SVGA3dSurfaceImageId image;
SVGA3dBox box;
uint32 invertBox;
-}
+} __packed
SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
/*
@@ -2224,7 +2229,8 @@ SVGA3dCmdInvalidateGBImagePartial; /* SVGA_3D_CMD_INVALIDATE_GB_IMAGE_PARTIAL */
typedef
struct SVGA3dCmdDefineGBContext {
uint32 cid;
-} SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
+} __packed
+SVGA3dCmdDefineGBContext; /* SVGA_3D_CMD_DEFINE_GB_CONTEXT */
/*
* Destroy a guest-backed context.
@@ -2233,7 +2239,8 @@ struct SVGA3dCmdDefineGBContext {
typedef
struct SVGA3dCmdDestroyGBContext {
uint32 cid;
-} SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
+} __packed
+SVGA3dCmdDestroyGBContext; /* SVGA_3D_CMD_DESTROY_GB_CONTEXT */
/*
* Bind a guest-backed context.
@@ -2252,7 +2259,8 @@ struct SVGA3dCmdBindGBContext {
uint32 cid;
SVGAMobId mobid;
uint32 validContents;
-} SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
+} __packed
+SVGA3dCmdBindGBContext; /* SVGA_3D_CMD_BIND_GB_CONTEXT */
/*
* Readback a guest-backed context.
@@ -2262,7 +2270,8 @@ struct SVGA3dCmdBindGBContext {
typedef
struct SVGA3dCmdReadbackGBContext {
uint32 cid;
-} SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
+} __packed
+SVGA3dCmdReadbackGBContext; /* SVGA_3D_CMD_READBACK_GB_CONTEXT */
/*
* Invalidate a guest-backed context.
@@ -2270,7 +2279,8 @@ struct SVGA3dCmdReadbackGBContext {
typedef
struct SVGA3dCmdInvalidateGBContext {
uint32 cid;
-} SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
+} __packed
+SVGA3dCmdInvalidateGBContext; /* SVGA_3D_CMD_INVALIDATE_GB_CONTEXT */
/*
* Define a guest-backed shader.
@@ -2281,7 +2291,8 @@ struct SVGA3dCmdDefineGBShader {
uint32 shid;
SVGA3dShaderType type;
uint32 sizeInBytes;
-} SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
+} __packed
+SVGA3dCmdDefineGBShader; /* SVGA_3D_CMD_DEFINE_GB_SHADER */
/*
* Bind a guest-backed shader.
@@ -2291,7 +2302,8 @@ typedef struct SVGA3dCmdBindGBShader {
uint32 shid;
SVGAMobId mobid;
uint32 offsetInBytes;
-} SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
+} __packed
+SVGA3dCmdBindGBShader; /* SVGA_3D_CMD_BIND_GB_SHADER */
/*
* Destroy a guest-backed shader.
@@ -2299,7 +2311,8 @@ typedef struct SVGA3dCmdBindGBShader {
typedef struct SVGA3dCmdDestroyGBShader {
uint32 shid;
-} SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
+} __packed
+SVGA3dCmdDestroyGBShader; /* SVGA_3D_CMD_DESTROY_GB_SHADER */
typedef
struct {
@@ -2314,14 +2327,16 @@ struct {
* Note that FLOAT and INT constants are 4-dwords in length, while
* BOOL constants are 1-dword in length.
*/
-} SVGA3dCmdSetGBShaderConstInline;
+} __packed
+SVGA3dCmdSetGBShaderConstInline;
/* SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE */
typedef
struct {
uint32 cid;
SVGA3dQueryType type;
-} SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
+} __packed
+SVGA3dCmdBeginGBQuery; /* SVGA_3D_CMD_BEGIN_GB_QUERY */
typedef
struct {
@@ -2329,7 +2344,8 @@ struct {
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
-} SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
+} __packed
+SVGA3dCmdEndGBQuery; /* SVGA_3D_CMD_END_GB_QUERY */
/*
@@ -2346,21 +2362,22 @@ struct {
SVGA3dQueryType type;
SVGAMobId mobid;
uint32 offset;
-} SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
+} __packed
+SVGA3dCmdWaitForGBQuery; /* SVGA_3D_CMD_WAIT_FOR_GB_QUERY */
typedef
struct {
SVGAMobId mobid;
uint32 fbOffset;
uint32 initalized;
-}
+} __packed
SVGA3dCmdEnableGart; /* SVGA_3D_CMD_ENABLE_GART */
typedef
struct {
SVGAMobId mobid;
uint32 gartOffset;
-}
+} __packed
SVGA3dCmdMapMobIntoGart; /* SVGA_3D_CMD_MAP_MOB_INTO_GART */
@@ -2368,7 +2385,7 @@ typedef
struct {
uint32 gartOffset;
uint32 numPages;
-}
+} __packed
SVGA3dCmdUnmapGartRange; /* SVGA_3D_CMD_UNMAP_GART_RANGE */
@@ -2385,27 +2402,27 @@ struct {
int32 xRoot;
int32 yRoot;
uint32 flags;
-}
+} __packed
SVGA3dCmdDefineGBScreenTarget; /* SVGA_3D_CMD_DEFINE_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
-}
+} __packed
SVGA3dCmdDestroyGBScreenTarget; /* SVGA_3D_CMD_DESTROY_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
SVGA3dSurfaceImageId image;
-}
+} __packed
SVGA3dCmdBindGBScreenTarget; /* SVGA_3D_CMD_BIND_GB_SCREENTARGET */
typedef
struct {
uint32 stid;
SVGA3dBox box;
-}
+} __packed
SVGA3dCmdUpdateGBScreenTarget; /* SVGA_3D_CMD_UPDATE_GB_SCREENTARGET */
/*
@@ -2583,4 +2600,28 @@ typedef union {
float f;
} SVGA3dDevCapResult;
+typedef enum {
+ SVGA3DCAPS_RECORD_UNKNOWN = 0,
+ SVGA3DCAPS_RECORD_DEVCAPS_MIN = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS = 0x100,
+ SVGA3DCAPS_RECORD_DEVCAPS_MAX = 0x1ff,
+} SVGA3dCapsRecordType;
+
+typedef
+struct SVGA3dCapsRecordHeader {
+ uint32 length;
+ SVGA3dCapsRecordType type;
+}
+SVGA3dCapsRecordHeader;
+
+typedef
+struct SVGA3dCapsRecord {
+ SVGA3dCapsRecordHeader header;
+ uint32 data[1];
+}
+SVGA3dCapsRecord;
+
+
+typedef uint32 SVGA3dCapPair[2];
+
#endif /* _SVGA3D_REG_H_ */
diff --git a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
index 8369c3ba10fe..ef3385096145 100644
--- a/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
+++ b/drivers/gpu/drm/vmwgfx/svga3d_surfacedefs.h
@@ -38,8 +38,11 @@
#define DIV_ROUND_UP(x, y) (((x) + (y) - 1) / (y))
#define max_t(type, x, y) ((x) > (y) ? (x) : (y))
+#define min_t(type, x, y) ((x) < (y) ? (x) : (y))
#define surf_size_struct SVGA3dSize
#define u32 uint32
+#define u64 uint64_t
+#define U32_MAX ((u32)~0U)
#endif /* __KERNEL__ */
@@ -704,8 +707,8 @@ static const struct svga3d_surface_desc svga3d_surface_descs[] = {
static inline u32 clamped_umul32(u32 a, u32 b)
{
- uint64_t tmp = (uint64_t) a*b;
- return (tmp > (uint64_t) ((u32) -1)) ? (u32) -1 : tmp;
+ u64 tmp = (u64) a*b;
+ return (tmp > (u64) U32_MAX) ? U32_MAX : tmp;
}
static inline const struct svga3d_surface_desc *
@@ -834,7 +837,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
bool cubemap)
{
const struct svga3d_surface_desc *desc = svga3dsurface_get_desc(format);
- u32 total_size = 0;
+ u64 total_size = 0;
u32 mip;
for (mip = 0; mip < num_mip_levels; mip++) {
@@ -847,7 +850,7 @@ svga3dsurface_get_serialized_size(SVGA3dSurfaceFormat format,
if (cubemap)
total_size *= SVGA3D_MAX_SURFACE_FACES;
- return total_size;
+ return (u32) min_t(u64, total_size, (u64) U32_MAX);
}
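The surface-size hunks above keep the arithmetic in 64 bits and saturate at U32_MAX instead of letting a 32-bit multiply or sum wrap silently. A standalone version of the clamped multiply:

#include <stdint.h>
#include <stdio.h>

#define U32_MAX ((uint32_t)~0U)

/* Multiply in 64 bits, then clamp to the largest 32-bit value. */
static uint32_t clamped_umul32(uint32_t a, uint32_t b)
{
	uint64_t tmp = (uint64_t)a * b;

	return (tmp > (uint64_t)U32_MAX) ? U32_MAX : (uint32_t)tmp;
}

int main(void)
{
	/* 0xffff * 0x10001 fits exactly in 32 bits ... */
	printf("0x%08x\n", (unsigned int)clamped_umul32(0xffff, 0x10001));
	/* ... while 0x10000 * 0x10000 would wrap to 0, so it is clamped. */
	printf("0x%08x\n", (unsigned int)clamped_umul32(0x10000, 0x10000));
	return 0;
}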
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 71defa4d2d75..11323dd5196f 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -169,10 +169,17 @@ enum {
SVGA_REG_TRACES = 45, /* Enable trace-based updates even when FIFO is on */
SVGA_REG_GMRS_MAX_PAGES = 46, /* Maximum number of 4KB pages for all GMRs */
SVGA_REG_MEMORY_SIZE = 47, /* Total dedicated device memory excluding FIFO */
+ SVGA_REG_COMMAND_LOW = 48, /* Lower 32 bits and submits commands */
+ SVGA_REG_COMMAND_HIGH = 49, /* Upper 32 bits of command buffer PA */
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM = 50, /* Max primary memory */
SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB = 51, /* Suggested limit on mob mem */
SVGA_REG_DEV_CAP = 52, /* Write dev cap index, read value */
- SVGA_REG_TOP = 53, /* Must be 1 more than the last register */
+ SVGA_REG_CMD_PREPEND_LOW = 53,
+ SVGA_REG_CMD_PREPEND_HIGH = 54,
+ SVGA_REG_SCREENTARGET_MAX_WIDTH = 55,
+ SVGA_REG_SCREENTARGET_MAX_HEIGHT = 56,
+ SVGA_REG_MOB_MAX_SIZE = 57,
+ SVGA_REG_TOP = 58, /* Must be 1 more than the last register */
SVGA_PALETTE_BASE = 1024, /* Base of SVGA color map */
/* Next 768 (== 256*3) registers exist for colormap */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
index 82c41daebc0e..1e80152674b5 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_context.c
@@ -37,7 +37,7 @@ struct vmw_user_context {
-typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *);
+typedef int (*vmw_scrub_func)(struct vmw_ctx_bindinfo *, bool);
static void vmw_user_context_free(struct vmw_resource *res);
static struct vmw_resource *
@@ -50,9 +50,11 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
bool readback,
struct ttm_validate_buffer *val_buf);
static int vmw_gb_context_destroy(struct vmw_resource *res);
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi);
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi);
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind);
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind);
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi, bool rebind);
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs);
static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs);
static uint64_t vmw_user_context_size;
@@ -111,10 +113,14 @@ static void vmw_hw_context_destroy(struct vmw_resource *res)
if (res->func->destroy == vmw_gb_context_destroy) {
mutex_lock(&dev_priv->cmdbuf_mutex);
+ mutex_lock(&dev_priv->binding_mutex);
+ (void) vmw_context_binding_state_kill
+ (&container_of(res, struct vmw_user_context, res)->cbs);
(void) vmw_gb_context_destroy(res);
if (dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid)
__vmw_execbuf_release_pinned_bo(dev_priv, NULL);
+ mutex_unlock(&dev_priv->binding_mutex);
mutex_unlock(&dev_priv->cmdbuf_mutex);
return;
}
@@ -328,7 +334,7 @@ static int vmw_gb_context_unbind(struct vmw_resource *res,
BUG_ON(bo->mem.mem_type != VMW_PL_MOB);
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_state_kill(&uctx->cbs);
+ vmw_context_binding_state_scrub(&uctx->cbs);
submit_size = sizeof(*cmd2) + (readback ? sizeof(*cmd1) : 0);
@@ -378,10 +384,6 @@ static int vmw_gb_context_destroy(struct vmw_resource *res)
SVGA3dCmdHeader header;
SVGA3dCmdDestroyGBContext body;
} *cmd;
- struct vmw_user_context *uctx =
- container_of(res, struct vmw_user_context, res);
-
- BUG_ON(!list_empty(&uctx->cbs.list));
if (likely(res->id == -1))
return 0;
@@ -528,8 +530,9 @@ out_unlock:
* vmw_context_scrub_shader - scrub a shader binding from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of a scrub command.
*/
-static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi, bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -548,7 +551,7 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.shader_type;
- cmd->body.shid = SVGA3D_INVALID_ID;
+ cmd->body.shid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -559,8 +562,10 @@ static int vmw_context_scrub_shader(struct vmw_ctx_bindinfo *bi)
* from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of a scrub command.
*/
-static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -579,7 +584,7 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
cmd->header.size = sizeof(cmd->body);
cmd->body.cid = bi->ctx->id;
cmd->body.type = bi->i1.rt_type;
- cmd->body.target.sid = SVGA3D_INVALID_ID;
+ cmd->body.target.sid = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
cmd->body.target.face = 0;
cmd->body.target.mipmap = 0;
vmw_fifo_commit(dev_priv, sizeof(*cmd));
@@ -591,11 +596,13 @@ static int vmw_context_scrub_render_target(struct vmw_ctx_bindinfo *bi)
* vmw_context_scrub_texture - scrub a texture binding from a context.
*
* @bi: single binding information.
+ * @rebind: Whether to issue a bind instead of a scrub command.
*
* TODO: Possibly complement this function with a function that takes
* a list of texture bindings and combines them to a single command.
*/
-static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
+static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi,
+ bool rebind)
{
struct vmw_private *dev_priv = bi->ctx->dev_priv;
struct {
@@ -619,7 +626,7 @@ static int vmw_context_scrub_texture(struct vmw_ctx_bindinfo *bi)
cmd->body.c.cid = bi->ctx->id;
cmd->body.s1.stage = bi->i1.texture_stage;
cmd->body.s1.name = SVGA3D_TS_BIND_TEXTURE;
- cmd->body.s1.value = (uint32) SVGA3D_INVALID_ID;
+ cmd->body.s1.value = ((rebind) ? bi->res->id : SVGA3D_INVALID_ID);
vmw_fifo_commit(dev_priv, sizeof(*cmd));
return 0;
@@ -692,6 +699,7 @@ int vmw_context_binding_add(struct vmw_ctx_binding_state *cbs,
vmw_context_binding_drop(loc);
loc->bi = *bi;
+ loc->bi.scrubbed = false;
list_add_tail(&loc->ctx_list, &cbs->list);
INIT_LIST_HEAD(&loc->res_list);
@@ -727,12 +735,11 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
if (loc->bi.ctx != NULL)
vmw_context_binding_drop(loc);
- loc->bi = *bi;
- list_add_tail(&loc->ctx_list, &cbs->list);
- if (bi->res != NULL)
+ if (bi->res != NULL) {
+ loc->bi = *bi;
+ list_add_tail(&loc->ctx_list, &cbs->list);
list_add_tail(&loc->res_list, &bi->res->binding_head);
- else
- INIT_LIST_HEAD(&loc->res_list);
+ }
}
/**
@@ -746,7 +753,10 @@ static void vmw_context_binding_transfer(struct vmw_ctx_binding_state *cbs,
*/
static void vmw_context_binding_kill(struct vmw_ctx_binding *cb)
{
- (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi);
+ if (!cb->bi.scrubbed) {
+ (void) vmw_scrub_funcs[cb->bi.bt](&cb->bi, false);
+ cb->bi.scrubbed = true;
+ }
vmw_context_binding_drop(cb);
}
@@ -768,6 +778,27 @@ static void vmw_context_binding_state_kill(struct vmw_ctx_binding_state *cbs)
}
/**
+ * vmw_context_binding_state_scrub - Scrub all bindings associated with a
+ * struct vmw_ctx_binding state structure.
+ *
+ * @cbs: Pointer to the context binding state tracker.
+ *
+ * Emits commands to scrub all bindings associated with the
+ * context binding state tracker.
+ */
+static void vmw_context_binding_state_scrub(struct vmw_ctx_binding_state *cbs)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
* vmw_context_binding_res_list_kill - Kill all bindings on a
* resource binding list
*
@@ -785,6 +816,27 @@ void vmw_context_binding_res_list_kill(struct list_head *head)
}
/**
+ * vmw_context_binding_res_list_scrub - Scrub all bindings on a
+ * resource binding list
+ *
+ * @head: list head of resource binding list
+ *
+ * Scrub all bindings associated with a specific resource. Typically
+ * called before the resource is evicted.
+ */
+void vmw_context_binding_res_list_scrub(struct list_head *head)
+{
+ struct vmw_ctx_binding *entry;
+
+ list_for_each_entry(entry, head, res_list) {
+ if (!entry->bi.scrubbed) {
+ (void) vmw_scrub_funcs[entry->bi.bt](&entry->bi, false);
+ entry->bi.scrubbed = true;
+ }
+ }
+}
+
+/**
* vmw_context_binding_state_transfer - Commit staged binding info
*
* @ctx: Pointer to context to commit the staged binding info to.
@@ -803,3 +855,50 @@ void vmw_context_binding_state_transfer(struct vmw_resource *ctx,
list_for_each_entry_safe(entry, next, &from->list, ctx_list)
vmw_context_binding_transfer(&uctx->cbs, &entry->bi);
}
+
+/**
+ * vmw_context_rebind_all - Rebind all scrubbed bindings of a context
+ *
+ * @ctx: The context resource
+ *
+ * Walks through the context binding list and rebinds all scrubbed
+ * resources.
+ */
+int vmw_context_rebind_all(struct vmw_resource *ctx)
+{
+ struct vmw_ctx_binding *entry;
+ struct vmw_user_context *uctx =
+ container_of(ctx, struct vmw_user_context, res);
+ struct vmw_ctx_binding_state *cbs = &uctx->cbs;
+ int ret;
+
+ list_for_each_entry(entry, &cbs->list, ctx_list) {
+ if (likely(!entry->bi.scrubbed))
+ continue;
+
+ if (WARN_ON(entry->bi.res == NULL || entry->bi.res->id ==
+ SVGA3D_INVALID_ID))
+ continue;
+
+ ret = vmw_scrub_funcs[entry->bi.bt](&entry->bi, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ entry->bi.scrubbed = false;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_context_binding_list - Return a list of context bindings
+ *
+ * @ctx: The context resource
+ *
+ * Returns the current list of bindings of the given context. Note that
+ * this list becomes stale as soon as the dev_priv::binding_mutex is unlocked.
+ */
+struct list_head *vmw_context_binding_list(struct vmw_resource *ctx)
+{
+ return &(container_of(ctx, struct vmw_user_context, res)->cbs.list);
+}
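The context changes above add a per-binding scrubbed flag: eviction scrubs bindings (emits unbind commands) without forgetting them, and vmw_context_rebind_all() re-emits only the scrubbed ones before the next submission. A standalone model of that bookkeeping, with illustrative names rather than the vmwgfx structures:

#include <stdbool.h>
#include <stdio.h>

struct binding { const char *name; bool scrubbed; };

static void scrub_all(struct binding *b, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!b[i].scrubbed) {
			printf("scrub  %s\n", b[i].name);	/* emit unbind */
			b[i].scrubbed = true;
		}
	}
}

static void rebind_all(struct binding *b, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (b[i].scrubbed) {
			printf("rebind %s\n", b[i].name);	/* emit bind */
			b[i].scrubbed = false;
		}
	}
}

int main(void)
{
	struct binding b[] = {
		{ "shader",        false },
		{ "render target", false },
		{ "texture",       false },
	};

	scrub_all(b, 3);	/* context evicted or unbound */
	rebind_all(b, 3);	/* next command submission */
	return 0;
}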
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 9893328f8fdc..0083cbf99edf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -667,6 +667,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->memory_size = 512*1024*1024;
}
dev_priv->max_mob_pages = 0;
+ dev_priv->max_mob_size = 0;
if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
uint64_t mem_size =
vmw_read(dev_priv,
@@ -676,6 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem =
vmw_read(dev_priv,
SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
+ dev_priv->max_mob_size =
+ vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
} else
dev_priv->prim_bb_mem = dev_priv->vram_size;
@@ -941,6 +944,7 @@ static void vmw_postclose(struct drm_device *dev,
drm_master_put(&vmw_fp->locked_master);
}
+ vmw_compat_shader_man_destroy(vmw_fp->shman);
ttm_object_file_release(&vmw_fp->tfile);
kfree(vmw_fp);
}
@@ -960,11 +964,17 @@ static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
if (unlikely(vmw_fp->tfile == NULL))
goto out_no_tfile;
+ vmw_fp->shman = vmw_compat_shader_man_create(dev_priv);
+ if (IS_ERR(vmw_fp->shman))
+ goto out_no_shman;
+
file_priv->driver_priv = vmw_fp;
dev_priv->bdev.dev_mapping = dev->dev_mapping;
return 0;
+out_no_shman:
+ ttm_object_file_release(&vmw_fp->tfile);
out_no_tfile:
kfree(vmw_fp);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 554e7fa33082..07831554dad7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -40,7 +40,7 @@
#include <drm/ttm/ttm_module.h>
#include "vmwgfx_fence.h"
-#define VMWGFX_DRIVER_DATE "20121114"
+#define VMWGFX_DRIVER_DATE "20140228"
#define VMWGFX_DRIVER_MAJOR 2
#define VMWGFX_DRIVER_MINOR 5
#define VMWGFX_DRIVER_PATCHLEVEL 0
@@ -75,10 +75,14 @@
#define VMW_RES_FENCE ttm_driver_type3
#define VMW_RES_SHADER ttm_driver_type4
+struct vmw_compat_shader_manager;
+
struct vmw_fpriv {
struct drm_master *locked_master;
struct ttm_object_file *tfile;
struct list_head fence_events;
+ bool gb_aware;
+ struct vmw_compat_shader_manager *shman;
};
struct vmw_dma_buffer {
@@ -272,6 +276,7 @@ struct vmw_ctx_bindinfo {
struct vmw_resource *ctx;
struct vmw_resource *res;
enum vmw_ctx_binding_type bt;
+ bool scrubbed;
union {
SVGA3dShaderType shader_type;
SVGA3dRenderTargetType rt_type;
@@ -318,7 +323,7 @@ struct vmw_sw_context{
struct drm_open_hash res_ht;
bool res_ht_initialized;
bool kernel; /**< is the call made from the kernel */
- struct ttm_object_file *tfile;
+ struct vmw_fpriv *fp;
struct list_head validate_nodes;
struct vmw_relocation relocs[VMWGFX_MAX_RELOCATIONS];
uint32_t cur_reloc;
@@ -336,6 +341,7 @@ struct vmw_sw_context{
bool needs_post_query_barrier;
struct vmw_resource *error_resource;
struct vmw_ctx_binding_state staged_bindings;
+ struct list_head staged_shaders;
};
struct vmw_legacy_display;
@@ -380,6 +386,7 @@ struct vmw_private {
uint32_t max_gmr_ids;
uint32_t max_gmr_pages;
uint32_t max_mob_pages;
+ uint32_t max_mob_size;
uint32_t memory_size;
bool has_gmr;
bool has_mob;
@@ -569,6 +576,8 @@ struct vmw_user_resource_conv;
extern void vmw_resource_unreference(struct vmw_resource **p_res);
extern struct vmw_resource *vmw_resource_reference(struct vmw_resource *res);
+extern struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res);
extern int vmw_resource_validate(struct vmw_resource *res);
extern int vmw_resource_reserve(struct vmw_resource *res, bool no_backup);
extern bool vmw_resource_needs_backup(const struct vmw_resource *res);
@@ -957,6 +966,9 @@ extern void
vmw_context_binding_state_transfer(struct vmw_resource *res,
struct vmw_ctx_binding_state *cbs);
extern void vmw_context_binding_res_list_kill(struct list_head *head);
+extern void vmw_context_binding_res_list_scrub(struct list_head *head);
+extern int vmw_context_rebind_all(struct vmw_resource *ctx);
+extern struct list_head *vmw_context_binding_list(struct vmw_resource *ctx);
/*
* Surface management - vmwgfx_surface.c
@@ -991,6 +1003,28 @@ extern int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+extern int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key);
+extern void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list);
+extern int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key,
+ SVGA3dShaderType shader_type,
+ struct list_head *list);
+extern int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list);
+extern struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv);
+extern void
+vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man);
+
/**
* Inline helper functions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 7a5f1eb55c5a..efb575a7996c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -114,8 +114,10 @@ static void vmw_resource_list_unreserve(struct list_head *list,
* persistent context binding tracker.
*/
if (unlikely(val->staged_bindings)) {
- vmw_context_binding_state_transfer
- (val->res, val->staged_bindings);
+ if (!backoff) {
+ vmw_context_binding_state_transfer
+ (val->res, val->staged_bindings);
+ }
kfree(val->staged_bindings);
val->staged_bindings = NULL;
}
@@ -178,6 +180,44 @@ static int vmw_resource_val_add(struct vmw_sw_context *sw_context,
}
/**
+ * vmw_resource_context_res_add - Put resources previously bound to a context on
+ * the validation list
+ *
+ * @dev_priv: Pointer to a device private structure
+ * @sw_context: Pointer to a software context used for this command submission
+ * @ctx: Pointer to the context resource
+ *
+ * This function puts all resources that were previously bound to @ctx on
+ * the resource validation list. This is part of the context state reemission.
+ */
+static int vmw_resource_context_res_add(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ struct vmw_resource *ctx)
+{
+ struct list_head *binding_list;
+ struct vmw_ctx_binding *entry;
+ int ret = 0;
+ struct vmw_resource *res;
+
+ mutex_lock(&dev_priv->binding_mutex);
+ binding_list = vmw_context_binding_list(ctx);
+
+ list_for_each_entry(entry, binding_list, ctx_list) {
+ res = vmw_resource_reference_unless_doomed(entry->bi.res);
+ if (unlikely(res == NULL))
+ continue;
+
+ ret = vmw_resource_val_add(sw_context, entry->bi.res, NULL);
+ vmw_resource_unreference(&res);
+ if (unlikely(ret != 0))
+ break;
+ }
+
+ mutex_unlock(&dev_priv->binding_mutex);
+ return ret;
+}
+
+/**
* vmw_resource_relocation_add - Add a relocation to the relocation list
*
* @list: Pointer to head of relocation list.
@@ -233,8 +273,12 @@ static void vmw_resource_relocations_apply(uint32_t *cb,
{
struct vmw_resource_relocation *rel;
- list_for_each_entry(rel, list, head)
- cb[rel->offset] = rel->res->id;
+ list_for_each_entry(rel, list, head) {
+ if (likely(rel->res != NULL))
+ cb[rel->offset] = rel->res->id;
+ else
+ cb[rel->offset] = SVGA_3D_CMD_NOP;
+ }
}
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
@@ -379,22 +423,27 @@ static int vmw_resources_validate(struct vmw_sw_context *sw_context)
}
/**
- * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * vmw_cmd_compat_res_check - Check that a resource is present and if so, put it
* on the resource validate list unless it's already there.
*
* @dev_priv: Pointer to a device private structure.
* @sw_context: Pointer to the software context.
* @res_type: Resource type.
* @converter: User-space visible type specific information.
- * @id: Pointer to the location in the command buffer currently being
+ * @id: user-space resource id handle.
+ * @id_loc: Pointer to the location in the command buffer currently being
* parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
*/
-static int vmw_cmd_res_check(struct vmw_private *dev_priv,
- struct vmw_sw_context *sw_context,
- enum vmw_res_type res_type,
- const struct vmw_user_resource_conv *converter,
- uint32_t *id,
- struct vmw_resource_val_node **p_val)
+static int
+vmw_cmd_compat_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t id,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
{
struct vmw_res_cache_entry *rcache =
&sw_context->res_cache[res_type];
@@ -402,7 +451,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
struct vmw_resource_val_node *node;
int ret;
- if (*id == SVGA3D_INVALID_ID) {
+ if (id == SVGA3D_INVALID_ID) {
if (p_val)
*p_val = NULL;
if (res_type == vmw_res_context) {
@@ -417,7 +466,7 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
* resource
*/
- if (likely(rcache->valid && *id == rcache->handle)) {
+ if (likely(rcache->valid && id == rcache->handle)) {
const struct vmw_resource *res = rcache->res;
rcache->node->first_usage = false;
@@ -426,28 +475,28 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
return vmw_resource_relocation_add
(&sw_context->res_relocations, res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
}
ret = vmw_user_resource_lookup_handle(dev_priv,
- sw_context->tfile,
- *id,
+ sw_context->fp->tfile,
+ id,
converter,
&res);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use resource 0x%08x.\n",
- (unsigned) *id);
+ (unsigned) id);
dump_stack();
return ret;
}
rcache->valid = true;
rcache->res = res;
- rcache->handle = *id;
+ rcache->handle = id;
ret = vmw_resource_relocation_add(&sw_context->res_relocations,
res,
- id - sw_context->buf_start);
+ id_loc - sw_context->buf_start);
if (unlikely(ret != 0))
goto out_no_reloc;
@@ -459,7 +508,11 @@ static int vmw_cmd_res_check(struct vmw_private *dev_priv,
if (p_val)
*p_val = node;
- if (node->first_usage && res_type == vmw_res_context) {
+ if (dev_priv->has_mob && node->first_usage &&
+ res_type == vmw_res_context) {
+ ret = vmw_resource_context_res_add(dev_priv, sw_context, res);
+ if (unlikely(ret != 0))
+ goto out_no_reloc;
node->staged_bindings =
kzalloc(sizeof(*node->staged_bindings), GFP_KERNEL);
if (node->staged_bindings == NULL) {
@@ -481,6 +534,59 @@ out_no_reloc:
}
/**
+ * vmw_cmd_res_check - Check that a resource is present and if so, put it
+ * on the resource validate list unless it's already there.
+ *
+ * @dev_priv: Pointer to a device private structure.
+ * @sw_context: Pointer to the software context.
+ * @res_type: Resource type.
+ * @converter: User-space visible type specific information.
+ * @id_loc: Pointer to the location in the command buffer currently being
+ * parsed from where the user-space resource id handle is located.
+ * @p_val: Pointer to pointer to resource validation node. Populated
+ * on exit.
+ */
+static int
+vmw_cmd_res_check(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ enum vmw_res_type res_type,
+ const struct vmw_user_resource_conv *converter,
+ uint32_t *id_loc,
+ struct vmw_resource_val_node **p_val)
+{
+ return vmw_cmd_compat_res_check(dev_priv, sw_context, res_type,
+ converter, *id_loc, id_loc, p_val);
+}
+
+/**
+ * vmw_rebind_contexts - Rebind all resources previously bound to
+ * referenced contexts.
+ *
+ * @sw_context: Pointer to the software context.
+ *
+ * Rebind context binding points that have been scrubbed because of eviction.
+ */
+static int vmw_rebind_contexts(struct vmw_sw_context *sw_context)
+{
+ struct vmw_resource_val_node *val;
+ int ret;
+
+ list_for_each_entry(val, &sw_context->resource_list, head) {
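+		/* Only nodes with staged context bindings need rebinding. */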
+ if (likely(!val->staged_bindings))
+ continue;
+
+ ret = vmw_context_rebind_all(val->res);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Failed to rebind context.\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/**
* vmw_cmd_cid_check - Check a command header for valid context information.
*
* @dev_priv: Pointer to a device private structure.
@@ -496,7 +602,7 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
{
struct vmw_cid_cmd {
SVGA3dCmdHeader header;
- __le32 cid;
+ uint32_t cid;
} *cmd;
cmd = container_of(header, struct vmw_cid_cmd, header);
@@ -767,7 +873,7 @@ static int vmw_translate_mob_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use MOB buffer.\n");
return -EINVAL;
@@ -828,7 +934,7 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
struct vmw_relocation *reloc;
int ret;
- ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
+ ret = vmw_user_dmabuf_lookup(sw_context->fp->tfile, handle, &vmw_bo);
if (unlikely(ret != 0)) {
DRM_ERROR("Could not find or use GMR region.\n");
return -EINVAL;
@@ -1127,7 +1233,8 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
srf = vmw_res_to_srf(sw_context->res_cache[vmw_res_surface].res);
- vmw_kms_cursor_snoop(srf, sw_context->tfile, &vmw_bo->base, header);
+ vmw_kms_cursor_snoop(srf, sw_context->fp->tfile, &vmw_bo->base,
+ header);
out_no_surface:
vmw_dmabuf_unreference(&vmw_bo);
@@ -1478,6 +1585,98 @@ static int vmw_cmd_invalidate_gb_surface(struct vmw_private *dev_priv,
&cmd->body.sid, NULL);
}
+
+/**
+ * vmw_cmd_shader_define - Validate an SVGA_3D_CMD_SHADER_DEFINE
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_define(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_define_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDefineShader body;
+ } *cmd;
+ int ret;
+ size_t size;
+
+ cmd = container_of(header, struct vmw_shader_define_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ size = cmd->header.size - sizeof(cmd->body);
+ ret = vmw_compat_shader_add(sw_context->fp->shman,
+ cmd->body.shid, cmd + 1,
+ cmd->body.type, size,
+ sw_context->fp->tfile,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
+/**
+ * vmw_cmd_shader_destroy - Validate an SVGA_3D_CMD_SHADER_DESTROY
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_shader_destroy(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_shader_destroy_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdDestroyShader body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_shader_destroy_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(!dev_priv->has_mob))
+ return 0;
+
+ ret = vmw_compat_shader_remove(sw_context->fp->shman,
+ cmd->body.shid,
+ cmd->body.type,
+ &sw_context->staged_shaders);
+ if (unlikely(ret != 0))
+ return ret;
+
+ return vmw_resource_relocation_add(&sw_context->res_relocations,
+ NULL, &cmd->header.id -
+ sw_context->buf_start);
+}
+
/**
* vmw_cmd_set_shader - Validate an SVGA_3D_CMD_SET_SHADER
* command
@@ -1509,10 +1708,18 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
if (dev_priv->has_mob) {
struct vmw_ctx_bindinfo bi;
struct vmw_resource_val_node *res_node;
-
- ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_shader,
- user_shader_converter,
- &cmd->body.shid, &res_node);
+ u32 shid = cmd->body.shid;
+
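+		/* Map a legacy shader id to its guest-backed handle, if one exists. */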
+ if (shid != SVGA3D_INVALID_ID)
+ (void) vmw_compat_shader_lookup(sw_context->fp->shman,
+ cmd->body.type,
+ &shid);
+
+ ret = vmw_cmd_compat_res_check(dev_priv, sw_context,
+ vmw_res_shader,
+ user_shader_converter,
+ shid,
+ &cmd->body.shid, &res_node);
if (unlikely(ret != 0))
return ret;
@@ -1527,6 +1734,39 @@ static int vmw_cmd_set_shader(struct vmw_private *dev_priv,
}
/**
+ * vmw_cmd_set_shader_const - Validate an SVGA_3D_CMD_SET_SHADER_CONST
+ * command
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @sw_context: The software context being used for this batch.
+ * @header: Pointer to the command header in the command stream.
+ */
+static int vmw_cmd_set_shader_const(struct vmw_private *dev_priv,
+ struct vmw_sw_context *sw_context,
+ SVGA3dCmdHeader *header)
+{
+ struct vmw_set_shader_const_cmd {
+ SVGA3dCmdHeader header;
+ SVGA3dCmdSetShaderConst body;
+ } *cmd;
+ int ret;
+
+ cmd = container_of(header, struct vmw_set_shader_const_cmd,
+ header);
+
+ ret = vmw_cmd_res_check(dev_priv, sw_context, vmw_res_context,
+ user_context_converter, &cmd->body.cid,
+ NULL);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (dev_priv->has_mob)
+ header->id = SVGA_3D_CMD_SET_GB_SHADERCONSTS_INLINE;
+
+ return 0;
+}
+
+/**
* vmw_cmd_bind_gb_shader - Validate an SVGA_3D_CMD_BIND_GB_SHADER
* command
*
@@ -1595,7 +1835,7 @@ static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
return 0;
}
-static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
+static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid,
false, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid,
@@ -1634,14 +1874,14 @@ static const struct vmw_cmd_entry const vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check,
false, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check,
- true, true, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check,
- true, true, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_shader_define,
+ true, false, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_shader_destroy,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_set_shader,
true, false, false),
- VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check,
- true, true, false),
+ VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_set_shader_const,
+ true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw,
true, false, false),
VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check,
@@ -1792,6 +2032,9 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
goto out_invalid;
entry = &vmw_cmd_entries[cmd_id];
+ if (unlikely(!entry->func))
+ goto out_invalid;
+
if (unlikely(!entry->user_allow && !sw_context->kernel))
goto out_privileged;
@@ -2171,7 +2414,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
} else
sw_context->kernel = true;
- sw_context->tfile = vmw_fpriv(file_priv)->tfile;
+ sw_context->fp = vmw_fpriv(file_priv);
sw_context->cur_reloc = 0;
sw_context->cur_val_buf = 0;
sw_context->fence_flags = 0;
@@ -2188,16 +2431,17 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_unlock;
sw_context->res_ht_initialized = true;
}
+ INIT_LIST_HEAD(&sw_context->staged_shaders);
INIT_LIST_HEAD(&resource_list);
ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
command_size);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = vmw_resources_reserve(sw_context);
if (unlikely(ret != 0))
- goto out_err;
+ goto out_err_nores;
ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes);
if (unlikely(ret != 0))
@@ -2225,6 +2469,12 @@ int vmw_execbuf_process(struct drm_file *file_priv,
goto out_err;
}
+ if (dev_priv->has_mob) {
+ ret = vmw_rebind_contexts(sw_context);
+ if (unlikely(ret != 0))
+ goto out_unlock_binding;
+ }
+
cmd = vmw_fifo_reserve(dev_priv, command_size);
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving fifo space for commands.\n");
@@ -2276,6 +2526,8 @@ int vmw_execbuf_process(struct drm_file *file_priv,
}
list_splice_init(&sw_context->resource_list, &resource_list);
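+	/* Commands were submitted; make staged compat shader changes permanent. */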
+ vmw_compat_shaders_commit(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
@@ -2289,10 +2541,11 @@ int vmw_execbuf_process(struct drm_file *file_priv,
out_unlock_binding:
mutex_unlock(&dev_priv->binding_mutex);
out_err:
- vmw_resource_relocations_free(&sw_context->res_relocations);
- vmw_free_relocations(sw_context);
ttm_eu_backoff_reservation(&ticket, &sw_context->validate_nodes);
+out_err_nores:
vmw_resource_list_unreserve(&sw_context->resource_list, true);
+ vmw_resource_relocations_free(&sw_context->res_relocations);
+ vmw_free_relocations(sw_context);
vmw_clear_validations(sw_context);
if (unlikely(dev_priv->pinned_bo != NULL &&
!dev_priv->query_cid_valid))
@@ -2301,6 +2554,8 @@ out_unlock:
list_splice_init(&sw_context->resource_list, &resource_list);
error_resource = sw_context->error_resource;
sw_context->error_resource = NULL;
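+	/* Execbuf failed; undo any staged compat shader additions or removals. */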
+ vmw_compat_shaders_revert(sw_context->fp->shman,
+ &sw_context->staged_shaders);
mutex_unlock(&dev_priv->cmdbuf_mutex);
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 116c49736763..47b70949bf3a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -29,12 +29,18 @@
#include <drm/vmwgfx_drm.h>
#include "vmwgfx_kms.h"
+struct svga_3d_compat_cap {
+ SVGA3dCapsRecordHeader header;
+ SVGA3dCapPair pairs[SVGA3D_DEVCAP_MAX];
+};
+
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
struct drm_vmw_getparam_arg *param =
(struct drm_vmw_getparam_arg *)data;
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
switch (param->param) {
case DRM_VMW_PARAM_NUM_STREAMS:
@@ -60,6 +66,11 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
const struct vmw_fifo_state *fifo = &dev_priv->fifo;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS)) {
+ param->value = SVGA3D_HWVERSION_WS8_B1;
+ break;
+ }
+
param->value =
ioread32(fifo_mem +
((fifo->capabilities &
@@ -69,19 +80,31 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
break;
}
case DRM_VMW_PARAM_MAX_SURF_MEMORY:
- param->value = dev_priv->memory_size;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ !vmw_fp->gb_aware)
+ param->value = dev_priv->max_mob_pages * PAGE_SIZE / 2;
+ else
+ param->value = dev_priv->memory_size;
break;
case DRM_VMW_PARAM_3D_CAPS_SIZE:
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
- param->value = SVGA3D_DEVCAP_MAX;
+ if ((dev_priv->capabilities & SVGA_CAP_GBOBJECTS) &&
+ vmw_fp->gb_aware)
+ param->value = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ param->value = sizeof(struct svga_3d_compat_cap) +
+ sizeof(uint32_t);
else
param->value = (SVGA_FIFO_3D_CAPS_LAST -
- SVGA_FIFO_3D_CAPS + 1);
- param->value *= sizeof(uint32_t);
+ SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
break;
case DRM_VMW_PARAM_MAX_MOB_MEMORY:
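+		/* Clients that query MOB memory are marked as guest-backed aware. */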
+ vmw_fp->gb_aware = true;
param->value = dev_priv->max_mob_pages * PAGE_SIZE;
break;
+ case DRM_VMW_PARAM_MAX_MOB_SIZE:
+ param->value = dev_priv->max_mob_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
@@ -91,6 +114,38 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
return 0;
}
+static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
+ size_t size)
+{
+ struct svga_3d_compat_cap *compat_cap =
+ (struct svga_3d_compat_cap *) bounce;
+ unsigned int i;
+ size_t pair_offset = offsetof(struct svga_3d_compat_cap, pairs);
+ unsigned int max_size;
+
+ if (size < pair_offset)
+ return -EINVAL;
+
+ max_size = (size - pair_offset) / sizeof(SVGA3dCapPair);
+
+ if (max_size > SVGA3D_DEVCAP_MAX)
+ max_size = SVGA3D_DEVCAP_MAX;
+
+ compat_cap->header.length =
+ (pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
+ compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
+
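+	/* Read each device cap register into an (index, value) pair. */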
+ mutex_lock(&dev_priv->hw_mutex);
+ for (i = 0; i < max_size; ++i) {
+ vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
+ compat_cap->pairs[i][0] = i;
+ compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
+ }
+ mutex_unlock(&dev_priv->hw_mutex);
+
+ return 0;
+}
+
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -104,41 +159,49 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
void *bounce;
int ret;
bool gb_objects = !!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS);
+ struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
if (unlikely(arg->pad64 != 0)) {
DRM_ERROR("Illegal GET_3D_CAP argument.\n");
return -EINVAL;
}
- if (gb_objects)
- size = SVGA3D_DEVCAP_MAX;
+ if (gb_objects && vmw_fp->gb_aware)
+ size = SVGA3D_DEVCAP_MAX * sizeof(uint32_t);
+ else if (gb_objects)
+ size = sizeof(struct svga_3d_compat_cap) + sizeof(uint32_t);
else
- size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1);
-
- size *= sizeof(uint32_t);
+ size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) *
+ sizeof(uint32_t);
if (arg->max_size < size)
size = arg->max_size;
- bounce = vmalloc(size);
+ bounce = vzalloc(size);
if (unlikely(bounce == NULL)) {
DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
return -ENOMEM;
}
- if (gb_objects) {
- int i;
+ if (gb_objects && vmw_fp->gb_aware) {
+ int i, num;
uint32_t *bounce32 = (uint32_t *) bounce;
+ num = size / sizeof(uint32_t);
+ if (num > SVGA3D_DEVCAP_MAX)
+ num = SVGA3D_DEVCAP_MAX;
+
mutex_lock(&dev_priv->hw_mutex);
- for (i = 0; i < SVGA3D_DEVCAP_MAX; ++i) {
+ for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
mutex_unlock(&dev_priv->hw_mutex);
-
+ } else if (gb_objects) {
+ ret = vmw_fill_compat_cap(dev_priv, bounce, size);
+ if (unlikely(ret != 0))
+ goto out_err;
} else {
-
fifo_mem = dev_priv->mmio_virt;
memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);
}
@@ -146,6 +209,7 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
ret = copy_to_user(buffer, bounce, size);
if (ret)
ret = -EFAULT;
+out_err:
vfree(bounce);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
index 4910e7b81811..04a64b8cd3cd 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_mob.c
@@ -134,6 +134,7 @@ static int vmw_setup_otable_base(struct vmw_private *dev_priv,
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
+ ret = -ENOMEM;
goto out_no_fifo;
}
@@ -187,18 +188,20 @@ static void vmw_takedown_otable_base(struct vmw_private *dev_priv,
bo = otable->page_table->pt_bo;
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL))
- DRM_ERROR("Failed reserving FIFO space for OTable setup.\n");
-
- memset(cmd, 0, sizeof(*cmd));
- cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.type = type;
- cmd->body.baseAddress = 0;
- cmd->body.sizeInBytes = 0;
- cmd->body.validSizeInBytes = 0;
- cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ if (unlikely(cmd == NULL)) {
+ DRM_ERROR("Failed reserving FIFO space for OTable "
+ "takedown.\n");
+ } else {
+ memset(cmd, 0, sizeof(*cmd));
+ cmd->header.id = SVGA_3D_CMD_SET_OTABLE_BASE;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.type = type;
+ cmd->body.baseAddress = 0;
+ cmd->body.sizeInBytes = 0;
+ cmd->body.validSizeInBytes = 0;
+ cmd->body.ptDepth = SVGA3D_MOBFMT_INVALID;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
+ }
if (bo) {
int ret;
@@ -561,11 +564,12 @@ void vmw_mob_unbind(struct vmw_private *dev_priv,
if (unlikely(cmd == NULL)) {
DRM_ERROR("Failed reserving FIFO space for Memory "
"Object unbinding.\n");
+ } else {
+ cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
+ cmd->header.size = sizeof(cmd->body);
+ cmd->body.mobid = mob->id;
+ vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
- cmd->header.id = SVGA_3D_CMD_DESTROY_GB_MOB;
- cmd->header.size = sizeof(cmd->body);
- cmd->body.mobid = mob->id;
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
if (bo) {
vmw_fence_single_bo(bo, NULL);
ttm_bo_unreserve(bo);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 6fdd82d42f65..9757b57f8388 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -88,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
/**
* vmw_resource_release_id - release a resource id to the id manager.
@@ -136,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
vmw_dmabuf_unreference(&res->backup);
}
- if (likely(res->hw_destroy != NULL))
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
id = res->id;
if (res->res_free != NULL)
@@ -418,8 +427,7 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
INIT_LIST_HEAD(&vmw_bo->res_list);
ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- (user) ? ttm_bo_type_device :
- ttm_bo_type_kernel, placement,
+ ttm_bo_type_device, placement,
0, interruptible,
NULL, acc_size, NULL, bo_free);
return ret;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
index 1457ec4b7125..ee3856578a12 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_shader.c
@@ -29,6 +29,8 @@
#include "vmwgfx_resource_priv.h"
#include "ttm/ttm_placement.h"
+#define VMW_COMPAT_SHADER_HT_ORDER 12
+
struct vmw_shader {
struct vmw_resource res;
SVGA3dShaderType type;
@@ -40,6 +42,50 @@ struct vmw_user_shader {
struct vmw_shader shader;
};
+/**
+ * enum vmw_compat_shader_state - Staging state for compat shaders
+ */
+enum vmw_compat_shader_state {
+ VMW_COMPAT_COMMITED,
+ VMW_COMPAT_ADD,
+ VMW_COMPAT_DEL
+};
+
+/**
+ * struct vmw_compat_shader - Metadata for compat shaders.
+ *
+ * @handle: The TTM handle of the guest backed shader.
+ * @tfile: The struct ttm_object_file the guest backed shader is registered
+ * with.
+ * @hash: Hash item for lookup.
+ * @head: List head for staging lists or the compat shader manager list.
+ * @state: Staging state.
+ *
+ * The structure is protected by the cmdbuf lock.
+ */
+struct vmw_compat_shader {
+ u32 handle;
+ struct ttm_object_file *tfile;
+ struct drm_hash_item hash;
+ struct list_head head;
+ enum vmw_compat_shader_state state;
+};
+
+/**
+ * struct vmw_compat_shader_manager - Compat shader manager.
+ *
+ * @shaders: Hash table containing staged and committed compat shaders.
+ * @list: List of committed shaders.
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * @shaders and @list are protected by the cmdbuf mutex for now.
+ */
+struct vmw_compat_shader_manager {
+ struct drm_open_hash shaders;
+ struct list_head list;
+ struct vmw_private *dev_priv;
+};
+
static void vmw_user_shader_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_shader_base_to_res(struct ttm_base_object *base);
@@ -258,7 +304,7 @@ static int vmw_gb_shader_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
@@ -325,13 +371,81 @@ int vmw_shader_destroy_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
+static int vmw_shader_alloc(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *buffer,
+ size_t shader_size,
+ size_t offset,
+ SVGA3dShaderType shader_type,
+ struct ttm_object_file *tfile,
+ u32 *handle)
+{
+ struct vmw_user_shader *ushader;
+ struct vmw_resource *res, *tmp;
+ int ret;
+
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of shaders anyway.
+ */
+ if (unlikely(vmw_user_shader_size == 0))
+ vmw_user_shader_size =
+ ttm_round_pot(sizeof(struct vmw_user_shader)) + 128;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for shader "
+ "creation.\n");
+ goto out;
+ }
+
+ ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
+ if (unlikely(ushader == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_shader_size);
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ res = &ushader->shader.res;
+ ushader->base.shareable = false;
+ ushader->base.tfile = NULL;
+
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
+ ret = vmw_gb_shader_init(dev_priv, res, shader_size,
+ offset, shader_type, buffer,
+ vmw_user_shader_free);
+ if (unlikely(ret != 0))
+ goto out;
+
+ tmp = vmw_resource_reference(res);
+ ret = ttm_base_object_init(tfile, &ushader->base, false,
+ VMW_RES_SHADER,
+ &vmw_user_shader_base_release, NULL);
+
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&tmp);
+ goto out_err;
+ }
+
+ if (handle)
+ *handle = ushader->base.hash.key;
+out_err:
+ vmw_resource_unreference(&res);
+out:
+ return ret;
+}
+
+
int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_shader *ushader;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
struct drm_vmw_shader_create_arg *arg =
(struct drm_vmw_shader_create_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
@@ -373,69 +487,326 @@ int vmw_shader_define_ioctl(struct drm_device *dev, void *data,
goto out_bad_arg;
}
- /*
- * Approximate idr memory usage with 128 bytes. It will be limited
- * by maximum number_of shaders anyway.
- */
+ ret = ttm_read_lock(&vmaster->lock, true);
+ if (unlikely(ret != 0))
+ goto out_bad_arg;
- if (unlikely(vmw_user_shader_size == 0))
- vmw_user_shader_size = ttm_round_pot(sizeof(*ushader))
- + 128;
+ ret = vmw_shader_alloc(dev_priv, buffer, arg->size, arg->offset,
+ shader_type, tfile, &arg->shader_handle);
- ret = ttm_read_lock(&vmaster->lock, true);
+ ttm_read_unlock(&vmaster->lock);
+out_bad_arg:
+ vmw_dmabuf_unreference(&buffer);
+ return ret;
+}
+
+/**
+ * vmw_compat_shader_lookup - Look up a compat shader
+ *
+ * @man: Pointer to the compat shader manager.
+ * @shader_type: The shader type, that combined with the user_key identifies
+ * the shader.
+ * @user_key: On entry, pointer to the user key that identifies the shader.
+ * On successful exit, the pointed-to value is replaced with the guest-backed
+ * shader's TTM handle.
+ *
+ * Returns 0 on success. Non-zero on failure, in which case the value pointed
+ * to by @user_key is unmodified.
+ */
+int vmw_compat_shader_lookup(struct vmw_compat_shader_manager *man,
+ SVGA3dShaderType shader_type,
+ u32 *user_key)
+{
+ struct drm_hash_item *hash;
+ int ret;
+ unsigned long key = *user_key | (shader_type << 24);
+
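+	/* The shader type is encoded in the bits above the 24-bit user key. */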
+ ret = drm_ht_find_item(&man->shaders, key, &hash);
if (unlikely(ret != 0))
return ret;
- ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
- vmw_user_shader_size,
- false, true);
- if (unlikely(ret != 0)) {
- if (ret != -ERESTARTSYS)
- DRM_ERROR("Out of graphics memory for shader"
- " creation.\n");
- goto out_unlock;
+ *user_key = drm_hash_entry(hash, struct vmw_compat_shader,
+ hash)->handle;
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_free - Free a compat shader.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @entry: Pointer to a struct vmw_compat_shader.
+ *
+ * Frees a struct vmw_compat_shader entry and drops its reference to the
+ * guest backed shader.
+ */
+static void vmw_compat_shader_free(struct vmw_compat_shader_manager *man,
+ struct vmw_compat_shader *entry)
+{
+ list_del(&entry->head);
+ WARN_ON(drm_ht_remove_item(&man->shaders, &entry->hash));
+ WARN_ON(ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE));
+ kfree(entry);
+}
+
+/**
+ * vmw_compat_shaders_commit - Commit a list of compat shader actions.
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function commits a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions has committed the fifo contents to the device.
+ */
+void vmw_compat_shaders_commit(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ list_del(&entry->head);
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ entry->state = VMW_COMPAT_COMMITED;
+ list_add_tail(&entry->head, &man->list);
+ break;
+ case VMW_COMPAT_DEL:
+ ttm_ref_object_base_unref(entry->tfile, entry->handle,
+ TTM_REF_USAGE);
+ kfree(entry);
+ break;
+ default:
+ BUG();
+ break;
+ }
}
+}
- ushader = kzalloc(sizeof(*ushader), GFP_KERNEL);
- if (unlikely(ushader == NULL)) {
- ttm_mem_global_free(vmw_mem_glob(dev_priv),
- vmw_user_shader_size);
- ret = -ENOMEM;
- goto out_unlock;
+/**
+ * vmw_compat_shaders_revert - Revert a list of compat shader actions
+ *
+ * @man: Pointer to the compat shader manager.
+ * @list: Caller's list of compat shader actions.
+ *
+ * This function reverts a list of compat shader additions or removals.
+ * It is typically called when the execbuf ioctl call triggering these
+ * actions failed for some reason, and the command stream was never
+ * submitted.
+ */
+void vmw_compat_shaders_revert(struct vmw_compat_shader_manager *man,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry, *next;
+ int ret;
+
+ list_for_each_entry_safe(entry, next, list, head) {
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_DEL:
+ ret = drm_ht_insert_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ list_add_tail(&entry->head, &man->list);
+ entry->state = VMW_COMPAT_COMMITED;
+ break;
+ default:
+ BUG();
+ break;
+ }
}
+}
- res = &ushader->shader.res;
- ushader->base.shareable = false;
- ushader->base.tfile = NULL;
+/**
+ * vmw_compat_shader_remove - Stage a compat shader for removal.
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @shader_type: Shader type.
+ * @list: Caller's list of staged shader actions.
+ *
+ * This function stages a compat shader for removal and removes the key from
+ * the shader manager's hash table. If the shader was previously only staged
+ * for addition it is completely removed (but the execbuf code may keep a
+ * reference if it was bound to a context between addition and removal). If
+ * it was previously committed to the manager, it is staged for removal.
+ */
+int vmw_compat_shader_remove(struct vmw_compat_shader_manager *man,
+ u32 user_key, SVGA3dShaderType shader_type,
+ struct list_head *list)
+{
+ struct vmw_compat_shader *entry;
+ struct drm_hash_item *hash;
+ int ret;
- /*
- * From here on, the destructor takes over resource freeing.
- */
+ ret = drm_ht_find_item(&man->shaders, user_key | (shader_type << 24),
+ &hash);
+ if (likely(ret != 0))
+ return -EINVAL;
- ret = vmw_gb_shader_init(dev_priv, res, arg->size,
- arg->offset, shader_type, buffer,
- vmw_user_shader_free);
+ entry = drm_hash_entry(hash, struct vmw_compat_shader, hash);
+
+ switch (entry->state) {
+ case VMW_COMPAT_ADD:
+ vmw_compat_shader_free(man, entry);
+ break;
+ case VMW_COMPAT_COMMITED:
+ (void) drm_ht_remove_item(&man->shaders, &entry->hash);
+ list_del(&entry->head);
+ entry->state = VMW_COMPAT_DEL;
+ list_add_tail(&entry->head, list);
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_compat_shader_add - Create a compat shader and add the
+ * key to the manager
+ *
+ * @man: Pointer to the compat shader manager
+ * @user_key: The key that is used to identify the shader. The key is
+ * unique to the shader type.
+ * @bytecode: Pointer to the bytecode of the shader.
+ * @shader_type: Shader type.
+ * @size: Size of the shader bytecode, in bytes.
+ * @tfile: Pointer to a struct ttm_object_file that the guest-backed shader is
+ * to be created with.
+ * @list: Caller's list of staged shader actions.
+ *
+ * Note that only the key is added to the shader manager's hash table.
+ * The shader is not yet added to the shader manager's list of shaders.
+ */
+int vmw_compat_shader_add(struct vmw_compat_shader_manager *man,
+ u32 user_key, const void *bytecode,
+ SVGA3dShaderType shader_type,
+ size_t size,
+ struct ttm_object_file *tfile,
+ struct list_head *list)
+{
+ struct vmw_dma_buffer *buf;
+ struct ttm_bo_kmap_obj map;
+ bool is_iomem;
+ struct vmw_compat_shader *compat;
+ u32 handle;
+ int ret;
+
+ if (user_key > ((1 << 24) - 1) || (unsigned) shader_type > 16)
+ return -EINVAL;
+
+ /* Allocate and pin a DMA buffer */
+ buf = kzalloc(sizeof(*buf), GFP_KERNEL);
+ if (unlikely(buf == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(man->dev_priv, buf, size, &vmw_sys_ne_placement,
+ true, vmw_dmabuf_bo_free);
if (unlikely(ret != 0))
- goto out_unlock;
+ goto out;
- tmp = vmw_resource_reference(res);
- ret = ttm_base_object_init(tfile, &ushader->base, false,
- VMW_RES_SHADER,
- &vmw_user_shader_base_release, NULL);
+ ret = ttm_bo_reserve(&buf->base, false, true, false, NULL);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /* Map and copy shader bytecode. */
+ ret = ttm_bo_kmap(&buf->base, 0, PAGE_ALIGN(size) >> PAGE_SHIFT,
+ &map);
if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
+ ttm_bo_unreserve(&buf->base);
+ goto no_reserve;
}
- arg->shader_handle = ushader->base.hash.key;
-out_err:
- vmw_resource_unreference(&res);
-out_unlock:
- ttm_read_unlock(&vmaster->lock);
-out_bad_arg:
- vmw_dmabuf_unreference(&buffer);
+ memcpy(ttm_kmap_obj_virtual(&map, &is_iomem), bytecode, size);
+ WARN_ON(is_iomem);
+
+ ttm_bo_kunmap(&map);
+ ret = ttm_bo_validate(&buf->base, &vmw_sys_placement, false, true);
+ WARN_ON(ret != 0);
+ ttm_bo_unreserve(&buf->base);
+
+ /* Create a guest-backed shader container backed by the dma buffer */
+ ret = vmw_shader_alloc(man->dev_priv, buf, size, 0, shader_type,
+ tfile, &handle);
+ vmw_dmabuf_unreference(&buf);
+ if (unlikely(ret != 0))
+ goto no_reserve;
+ /*
+ * Create a compat shader structure and stage it for insertion
+ * in the manager
+ */
+ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+	if (compat == NULL) {
+		ret = -ENOMEM;
+		goto no_compat;
+	}
+
+ compat->hash.key = user_key | (shader_type << 24);
+ ret = drm_ht_insert_item(&man->shaders, &compat->hash);
+ if (unlikely(ret != 0))
+ goto out_invalid_key;
+
+ compat->state = VMW_COMPAT_ADD;
+ compat->handle = handle;
+ compat->tfile = tfile;
+ list_add_tail(&compat->head, list);
+ return 0;
+
+out_invalid_key:
+ kfree(compat);
+no_compat:
+ ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
+no_reserve:
+out:
return ret;
+}
+
+/**
+ * vmw_compat_shader_man_create - Create a compat shader manager
+ *
+ * @dev_priv: Pointer to a device private structure.
+ *
+ * Typically done at file open time. If successful, returns a pointer to a
+ * compat shader manager; otherwise returns an error pointer.
+ */
+struct vmw_compat_shader_manager *
+vmw_compat_shader_man_create(struct vmw_private *dev_priv)
+{
+ struct vmw_compat_shader_manager *man;
+ int ret;
+
+ man = kzalloc(sizeof(*man), GFP_KERNEL);
+ if (man == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ man->dev_priv = dev_priv;
+ INIT_LIST_HEAD(&man->list);
+ ret = drm_ht_create(&man->shaders, VMW_COMPAT_SHADER_HT_ORDER);
+ if (ret == 0)
+ return man;
+
+ kfree(man);
+ return ERR_PTR(ret);
+}
+
+/**
+ * vmw_compat_shader_man_destroy - Destroy a compat shader manager
+ *
+ * @man: Pointer to the shader manager to destroy.
+ *
+ * Typically done at file close time.
+ */
+void vmw_compat_shader_man_destroy(struct vmw_compat_shader_manager *man)
+{
+ struct vmw_compat_shader *entry, *next;
+
+ mutex_lock(&man->dev_priv->cmdbuf_mutex);
+ list_for_each_entry_safe(entry, next, &man->list, head)
+ vmw_compat_shader_free(man, entry);
+ mutex_unlock(&man->dev_priv->cmdbuf_mutex);
+ kfree(man);
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 979da1c246a5..e7af580ab977 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
if (unlikely(ret != 0))
goto out_unlock;
+ /*
+ * A gb-aware client referencing a shared surface will
+ * expect a backup buffer to be present.
+ */
+ if (dev_priv->has_mob && req->shareable) {
+ uint32_t backup_handle;
+
+ ret = vmw_user_dmabuf_alloc(dev_priv, tfile,
+ res->backup_size,
+ true,
+ &backup_handle,
+ &res->backup);
+ if (unlikely(ret != 0)) {
+ vmw_resource_unreference(&res);
+ goto out_unlock;
+ }
+ }
+
tmp = vmw_resource_reference(&srf->res);
ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
req->shareable, VMW_RES_SURFACE,
@@ -908,8 +926,8 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
rep->size_addr;
if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ ret = copy_to_user(user_sizes, &srf->base_size,
+ sizeof(srf->base_size));
if (unlikely(ret != 0)) {
DRM_ERROR("copy_to_user failed %p %u\n",
user_sizes, srf->num_sizes);
@@ -1111,7 +1129,7 @@ static int vmw_gb_surface_destroy(struct vmw_resource *res)
return 0;
mutex_lock(&dev_priv->binding_mutex);
- vmw_context_binding_res_list_kill(&res->binding_head);
+ vmw_context_binding_res_list_scrub(&res->binding_head);
cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
if (unlikely(cmd == NULL)) {
diff --git a/drivers/gpu/host1x/job.c b/drivers/gpu/host1x/job.c
index 1146e3bba6e1..112f27e51bc7 100644
--- a/drivers/gpu/host1x/job.c
+++ b/drivers/gpu/host1x/job.c
@@ -538,7 +538,7 @@ int host1x_job_pin(struct host1x_job *job, struct device *dev)
g->base = job->gather_addr_phys[i];
- for (j = 0; j < job->num_gathers; j++)
+ for (j = i + 1; j < job->num_gathers; j++)
if (job->gathers[j].bo == g->bo)
job->gathers[j].handled = true;
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 497558127bb3..f822fd2a1ada 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -469,6 +469,9 @@ static const struct hid_device_id apple_devices[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS),
+ .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_JIS),
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3bfac3accd22..cc32a6f96c64 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1679,6 +1679,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_AUREAL, USB_DEVICE_ID_AUREAL_W01RN) },
@@ -1779,6 +1780,8 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_USB) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
{ HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN_1) },
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index 8fae6d1414cc..c24908f14934 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -157,6 +157,7 @@ struct mousevsc_dev {
u32 report_desc_size;
struct hv_input_dev_info hid_dev_info;
struct hid_device *hid_device;
+ u8 input_buf[HID_MAX_BUFFER_SIZE];
};
@@ -256,6 +257,7 @@ static void mousevsc_on_receive(struct hv_device *device,
struct synthhid_msg *hid_msg;
struct mousevsc_dev *input_dev = hv_get_drvdata(device);
struct synthhid_input_report *input_report;
+ size_t len;
pipe_msg = (struct pipe_prt_msg *)((unsigned long)packet +
(packet->offset8 << 3));
@@ -300,9 +302,12 @@ static void mousevsc_on_receive(struct hv_device *device,
(struct synthhid_input_report *)pipe_msg->data;
if (!input_dev->init_complete)
break;
- hid_input_report(input_dev->hid_device,
- HID_INPUT_REPORT, input_report->buffer,
- input_report->header.size, 1);
+
+ len = min(input_report->header.size,
+ (u32)sizeof(input_dev->input_buf));
+ memcpy(input_dev->input_buf, input_report->buffer, len);
+ hid_input_report(input_dev->hid_device, HID_INPUT_REPORT,
+ input_dev->input_buf, len, 1);
break;
default:
pr_err("unsupported hid msg type - type %d len %d",
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 5a5248f2cc07..22f28d6b33a8 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -135,6 +135,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS 0x023b
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI 0x0255
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
+#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -240,6 +241,7 @@
#define USB_VENDOR_ID_CYGNAL 0x10c4
#define USB_DEVICE_ID_CYGNAL_RADIO_SI470X 0x818a
+#define USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH 0x81b9
#define USB_DEVICE_ID_CYGNAL_RADIO_SI4713 0x8244
@@ -451,6 +453,9 @@
#define USB_VENDOR_ID_INTEL_1 0x8087
#define USB_DEVICE_ID_INTEL_HID_SENSOR 0x09fa
+#define USB_VENDOR_ID_STM_0 0x0483
+#define USB_DEVICE_ID_STM_HID_SENSOR 0x91d1
+
#define USB_VENDOR_ID_ION 0x15e4
#define USB_DEVICE_ID_ICADE 0x0132
@@ -619,6 +624,8 @@
#define USB_DEVICE_ID_MS_PRESENTER_8K_USB 0x0713
#define USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K 0x0730
#define USB_DEVICE_ID_MS_COMFORT_MOUSE_4500 0x076c
+#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
+#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
#define USB_VENDOR_ID_MOJO 0x8282
#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -644,6 +651,7 @@
#define USB_VENDOR_ID_NEXIO 0x1870
#define USB_DEVICE_ID_NEXIO_MULTITOUCH_420 0x010d
+#define USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750 0x0110
#define USB_VENDOR_ID_NEXTWINDOW 0x1926
#define USB_DEVICE_ID_NEXTWINDOW_TOUCHSCREEN 0x0003
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index d50e7313b171..a713e6211419 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -1178,7 +1178,7 @@ static void hidinput_led_worker(struct work_struct *work)
/* fall back to generic raw-output-report */
len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
- buf = kmalloc(len, GFP_KERNEL);
+ buf = hid_alloc_report_buf(report, GFP_KERNEL);
if (!buf)
return;
diff --git a/drivers/hid/hid-lg4ff.c b/drivers/hid/hid-lg4ff.c
index befe0e336471..24883b4d1a49 100644
--- a/drivers/hid/hid-lg4ff.c
+++ b/drivers/hid/hid-lg4ff.c
@@ -43,6 +43,7 @@
#define G25_REV_MIN 0x22
#define G27_REV_MAJ 0x12
#define G27_REV_MIN 0x38
+#define G27_2_REV_MIN 0x39
#define to_hid_device(pdev) container_of(pdev, struct hid_device, dev)
@@ -130,6 +131,7 @@ static const struct lg4ff_usb_revision lg4ff_revs[] = {
{DFP_REV_MAJ, DFP_REV_MIN, &native_dfp}, /* Driving Force Pro */
{G25_REV_MAJ, G25_REV_MIN, &native_g25}, /* G25 */
{G27_REV_MAJ, G27_REV_MIN, &native_g27}, /* G27 */
+ {G27_REV_MAJ, G27_2_REV_MIN, &native_g27}, /* G27 v2 */
};
/* Recalculates X axis value accordingly to currently selected range */
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index c6ef6eed3091..404a3a8a82f1 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -208,6 +208,10 @@ static const struct hid_device_id ms_devices[] = {
.driver_data = MS_NOGET },
{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
.driver_data = MS_DUPLICATE_USAGES },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2),
+ .driver_data = 0 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2),
+ .driver_data = 0 },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index f134d73beca1..221d503f1c24 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -1166,6 +1166,11 @@ static const struct hid_device_id mt_devices[] = {
MT_USB_DEVICE(USB_VENDOR_ID_FLATFROG,
USB_DEVICE_ID_MULTITOUCH_3200) },
+ /* FocalTech Panels */
+ { .driver_data = MT_CLS_SERIAL,
+ MT_USB_DEVICE(USB_VENDOR_ID_CYGNAL,
+ USB_DEVICE_ID_FOCALTECH_FTXXXX_MULTITOUCH) },
+
/* GeneralTouch panel */
{ .driver_data = MT_CLS_GENERALTOUCH_TWOFINGERS,
MT_USB_DEVICE(USB_VENDOR_ID_GENERAL_TOUCH,
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index 46f4480035bc..9c22e14c57f0 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -665,6 +665,9 @@ static const struct hid_device_id sensor_hub_devices[] = {
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_INTEL_1,
USB_DEVICE_ID_INTEL_HID_SENSOR),
.driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
+ { HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, USB_VENDOR_ID_STM_0,
+ USB_DEVICE_ID_STM_HID_SENSOR),
+ .driver_data = HID_SENSOR_HUB_ENUM_QUIRK},
{ HID_DEVICE(HID_BUS_ANY, HID_GROUP_SENSOR_HUB, HID_ANY_ID,
HID_ANY_ID) },
{ }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 12354055d474..2f19b15f47f2 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -42,6 +42,7 @@
#define DUALSHOCK4_CONTROLLER_BT BIT(6)
#define SONY_LED_SUPPORT (SIXAXIS_CONTROLLER_USB | BUZZ_CONTROLLER | DUALSHOCK4_CONTROLLER_USB)
+#define SONY_FF_SUPPORT (SIXAXIS_CONTROLLER_USB | DUALSHOCK4_CONTROLLER_USB)
#define MAX_LEDS 4
@@ -499,6 +500,7 @@ struct sony_sc {
__u8 right;
#endif
+ __u8 worker_initialized;
__u8 led_state[MAX_LEDS];
__u8 led_count;
};
@@ -993,22 +995,11 @@ static int sony_init_ff(struct hid_device *hdev)
return input_ff_create_memless(input_dev, NULL, sony_play_effect);
}
-static void sony_destroy_ff(struct hid_device *hdev)
-{
- struct sony_sc *sc = hid_get_drvdata(hdev);
-
- cancel_work_sync(&sc->state_worker);
-}
-
#else
static int sony_init_ff(struct hid_device *hdev)
{
return 0;
}
-
-static void sony_destroy_ff(struct hid_device *hdev)
-{
-}
#endif
static int sony_set_output_report(struct sony_sc *sc, int req_id, int req_size)
@@ -1077,6 +1068,8 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
hdev->hid_output_raw_report = sixaxis_usb_output_raw_report;
ret = sixaxis_set_operational_usb(hdev);
+
+ sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, sixaxis_state_worker);
}
else if (sc->quirks & SIXAXIS_CONTROLLER_BT)
@@ -1087,6 +1080,7 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
if (ret < 0)
goto err_stop;
+ sc->worker_initialized = 1;
INIT_WORK(&sc->state_worker, dualshock4_state_worker);
} else {
ret = 0;
@@ -1101,9 +1095,11 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
goto err_stop;
}
- ret = sony_init_ff(hdev);
- if (ret < 0)
- goto err_stop;
+ if (sc->quirks & SONY_FF_SUPPORT) {
+ ret = sony_init_ff(hdev);
+ if (ret < 0)
+ goto err_stop;
+ }
return 0;
err_stop:
@@ -1120,7 +1116,8 @@ static void sony_remove(struct hid_device *hdev)
if (sc->quirks & SONY_LED_SUPPORT)
sony_leds_remove(hdev);
- sony_destroy_ff(hdev);
+ if (sc->worker_initialized)
+ cancel_work_sync(&sc->state_worker);
hid_hw_stop(hdev);
}
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index cb0137b3718d..ab24ce2eb28f 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -320,13 +320,13 @@ static void drop_ref(struct hidraw *hidraw, int exists_bit)
hid_hw_close(hidraw->hid);
wake_up_interruptible(&hidraw->wait);
}
+ device_destroy(hidraw_class,
+ MKDEV(hidraw_major, hidraw->minor));
} else {
--hidraw->open;
}
if (!hidraw->open) {
if (!hidraw->exist) {
- device_destroy(hidraw_class,
- MKDEV(hidraw_major, hidraw->minor));
hidraw_table[hidraw->minor] = NULL;
kfree(hidraw);
} else {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d1f81f52481a..42eebd14de1f 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -582,7 +582,7 @@ static void i2c_hid_request(struct hid_device *hid, struct hid_report *rep,
int ret;
int len = i2c_hid_get_report_length(rep) - 2;
- buf = kzalloc(len, GFP_KERNEL);
+ buf = hid_alloc_report_buf(rep, GFP_KERNEL);
if (!buf)
return;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 175ec0afb70c..dbd83878ff99 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -74,6 +74,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
+ { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index af6edf9b1936..f2d7bf90c9fe 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -67,7 +67,6 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
int ret = 0;
struct vmbus_channel_initiate_contact *msg;
unsigned long flags;
- int t;
init_completion(&msginfo->waitevent);
@@ -78,6 +77,8 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
msg->interrupt_page = virt_to_phys(vmbus_connection.int_page);
msg->monitor_page1 = virt_to_phys(vmbus_connection.monitor_pages[0]);
msg->monitor_page2 = virt_to_phys(vmbus_connection.monitor_pages[1]);
+ if (version == VERSION_WIN8)
+ msg->target_vcpu = hv_context.vp_index[smp_processor_id()];
/*
* Add to list before we send the request since we may
@@ -100,15 +101,7 @@ static int vmbus_negotiate_version(struct vmbus_channel_msginfo *msginfo,
}
/* Wait for the connection response */
- t = wait_for_completion_timeout(&msginfo->waitevent, 5*HZ);
- if (t == 0) {
- spin_lock_irqsave(&vmbus_connection.channelmsg_lock,
- flags);
- list_del(&msginfo->msglistentry);
- spin_unlock_irqrestore(&vmbus_connection.channelmsg_lock,
- flags);
- return -ETIMEDOUT;
- }
+ wait_for_completion(&msginfo->waitevent);
spin_lock_irqsave(&vmbus_connection.channelmsg_lock, flags);
list_del(&msginfo->msglistentry);
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 029ecabc4380..73b3865f1207 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -278,10 +278,6 @@ static int da9055_hwmon_probe(struct platform_device *pdev)
if (hwmon_irq < 0)
return hwmon_irq;
- hwmon_irq = regmap_irq_get_virq(hwmon->da9055->irq_data, hwmon_irq);
- if (hwmon_irq < 0)
- return hwmon_irq;
-
ret = devm_request_threaded_irq(&pdev->dev, hwmon_irq,
NULL, da9055_auxadc_irq,
IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
diff --git a/drivers/hwmon/max1668.c b/drivers/hwmon/max1668.c
index a7626358c95d..029b65e6c589 100644
--- a/drivers/hwmon/max1668.c
+++ b/drivers/hwmon/max1668.c
@@ -243,7 +243,7 @@ static ssize_t set_temp_min(struct device *dev,
data->temp_min[index] = clamp_val(temp/1000, -128, 127);
if (i2c_smbus_write_byte_data(client,
MAX1668_REG_LIML_WR(index),
- data->temp_max[index]))
+ data->temp_min[index]))
count = -EIO;
mutex_unlock(&data->update_lock);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index 8c23203915af..8a17f01e8672 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -145,7 +145,7 @@ struct ntc_data {
static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
{
struct iio_channel *channel = pdata->chan;
- unsigned int result;
+ s64 result;
int val, ret;
ret = iio_read_channel_raw(channel, &val);
@@ -155,10 +155,10 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
}
/* unit: mV */
- result = pdata->pullup_uv * val;
+ result = pdata->pullup_uv * (s64) val;
result >>= 12;
- return result;
+ return (int)result;
}
static const struct of_device_id ntc_match[] = {
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index 3cbf66e9d861..291d11fe93e7 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -90,7 +90,8 @@ struct pmbus_data {
u32 flags; /* from platform data */
- int exponent; /* linear mode: exponent for output voltages */
+ int exponent[PMBUS_PAGES];
+ /* linear mode: exponent for output voltages */
const struct pmbus_driver_info *info;
@@ -410,7 +411,7 @@ static long pmbus_reg2data_linear(struct pmbus_data *data,
long val;
if (sensor->class == PSC_VOLTAGE_OUT) { /* LINEAR16 */
- exponent = data->exponent;
+ exponent = data->exponent[sensor->page];
mantissa = (u16) sensor->data;
} else { /* LINEAR11 */
exponent = ((s16)sensor->data) >> 11;
@@ -516,7 +517,7 @@ static long pmbus_reg2data(struct pmbus_data *data, struct pmbus_sensor *sensor)
#define MIN_MANTISSA (511 * 1000)
static u16 pmbus_data2reg_linear(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
s16 exponent = 0, mantissa;
bool negative = false;
@@ -525,7 +526,7 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
if (val == 0)
return 0;
- if (class == PSC_VOLTAGE_OUT) {
+ if (sensor->class == PSC_VOLTAGE_OUT) {
/* LINEAR16 does not support negative voltages */
if (val < 0)
return 0;
@@ -534,10 +535,10 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
	 * For a static exponent, we don't have a choice
* but to adjust the value to it.
*/
- if (data->exponent < 0)
- val <<= -data->exponent;
+ if (data->exponent[sensor->page] < 0)
+ val <<= -data->exponent[sensor->page];
else
- val >>= data->exponent;
+ val >>= data->exponent[sensor->page];
val = DIV_ROUND_CLOSEST(val, 1000);
return val & 0xffff;
}
@@ -548,14 +549,14 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
/* Power is in uW. Convert to mW before converting. */
- if (class == PSC_POWER)
+ if (sensor->class == PSC_POWER)
val = DIV_ROUND_CLOSEST(val, 1000L);
/*
* For simplicity, convert fan data to milli-units
* before calculating the exponent.
*/
- if (class == PSC_FAN)
+ if (sensor->class == PSC_FAN)
val = val * 1000;
/* Reduce large mantissa until it fits into 10 bit */
@@ -585,22 +586,22 @@ static u16 pmbus_data2reg_linear(struct pmbus_data *data,
}
static u16 pmbus_data2reg_direct(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
long m, b, R;
- m = data->info->m[class];
- b = data->info->b[class];
- R = data->info->R[class];
+ m = data->info->m[sensor->class];
+ b = data->info->b[sensor->class];
+ R = data->info->R[sensor->class];
/* Power is in uW. Adjust R and b. */
- if (class == PSC_POWER) {
+ if (sensor->class == PSC_POWER) {
R -= 3;
b *= 1000;
}
/* Calculate Y = (m * X + b) * 10^R */
- if (class != PSC_FAN) {
+ if (sensor->class != PSC_FAN) {
R -= 3; /* Adjust R and b for data in milli-units */
b *= 1000;
}
@@ -619,7 +620,7 @@ static u16 pmbus_data2reg_direct(struct pmbus_data *data,
}
static u16 pmbus_data2reg_vid(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
val = clamp_val(val, 500, 1600);
@@ -627,20 +628,20 @@ static u16 pmbus_data2reg_vid(struct pmbus_data *data,
}
static u16 pmbus_data2reg(struct pmbus_data *data,
- enum pmbus_sensor_classes class, long val)
+ struct pmbus_sensor *sensor, long val)
{
u16 regval;
- switch (data->info->format[class]) {
+ switch (data->info->format[sensor->class]) {
case direct:
- regval = pmbus_data2reg_direct(data, class, val);
+ regval = pmbus_data2reg_direct(data, sensor, val);
break;
case vid:
- regval = pmbus_data2reg_vid(data, class, val);
+ regval = pmbus_data2reg_vid(data, sensor, val);
break;
case linear:
default:
- regval = pmbus_data2reg_linear(data, class, val);
+ regval = pmbus_data2reg_linear(data, sensor, val);
break;
}
return regval;
@@ -746,7 +747,7 @@ static ssize_t pmbus_set_sensor(struct device *dev,
return -EINVAL;
mutex_lock(&data->update_lock);
- regval = pmbus_data2reg(data, sensor->class, val);
+ regval = pmbus_data2reg(data, sensor, val);
ret = _pmbus_write_word_data(client, sensor->page, sensor->reg, regval);
if (ret < 0)
rv = ret;
@@ -1643,12 +1644,13 @@ static int pmbus_find_attributes(struct i2c_client *client,
* This function is called for all chips.
*/
static int pmbus_identify_common(struct i2c_client *client,
- struct pmbus_data *data)
+ struct pmbus_data *data, int page)
{
int vout_mode = -1;
- if (pmbus_check_byte_register(client, 0, PMBUS_VOUT_MODE))
- vout_mode = _pmbus_read_byte_data(client, 0, PMBUS_VOUT_MODE);
+ if (pmbus_check_byte_register(client, page, PMBUS_VOUT_MODE))
+ vout_mode = _pmbus_read_byte_data(client, page,
+ PMBUS_VOUT_MODE);
if (vout_mode >= 0 && vout_mode != 0xff) {
/*
* Not all chips support the VOUT_MODE command,
@@ -1659,7 +1661,7 @@ static int pmbus_identify_common(struct i2c_client *client,
if (data->info->format[PSC_VOLTAGE_OUT] != linear)
return -ENODEV;
- data->exponent = ((s8)(vout_mode << 3)) >> 3;
+ data->exponent[page] = ((s8)(vout_mode << 3)) >> 3;
break;
case 1: /* VID mode */
if (data->info->format[PSC_VOLTAGE_OUT] != vid)
@@ -1674,7 +1676,7 @@ static int pmbus_identify_common(struct i2c_client *client,
}
}
- pmbus_clear_fault_page(client, 0);
+ pmbus_clear_fault_page(client, page);
return 0;
}
@@ -1682,7 +1684,7 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
struct pmbus_driver_info *info)
{
struct device *dev = &client->dev;
- int ret;
+ int page, ret;
/*
* Some PMBus chips don't support PMBUS_STATUS_BYTE, so try
@@ -1715,10 +1717,12 @@ static int pmbus_init_common(struct i2c_client *client, struct pmbus_data *data,
return -ENODEV;
}
- ret = pmbus_identify_common(client, data);
- if (ret < 0) {
- dev_err(dev, "Failed to identify chip capabilities\n");
- return ret;
+ for (page = 0; page < info->pages; page++) {
+ ret = pmbus_identify_common(client, data, page);
+ if (ret < 0) {
+ dev_err(dev, "Failed to identify chip capabilities\n");
+ return ret;
+ }
}
return 0;
}
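
The pmbus_data2reg_direct() hunk above encodes values with the PMBus "direct" format, Y = (m * X + b) * 10^R, after shifting R and b so that the milli-unit input (and micro-watt power readings) line up with the chip's coefficients. Below is a simplified, stand-alone sketch of that arithmetic; the direct_encode() helper and the coefficient values are hypothetical, and the 64-bit arithmetic and rounding of the real driver are simplified away.

/* Hypothetical stand-alone model of the PMBus "direct" encoding used above.
 * Real drivers take m, b and R from struct pmbus_driver_info per sensor class.
 */
#include <stdio.h>

static long direct_encode(long x, long m, long b, long R, int is_fan)
{
	long y;

	if (!is_fan) {		/* input arrives in milli-units */
		R -= 3;
		b *= 1000;
	}
	y = m * x + b;		/* Y = (m * X + b) * 10^R */
	while (R > 0) { y *= 10; R--; }
	while (R < 0) { y /= 10; R++; }
	return y;
}

int main(void)
{
	/* e.g. 12.5 V (12500 mV) with m = 1, b = 0, R = 0 encodes as 12 */
	printf("%ld\n", direct_encode(12500, 1, 0, 0, 0));
	return 0;
}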
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index f5ed03164d86..de17c5593d97 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -387,7 +387,7 @@ config I2C_CBUS_GPIO
config I2C_CPM
tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)"
- depends on (CPM1 || CPM2) && OF_I2C
+ depends on CPM1 || CPM2
help
This supports the use of the I2C interface on Freescale
processors with CPM1 or CPM2.
diff --git a/drivers/i2c/busses/i2c-mv64xxx.c b/drivers/i2c/busses/i2c-mv64xxx.c
index b8c5187b9ee0..d52d84937ad3 100644
--- a/drivers/i2c/busses/i2c-mv64xxx.c
+++ b/drivers/i2c/busses/i2c-mv64xxx.c
@@ -97,7 +97,6 @@ enum {
enum {
MV64XXX_I2C_ACTION_INVALID,
MV64XXX_I2C_ACTION_CONTINUE,
- MV64XXX_I2C_ACTION_OFFLOAD_SEND_START,
MV64XXX_I2C_ACTION_SEND_START,
MV64XXX_I2C_ACTION_SEND_RESTART,
MV64XXX_I2C_ACTION_OFFLOAD_RESTART,
@@ -204,6 +203,9 @@ static int mv64xxx_i2c_offload_msg(struct mv64xxx_i2c_data *drv_data)
unsigned long ctrl_reg;
struct i2c_msg *msg = drv_data->msgs;
+ if (!drv_data->offload_enabled)
+ return -EOPNOTSUPP;
+
drv_data->msg = msg;
drv_data->byte_posn = 0;
drv_data->bytes_left = msg->len;
@@ -433,8 +435,7 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->msgs++;
drv_data->num_msgs--;
- if (!(drv_data->offload_enabled &&
- mv64xxx_i2c_offload_msg(drv_data))) {
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
drv_data->cntl_bits |= MV64XXX_I2C_REG_CONTROL_START;
writel(drv_data->cntl_bits,
drv_data->reg_base + drv_data->reg_offsets.control);
@@ -458,15 +459,14 @@ mv64xxx_i2c_do_action(struct mv64xxx_i2c_data *drv_data)
drv_data->reg_base + drv_data->reg_offsets.control);
break;
- case MV64XXX_I2C_ACTION_OFFLOAD_SEND_START:
- if (!mv64xxx_i2c_offload_msg(drv_data))
- break;
- else
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- /* FALLTHRU */
case MV64XXX_I2C_ACTION_SEND_START:
- writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
- drv_data->reg_base + drv_data->reg_offsets.control);
+ /* Can we offload this msg ? */
+ if (mv64xxx_i2c_offload_msg(drv_data) < 0) {
+ /* No, switch to standard path */
+ mv64xxx_i2c_prepare_for_io(drv_data, drv_data->msgs);
+ writel(drv_data->cntl_bits | MV64XXX_I2C_REG_CONTROL_START,
+ drv_data->reg_base + drv_data->reg_offsets.control);
+ }
break;
case MV64XXX_I2C_ACTION_SEND_ADDR_1:
@@ -625,15 +625,10 @@ mv64xxx_i2c_execute_msg(struct mv64xxx_i2c_data *drv_data, struct i2c_msg *msg,
unsigned long flags;
spin_lock_irqsave(&drv_data->lock, flags);
- if (drv_data->offload_enabled) {
- drv_data->action = MV64XXX_I2C_ACTION_OFFLOAD_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- } else {
- mv64xxx_i2c_prepare_for_io(drv_data, msg);
- drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
- drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
- }
+ drv_data->action = MV64XXX_I2C_ACTION_SEND_START;
+ drv_data->state = MV64XXX_I2C_STATE_WAITING_FOR_START_COND;
+
drv_data->send_stop = is_last;
drv_data->block = 1;
mv64xxx_i2c_do_action(drv_data);
diff --git a/drivers/iio/accel/bma180.c b/drivers/iio/accel/bma180.c
index 3bec9220df04..bfec313492b3 100644
--- a/drivers/iio/accel/bma180.c
+++ b/drivers/iio/accel/bma180.c
@@ -447,14 +447,14 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
{ },
};
-#define BMA180_CHANNEL(_index) { \
+#define BMA180_CHANNEL(_axis) { \
.type = IIO_ACCEL, \
- .indexed = 1, \
- .channel = (_index), \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
- .scan_index = (_index), \
+ .scan_index = AXIS_##_axis, \
.scan_type = { \
.sign = 's', \
.realbits = 14, \
@@ -465,10 +465,10 @@ static const struct iio_chan_spec_ext_info bma180_ext_info[] = {
}
static const struct iio_chan_spec bma180_channels[] = {
- BMA180_CHANNEL(AXIS_X),
- BMA180_CHANNEL(AXIS_Y),
- BMA180_CHANNEL(AXIS_Z),
- IIO_CHAN_SOFT_TIMESTAMP(4),
+ BMA180_CHANNEL(X),
+ BMA180_CHANNEL(Y),
+ BMA180_CHANNEL(Z),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
static irqreturn_t bma180_trigger_handler(int irq, void *p)
diff --git a/drivers/iio/adc/max1363.c b/drivers/iio/adc/max1363.c
index e283f2f2ee2f..360259266d4f 100644
--- a/drivers/iio/adc/max1363.c
+++ b/drivers/iio/adc/max1363.c
@@ -1560,7 +1560,7 @@ static int max1363_probe(struct i2c_client *client,
st->client = client;
st->vref_uv = st->chip_info->int_vref_mv * 1000;
- vref = devm_regulator_get(&client->dev, "vref");
+ vref = devm_regulator_get_optional(&client->dev, "vref");
if (!IS_ERR(vref)) {
int vref_uv;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index 41c64a43bcab..ac2d69e34c8c 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -70,7 +70,7 @@ config IIO_ST_GYRO_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
- L3G4200D, LSM330DL, L3GD20, L3GD20H, LSM330DLC, L3G4IS, LSM330.
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index f8f2bf84a5a2..c197360c450b 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -19,7 +19,6 @@
#define LSM330DL_GYRO_DEV_NAME "lsm330dl_gyro"
#define LSM330DLC_GYRO_DEV_NAME "lsm330dlc_gyro"
#define L3GD20_GYRO_DEV_NAME "l3gd20"
-#define L3GD20H_GYRO_DEV_NAME "l3gd20h"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index d53d91adfb55..a8e174a47bc4 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -167,11 +167,10 @@ static const struct st_sensors st_gyro_sensors[] = {
.wai = ST_GYRO_2_WAI_EXP,
.sensors_supported = {
[0] = L3GD20_GYRO_DEV_NAME,
- [1] = L3GD20H_GYRO_DEV_NAME,
- [2] = LSM330D_GYRO_DEV_NAME,
- [3] = LSM330DLC_GYRO_DEV_NAME,
- [4] = L3G4IS_GYRO_DEV_NAME,
- [5] = LSM330_GYRO_DEV_NAME,
+ [1] = LSM330D_GYRO_DEV_NAME,
+ [2] = LSM330DLC_GYRO_DEV_NAME,
+ [3] = L3G4IS_GYRO_DEV_NAME,
+ [4] = LSM330_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 16b8b8d70bf1..23c12f361b05 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -55,7 +55,6 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index 94763e25caf9..b4ad3be26687 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,7 +54,6 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ LSM330DL_GYRO_DEV_NAME },
{ LSM330DLC_GYRO_DEV_NAME },
{ L3GD20_GYRO_DEV_NAME },
- { L3GD20H_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
{},
diff --git a/drivers/iio/imu/adis16400.h b/drivers/iio/imu/adis16400.h
index 2f8f9d632386..0916bf6b6c31 100644
--- a/drivers/iio/imu/adis16400.h
+++ b/drivers/iio/imu/adis16400.h
@@ -189,6 +189,7 @@ enum {
ADIS16300_SCAN_INCLI_X,
ADIS16300_SCAN_INCLI_Y,
ADIS16400_SCAN_ADC,
+ ADIS16400_SCAN_TIMESTAMP,
};
#ifdef CONFIG_IIO_BUFFER
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 368660dfe135..7c582f7ae34e 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -632,7 +632,7 @@ static const struct iio_chan_spec adis16400_channels[] = {
ADIS16400_MAGN_CHAN(Z, ADIS16400_ZMAGN_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16400_TEMP_OUT, 12),
ADIS16400_AUX_ADC_CHAN(ADIS16400_AUX_ADC, 12),
- IIO_CHAN_SOFT_TIMESTAMP(12)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16448_channels[] = {
@@ -659,7 +659,7 @@ static const struct iio_chan_spec adis16448_channels[] = {
},
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16350_channels[] = {
@@ -677,7 +677,7 @@ static const struct iio_chan_spec adis16350_channels[] = {
ADIS16400_MOD_TEMP_CHAN(X, ADIS16350_XTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Y, ADIS16350_YTEMP_OUT, 12),
ADIS16400_MOD_TEMP_CHAN(Z, ADIS16350_ZTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(11)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16300_channels[] = {
@@ -690,7 +690,7 @@ static const struct iio_chan_spec adis16300_channels[] = {
ADIS16400_AUX_ADC_CHAN(ADIS16300_AUX_ADC, 12),
ADIS16400_INCLI_CHAN(X, ADIS16300_PITCH_OUT, 13),
ADIS16400_INCLI_CHAN(Y, ADIS16300_ROLL_OUT, 13),
- IIO_CHAN_SOFT_TIMESTAMP(14)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static const struct iio_chan_spec adis16334_channels[] = {
@@ -701,7 +701,7 @@ static const struct iio_chan_spec adis16334_channels[] = {
ADIS16400_ACCEL_CHAN(Y, ADIS16400_YACCL_OUT, 14),
ADIS16400_ACCEL_CHAN(Z, ADIS16400_ZACCL_OUT, 14),
ADIS16400_TEMP_CHAN(ADIS16350_XTEMP_OUT, 12),
- IIO_CHAN_SOFT_TIMESTAMP(8)
+ IIO_CHAN_SOFT_TIMESTAMP(ADIS16400_SCAN_TIMESTAMP),
};
static struct attribute *adis16400_attributes[] = {
diff --git a/drivers/iio/light/cm32181.c b/drivers/iio/light/cm32181.c
index f17b4e6183c6..47a6dbac2d0c 100644
--- a/drivers/iio/light/cm32181.c
+++ b/drivers/iio/light/cm32181.c
@@ -103,13 +103,13 @@ static int cm32181_reg_init(struct cm32181_chip *cm32181)
/**
* cm32181_read_als_it() - Get sensor integration time (ms)
* @cm32181: pointer of struct cm32181
- * @val: pointer of int to load the als_it value.
+ * @val2: pointer of int to load the als_it value.
*
 * Report the current integration time in milliseconds.
*
- * Return: IIO_VAL_INT for success, otherwise -EINVAL.
+ * Return: IIO_VAL_INT_PLUS_MICRO for success, otherwise -EINVAL.
*/
-static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
+static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val2)
{
u16 als_it;
int i;
@@ -119,8 +119,8 @@ static int cm32181_read_als_it(struct cm32181_chip *cm32181, int *val)
als_it >>= CM32181_CMD_ALS_IT_SHIFT;
for (i = 0; i < ARRAY_SIZE(als_it_bits); i++) {
if (als_it == als_it_bits[i]) {
- *val = als_it_value[i];
- return IIO_VAL_INT;
+ *val2 = als_it_value[i];
+ return IIO_VAL_INT_PLUS_MICRO;
}
}
@@ -221,7 +221,7 @@ static int cm32181_read_raw(struct iio_dev *indio_dev,
*val = cm32181->calibscale;
return IIO_VAL_INT;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_read_als_it(cm32181, val);
+ ret = cm32181_read_als_it(cm32181, val2);
return ret;
}
@@ -240,7 +240,7 @@ static int cm32181_write_raw(struct iio_dev *indio_dev,
cm32181->calibscale = val;
return val;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm32181_write_als_it(cm32181, val);
+ ret = cm32181_write_als_it(cm32181, val2);
return ret;
}
@@ -264,7 +264,7 @@ static ssize_t cm32181_get_it_available(struct device *dev,
n = ARRAY_SIZE(als_it_value);
for (i = 0, len = 0; i < n; i++)
- len += sprintf(buf + len, "%d ", als_it_value[i]);
+ len += sprintf(buf + len, "0.%06u ", als_it_value[i]);
return len + sprintf(buf + len, "\n");
}
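
The cm32181 change switches the integration-time interface to the IIO "int plus micro" convention, where a reading is reported as val + val2 / 1000000 and the available values are printed as fractional seconds with "0.%06u". A small sketch of that formatting follows; the table of microsecond values is made up, the driver keeps its own als_it_value[].

/* Sketch of the "0.%06u" formatting used above for sub-second values. */
#include <stdio.h>

int main(void)
{
	unsigned int usec[] = { 800000, 400000, 200000, 100000 };	/* microseconds (example values) */
	char buf[64];
	int i, len = 0;

	for (i = 0; i < 4; i++)
		len += sprintf(buf + len, "0.%06u ", usec[i]);
	buf[len - 1] = '\n';	/* replace trailing space, as the driver does */
	/* prints: 0.800000 0.400000 0.200000 0.100000 */
	fputs(buf, stdout);
	return 0;
}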
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 0a142af83e25..a45e07492db3 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -50,10 +50,10 @@
#define CM36651_CS_CONF2_DEFAULT_BIT 0x08
/* CS_CONF3 channel integration time */
-#define CM36651_CS_IT1 0x00 /* Integration time 80000 usec */
-#define CM36651_CS_IT2 0x40 /* Integration time 160000 usec */
-#define CM36651_CS_IT3 0x80 /* Integration time 320000 usec */
-#define CM36651_CS_IT4 0xC0 /* Integration time 640000 usec */
+#define CM36651_CS_IT1 0x00 /* Integration time 80 msec */
+#define CM36651_CS_IT2 0x40 /* Integration time 160 msec */
+#define CM36651_CS_IT3 0x80 /* Integration time 320 msec */
+#define CM36651_CS_IT4 0xC0 /* Integration time 640 msec */
/* PS_CONF1 command code */
#define CM36651_PS_ENABLE 0x00
@@ -64,10 +64,10 @@
#define CM36651_PS_PERS4 0x0C
/* PS_CONF1 command code: integration time */
-#define CM36651_PS_IT1 0x00 /* Integration time 320 usec */
-#define CM36651_PS_IT2 0x10 /* Integration time 420 usec */
-#define CM36651_PS_IT3 0x20 /* Integration time 520 usec */
-#define CM36651_PS_IT4 0x30 /* Integration time 640 usec */
+#define CM36651_PS_IT1 0x00 /* Integration time 0.32 msec */
+#define CM36651_PS_IT2 0x10 /* Integration time 0.42 msec */
+#define CM36651_PS_IT3 0x20 /* Integration time 0.52 msec */
+#define CM36651_PS_IT4 0x30 /* Integration time 0.64 msec */
/* PS_CONF1 command code: duty ratio */
#define CM36651_PS_DR1 0x00 /* Duty ratio 1/80 */
@@ -93,8 +93,8 @@
#define CM36651_CLOSE_PROXIMITY 0x32
#define CM36651_FAR_PROXIMITY 0x33
-#define CM36651_CS_INT_TIME_AVAIL "80000 160000 320000 640000"
-#define CM36651_PS_INT_TIME_AVAIL "320 420 520 640"
+#define CM36651_CS_INT_TIME_AVAIL "0.08 0.16 0.32 0.64"
+#define CM36651_PS_INT_TIME_AVAIL "0.000320 0.000420 0.000520 0.000640"
enum cm36651_operation_mode {
CM36651_LIGHT_EN,
@@ -356,30 +356,30 @@ static int cm36651_read_channel(struct cm36651_data *cm36651,
}
static int cm36651_read_int_time(struct cm36651_data *cm36651,
- struct iio_chan_spec const *chan, int *val)
+ struct iio_chan_spec const *chan, int *val2)
{
switch (chan->type) {
case IIO_LIGHT:
if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT1)
- *val = 80000;
+ *val2 = 80000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT2)
- *val = 160000;
+ *val2 = 160000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT3)
- *val = 320000;
+ *val2 = 320000;
else if (cm36651->cs_int_time[chan->address] == CM36651_CS_IT4)
- *val = 640000;
+ *val2 = 640000;
else
return -EINVAL;
break;
case IIO_PROXIMITY:
if (cm36651->ps_int_time == CM36651_PS_IT1)
- *val = 320;
+ *val2 = 320;
else if (cm36651->ps_int_time == CM36651_PS_IT2)
- *val = 420;
+ *val2 = 420;
else if (cm36651->ps_int_time == CM36651_PS_IT3)
- *val = 520;
+ *val2 = 520;
else if (cm36651->ps_int_time == CM36651_PS_IT4)
- *val = 640;
+ *val2 = 640;
else
return -EINVAL;
break;
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}
- return IIO_VAL_INT;
+ return IIO_VAL_INT_PLUS_MICRO;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
@@ -459,7 +459,8 @@ static int cm36651_read_raw(struct iio_dev *indio_dev,
ret = cm36651_read_channel(cm36651, chan, val);
break;
case IIO_CHAN_INFO_INT_TIME:
- ret = cm36651_read_int_time(cm36651, chan, val);
+ *val = 0;
+ ret = cm36651_read_int_time(cm36651, chan, val2);
break;
default:
ret = -EINVAL;
@@ -479,7 +480,7 @@ static int cm36651_write_raw(struct iio_dev *indio_dev,
int ret = -EINVAL;
if (mask == IIO_CHAN_INFO_INT_TIME) {
- ret = cm36651_write_int_time(cm36651, chan, val);
+ ret = cm36651_write_int_time(cm36651, chan, val2);
if (ret < 0)
dev_err(&client->dev, "Integration time write failed\n");
}
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 3d8110157f2d..94daa9fc1247 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -460,10 +460,14 @@ static int tsl2563_write_raw(struct iio_dev *indio_dev,
{
struct tsl2563_chip *chip = iio_priv(indio_dev);
- if (chan->channel == IIO_MOD_LIGHT_BOTH)
+ if (mask != IIO_CHAN_INFO_CALIBSCALE)
+ return -EINVAL;
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
chip->calib0 = calib_from_sysfs(val);
- else
+ else if (chan->channel2 == IIO_MOD_LIGHT_IR)
chip->calib1 = calib_from_sysfs(val);
+ else
+ return -EINVAL;
return 0;
}
@@ -472,14 +476,14 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
struct iio_chan_spec const *chan,
int *val,
int *val2,
- long m)
+ long mask)
{
int ret = -EINVAL;
u32 calib0, calib1;
struct tsl2563_chip *chip = iio_priv(indio_dev);
mutex_lock(&chip->lock);
- switch (m) {
+ switch (mask) {
case IIO_CHAN_INFO_RAW:
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
@@ -498,7 +502,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
ret = tsl2563_get_adc(chip);
if (ret)
goto error_ret;
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = chip->data0;
else
*val = chip->data1;
@@ -510,7 +514,7 @@ static int tsl2563_read_raw(struct iio_dev *indio_dev,
break;
case IIO_CHAN_INFO_CALIBSCALE:
- if (chan->channel == 0)
+ if (chan->channel2 == IIO_MOD_LIGHT_BOTH)
*val = calib_to_sysfs(chip->calib0);
else
*val = calib_to_sysfs(chip->calib1);
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index ff284e5afd95..05423543f89d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -85,6 +85,7 @@
#define AK8975_MAX_CONVERSION_TIMEOUT 500
#define AK8975_CONVERSION_DONE_POLL_TIME 10
#define AK8975_DATA_READY_TIMEOUT ((100*HZ)/1000)
+#define RAW_TO_GAUSS(asa) ((((asa) + 128) * 3000) / 256)
/*
* Per-instance context data for the device.
@@ -265,15 +266,15 @@ static int ak8975_setup(struct i2c_client *client)
*
* Since 1uT = 0.01 gauss, our final scale factor becomes:
*
- * Hadj = H * ((ASA + 128) / 256) * 3/10 * 100
- * Hadj = H * ((ASA + 128) * 30 / 256
+ * Hadj = H * ((ASA + 128) / 256) * 3/10 * 1/100
+ * Hadj = H * ((ASA + 128) * 0.003) / 256
*
* Since ASA doesn't change, we cache the resultant scale factor into the
* device context in ak8975_setup().
*/
- data->raw_to_gauss[0] = ((data->asa[0] + 128) * 30) >> 8;
- data->raw_to_gauss[1] = ((data->asa[1] + 128) * 30) >> 8;
- data->raw_to_gauss[2] = ((data->asa[2] + 128) * 30) >> 8;
+ data->raw_to_gauss[0] = RAW_TO_GAUSS(data->asa[0]);
+ data->raw_to_gauss[1] = RAW_TO_GAUSS(data->asa[1]);
+ data->raw_to_gauss[2] = RAW_TO_GAUSS(data->asa[2]);
return 0;
}
@@ -428,8 +429,9 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_RAW:
return ak8975_read_axis(indio_dev, chan->address, val);
case IIO_CHAN_INFO_SCALE:
- *val = data->raw_to_gauss[chan->address];
- return IIO_VAL_INT;
+ *val = 0;
+ *val2 = data->raw_to_gauss[chan->address];
+ return IIO_VAL_INT_PLUS_MICRO;
}
return -EINVAL;
}
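
With the ak8975 change, the cached scale factor becomes a micro-gauss-per-LSB value computed by RAW_TO_GAUSS(asa) = ((asa + 128) * 3000) / 256 and returned through IIO_VAL_INT_PLUS_MICRO (*val = 0, *val2 = raw_to_gauss). A quick check of the arithmetic, using an assumed ASA of 0:

/* Worked check of the RAW_TO_GAUSS() macro introduced above.
 * ASA = 0 is just an example value read from the sensor's fuse ROM.
 */
#include <stdio.h>

#define RAW_TO_GAUSS(asa)	((((asa) + 128) * 3000) / 256)

int main(void)
{
	int asa = 0;
	/* (0 + 128) * 3000 / 256 = 1500, i.e. 0.001500 G/LSB via INT_PLUS_MICRO */
	printf("%d\n", RAW_TO_GAUSS(asa));
	return 0;
}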
diff --git a/drivers/iio/magnetometer/mag3110.c b/drivers/iio/magnetometer/mag3110.c
index 4b65b6d3bdb1..f66955fb3509 100644
--- a/drivers/iio/magnetometer/mag3110.c
+++ b/drivers/iio/magnetometer/mag3110.c
@@ -106,7 +106,7 @@ static ssize_t mag3110_show_int_plus_micros(char *buf,
while (n-- > 0)
len += scnprintf(buf + len, PAGE_SIZE - len,
- "%d.%d ", vals[n][0], vals[n][1]);
+ "%d.%06d ", vals[n][0], vals[n][1]);
/* replace trailing space by newline */
buf[len - 1] = '\n';
@@ -154,6 +154,9 @@ static int mag3110_read_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (chan->type) {
case IIO_MAGN: /* in 0.1 uT / LSB */
ret = mag3110_read(data, buffer);
@@ -199,6 +202,9 @@ static int mag3110_write_raw(struct iio_dev *indio_dev,
struct mag3110_data *data = iio_priv(indio_dev);
int rate;
+ if (iio_buffer_enabled(indio_dev))
+ return -EBUSY;
+
switch (mask) {
case IIO_CHAN_INFO_SAMP_FREQ:
rate = mag3110_get_samp_freq_index(data, val, val2);
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index d53cf519f42a..00400c352c1a 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -1082,6 +1082,7 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
/* Initialize network device */
if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
+ ret = -ENOMEM;
iounmap(mmio_regs);
goto bail4;
}
@@ -1151,7 +1152,8 @@ static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
goto bail10;
}
- if (c2_register_device(c2dev))
+ ret = c2_register_device(c2dev);
+ if (ret)
goto bail10;
return 0;
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index b7c986990053..d2a6d961344b 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -576,7 +576,8 @@ int c2_rnic_init(struct c2_dev *c2dev)
goto bail4;
	/* Initialize the cached adapter limits */
- if (c2_rnic_query(c2dev, &c2dev->props))
+ err = c2_rnic_query(c2dev, &c2dev->props);
+ if (err)
goto bail5;
/* Initialize the PD pool */
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 45126879ad28..d286bdebe2ab 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3352,6 +3352,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
goto free_dst;
}
+ neigh_release(neigh);
step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
window = (__force u16) htons((__force u16)tcph->window);
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index c2702f549f10..f9c12e92fdd6 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -53,8 +53,8 @@
#include "user.h"
#define DRV_NAME MLX4_IB_DRV_NAME
-#define DRV_VERSION "1.0"
-#define DRV_RELDATE "April 4, 2008"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_IB_FLOW_MAX_PRIO 0xFFF
#define MLX4_IB_FLOW_QPN_MASK 0xFFFFFF
@@ -347,7 +347,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
props->active_width = (((u8 *)mailbox->buf)[5] == 0x40) ?
IB_WIDTH_4X : IB_WIDTH_1X;
props->active_speed = IB_SPEED_QDR;
- props->port_cap_flags = IB_PORT_CM_SUP;
+ props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
props->max_msg_sz = mdev->dev->caps.max_msg_sz;
props->pkey_tbl_len = 1;
@@ -1357,6 +1357,21 @@ static struct device_attribute *mlx4_class_attributes[] = {
&dev_attr_board_id
};
+static void mlx4_addrconf_ifid_eui48(u8 *eui, u16 vlan_id,
+ struct net_device *dev)
+{
+ memcpy(eui, dev->dev_addr, 3);
+ memcpy(eui + 5, dev->dev_addr + 3, 3);
+ if (vlan_id < 0x1000) {
+ eui[3] = vlan_id >> 8;
+ eui[4] = vlan_id & 0xff;
+ } else {
+ eui[3] = 0xff;
+ eui[4] = 0xfe;
+ }
+ eui[0] ^= 2;
+}
+
static void update_gids_task(struct work_struct *work)
{
struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
@@ -1393,7 +1408,6 @@ static void reset_gids_task(struct work_struct *work)
struct mlx4_cmd_mailbox *mailbox;
union ib_gid *gids;
int err;
- int i;
struct mlx4_dev *dev = gw->dev->dev;
mailbox = mlx4_alloc_cmd_mailbox(dev);
@@ -1405,18 +1419,16 @@ static void reset_gids_task(struct work_struct *work)
gids = mailbox->buf;
memcpy(gids, gw->gids, sizeof(gw->gids));
- for (i = 1; i < gw->dev->num_ports + 1; i++) {
- if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, i) ==
- IB_LINK_LAYER_ETHERNET) {
- err = mlx4_cmd(dev, mailbox->dma,
- MLX4_SET_PORT_GID_TABLE << 8 | i,
- 1, MLX4_CMD_SET_PORT,
- MLX4_CMD_TIME_CLASS_B,
- MLX4_CMD_WRAPPED);
- if (err)
- pr_warn(KERN_WARNING
- "set port %d command failed\n", i);
- }
+ if (mlx4_ib_port_link_layer(&gw->dev->ib_dev, gw->port) ==
+ IB_LINK_LAYER_ETHERNET) {
+ err = mlx4_cmd(dev, mailbox->dma,
+ MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
+ 1, MLX4_CMD_SET_PORT,
+ MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+ if (err)
+ pr_warn(KERN_WARNING
+ "set port %d command failed\n", gw->port);
}
mlx4_free_cmd_mailbox(dev, mailbox);
@@ -1425,7 +1437,8 @@ free:
}
static int update_gid_table(struct mlx4_ib_dev *dev, int port,
- union ib_gid *gid, int clear)
+ union ib_gid *gid, int clear,
+ int default_gid)
{
struct update_gid_work *work;
int i;
@@ -1434,26 +1447,31 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
int found = -1;
int max_gids;
- max_gids = dev->dev->caps.gid_table_len[port];
- for (i = 0; i < max_gids; ++i) {
- if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
- sizeof(*gid)))
- found = i;
-
- if (clear) {
- if (found >= 0) {
- need_update = 1;
- dev->iboe.gid_table[port - 1][found] = zgid;
- break;
- }
- } else {
- if (found >= 0)
- break;
-
- if (free < 0 &&
- !memcmp(&dev->iboe.gid_table[port - 1][i], &zgid,
+ if (default_gid) {
+ free = 0;
+ } else {
+ max_gids = dev->dev->caps.gid_table_len[port];
+ for (i = 1; i < max_gids; ++i) {
+ if (!memcmp(&dev->iboe.gid_table[port - 1][i], gid,
sizeof(*gid)))
- free = i;
+ found = i;
+
+ if (clear) {
+ if (found >= 0) {
+ need_update = 1;
+ dev->iboe.gid_table[port - 1][found] =
+ zgid;
+ break;
+ }
+ } else {
+ if (found >= 0)
+ break;
+
+ if (free < 0 &&
+ !memcmp(&dev->iboe.gid_table[port - 1][i],
+ &zgid, sizeof(*gid)))
+ free = i;
+ }
}
}
@@ -1478,18 +1496,26 @@ static int update_gid_table(struct mlx4_ib_dev *dev, int port,
return 0;
}
-static int reset_gid_table(struct mlx4_ib_dev *dev)
+static void mlx4_make_default_gid(struct net_device *dev, union ib_gid *gid)
{
- struct update_gid_work *work;
+ gid->global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
+ mlx4_addrconf_ifid_eui48(&gid->raw[8], 0xffff, dev);
+}
+
+static int reset_gid_table(struct mlx4_ib_dev *dev, u8 port)
+{
+ struct update_gid_work *work;
work = kzalloc(sizeof(*work), GFP_ATOMIC);
if (!work)
return -ENOMEM;
- memset(dev->iboe.gid_table, 0, sizeof(dev->iboe.gid_table));
+
+ memset(dev->iboe.gid_table[port - 1], 0, sizeof(work->gids));
memset(work->gids, 0, sizeof(work->gids));
INIT_WORK(&work->work, reset_gids_task);
work->dev = dev;
+ work->port = port;
queue_work(wq, &work->work);
return 0;
}
@@ -1502,6 +1528,12 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
struct net_device *real_dev = rdma_vlan_dev_real_dev(event_netdev) ?
rdma_vlan_dev_real_dev(event_netdev) :
event_netdev;
+ union ib_gid default_gid;
+
+ mlx4_make_default_gid(real_dev, &default_gid);
+
+ if (!memcmp(gid, &default_gid, sizeof(*gid)))
+ return 0;
if (event != NETDEV_DOWN && event != NETDEV_UP)
return 0;
@@ -1520,7 +1552,7 @@ static int mlx4_ib_addr_event(int event, struct net_device *event_netdev,
(!netif_is_bond_master(real_dev) &&
(real_dev == iboe->netdevs[port - 1])))
update_gid_table(ibdev, port, gid,
- event == NETDEV_DOWN);
+ event == NETDEV_DOWN, 0);
spin_unlock(&iboe->lock);
return 0;
@@ -1536,7 +1568,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
rdma_vlan_dev_real_dev(dev) : dev;
iboe = &ibdev->iboe;
- spin_lock(&iboe->lock);
for (port = 1; port <= MLX4_MAX_PORTS; ++port)
if ((netif_is_bond_master(real_dev) &&
@@ -1545,8 +1576,6 @@ static u8 mlx4_ib_get_dev_port(struct net_device *dev,
(real_dev == iboe->netdevs[port - 1])))
break;
- spin_unlock(&iboe->lock);
-
if ((port == 0) || (port > MLX4_MAX_PORTS))
return 0;
else
@@ -1607,7 +1636,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
/*ifa->ifa_address;*/
ipv6_addr_set_v4mapped(ifa->ifa_address,
(struct in6_addr *)&gid);
- update_gid_table(ibdev, port, &gid, 0);
+ update_gid_table(ibdev, port, &gid, 0, 0);
}
endfor_ifa(in_dev);
in_dev_put(in_dev);
@@ -1619,7 +1648,7 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
read_lock_bh(&in6_dev->lock);
list_for_each_entry(ifp, &in6_dev->addr_list, if_list) {
pgid = (union ib_gid *)&ifp->addr;
- update_gid_table(ibdev, port, pgid, 0);
+ update_gid_table(ibdev, port, pgid, 0, 0);
}
read_unlock_bh(&in6_dev->lock);
in6_dev_put(in6_dev);
@@ -1627,14 +1656,26 @@ static void mlx4_ib_get_dev_addr(struct net_device *dev,
#endif
}
+static void mlx4_ib_set_default_gid(struct mlx4_ib_dev *ibdev,
+ struct net_device *dev, u8 port)
+{
+ union ib_gid gid;
+ mlx4_make_default_gid(dev, &gid);
+ update_gid_table(ibdev, port, &gid, 0, 1);
+}
+
static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
{
struct net_device *dev;
+ struct mlx4_ib_iboe *iboe = &ibdev->iboe;
+ int i;
- if (reset_gid_table(ibdev))
- return -1;
+ for (i = 1; i <= ibdev->num_ports; ++i)
+ if (reset_gid_table(ibdev, i))
+ return -1;
read_lock(&dev_base_lock);
+ spin_lock(&iboe->lock);
for_each_netdev(&init_net, dev) {
u8 port = mlx4_ib_get_dev_port(dev, ibdev);
@@ -1642,6 +1683,7 @@ static int mlx4_ib_init_gid_table(struct mlx4_ib_dev *ibdev)
mlx4_ib_get_dev_addr(dev, ibdev, port);
}
+ spin_unlock(&iboe->lock);
read_unlock(&dev_base_lock);
return 0;
@@ -1656,25 +1698,57 @@ static void mlx4_ib_scan_netdevs(struct mlx4_ib_dev *ibdev)
spin_lock(&iboe->lock);
mlx4_foreach_ib_transport_port(port, ibdev->dev) {
+ enum ib_port_state port_state = IB_PORT_NOP;
struct net_device *old_master = iboe->masters[port - 1];
+ struct net_device *curr_netdev;
struct net_device *curr_master;
+
iboe->netdevs[port - 1] =
mlx4_get_protocol_dev(ibdev->dev, MLX4_PROT_ETH, port);
+ if (iboe->netdevs[port - 1])
+ mlx4_ib_set_default_gid(ibdev,
+ iboe->netdevs[port - 1], port);
+ curr_netdev = iboe->netdevs[port - 1];
if (iboe->netdevs[port - 1] &&
netif_is_bond_slave(iboe->netdevs[port - 1])) {
- rtnl_lock();
iboe->masters[port - 1] = netdev_master_upper_dev_get(
iboe->netdevs[port - 1]);
- rtnl_unlock();
+ } else {
+ iboe->masters[port - 1] = NULL;
}
curr_master = iboe->masters[port - 1];
+ if (curr_netdev) {
+ port_state = (netif_running(curr_netdev) && netif_carrier_ok(curr_netdev)) ?
+ IB_PORT_ACTIVE : IB_PORT_DOWN;
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ } else {
+ reset_gid_table(ibdev, port);
+ }
+		/* if using bonding/team and a slave port is down, we don't want the bond IP
+ * based gids in the table since flows that select port by gid may get
+ * the down port.
+ */
+ if (curr_master && (port_state == IB_PORT_DOWN)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ }
/* if bonding is used it is possible that we add it to masters
- only after IP address is assigned to the net bonding
- interface */
- if (curr_master && (old_master != curr_master))
+ * only after IP address is assigned to the net bonding
+ * interface.
+ */
+ if (curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
mlx4_ib_get_dev_addr(curr_master, ibdev, port);
+ }
+
+ if (!curr_master && (old_master != curr_master)) {
+ reset_gid_table(ibdev, port);
+ mlx4_ib_set_default_gid(ibdev, curr_netdev, port);
+ mlx4_ib_get_dev_addr(curr_netdev, ibdev, port);
+ }
}
spin_unlock(&iboe->lock);
@@ -1810,6 +1884,7 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
int i, j;
int err;
struct mlx4_ib_iboe *iboe;
+ int ib_num_ports = 0;
pr_info_once("%s", mlx4_ib_version);
@@ -1985,10 +2060,14 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
ibdev->counters[i] = -1;
}
+ mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
+ ib_num_ports++;
+
spin_lock_init(&ibdev->sm_lock);
mutex_init(&ibdev->cap_mask_mutex);
- if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED) {
+ if (ibdev->steering_support == MLX4_STEERING_MODE_DEVICE_MANAGED &&
+ ib_num_ports) {
ibdev->steer_qpn_count = MLX4_IB_UC_MAX_NUM_QPS;
err = mlx4_qp_reserve_range(dev, ibdev->steer_qpn_count,
MLX4_IB_UC_STEER_QPN_ALIGN,
@@ -2051,7 +2130,11 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
}
}
#endif
+ for (i = 1 ; i <= ibdev->num_ports ; ++i)
+ reset_gid_table(ibdev, i);
+ rtnl_lock();
mlx4_ib_scan_netdevs(ibdev);
+ rtnl_unlock();
mlx4_ib_init_gid_table(ibdev);
}
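
The mlx4_addrconf_ifid_eui48() helper added above builds a modified EUI-64 interface ID from the netdevice MAC, inserting either the VLAN ID or the ff:fe filler and flipping the universal/local bit; the default GID passes 0xffff to mean "no VLAN". A worked example with a made-up MAC address, mirroring the same steps:

/* Worked example of the modified-EUI-64 construction in
 * mlx4_addrconf_ifid_eui48() above; the MAC address is made up.
 */
#include <stdio.h>
#include <string.h>

static void ifid_eui48(unsigned char *eui, unsigned vlan_id,
		       const unsigned char *mac)
{
	memcpy(eui, mac, 3);
	memcpy(eui + 5, mac + 3, 3);
	if (vlan_id < 0x1000) {
		eui[3] = vlan_id >> 8;
		eui[4] = vlan_id & 0xff;
	} else {			/* no VLAN: ff:fe filler */
		eui[3] = 0xff;
		eui[4] = 0xfe;
	}
	eui[0] ^= 2;			/* flip the universal/local bit */
}

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	unsigned char eui[8];
	int i;

	ifid_eui48(eui, 0xffff, mac);	/* 0xffff means "no VLAN" here */
	for (i = 0; i < 8; i++)
		printf("%02x%s", eui[i], i == 7 ? "\n" : ":");
	/* prints 02:11:22:ff:fe:33:44:55 */
	return 0;
}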
diff --git a/drivers/infiniband/hw/mlx5/Kconfig b/drivers/infiniband/hw/mlx5/Kconfig
index 8e6aebfaf8a4..10df386c6344 100644
--- a/drivers/infiniband/hw/mlx5/Kconfig
+++ b/drivers/infiniband/hw/mlx5/Kconfig
@@ -1,6 +1,6 @@
config MLX5_INFINIBAND
tristate "Mellanox Connect-IB HCA support"
- depends on NETDEVICES && ETHERNET && PCI && X86
+ depends on NETDEVICES && ETHERNET && PCI
select NET_VENDOR_MELLANOX
select MLX5_CORE
---help---
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 9660d093f8cf..bf900579ac08 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -46,8 +46,8 @@
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
@@ -261,8 +261,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
IB_DEVICE_PORT_ACTIVE_EVENT |
IB_DEVICE_SYS_IMAGE_GUID |
- IB_DEVICE_RC_RNR_NAK_GEN |
- IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
+ IB_DEVICE_RC_RNR_NAK_GEN;
flags = dev->mdev.caps.flags;
if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
@@ -536,24 +535,38 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
struct ib_udata *udata)
{
struct mlx5_ib_dev *dev = to_mdev(ibdev);
- struct mlx5_ib_alloc_ucontext_req req;
+ struct mlx5_ib_alloc_ucontext_req_v2 req;
struct mlx5_ib_alloc_ucontext_resp resp;
struct mlx5_ib_ucontext *context;
struct mlx5_uuar_info *uuari;
struct mlx5_uar *uars;
int gross_uuars;
int num_uars;
+ int ver;
int uuarn;
int err;
int i;
+ int reqlen;
if (!dev->ib_active)
return ERR_PTR(-EAGAIN);
- err = ib_copy_from_udata(&req, udata, sizeof(req));
+ memset(&req, 0, sizeof(req));
+ reqlen = udata->inlen - sizeof(struct ib_uverbs_cmd_hdr);
+ if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req))
+ ver = 0;
+ else if (reqlen == sizeof(struct mlx5_ib_alloc_ucontext_req_v2))
+ ver = 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ err = ib_copy_from_udata(&req, udata, reqlen);
if (err)
return ERR_PTR(err);
+ if (req.flags || req.reserved)
+ return ERR_PTR(-EINVAL);
+
if (req.total_num_uuars > MLX5_MAX_UUARS)
return ERR_PTR(-ENOMEM);
@@ -626,6 +639,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
if (err)
goto out_uars;
+ uuari->ver = ver;
uuari->num_low_latency_uuars = req.num_low_latency_uuars;
uuari->uars = uars;
uuari->num_uars = num_uars;
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index ae37fb9bf262..7dfe8a1c84cf 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -216,7 +216,9 @@ static int sq_overhead(enum ib_qp_type qp_type)
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ sizeof(struct mlx5_wqe_raddr_seg) +
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg);
break;
case IB_QPT_UD:
@@ -428,11 +430,17 @@ static int alloc_uuar(struct mlx5_uuar_info *uuari,
break;
case MLX5_IB_LATENCY_CLASS_MEDIUM:
- uuarn = alloc_med_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_med_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_HIGH:
- uuarn = alloc_high_class_uuar(uuari);
+ if (uuari->ver < 2)
+ uuarn = -ENOMEM;
+ else
+ uuarn = alloc_high_class_uuar(uuari);
break;
case MLX5_IB_LATENCY_CLASS_FAST_PATH:
@@ -657,8 +665,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
int err;
uuari = &dev->mdev.priv.uuari;
- if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK)
- qp->flags |= MLX5_IB_QP_BLOCK_MULTICAST_LOOPBACK;
+ if (init_attr->create_flags)
+ return -EINVAL;
if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
lc = MLX5_IB_LATENCY_CLASS_FAST_PATH;
diff --git a/drivers/infiniband/hw/mlx5/user.h b/drivers/infiniband/hw/mlx5/user.h
index 32a2a5dfc523..0f4f8e42a17f 100644
--- a/drivers/infiniband/hw/mlx5/user.h
+++ b/drivers/infiniband/hw/mlx5/user.h
@@ -62,6 +62,13 @@ struct mlx5_ib_alloc_ucontext_req {
__u32 num_low_latency_uuars;
};
+struct mlx5_ib_alloc_ucontext_req_v2 {
+ __u32 total_num_uuars;
+ __u32 num_low_latency_uuars;
+ __u32 flags;
+ __u32 reserved;
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 429141078eec..353c7b05a90a 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -675,8 +675,11 @@ static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
INIT_DELAYED_WORK(&nesdev->work, nes_recheck_link_status);
/* Initialize network devices */
- if ((netdev = nes_netdev_init(nesdev, mmio_regs)) == NULL)
+ netdev = nes_netdev_init(nesdev, mmio_regs);
+ if (netdev == NULL) {
+ ret = -ENOMEM;
goto bail7;
+ }
/* Register network device */
ret = register_netdev(netdev);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 2ca86ca818bd..1a8a945efa60 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -127,7 +127,7 @@ static int ocrdma_addr_event(unsigned long event, struct net_device *netdev,
is_vlan = netdev->priv_flags & IFF_802_1Q_VLAN;
if (is_vlan)
- netdev = vlan_dev_real_dev(netdev);
+ netdev = rdma_vlan_dev_real_dev(netdev);
rcu_read_lock();
list_for_each_entry_rcu(dev, &ocrdma_dev_list, entry) {
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index aa92f40c9d50..e0cc201be41a 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -176,7 +176,7 @@ int ocrdma_query_port(struct ib_device *ibdev,
props->port_cap_flags =
IB_PORT_CM_SUP |
IB_PORT_REINIT_SUP |
- IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP;
+ IB_PORT_DEVICE_MGMT_SUP | IB_PORT_VENDOR_CLASS_SUP | IB_PORT_IP_BASED_GIDS;
props->gid_tbl_len = OCRDMA_MAX_SGID;
props->pkey_tbl_len = 1;
props->bad_pkey_cntr = 0;
@@ -1416,7 +1416,7 @@ int ocrdma_query_qp(struct ib_qp *ibqp,
OCRDMA_QP_PARAMS_HOP_LMT_MASK) >>
OCRDMA_QP_PARAMS_HOP_LMT_SHIFT;
qp_attr->ah_attr.grh.traffic_class = (params.tclass_sq_psn &
- OCRDMA_QP_PARAMS_SQ_PSN_MASK) >>
+ OCRDMA_QP_PARAMS_TCLASS_MASK) >>
OCRDMA_QP_PARAMS_TCLASS_SHIFT;
qp_attr->ah_attr.ah_flags = IB_AH_GRH;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index 5bfc02f450e6..d1bd21319d7d 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -2395,6 +2395,11 @@ static int qib_7322_bringup_serdes(struct qib_pportdata *ppd)
qib_write_kreg_port(ppd, krp_ibcctrl_a, ppd->cpspec->ibcctrl_a);
qib_write_kreg(dd, kr_scratch, 0ULL);
+ /* ensure previous Tx parameters are not still forced */
+ qib_write_kreg_port(ppd, krp_tx_deemph_override,
+ SYM_MASK(IBSD_TX_DEEMPHASIS_OVERRIDE_0,
+ reset_tx_deemphasis_override));
+
if (qib_compat_ddr_negotiate) {
ppd->cpspec->ibdeltainprog = 1;
ppd->cpspec->ibsymsnap = read_7322_creg32_port(ppd,
diff --git a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
index 7ecc6061f1f4..f8dfd76be89f 100644
--- a/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
+++ b/drivers/infiniband/hw/usnic/usnic_ib_qp_grp.c
@@ -629,6 +629,7 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
{
enum usnic_transport_type trans_type = qp_flow->trans_type;
int err;
+ uint16_t port_num = 0;
switch (trans_type) {
case USNIC_TRANSPORT_ROCE_CUSTOM:
@@ -637,9 +638,15 @@ static int qp_grp_id_from_flow(struct usnic_ib_qp_grp_flow *qp_flow,
case USNIC_TRANSPORT_IPV4_UDP:
err = usnic_transport_sock_get_addr(qp_flow->udp.sock,
NULL, NULL,
- (uint16_t *) id);
+ &port_num);
if (err)
return err;
+ /*
+ * Copy port_num to stack first and then to *id,
+ * so that the short to int cast works for little
+ * and big endian systems.
+ */
+ *id = port_num;
break;
default:
usnic_err("Unsupported transport %u\n", trans_type);
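
The usnic fix above avoids writing a 16-bit port number through a pointer cast to part of a wider int: on a big-endian machine the two bytes land in the high half of the int, so the value read back is wrong. A short illustration of the difference between the cast-style store and the plain integer assignment the patch now uses; the variable names are made up.

/* Illustration of why storing 16 bits into part of an int is endian-dependent,
 * while assigning the 16-bit value to the int (as the patch does) is not.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t port = 0x1234;
	int id_copy = 0, id_assign = 0;

	memcpy(&id_copy, &port, sizeof(port));	/* 0x1234 on LE, 0x12340000 on 32-bit BE */
	id_assign = port;			/* always 0x1234 */

	printf("copy=0x%x assign=0x%x\n", id_copy, id_assign);
	return 0;
}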
diff --git a/drivers/infiniband/ulp/iser/iser_initiator.c b/drivers/infiniband/ulp/iser/iser_initiator.c
index 538822684d5b..334f34b1cd46 100644
--- a/drivers/infiniband/ulp/iser/iser_initiator.c
+++ b/drivers/infiniband/ulp/iser/iser_initiator.c
@@ -610,11 +610,12 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
ISER_HEADERS_LEN, DMA_TO_DEVICE);
kmem_cache_free(ig.desc_cache, tx_desc);
+ tx_desc = NULL;
}
atomic_dec(&ib_conn->post_send_buf_count);
- if (tx_desc->type == ISCSI_TX_CONTROL) {
+ if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */
task = (void *) ((long)(void *)tx_desc -
sizeof(struct iscsi_task));
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index afe95674008b..ca37edef2791 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -652,9 +652,13 @@ static int iser_disconnected_handler(struct rdma_cm_id *cma_id)
/* getting here when the state is UP means that the conn is being *
* terminated asynchronously from the iSCSI layer's perspective. */
if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
- ISER_CONN_TERMINATING))
- iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
- ISCSI_ERR_CONN_FAILED);
+ ISER_CONN_TERMINATING)){
+ if (ib_conn->iser_conn)
+ iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
+ ISCSI_ERR_CONN_FAILED);
+ else
+ iser_err("iscsi_iser connection isn't bound\n");
+ }
/* Complete the termination process if no posts are pending */
if (ib_conn->post_recv_buf_count == 0 &&
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 2b161be3c1a3..8ee228e9ab5a 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -453,6 +453,7 @@ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
if (ret) {
pr_err("Failed to create fastreg descriptor err=%d\n",
ret);
+ kfree(fr_desc);
goto err;
}
@@ -491,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
isert_conn->state = ISER_CONN_INIT;
INIT_LIST_HEAD(&isert_conn->conn_accept_node);
init_completion(&isert_conn->conn_login_comp);
- init_waitqueue_head(&isert_conn->conn_wait);
- init_waitqueue_head(&isert_conn->conn_wait_comp_err);
+ init_completion(&isert_conn->conn_wait);
+ init_completion(&isert_conn->conn_wait_comp_err);
kref_init(&isert_conn->conn_kref);
kref_get(&isert_conn->conn_kref);
mutex_init(&isert_conn->conn_mutex);
- mutex_init(&isert_conn->conn_comp_mutex);
spin_lock_init(&isert_conn->conn_lock);
cma_id->context = isert_conn;
@@ -687,11 +687,11 @@ isert_disconnect_work(struct work_struct *work)
pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
mutex_lock(&isert_conn->conn_mutex);
- isert_conn->state = ISER_CONN_DOWN;
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
if (isert_conn->post_recv_buf_count == 0 &&
atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("Calling wake_up(&isert_conn->conn_wait);\n");
mutex_unlock(&isert_conn->conn_mutex);
goto wake_up;
}
@@ -711,7 +711,7 @@ isert_disconnect_work(struct work_struct *work)
mutex_unlock(&isert_conn->conn_mutex);
wake_up:
- wake_up(&isert_conn->conn_wait);
+ complete(&isert_conn->conn_wait);
isert_put_conn(isert_conn);
}
@@ -887,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
* Coalesce send completion interrupts by only setting IB_SEND_SIGNALED
* bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls.
*/
- mutex_lock(&isert_conn->conn_comp_mutex);
- if (coalesce &&
+ mutex_lock(&isert_conn->conn_mutex);
+ if (coalesce && isert_conn->state == ISER_CONN_UP &&
++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) {
+ tx_desc->llnode_active = true;
llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
return;
}
isert_conn->conn_comp_batch = 0;
tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist);
- mutex_unlock(&isert_conn->conn_comp_mutex);
+ mutex_unlock(&isert_conn->conn_mutex);
send_wr->send_flags = IB_SEND_SIGNALED;
}
@@ -1463,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_SCSI_CMD:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
if (cmd->data_direction == DMA_TO_DEVICE)
@@ -1475,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_SCSI_TMFUNC:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
transport_generic_free_cmd(&cmd->se_cmd, 0);
@@ -1485,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd)
case ISCSI_OP_TEXT:
spin_lock_bh(&conn->cmd_lock);
if (!list_empty(&cmd->i_conn_node))
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
/*
@@ -1548,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc,
iscsit_stop_dataout_timer(cmd);
device->unreg_rdma_mem(isert_cmd, isert_conn);
cmd->write_data_done = wr->cur_rdma_length;
+ wr->send_wr_num = 0;
pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd);
spin_lock_bh(&cmd->istate_lock);
@@ -1588,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work)
pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n");
/*
* Call atomic_dec(&isert_conn->post_send_buf_count)
- * from isert_free_conn()
+ * from isert_wait_conn()
*/
isert_conn->logout_posted = true;
iscsit_logout_post_handler(cmd, cmd->conn);
@@ -1612,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
struct ib_device *ib_dev)
{
struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
+ struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
if (cmd->i_state == ISTATE_SEND_TASKMGTRSP ||
cmd->i_state == ISTATE_SEND_LOGOUTRSP ||
@@ -1623,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc,
queue_work(isert_comp_wq, &isert_cmd->comp_work);
return;
}
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
cmd->i_state = ISTATE_SENT_STATUS;
isert_completion_put(tx_desc, isert_cmd, ib_dev);
@@ -1661,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc,
case ISER_IB_RDMA_READ:
pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
isert_completion_rdma_read(tx_desc, isert_cmd);
break;
default:
@@ -1690,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc,
}
static void
-isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
+isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev)
+{
+ struct llist_node *llnode;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
+
+ mutex_lock(&isert_conn->conn_mutex);
+ llnode = llist_del_all(&isert_conn->conn_comp_llist);
+ isert_conn->conn_comp_batch = 0;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
+
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
+ }
+}
+
+static void
+isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn)
{
struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ struct llist_node *llnode = tx_desc->comp_llnode_batch;
+ struct isert_rdma_wr *wr;
+ struct iser_tx_desc *t;
- if (tx_desc) {
- struct isert_cmd *isert_cmd = tx_desc->isert_cmd;
+ while (llnode) {
+ t = llist_entry(llnode, struct iser_tx_desc, comp_llnode);
+ llnode = llist_next(llnode);
+ wr = &t->isert_cmd->rdma_wr;
- if (!isert_cmd)
- isert_unmap_tx_desc(tx_desc, ib_dev);
- else
- isert_completion_put(tx_desc, isert_cmd, ib_dev);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
+ isert_completion_put(t, t->isert_cmd, ib_dev);
}
+ tx_desc->comp_llnode_batch = NULL;
- if (isert_conn->post_recv_buf_count == 0 &&
- atomic_read(&isert_conn->post_send_buf_count) == 0) {
- pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
- pr_debug("Calling wake_up from isert_cq_comp_err\n");
+ if (!isert_cmd)
+ isert_unmap_tx_desc(tx_desc, ib_dev);
+ else
+ isert_completion_put(tx_desc, isert_cmd, ib_dev);
+}
- mutex_lock(&isert_conn->conn_mutex);
- if (isert_conn->state != ISER_CONN_DOWN)
- isert_conn->state = ISER_CONN_TERMINATING;
- mutex_unlock(&isert_conn->conn_mutex);
+static void
+isert_cq_rx_comp_err(struct isert_conn *isert_conn)
+{
+ struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
+ struct iscsi_conn *conn = isert_conn->conn;
- wake_up(&isert_conn->conn_wait_comp_err);
+ if (isert_conn->post_recv_buf_count)
+ return;
+
+ isert_cq_drain_comp_llist(isert_conn, ib_dev);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
}
+
+ while (atomic_read(&isert_conn->post_send_buf_count))
+ msleep(3000);
+
+ mutex_lock(&isert_conn->conn_mutex);
+ isert_conn->state = ISER_CONN_DOWN;
+ mutex_unlock(&isert_conn->conn_mutex);
+
+ complete(&isert_conn->conn_wait_comp_err);
}
static void
@@ -1739,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work)
pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n");
pr_debug("TX wc.status: 0x%08x\n", wc.status);
pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err);
- atomic_dec(&isert_conn->post_send_buf_count);
- isert_cq_comp_err(tx_desc, isert_conn);
+
+ if (wc.wr_id != ISER_FASTREG_LI_WRID) {
+ if (tx_desc->llnode_active)
+ continue;
+
+ atomic_dec(&isert_conn->post_send_buf_count);
+ isert_cq_tx_comp_err(tx_desc, isert_conn);
+ }
}
}
@@ -1783,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work)
wc.vendor_err);
}
isert_conn->post_recv_buf_count--;
- isert_cq_comp_err(NULL, isert_conn);
+ isert_cq_rx_comp_err(isert_conn);
}
}
@@ -2201,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
if (!fr_desc->valid) {
memset(&inv_wr, 0, sizeof(inv_wr));
+ inv_wr.wr_id = ISER_FASTREG_LI_WRID;
inv_wr.opcode = IB_WR_LOCAL_INV;
inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey;
wr = &inv_wr;
@@ -2211,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
/* Prepare FASTREG WR */
memset(&fr_wr, 0, sizeof(fr_wr));
+ fr_wr.wr_id = ISER_FASTREG_LI_WRID;
fr_wr.opcode = IB_WR_FAST_REG_MR;
fr_wr.wr.fast_reg.iova_start =
fr_desc->data_frpl->page_list[0] + page_off;
@@ -2376,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
isert_init_send_wr(isert_conn, isert_cmd,
&isert_cmd->tx_desc.send_wr, true);
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n",
isert_cmd);
@@ -2409,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery)
return rc;
}
- atomic_inc(&isert_conn->post_send_buf_count);
+ atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count);
rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed);
if (rc) {
pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n");
- atomic_dec(&isert_conn->post_send_buf_count);
+ atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count);
}
pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n",
isert_cmd);
@@ -2701,22 +2757,11 @@ isert_free_np(struct iscsi_np *np)
kfree(isert_np);
}
-static int isert_check_state(struct isert_conn *isert_conn, int state)
-{
- int ret;
-
- mutex_lock(&isert_conn->conn_mutex);
- ret = (isert_conn->state == state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- return ret;
-}
-
-static void isert_free_conn(struct iscsi_conn *conn)
+static void isert_wait_conn(struct iscsi_conn *conn)
{
struct isert_conn *isert_conn = conn->context;
- pr_debug("isert_free_conn: Starting \n");
+ pr_debug("isert_wait_conn: Starting \n");
/*
* Decrement post_send_buf_count for special case when called
* from isert_do_control_comp() -> iscsit_logout_post_handler()
@@ -2726,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn)
atomic_dec(&isert_conn->post_send_buf_count);
if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) {
- pr_debug("Calling rdma_disconnect from isert_free_conn\n");
+ pr_debug("Calling rdma_disconnect from isert_wait_conn\n");
rdma_disconnect(isert_conn->conn_cm_id);
}
/*
* Only wait for conn_wait_comp_err if the isert_conn made it
* into full feature phase..
*/
- if (isert_conn->state == ISER_CONN_UP) {
- pr_debug("isert_free_conn: Before wait_event comp_err %d\n",
- isert_conn->state);
- mutex_unlock(&isert_conn->conn_mutex);
-
- wait_event(isert_conn->conn_wait_comp_err,
- (isert_check_state(isert_conn, ISER_CONN_TERMINATING)));
-
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
-
- isert_put_conn(isert_conn);
- return;
- }
if (isert_conn->state == ISER_CONN_INIT) {
mutex_unlock(&isert_conn->conn_mutex);
- isert_put_conn(isert_conn);
return;
}
- pr_debug("isert_free_conn: wait_event conn_wait %d\n",
- isert_conn->state);
+ if (isert_conn->state == ISER_CONN_UP)
+ isert_conn->state = ISER_CONN_TERMINATING;
mutex_unlock(&isert_conn->conn_mutex);
- wait_event(isert_conn->conn_wait,
- (isert_check_state(isert_conn, ISER_CONN_DOWN)));
+ wait_for_completion(&isert_conn->conn_wait_comp_err);
+
+ wait_for_completion(&isert_conn->conn_wait);
+}
+
+static void isert_free_conn(struct iscsi_conn *conn)
+{
+ struct isert_conn *isert_conn = conn->context;
isert_put_conn(isert_conn);
}
@@ -2770,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = {
.iscsit_setup_np = isert_setup_np,
.iscsit_accept_np = isert_accept_np,
.iscsit_free_np = isert_free_np,
+ .iscsit_wait_conn = isert_wait_conn,
.iscsit_free_conn = isert_free_conn,
.iscsit_get_login_rx = isert_get_login_rx,
.iscsit_put_login_tx = isert_put_login_tx,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h
index 708a069002f3..f6ae7f5dd408 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.h
+++ b/drivers/infiniband/ulp/isert/ib_isert.h
@@ -6,6 +6,7 @@
#define ISERT_RDMA_LISTEN_BACKLOG 10
#define ISCSI_ISER_SG_TABLESIZE 256
+#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL
enum isert_desc_type {
ISCSI_TX_CONTROL,
@@ -45,6 +46,7 @@ struct iser_tx_desc {
struct isert_cmd *isert_cmd;
struct llist_node *comp_llnode_batch;
struct llist_node comp_llnode;
+ bool llnode_active;
struct ib_send_wr send_wr;
} __packed;
@@ -116,8 +118,8 @@ struct isert_conn {
struct isert_device *conn_device;
struct work_struct conn_logout_work;
struct mutex conn_mutex;
- wait_queue_head_t conn_wait;
- wait_queue_head_t conn_wait_comp_err;
+ struct completion conn_wait;
+ struct completion conn_wait_comp_err;
struct kref conn_kref;
struct list_head conn_fr_pool;
int conn_fr_pool_size;
@@ -126,7 +128,6 @@ struct isert_conn {
#define ISERT_COMP_BATCH_COUNT 8
int conn_comp_batch;
struct llist_head conn_comp_llist;
- struct mutex conn_comp_mutex;
};
#define ISERT_MAX_CQ 64
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 520a7e5a490b..0e537d8d0e47 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -3666,9 +3666,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rdma_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RDMA_SIZE) {
@@ -3706,9 +3706,9 @@ static ssize_t srpt_tpg_attrib_store_srp_max_rsp_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_RSP_SIZE) {
@@ -3746,9 +3746,9 @@ static ssize_t srpt_tpg_attrib_store_srp_sq_size(
unsigned long val;
int ret;
- ret = strict_strtoul(page, 0, &val);
+ ret = kstrtoul(page, 0, &val);
if (ret < 0) {
- pr_err("strict_strtoul() failed with ret: %d\n", ret);
+ pr_err("kstrtoul() failed with ret: %d\n", ret);
return -EINVAL;
}
if (val > MAX_SRPT_SRQ_SIZE) {
@@ -3793,7 +3793,7 @@ static ssize_t srpt_tpg_store_enable(
unsigned long tmp;
int ret;
- ret = strict_strtoul(page, 0, &tmp);
+ ret = kstrtoul(page, 0, &tmp);
if (ret < 0) {
printk(KERN_ERR "Unable to extract srpt_tpg_store_enable\n");
return -EINVAL;
diff --git a/drivers/input/misc/arizona-haptics.c b/drivers/input/misc/arizona-haptics.c
index 7a04f54ef961..ef2e281b0a43 100644
--- a/drivers/input/misc/arizona-haptics.c
+++ b/drivers/input/misc/arizona-haptics.c
@@ -37,7 +37,6 @@ static void arizona_haptics_work(struct work_struct *work)
struct arizona_haptics,
work);
struct arizona *arizona = haptics->arizona;
- struct mutex *dapm_mutex = &arizona->dapm->card->dapm_mutex;
int ret;
if (!haptics->arizona->dapm) {
@@ -67,13 +66,10 @@ static void arizona_haptics_work(struct work_struct *work)
return;
}
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_enable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to start HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -81,21 +77,14 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
-
- mutex_unlock(dapm_mutex);
-
} else {
/* This disable sequence will be a noop if already enabled */
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
ret = snd_soc_dapm_disable_pin(arizona->dapm, "HAPTICS");
if (ret != 0) {
dev_err(arizona->dev, "Failed to disable HAPTICS: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
@@ -103,12 +92,9 @@ static void arizona_haptics_work(struct work_struct *work)
if (ret != 0) {
dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
ret);
- mutex_unlock(dapm_mutex);
return;
}
- mutex_unlock(dapm_mutex);
-
ret = regmap_update_bits(arizona->regmap,
ARIZONA_HAPTICS_CONTROL_1,
ARIZONA_HAP_CTRL_MASK,
@@ -155,16 +141,11 @@ static int arizona_haptics_play(struct input_dev *input, void *data,
static void arizona_haptics_close(struct input_dev *input)
{
struct arizona_haptics *haptics = input_get_drvdata(input);
- struct mutex *dapm_mutex = &haptics->arizona->dapm->card->dapm_mutex;
cancel_work_sync(&haptics->work);
- mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
if (haptics->arizona->dapm)
snd_soc_dapm_disable_pin(haptics->arizona->dapm, "HAPTICS");
-
- mutex_unlock(dapm_mutex);
}
static int arizona_haptics_probe(struct platform_device *pdev)
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 8911850c9444..1d9ab39af29f 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -79,7 +79,6 @@
#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES)
#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1))
-#define ARM_SMMU_PTE_HWTABLE_SIZE (PTRS_PER_PTE * sizeof(pte_t))
/* Stage-1 PTE */
#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6)
@@ -191,6 +190,9 @@
#define ARM_SMMU_GR1_CBAR(n) (0x0 + ((n) << 2))
#define CBAR_VMID_SHIFT 0
#define CBAR_VMID_MASK 0xff
+#define CBAR_S1_BPSHCFG_SHIFT 8
+#define CBAR_S1_BPSHCFG_MASK 3
+#define CBAR_S1_BPSHCFG_NSH 3
#define CBAR_S1_MEMATTR_SHIFT 12
#define CBAR_S1_MEMATTR_MASK 0xf
#define CBAR_S1_MEMATTR_WB 0xf
@@ -393,7 +395,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask;
- struct mutex lock;
+ spinlock_t lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -632,6 +634,28 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev)
return IRQ_HANDLED;
}
+static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
+ size_t size)
+{
+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
+
+
+ /* Ensure new page tables are visible to the hardware walker */
+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
+ dsb();
+ } else {
+ /*
+ * If the SMMU can't walk tables in the CPU caches, treat them
+ * like non-coherent DMA since we need to flush the new entries
+ * all the way out to memory. There's no possibility of
+ * recursion here as the SMMU table walker will not be wired
+ * through another SMMU.
+ */
+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
+ DMA_TO_DEVICE);
+ }
+}
+
static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
{
u32 reg;
@@ -650,11 +674,16 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
if (smmu->version == 1)
reg |= root_cfg->irptndx << CBAR_IRPTNDX_SHIFT;
- /* Use the weakest memory type, so it is overridden by the pte */
- if (stage1)
- reg |= (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
- else
+ /*
+ * Use the weakest shareability/memory types, so they are
+ * overridden by the ttbcr/pte.
+ */
+ if (stage1) {
+ reg |= (CBAR_S1_BPSHCFG_NSH << CBAR_S1_BPSHCFG_SHIFT) |
+ (CBAR_S1_MEMATTR_WB << CBAR_S1_MEMATTR_SHIFT);
+ } else {
reg |= ARM_SMMU_CB_VMID(root_cfg) << CBAR_VMID_SHIFT;
+ }
writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(root_cfg->cbndx));
if (smmu->version > 1) {
@@ -715,6 +744,8 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain)
}
/* TTBR0 */
+ arm_smmu_flush_pgtable(smmu, root_cfg->pgd,
+ PTRS_PER_PGD * sizeof(pgd_t));
reg = __pa(root_cfg->pgd);
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO);
reg = (phys_addr_t)__pa(root_cfg->pgd) >> 32;
@@ -901,7 +932,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd;
- mutex_init(&smmu_domain->lock);
+ spin_lock_init(&smmu_domain->lock);
domain->priv = smmu_domain;
return 0;
@@ -1128,6 +1159,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_device *device_smmu = dev->archdata.iommu;
struct arm_smmu_master *master;
+ unsigned long flags;
if (!device_smmu) {
dev_err(dev, "cannot attach to SMMU, is it on the same bus?\n");
@@ -1138,7 +1170,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains.
*/
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, flags);
if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev);
@@ -1153,7 +1185,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(device_smmu->dev));
goto err_unlock;
}
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
/* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1163,7 +1195,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock:
- mutex_unlock(&smmu_domain->lock);
+ spin_unlock_irqrestore(&smmu_domain->lock, flags);
return ret;
}
@@ -1177,23 +1209,6 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_domain_remove_master(smmu_domain, master);
}
-static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr,
- size_t size)
-{
- unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
- /*
- * If the SMMU can't walk tables in the CPU caches, treat them
- * like non-coherent DMA since we need to flush the new entries
- * all the way out to memory. There's no possibility of recursion
- * here as the SMMU table walker will not be wired through another
- * SMMU.
- */
- if (!(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK))
- dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
- DMA_TO_DEVICE);
-}
-
static bool arm_smmu_pte_is_contiguous_range(unsigned long addr,
unsigned long end)
{
@@ -1210,12 +1225,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
if (pmd_none(*pmd)) {
/* Allocate a new set of tables */
- pgtable_t table = alloc_page(PGALLOC_GFP);
+ pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO);
if (!table)
return -ENOMEM;
- arm_smmu_flush_pgtable(smmu, page_address(table),
- ARM_SMMU_PTE_HWTABLE_SIZE);
+ arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE);
if (!pgtable_page_ctor(table)) {
__free_page(table);
return -ENOMEM;
@@ -1317,9 +1331,15 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
#ifndef __PAGETABLE_PMD_FOLDED
if (pud_none(*pud)) {
- pmd = pmd_alloc_one(NULL, addr);
+ pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC);
if (!pmd)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE);
+ pud_populate(NULL, pud, pmd);
+ arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
+
+ pmd += pmd_index(addr);
} else
#endif
pmd = pmd_offset(pud, addr);
@@ -1328,8 +1348,6 @@ static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud,
next = pmd_addr_end(addr, end);
ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, end, pfn,
flags, stage);
- pud_populate(NULL, pud, pmd);
- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud));
phys += next - addr;
} while (pmd++, addr = next, addr < end);
@@ -1346,9 +1364,15 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
#ifndef __PAGETABLE_PUD_FOLDED
if (pgd_none(*pgd)) {
- pud = pud_alloc_one(NULL, addr);
+ pud = (pud_t *)get_zeroed_page(GFP_ATOMIC);
if (!pud)
return -ENOMEM;
+
+ arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE);
+ pgd_populate(NULL, pgd, pud);
+ arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
+
+ pud += pud_index(addr);
} else
#endif
pud = pud_offset(pgd, addr);
@@ -1357,8 +1381,6 @@ static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd,
next = pud_addr_end(addr, end);
ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys,
flags, stage);
- pgd_populate(NULL, pud, pgd);
- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd));
phys += next - addr;
} while (pud++, addr = next, addr < end);
@@ -1375,6 +1397,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
pgd_t *pgd = root_cfg->pgd;
struct arm_smmu_device *smmu = root_cfg->smmu;
+ unsigned long irqflags;
if (root_cfg->cbar == CBAR_TYPE_S2_TRANS) {
stage = 2;
@@ -1397,7 +1420,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask)
return -ERANGE;
- mutex_lock(&smmu_domain->lock);
+ spin_lock_irqsave(&smmu_domain->lock, irqflags);
pgd += pgd_index(iova);
end = iova + size;
do {
@@ -1413,11 +1436,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end);
out_unlock:
- mutex_unlock(&smmu_domain->lock);
-
- /* Ensure new page tables are visible to the hardware walker */
- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- dsb();
+ spin_unlock_irqrestore(&smmu_domain->lock, irqflags);
return ret;
}
@@ -1987,8 +2006,10 @@ static int __init arm_smmu_init(void)
if (!iommu_present(&platform_bus_type))
bus_set_iommu(&platform_bus_type, &arm_smmu_ops);
+#ifdef CONFIG_ARM_AMBA
if (!iommu_present(&amba_bustype))
bus_set_iommu(&amba_bustype, &arm_smmu_ops);
+#endif
return 0;
}
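
A side note on the arm-smmu hunks above: converting smmu_domain->lock from a mutex to a spinlock means the mapping path may no longer sleep, which is why the page-table allocations under that lock switch from alloc_page(PGALLOC_GFP), pmd_alloc_one() and pud_alloc_one() to GFP_ATOMIC allocations. A minimal sketch of the pattern (the struct and function names here are illustrative, not taken from the driver):

	#include <linux/errno.h>
	#include <linux/gfp.h>
	#include <linux/spinlock.h>

	struct demo_domain {
		spinlock_t lock;
		unsigned long *table;	/* one page of table entries */
	};

	/* Allocate the table while holding the spinlock: only non-sleeping
	 * (GFP_ATOMIC) allocations are legal in this context. */
	static int demo_populate(struct demo_domain *d)
	{
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&d->lock, flags);
		if (!d->table) {
			d->table = (unsigned long *)get_zeroed_page(GFP_ATOMIC);
			if (!d->table)
				ret = -ENOMEM;
		}
		spin_unlock_irqrestore(&d->lock, flags);

		return ret;
	}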
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index d97fbe4fb9b1..80fffba7f12d 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -354,8 +354,8 @@ DEBUG_FOPS(mem);
return -ENOMEM; \
}
-#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 600)
-#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 400)
+#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
+#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data)
{
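
The leading zero matters in the two mode constants above: without it the value is read as decimal. Worked out:

	600 (decimal) == 01130 (octal)  -> sticky bit, --x-wx---   (not what was meant)
	0600 (octal)  == S_IRUSR|S_IWUSR -> rw-------               (intended)
	400 (decimal) == 0620  (octal)  -> rw--w----
	0400 (octal)  == S_IRUSR         -> r--------               (intended)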
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index 86b484cb3ec2..5194afb39e78 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_SIRF_IRQ) += irq-sirfsoc.o
obj-$(CONFIG_RENESAS_INTC_IRQPIN) += irq-renesas-intc-irqpin.o
obj-$(CONFIG_RENESAS_IRQC) += irq-renesas-irqc.o
obj-$(CONFIG_VERSATILE_FPGA_IRQ) += irq-versatile-fpga.o
+obj-$(CONFIG_ARCH_NSPIRE) += irq-zevio.o
obj-$(CONFIG_ARCH_VT8500) += irq-vt8500.o
obj-$(CONFIG_TB10X_IRQC) += irq-tb10x.o
obj-$(CONFIG_XTENSA) += irq-xtensa-pic.o
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 9300bc32784e..540956465ed2 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -381,7 +381,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& PCI_MSI_DOORBELL_MASK;
- writel(~PCI_MSI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~msimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
for (msinr = PCI_MSI_DOORBELL_START;
@@ -407,7 +407,7 @@ armada_370_xp_handle_irq(struct pt_regs *regs)
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS)
& IPI_DOORBELL_MASK;
- writel(~IPI_DOORBELL_MASK, per_cpu_int_base +
+ writel(~ipimask, per_cpu_int_base +
ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
/* Handle all pending doorbells */
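
A note on the two armada-370-xp hunks above: the cause register is acknowledged by writing a bit as 0, so the fix acknowledges only the doorbell bits that were actually read instead of the whole mask; doorbells raised between the read and the write keep their bits set and re-trigger the handler rather than being lost. Illustrative fragment (base, CAUSE_OFF and DOORBELL_MASK stand in for the driver's per-CPU base, cause-register offset and doorbell mask):

	/* Bits written as 0 are cleared, bits written as 1 are left alone. */
	u32 pending = readl(base + CAUSE_OFF) & DOORBELL_MASK;

	/* Ack exactly what was read; writing ~DOORBELL_MASK here would also
	 * clear doorbells that arrived after the readl(), without handling
	 * them. */
	writel(~pending, base + CAUSE_OFF);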
diff --git a/drivers/irqchip/irq-metag-ext.c b/drivers/irqchip/irq-metag-ext.c
index 92c41ab4dbfd..2cb474ad8809 100644
--- a/drivers/irqchip/irq-metag-ext.c
+++ b/drivers/irqchip/irq-metag-ext.c
@@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
diff --git a/drivers/irqchip/irq-metag.c b/drivers/irqchip/irq-metag.c
index 8e94d7a3b20d..c16c186d97d3 100644
--- a/drivers/irqchip/irq-metag.c
+++ b/drivers/irqchip/irq-metag.c
@@ -201,7 +201,7 @@ static int metag_internal_irq_set_affinity(struct irq_data *data,
* one cpu (the interrupt code doesn't support it), so we just
* pick the first cpu we find in 'cpumask'.
*/
- cpu = cpumask_any(cpumask);
+ cpu = cpumask_any_and(cpumask, cpu_online_mask);
thread = cpu_2_hwthread_id[cpu];
metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
diff --git a/drivers/irqchip/irq-orion.c b/drivers/irqchip/irq-orion.c
index e51d40031884..8e41be62812e 100644
--- a/drivers/irqchip/irq-orion.c
+++ b/drivers/irqchip/irq-orion.c
@@ -111,7 +111,8 @@ IRQCHIP_DECLARE(orion_intc, "marvell,orion-intc", orion_irq_init);
static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
{
struct irq_domain *d = irq_get_handler_data(irq);
- struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, irq);
+
+ struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, 0);
u32 stat = readl_relaxed(gc->reg_base + ORION_BRIDGE_IRQ_CAUSE) &
gc->mask_cache;
@@ -123,6 +124,19 @@ static void orion_bridge_irq_handler(unsigned int irq, struct irq_desc *desc)
}
}
+/*
+ * Bridge IRQ_CAUSE is asserted regardless of IRQ_MASK register.
+ * To avoid interrupt events on stale irqs, we clear them before unmask.
+ */
+static unsigned int orion_bridge_irq_startup(struct irq_data *d)
+{
+ struct irq_chip_type *ct = irq_data_get_chip_type(d);
+
+ ct->chip.irq_ack(d);
+ ct->chip.irq_unmask(d);
+ return 0;
+}
+
static int __init orion_bridge_irq_init(struct device_node *np,
struct device_node *parent)
{
@@ -143,7 +157,7 @@ static int __init orion_bridge_irq_init(struct device_node *np,
}
ret = irq_alloc_domain_generic_chips(domain, nrirqs, 1, np->name,
- handle_level_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ handle_edge_irq, clr, 0, IRQ_GC_INIT_MASK_CACHE);
if (ret) {
pr_err("%s: unable to alloc irq domain gc\n", np->name);
return ret;
@@ -176,12 +190,14 @@ static int __init orion_bridge_irq_init(struct device_node *np,
gc->chip_types[0].regs.ack = ORION_BRIDGE_IRQ_CAUSE;
gc->chip_types[0].regs.mask = ORION_BRIDGE_IRQ_MASK;
+ gc->chip_types[0].chip.irq_startup = orion_bridge_irq_startup;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_clr_bit;
gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
- /* mask all interrupts */
+ /* mask and clear all interrupts */
writel(0, gc->reg_base + ORION_BRIDGE_IRQ_MASK);
+ writel(0, gc->reg_base + ORION_BRIDGE_IRQ_CAUSE);
irq_set_handler_data(irq, domain);
irq_set_chained_handler(irq, orion_bridge_irq_handler);
diff --git a/drivers/irqchip/irq-zevio.c b/drivers/irqchip/irq-zevio.c
new file mode 100644
index 000000000000..8ed04c4a43ee
--- /dev/null
+++ b/drivers/irqchip/irq-zevio.c
@@ -0,0 +1,127 @@
+/*
+ * linux/drivers/irqchip/irq-zevio.c
+ *
+ * Copyright (C) 2013 Daniel Tang <tangrs@tangrs.id.au>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+
+#include <asm/mach/irq.h>
+#include <asm/exception.h>
+
+#include "irqchip.h"
+
+#define IO_STATUS 0x000
+#define IO_RAW_STATUS 0x004
+#define IO_ENABLE 0x008
+#define IO_DISABLE 0x00C
+#define IO_CURRENT 0x020
+#define IO_RESET 0x028
+#define IO_MAX_PRIOTY 0x02C
+
+#define IO_IRQ_BASE 0x000
+#define IO_FIQ_BASE 0x100
+
+#define IO_INVERT_SEL 0x200
+#define IO_STICKY_SEL 0x204
+#define IO_PRIORITY_SEL 0x300
+
+#define MAX_INTRS 32
+#define FIQ_START MAX_INTRS
+
+static struct irq_domain *zevio_irq_domain;
+static void __iomem *zevio_irq_io;
+
+static void zevio_irq_ack(struct irq_data *irqd)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(irqd);
+ struct irq_chip_regs *regs =
+ &container_of(irqd->chip, struct irq_chip_type, chip)->regs;
+
+ readl(gc->reg_base + regs->ack);
+}
+
+static asmlinkage void __exception_irq_entry zevio_handle_irq(struct pt_regs *regs)
+{
+ int irqnr;
+
+ while (readl(zevio_irq_io + IO_STATUS)) {
+ irqnr = readl(zevio_irq_io + IO_CURRENT);
+ irqnr = irq_find_mapping(zevio_irq_domain, irqnr);
+ handle_IRQ(irqnr, regs);
+ };
+}
+
+static void __init zevio_init_irq_base(void __iomem *base)
+{
+ /* Disable all interrupts */
+ writel(~0, base + IO_DISABLE);
+
+ /* Accept interrupts of all priorities */
+ writel(0xF, base + IO_MAX_PRIOTY);
+
+ /* Reset existing interrupts */
+ readl(base + IO_RESET);
+}
+
+static int __init zevio_of_init(struct device_node *node,
+ struct device_node *parent)
+{
+ unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
+ struct irq_chip_generic *gc;
+ int ret;
+
+ if (WARN_ON(zevio_irq_io || zevio_irq_domain))
+ return -EBUSY;
+
+ zevio_irq_io = of_iomap(node, 0);
+ BUG_ON(!zevio_irq_io);
+
+ /* Do not invert interrupt status bits */
+ writel(~0, zevio_irq_io + IO_INVERT_SEL);
+
+ /* Disable sticky interrupts */
+ writel(0, zevio_irq_io + IO_STICKY_SEL);
+
+ /* We don't use IRQ priorities. Set each IRQ to highest priority. */
+ memset_io(zevio_irq_io + IO_PRIORITY_SEL, 0, MAX_INTRS * sizeof(u32));
+
+ /* Init IRQ and FIQ */
+ zevio_init_irq_base(zevio_irq_io + IO_IRQ_BASE);
+ zevio_init_irq_base(zevio_irq_io + IO_FIQ_BASE);
+
+ zevio_irq_domain = irq_domain_add_linear(node, MAX_INTRS,
+ &irq_generic_chip_ops, NULL);
+ BUG_ON(!zevio_irq_domain);
+
+ ret = irq_alloc_domain_generic_chips(zevio_irq_domain, MAX_INTRS, 1,
+ "zevio_intc", handle_level_irq,
+ clr, 0, IRQ_GC_INIT_MASK_CACHE);
+ BUG_ON(ret);
+
+ gc = irq_get_domain_generic_chip(zevio_irq_domain, 0);
+ gc->reg_base = zevio_irq_io;
+ gc->chip_types[0].chip.irq_ack = zevio_irq_ack;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_disable_reg;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_unmask_enable_reg;
+ gc->chip_types[0].regs.mask = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.enable = IO_IRQ_BASE + IO_ENABLE;
+ gc->chip_types[0].regs.disable = IO_IRQ_BASE + IO_DISABLE;
+ gc->chip_types[0].regs.ack = IO_IRQ_BASE + IO_RESET;
+
+ set_handle_irq(zevio_handle_irq);
+
+ pr_info("TI-NSPIRE classic IRQ controller\n");
+ return 0;
+}
+
+IRQCHIP_DECLARE(zevio_irq, "lsi,zevio-intc", zevio_of_init);
diff --git a/drivers/isdn/capi/Kconfig b/drivers/isdn/capi/Kconfig
index f04686580040..9816c51eb5c2 100644
--- a/drivers/isdn/capi/Kconfig
+++ b/drivers/isdn/capi/Kconfig
@@ -16,9 +16,17 @@ config CAPI_TRACE
This will increase the size of the kernelcapi module by 20 KB.
If unsure, say Y.
+config ISDN_CAPI_CAPI20
+ tristate "CAPI2.0 /dev/capi support"
+ help
+ This option will provide the CAPI 2.0 interface to userspace
+ applications via /dev/capi20. Applications should use the
+ standardized libcapi20 to access this functionality. You should say
+ Y/M here.
+
config ISDN_CAPI_MIDDLEWARE
bool "CAPI2.0 Middleware support"
- depends on TTY
+ depends on ISDN_CAPI_CAPI20 && TTY
help
This option will enhance the capabilities of the /dev/capi20
interface. It will provide a means of moving a data connection,
@@ -26,14 +34,6 @@ config ISDN_CAPI_MIDDLEWARE
device. If you want to use pppd with pppdcapiplugin to dial up to
your ISP, say Y here.
-config ISDN_CAPI_CAPI20
- tristate "CAPI2.0 /dev/capi support"
- help
- This option will provide the CAPI 2.0 interface to userspace
- applications via /dev/capi20. Applications should use the
- standardized libcapi20 to access this functionality. You should say
- Y/M here.
-
config ISDN_CAPI_CAPIDRV
tristate "CAPI2.0 capidrv interface support"
depends on ISDN_I4L
diff --git a/drivers/isdn/hisax/q931.c b/drivers/isdn/hisax/q931.c
index af1b020a81f1..b420f8bd862e 100644
--- a/drivers/isdn/hisax/q931.c
+++ b/drivers/isdn/hisax/q931.c
@@ -810,7 +810,7 @@ prfeatureind(char *dest, u_char *p)
dp += sprintf(dp, " octet 3 ");
dp += prbits(dp, *p, 8, 8);
*dp++ = '\n';
- if (!(*p++ & 80)) {
+ if (!(*p++ & 0x80)) {
dp += sprintf(dp, " octet 4 ");
dp += prbits(dp, *p++, 8, 8);
*dp++ = '\n';
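
The q931.c change is a literal-base bug: decimal 80 is 0x50 (bits 4 and 6), so "*p & 80" tested the wrong bits. The octet-extension flag of a Q.931 information-element octet is bit 8, i.e. mask 0x80 (decimal 128), which is what the corrected test checks before dumping octet 4.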
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index 9a06fe883766..95ad936e6048 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING
---help---
Provides thin provisioning and snapshots that share a data store.
-config DM_DEBUG_BLOCK_STACK_TRACING
- boolean "Keep stack trace of persistent data block lock holders"
- depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
- select STACKTRACE
- ---help---
- Enable this for messages that may help debug problems with the
- block manager locking used by thin provisioning and caching.
-
- If unsure, say N.
-
config DM_CACHE
tristate "Cache target (EXPERIMENTAL)"
depends on BLK_DEV_DM
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 0c707e4f4eaf..a4c7306ff43d 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -210,7 +210,9 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+#define GC_SECTORS_USED_SIZE 13
+#define MAX_GC_SECTORS_USED (~(~0ULL << GC_SECTORS_USED_SIZE))
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, GC_SECTORS_USED_SIZE);
BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
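
GC_SECTORS_USED is a 13-bit field (bits 2..14 of gc_mark, with GC_MOVE at bit 15), so its largest representable value is

	MAX_GC_SECTORS_USED = ~(~0ULL << 13) = (1 << 13) - 1 = 8191

The clamp in btree.c (fixed in a later hunk) previously used (1 << 14) - 1 = 16383, which does not fit in 13 bits, so the stored value could wrap instead of saturating.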
diff --git a/drivers/md/bcache/bset.c b/drivers/md/bcache/bset.c
index 4f6b5940e609..3f74b4b0747b 100644
--- a/drivers/md/bcache/bset.c
+++ b/drivers/md/bcache/bset.c
@@ -23,7 +23,7 @@ void bch_dump_bset(struct btree_keys *b, struct bset *i, unsigned set)
for (k = i->start; k < bset_bkey_last(i); k = next) {
next = bkey_next(k);
- printk(KERN_ERR "block %u key %zi/%u: ", set,
+ printk(KERN_ERR "block %u key %li/%u: ", set,
(uint64_t *) k - i->d, i->keys);
if (b->ops->key_dump)
@@ -1185,9 +1185,12 @@ static void __btree_sort(struct btree_keys *b, struct btree_iter *iter,
struct bset *out = (void *) __get_free_pages(__GFP_NOWARN|GFP_NOIO,
order);
if (!out) {
+ struct page *outp;
+
BUG_ON(order > state->page_order);
- out = page_address(mempool_alloc(state->pool, GFP_NOIO));
+ outp = mempool_alloc(state->pool, GFP_NOIO);
+ out = page_address(outp);
used_mempool = true;
order = state->page_order;
}
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 98cc0a810a36..5f9c2a665ca5 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1167,7 +1167,7 @@ uint8_t __bch_btree_mark_key(struct cache_set *c, int level, struct bkey *k)
/* guard against overflow */
SET_GC_SECTORS_USED(g, min_t(unsigned,
GC_SECTORS_USED(g) + KEY_SIZE(k),
- (1 << 14) - 1));
+ MAX_GC_SECTORS_USED));
BUG_ON(!GC_SECTORS_USED(g));
}
@@ -1805,7 +1805,7 @@ static bool btree_insert_key(struct btree *b, struct bkey *k,
static size_t insert_u64s_remaining(struct btree *b)
{
- ssize_t ret = bch_btree_keys_u64s_remaining(&b->keys);
+ long ret = bch_btree_keys_u64s_remaining(&b->keys);
/*
* Might land in the middle of an existing extent and have to split it
diff --git a/drivers/md/bcache/extents.c b/drivers/md/bcache/extents.c
index c3ead586dc27..416d1a3e028e 100644
--- a/drivers/md/bcache/extents.c
+++ b/drivers/md/bcache/extents.c
@@ -194,7 +194,7 @@ err:
mutex_unlock(&b->c->bucket_lock);
bch_extent_to_text(buf, sizeof(buf), k);
btree_bug(b,
-"inconsistent btree pointer %s: bucket %li pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
+"inconsistent btree pointer %s: bucket %zi pin %i prio %i gen %i last_gc %i mark %llu gc_gen %i",
buf, PTR_BUCKET_NR(b->c, k, i), atomic_read(&g->pin),
g->prio, g->gen, g->last_gc, GC_MARK(g), g->gc_gen);
return true;
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 72cd213f213f..5d5d031cf381 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -353,14 +353,14 @@ static void bch_data_insert_start(struct closure *cl)
struct data_insert_op *op = container_of(cl, struct data_insert_op, cl);
struct bio *bio = op->bio, *n;
- if (op->bypass)
- return bch_data_invalidate(cl);
-
if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) {
set_gc_sectors(op->c);
wake_up_gc(op->c);
}
+ if (op->bypass)
+ return bch_data_invalidate(cl);
+
/*
* Journal writes are marked REQ_FLUSH; if the original write was a
* flush, it'll wait on the journal write.
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index c6ab69333a6d..d8458d477a12 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -416,7 +416,7 @@ static int btree_bset_stats(struct btree_op *b_op, struct btree *b)
return MAP_CONTINUE;
}
-int bch_bset_print_stats(struct cache_set *c, char *buf)
+static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
struct bset_stats_op op;
int ret;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 1e018e986610..0e385e40909e 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p)
{
struct mq_policy *mq = to_mq_policy(p);
- kfree(mq->table);
+ vfree(mq->table);
epool_exit(&mq->cache_pool);
epool_exit(&mq->pre_cache_pool);
kfree(mq);
@@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
mq->hash_bits = ffs(mq->nr_buckets) - 1;
- mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL);
+ mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
if (!mq->table)
goto bad_alloc_table;
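
A note on the mq policy hunks above: the hash table is nr_buckets pointers, which for a large cache can span many pages, and kzalloc() needs that much physically contiguous memory. vzalloc() only requires virtually contiguous pages, which is why it is the safer choice for big, long-lived tables; it must then be paired with vfree(), as mq_destroy() now does. A minimal sketch with an illustrative element type:

	#include <linux/vmalloc.h>

	struct demo_bucket;	/* stands in for the policy's hash-chain heads */

	static struct demo_bucket **demo_alloc_table(unsigned nr_buckets)
	{
		/* zeroed, may sleep, backed by vmalloc space */
		return vzalloc(nr_buckets * sizeof(struct demo_bucket *));
	}

	static void demo_free_table(struct demo_bucket **table)
	{
		vfree(table);	/* vfree(NULL) is a no-op */
	}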
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index ffd472e015ca..074b9c8e4cf0 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -289,6 +289,7 @@ struct per_bio_data {
bool tick:1;
unsigned req_nr:2;
struct dm_deferred_entry *all_io_entry;
+ struct dm_hook_info hook_info;
/*
* writethrough fields. These MUST remain at the end of this
@@ -297,7 +298,6 @@ struct per_bio_data {
*/
struct cache *cache;
dm_cblock_t cblock;
- struct dm_hook_info hook_info;
struct dm_bio_details bio_details;
};
@@ -671,15 +671,16 @@ static void remap_to_cache(struct cache *cache, struct bio *bio,
dm_cblock_t cblock)
{
sector_t bi_sector = bio->bi_iter.bi_sector;
+ sector_t block = from_cblock(cblock);
bio->bi_bdev = cache->cache_dev->bdev;
if (!block_size_is_power_of_two(cache))
bio->bi_iter.bi_sector =
- (from_cblock(cblock) * cache->sectors_per_block) +
+ (block * cache->sectors_per_block) +
sector_div(bi_sector, cache->sectors_per_block);
else
bio->bi_iter.bi_sector =
- (from_cblock(cblock) << cache->sectors_per_block_shift) |
+ (block << cache->sectors_per_block_shift) |
(bi_sector & (cache->sectors_per_block - 1));
}
@@ -978,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg)
int r;
struct dm_io_region o_region, c_region;
struct cache *cache = mg->cache;
+ sector_t cblock = from_cblock(mg->cblock);
o_region.bdev = cache->origin_dev->bdev;
o_region.count = cache->sectors_per_block;
c_region.bdev = cache->cache_dev->bdev;
- c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
+ c_region.sector = cblock * cache->sectors_per_block;
c_region.count = cache->sectors_per_block;
if (mg->writeback || mg->demote) {
@@ -1010,13 +1012,15 @@ static void overwrite_endio(struct bio *bio, int err)
struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
unsigned long flags;
+ dm_unhook_bio(&pb->hook_info, bio);
+
if (err)
mg->err = true;
+ mg->requeue_holder = false;
+
spin_lock_irqsave(&cache->lock, flags);
list_add_tail(&mg->list, &cache->completed_migrations);
- dm_unhook_bio(&pb->hook_info, bio);
- mg->requeue_holder = false;
spin_unlock_irqrestore(&cache->lock, flags);
wake_worker(cache);
@@ -2461,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio)
bool discarded_block;
struct dm_bio_prison_cell *cell;
struct policy_result lookup_result;
- struct per_bio_data *pb;
+ struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size);
- if (from_oblock(block) > from_oblock(cache->origin_blocks)) {
+ if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) {
/*
* This can only occur if the io goes to a partial block at
* the end of the origin device. We don't cache these.
* Just remap to the origin and carry on.
*/
- remap_to_origin_clear_discard(cache, bio, block);
+ remap_to_origin(cache, bio);
return DM_MAPIO_REMAPPED;
}
- pb = init_per_bio_data(bio, pb_data_size);
-
if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
defer_bio(cache, bio);
return DM_MAPIO_SUBMITTED;
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index b2b8a10e8427..3842ac738f98 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,29 +201,28 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
/*
* Functions for getting the pages from a bvec.
*/
-static void bio_get_page(struct dpages *dp,
- struct page **p, unsigned long *len, unsigned *offset)
+static void bio_get_page(struct dpages *dp, struct page **p,
+ unsigned long *len, unsigned *offset)
{
- struct bio *bio = dp->context_ptr;
- struct bio_vec bvec = bio_iovec(bio);
- *p = bvec.bv_page;
- *len = bvec.bv_len;
- *offset = bvec.bv_offset;
+ struct bio_vec *bvec = dp->context_ptr;
+ *p = bvec->bv_page;
+ *len = bvec->bv_len - dp->context_u;
+ *offset = bvec->bv_offset + dp->context_u;
}
static void bio_next_page(struct dpages *dp)
{
- struct bio *bio = dp->context_ptr;
- struct bio_vec bvec = bio_iovec(bio);
-
- bio_advance(bio, bvec.bv_len);
+ struct bio_vec *bvec = dp->context_ptr;
+ dp->context_ptr = bvec + 1;
+ dp->context_u = 0;
}
static void bio_dp_init(struct dpages *dp, struct bio *bio)
{
dp->get_page = bio_get_page;
dp->next_page = bio_next_page;
- dp->context_ptr = bio;
+ dp->context_ptr = __bvec_iter_bvec(bio->bi_io_vec, bio->bi_iter);
+ dp->context_u = bio->bi_iter.bi_bvec_done;
}
/*
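
On the dm-io rework above: bio_dp_init() now starts from the bvec the bio's iterator currently points at (__bvec_iter_bvec(bi_io_vec, bi_iter)) and stashes how much of that bvec has already been consumed (bi_iter.bi_bvec_done) in context_u, and bio_get_page() trims the first segment by that amount. For example, with a 4096-byte bvec at offset 0 and bi_bvec_done = 512:

	len    = bv_len    - context_u = 4096 - 512 = 3584
	offset = bv_offset + context_u =    0 + 512 =  512

bio_next_page() resets context_u to 0, so every following bvec is reported in full, and unlike the old code this walk no longer advances the bio itself.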
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6eb9dc9ef8f3..422a9fdeb53e 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1626,8 +1626,11 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
/*
* Only pass ioctls through if the device sizes match exactly.
*/
- if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
- r = scsi_verify_blk_ioctl(NULL, cmd);
+ if (!bdev || ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) {
+ int err = scsi_verify_blk_ioctl(NULL, cmd);
+ if (err)
+ r = err;
+ }
if (r == -ENOTCONN && !fatal_signal_pending(current))
queue_work(kmultipathd, &m->process_queued_ios);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index f284e0bfb25f..7dfdb5c746d6 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1244,6 +1244,9 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio, int error)
dm_bio_restore(bd, bio);
bio_record->details.bi_bdev = NULL;
+
+ atomic_inc(&bio->bi_remaining);
+
queue_bio(ms, bio, rw);
return DM_ENDIO_INCOMPLETE;
}
diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c
index afc3d017de4c..d6e88178d22c 100644
--- a/drivers/md/dm-snap-persistent.c
+++ b/drivers/md/dm-snap-persistent.c
@@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps,
r = insert_exceptions(ps, area, callback, callback_context,
&full);
+ if (!full)
+ memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT);
+
dm_bufio_release(bp);
dm_bufio_forget(client, chunk);
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 7da347665552..fb9efc829182 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -76,7 +76,7 @@
#define THIN_SUPERBLOCK_MAGIC 27022010
#define THIN_SUPERBLOCK_LOCATION 0
-#define THIN_VERSION 1
+#define THIN_VERSION 2
#define THIN_METADATA_CACHE_SIZE 64
#define SECTOR_TO_BLOCK_SHIFT 3
@@ -483,7 +483,7 @@ static int __write_initial_superblock(struct dm_pool_metadata *pmd)
disk_super->data_mapping_root = cpu_to_le64(pmd->root);
disk_super->device_details_root = cpu_to_le64(pmd->details_root);
- disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ disk_super->metadata_block_size = cpu_to_le32(THIN_METADATA_BLOCK_SIZE);
disk_super->metadata_nr_blocks = cpu_to_le64(bdev_size >> SECTOR_TO_BLOCK_SHIFT);
disk_super->data_block_size = cpu_to_le32(pmd->data_block_size);
@@ -651,7 +651,7 @@ static int __create_persistent_data_objects(struct dm_pool_metadata *pmd, bool f
{
int r;
- pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE,
+ pmd->bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
THIN_METADATA_CACHE_SIZE,
THIN_MAX_CONCURRENT_LOCKS);
if (IS_ERR(pmd->bm)) {
@@ -1489,6 +1489,23 @@ bool dm_thin_changed_this_transaction(struct dm_thin_device *td)
return r;
}
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd)
+{
+ bool r = false;
+ struct dm_thin_device *td, *tmp;
+
+ down_read(&pmd->root_lock);
+ list_for_each_entry_safe(td, tmp, &pmd->thin_devices, list) {
+ if (td->changed) {
+ r = td->changed;
+ break;
+ }
+ }
+ up_read(&pmd->root_lock);
+
+ return r;
+}
+
bool dm_thin_aborted_changes(struct dm_thin_device *td)
{
bool r;
@@ -1738,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
return r;
}
+
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd)
+{
+ int r;
+ struct dm_block *sblock;
+ struct thin_disk_superblock *disk_super;
+
+ down_write(&pmd->root_lock);
+ pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG;
+
+ r = superblock_lock(pmd, &sblock);
+ if (r) {
+ DMERR("couldn't read superblock");
+ goto out;
+ }
+
+ disk_super = dm_block_data(sblock);
+ disk_super->flags = cpu_to_le32(pmd->flags);
+
+ dm_bm_unlock(sblock);
+out:
+ up_write(&pmd->root_lock);
+ return r;
+}
+
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd)
+{
+ bool needs_check;
+
+ down_read(&pmd->root_lock);
+ needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG;
+ up_read(&pmd->root_lock);
+
+ return needs_check;
+}
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 9a368567632f..e3c857db195a 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -9,16 +9,14 @@
#include "persistent-data/dm-block-manager.h"
#include "persistent-data/dm-space-map.h"
+#include "persistent-data/dm-space-map-metadata.h"
-#define THIN_METADATA_BLOCK_SIZE 4096
+#define THIN_METADATA_BLOCK_SIZE DM_SM_METADATA_BLOCK_SIZE
/*
* The metadata device is currently limited in size.
- *
- * We have one block of index, which can hold 255 index entries. Each
- * index entry contains allocation info about 16k metadata blocks.
*/
-#define THIN_METADATA_MAX_SECTORS (255 * (1 << 14) * (THIN_METADATA_BLOCK_SIZE / (1 << SECTOR_SHIFT)))
+#define THIN_METADATA_MAX_SECTORS DM_SM_METADATA_MAX_SECTORS
/*
* A metadata device larger than 16GB triggers a warning.
@@ -27,6 +25,11 @@
/*----------------------------------------------------------------*/
+/*
+ * Thin metadata superblock flags.
+ */
+#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0)
+
struct dm_pool_metadata;
struct dm_thin_device;
@@ -161,6 +164,8 @@ int dm_thin_remove_block(struct dm_thin_device *td, dm_block_t block);
*/
bool dm_thin_changed_this_transaction(struct dm_thin_device *td);
+bool dm_pool_changed_this_transaction(struct dm_pool_metadata *pmd);
+
bool dm_thin_aborted_changes(struct dm_thin_device *td);
int dm_thin_get_highest_mapped_block(struct dm_thin_device *td,
@@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_sm_threshold_fn fn,
void *context);
+/*
+ * Updates the superblock immediately.
+ */
+int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd);
+bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd);
+
/*----------------------------------------------------------------*/
#endif
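
A note on the unit change above: THIN_METADATA_BLOCK_SIZE used to be a byte count (4096) and is now a sector count (DM_SM_METADATA_BLOCK_SIZE, which expresses the same 4 KiB block in 512-byte sectors, i.e. 4096 >> SECTOR_SHIFT = 8). That is why the earlier hunks drop ">> SECTOR_SHIFT" where a sector value is wanted (the superblock's metadata_block_size, the sector_div() in get_metadata_dev_size_in_blocks()) and add "<< SECTOR_SHIFT" where dm_block_manager_create() expects bytes: 8 << 9 = 4096.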
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index faaf944597ab..be70d38745f7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
struct dm_thin_new_mapping;
/*
- * The pool runs in 3 modes. Ordered in degraded order for comparisons.
+ * The pool runs in 4 modes. Ordered in degraded order for comparisons.
*/
enum pool_mode {
PM_WRITE, /* metadata may be changed */
+ PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
PM_READ_ONLY, /* metadata may not be changed */
PM_FAIL, /* all I/O fails */
};
@@ -198,7 +199,6 @@ struct pool {
};
static enum pool_mode get_pool_mode(struct pool *pool);
-static void out_of_data_space(struct pool *pool);
static void metadata_operation_failed(struct pool *pool, const char *op, int r);
/*
@@ -226,6 +226,7 @@ struct thin_c {
struct pool *pool;
struct dm_thin_device *td;
+ bool requeue_mode:1;
};
/*----------------------------------------------------------------*/
@@ -369,14 +370,18 @@ struct dm_thin_endio_hook {
struct dm_thin_new_mapping *overwrite_mapping;
};
-static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
+static void requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
struct bio *bio;
struct bio_list bios;
+ unsigned long flags;
bio_list_init(&bios);
+
+ spin_lock_irqsave(&tc->pool->lock, flags);
bio_list_merge(&bios, master);
bio_list_init(master);
+ spin_unlock_irqrestore(&tc->pool->lock, flags);
while ((bio = bio_list_pop(&bios))) {
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
static void requeue_io(struct thin_c *tc)
{
struct pool *pool = tc->pool;
+
+ requeue_bio_list(tc, &pool->deferred_bios);
+ requeue_bio_list(tc, &pool->retry_on_resume_list);
+}
+
+static void error_retry_list(struct pool *pool)
+{
+ struct bio *bio;
unsigned long flags;
+ struct bio_list bios;
+
+ bio_list_init(&bios);
spin_lock_irqsave(&pool->lock, flags);
- __requeue_bio_list(tc, &pool->deferred_bios);
- __requeue_bio_list(tc, &pool->retry_on_resume_list);
+ bio_list_merge(&bios, &pool->retry_on_resume_list);
+ bio_list_init(&pool->retry_on_resume_list);
spin_unlock_irqrestore(&pool->lock, flags);
+
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
}
/*
@@ -925,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
dm_block_t free_blocks;
struct pool *pool = tc->pool;
- if (get_pool_mode(pool) != PM_WRITE)
+ if (WARN_ON(get_pool_mode(pool) != PM_WRITE))
return -EINVAL;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
@@ -958,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
if (!free_blocks) {
- out_of_data_space(pool);
+ set_pool_mode(pool, PM_OUT_OF_DATA_SPACE);
return -ENOSPC;
}
}
@@ -988,15 +1009,32 @@ static void retry_on_resume(struct bio *bio)
spin_unlock_irqrestore(&pool->lock, flags);
}
-static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+static bool should_error_unserviceable_bio(struct pool *pool)
{
- /*
- * When pool is read-only, no cell locking is needed because
- * nothing is changing.
- */
- WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY);
+ enum pool_mode m = get_pool_mode(pool);
- if (pool->pf.error_if_no_space)
+ switch (m) {
+ case PM_WRITE:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
+ return true;
+
+ case PM_OUT_OF_DATA_SPACE:
+ return pool->pf.error_if_no_space;
+
+ case PM_READ_ONLY:
+ case PM_FAIL:
+ return true;
+ default:
+ /* Shouldn't get here */
+ DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
+ return true;
+ }
+}
+
+static void handle_unserviceable_bio(struct pool *pool, struct bio *bio)
+{
+ if (should_error_unserviceable_bio(pool))
bio_io_error(bio);
else
retry_on_resume(bio);
@@ -1007,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c
struct bio *bio;
struct bio_list bios;
+ if (should_error_unserviceable_bio(pool)) {
+ cell_error(pool, cell);
+ return;
+ }
+
bio_list_init(&bios);
cell_release(pool, cell, &bios);
- while ((bio = bio_list_pop(&bios)))
- handle_unserviceable_bio(pool, bio);
+ if (should_error_unserviceable_bio(pool))
+ while ((bio = bio_list_pop(&bios)))
+ bio_io_error(bio);
+ else
+ while ((bio = bio_list_pop(&bios)))
+ retry_on_resume(bio);
}
static void process_discard(struct thin_c *tc, struct bio *bio)
@@ -1296,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
}
}
+static void process_bio_success(struct thin_c *tc, struct bio *bio)
+{
+ bio_endio(bio, 0);
+}
+
static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
bio_io_error(bio);
@@ -1328,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool)
struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
struct thin_c *tc = h->tc;
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ continue;
+ }
+
/*
* If we've got no free new_mapping structs, and processing
* this bio might require one, we pause until there are some
@@ -1357,7 +1414,8 @@ static void process_deferred_bios(struct pool *pool)
bio_list_init(&pool->deferred_flush_bios);
spin_unlock_irqrestore(&pool->lock, flags);
- if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
+ if (bio_list_empty(&bios) &&
+ !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool)))
return;
if (commit(pool)) {
@@ -1393,51 +1451,134 @@ static void do_waker(struct work_struct *ws)
/*----------------------------------------------------------------*/
+struct noflush_work {
+ struct work_struct worker;
+ struct thin_c *tc;
+
+ atomic_t complete;
+ wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+ atomic_set(&w->complete, 1);
+ wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = true;
+ requeue_io(w->tc);
+ complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+ struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+ w->tc->requeue_mode = false;
+ complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+ struct noflush_work w;
+
+ INIT_WORK(&w.worker, fn);
+ w.tc = tc;
+ atomic_set(&w.complete, 0);
+ init_waitqueue_head(&w.wait);
+
+ queue_work(tc->pool->wq, &w.worker);
+
+ wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
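+
A note on noflush_work() above: the work item lives on the caller's stack, which is safe only because the function waits (wait_event) until the handler signals completion, so the stack frame outlives the queued work. The same handshake is often written with a struct completion; a sketch of that shape, with illustrative names and not a proposed change to the driver:

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct demo_sync_work {
		struct work_struct worker;
		struct completion done;
	};

	static void demo_handler(struct work_struct *ws)
	{
		struct demo_sync_work *w =
			container_of(ws, struct demo_sync_work, worker);

		/* ... perform the quiesce step on the workqueue ... */
		complete(&w->done);
	}

	static void demo_run_and_wait(struct workqueue_struct *wq)
	{
		struct demo_sync_work w;

		INIT_WORK_ONSTACK(&w.worker, demo_handler);
		init_completion(&w.done);

		queue_work(wq, &w.worker);
		wait_for_completion(&w.done);	/* keeps the stack frame alive */
		destroy_work_on_stack(&w.worker);
	}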
+
static enum pool_mode get_pool_mode(struct pool *pool)
{
return pool->pf.mode;
}
+static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode)
+{
+ dm_table_event(pool->ti->table);
+ DMINFO("%s: switching pool to %s mode",
+ dm_device_name(pool->pool_md), new_mode);
+}
+
static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
{
- int r;
- enum pool_mode old_mode = pool->pf.mode;
+ struct pool_c *pt = pool->ti->private;
+ bool needs_check = dm_pool_metadata_needs_check(pool->pmd);
+ enum pool_mode old_mode = get_pool_mode(pool);
+
+ /*
+ * Never allow the pool to transition to PM_WRITE mode if user
+ * intervention is required to verify metadata and data consistency.
+ */
+ if (new_mode == PM_WRITE && needs_check) {
+ DMERR("%s: unable to switch pool to write mode until repaired.",
+ dm_device_name(pool->pool_md));
+ if (old_mode != new_mode)
+ new_mode = old_mode;
+ else
+ new_mode = PM_READ_ONLY;
+ }
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode.
+ */
+ if (old_mode == PM_FAIL)
+ new_mode = old_mode;
switch (new_mode) {
case PM_FAIL:
if (old_mode != new_mode)
- DMERR("%s: switching pool to failure mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "failure");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
pool->process_prepared_discard = process_prepared_discard_fail;
+
+ error_retry_list(pool);
break;
case PM_READ_ONLY:
if (old_mode != new_mode)
- DMERR("%s: switching pool to read-only mode",
- dm_device_name(pool->pool_md));
- r = dm_pool_abort_metadata(pool->pmd);
- if (r) {
- DMERR("%s: aborting transaction failed",
- dm_device_name(pool->pool_md));
- new_mode = PM_FAIL;
- set_pool_mode(pool, new_mode);
- } else {
- dm_pool_metadata_read_only(pool->pmd);
- pool->process_bio = process_bio_read_only;
- pool->process_discard = process_discard;
- pool->process_prepared_mapping = process_prepared_mapping_fail;
- pool->process_prepared_discard = process_prepared_discard_passdown;
- }
+ notify_of_pool_mode_change(pool, "read-only");
+ dm_pool_metadata_read_only(pool->pmd);
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_bio_success;
+ pool->process_prepared_mapping = process_prepared_mapping_fail;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
+
+ error_retry_list(pool);
+ break;
+
+ case PM_OUT_OF_DATA_SPACE:
+ /*
+ * Ideally we'd never hit this state; the low water mark
+ * would trigger userland to extend the pool before we
+ * completely run out of data space. However, many small
+ * IOs to unprovisioned space can consume data space at an
+ * alarming rate. Adjust your low water mark if you're
+ * frequently seeing this mode.
+ */
+ if (old_mode != new_mode)
+ notify_of_pool_mode_change(pool, "out-of-data-space");
+ pool->process_bio = process_bio_read_only;
+ pool->process_discard = process_discard;
+ pool->process_prepared_mapping = process_prepared_mapping;
+ pool->process_prepared_discard = process_prepared_discard_passdown;
break;
case PM_WRITE:
if (old_mode != new_mode)
- DMINFO("%s: switching pool to write mode",
- dm_device_name(pool->pool_md));
+ notify_of_pool_mode_change(pool, "write");
dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
@@ -1447,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
}
pool->pf.mode = new_mode;
+ /*
+ * The pool mode may have changed, sync it so bind_control_target()
+ * doesn't cause an unexpected mode transition on resume.
+ */
+ pt->adjusted_pf.mode = new_mode;
}
-/*
- * Rather than calling set_pool_mode directly, use these which describe the
- * reason for mode degradation.
- */
-static void out_of_data_space(struct pool *pool)
+static void abort_transaction(struct pool *pool)
{
- DMERR_LIMIT("%s: no free data space available.",
- dm_device_name(pool->pool_md));
- set_pool_mode(pool, PM_READ_ONLY);
+ const char *dev_name = dm_device_name(pool->pool_md);
+
+ DMERR_LIMIT("%s: aborting current metadata transaction", dev_name);
+ if (dm_pool_abort_metadata(pool->pmd)) {
+ DMERR("%s: failed to abort metadata transaction", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
+
+ if (dm_pool_metadata_set_needs_check(pool->pmd)) {
+ DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name);
+ set_pool_mode(pool, PM_FAIL);
+ }
}
static void metadata_operation_failed(struct pool *pool, const char *op, int r)
{
- dm_block_t free_blocks;
-
DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d",
dm_device_name(pool->pool_md), op, r);
- if (r == -ENOSPC &&
- !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
- !free_blocks)
- DMERR_LIMIT("%s: no free metadata space available.",
- dm_device_name(pool->pool_md));
-
+ abort_transaction(pool);
set_pool_mode(pool, PM_READ_ONLY);
}
@@ -1523,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
thin_hook_bio(tc, bio);
+ if (tc->requeue_mode) {
+ bio_endio(bio, DM_ENDIO_REQUEUE);
+ return DM_MAPIO_SUBMITTED;
+ }
+
if (get_pool_mode(tc->pool) == PM_FAIL) {
bio_io_error(bio);
return DM_MAPIO_SUBMITTED;
@@ -1686,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
/*
* We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
- enum pool_mode old_mode = pool->pf.mode;
+ enum pool_mode old_mode = get_pool_mode(pool);
enum pool_mode new_mode = pt->adjusted_pf.mode;
/*
@@ -1700,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
pool->pf = pt->adjusted_pf;
pool->low_water_blocks = pt->low_water_blocks;
- /*
- * If we were in PM_FAIL mode, rollback of metadata failed. We're
- * not going to recover without a thin_repair. So we never let the
- * pool move out of the old mode. On the other hand a PM_READ_ONLY
- * may have been due to a lack of metadata or data space, and may
- * now work (ie. if the underlying devices have been resized).
- */
- if (old_mode == PM_FAIL)
- new_mode = old_mode;
-
set_pool_mode(pool, new_mode);
return 0;
@@ -1999,16 +2138,27 @@ static void metadata_low_callback(void *context)
dm_table_event(pool->ti->table);
}
-static sector_t get_metadata_dev_size(struct block_device *bdev)
+static sector_t get_dev_size(struct block_device *bdev)
+{
+ return i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+}
+
+static void warn_if_metadata_device_too_big(struct block_device *bdev)
{
- sector_t metadata_dev_size = i_size_read(bdev->bd_inode) >> SECTOR_SHIFT;
+ sector_t metadata_dev_size = get_dev_size(bdev);
char buffer[BDEVNAME_SIZE];
- if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING) {
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
bdevname(bdev, buffer), THIN_METADATA_MAX_SECTORS);
- metadata_dev_size = THIN_METADATA_MAX_SECTORS_WARNING;
- }
+}
+
+static sector_t get_metadata_dev_size(struct block_device *bdev)
+{
+ sector_t metadata_dev_size = get_dev_size(bdev);
+
+ if (metadata_dev_size > THIN_METADATA_MAX_SECTORS)
+ metadata_dev_size = THIN_METADATA_MAX_SECTORS;
return metadata_dev_size;
}
@@ -2017,7 +2167,7 @@ static dm_block_t get_metadata_dev_size_in_blocks(struct block_device *bdev)
{
sector_t metadata_dev_size = get_metadata_dev_size(bdev);
- sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE >> SECTOR_SHIFT);
+ sector_div(metadata_dev_size, THIN_METADATA_BLOCK_SIZE);
return metadata_dev_size;
}
@@ -2095,12 +2245,7 @@ static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
ti->error = "Error opening metadata block device";
goto out_unlock;
}
-
- /*
- * Run for the side-effect of possibly issuing a warning if the
- * device is too big.
- */
- (void) get_metadata_dev_size(metadata_dev->bdev);
+ warn_if_metadata_device_too_big(metadata_dev->bdev);
r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
if (r) {
@@ -2246,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (data_size > sb_data_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the data device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
if (sb_data_size)
DMINFO("%s: growing the data device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
@@ -2287,6 +2438,13 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit)
return -EINVAL;
} else if (metadata_dev_size > sb_metadata_dev_size) {
+ if (dm_pool_metadata_needs_check(pool->pmd)) {
+ DMERR("%s: unable to grow the metadata device until repaired.",
+ dm_device_name(pool->pool_md));
+ return 0;
+ }
+
+ warn_if_metadata_device_too_big(pool->md_dev);
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
@@ -2673,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type,
else
DMEMIT("- ");
- if (pool->pf.mode == PM_READ_ONLY)
+ if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ DMEMIT("out_of_data_space ");
+ else if (pool->pf.mode == PM_READ_ONLY)
DMEMIT("ro ");
else
DMEMIT("rw ");
@@ -2787,7 +2947,7 @@ static struct target_type pool_target = {
.name = "thin-pool",
.features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
DM_TARGET_IMMUTABLE,
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = pool_ctr,
.dtr = pool_dtr,
@@ -2894,6 +3054,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
if (get_pool_mode(tc->pool) == PM_FAIL) {
ti->error = "Couldn't open thin device, Pool is in fail mode";
+ r = -EINVAL;
goto bad_thin_open;
}
@@ -2905,7 +3066,7 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
if (r)
- goto bad_thin_open;
+ goto bad_target_max_io_len;
ti->num_flush_bios = 1;
ti->flush_supported = true;
@@ -2926,6 +3087,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
return 0;
+bad_target_max_io_len:
+ dm_pool_close_thin_device(tc->td);
bad_thin_open:
__pool_dec(tc->pool);
bad_pool_lookup:
@@ -2986,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
return 0;
}
-static void thin_postsuspend(struct dm_target *ti)
+static void thin_presuspend(struct dm_target *ti)
{
+ struct thin_c *tc = ti->private;
+
if (dm_noflush_suspending(ti))
- requeue_io((struct thin_c *)ti->private);
+ noflush_work(tc, do_noflush_start);
+}
+
+static void thin_postsuspend(struct dm_target *ti)
+{
+ struct thin_c *tc = ti->private;
+
+ /*
+ * The dm_noflush_suspending flag has been cleared by now, so
+ * unfortunately we must always run this.
+ */
+ noflush_work(tc, do_noflush_stop);
}
/*
@@ -3074,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti,
static struct target_type thin_target = {
.name = "thin",
- .version = {1, 10, 0},
+ .version = {1, 11, 0},
.module = THIS_MODULE,
.ctr = thin_ctr,
.dtr = thin_dtr,
.map = thin_map,
.end_io = thin_endio,
+ .presuspend = thin_presuspend,
.postsuspend = thin_postsuspend,
.status = thin_status,
.iterate_devices = thin_iterate_devices,
diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig
index 19b268795415..0c2dec7aec20 100644
--- a/drivers/md/persistent-data/Kconfig
+++ b/drivers/md/persistent-data/Kconfig
@@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA
---help---
Library providing immutable on-disk data structure support for
device-mapper targets such as the thin provisioning target.
+
+config DM_DEBUG_BLOCK_STACK_TRACING
+ boolean "Keep stack trace of persistent data block lock holders"
+ depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA
+ select STACKTRACE
+ ---help---
+ Enable this for messages that may help debug problems with the
+ block manager locking used by thin provisioning and caching.
+
+ If unsure, say N.
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 536782e3bcb7..786b689bdfc7 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -91,6 +91,69 @@ struct block_op {
dm_block_t block;
};
+struct bop_ring_buffer {
+ unsigned begin;
+ unsigned end;
+ struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1];
+};
+
+static void brb_init(struct bop_ring_buffer *brb)
+{
+ brb->begin = 0;
+ brb->end = 0;
+}
+
+static bool brb_empty(struct bop_ring_buffer *brb)
+{
+ return brb->begin == brb->end;
+}
+
+static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old)
+{
+ unsigned r = old + 1;
+ return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r;
+}
+
+static int brb_push(struct bop_ring_buffer *brb,
+ enum block_op_type type, dm_block_t b)
+{
+ struct block_op *bop;
+ unsigned next = brb_next(brb, brb->end);
+
+ /*
+ * We don't allow the last bop to be filled; this way we can
+ * differentiate between full and empty.
+ */
+ if (next == brb->begin)
+ return -ENOMEM;
+
+ bop = brb->bops + brb->end;
+ bop->type = type;
+ bop->block = b;
+
+ brb->end = next;
+
+ return 0;
+}
+
+static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result)
+{
+ struct block_op *bop;
+
+ if (brb_empty(brb))
+ return -ENODATA;
+
+ bop = brb->bops + brb->begin;
+ result->type = bop->type;
+ result->block = bop->block;
+
+ brb->begin = brb_next(brb, brb->begin);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------*/
+
struct sm_metadata {
struct dm_space_map sm;
@@ -101,25 +164,20 @@ struct sm_metadata {
unsigned recursion_count;
unsigned allocated_this_transaction;
- unsigned nr_uncommitted;
- struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS];
+ struct bop_ring_buffer uncommitted;
struct threshold threshold;
};
static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b)
{
- struct block_op *op;
+ int r = brb_push(&smm->uncommitted, type, b);
- if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) {
+ if (r) {
DMERR("too many recursive allocations");
return -ENOMEM;
}
- op = smm->uncommitted + smm->nr_uncommitted++;
- op->type = type;
- op->block = b;
-
return 0;
}
@@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm)
return -ENOMEM;
}
- if (smm->recursion_count == 1 && smm->nr_uncommitted) {
- while (smm->nr_uncommitted && !r) {
- smm->nr_uncommitted--;
- r = commit_bop(smm, smm->uncommitted +
- smm->nr_uncommitted);
+ if (smm->recursion_count == 1) {
+ while (!brb_empty(&smm->uncommitted)) {
+ struct block_op bop;
+
+ r = brb_pop(&smm->uncommitted, &bop);
+ if (r) {
+ DMERR("bug in bop ring buffer");
+ break;
+ }
+
+ r = commit_bop(smm, &bop);
if (r)
break;
}
@@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count)
static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
uint32_t *result)
{
- int r, i;
+ int r;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
unsigned adjustment = 0;
@@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b,
static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
dm_block_t b, int *result)
{
- int r, i, adjustment = 0;
+ int r, adjustment = 0;
+ unsigned i;
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
uint32_t rc;
@@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm,
* We may have some uncommitted adjustments to add. This list
* should always be really short.
*/
- for (i = 0; i < smm->nr_uncommitted; i++) {
- struct block_op *op = smm->uncommitted + i;
+ for (i = smm->uncommitted.begin;
+ i != smm->uncommitted.end;
+ i = brb_next(&smm->uncommitted, i)) {
+
+ struct block_op *op = smm->uncommitted.bops + i;
if (op->block != b)
continue;
@@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
smm->begin = superblock + 1;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm));
@@ -680,6 +751,8 @@ int dm_sm_metadata_create(struct dm_space_map *sm,
if (r)
return r;
+ if (nr_blocks > DM_SM_METADATA_MAX_BLOCKS)
+ nr_blocks = DM_SM_METADATA_MAX_BLOCKS;
r = sm_ll_extend(&smm->ll, nr_blocks);
if (r)
return r;
@@ -713,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm,
smm->begin = 0;
smm->recursion_count = 0;
smm->allocated_this_transaction = 0;
- smm->nr_uncommitted = 0;
+ brb_init(&smm->uncommitted);
threshold_init(&smm->threshold);
memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll));
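The space-map rework above replaces the fixed uncommitted[] array with a small ring buffer that deliberately keeps one slot unused: begin == end means empty, and next(end) == begin means full, with no separate element count. A minimal user-space sketch of that convention follows; the names, the int payload and the four-slot size are illustrative only (the kernel version stores struct block_op entries sized from MAX_RECURSIVE_ALLOCATIONS + 1):

/* Sketch of the one-slot-empty ring buffer convention used above. */
#include <stdio.h>

#define RING_SLOTS 4            /* can hold RING_SLOTS - 1 entries */

struct ring {
        unsigned begin;
        unsigned end;
        int slots[RING_SLOTS];
};

static unsigned ring_next(unsigned i)
{
        return (i + 1 == RING_SLOTS) ? 0 : i + 1;
}

static int ring_push(struct ring *r, int v)
{
        unsigned next = ring_next(r->end);

        /* Leaving the last slot unused distinguishes full from empty. */
        if (next == r->begin)
                return -1;      /* full */
        r->slots[r->end] = v;
        r->end = next;
        return 0;
}

static int ring_pop(struct ring *r, int *v)
{
        if (r->begin == r->end)
                return -1;      /* empty */
        *v = r->slots[r->begin];
        r->begin = ring_next(r->begin);
        return 0;
}

int main(void)
{
        struct ring r = { 0, 0, { 0 } };
        int v, i;

        for (i = 1; ring_push(&r, i) == 0; i++)
                ;                       /* stops after RING_SLOTS - 1 pushes */
        while (ring_pop(&r, &v) == 0)
                printf("%d\n", v);      /* prints 1 2 3 in FIFO order */
        return 0;
}

The same begin-to-end walk with the next() helper is what sm_metadata_get_count() uses above to scan the still-uncommitted ops without popping them.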
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.h b/drivers/md/persistent-data/dm-space-map-metadata.h
index 39bba0801cf2..64df923974d8 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.h
+++ b/drivers/md/persistent-data/dm-space-map-metadata.h
@@ -9,6 +9,17 @@
#include "dm-transaction-manager.h"
+#define DM_SM_METADATA_BLOCK_SIZE (4096 >> SECTOR_SHIFT)
+
+/*
+ * The metadata device is currently limited in size.
+ *
+ * We have one block of index, which can hold 255 index entries. Each
+ * index entry contains allocation info about ~16k metadata blocks.
+ */
+#define DM_SM_METADATA_MAX_BLOCKS (255 * ((1 << 14) - 64))
+#define DM_SM_METADATA_MAX_SECTORS (DM_SM_METADATA_MAX_BLOCKS * DM_SM_METADATA_BLOCK_SIZE)
+
/*
* Unfortunately we have to use two-phase construction due to the cycle
* between the tm and sm.
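For concreteness, the limit introduced above works out as 255 index entries x ((1 << 14) - 64) blocks per entry = 4,161,600 metadata blocks; at the 4 KiB block size that is roughly 15.9 GiB, or 33,292,800 512-byte sectors for DM_SM_METADATA_MAX_SECTORS. A trivial check that only re-evaluates the macro arithmetic (the values in the comments are what it prints):

/* Quick evaluation of the limits implied by the macros above. */
#include <stdio.h>

int main(void)
{
        const unsigned long long block_size = 4096;               /* bytes */
        const unsigned long long max_blocks = 255ULL * ((1 << 14) - 64);
        const unsigned long long max_sectors = max_blocks * (block_size / 512);

        printf("max blocks:  %llu\n", max_blocks);                 /* 4161600   */
        printf("max sectors: %llu\n", max_sectors);                /* 33292800  */
        printf("max size:    ~%llu MiB\n",
               max_blocks * block_size >> 20);                     /* ~16256 MiB */
        return 0;
}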
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index fd3a2a14b587..4a6ca1cb2e78 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1953,11 +1953,15 @@ static int process_checks(struct r1bio *r1_bio)
for (i = 0; i < conf->raid_disks * 2; i++) {
int j;
int size;
+ int uptodate;
struct bio *b = r1_bio->bios[i];
if (b->bi_end_io != end_sync_read)
continue;
- /* fixup the bio for reuse */
+ /* fixup the bio for reuse, but preserve BIO_UPTODATE */
+ uptodate = test_bit(BIO_UPTODATE, &b->bi_flags);
bio_reset(b);
+ if (!uptodate)
+ clear_bit(BIO_UPTODATE, &b->bi_flags);
b->bi_vcnt = vcnt;
b->bi_iter.bi_size = r1_bio->sectors << 9;
b->bi_iter.bi_sector = r1_bio->sector +
@@ -1990,11 +1994,14 @@ static int process_checks(struct r1bio *r1_bio)
int j;
struct bio *pbio = r1_bio->bios[primary];
struct bio *sbio = r1_bio->bios[i];
+ int uptodate = test_bit(BIO_UPTODATE, &sbio->bi_flags);
if (sbio->bi_end_io != end_sync_read)
continue;
+ /* Now we can 'fixup' the BIO_UPTODATE flag */
+ set_bit(BIO_UPTODATE, &sbio->bi_flags);
- if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
+ if (uptodate) {
for (j = vcnt; j-- ; ) {
struct page *p, *s;
p = pbio->bi_io_vec[j].bv_page;
@@ -2009,7 +2016,7 @@ static int process_checks(struct r1bio *r1_bio)
if (j >= 0)
atomic64_add(r1_bio->sectors, &mddev->resync_mismatches);
if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)
- && test_bit(BIO_UPTODATE, &sbio->bi_flags))) {
+ && uptodate)) {
/* No need to write to this device. */
sbio->bi_end_io = NULL;
rdev_dec_pending(conf->mirrors[i].rdev, mddev);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index f1feadeb7bb2..16f5c21963db 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5514,23 +5514,43 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
return sectors * (raid_disks - conf->max_degraded);
}
+static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ safe_put_page(percpu->spare_page);
+ kfree(percpu->scribble);
+ percpu->spare_page = NULL;
+ percpu->scribble = NULL;
+}
+
+static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu)
+{
+ if (conf->level == 6 && !percpu->spare_page)
+ percpu->spare_page = alloc_page(GFP_KERNEL);
+ if (!percpu->scribble)
+ percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
+
+ if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
+ free_scratch_buffer(conf, percpu);
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
static void raid5_free_percpu(struct r5conf *conf)
{
- struct raid5_percpu *percpu;
unsigned long cpu;
if (!conf->percpu)
return;
- get_online_cpus();
- for_each_possible_cpu(cpu) {
- percpu = per_cpu_ptr(conf->percpu, cpu);
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- }
#ifdef CONFIG_HOTPLUG_CPU
unregister_cpu_notifier(&conf->cpu_notify);
#endif
+
+ get_online_cpus();
+ for_each_possible_cpu(cpu)
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
put_online_cpus();
free_percpu(conf->percpu);
@@ -5557,15 +5577,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
switch (action) {
case CPU_UP_PREPARE:
case CPU_UP_PREPARE_FROZEN:
- if (conf->level == 6 && !percpu->spare_page)
- percpu->spare_page = alloc_page(GFP_KERNEL);
- if (!percpu->scribble)
- percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
-
- if (!percpu->scribble ||
- (conf->level == 6 && !percpu->spare_page)) {
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
+ if (alloc_scratch_buffer(conf, percpu)) {
pr_err("%s: failed memory allocation for cpu%ld\n",
__func__, cpu);
return notifier_from_errno(-ENOMEM);
@@ -5573,10 +5585,7 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
break;
case CPU_DEAD:
case CPU_DEAD_FROZEN:
- safe_put_page(percpu->spare_page);
- kfree(percpu->scribble);
- percpu->spare_page = NULL;
- percpu->scribble = NULL;
+ free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
break;
default:
break;
@@ -5588,40 +5597,29 @@ static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action,
static int raid5_alloc_percpu(struct r5conf *conf)
{
unsigned long cpu;
- struct page *spare_page;
- struct raid5_percpu __percpu *allcpus;
- void *scribble;
- int err;
+ int err = 0;
- allcpus = alloc_percpu(struct raid5_percpu);
- if (!allcpus)
+ conf->percpu = alloc_percpu(struct raid5_percpu);
+ if (!conf->percpu)
return -ENOMEM;
- conf->percpu = allcpus;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ conf->cpu_notify.notifier_call = raid456_cpu_notify;
+ conf->cpu_notify.priority = 0;
+ err = register_cpu_notifier(&conf->cpu_notify);
+ if (err)
+ return err;
+#endif
get_online_cpus();
- err = 0;
for_each_present_cpu(cpu) {
- if (conf->level == 6) {
- spare_page = alloc_page(GFP_KERNEL);
- if (!spare_page) {
- err = -ENOMEM;
- break;
- }
- per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page;
- }
- scribble = kmalloc(conf->scribble_len, GFP_KERNEL);
- if (!scribble) {
- err = -ENOMEM;
+ err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu));
+ if (err) {
+ pr_err("%s: failed memory allocation for cpu%ld\n",
+ __func__, cpu);
break;
}
- per_cpu_ptr(conf->percpu, cpu)->scribble = scribble;
}
-#ifdef CONFIG_HOTPLUG_CPU
- conf->cpu_notify.notifier_call = raid456_cpu_notify;
- conf->cpu_notify.priority = 0;
- if (err == 0)
- err = register_cpu_notifier(&conf->cpu_notify);
-#endif
put_online_cpus();
return err;
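The raid5 change above folds the duplicated spare_page/scribble handling into a matched pair of helpers so a partial allocation is always rolled back before -ENOMEM is reported, and registers the CPU notifier before the per-CPU allocation loop. A stripped-down sketch of the alloc-all-or-free-all pattern, using plain malloc()/free() stand-ins rather than the kernel allocators:

/* Sketch of the alloc-all-or-free-all helper pairing introduced above. */
#include <stdlib.h>

struct scratch {
        void *spare;
        void *scribble;
};

static void free_scratch(struct scratch *s)
{
        free(s->spare);
        free(s->scribble);
        s->spare = NULL;
        s->scribble = NULL;
}

static int alloc_scratch(struct scratch *s, size_t len, int need_spare)
{
        if (need_spare && !s->spare)
                s->spare = malloc(4096);
        if (!s->scribble)
                s->scribble = malloc(len);

        /* On any failure, release both so callers never see a half-set-up state. */
        if (!s->scribble || (need_spare && !s->spare)) {
                free_scratch(s);
                return -1;
        }
        return 0;
}

int main(void)
{
        struct scratch s = { NULL, NULL };
        int ret = alloc_scratch(&s, 1024, 1);

        free_scratch(&s);
        return ret;
}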
diff --git a/drivers/media/dvb-frontends/cx24117.c b/drivers/media/dvb-frontends/cx24117.c
index 68f768a5422d..a6c3c9e2e897 100644
--- a/drivers/media/dvb-frontends/cx24117.c
+++ b/drivers/media/dvb-frontends/cx24117.c
@@ -1176,7 +1176,7 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
switch (demod) {
case 0:
- dev_err(&state->priv->i2c->dev,
+ dev_err(&i2c->dev,
"%s: Error attaching frontend %d\n",
KBUILD_MODNAME, demod);
goto error1;
@@ -1200,12 +1200,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->demod = demod - 1;
state->priv = priv;
- /* test i2c bus for ack */
- if (demod == 0) {
- if (cx24117_readreg(state, 0x00) < 0)
- goto error3;
- }
-
dev_info(&state->priv->i2c->dev,
"%s: Attaching frontend %d\n",
KBUILD_MODNAME, state->demod);
@@ -1216,8 +1210,6 @@ struct dvb_frontend *cx24117_attach(const struct cx24117_config *config,
state->frontend.demodulator_priv = state;
return &state->frontend;
-error3:
- kfree(state);
error2:
cx24117_release_priv(priv);
error1:
diff --git a/drivers/media/dvb-frontends/nxt200x.c b/drivers/media/dvb-frontends/nxt200x.c
index 4bf057544607..8a8e1ecb762d 100644
--- a/drivers/media/dvb-frontends/nxt200x.c
+++ b/drivers/media/dvb-frontends/nxt200x.c
@@ -2,7 +2,7 @@
* Support for NXT2002 and NXT2004 - VSB/QAM
*
* Copyright (C) 2005 Kirk Lapray <kirk.lapray@gmail.com>
- * Copyright (C) 2006 Michael Krufky <mkrufky@m1k.net>
+ * Copyright (C) 2006-2014 Michael Krufky <mkrufky@linuxtv.org>
* based on nxt2002 by Taylor Jacob <rtjacob@earthlink.net>
* and nxt2004 by Jean-Francois Thibert <jeanfrancois@sagetv.com>
*
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 1effc21e1cdd..9bbd6656fb8f 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -2554,7 +2554,7 @@ static int adv7842_core_init(struct v4l2_subdev *sd)
sdp_write_and_or(sd, 0xdd, 0xf0, pdata->sdp_free_run_force |
(pdata->sdp_free_run_cbar_en << 1) |
(pdata->sdp_free_run_man_col_en << 2) |
- (pdata->sdp_free_run_force << 3));
+ (pdata->sdp_free_run_auto << 3));
/* TODO from platform data */
cp_write(sd, 0x69, 0x14); /* Enable CP CSC */
diff --git a/drivers/media/i2c/s5k5baf.c b/drivers/media/i2c/s5k5baf.c
index 4b8381111cbd..77e10e0fd8d6 100644
--- a/drivers/media/i2c/s5k5baf.c
+++ b/drivers/media/i2c/s5k5baf.c
@@ -478,25 +478,33 @@ static void s5k5baf_write_arr_seq(struct s5k5baf *state, u16 addr,
u16 count, const u16 *seq)
{
struct i2c_client *c = v4l2_get_subdevdata(&state->sd);
- __be16 buf[count + 1];
- int ret, n;
+ __be16 buf[65];
s5k5baf_i2c_write(state, REG_CMDWR_ADDR, addr);
if (state->error)
return;
+ v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
+ min(2 * count, 64), seq);
+
buf[0] = __constant_cpu_to_be16(REG_CMD_BUF);
- for (n = 1; n <= count; ++n)
- buf[n] = cpu_to_be16(*seq++);
- n *= 2;
- ret = i2c_master_send(c, (char *)buf, n);
- v4l2_dbg(3, debug, c, "i2c_write_seq(count=%d): %*ph\n", count,
- min(2 * count, 64), seq - count);
+ while (count > 0) {
+ int n = min_t(int, count, ARRAY_SIZE(buf) - 1);
+ int ret, i;
- if (ret != n) {
- v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
- state->error = ret;
+ for (i = 1; i <= n; ++i)
+ buf[i] = cpu_to_be16(*seq++);
+
+ i *= 2;
+ ret = i2c_master_send(c, (char *)buf, i);
+ if (ret != i) {
+ v4l2_err(c, "i2c_write_seq: error during transfer (%d)\n", ret);
+ state->error = ret;
+ break;
+ }
+
+ count -= n;
}
}
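The s5k5baf change above drops the variable-length on-stack array in favour of a fixed 65-entry buffer and splits long register sequences into transfers of at most 64 data words each. A hedged sketch of the same chunking pattern, with write_words() standing in for the real i2c_master_send() call and the command word purely a placeholder:

/* Sketch of the fixed-buffer chunking loop used above. */
#include <stdio.h>

#define CHUNK 64

static int write_words(const unsigned short *buf, int n)
{
        printf("transfer of %d words\n", n);
        return n;               /* pretend the whole transfer succeeded */
}

static int send_seq(const unsigned short *seq, int count)
{
        unsigned short buf[CHUNK + 1];  /* slot 0 reserved for a command word */

        while (count > 0) {
                int i, n = count < CHUNK ? count : CHUNK;

                buf[0] = 0x0f12;        /* placeholder command word */
                for (i = 1; i <= n; i++)
                        buf[i] = *seq++;
                if (write_words(buf, n + 1) != n + 1)
                        return -1;      /* bail out on a short transfer */
                count -= n;
        }
        return 0;
}

int main(void)
{
        unsigned short regs[150] = { 0 };

        /* three transfers: 64 + 64 + 22 data words (+1 command word each) */
        return send_seq(regs, 150);
}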
diff --git a/drivers/media/pci/bt8xx/bttv-cards.c b/drivers/media/pci/bt8xx/bttv-cards.c
index d85cb0ace4dc..6662b495b22c 100644
--- a/drivers/media/pci/bt8xx/bttv-cards.c
+++ b/drivers/media/pci/bt8xx/bttv-cards.c
@@ -2426,7 +2426,7 @@ struct tvcard bttv_tvcards[] = {
},
/* ---- card 0x87---------------------------------- */
[BTTV_BOARD_DVICO_FUSIONHDTV_5_LITE] = {
- /* Michael Krufky <mkrufky@m1k.net> */
+ /* Michael Krufky <mkrufky@linuxtv.org> */
.name = "DViCO FusionHDTV 5 Lite",
.tuner_type = TUNER_LG_TDVS_H06XF, /* TDVS-H064F */
.tuner_addr = ADDR_UNSET,
diff --git a/drivers/media/pci/bt8xx/bttv-gpio.c b/drivers/media/pci/bt8xx/bttv-gpio.c
index 922e8233fd0b..3f364b7062b9 100644
--- a/drivers/media/pci/bt8xx/bttv-gpio.c
+++ b/drivers/media/pci/bt8xx/bttv-gpio.c
@@ -98,7 +98,7 @@ int bttv_sub_add_device(struct bttv_core *core, char *name)
err = device_register(&sub->dev);
if (0 != err) {
- kfree(sub);
+ put_device(&sub->dev);
return err;
}
pr_info("%d: add subdevice \"%s\"\n", core->nr, dev_name(&sub->dev));
diff --git a/drivers/media/pci/saa7134/saa7134-cards.c b/drivers/media/pci/saa7134/saa7134-cards.c
index d45e7f6ff332..c9b2350e92c8 100644
--- a/drivers/media/pci/saa7134/saa7134-cards.c
+++ b/drivers/media/pci/saa7134/saa7134-cards.c
@@ -2590,7 +2590,7 @@ struct saa7134_board saa7134_boards[] = {
}},
},
[SAA7134_BOARD_AVERMEDIA_AVERTVHD_A180] = {
- /* Michael Krufky <mkrufky@m1k.net>
+ /* Michael Krufky <mkrufky@linuxtv.org>
* Uses Alps Electric TDHU2, containing NXT2004 ATSC Decoder
* AFAIK, there is no analog demod, thus,
* no support for analog television.
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index a7dfd07e8389..da2fc86cc524 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1027,7 +1027,8 @@ static int fimc_probe(struct platform_device *pdev)
return 0;
err_gclk:
- clk_disable(fimc->clock[CLK_GATE]);
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock[CLK_GATE]);
err_sd:
fimc_unregister_capture_subdev(fimc);
err_sclk:
@@ -1036,6 +1037,7 @@ err_sclk:
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_runtime_resume(struct device *dev)
{
struct fimc_dev *fimc = dev_get_drvdata(dev);
@@ -1068,6 +1070,7 @@ static int fimc_runtime_suspend(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
return ret;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_resume(struct device *dev)
diff --git a/drivers/media/platform/exynos4-is/fimc-lite.c b/drivers/media/platform/exynos4-is/fimc-lite.c
index 1234734bccf4..779ec3cd259d 100644
--- a/drivers/media/platform/exynos4-is/fimc-lite.c
+++ b/drivers/media/platform/exynos4-is/fimc-lite.c
@@ -1563,7 +1563,7 @@ static int fimc_lite_probe(struct platform_device *pdev)
if (!pm_runtime_enabled(dev)) {
ret = clk_enable(fimc->clock);
if (ret < 0)
- goto err_clk_put;
+ goto err_sd;
}
fimc->alloc_ctx = vb2_dma_contig_init_ctx(dev);
@@ -1579,7 +1579,8 @@ static int fimc_lite_probe(struct platform_device *pdev)
return 0;
err_clk_dis:
- clk_disable(fimc->clock);
+ if (!pm_runtime_enabled(dev))
+ clk_disable(fimc->clock);
err_sd:
fimc_lite_unregister_capture_subdev(fimc);
err_clk_put:
@@ -1587,6 +1588,7 @@ err_clk_put:
return ret;
}
+#ifdef CONFIG_PM_RUNTIME
static int fimc_lite_runtime_resume(struct device *dev)
{
struct fimc_lite *fimc = dev_get_drvdata(dev);
@@ -1602,6 +1604,7 @@ static int fimc_lite_runtime_suspend(struct device *dev)
clk_disable(fimc->clock);
return 0;
}
+#endif
#ifdef CONFIG_PM_SLEEP
static int fimc_lite_resume(struct device *dev)
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index a1c78c870b68..7d68d0b9966a 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -175,7 +175,7 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
- .depth = 16,
+ .depth = 12,
.colplanes = 2,
.h_align = 1,
.v_align = 1,
@@ -188,10 +188,10 @@ static struct s5p_jpeg_fmt sjpeg_formats[] = {
{
.name = "YUV 4:2:0 planar, Y/CbCr",
.fourcc = V4L2_PIX_FMT_NV12,
- .depth = 16,
- .colplanes = 4,
+ .depth = 12,
+ .colplanes = 2,
.h_align = 4,
- .v_align = 1,
+ .v_align = 4,
.flags = SJPEG_FMT_FLAG_ENC_OUTPUT |
SJPEG_FMT_FLAG_DEC_CAPTURE |
SJPEG_FMT_FLAG_S5P |
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8f9b2cea88f0..8ede8ea762e6 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -1539,6 +1539,8 @@ static const struct usb_device_id af9035_id_table[] = {
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
{ DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
&af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
+ { DVB_USB_DEVICE(USB_VID_HAUPPAUGE, 0xf900,
+ &af9035_props, "Hauppauge WinTV-MiniStick 2", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
index d83df4bb72d3..0a98d04c53e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.c - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -601,7 +601,7 @@ struct dvb_frontend *mxl111sf_demod_attach(struct mxl111sf_state *mxl_state,
EXPORT_SYMBOL_GPL(mxl111sf_demod_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF DVB-T demodulator driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
index 3f3f8bfd190b..2d4530f5be54 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-demod.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-demod.h - driver for the MaxLinear MXL111SF DVB-T demodulator
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
index e4121cb8f5ef..a619410adde4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
index 0220f54299a5..b85a5772d771 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-gpio.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-gpio.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
index 34434557ef65..a101d06eb143 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
index a57a45ffb9e4..465762145ad2 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-i2c.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-i2c.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
index b741b3a7a325..f6b348024bec 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.c - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
index f0756071d347..0643738de7de 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-phy.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-phy.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
index 17831b0fb9db..89bf115e927e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-reg.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-reg.h - driver for the MaxLinear MXL111SF
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
index 879c529640f7..a8d2c7053674 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.c
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.c - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -512,7 +512,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
EXPORT_SYMBOL_GPL(mxl111sf_tuner_attach);
MODULE_DESCRIPTION("MaxLinear MxL111SF CMOS tuner driver");
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
index 90f583e5d6a6..2046db22519e 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf-tuner.h
@@ -1,7 +1,7 @@
/*
* mxl111sf-tuner.h - driver for the MaxLinear MXL111SF CMOS tuner
*
- * Copyright (C) 2010 Michael Krufky <mkrufky@kernellabs.com>
+ * Copyright (C) 2010-2014 Michael Krufky <mkrufky@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -68,7 +68,7 @@ struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
#else
static inline
struct dvb_frontend *mxl111sf_tuner_attach(struct dvb_frontend *fe,
- struct mxl111sf_state *mxl_state
+ struct mxl111sf_state *mxl_state,
struct mxl111sf_tuner_config *cfg)
{
printk(KERN_WARNING "%s: driver disabled by Kconfig\n", __func__);
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 08240e498451..c7304fa8ab73 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -105,7 +105,7 @@ int mxl111sf_read_reg(struct mxl111sf_state *state, u8 addr, u8 *data)
ret = -EINVAL;
}
- pr_debug("R: (0x%02x, 0x%02x)\n", addr, *data);
+ pr_debug("R: (0x%02x, 0x%02x)\n", addr, buf[1]);
fail:
return ret;
}
@@ -1421,7 +1421,7 @@ static struct usb_driver mxl111sf_usb_driver = {
module_usb_driver(mxl111sf_usb_driver);
-MODULE_AUTHOR("Michael Krufky <mkrufky@kernellabs.com>");
+MODULE_AUTHOR("Michael Krufky <mkrufky@linuxtv.org>");
MODULE_DESCRIPTION("Driver for MaxLinear MxL111SF");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.h b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
index 9816de86e48c..8516c011b7cc 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.h
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Michael Krufky (mkrufky@kernellabs.com)
+ * Copyright (C) 2010-2014 Michael Krufky (mkrufky@linuxtv.org)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
diff --git a/drivers/media/usb/hdpvr/hdpvr-core.c b/drivers/media/usb/hdpvr/hdpvr-core.c
index 2f0c89cbac76..c5638964c3f2 100644
--- a/drivers/media/usb/hdpvr/hdpvr-core.c
+++ b/drivers/media/usb/hdpvr/hdpvr-core.c
@@ -198,7 +198,6 @@ static int device_authorization(struct hdpvr_device *dev)
hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0);
v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n",
print_buf);
- kfree(print_buf);
#endif
msleep(100);
@@ -214,6 +213,9 @@ static int device_authorization(struct hdpvr_device *dev)
retval = ret != 8;
unlock:
mutex_unlock(&dev->usbc_mutex);
+#ifdef HDPVR_DEBUG
+ kfree(print_buf);
+#endif
return retval;
}
diff --git a/drivers/media/v4l2-core/v4l2-dv-timings.c b/drivers/media/v4l2-core/v4l2-dv-timings.c
index ee52b9f4a944..f7902fe8a526 100644
--- a/drivers/media/v4l2-core/v4l2-dv-timings.c
+++ b/drivers/media/v4l2-core/v4l2-dv-timings.c
@@ -515,6 +515,7 @@ bool v4l2_detect_gtf(unsigned frame_height,
aspect.denominator = 9;
}
image_width = ((image_height * aspect.numerator) / aspect.denominator);
+ image_width = (image_width + GTF_CELL_GRAN/2) & ~(GTF_CELL_GRAN - 1);
/* Horizontal */
if (default_gtf)
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index 65411adcd0ea..7e6b209b7002 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -66,14 +66,11 @@ static void __videobuf_dc_free(struct device *dev,
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
- dev_dbg(q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
+ dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -85,11 +82,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
map, map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_dma_contig_memory *mem;
dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -128,8 +126,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
}
static const struct vm_operations_struct videobuf_vm_ops = {
diff --git a/drivers/media/v4l2-core/videobuf-dma-sg.c b/drivers/media/v4l2-core/videobuf-dma-sg.c
index 9db674ccdc68..828e7c10bd70 100644
--- a/drivers/media/v4l2-core/videobuf-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf-dma-sg.c
@@ -338,14 +338,11 @@ EXPORT_SYMBOL_GPL(videobuf_dma_free);
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -358,9 +355,10 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
for (i = 0; i < VIDEO_MAX_FRAME; i++) {
if (NULL == q->bufs[i])
continue;
@@ -376,9 +374,9 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
q->bufs[i]->baddr = 0;
q->ops->buf_release(q, q->bufs[i]);
}
+ videobuf_queue_unlock(q);
kfree(map);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf-vmalloc.c b/drivers/media/v4l2-core/videobuf-vmalloc.c
index 1365c651c177..2ff7fcc77b11 100644
--- a/drivers/media/v4l2-core/videobuf-vmalloc.c
+++ b/drivers/media/v4l2-core/videobuf-vmalloc.c
@@ -54,14 +54,11 @@ MODULE_LICENSE("GPL");
static void videobuf_vm_open(struct vm_area_struct *vma)
{
struct videobuf_mapping *map = vma->vm_private_data;
- struct videobuf_queue *q = map->q;
dprintk(2, "vm_open %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
map->count++;
- videobuf_queue_unlock(q);
}
static void videobuf_vm_close(struct vm_area_struct *vma)
@@ -73,11 +70,12 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
dprintk(2, "vm_close %p [count=%u,vma=%08lx-%08lx]\n", map,
map->count, vma->vm_start, vma->vm_end);
- videobuf_queue_lock(q);
- if (!--map->count) {
+ map->count--;
+ if (0 == map->count) {
struct videobuf_vmalloc_memory *mem;
dprintk(1, "munmap %p q=%p\n", map, q);
+ videobuf_queue_lock(q);
/* We need first to cancel streams, before unmapping */
if (q->streaming)
@@ -116,8 +114,8 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
kfree(map);
+ videobuf_queue_unlock(q);
}
- videobuf_queue_unlock(q);
return;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5a5fb7f09b7b..a127925c9d61 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1776,6 +1776,11 @@ static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
return 0;
}
+ if (!q->num_buffers) {
+ dprintk(1, "streamon: no buffers have been allocated\n");
+ return -EINVAL;
+ }
+
/*
* If any buffers were queued before streamon,
* we can now pass them to driver for processing.
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
index a60c188c2bd9..04bd3b6de401 100644
--- a/drivers/message/i2o/i2o_config.c
+++ b/drivers/message/i2o/i2o_config.c
@@ -754,19 +754,19 @@ static long i2o_cfg_compat_ioctl(struct file *file, unsigned cmd,
unsigned long arg)
{
int ret;
- mutex_lock(&i2o_cfg_mutex);
switch (cmd) {
case I2OGETIOPS:
ret = i2o_cfg_ioctl(file, cmd, arg);
break;
case I2OPASSTHRU32:
+ mutex_lock(&i2o_cfg_mutex);
ret = i2o_cfg_passthru32(file, cmd, arg);
+ mutex_unlock(&i2o_cfg_mutex);
break;
default:
ret = -ENOIOCTLCMD;
break;
}
- mutex_unlock(&i2o_cfg_mutex);
return ret;
}
diff --git a/drivers/mfd/da9055-i2c.c b/drivers/mfd/da9055-i2c.c
index 13af7e50021e..8103e4362132 100644
--- a/drivers/mfd/da9055-i2c.c
+++ b/drivers/mfd/da9055-i2c.c
@@ -53,17 +53,25 @@ static int da9055_i2c_remove(struct i2c_client *i2c)
return 0;
}
+/*
+ * DO NOT change the device Ids. The naming is intentionally specific as both
+ * the PMIC and CODEC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 ids for PMIC
+ * and CODEC, which must be different to operate together.
+ */
static struct i2c_device_id da9055_i2c_id[] = {
- {"da9055", 0},
+ {"da9055-pmic", 0},
{ }
};
+MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
static struct i2c_driver da9055_i2c_driver = {
.probe = da9055_i2c_probe,
.remove = da9055_i2c_remove,
.id_table = da9055_i2c_id,
.driver = {
- .name = "da9055",
+ .name = "da9055-pmic",
.owner = THIS_MODULE,
},
};
diff --git a/drivers/mfd/max14577.c b/drivers/mfd/max14577.c
index ac514fb2b877..71aa14a6bfbb 100644
--- a/drivers/mfd/max14577.c
+++ b/drivers/mfd/max14577.c
@@ -173,6 +173,7 @@ static const struct i2c_device_id max14577_i2c_id[] = {
};
MODULE_DEVICE_TABLE(i2c, max14577_i2c_id);
+#ifdef CONFIG_PM_SLEEP
static int max14577_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -208,6 +209,7 @@ static int max14577_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static struct of_device_id max14577_dt_match[] = {
{ .compatible = "maxim,max14577", },
diff --git a/drivers/mfd/max8997.c b/drivers/mfd/max8997.c
index be88a3bf7b85..5adede0fb04c 100644
--- a/drivers/mfd/max8997.c
+++ b/drivers/mfd/max8997.c
@@ -164,15 +164,15 @@ static struct max8997_platform_data *max8997_i2c_parse_dt_pdata(
return pd;
}
-static inline int max8997_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8997_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8997_pmic_dt_match, i2c->dev.of_node);
- return (int)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8997_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/max8998.c b/drivers/mfd/max8998.c
index 612ca404e150..5d5e186b5d8b 100644
--- a/drivers/mfd/max8998.c
+++ b/drivers/mfd/max8998.c
@@ -169,16 +169,16 @@ static struct max8998_platform_data *max8998_i2c_parse_dt_pdata(
return pd;
}
-static inline int max8998_i2c_get_driver_data(struct i2c_client *i2c,
+static inline unsigned long max8998_i2c_get_driver_data(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
if (IS_ENABLED(CONFIG_OF) && i2c->dev.of_node) {
const struct of_device_id *match;
match = of_match_node(max8998_dt_match, i2c->dev.of_node);
- return (int)(long)match->data;
+ return (unsigned long)match->data;
}
- return (int)id->driver_data;
+ return id->driver_data;
}
static int max8998_i2c_probe(struct i2c_client *i2c,
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index a139798b8065..714e2135210e 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -315,6 +315,7 @@ static int sec_pmic_remove(struct i2c_client *i2c)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
static int sec_pmic_suspend(struct device *dev)
{
struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
@@ -349,6 +350,7 @@ static int sec_pmic_resume(struct device *dev)
return 0;
}
+#endif /* CONFIG_PM_SLEEP */
static SIMPLE_DEV_PM_OPS(sec_pmic_pm_ops, sec_pmic_suspend, sec_pmic_resume);
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index 966cf65c5c36..3cc4c7084b92 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -158,7 +158,7 @@ static int tps65217_probe(struct i2c_client *client,
{
struct tps65217 *tps;
unsigned int version;
- unsigned int chip_id = ids->driver_data;
+ unsigned long chip_id = ids->driver_data;
const struct of_device_id *match;
bool status_off = false;
int ret;
@@ -170,7 +170,7 @@ static int tps65217_probe(struct i2c_client *client,
"Failed to find matching dt id\n");
return -EINVAL;
}
- chip_id = (unsigned int)(unsigned long)match->data;
+ chip_id = (unsigned long)match->data;
status_off = of_property_read_bool(client->dev.of_node,
"ti,pmic-shutdown-controller");
}
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index ba04f1bc70eb..e6fab94e2c8a 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -636,7 +636,7 @@ static int wm8994_i2c_probe(struct i2c_client *i2c,
if (i2c->dev.of_node) {
of_id = of_match_device(wm8994_of_match, &i2c->dev);
if (of_id)
- wm8994->type = (int)of_id->data;
+ wm8994->type = (enum wm8994_type)of_id->data;
} else {
wm8994->type = id->driver_data;
}
diff --git a/drivers/misc/genwqe/card_dev.c b/drivers/misc/genwqe/card_dev.c
index 8f8a6b327cdb..2c2c9cc75231 100644
--- a/drivers/misc/genwqe/card_dev.c
+++ b/drivers/misc/genwqe/card_dev.c
@@ -787,6 +787,7 @@ static int genwqe_pin_mem(struct genwqe_file *cfile, struct genwqe_mem *m)
if (rc != 0) {
dev_err(&pci_dev->dev,
"[%s] genwqe_user_vmap rc=%d\n", __func__, rc);
+ kfree(dma_map);
return rc;
}
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index 1ee2b9492a82..89a557972d1b 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -666,7 +666,6 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
goto err;
cb->fop_type = MEI_FOP_READ;
- cl->read_cb = cb;
if (dev->hbuf_is_ready) {
dev->hbuf_is_ready = false;
if (mei_hbm_cl_flow_control_req(dev, cl)) {
@@ -678,6 +677,9 @@ int mei_cl_read_start(struct mei_cl *cl, size_t length)
} else {
list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
}
+
+ cl->read_cb = cb;
+
return rets;
err:
mei_io_cb_free(cb);
@@ -908,7 +910,6 @@ void mei_cl_all_disconnect(struct mei_device *dev)
list_for_each_entry_safe(cl, next, &dev->file_list, link) {
cl->state = MEI_FILE_DISCONNECTED;
cl->mei_flow_ctrl_creds = 0;
- cl->read_cb = NULL;
cl->timer_count = 0;
}
}
@@ -942,8 +943,16 @@ void mei_cl_all_wakeup(struct mei_device *dev)
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
+ struct list_head *list;
+
+ list = &dev->write_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
+ list_del(&cb->list);
+ mei_io_cb_free(cb);
+ }
- list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
+ list = &dev->write_waiting_list.list;
+ list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
diff --git a/drivers/misc/mic/host/mic_virtio.c b/drivers/misc/mic/host/mic_virtio.c
index 752ff873f891..7e1ef0ebbb80 100644
--- a/drivers/misc/mic/host/mic_virtio.c
+++ b/drivers/misc/mic/host/mic_virtio.c
@@ -156,7 +156,8 @@ static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
static int _mic_virtio_copy(struct mic_vdev *mvdev,
struct mic_copy_desc *copy)
{
- int ret = 0, iovcnt = copy->iovcnt;
+ int ret = 0;
+ u32 iovcnt = copy->iovcnt;
struct iovec iov;
struct iovec __user *u_iov = copy->iov;
void __user *ubuf = NULL;
diff --git a/drivers/misc/sgi-gru/grukdump.c b/drivers/misc/sgi-gru/grukdump.c
index 9b2062d17327..2bef3f76032a 100644
--- a/drivers/misc/sgi-gru/grukdump.c
+++ b/drivers/misc/sgi-gru/grukdump.c
@@ -139,8 +139,11 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ubuf += sizeof(hdr);
ubufcch = ubuf;
- if (gru_user_copy_handle(&ubuf, cch))
- goto fail;
+ if (gru_user_copy_handle(&ubuf, cch)) {
+ if (cch_locked)
+ unlock_cch_handle(cch);
+ return -EFAULT;
+ }
if (cch_locked)
ubufcch->delresp = 0;
bytes = sizeof(hdr) + GRU_CACHE_LINE_BYTES;
@@ -179,10 +182,6 @@ static int gru_dump_context(struct gru_state *gru, int ctxnum,
ret = -EFAULT;
return ret ? ret : bytes;
-
-fail:
- unlock_cch_handle(cch);
- return -EFAULT;
}
int gru_dump_chiplet_request(unsigned long arg)
diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c
index b9e2000969f0..95c894482fdd 100644
--- a/drivers/misc/sgi-xp/xpc_uv.c
+++ b/drivers/misc/sgi-xp/xpc_uv.c
@@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
nid = cpu_to_node(cpu);
page = alloc_pages_exact_node(nid,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
pg_order);
if (page == NULL) {
dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 357bbc54fe4b..3e049c13429c 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -197,7 +197,7 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
- limit = dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
+ limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
mq->card = card;
mq->queue = blk_init_queue(mmc_request_fn, lock);
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 59eba5d2c685..9715a7ba164a 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1584,7 +1584,7 @@ read_retry:
}
if (mtd->ecc_stats.failed - ecc_failures) {
- if (retry_mode + 1 <= chip->read_retries) {
+ if (retry_mode + 1 < chip->read_retries) {
retry_mode++;
ret = nand_setup_read_retry(mtd,
retry_mode);
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index ef4190a02b7b..bf642ceef681 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -1633,6 +1633,7 @@ static int omap_nand_probe(struct platform_device *pdev)
int i;
dma_cap_mask_t mask;
unsigned sig;
+ unsigned oob_index;
struct resource *res;
struct mtd_part_parser_data ppdata = {};
@@ -1826,11 +1827,14 @@ static int omap_nand_probe(struct platform_device *pdev)
(mtd->writesize /
nand_chip->ecc.size);
if (nand_chip->options & NAND_BUSWIDTH_16)
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
+ oob_index = BADBLOCK_MARKER_LENGTH;
else
- ecclayout->eccpos[0] = 1;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = 1;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* no reserved-marker in ecclayout for this ecc-scheme */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
case OMAP_ECC_BCH4_CODE_HW_DETECTION_SW:
@@ -1847,9 +1851,15 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1883,9 +1893,12 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* This ECC scheme requires ELM H/W block */
if (is_elm_present(info, pdata->elm_of_node, BCH4_ECC) < 0) {
pr_err("nand: error: could not initialize ELM\n");
@@ -1913,9 +1926,15 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++) {
+ ecclayout->eccpos[i] = oob_index;
+ if (((i + 1) % nand_chip->ecc.bytes) == 0)
+ oob_index++;
+ }
+ /* include reserved-marker in ecclayout->oobfree calculation */
+ ecclayout->oobfree->offset = 1 +
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
/* software bch library is used for locating errors */
nand_chip->ecc.priv = nand_bch_init(mtd,
nand_chip->ecc.size,
@@ -1956,9 +1975,12 @@ static int omap_nand_probe(struct platform_device *pdev)
ecclayout->eccbytes = nand_chip->ecc.bytes *
(mtd->writesize /
nand_chip->ecc.size);
- ecclayout->eccpos[0] = BADBLOCK_MARKER_LENGTH;
- ecclayout->oobfree->offset = ecclayout->eccpos[0] +
- ecclayout->eccbytes;
+ oob_index = BADBLOCK_MARKER_LENGTH;
+ for (i = 0; i < ecclayout->eccbytes; i++, oob_index++)
+ ecclayout->eccpos[i] = oob_index;
+ /* reserved marker already included in ecclayout->eccbytes */
+ ecclayout->oobfree->offset =
+ ecclayout->eccpos[ecclayout->eccbytes - 1] + 1;
break;
#else
pr_err("nand: error: CONFIG_MTD_NAND_OMAP_BCH not enabled\n");
@@ -1972,11 +1994,8 @@ static int omap_nand_probe(struct platform_device *pdev)
goto return_error;
}
- /* populate remaining ECC layout data */
- ecclayout->oobfree->length = mtd->oobsize - (BADBLOCK_MARKER_LENGTH +
- ecclayout->eccbytes);
- for (i = 1; i < ecclayout->eccbytes; i++)
- ecclayout->eccpos[i] = ecclayout->eccpos[0] + i;
+ /* all OOB bytes from oobfree->offset till end off OOB are free */
+ ecclayout->oobfree->length = mtd->oobsize - ecclayout->oobfree->offset;
/* check if NAND device's OOB is enough to store ECC signatures */
if (mtd->oobsize < (ecclayout->eccbytes + BADBLOCK_MARKER_LENGTH)) {
pr_err("not enough OOB bytes required = %d, available=%d\n",
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index ead861307b3c..c5dad652614d 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -463,8 +463,8 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
}
if (found_orphan) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
@@ -846,16 +846,16 @@ fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &used, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
- kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
+ kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
}
return ret;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index f342278539d5..494b888a6568 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -139,7 +139,7 @@ config MACVTAP
This adds a specialized tap character device driver that is based
on the MAC-VLAN network interface, called macvtap. A macvtap device
can be added in the same way as a macvlan device, using 'type
- macvlan', and then be accessed through the tap user space interface.
+ macvtap', and then be accessed through the tap user space interface.
To compile this driver as a module, choose M here: the module
will be called macvtap.
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index cce1f1bf90b4..dcde56057fe1 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -181,7 +181,7 @@ static inline int __agg_has_partner(struct aggregator *agg)
*/
static inline void __disable_port(struct port *port)
{
- bond_set_slave_inactive_flags(port->slave);
+ bond_set_slave_inactive_flags(port->slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
@@ -193,7 +193,7 @@ static inline void __enable_port(struct port *port)
struct slave *slave = port->slave;
if ((slave->link == BOND_LINK_UP) && IS_UP(slave->dev))
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave, BOND_SLAVE_NOTIFY_LATER);
}
/**
@@ -1796,8 +1796,6 @@ void bond_3ad_initiate_agg_selection(struct bonding *bond, int timeout)
BOND_AD_INFO(bond).agg_select_timer = timeout;
}
-static u16 aggregator_identifier;
-
/**
* bond_3ad_initialize - initialize a bond's 802.3ad parameters and structures
* @bond: bonding struct to work on
@@ -1811,7 +1809,7 @@ void bond_3ad_initialize(struct bonding *bond, u16 tick_resolution)
if (!MAC_ADDRESS_EQUAL(&(BOND_AD_INFO(bond).system.sys_mac_addr),
bond->dev->dev_addr)) {
- aggregator_identifier = 0;
+ BOND_AD_INFO(bond).aggregator_identifier = 0;
BOND_AD_INFO(bond).system.sys_priority = 0xFFFF;
BOND_AD_INFO(bond).system.sys_mac_addr = *((struct mac_addr *)bond->dev->dev_addr);
@@ -1880,7 +1878,7 @@ void bond_3ad_bind_slave(struct slave *slave)
ad_initialize_agg(aggregator);
aggregator->aggregator_mac_address = *((struct mac_addr *)bond->dev->dev_addr);
- aggregator->aggregator_identifier = (++aggregator_identifier);
+ aggregator->aggregator_identifier = ++BOND_AD_INFO(bond).aggregator_identifier;
aggregator->slave = slave;
aggregator->is_active = 0;
aggregator->num_of_ports = 0;
@@ -2064,6 +2062,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
struct list_head *iter;
struct slave *slave;
struct port *port;
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
read_lock(&bond->lock);
rcu_read_lock();
@@ -2121,8 +2120,19 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
}
re_arm:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
rcu_read_unlock();
read_unlock(&bond->lock);
+
+ if (should_notify_rtnl && rtnl_trylock()) {
+ bond_slave_state_notify(bond);
+ rtnl_unlock();
+ }
queue_delayed_work(bond->wq, &bond->ad_work, ad_delta_in_ticks);
}
diff --git a/drivers/net/bonding/bond_3ad.h b/drivers/net/bonding/bond_3ad.h
index 13dc9d3c5e34..f4dd9592ac62 100644
--- a/drivers/net/bonding/bond_3ad.h
+++ b/drivers/net/bonding/bond_3ad.h
@@ -253,6 +253,7 @@ struct ad_system {
struct ad_bond_info {
struct ad_system system; /* 802.3ad system structure */
u32 agg_select_timer; // Timer to select aggregator after all adapter's hand shakes
+ u16 aggregator_identifier;
};
struct ad_slave_info {
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index a2c47476804d..e8f133e926aa 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -730,7 +730,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
client_info->ntt = 0;
}
- if (!vlan_get_tag(skb, &client_info->vlan_id))
+ if (vlan_get_tag(skb, &client_info->vlan_id))
client_info->vlan_id = 0;
if (!client_info->assigned) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 4c08018d7333..e5628fc725c3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -829,21 +829,25 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
if (bond_is_lb(bond)) {
bond_alb_handle_active_change(bond, new_active);
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active)
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
rcu_assign_pointer(bond->curr_active_slave, new_active);
}
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
if (old_active)
- bond_set_slave_inactive_flags(old_active);
+ bond_set_slave_inactive_flags(old_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (new_active) {
bool should_notify_peers = false;
- bond_set_slave_active_flags(new_active);
+ bond_set_slave_active_flags(new_active,
+ BOND_SLAVE_NOTIFY_NOW);
if (bond->params.fail_over_mac)
bond_do_fail_over_mac(bond, new_active,
@@ -1193,6 +1197,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
return -EBUSY;
}
+ if (bond_dev == slave_dev) {
+ pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+ return -EPERM;
+ }
+
/* vlan challenged mutual exclusion */
/* no need to lock since we're protected by rtnl_lock */
if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
@@ -1270,9 +1279,13 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
if (slave_ops->ndo_set_mac_address == NULL) {
if (!bond_has_slaves(bond)) {
- pr_warning("%s: Warning: The first slave device specified does not support setting the MAC address. Setting fail_over_mac to active.",
- bond_dev->name);
- bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address.\n",
+ bond_dev->name);
+ if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) {
+ bond->params.fail_over_mac = BOND_FOM_ACTIVE;
+ pr_warn("%s: Setting fail_over_mac to active for active-backup mode.\n",
+ bond_dev->name);
+ }
} else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active.\n",
bond_dev->name);
@@ -1315,7 +1328,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
*/
memcpy(new_slave->perm_hwaddr, slave_dev->dev_addr, ETH_ALEN);
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/*
* Set slave to master's mac address. The application already
* set the master's mac address to that of the first slave
@@ -1458,14 +1472,15 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
switch (bond->params.mode) {
case BOND_MODE_ACTIVEBACKUP:
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave,
+ BOND_SLAVE_NOTIFY_NOW);
break;
case BOND_MODE_8023AD:
/* in 802.3ad mode, the internal mechanism
* will activate the slaves in the selected
* aggregator
*/
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
/* if this is the first slave */
if (!prev_slave) {
SLAVE_AD_INFO(new_slave).id = 1;
@@ -1483,7 +1498,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
case BOND_MODE_TLB:
case BOND_MODE_ALB:
bond_set_active_slave(new_slave);
- bond_set_slave_inactive_flags(new_slave);
+ bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
break;
default:
pr_debug("This slave is always active in trunk mode\n");
@@ -1505,7 +1520,6 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
slave_dev->npinfo = bond->dev->npinfo;
if (slave_dev->npinfo) {
if (slave_enable_netpoll(new_slave)) {
- read_unlock(&bond->lock);
pr_info("Error, %s: master_dev is using netpoll, "
"but new slave device does not support netpoll.\n",
bond_dev->name);
@@ -1539,9 +1553,11 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
bond_set_carrier(bond);
if (USES_PRIMARY(bond->params.mode)) {
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
}
pr_info("%s: enslaving %s as a%s interface with a%s link.\n",
@@ -1567,10 +1583,12 @@ err_detach:
if (bond->primary_slave == new_slave)
bond->primary_slave = NULL;
if (bond->curr_active_slave == new_slave) {
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_change_active_slave(bond, NULL);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
}
slave_disable_netpoll(new_slave);
@@ -1579,7 +1597,8 @@ err_close:
dev_close(slave_dev);
err_restore_mac:
- if (!bond->params.fail_over_mac) {
+ if (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* XXX TODO - fom follow mode needs to change master's
* MAC if this slave's MAC is in use by the bond, or at
* least print a warning.
@@ -1645,9 +1664,6 @@ static int __bond_release_one(struct net_device *bond_dev,
return -EINVAL;
}
- /* release the slave from its bond */
- bond->slave_cnt--;
-
bond_sysfs_slave_del(slave);
bond_upper_dev_unlink(bond_dev, slave_dev);
@@ -1672,7 +1688,8 @@ static int __bond_release_one(struct net_device *bond_dev,
bond->current_arp_slave = NULL;
- if (!all && !bond->params.fail_over_mac) {
+ if (!all && (!bond->params.fail_over_mac ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP)) {
if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
bond_has_slaves(bond))
pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s. Set the HWaddr of %s to a different address to avoid conflicts.\n",
@@ -1728,6 +1745,7 @@ static int __bond_release_one(struct net_device *bond_dev,
unblock_netpoll_tx();
synchronize_rcu();
+ bond->slave_cnt--;
if (!bond_has_slaves(bond)) {
call_netdevice_notifiers(NETDEV_CHANGEADDR, bond->dev);
@@ -1769,7 +1787,8 @@ static int __bond_release_one(struct net_device *bond_dev,
/* close slave before restoring its mac address */
dev_close(slave_dev);
- if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+ if (bond->params.fail_over_mac != BOND_FOM_ACTIVE ||
+ bond->params.mode != BOND_MODE_ACTIVEBACKUP) {
/* restore original ("permanent") mac address */
memcpy(addr.sa_data, slave->perm_hwaddr, ETH_ALEN);
addr.sa_family = slave_dev->type;
@@ -2004,7 +2023,8 @@ static void bond_miimon_commit(struct bonding *bond)
if (bond->params.mode == BOND_MODE_ACTIVEBACKUP ||
bond->params.mode == BOND_MODE_8023AD)
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2551,7 +2571,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave->link = BOND_LINK_UP;
if (bond->current_arp_slave) {
bond_set_slave_inactive_flags(
- bond->current_arp_slave);
+ bond->current_arp_slave,
+ BOND_SLAVE_NOTIFY_NOW);
bond->current_arp_slave = NULL;
}
@@ -2571,7 +2592,8 @@ static void bond_ab_arp_commit(struct bonding *bond)
slave->link_failure_count++;
slave->link = BOND_LINK_DOWN;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
pr_info("%s: link status definitely down for interface %s, disabling it\n",
bond->dev->name, slave->dev->name);
@@ -2604,17 +2626,17 @@ do_failover:
/*
* Send ARP probes for active-backup mode ARP monitor.
+ *
+ * Called with rcu_read_lock held.
*/
static bool bond_ab_arp_probe(struct bonding *bond)
{
struct slave *slave, *before = NULL, *new_slave = NULL,
- *curr_arp_slave, *curr_active_slave;
+ *curr_arp_slave = rcu_dereference(bond->current_arp_slave),
+ *curr_active_slave = rcu_dereference(bond->curr_active_slave);
struct list_head *iter;
bool found = false;
-
- rcu_read_lock();
- curr_arp_slave = rcu_dereference(bond->current_arp_slave);
- curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
if (curr_arp_slave && curr_active_slave)
pr_info("PROBE: c_arp %s && cas %s BAD\n",
@@ -2623,32 +2645,23 @@ static bool bond_ab_arp_probe(struct bonding *bond)
if (curr_active_slave) {
bond_arp_send_all(bond, curr_active_slave);
- rcu_read_unlock();
- return true;
+ return should_notify_rtnl;
}
- rcu_read_unlock();
/* if we don't have a curr_active_slave, search for the next available
* backup slave from the current_arp_slave and make it the candidate
* for becoming the curr_active_slave
*/
- if (!rtnl_trylock())
- return false;
- /* curr_arp_slave might have gone away */
- curr_arp_slave = ACCESS_ONCE(bond->current_arp_slave);
-
if (!curr_arp_slave) {
- curr_arp_slave = bond_first_slave(bond);
- if (!curr_arp_slave) {
- rtnl_unlock();
- return true;
- }
+ curr_arp_slave = bond_first_slave_rcu(bond);
+ if (!curr_arp_slave)
+ return should_notify_rtnl;
}
- bond_set_slave_inactive_flags(curr_arp_slave);
+ bond_set_slave_inactive_flags(curr_arp_slave, BOND_SLAVE_NOTIFY_LATER);
- bond_for_each_slave(bond, slave, iter) {
+ bond_for_each_slave_rcu(bond, slave, iter) {
if (!found && !before && IS_UP(slave->dev))
before = slave;
@@ -2666,7 +2679,8 @@ static bool bond_ab_arp_probe(struct bonding *bond)
if (slave->link_failure_count < UINT_MAX)
slave->link_failure_count++;
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_LATER);
pr_info("%s: backup interface %s is now down.\n",
bond->dev->name, slave->dev->name);
@@ -2678,26 +2692,31 @@ static bool bond_ab_arp_probe(struct bonding *bond)
if (!new_slave && before)
new_slave = before;
- if (!new_slave) {
- rtnl_unlock();
- return true;
- }
+ if (!new_slave)
+ goto check_state;
new_slave->link = BOND_LINK_BACK;
- bond_set_slave_active_flags(new_slave);
+ bond_set_slave_active_flags(new_slave, BOND_SLAVE_NOTIFY_LATER);
bond_arp_send_all(bond, new_slave);
new_slave->jiffies = jiffies;
rcu_assign_pointer(bond->current_arp_slave, new_slave);
- rtnl_unlock();
- return true;
+check_state:
+ bond_for_each_slave_rcu(bond, slave, iter) {
+ if (slave->should_notify) {
+ should_notify_rtnl = BOND_SLAVE_NOTIFY_NOW;
+ break;
+ }
+ }
+ return should_notify_rtnl;
}
static void bond_activebackup_arp_mon(struct work_struct *work)
{
struct bonding *bond = container_of(work, struct bonding,
arp_work.work);
- bool should_notify_peers = false, should_commit = false;
+ bool should_notify_peers = false;
+ bool should_notify_rtnl = false;
int delta_in_ticks;
delta_in_ticks = msecs_to_jiffies(bond->params.arp_interval);
@@ -2706,11 +2725,12 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
goto re_arm;
rcu_read_lock();
+
should_notify_peers = bond_should_notify_peers(bond);
- should_commit = bond_ab_arp_inspect(bond);
- rcu_read_unlock();
- if (should_commit) {
+ if (bond_ab_arp_inspect(bond)) {
+ rcu_read_unlock();
+
/* Race avoidance with bond_close flush of workqueue */
if (!rtnl_trylock()) {
delta_in_ticks = 1;
@@ -2719,23 +2739,28 @@ static void bond_activebackup_arp_mon(struct work_struct *work)
}
bond_ab_arp_commit(bond);
+
rtnl_unlock();
+ rcu_read_lock();
}
- if (!bond_ab_arp_probe(bond)) {
- /* rtnl locking failed, re-arm */
- delta_in_ticks = 1;
- should_notify_peers = false;
- }
+ should_notify_rtnl = bond_ab_arp_probe(bond);
+ rcu_read_unlock();
re_arm:
if (bond->params.arp_interval)
queue_delayed_work(bond->wq, &bond->arp_work, delta_in_ticks);
- if (should_notify_peers) {
+ if (should_notify_peers || should_notify_rtnl) {
if (!rtnl_trylock())
return;
- call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, bond->dev);
+
+ if (should_notify_peers)
+ call_netdevice_notifiers(NETDEV_NOTIFY_PEERS,
+ bond->dev);
+ if (should_notify_rtnl)
+ bond_slave_state_notify(bond);
+
rtnl_unlock();
}
}
@@ -2857,9 +2882,12 @@ static int bond_slave_netdev_event(unsigned long event,
pr_info("%s: Primary slave changed to %s, reselecting active slave.\n",
bond->dev->name, bond->primary_slave ? slave_dev->name :
"none");
+
+ block_netpoll_tx();
write_lock_bh(&bond->curr_slave_lock);
bond_select_active_slave(bond);
write_unlock_bh(&bond->curr_slave_lock);
+ unblock_netpoll_tx();
break;
case NETDEV_FEAT_CHANGE:
bond_compute_features(bond);
@@ -3032,9 +3060,11 @@ static int bond_open(struct net_device *bond_dev)
bond_for_each_slave(bond, slave, iter) {
if ((bond->params.mode == BOND_MODE_ACTIVEBACKUP)
&& (slave != bond->curr_active_slave)) {
- bond_set_slave_inactive_flags(slave);
+ bond_set_slave_inactive_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
} else {
- bond_set_slave_active_flags(slave);
+ bond_set_slave_active_flags(slave,
+ BOND_SLAVE_NOTIFY_NOW);
}
}
read_unlock(&bond->curr_slave_lock);
@@ -3431,7 +3461,8 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
/* If fail_over_mac is enabled, do nothing and return success.
* Returning an error causes ifenslave to fail.
*/
- if (bond->params.fail_over_mac)
+ if (bond->params.fail_over_mac &&
+ bond->params.mode == BOND_MODE_ACTIVEBACKUP)
return 0;
if (!is_valid_ether_addr(sa->sa_data))
@@ -3692,7 +3723,7 @@ static inline int bond_slave_override(struct bonding *bond,
static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
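The bond_main.c rework above converges on one shape for the active-backup ARP monitor: run the whole probe pass under rcu_read_lock(), record whether anything needs an RTNL-protected follow-up, re-arm the work item, and only then attempt rtnl_trylock() so the worker can never deadlock against bond_close() flushing the workqueue. A minimal sketch of that shape, with illustrative names (struct mon_dev and the two helpers are not bonding symbols; assumes <linux/rtnetlink.h> and <linux/workqueue.h>):

static void mon_work_fn(struct work_struct *work)
{
        struct mon_dev *md = container_of(work, struct mon_dev, mon_work.work);
        bool need_rtnl;

        rcu_read_lock();
        need_rtnl = mon_inspect_rcu(md);        /* read-only pass, never sleeps */
        rcu_read_unlock();

        queue_delayed_work(md->wq, &md->mon_work, md->interval);

        if (!need_rtnl)
                return;
        if (!rtnl_trylock())                    /* racing with close(): retry next round */
                return;
        mon_commit(md);                         /* RTNL-protected notifications */
        rtnl_unlock();
}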
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index 11cb943222d5..298c26509095 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -14,7 +14,7 @@
#include <linux/errno.h>
#include <linux/if.h>
#include <linux/netdevice.h>
-#include <linux/rwlock.h>
+#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/ctype.h>
#include <linux/inet.h>
@@ -121,6 +121,7 @@ static struct bond_opt_value bond_resend_igmp_tbl[] = {
static struct bond_opt_value bond_lp_interval_tbl[] = {
{ "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT},
{ "maxval", INT_MAX, BOND_VALFLAG_MAX},
+ { NULL, -1, 0},
};
static struct bond_option bond_opts[] = {
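The one-line bond_options.c fix above adds the missing terminator entry to bond_lp_interval_tbl; the option code walks these value tables until it reaches the NULL-named sentinel, so a table without one is walked past its end. A tiny stand-alone illustration of that convention (the walker and struct names here are made up, not the bonding parser):

#include <limits.h>
#include <stdio.h>

struct opt_value { const char *string; long long value; int flags; };

static const struct opt_value lp_interval_tbl[] = {
        { "minval", 1,       0x1 },
        { "maxval", INT_MAX, 0x2 },
        { NULL,     -1,      0   },             /* sentinel: walkers stop here */
};

static const struct opt_value *find_value(const struct opt_value *tbl, long long v)
{
        for (; tbl->string; tbl++)              /* without the sentinel this overruns */
                if (tbl->value == v)
                        return tbl;
        return NULL;
}

int main(void)
{
        const struct opt_value *m = find_value(lp_interval_tbl, 1);

        printf("%s\n", m ? m->string : "not found");
        return 0;
}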
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 86ccfb9f71cc..2b0fdec695f7 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -195,7 +195,8 @@ struct slave {
s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
- inactive:1; /* indicates inactive slave */
+ inactive:1, /* indicates inactive slave */
+ should_notify:1; /* indicates whether the state changed */
u8 duplex;
u32 original_mtu;
u32 link_failure_count;
@@ -303,6 +304,24 @@ static inline void bond_set_backup_slave(struct slave *slave)
}
}
+static inline void bond_set_slave_state(struct slave *slave,
+ int slave_state, bool notify)
+{
+ if (slave->backup == slave_state)
+ return;
+
+ slave->backup = slave_state;
+ if (notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, slave->dev, 0, GFP_KERNEL);
+ slave->should_notify = 0;
+ } else {
+ if (slave->should_notify)
+ slave->should_notify = 0;
+ else
+ slave->should_notify = 1;
+ }
+}
+
static inline void bond_slave_state_change(struct bonding *bond)
{
struct list_head *iter;
@@ -316,6 +335,19 @@ static inline void bond_slave_state_change(struct bonding *bond)
}
}
+static inline void bond_slave_state_notify(struct bonding *bond)
+{
+ struct list_head *iter;
+ struct slave *tmp;
+
+ bond_for_each_slave(bond, tmp, iter) {
+ if (tmp->should_notify) {
+ rtmsg_ifinfo(RTM_NEWLINK, tmp->dev, 0, GFP_KERNEL);
+ tmp->should_notify = 0;
+ }
+ }
+}
+
static inline int bond_slave_state(struct slave *slave)
{
return slave->backup;
@@ -343,6 +375,9 @@ static inline bool bond_is_active_slave(struct slave *slave)
#define BOND_ARP_VALIDATE_ALL (BOND_ARP_VALIDATE_ACTIVE | \
BOND_ARP_VALIDATE_BACKUP)
+#define BOND_SLAVE_NOTIFY_NOW true
+#define BOND_SLAVE_NOTIFY_LATER false
+
static inline int slave_do_arp_validate(struct bonding *bond,
struct slave *slave)
{
@@ -394,17 +429,19 @@ static inline void bond_netpoll_send_skb(const struct slave *slave,
}
#endif
-static inline void bond_set_slave_inactive_flags(struct slave *slave)
+static inline void bond_set_slave_inactive_flags(struct slave *slave,
+ bool notify)
{
if (!bond_is_lb(slave->bond))
- bond_set_backup_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_BACKUP, notify);
if (!slave->bond->params.all_slaves_active)
slave->inactive = 1;
}
-static inline void bond_set_slave_active_flags(struct slave *slave)
+static inline void bond_set_slave_active_flags(struct slave *slave,
+ bool notify)
{
- bond_set_active_slave(slave);
+ bond_set_slave_state(slave, BOND_STATE_ACTIVE, notify);
slave->inactive = 0;
}
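The deferred-notify bookkeeping above is easy to misread: when notify is BOND_SLAVE_NOTIFY_LATER, should_notify toggles rather than simply being set, so two deferred state flips in a row cancel out and bond_slave_state_notify() never emits a stale message for a slave that ended up back in its already-advertised state. A stand-alone model of that toggle (plain C, not kernel code; the struct and function names are illustrative):

#include <stdbool.h>
#include <stdio.h>

struct model_slave { int backup; bool should_notify; };

/* Mirrors the bond_set_slave_state() logic: record the change now,
 * send the notification immediately or leave it pending. */
static void set_state(struct model_slave *s, int state, bool notify)
{
        if (s->backup == state)
                return;
        s->backup = state;
        if (notify)
                s->should_notify = false;               /* notification sent right away */
        else
                s->should_notify = !s->should_notify;   /* two deferred flips cancel */
}

int main(void)
{
        struct model_slave s = { .backup = 0, .should_notify = false };

        set_state(&s, 1, false);        /* deferred: notification pending */
        set_state(&s, 0, false);        /* back to original: nothing pending */
        printf("pending notify: %d\n", s.should_notify);        /* prints 0 */
        return 0;
}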
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index d447b881bbde..9e7d95dae2c7 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -104,7 +104,7 @@ config CAN_JANZ_ICAN3
config CAN_FLEXCAN
tristate "Support for Freescale FLEXCAN based chips"
- depends on (ARM && CPU_LITTLE_ENDIAN) || PPC
+ depends on ARM || PPC
---help---
Say Y here if you want to support for Freescale FlexCAN.
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 13a909822e25..fc59bc6f040b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -323,19 +323,10 @@ void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
}
if (!priv->echo_skb[idx]) {
- struct sock *srcsk = skb->sk;
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else
- skb_orphan(skb);
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* make settings for echo to reduce code in irq context */
skb->protocol = htons(ETH_P_CAN);
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index aaed97bee471..61376abdab39 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -144,6 +144,8 @@
#define FLEXCAN_MB_CODE_MASK (0xf0ffffff)
+#define FLEXCAN_TIMEOUT_US (50)
+
/*
* FLEXCAN hardware feature flags
*
@@ -235,9 +237,12 @@ static const struct can_bittiming_const flexcan_bittiming_const = {
};
/*
- * Abstract off the read/write for arm versus ppc.
+ * Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
*/
-#if defined(__BIG_ENDIAN)
+#if defined(CONFIG_PPC)
static inline u32 flexcan_read(void __iomem *addr)
{
return in_be32(addr);
@@ -259,6 +264,22 @@ static inline void flexcan_write(u32 val, void __iomem *addr)
}
#endif
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+ if (!priv->reg_xceiver)
+ return 0;
+
+ return regulator_disable(priv->reg_xceiver);
+}
+
static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
u32 reg_esr)
{
@@ -266,26 +287,95 @@ static inline int flexcan_has_and_handle_berr(const struct flexcan_priv *priv,
(reg_esr & FLEXCAN_ESR_ERR_BUS);
}
-static inline void flexcan_chip_enable(struct flexcan_priv *priv)
+static int flexcan_chip_enable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg &= ~FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
- udelay(10);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
}
-static inline void flexcan_chip_disable(struct flexcan_priv *priv)
+static int flexcan_chip_disable(struct flexcan_priv *priv)
{
struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
u32 reg;
reg = flexcan_read(&regs->mcr);
reg |= FLEXCAN_MCR_MDIS;
flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ usleep_range(10, 20);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_freeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = 1000 * 1000 * 10 / priv->can.bittiming.bitrate;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg |= FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(100, 200);
+
+ if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+ u32 reg;
+
+ reg = flexcan_read(&regs->mcr);
+ reg &= ~FLEXCAN_MCR_HALT;
+ flexcan_write(reg, &regs->mcr);
+
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+ struct flexcan_regs __iomem *regs = priv->base;
+ unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+ flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+ while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+ usleep_range(10, 20);
+
+ if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+ return -ETIMEDOUT;
+
+ return 0;
}
static int flexcan_get_berr_counter(const struct net_device *dev,
@@ -706,19 +796,14 @@ static int flexcan_chip_start(struct net_device *dev)
u32 reg_mcr, reg_ctrl;
/* enable module */
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ return err;
/* soft reset */
- flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
- udelay(10);
-
- reg_mcr = flexcan_read(&regs->mcr);
- if (reg_mcr & FLEXCAN_MCR_SOFTRST) {
- netdev_err(dev, "Failed to softreset can module (mcr=0x%08x)\n",
- reg_mcr);
- err = -ENODEV;
- goto out;
- }
+ err = flexcan_chip_softreset(priv);
+ if (err)
+ goto out_chip_disable;
flexcan_set_bittiming(dev);
@@ -785,16 +870,14 @@ static int flexcan_chip_start(struct net_device *dev)
if (priv->devtype_data->features & FLEXCAN_HAS_V10_FEATURES)
flexcan_write(0x0, &regs->rxfgmask);
- if (priv->reg_xceiver) {
- err = regulator_enable(priv->reg_xceiver);
- if (err)
- goto out;
- }
+ err = flexcan_transceiver_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* synchronize with the can bus */
- reg_mcr = flexcan_read(&regs->mcr);
- reg_mcr &= ~FLEXCAN_MCR_HALT;
- flexcan_write(reg_mcr, &regs->mcr);
+ err = flexcan_chip_unfreeze(priv);
+ if (err)
+ goto out_transceiver_disable;
priv->can.state = CAN_STATE_ERROR_ACTIVE;
@@ -807,7 +890,9 @@ static int flexcan_chip_start(struct net_device *dev)
return 0;
- out:
+ out_transceiver_disable:
+ flexcan_transceiver_disable(priv);
+ out_chip_disable:
flexcan_chip_disable(priv);
return err;
}
@@ -822,18 +907,17 @@ static void flexcan_chip_stop(struct net_device *dev)
{
struct flexcan_priv *priv = netdev_priv(dev);
struct flexcan_regs __iomem *regs = priv->base;
- u32 reg;
+
+ /* freeze + disable module */
+ flexcan_chip_freeze(priv);
+ flexcan_chip_disable(priv);
/* Disable all interrupts */
flexcan_write(0, &regs->imask1);
+ flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+ &regs->ctrl);
- /* Disable + halt module */
- reg = flexcan_read(&regs->mcr);
- reg |= FLEXCAN_MCR_MDIS | FLEXCAN_MCR_HALT;
- flexcan_write(reg, &regs->mcr);
-
- if (priv->reg_xceiver)
- regulator_disable(priv->reg_xceiver);
+ flexcan_transceiver_disable(priv);
priv->can.state = CAN_STATE_STOPPED;
return;
@@ -863,7 +947,7 @@ static int flexcan_open(struct net_device *dev)
/* start chip and queuing */
err = flexcan_chip_start(dev);
if (err)
- goto out_close;
+ goto out_free_irq;
can_led_event(dev, CAN_LED_EVENT_OPEN);
@@ -872,6 +956,8 @@ static int flexcan_open(struct net_device *dev)
return 0;
+ out_free_irq:
+ free_irq(dev->irq, dev);
out_close:
close_candev(dev);
out_disable_per:
@@ -942,12 +1028,16 @@ static int register_flexcandev(struct net_device *dev)
goto out_disable_ipg;
/* select "bus clock", chip must be disabled */
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ goto out_disable_per;
reg = flexcan_read(&regs->ctrl);
reg |= FLEXCAN_CTRL_CLK_SRC;
flexcan_write(reg, &regs->ctrl);
- flexcan_chip_enable(priv);
+ err = flexcan_chip_enable(priv);
+ if (err)
+ goto out_chip_disable;
/* set freeze, halt and activate FIFO, restrict register access */
reg = flexcan_read(&regs->mcr);
@@ -964,14 +1054,15 @@ static int register_flexcandev(struct net_device *dev)
if (!(reg & FLEXCAN_MCR_FEN)) {
netdev_err(dev, "Could not enable RX FIFO, unsupported core\n");
err = -ENODEV;
- goto out_disable_per;
+ goto out_chip_disable;
}
err = register_candev(dev);
- out_disable_per:
/* disable core and turn off clocks */
+ out_chip_disable:
flexcan_chip_disable(priv);
+ out_disable_per:
clk_disable_unprepare(priv->clk_per);
out_disable_ipg:
clk_disable_unprepare(priv->clk_ipg);
@@ -1101,9 +1192,10 @@ static int flexcan_probe(struct platform_device *pdev)
static int flexcan_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
+ struct flexcan_priv *priv = netdev_priv(dev);
unregister_flexcandev(dev);
-
+ netif_napi_del(&priv->napi);
free_candev(dev);
return 0;
@@ -1114,8 +1206,11 @@ static int flexcan_suspend(struct device *device)
{
struct net_device *dev = dev_get_drvdata(device);
struct flexcan_priv *priv = netdev_priv(dev);
+ int err;
- flexcan_chip_disable(priv);
+ err = flexcan_chip_disable(priv);
+ if (err)
+ return err;
if (netif_running(dev)) {
netif_stop_queue(dev);
@@ -1136,9 +1231,7 @@ static int flexcan_resume(struct device *device)
netif_device_attach(dev);
netif_start_queue(dev);
}
- flexcan_chip_enable(priv);
-
- return 0;
+ return flexcan_chip_enable(priv);
}
#endif /* CONFIG_PM_SLEEP */
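The flexcan_chip_{enable,disable,freeze,unfreeze,softreset} helpers above all replace the old fixed udelay(10) with the same bounded poll-for-acknowledge loop that reports -ETIMEDOUT. A generic sketch of that loop, using plain readl() instead of the driver's flexcan_read() wrapper (illustrative only; assumes <linux/io.h>, <linux/delay.h>, <linux/errno.h> and <linux/types.h>, and must not be called from atomic context because it sleeps):

/* Wait until (reg & mask) matches 'set', polling for at most timeout_us. */
static int wait_for_ack(void __iomem *reg, u32 mask, bool set,
                        unsigned int timeout_us)
{
        unsigned int loops = timeout_us / 10;

        while (loops--) {
                if (!!(readl(reg) & mask) == set)
                        return 0;
                usleep_range(10, 20);           /* same 10-20us step as the driver */
        }

        /* one final check after the last sleep */
        return !!(readl(reg) & mask) == set ? 0 : -ETIMEDOUT;
}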
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index e24e6690d672..71594e5676fd 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -18,6 +18,7 @@
#include <linux/netdevice.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/can/error.h>
#include <linux/mfd/janz.h>
@@ -1133,20 +1134,9 @@ static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
*/
static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
{
- struct sock *srcsk = skb->sk;
-
- if (atomic_read(&skb->users) != 1) {
- struct sk_buff *old_skb = skb;
-
- skb = skb_clone(old_skb, GFP_ATOMIC);
- kfree_skb(old_skb);
- if (!skb)
- return;
- } else {
- skb_orphan(skb);
- }
-
- skb->sk = srcsk;
+ skb = can_create_echo_skb(skb);
+ if (!skb)
+ return;
/* save this skb for tx interrupt echo handling */
skb_queue_tail(&mod->echoq, skb);
@@ -1322,7 +1312,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* process all communication messages */
while (true) {
- struct ican3_msg msg;
+ struct ican3_msg uninitialized_var(msg);
ret = ican3_recv_msg(mod, &msg);
if (ret)
break;
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 6c859bba8b65..e77d11049747 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -473,6 +473,8 @@ static int kvaser_usb_get_card_info(struct kvaser_usb *dev)
return err;
dev->nchannels = msg.u.cardinfo.nchannels;
+ if (dev->nchannels > MAX_NET_DEVICES)
+ return -EINVAL;
return 0;
}
diff --git a/drivers/net/can/vcan.c b/drivers/net/can/vcan.c
index 0a2a5ee79a17..4e94057ef5cf 100644
--- a/drivers/net/can/vcan.c
+++ b/drivers/net/can/vcan.c
@@ -46,6 +46,7 @@
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/dev.h>
+#include <linux/can/skb.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>
@@ -109,25 +110,23 @@ static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev)
stats->rx_packets++;
stats->rx_bytes += cfd->len;
}
- kfree_skb(skb);
+ consume_skb(skb);
return NETDEV_TX_OK;
}
/* perform standard echo handling for CAN network interfaces */
if (loop) {
- struct sock *srcsk = skb->sk;
- skb = skb_share_check(skb, GFP_ATOMIC);
+ skb = can_create_echo_skb(skb);
if (!skb)
return NETDEV_TX_OK;
/* receive with packet counting */
- skb->sk = srcsk;
vcan_rx(skb, dev);
} else {
/* no looped packets => no counting */
- kfree_skb(skb);
+ consume_skb(skb);
}
return NETDEV_TX_OK;
}
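Three drivers above (can/dev.c, janz-ican3.c and vcan.c) drop the same open-coded clone-or-orphan sequence in favour of can_create_echo_skb() from <linux/can/skb.h>. Judging by the removed lines, the consolidated logic amounts to the following sketch (a local stand-in named echo_skb_prepare, not a verbatim copy of the header):

static inline struct sk_buff *echo_skb_prepare(struct sk_buff *skb)
{
        struct sock *srcsk = skb->sk;

        if (atomic_read(&skb->users) != 1) {
                struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);

                kfree_skb(skb);                 /* drop the shared original */
                if (!nskb)
                        return NULL;
                skb = nskb;
        } else {
                skb_orphan(skb);                /* sole owner: just detach it */
        }

        skb->sk = srcsk;                        /* keep the originating socket */
        return skb;
}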
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 0f4241c6e97e..238ccea965c8 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -3294,7 +3294,6 @@ static int __init vortex_init(void)
static void __exit vortex_eisa_cleanup(void)
{
- struct vortex_private *vp;
void __iomem *ioaddr;
#ifdef CONFIG_EISA
@@ -3303,7 +3302,6 @@ static void __exit vortex_eisa_cleanup(void)
#endif
if (compaq_net_device) {
- vp = netdev_priv(compaq_net_device);
ioaddr = ioport_map(compaq_net_device->base_addr,
VORTEX_TOTAL_SIZE);
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 0cc21437478c..511f6eecd58b 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -929,6 +929,9 @@ static int emac_resume(struct platform_device *dev)
}
static const struct of_device_id emac_of_match[] = {
+ {.compatible = "allwinner,sun4i-a10-emac",},
+
+ /* Deprecated */
{.compatible = "allwinner,sun4i-emac",},
{},
};
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e92ffd6e1c15..380d24922049 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -1248,19 +1248,13 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
* shared register for the high 32 bits, so only a single, aligned,
* 4 GB physical address range can be used for descriptors.
*/
- if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
- !dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64))) {
+ if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
dev_dbg(&pdev->dev, "DMA to 64-BIT addresses\n");
} else {
- err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+ err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
- err = dma_set_coherent_mask(&pdev->dev,
- DMA_BIT_MASK(32));
- if (err) {
- dev_err(&pdev->dev,
- "No usable DMA config, aborting\n");
- goto out_pci_disable;
- }
+ dev_err(&pdev->dev, "No usable DMA config, aborting\n");
+ goto out_pci_disable;
}
}
@@ -1292,6 +1286,7 @@ static int alx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
alx = netdev_priv(netdev);
spin_lock_init(&alx->hw.mdio_lock);
spin_lock_init(&alx->irq_lock);
+ spin_lock_init(&alx->stats_lock);
alx->dev = netdev;
alx->hw.pdev = pdev;
alx->msg_enable = NETIF_MSG_LINK | NETIF_MSG_HW | NETIF_MSG_IFUP |
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index d5c2d3e912e5..422aab27ea1b 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -2436,7 +2436,7 @@ err_reset:
err_register:
err_sw_init:
err_eeprom:
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
err_init_netdev:
err_ioremap:
free_netdev(netdev);
@@ -2474,7 +2474,7 @@ static void atl1e_remove(struct pci_dev *pdev)
unregister_netdev(netdev);
atl1e_free_ring_resources(adapter);
atl1e_force_ps(&adapter->hw);
- iounmap(adapter->hw.hw_addr);
+ pci_iounmap(pdev, adapter->hw.hw_addr);
pci_release_regions(pdev);
free_netdev(netdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 1f7b5aa114fa..8a7bf7dad898 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -1484,6 +1484,10 @@ static int b44_open(struct net_device *dev)
add_timer(&bp->timer);
b44_enable_ints(bp);
+
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_start(bp->phydev);
+
netif_start_queue(dev);
out:
return err;
@@ -1646,6 +1650,9 @@ static int b44_close(struct net_device *dev)
netif_stop_queue(dev);
+ if (bp->flags & B44_FLAG_EXTERNAL_PHY)
+ phy_stop(bp->phydev);
+
napi_disable(&bp->napi);
del_timer_sync(&bp->timer);
@@ -2222,7 +2229,12 @@ static void b44_adjust_link(struct net_device *dev)
}
if (status_changed) {
- b44_check_phy(bp);
+ u32 val = br32(bp, B44_TX_CTRL);
+ if (bp->flags & B44_FLAG_FULL_DUPLEX)
+ val |= TX_CTRL_DUPLEX;
+ else
+ val &= ~TX_CTRL_DUPLEX;
+ bw32(bp, B44_TX_CTRL, val);
phy_print_status(phydev);
}
}
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index 9d2dedadf2df..6c9e1c9bdeb8 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -85,7 +85,7 @@ MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
static int disable_msi = 0;
-module_param(disable_msi, int, 0);
+module_param(disable_msi, int, S_IRUGO);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
typedef enum {
@@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
bp->fw_wr_seq++;
msg_data |= bp->fw_wr_seq;
+ bp->fw_last_msg = msg_data;
bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
@@ -4000,8 +4001,23 @@ bnx2_setup_wol(struct bnx2 *bp)
wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
}
- if (!(bp->flags & BNX2_FLAG_NO_WOL))
- bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0);
+ if (!(bp->flags & BNX2_FLAG_NO_WOL)) {
+ u32 val;
+
+ wol_msg |= BNX2_DRV_MSG_DATA_WAIT3;
+ if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) {
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ return;
+ }
+ /* Tell firmware not to power down the PHY yet, otherwise
+ * the chip will take a long time to respond to MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE,
+ val | BNX2_PORT_FEATURE_ASF_ENABLED);
+ bnx2_fw_sync(bp, wol_msg, 1, 0);
+ bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val);
+ }
}
@@ -4033,9 +4049,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
if (bp->wol)
pci_set_power_state(bp->pdev, PCI_D3hot);
- } else {
- pci_set_power_state(bp->pdev, PCI_D3hot);
+ break;
+
+ }
+ if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) {
+ u32 val;
+
+ /* Tell firmware not to power down the PHY yet,
+ * otherwise the other port may not respond to
+ * MMIO reads.
+ */
+ val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
+ val &= ~BNX2_CONDITION_PM_STATE_MASK;
+ val |= BNX2_CONDITION_PM_STATE_UNPREP;
+ bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val);
}
+ pci_set_power_state(bp->pdev, PCI_D3hot);
/* No more memory access after this point until
* device is brought back to D0.
diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h
index f1cf2c44e7ed..e341bc366fa5 100644
--- a/drivers/net/ethernet/broadcom/bnx2.h
+++ b/drivers/net/ethernet/broadcom/bnx2.h
@@ -6900,6 +6900,7 @@ struct bnx2 {
u16 fw_wr_seq;
u16 fw_drv_pulse_wr_seq;
+ u32 fw_last_msg;
int rx_max_ring;
int rx_ring_size;
@@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file {
#define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000
#define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000
#define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000
+#define BNX2_CONDITION_PM_STATE_MASK 0x00030000
+#define BNX2_CONDITION_PM_STATE_FULL 0x00030000
+#define BNX2_CONDITION_PM_STATE_PREP 0x00020000
+#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000
#define BNX2_BC_STATE_DEBUG_CMD 0x1dc
#define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 9d7419e0390b..dbcff509dc3f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -1873,7 +1873,7 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
}
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct bnx2x *bp = netdev_priv(dev);
@@ -1895,7 +1895,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
}
/* select a non-FCoE queue */
- return __netdev_pick_tx(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
+ return fallback(dev, skb) % BNX2X_NUM_ETH_QUEUES(bp);
}
void bnx2x_set_num_queues(struct bnx2x *bp)
@@ -3875,7 +3875,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
xmit_type);
}
- /* Add the macs to the parsing BD this is a vf */
+ /* Add the macs to the parsing BD if this is a vf or if
+ * Tx Switching is enabled.
+ */
if (IS_VF(bp)) {
/* override GRE parameters in BD */
bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi,
@@ -3887,6 +3889,11 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
&pbd_e2->data.mac_addr.dst_mid,
&pbd_e2->data.mac_addr.dst_lo,
eth->h_dest);
+ } else if (bp->flags & TX_SWITCHING) {
+ bnx2x_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi,
+ &pbd_e2->data.mac_addr.dst_mid,
+ &pbd_e2->data.mac_addr.dst_lo,
+ eth->h_dest);
}
SET_FLAG(pbd_e2_parsing_data,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index 17d1689aec6b..a89a40f88c25 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -496,7 +496,7 @@ int bnx2x_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos);
/* select_queue callback */
u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
struct bnx2x_fastpath *fp,
@@ -936,7 +936,7 @@ static inline int bnx2x_func_start(struct bnx2x *bp)
else /* CHIP_IS_E1X */
start_params->network_cos_mode = FW_WRR;
- start_params->gre_tunnel_mode = IPGRE_TUNNEL;
+ start_params->gre_tunnel_mode = L2GRE_TUNNEL;
start_params->gre_tunnel_rss = GRE_INNER_HEADERS_RSS;
return bnx2x_func_state_change(bp, &func_params);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index c9c445e7b4a5..7d4382286457 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -95,29 +95,29 @@ MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);
int bnx2x_num_queues;
-module_param_named(num_queues, bnx2x_num_queues, int, 0);
+module_param_named(num_queues, bnx2x_num_queues, int, S_IRUGO);
MODULE_PARM_DESC(num_queues,
" Set number of queues (default is as a number of CPUs)");
static int disable_tpa;
-module_param(disable_tpa, int, 0);
+module_param(disable_tpa, int, S_IRUGO);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
static int int_mode;
-module_param(int_mode, int, 0);
+module_param(int_mode, int, S_IRUGO);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
"(1 INT#x; 2 MSI)");
static int dropless_fc;
-module_param(dropless_fc, int, 0);
+module_param(dropless_fc, int, S_IRUGO);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
static int mrrs = -1;
-module_param(mrrs, int, 0);
+module_param(mrrs, int, S_IRUGO);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
static int debug;
-module_param(debug, int, 0);
+module_param(debug, int, S_IRUGO);
MODULE_PARM_DESC(debug, " Default debug msglevel");
struct workqueue_struct *bnx2x_wq;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index aec5ef2ed7ce..e42f48df6e94 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1446,12 +1446,12 @@ static void bnx2x_vf_igu_reset(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (vf->cfg_flags & VF_CFG_INT_SIMD)
val |= IGU_VF_CONF_SINGLE_ISR_EN;
val &= ~IGU_VF_CONF_PARENT_MASK;
- val |= BP_FUNC(bp) << IGU_VF_CONF_PARENT_SHIFT; /* parent PF */
+ val |= (BP_ABS_FUNC(bp) >> 1) << IGU_VF_CONF_PARENT_SHIFT;
REG_WR(bp, IGU_REG_VF_CONFIGURATION, val);
DP(BNX2X_MSG_IOV,
- "value in IGU_REG_VF_CONFIGURATION of vf %d after write %x\n",
- vf->abs_vfid, REG_RD(bp, IGU_REG_VF_CONFIGURATION));
+ "value in IGU_REG_VF_CONFIGURATION of vf %d after write is 0x%08x\n",
+ vf->abs_vfid, val);
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index fcf9105a5476..09f3fefcbf9c 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -1,6 +1,6 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -342,7 +342,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
while (retry < 3) {
rc = 0;
rcu_read_lock();
- ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
+ ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
if (ulp_ops)
rc = ulp_ops->iscsi_nl_send_msg(
cp->ulp_handle[CNIC_ULP_ISCSI],
@@ -726,7 +726,7 @@ static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
for (i = 0; i < dma->num_pages; i++) {
if (dma->pg_arr[i]) {
- dma_free_coherent(&dev->pcidev->dev, BNX2_PAGE_SIZE,
+ dma_free_coherent(&dev->pcidev->dev, CNIC_PAGE_SIZE,
dma->pg_arr[i], dma->pg_map_arr[i]);
dma->pg_arr[i] = NULL;
}
@@ -785,7 +785,7 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
for (i = 0; i < pages; i++) {
dma->pg_arr[i] = dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&dma->pg_map_arr[i],
GFP_ATOMIC);
if (dma->pg_arr[i] == NULL)
@@ -794,8 +794,8 @@ static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
if (!use_pg_tbl)
return 0;
- dma->pgtbl_size = ((pages * 8) + BNX2_PAGE_SIZE - 1) &
- ~(BNX2_PAGE_SIZE - 1);
+ dma->pgtbl_size = ((pages * 8) + CNIC_PAGE_SIZE - 1) &
+ ~(CNIC_PAGE_SIZE - 1);
dma->pgtbl = dma_alloc_coherent(&dev->pcidev->dev, dma->pgtbl_size,
&dma->pgtbl_map, GFP_ATOMIC);
if (dma->pgtbl == NULL)
@@ -900,8 +900,8 @@ static int cnic_alloc_context(struct cnic_dev *dev)
if (BNX2_CHIP(cp) == BNX2_CHIP_5709) {
int i, k, arr_size;
- cp->ctx_blk_size = BNX2_PAGE_SIZE;
- cp->cids_per_blk = BNX2_PAGE_SIZE / 128;
+ cp->ctx_blk_size = CNIC_PAGE_SIZE;
+ cp->cids_per_blk = CNIC_PAGE_SIZE / 128;
arr_size = BNX2_MAX_CID / cp->cids_per_blk *
sizeof(struct cnic_ctx);
cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
@@ -933,7 +933,7 @@ static int cnic_alloc_context(struct cnic_dev *dev)
for (i = 0; i < cp->ctx_blks; i++) {
cp->ctx_arr[i].ctx =
dma_alloc_coherent(&dev->pcidev->dev,
- BNX2_PAGE_SIZE,
+ CNIC_PAGE_SIZE,
&cp->ctx_arr[i].mapping,
GFP_KERNEL);
if (cp->ctx_arr[i].ctx == NULL)
@@ -1013,7 +1013,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
if (udev->l2_ring)
return 0;
- udev->l2_ring_size = pages * BNX2_PAGE_SIZE;
+ udev->l2_ring_size = pages * CNIC_PAGE_SIZE;
udev->l2_ring = dma_alloc_coherent(&udev->pdev->dev, udev->l2_ring_size,
&udev->l2_ring_map,
GFP_KERNEL | __GFP_COMP);
@@ -1021,7 +1021,7 @@ static int __cnic_alloc_uio_rings(struct cnic_uio_dev *udev, int pages)
return -ENOMEM;
udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
- udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
+ udev->l2_buf_size = CNIC_PAGE_ALIGN(udev->l2_buf_size);
udev->l2_buf = dma_alloc_coherent(&udev->pdev->dev, udev->l2_buf_size,
&udev->l2_buf_map,
GFP_KERNEL | __GFP_COMP);
@@ -1102,7 +1102,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[0].size = MB_GET_CID_ADDR(TX_TSS_CID +
TX_MAX_TSS_RINGS + 1);
uinfo->mem[1].addr = (unsigned long) cp->status_blk.gen &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
else
@@ -1113,7 +1113,7 @@ static int cnic_init_uio(struct cnic_dev *dev)
uinfo->mem[0].size = pci_resource_len(dev->pcidev, 0);
uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
- PAGE_MASK;
+ CNIC_PAGE_MASK;
uinfo->mem[1].size = sizeof(*cp->bnx2x_def_status_blk);
uinfo->name = "bnx2x_cnic";
@@ -1267,14 +1267,14 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
- pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
- PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
+ CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
if (ret)
return -ENOMEM;
- n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
+ n = CNIC_PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
for (i = 0, j = 0; i < cp->max_cid_space; i++) {
long off = CNIC_KWQ16_DATA_SIZE * (i % n);
@@ -1296,7 +1296,7 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
goto error;
}
- pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
if (ret)
goto error;
@@ -1466,8 +1466,8 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
BNX2X_ISCSI_R2TQE_SIZE;
cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
- hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
+ hq_bds = pages * (CNIC_PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
cp->num_cqs = req1->num_cqs;
if (!dev->max_iscsi_conn)
@@ -1477,9 +1477,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(pfid),
req1->rq_num_wqes);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
- TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1489,9 +1489,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(pfid),
req1->rq_buffer_size);
CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_USTRORM_INTMEM +
- USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_USTRORM_INTMEM +
USTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1504,9 +1504,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
/* init Xstorm RAM */
CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
- XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1519,9 +1519,9 @@ static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
/* init Cstorm RAM */
CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(pfid),
- PAGE_SIZE);
+ CNIC_PAGE_SIZE);
CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
- CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), PAGE_SHIFT);
+ CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(pfid), CNIC_PAGE_BITS);
CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(pfid),
req1->num_tasks_per_conn);
@@ -1623,18 +1623,18 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
}
ctx->cid = cid;
- pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->task_array_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->r2tq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
if (ret)
goto error;
- pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
+ pages = CNIC_PAGE_ALIGN(cp->hq_size) / CNIC_PAGE_SIZE;
ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
if (ret)
goto error;
@@ -1760,7 +1760,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
/* TSTORM requires the base address of RQ DB & not PTE */
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
- req2->rq_page_table_addr_lo & PAGE_MASK;
+ req2->rq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
req2->rq_page_table_addr_hi;
ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
@@ -1842,7 +1842,7 @@ static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
/* CSTORM and USTORM initialization is different, CSTORM requires
* CQ DB base & not PTE addr */
ictx->cstorm_st_context.cq_db_base.lo =
- req1->cq_page_table_addr_lo & PAGE_MASK;
+ req1->cq_page_table_addr_lo & CNIC_PAGE_MASK;
ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
@@ -2911,7 +2911,7 @@ static int cnic_l2_completion(struct cnic_local *cp)
u16 hw_cons, sw_cons;
struct cnic_uio_dev *udev = cp->udev;
union eth_rx_cqe *cqe, *cqe_ring = (union eth_rx_cqe *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
u32 cmd;
int comp = 0;
@@ -3244,7 +3244,8 @@ static int cnic_copy_ulp_stats(struct cnic_dev *dev, int ulp_type)
int rc;
mutex_lock(&cnic_lock);
- ulp_ops = cnic_ulp_tbl_prot(ulp_type);
+ ulp_ops = rcu_dereference_protected(cp->ulp_ops[ulp_type],
+ lockdep_is_held(&cnic_lock));
if (ulp_ops && ulp_ops->cnic_get_stats)
rc = ulp_ops->cnic_get_stats(cp->ulp_handle[ulp_type]);
else
@@ -4384,7 +4385,7 @@ static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
u32 val;
- memset(cp->ctx_arr[i].ctx, 0, BNX2_PAGE_SIZE);
+ memset(cp->ctx_arr[i].ctx, 0, CNIC_PAGE_SIZE);
CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
(cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
@@ -4628,7 +4629,7 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
val = BNX2_L2CTX_L2_STATUSB_NUM(sb_id);
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
- rxbd = udev->l2_ring + BNX2_PAGE_SIZE;
+ rxbd = udev->l2_ring + CNIC_PAGE_SIZE;
for (i = 0; i < BNX2_MAX_RX_DESC_CNT; i++, rxbd++) {
dma_addr_t buf_map;
int n = (i % cp->l2_rx_ring_size) + 1;
@@ -4639,11 +4640,11 @@ static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
rxbd->rx_bd_haddr_hi = val;
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
rxbd->rx_bd_haddr_lo = val;
@@ -4709,10 +4710,10 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
val = CNIC_RD(dev, BNX2_MQ_CONFIG);
val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
- if (BNX2_PAGE_BITS > 12)
+ if (CNIC_PAGE_BITS > 12)
val |= (12 - 8) << 4;
else
- val |= (BNX2_PAGE_BITS - 8) << 4;
+ val |= (CNIC_PAGE_BITS - 8) << 4;
CNIC_WR(dev, BNX2_MQ_CONFIG, val);
@@ -4742,13 +4743,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel work queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
cnic_ctx_wr(dev, kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
@@ -4768,13 +4769,13 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
/* Initialize the kernel complete queue context. */
val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
- (BNX2_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+ (CNIC_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_TYPE, val);
- val = (BNX2_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+ val = (CNIC_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
- val = ((BNX2_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+ val = ((CNIC_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
cnic_ctx_wr(dev, kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
val = (u32) ((u64) cp->kcq1.dma.pgtbl_map >> 32);
@@ -4918,7 +4919,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
u32 cli = cp->ethdev->iscsi_l2_client_id;
u32 val;
- memset(txbd, 0, BNX2_PAGE_SIZE);
+ memset(txbd, 0, CNIC_PAGE_SIZE);
buf_map = udev->l2_buf_map;
for (i = 0; i < BNX2_MAX_TX_DESC_CNT; i += 3, txbd += 3) {
@@ -4978,9 +4979,9 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
struct bnx2x *bp = netdev_priv(dev->netdev);
struct cnic_uio_dev *udev = cp->udev;
struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (udev->l2_ring +
- BNX2_PAGE_SIZE);
+ CNIC_PAGE_SIZE);
struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
- (udev->l2_ring + (2 * BNX2_PAGE_SIZE));
+ (udev->l2_ring + (2 * CNIC_PAGE_SIZE));
struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
int i;
u32 cli = cp->ethdev->iscsi_l2_client_id;
@@ -5004,20 +5005,20 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
}
- val = (u64) (ring_map + BNX2_PAGE_SIZE) >> 32;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) >> 32;
rxbd->addr_hi = cpu_to_le32(val);
data->rx.bd_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + BNX2_PAGE_SIZE) & 0xffffffff;
+ val = (u64) (ring_map + CNIC_PAGE_SIZE) & 0xffffffff;
rxbd->addr_lo = cpu_to_le32(val);
data->rx.bd_page_base.lo = cpu_to_le32(val);
rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) >> 32;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) >> 32;
rxcqe->addr_hi = cpu_to_le32(val);
data->rx.cqe_page_base.hi = cpu_to_le32(val);
- val = (u64) (ring_map + (2 * BNX2_PAGE_SIZE)) & 0xffffffff;
+ val = (u64) (ring_map + (2 * CNIC_PAGE_SIZE)) & 0xffffffff;
rxcqe->addr_lo = cpu_to_le32(val);
data->rx.cqe_page_base.lo = cpu_to_le32(val);
@@ -5265,8 +5266,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
msleep(10);
}
clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
- rx_ring = udev->l2_ring + BNX2_PAGE_SIZE;
- memset(rx_ring, 0, BNX2_PAGE_SIZE);
+ rx_ring = udev->l2_ring + CNIC_PAGE_SIZE;
+ memset(rx_ring, 0, CNIC_PAGE_SIZE);
}
static int cnic_register_netdev(struct cnic_dev *dev)
diff --git a/drivers/net/ethernet/broadcom/cnic.h b/drivers/net/ethernet/broadcom/cnic.h
index 0d6b13f854d9..d535ae4228b4 100644
--- a/drivers/net/ethernet/broadcom/cnic.h
+++ b/drivers/net/ethernet/broadcom/cnic.h
@@ -1,6 +1,6 @@
/* cnic.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index 95a8e4b11c9f..dcbca6997e8f 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -1,7 +1,7 @@
/* cnic.c: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/drivers/net/ethernet/broadcom/cnic_if.h b/drivers/net/ethernet/broadcom/cnic_if.h
index 8cf6b1926069..5f4d5573a73d 100644
--- a/drivers/net/ethernet/broadcom/cnic_if.h
+++ b/drivers/net/ethernet/broadcom/cnic_if.h
@@ -1,6 +1,6 @@
/* cnic_if.h: Broadcom CNIC core network driver.
*
- * Copyright (c) 2006-2013 Broadcom Corporation
+ * Copyright (c) 2006-2014 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -14,8 +14,8 @@
#include "bnx2x/bnx2x_mfw_req.h"
-#define CNIC_MODULE_VERSION "2.5.19"
-#define CNIC_MODULE_RELDATE "December 19, 2013"
+#define CNIC_MODULE_VERSION "2.5.20"
+#define CNIC_MODULE_RELDATE "March 14, 2014"
#define CNIC_ULP_RDMA 0
#define CNIC_ULP_ISCSI 1
@@ -24,6 +24,16 @@
#define MAX_CNIC_ULP_TYPE_EXT 3
#define MAX_CNIC_ULP_TYPE 4
+/* Use CPU native page size up to 16K for cnic ring sizes. */
+#if (PAGE_SHIFT > 14)
+#define CNIC_PAGE_BITS 14
+#else
+#define CNIC_PAGE_BITS PAGE_SHIFT
+#endif
+#define CNIC_PAGE_SIZE (1 << (CNIC_PAGE_BITS))
+#define CNIC_PAGE_ALIGN(addr) ALIGN(addr, CNIC_PAGE_SIZE)
+#define CNIC_PAGE_MASK (~((CNIC_PAGE_SIZE) - 1))
+
struct kwqe {
u32 kwqe_op_flag;
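The new CNIC_PAGE_* macros above decouple the cnic ring sizing from PAGE_SIZE by capping it at 16K, so a 64K-page kernel still presents a page size the hardware supports while a 4K-page kernel is unchanged. A quick stand-alone check of what the definitions expand to (this mirrors the macros with a simplified ALIGN() and a pretend PAGE_SHIFT rather than including kernel headers):

#include <stdio.h>

#define ALIGN(x, a)          (((x) + (a) - 1) & ~((a) - 1))

#define PAGE_SHIFT           16                         /* pretend 64K pages */
#define CNIC_PAGE_BITS       (PAGE_SHIFT > 14 ? 14 : PAGE_SHIFT)
#define CNIC_PAGE_SIZE       (1 << (CNIC_PAGE_BITS))
#define CNIC_PAGE_ALIGN(a)   ALIGN(a, CNIC_PAGE_SIZE)
#define CNIC_PAGE_MASK       (~((CNIC_PAGE_SIZE) - 1))

int main(void)
{
        printf("CNIC_PAGE_SIZE = %d\n", CNIC_PAGE_SIZE);                 /* 16384 */
        printf("align(20000)   = %d\n", CNIC_PAGE_ALIGN(20000));         /* 32768 */
        printf("mask           = 0x%x\n", (unsigned)CNIC_PAGE_MASK);     /* 0xffffc000 */
        return 0;
}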
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index e2ca03e23dc1..3b6d0ba86c71 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -2609,13 +2609,14 @@ static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
tg3_writephy(tp, MII_CTRL1000, phy9_orig);
- if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
- reg32 &= ~0x3000;
- tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
- } else if (!err)
- err = -EBUSY;
+ err = tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
+ if (err)
+ return err;
- return err;
+ reg32 &= ~0x3000;
+ tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
+
+ return 0;
}
static void tg3_carrier_off(struct tg3 *tp)
@@ -6842,8 +6843,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
work_mask |= opaque_key;
- if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
- (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
+ if (desc->err_vlan & RXD_ERR_MASK) {
drop_it:
tg3_recycle_rx(tnapi, tpr, opaque_key,
desc_idx, *post_ptr);
@@ -14113,12 +14113,12 @@ static int tg3_change_mtu(struct net_device *dev, int new_mtu)
tg3_netif_stop(tp);
+ tg3_set_mtu(dev, tp, new_mtu);
+
tg3_full_lock(tp, 1);
tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- tg3_set_mtu(dev, tp, new_mtu);
-
/* Reset PHY, otherwise the read DMA engine will be in a mode that
* breaks all requests to 256 bytes.
*/
diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
index ef472385bce4..04321e5a356e 100644
--- a/drivers/net/ethernet/broadcom/tg3.h
+++ b/drivers/net/ethernet/broadcom/tg3.h
@@ -2608,7 +2608,11 @@ struct tg3_rx_buffer_desc {
#define RXD_ERR_TOO_SMALL 0x00400000
#define RXD_ERR_NO_RESOURCES 0x00800000
#define RXD_ERR_HUGE_FRAME 0x01000000
-#define RXD_ERR_MASK 0xffff0000
+
+#define RXD_ERR_MASK (RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
+ RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
+ RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
+ RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)
u32 reserved;
u32 opaque;
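
Editor's note: rebuilding RXD_ERR_MASK from the named error bits removes RXD_ERR_ODD_NIBBLE_RCVD_MII from the fatal set, which is why the rx path above can drop its special-case comparison. A standalone sketch; bit values not visible in the hunk are assumptions taken to match the usual tg3 layout:

#include <stdio.h>
#include <stdint.h>

#define RXD_ERR_BAD_CRC			0x00010000	/* assumed */
#define RXD_ERR_COLLISION		0x00020000	/* assumed */
#define RXD_ERR_LINK_LOST		0x00040000	/* assumed */
#define RXD_ERR_PHY_DECODE		0x00080000	/* assumed */
#define RXD_ERR_ODD_NIBBLE_RCVD_MII	0x00100000	/* assumed */
#define RXD_ERR_MAC_ABRT		0x00200000	/* assumed */
#define RXD_ERR_TOO_SMALL		0x00400000
#define RXD_ERR_NO_RESOURCES		0x00800000
#define RXD_ERR_HUGE_FRAME		0x01000000

#define RXD_ERR_MASK	(RXD_ERR_BAD_CRC | RXD_ERR_COLLISION | \
			 RXD_ERR_LINK_LOST | RXD_ERR_PHY_DECODE | \
			 RXD_ERR_MAC_ABRT | RXD_ERR_TOO_SMALL | \
			 RXD_ERR_NO_RESOURCES | RXD_ERR_HUGE_FRAME)

int main(void)
{
	uint32_t err_vlan = RXD_ERR_ODD_NIBBLE_RCVD_MII;

	/* Odd-nibble errors no longer hit the mask, so no special case. */
	printf("drop? %s\n", (err_vlan & RXD_ERR_MASK) ? "yes" : "no");	/* no */
	return 0;
}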
diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
index 1803c3959044..354ae9792bad 100644
--- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c
+++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c
@@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar)
while (!bfa_raw_sem_get(bar)) {
if (--n <= 0)
return BFA_STATUS_BADFLASH;
- udelay(10000);
+ mdelay(10);
}
return BFA_STATUS_OK;
}
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index cf64f3d0b60d..4ad1187e82fb 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -707,7 +707,8 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
else
skb_checksum_none_assert(skb);
- if (flags & BNA_CQ_EF_VLAN)
+ if ((flags & BNA_CQ_EF_VLAN) &&
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX))
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), ntohs(cmpl->vlan_tag));
if (BNAD_RXBUF_IS_SK_BUFF(unmap_q->type))
@@ -2094,7 +2095,9 @@ bnad_init_rx_config(struct bnad *bnad, struct bna_rx_config *rx_config)
rx_config->q1_buf_size = BFI_SMALL_RXBUF_SIZE;
}
- rx_config->vlan_strip_status = BNA_STATUS_T_ENABLED;
+ rx_config->vlan_strip_status =
+ (bnad->netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ?
+ BNA_STATUS_T_ENABLED : BNA_STATUS_T_DISABLED;
}
static void
@@ -3245,11 +3248,6 @@ bnad_set_rx_mode(struct net_device *netdev)
BNA_RXMODE_ALLMULTI;
bna_rx_mode_set(bnad->rx_info[0].rx, new_mode, mode_mask, NULL);
- if (bnad->cfg_flags & BNAD_CF_PROMISC)
- bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
- else
- bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
-
spin_unlock_irqrestore(&bnad->bna_lock, flags);
}
@@ -3374,6 +3372,27 @@ bnad_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
return 0;
}
+static int bnad_set_features(struct net_device *dev, netdev_features_t features)
+{
+ struct bnad *bnad = netdev_priv(dev);
+ netdev_features_t changed = features ^ dev->features;
+
+ if ((changed & NETIF_F_HW_VLAN_CTAG_RX) && netif_running(dev)) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&bnad->bna_lock, flags);
+
+ if (features & NETIF_F_HW_VLAN_CTAG_RX)
+ bna_rx_vlan_strip_enable(bnad->rx_info[0].rx);
+ else
+ bna_rx_vlan_strip_disable(bnad->rx_info[0].rx);
+
+ spin_unlock_irqrestore(&bnad->bna_lock, flags);
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void
bnad_netpoll(struct net_device *netdev)
@@ -3421,6 +3440,7 @@ static const struct net_device_ops bnad_netdev_ops = {
.ndo_change_mtu = bnad_change_mtu,
.ndo_vlan_rx_add_vid = bnad_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = bnad_vlan_rx_kill_vid,
+ .ndo_set_features = bnad_set_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = bnad_netpoll
#endif
@@ -3433,14 +3453,14 @@ bnad_netdev_init(struct bnad *bnad, bool using_dac)
netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
- NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX;
+ NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_CTAG_RX;
netdev->vlan_features = NETIF_F_SG | NETIF_F_HIGHDMA |
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_TSO | NETIF_F_TSO6;
- netdev->features |= netdev->hw_features |
- NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_CTAG_FILTER;
+ netdev->features |= netdev->hw_features | NETIF_F_HW_VLAN_CTAG_FILTER;
if (using_dac)
netdev->features |= NETIF_F_HIGHDMA;
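
Editor's note: the bnad hunks above move VLAN-strip control from the rx-mode path into a new .ndo_set_features hook, so stripping simply follows NETIF_F_HW_VLAN_CTAG_RX. A standalone sketch of the XOR-old-against-new pattern the hook uses; the flag value and names here are illustrative stand-ins, not the driver's:

#include <stdio.h>
#include <stdint.h>

#define HW_VLAN_CTAG_RX (1ULL << 0)	/* arbitrary illustrative bit */

static void set_features(uint64_t *dev_features, uint64_t wanted)
{
	uint64_t changed = wanted ^ *dev_features;

	/* Act only on bits that actually flipped. */
	if (changed & HW_VLAN_CTAG_RX)
		printf("vlan stripping -> %s\n",
		       (wanted & HW_VLAN_CTAG_RX) ? "enable" : "disable");

	*dev_features = wanted;
}

int main(void)
{
	uint64_t features = HW_VLAN_CTAG_RX;

	set_features(&features, 0);			/* disable */
	set_features(&features, HW_VLAN_CTAG_RX);	/* re-enable */
	return 0;
}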
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 3190d38e16fb..d0c38e01e99f 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp)
"Unable to allocate sk_buff\n");
break;
}
- bp->rx_skbuff[entry] = skb;
/* now fill corresponding descriptor entry */
paddr = dma_map_single(&bp->pdev->dev, skb->data,
bp->rx_buffer_size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+ dev_kfree_skb(skb);
+ break;
+ }
+
+ bp->rx_skbuff[entry] = skb;
if (entry == RX_RING_SIZE - 1)
paddr |= MACB_BIT(RX_WRAP);
@@ -725,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget)
skb_put(skb, len);
addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
dma_unmap_single(&bp->pdev->dev, addr,
- len, DMA_FROM_DEVICE);
+ bp->rx_buffer_size, DMA_FROM_DEVICE);
skb->protocol = eth_type_trans(skb, bp->dev);
skb_checksum_none_assert(skb);
@@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
}
entry = macb_tx_ring_wrap(bp->tx_head);
- bp->tx_head++;
netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
mapping = dma_map_single(&bp->pdev->dev, skb->data,
len, DMA_TO_DEVICE);
+ if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+ kfree_skb(skb);
+ goto unlock;
+ }
+ bp->tx_head++;
tx_skb = &bp->tx_skb[entry];
tx_skb->skb = skb;
tx_skb->mapping = mapping;
@@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
netif_stop_queue(dev);
+unlock:
spin_unlock_irqrestore(&bp->lock, flags);
return NETDEV_TX_OK;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 43ab35fea48d..34e2488767d9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -6179,6 +6179,7 @@ static struct pci_driver cxgb4_driver = {
.id_table = cxgb4_pci_tbl,
.probe = init_one,
.remove = remove_one,
+ .shutdown = remove_one,
.err_handler = &cxgb4_eeh,
};
diff --git a/drivers/net/ethernet/dec/tulip/tulip_core.c b/drivers/net/ethernet/dec/tulip/tulip_core.c
index add05f14b38b..1642de78aac8 100644
--- a/drivers/net/ethernet/dec/tulip/tulip_core.c
+++ b/drivers/net/ethernet/dec/tulip/tulip_core.c
@@ -1939,6 +1939,7 @@ static void tulip_remove_one(struct pci_dev *pdev)
pci_iounmap(pdev, tp->base_addr);
free_netdev (dev);
pci_release_regions (pdev);
+ pci_disable_device(pdev);
/* pci_power_off (pdev, -1); */
}
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 8d09615da585..05529e273050 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -350,11 +350,13 @@ struct be_drv_stats {
u32 roce_drops_crc;
};
+/* A vlan-id of 0xFFFF must be used to clear transparent vlan-tagging */
+#define BE_RESET_VLAN_TAG_ID 0xFFFF
+
struct be_vf_cfg {
unsigned char mac_addr[ETH_ALEN];
int if_handle;
int pmac_id;
- u16 def_vid;
u16 vlan_tag;
u32 tx_rate;
};
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 04ac9c6a0d39..36c80612e21a 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -913,24 +913,14 @@ static int be_ipv6_tx_stall_chk(struct be_adapter *adapter,
return BE3_chip(adapter) && be_ipv6_exthdr_check(skb);
}
-static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
- struct sk_buff *skb,
- bool *skip_hw_vlan)
+static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
{
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
unsigned int eth_hdr_len;
struct iphdr *ip;
- /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or less
- * may cause a transmit stall on that port. So the work-around is to
- * pad short packets (<= 32 bytes) to a 36-byte length.
- */
- if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
- if (skb_padto(skb, 36))
- goto tx_drop;
- skb->len = 36;
- }
-
/* For padded packets, BE HW modifies tot_len field in IP header
* incorrectly when VLAN tag is inserted by HW.
* For padded packets, Lancer computes incorrect checksum.
@@ -959,7 +949,7 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
vlan_tx_tag_present(skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
/* HW may lockup when VLAN HW tagging is requested on
@@ -981,15 +971,39 @@ static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
be_vlan_tag_tx_chk(adapter, skb)) {
skb = be_insert_vlan_in_pkt(adapter, skb, skip_hw_vlan);
if (unlikely(!skb))
- goto tx_drop;
+ goto err;
}
return skb;
tx_drop:
dev_kfree_skb_any(skb);
+err:
return NULL;
}
+static struct sk_buff *be_xmit_workarounds(struct be_adapter *adapter,
+ struct sk_buff *skb,
+ bool *skip_hw_vlan)
+{
+ /* Lancer, SH-R ASICs have a bug wherein Packets that are 32 bytes or
+ * less may cause a transmit stall on that port. So the work-around is
+ * to pad short packets (<= 32 bytes) to a 36-byte length.
+ */
+ if (unlikely(!BEx_chip(adapter) && skb->len <= 32)) {
+ if (skb_padto(skb, 36))
+ return NULL;
+ skb->len = 36;
+ }
+
+ if (BEx_chip(adapter) || lancer_chip(adapter)) {
+ skb = be_lancer_xmit_workarounds(adapter, skb, skip_hw_vlan);
+ if (!skb)
+ return NULL;
+ }
+
+ return skb;
+}
+
static netdev_tx_t be_xmit(struct sk_buff *skb, struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1157,6 +1171,14 @@ ret:
return status;
}
+static void be_clear_promisc(struct be_adapter *adapter)
+{
+ adapter->promiscuous = false;
+ adapter->flags &= ~BE_FLAGS_VLAN_PROMISC;
+
+ be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
+}
+
static void be_set_rx_mode(struct net_device *netdev)
{
struct be_adapter *adapter = netdev_priv(netdev);
@@ -1170,9 +1192,7 @@ static void be_set_rx_mode(struct net_device *netdev)
/* BE was previously in promiscuous mode; disable it */
if (adapter->promiscuous) {
- adapter->promiscuous = false;
- be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
-
+ be_clear_promisc(adapter);
if (adapter->vlans_added)
be_vid_config(adapter);
}
@@ -1287,24 +1307,20 @@ static int be_set_vf_vlan(struct net_device *netdev,
if (vlan || qos) {
vlan |= qos << VLAN_PRIO_SHIFT;
- if (vf_cfg->vlan_tag != vlan) {
- /* If this is new value, program it. Else skip. */
- vf_cfg->vlan_tag = vlan;
+ if (vf_cfg->vlan_tag != vlan)
status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
vf_cfg->if_handle, 0);
- }
} else {
/* Reset Transparent Vlan Tagging. */
- vf_cfg->vlan_tag = 0;
- vlan = vf_cfg->def_vid;
- status = be_cmd_set_hsw_config(adapter, vlan, vf + 1,
- vf_cfg->if_handle, 0);
+ status = be_cmd_set_hsw_config(adapter, BE_RESET_VLAN_TAG_ID,
+ vf + 1, vf_cfg->if_handle, 0);
}
-
- if (status)
+ if (!status)
+ vf_cfg->vlan_tag = vlan;
+ else
dev_info(&adapter->pdev->dev,
- "VLAN %d config on VF %d failed\n", vlan, vf);
+ "VLAN %d config on VF %d failed\n", vlan, vf);
return status;
}
@@ -3013,11 +3029,11 @@ static int be_vf_setup_init(struct be_adapter *adapter)
static int be_vf_setup(struct be_adapter *adapter)
{
+ struct device *dev = &adapter->pdev->dev;
struct be_vf_cfg *vf_cfg;
- u16 def_vlan, lnk_speed;
int status, old_vfs, vf;
- struct device *dev = &adapter->pdev->dev;
u32 privileges;
+ u16 lnk_speed;
old_vfs = pci_num_vf(adapter->pdev);
if (old_vfs) {
@@ -3084,12 +3100,6 @@ static int be_vf_setup(struct be_adapter *adapter)
if (!status)
vf_cfg->tx_rate = lnk_speed;
- status = be_cmd_get_hsw_config(adapter, &def_vlan,
- vf + 1, vf_cfg->if_handle, NULL);
- if (status)
- goto err;
- vf_cfg->def_vid = def_vlan;
-
if (!old_vfs)
be_cmd_enable_vf(adapter, vf + 1);
}
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 4de8cfd149cf..55e0fa03dc90 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -13,6 +13,7 @@
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
+#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -51,6 +52,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
#define ETH_HASH0 0x48
#define ETH_HASH1 0x4c
#define ETH_TXCTRL 0x50
+#define ETH_END 0x54
/* mode register */
#define MODER_RXEN (1 << 0) /* receive enable */
@@ -179,6 +181,7 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
* @membase: pointer to buffer memory region
* @dma_alloc: dma allocated buffer size
* @io_region_size: I/O memory region size
+ * @num_bd: number of buffer descriptors
* @num_tx: number of send buffers
* @cur_tx: last send buffer written
* @dty_tx: last buffer actually sent
@@ -199,6 +202,7 @@ struct ethoc {
int dma_alloc;
resource_size_t io_region_size;
+ unsigned int num_bd;
unsigned int num_tx;
unsigned int cur_tx;
unsigned int dty_tx;
@@ -216,6 +220,7 @@ struct ethoc {
struct phy_device *phy;
struct mii_bus *mdio;
+ struct clk *clk;
s8 phy_id;
};
@@ -688,6 +693,11 @@ static int ethoc_mdio_probe(struct net_device *dev)
}
priv->phy = phy;
+ phy->advertising &= ~(ADVERTISED_1000baseT_Full |
+ ADVERTISED_1000baseT_Half);
+ phy->supported &= ~(SUPPORTED_1000baseT_Full |
+ SUPPORTED_1000baseT_Half);
+
return 0;
}
@@ -890,6 +900,102 @@ out:
return NETDEV_TX_OK;
}
+static int ethoc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_gset(phydev, cmd);
+}
+
+static int ethoc_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ struct phy_device *phydev = priv->phy;
+
+ if (!phydev)
+ return -EOPNOTSUPP;
+
+ return phy_ethtool_sset(phydev, cmd);
+}
+
+static int ethoc_get_regs_len(struct net_device *netdev)
+{
+ return ETH_END;
+}
+
+static void ethoc_get_regs(struct net_device *dev, struct ethtool_regs *regs,
+ void *p)
+{
+ struct ethoc *priv = netdev_priv(dev);
+ u32 *regs_buff = p;
+ unsigned i;
+
+ regs->version = 0;
+ for (i = 0; i < ETH_END / sizeof(u32); ++i)
+ regs_buff[i] = ethoc_read(priv, i * sizeof(u32));
+}
+
+static void ethoc_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ ring->rx_max_pending = priv->num_bd - 1;
+ ring->rx_mini_max_pending = 0;
+ ring->rx_jumbo_max_pending = 0;
+ ring->tx_max_pending = priv->num_bd - 1;
+
+ ring->rx_pending = priv->num_rx;
+ ring->rx_mini_pending = 0;
+ ring->rx_jumbo_pending = 0;
+ ring->tx_pending = priv->num_tx;
+}
+
+static int ethoc_set_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct ethoc *priv = netdev_priv(dev);
+
+ if (ring->tx_pending < 1 || ring->rx_pending < 1 ||
+ ring->tx_pending + ring->rx_pending > priv->num_bd)
+ return -EINVAL;
+ if (ring->rx_mini_pending || ring->rx_jumbo_pending)
+ return -EINVAL;
+
+ if (netif_running(dev)) {
+ netif_tx_disable(dev);
+ ethoc_disable_rx_and_tx(priv);
+ ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ synchronize_irq(dev->irq);
+ }
+
+ priv->num_tx = rounddown_pow_of_two(ring->tx_pending);
+ priv->num_rx = ring->rx_pending;
+ ethoc_init_ring(priv, dev->mem_start);
+
+ if (netif_running(dev)) {
+ ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
+ ethoc_enable_rx_and_tx(priv);
+ netif_wake_queue(dev);
+ }
+ return 0;
+}
+
+const struct ethtool_ops ethoc_ethtool_ops = {
+ .get_settings = ethoc_get_settings,
+ .set_settings = ethoc_set_settings,
+ .get_regs_len = ethoc_get_regs_len,
+ .get_regs = ethoc_get_regs,
+ .get_link = ethtool_op_get_link,
+ .get_ringparam = ethoc_get_ringparam,
+ .set_ringparam = ethoc_set_ringparam,
+ .get_ts_info = ethtool_op_get_ts_info,
+};
+
static const struct net_device_ops ethoc_netdev_ops = {
.ndo_open = ethoc_open,
.ndo_stop = ethoc_stop,
@@ -917,6 +1023,8 @@ static int ethoc_probe(struct platform_device *pdev)
int num_bd;
int ret = 0;
bool random_mac = false;
+ struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ u32 eth_clkfreq = pdata ? pdata->eth_clkfreq : 0;
/* allocate networking device */
netdev = alloc_etherdev(sizeof(struct ethoc));
@@ -1016,6 +1124,7 @@ static int ethoc_probe(struct platform_device *pdev)
ret = -ENODEV;
goto error;
}
+ priv->num_bd = num_bd;
/* num_tx must be a power of two */
priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
priv->num_rx = num_bd - priv->num_tx;
@@ -1030,8 +1139,7 @@ static int ethoc_probe(struct platform_device *pdev)
}
/* Allow the platform setup code to pass in a MAC address. */
- if (dev_get_platdata(&pdev->dev)) {
- struct ethoc_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
priv->phy_id = pdata->phy_id;
} else {
@@ -1069,6 +1177,27 @@ static int ethoc_probe(struct platform_device *pdev)
if (random_mac)
netdev->addr_assign_type = NET_ADDR_RANDOM;
+ /* Allow the platform setup code to adjust MII management bus clock. */
+ if (!eth_clkfreq) {
+ struct clk *clk = devm_clk_get(&pdev->dev, NULL);
+
+ if (!IS_ERR(clk)) {
+ priv->clk = clk;
+ clk_prepare_enable(clk);
+ eth_clkfreq = clk_get_rate(clk);
+ }
+ }
+ if (eth_clkfreq) {
+ u32 clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);
+
+ if (!clkdiv)
+ clkdiv = 2;
+ dev_dbg(&pdev->dev, "setting MII clkdiv to %u\n", clkdiv);
+ ethoc_write(priv, MIIMODER,
+ (ethoc_read(priv, MIIMODER) & MIIMODER_NOPRE) |
+ clkdiv);
+ }
+
/* register MII bus */
priv->mdio = mdiobus_alloc();
if (!priv->mdio) {
@@ -1111,6 +1240,7 @@ static int ethoc_probe(struct platform_device *pdev)
netdev->netdev_ops = &ethoc_netdev_ops;
netdev->watchdog_timeo = ETHOC_TIMEOUT;
netdev->features |= 0;
+ netdev->ethtool_ops = &ethoc_ethtool_ops;
/* setup NAPI */
netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
@@ -1133,6 +1263,8 @@ free_mdio:
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
free:
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
free_netdev(netdev);
out:
return ret;
@@ -1157,6 +1289,8 @@ static int ethoc_remove(struct platform_device *pdev)
kfree(priv->mdio->irq);
mdiobus_free(priv->mdio);
}
+ if (priv->clk)
+ clk_disable_unprepare(priv->clk);
unregister_netdev(netdev);
free_netdev(netdev);
}
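
Editor's note: the ethoc probe changes above derive the MDIO clock divider from the bus clock so MDC stays at or below 2.5 MHz. A standalone sketch of that math; the MIIMODER_CLKDIV() body is not shown in the hunk, so the definition below is an assumption:

#include <stdio.h>

#define MIIMODER_CLKDIV(x)	((x) & 0xff)	/* assumed definition */

int main(void)
{
	unsigned long eth_clkfreq = 50000000;	/* e.g. a 50 MHz bus clock */
	unsigned int clkdiv = MIIMODER_CLKDIV(eth_clkfreq / 2500000 + 1);

	if (!clkdiv)
		clkdiv = 2;			/* same floor as the driver */
	printf("MII clkdiv = %u\n", clkdiv);	/* 21 */
	return 0;
}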
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index d4782b42401b..03a351300013 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -389,12 +389,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
netdev_err(ndev, "Tx DMA memory map failed\n");
return NETDEV_TX_OK;
}
- /* Send it on its way. Tell FEC it's ready, interrupt when done,
- * it's the last BD of the frame, and to put the CRC on the end.
- */
- status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
- | BD_ENET_TX_LAST | BD_ENET_TX_TC);
- bdp->cbd_sc = status;
if (fep->bufdesc_ex) {
@@ -416,6 +410,13 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
}
+ /* Send it on its way. Tell FEC it's ready, interrupt when done,
+ * it's the last BD of the frame, and to put the CRC on the end.
+ */
+ status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+ | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+ bdp->cbd_sc = status;
+
bdp_pre = fec_enet_get_prevdesc(bdp, fep);
if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
!(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
@@ -527,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex)
/* Clear any outstanding interrupt. */
writel(0xffc00000, fep->hwp + FEC_IEVENT);
- /* Setup multicast filter. */
- set_multicast_list(ndev);
-#ifndef CONFIG_M5272
- writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
- writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
-#endif
-
/* Set maximum receive buffer size. */
writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE);
@@ -654,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex)
writel(rcntl, fep->hwp + FEC_R_CNTRL);
+ /* Setup multicast filter. */
+ set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+ writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+ writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
/* enable ENET endian swap */
ecntl |= (1 << 8);
@@ -1778,8 +1779,6 @@ fec_enet_open(struct net_device *ndev)
struct fec_enet_private *fep = netdev_priv(ndev);
int ret;
- napi_enable(&fep->napi);
-
/* I should reset the ring buffers here, but I don't yet know
* a simple way to do that.
*/
@@ -1794,6 +1793,8 @@ fec_enet_open(struct net_device *ndev)
fec_enet_free_buffers(ndev);
return ret;
}
+
+ napi_enable(&fep->napi);
phy_start(fep->phy_dev);
netif_start_queue(ndev);
fep->opened = 1;
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index 4be971590461..1fc8334fc181 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -522,10 +522,21 @@ retry:
return rc;
}
+static u64 ibmveth_encode_mac_addr(u8 *mac)
+{
+ int i;
+ u64 encoded = 0;
+
+ for (i = 0; i < ETH_ALEN; i++)
+ encoded = (encoded << 8) | mac[i];
+
+ return encoded;
+}
+
static int ibmveth_open(struct net_device *netdev)
{
struct ibmveth_adapter *adapter = netdev_priv(netdev);
- u64 mac_address = 0;
+ u64 mac_address;
int rxq_entries = 1;
unsigned long lpar_rc;
int rc;
@@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev)
adapter->rx_queue.num_slots = rxq_entries;
adapter->rx_queue.toggle = 1;
- memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
- mac_address = mac_address >> 16;
+ mac_address = ibmveth_encode_mac_addr(netdev->dev_addr);
rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
adapter->rx_queue.queue_len;
@@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev)
/* add the addresses to the filter table */
netdev_for_each_mc_addr(ha, netdev) {
/* add the multicast address to the filter table */
- unsigned long mcast_addr = 0;
- memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN);
+ u64 mcast_addr;
+ mcast_addr = ibmveth_encode_mac_addr(ha->addr);
lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address,
IbmVethMcastAddFilter,
mcast_addr);
@@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16);
- adapter->mac_addr = 0;
- memcpy(&adapter->mac_addr, mac_addr_p, ETH_ALEN);
-
netdev->irq = dev->irq;
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
@@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
netdev->features |= netdev->hw_features;
- memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len);
+ memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN);
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
struct kobject *kobj = &adapter->rx_buff_pool[i].kobj;
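
Editor's note: ibmveth_encode_mac_addr() packs the six MAC bytes into a u64 most-significant-byte first, replacing the old memcpy-into-a-long shuffle whose result depended on host endianness. A standalone sketch of the helper's behaviour:

#include <stdio.h>
#include <stdint.h>

#define ETH_ALEN 6

static uint64_t encode_mac_addr(const uint8_t *mac)
{
	uint64_t encoded = 0;
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		encoded = (encoded << 8) | mac[i];
	return encoded;
}

int main(void)
{
	uint8_t mac[ETH_ALEN] = { 0x02, 0x00, 0x5e, 0x10, 0x20, 0x30 };

	/* Same value on little- and big-endian hosts. */
	printf("0x%012llx\n", (unsigned long long)encode_mac_addr(mac));
	return 0;
}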
diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h
index 451ba7949e15..1f37499d4398 100644
--- a/drivers/net/ethernet/ibm/ibmveth.h
+++ b/drivers/net/ethernet/ibm/ibmveth.h
@@ -138,7 +138,6 @@ struct ibmveth_adapter {
struct napi_struct napi;
struct net_device_stats stats;
unsigned int mcastFilterSize;
- unsigned long mac_addr;
void * buffer_list_addr;
void * filter_list_addr;
dma_addr_t buffer_list_dma;
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index cbaba4442d4b..bf7a01ef9a57 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -3034,7 +3034,7 @@ static void __e100_shutdown(struct pci_dev *pdev, bool *enable_wake)
*enable_wake = false;
}
- pci_disable_device(pdev);
+ pci_clear_master(pdev);
}
static int __e100_power_off(struct pci_dev *pdev, bool wake)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 6d4ada72dfd0..18076c4178b4 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -6881,7 +6881,7 @@ static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size)
}
static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct ixgbe_fwd_adapter *fwd_adapter = accel_priv;
#ifdef IXGBE_FCOE
@@ -6907,7 +6907,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
break;
default:
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
}
f = &adapter->ring_feature[RING_F_FCOE];
@@ -6920,7 +6920,7 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
return txq + f->offset;
#else
- return __netdev_pick_tx(dev, skb);
+ return fallback(dev, skb);
#endif
}
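
Editor's note: this ixgbe change, and the matching lantiq and mlx4 hunks below, adapt the drivers to the new ->ndo_select_queue() calling convention: the core now hands the driver a fallback selector instead of the driver calling __netdev_pick_tx() directly. A standalone sketch of that shape with simplified stand-in types:

#include <stdio.h>
#include <stdint.h>

struct net_device { int dummy; };
struct sk_buff { int priority; };

typedef uint16_t (*select_queue_fallback_t)(struct net_device *dev,
					    struct sk_buff *skb);

static uint16_t core_fallback(struct net_device *dev, struct sk_buff *skb)
{
	(void)dev;
	return (uint16_t)(skb->priority & 0x7);	/* stand-in hashing */
}

/* Driver hook: defer to the fallback for the common case. */
static uint16_t drv_select_queue(struct net_device *dev, struct sk_buff *skb,
				 void *accel_priv,
				 select_queue_fallback_t fallback)
{
	(void)accel_priv;
	return fallback(dev, skb);
}

int main(void)
{
	struct net_device dev = { 0 };
	struct sk_buff skb = { .priority = 5 };

	printf("queue %u\n",
	       (unsigned)drv_select_queue(&dev, &skb, NULL, core_fallback));
	return 0;
}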
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 8f9266c64c75..fd4b6aecf6ee 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -619,7 +619,7 @@ ltq_etop_set_multicast_list(struct net_device *dev)
static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/* we are currently only using the first queue */
return 0;
diff --git a/drivers/net/ethernet/marvell/Kconfig b/drivers/net/ethernet/marvell/Kconfig
index 6300fd27f2db..68e6a6613e9a 100644
--- a/drivers/net/ethernet/marvell/Kconfig
+++ b/drivers/net/ethernet/marvell/Kconfig
@@ -43,12 +43,12 @@ config MVMDIO
This driver is used by the MV643XX_ETH and MVNETA drivers.
config MVNETA
- tristate "Marvell Armada 370/XP network interface support"
- depends on MACH_ARMADA_370_XP
+ tristate "Marvell Armada 370/38x/XP network interface support"
+ depends on PLAT_ORION
select MVMDIO
---help---
This driver supports the network interface units in the
- Marvell ARMADA XP and ARMADA 370 SoC family.
+ Marvell ARMADA XP, ARMADA 370 and ARMADA 38x SoC family.
Note that this driver is distinct from the mv643xx_eth
driver, which should be used for the older Marvell SoCs
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index fad45316200a..84a96f70dfb5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -742,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
err = mlx4_en_uc_steer_add(priv, new_mac,
&qpn,
&entry->reg_id);
+ if (err)
+ return err;
+ if (priv->tunnel_reg_id) {
+ mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id);
+ priv->tunnel_reg_id = 0;
+ }
+ err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn,
+ &priv->tunnel_reg_id);
return err;
}
}
@@ -1792,6 +1800,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
mc_list[5] = priv->port;
mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp,
mc_list, MLX4_PROT_ETH, mclist->reg_id);
+ if (mclist->tunnel_reg_id)
+ mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id);
}
mlx4_en_clear_list(dev);
list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8e8a7eb43a2c..13457032d15f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -629,7 +629,7 @@ static void build_inline_wqe(struct mlx4_en_tx_desc *tx_desc, struct sk_buff *sk
}
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct mlx4_en_priv *priv = netdev_priv(dev);
u16 rings_p_up = priv->num_tx_rings_p_up;
@@ -641,7 +641,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
if (vlan_tx_tag_present(skb))
up = vlan_tx_tag_get(skb) >> VLAN_PRIO_SHIFT;
- return __netdev_pick_tx(dev, skb) % rings_p_up + up * rings_p_up;
+ return fallback(dev, skb) % rings_p_up + up * rings_p_up;
}
static void mlx4_bf_copy(void __iomem *dst, unsigned long *src, unsigned bytecnt)
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 91b69ff4b4a2..7e2995ecea6f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags)
[0] = "RSS support",
[1] = "RSS Toeplitz Hash Function support",
[2] = "RSS XOR Hash Function support",
- [3] = "Device manage flow steering support",
+ [3] = "Device managed flow steering support",
[4] = "Automatic MAC reassignment support",
[5] = "Time stamping support",
[6] = "VST (control vlan insertion/stripping) support",
[7] = "FSM (MAC anti-spoofing) support",
[8] = "Dynamic QP updates support",
- [9] = "TCP/IP offloads/flow-steering for VXLAN support"
+ [9] = "Device managed flow steering IPoIB support",
+ [10] = "TCP/IP offloads/flow-steering for VXLAN support"
};
int i;
@@ -859,7 +860,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET);
/* For guests, disable vxlan tunneling */
- MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN);
field &= 0xf7;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN);
@@ -869,7 +870,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
/* For guests, disable mw type 2 */
- MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
+ MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
bmme_flags &= ~MLX4_BMME_FLAG_TYPE_2_WIN;
MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET);
@@ -883,7 +884,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
}
/* turn off ipoib managed steering for guests */
- MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
+ MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
field &= ~0x80;
MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index d711158b0d4b..936c15364739 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -150,6 +150,8 @@ struct mlx4_port_config {
struct pci_dev *pdev;
};
+static atomic_t pf_loading = ATOMIC_INIT(0);
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -749,7 +751,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev)
has_eth_port = true;
}
- if (has_ib_port)
+ if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE))
request_module_nowait(IB_DRV_NAME);
if (has_eth_port)
request_module_nowait(EN_DRV_NAME);
@@ -1407,6 +1409,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev)
u32 slave_read;
u32 cmd_channel_ver;
+ if (atomic_read(&pf_loading)) {
+ mlx4_warn(dev, "PF is not ready. Deferring probe\n");
+ return -EPROBE_DEFER;
+ }
+
mutex_lock(&priv->cmd.slave_cmd_mutex);
priv->cmd.max_cmds = 1;
mlx4_warn(dev, "Sending reset\n");
@@ -2319,7 +2326,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data)
if (num_vfs) {
mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs);
+
+ atomic_inc(&pf_loading);
err = pci_enable_sriov(pdev, num_vfs);
+ atomic_dec(&pf_loading);
+
if (err) {
mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n",
err);
@@ -2684,6 +2695,7 @@ static struct pci_driver mlx4_driver = {
.name = DRV_NAME,
.id_table = mlx4_pci_table,
.probe = mlx4_init_one,
+ .shutdown = mlx4_remove_one,
.remove = mlx4_remove_one,
.err_handler = &mlx4_err_handler,
};
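
Editor's note: the pf_loading counter above lets a VF that is probed while the PF is still inside pci_enable_sriov() return -EPROBE_DEFER instead of timing out on the command channel. A single-file sketch of that gate:

#include <stdio.h>
#include <stdatomic.h>

#define EPROBE_DEFER 517

static atomic_int pf_loading;

static int vf_init_slave(void)
{
	if (atomic_load(&pf_loading)) {
		printf("PF is not ready, deferring probe\n");
		return -EPROBE_DEFER;
	}
	return 0;
}

int main(void)
{
	atomic_fetch_add(&pf_loading, 1);	/* PF enters SR-IOV enable */
	printf("vf probe -> %d\n", vf_init_slave());
	atomic_fetch_sub(&pf_loading, 1);	/* PF done */
	printf("vf probe -> %d\n", vf_init_slave());
	return 0;
}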
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 6b65f7795215..7aec6c833973 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -51,8 +51,8 @@
#define DRV_NAME "mlx4_core"
#define PFX DRV_NAME ": "
-#define DRV_VERSION "1.1"
-#define DRV_RELDATE "Dec, 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb, 2014"
#define MLX4_FS_UDP_UC_EN (1 << 1)
#define MLX4_FS_TCP_UC_EN (1 << 2)
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 3af04c3f42ea..b57e8c87a34e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -57,8 +57,8 @@
#include "en_port.h"
#define DRV_NAME "mlx4_en"
-#define DRV_VERSION "2.0"
-#define DRV_RELDATE "Dec 2011"
+#define DRV_VERSION "2.2-1"
+#define DRV_RELDATE "Feb 2014"
#define MLX4_EN_MSG_LEVEL (NETIF_MSG_LINK | NETIF_MSG_IFDOWN)
@@ -723,7 +723,7 @@ int mlx4_en_arm_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq);
void mlx4_en_tx_irq(struct mlx4_cq *mcq);
u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv, select_queue_fallback_t fallback);
netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev);
int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
index 157fe8df2c3e..8ff57e8e3e91 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
+++ b/drivers/net/ethernet/mellanox/mlx5/core/Kconfig
@@ -4,5 +4,5 @@
config MLX5_CORE
tristate
- depends on PCI && X86
+ depends on PCI
default n
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index a064f06e0cb8..23b7e2d35a93 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -46,8 +46,8 @@
#include "mlx5_core.h"
#define DRIVER_NAME "mlx5_core"
-#define DRIVER_VERSION "1.0"
-#define DRIVER_RELDATE "June 2013"
+#define DRIVER_VERSION "2.2-1"
+#define DRIVER_RELDATE "Feb 2014"
MODULE_AUTHOR("Eli Cohen <eli@mellanox.com>");
MODULE_DESCRIPTION("Mellanox ConnectX-IB HCA core library");
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index 727b546a9eb8..e0c92e0e5e1d 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -23,6 +23,7 @@
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/eeprom_93cx6.h>
+#include <linux/regulator/consumer.h>
#include <linux/spi/spi.h>
@@ -83,6 +84,7 @@ union ks8851_tx_hdr {
* @rc_rxqcr: Cached copy of KS_RXQCR.
* @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
+ * @vdd_reg: Optional regulator supplying the chip
*
* The @lock ensures that the chip is protected when certain operations are
* in progress. When the read or write packet transfer is in progress, most
@@ -130,6 +132,7 @@ struct ks8851_net {
struct spi_transfer spi_xfer2[2];
struct eeprom_93cx6 eeprom;
+ struct regulator *vdd_reg;
};
static int msg_enable;
@@ -1414,6 +1417,21 @@ static int ks8851_probe(struct spi_device *spi)
ks->spidev = spi;
ks->tx_space = 6144;
+ ks->vdd_reg = regulator_get_optional(&spi->dev, "vdd");
+ if (IS_ERR(ks->vdd_reg)) {
+ ret = PTR_ERR(ks->vdd_reg);
+ if (ret == -EPROBE_DEFER)
+ goto err_reg;
+ } else {
+ ret = regulator_enable(ks->vdd_reg);
+ if (ret) {
+ dev_err(&spi->dev, "regulator enable fail: %d\n",
+ ret);
+ goto err_reg_en;
+ }
+ }
+
+
mutex_init(&ks->lock);
spin_lock_init(&ks->statelock);
@@ -1508,8 +1526,14 @@ static int ks8851_probe(struct spi_device *spi)
err_netdev:
free_irq(ndev->irq, ks);
-err_id:
err_irq:
+err_id:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_disable(ks->vdd_reg);
+err_reg_en:
+ if (!IS_ERR(ks->vdd_reg))
+ regulator_put(ks->vdd_reg);
+err_reg:
free_netdev(ndev);
return ret;
}
@@ -1523,6 +1547,10 @@ static int ks8851_remove(struct spi_device *spi)
unregister_netdev(priv->netdev);
free_irq(spi->irq, priv);
+ if (!IS_ERR(priv->vdd_reg)) {
+ regulator_disable(priv->vdd_reg);
+ regulator_put(priv->vdd_reg);
+ }
free_netdev(priv->netdev);
return 0;
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index 1ded50ca1600..e46e8698e630 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -726,9 +726,6 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
int vpath_idx = 0;
enum vxge_hw_status status = VXGE_HW_OK;
struct vxge_vpath *vpath = NULL;
- struct __vxge_hw_device *hldev;
-
- hldev = pci_get_drvdata(vdev->pdev);
mac_address = (u8 *)&mac_addr;
memcpy(mac_address, mac_header, ETH_ALEN);
@@ -2443,9 +2440,6 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
static void vxge_rem_isr(struct vxgedev *vdev)
{
- struct __vxge_hw_device *hldev;
- hldev = pci_get_drvdata(vdev->pdev);
-
#ifdef CONFIG_PCI_MSI
if (vdev->config.intr_type == MSI_X) {
vxge_rem_msix_isr(vdev);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index 4146664d4d6a..27c4f131863b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -340,6 +340,7 @@ int qlcnic_83xx_setup_intr(struct qlcnic_adapter *adapter)
if (qlcnic_sriov_vf_check(adapter))
return -EINVAL;
num_msix = 1;
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
}
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
index 77f1bce432d2..7d4f54912bad 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
@@ -807,7 +807,7 @@ qlcnic_dcb_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio,
!type->tc_param_valid)
return;
- if (tc < 0 || (tc > QLC_DCB_MAX_TC))
+ if (tc < 0 || (tc >= QLC_DCB_MAX_TC))
return;
tc_cfg = &type->tc_cfg[tc];
@@ -843,7 +843,7 @@ static void qlcnic_dcb_get_pg_bwg_cfg_tx(struct net_device *netdev, int pgid,
!type->tc_param_valid)
return;
- if (pgid < 0 || pgid > QLC_DCB_MAX_PG)
+ if (pgid < 0 || pgid >= QLC_DCB_MAX_PG)
return;
pgcfg = &type->pg_cfg[pgid];
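
Editor's note: both qlcnic_dcb.c hunks fix the same off-by-one: for an array of size N the valid indices are 0..N-1, so the guard must use >=, not >. A tiny standalone sketch; the QLC_DCB_MAX_TC value below is an assumption for illustration:

#include <stdio.h>

#define QLC_DCB_MAX_TC 8	/* assumed value */

static int tc_index_ok(int tc)
{
	return !(tc < 0 || tc >= QLC_DCB_MAX_TC);
}

int main(void)
{
	printf("tc=7 ok? %d\n", tc_index_ok(7));	/* 1 */
	printf("tc=8 ok? %d\n", tc_index_ok(8));	/* 0: the old `>` check let this through */
	return 0;
}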
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index ba78c7481fa3..1222865cfb73 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -816,9 +816,10 @@ static int qlcnic_82xx_setup_intr(struct qlcnic_adapter *adapter)
if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
qlcnic_disable_multi_tx(adapter);
+ adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
err = qlcnic_enable_msi_legacy(adapter);
- if (!err)
+ if (err)
return err;
}
}
@@ -3863,7 +3864,7 @@ int qlcnic_validate_rings(struct qlcnic_adapter *adapter, __u32 ring_cnt,
strcpy(buf, "Tx");
}
- if (!qlcnic_use_msi_x && !qlcnic_use_msi) {
+ if (!QLCNIC_IS_MSI_FAMILY(adapter)) {
netdev_err(netdev, "No RSS/TSS support in INT-x mode\n");
return -EINVAL;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 09acf15c3a56..e5277a632671 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -13,8 +13,6 @@
#define QLC_VF_MIN_TX_RATE 100
#define QLC_VF_MAX_TX_RATE 9999
#define QLC_MAC_OPCODE_MASK 0x7
-#define QLC_MAC_STAR_ADD 6
-#define QLC_MAC_STAR_DEL 7
#define QLC_VF_FLOOD_BIT BIT_16
#define QLC_FLOOD_MODE 0x5
@@ -1206,13 +1204,6 @@ static int qlcnic_sriov_validate_cfg_macvlan(struct qlcnic_adapter *adapter,
struct qlcnic_vport *vp = vf->vp;
u8 op, new_op;
- if (((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_ADD) ||
- ((cmd->req.arg[1] & QLC_MAC_OPCODE_MASK) == QLC_MAC_STAR_DEL)) {
- netdev_err(adapter->netdev, "MAC + any VLAN filter not allowed from VF %d\n",
- vf->pci_func);
- return -EINVAL;
- }
-
if (!(cmd->req.arg[1] & BIT_8))
return -EINVAL;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 91a67ae8f17b..3ff7bc3e7a23 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -209,7 +209,7 @@ static const struct {
[RTL_GIGA_MAC_VER_16] =
_R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true),
[RTL_GIGA_MAC_VER_17] =
- _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false),
+ _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false),
[RTL_GIGA_MAC_VER_18] =
_R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false),
[RTL_GIGA_MAC_VER_19] =
@@ -7118,6 +7118,8 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
mutex_init(&tp->wk.mutex);
+ u64_stats_init(&tp->rx_stats.syncp);
+ u64_stats_init(&tp->tx_stats.syncp);
/* Get MAC address */
for (i = 0; i < ETH_ALEN; i++)
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index eb75fbd11a01..d7a36829649a 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -1668,6 +1668,13 @@ void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev)
struct efx_ptp_data *ptp = efx->ptp_data;
int code = EFX_QWORD_FIELD(*ev, MCDI_EVENT_CODE);
+ if (!ptp) {
+ if (net_ratelimit())
+ netif_warn(efx, drv, efx->net_dev,
+ "Received PTP event but PTP not set up\n");
+ return;
+ }
+
if (!ptp->enabled)
return;
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index c49d1fb16965..75d11fa4eb0a 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -429,7 +429,9 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
}
/* Transfer ownership of the skb to the final buffer */
+#ifdef EFX_USE_PIO
finish_packet:
+#endif
buffer->skb = skb;
buffer->flags = EFX_TX_BUF_SKB | dma_flags;
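
Editor's note: the sfc hunk wraps the finish_packet label in #ifdef EFX_USE_PIO because the only goto targeting it lives in PIO-only code; without the guard a non-PIO build warns about a defined-but-unused label. A generic standalone illustration of that pattern (not the driver code):

#include <stdio.h>

/* #define EFX_USE_PIO */

static int xmit_one(int use_pio)
{
#ifdef EFX_USE_PIO
	if (use_pio)
		goto finish_packet;
#else
	(void)use_pio;
#endif
	printf("map fragments for DMA\n");
#ifdef EFX_USE_PIO
finish_packet:
#endif
	printf("transfer skb ownership to the final buffer\n");
	return 0;
}

int main(void)
{
	return xmit_one(0);
}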
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index e2f202e3932f..f2d7c702c77f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -37,6 +37,17 @@ config DWMAC_SUNXI
stmmac device driver. This driver is used for A20/A31
GMAC ethernet controller.
+config DWMAC_STI
+ bool "STi GMAC support"
+ depends on STMMAC_PLATFORM && ARCH_STI
+ default y
+ ---help---
+ Support for the ethernet controller on STi SoCs.
+
+ This selects the STi SoC glue layer support for the stmmac
+ device driver. This driver is used for the GMAC ethernet
+ controller on STi series SoCs.
+
config STMMAC_PCI
bool "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile
index ecadecea79b2..dcef28775dad 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Makefile
+++ b/drivers/net/ethernet/stmicro/stmmac/Makefile
@@ -2,6 +2,7 @@ obj-$(CONFIG_STMMAC_ETH) += stmmac.o
stmmac-$(CONFIG_STMMAC_PLATFORM) += stmmac_platform.o
stmmac-$(CONFIG_STMMAC_PCI) += stmmac_pci.o
stmmac-$(CONFIG_DWMAC_SUNXI) += dwmac-sunxi.o
+stmmac-$(CONFIG_DWMAC_STI) += dwmac-sti.o
stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \
chain_mode.o dwmac_lib.o dwmac1000_core.o dwmac1000_dma.o \
dwmac100_core.o dwmac100_dma.o enh_desc.o norm_desc.o \
diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
index 72d282bf33a5..c553f6b5a913 100644
--- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c
@@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p)
sizeof(struct dma_desc)));
}
-const struct stmmac_chain_mode_ops chain_mode_ops = {
+const struct stmmac_mode_ops chain_mode_ops = {
.init = stmmac_init_dma_chain,
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h
index 7834a3993946..74610f3aca9e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/common.h
+++ b/drivers/net/ethernet/stmicro/stmmac/common.h
@@ -419,20 +419,13 @@ struct mii_regs {
unsigned int data; /* MII Data */
};
-struct stmmac_ring_mode_ops {
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
- unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
- void (*refill_desc3) (void *priv, struct dma_desc *p);
- void (*init_desc3) (struct dma_desc *p);
- void (*clean_desc3) (void *priv, struct dma_desc *p);
- int (*set_16kib_bfsize) (int mtu);
-};
-
-struct stmmac_chain_mode_ops {
+struct stmmac_mode_ops {
void (*init) (void *des, dma_addr_t phy_addr, unsigned int size,
unsigned int extend_desc);
unsigned int (*is_jumbo_frm) (int len, int ehn_desc);
unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum);
+ int (*set_16kib_bfsize)(int mtu);
+ void (*init_desc3)(struct dma_desc *p);
void (*refill_desc3) (void *priv, struct dma_desc *p);
void (*clean_desc3) (void *priv, struct dma_desc *p);
};
@@ -441,8 +434,7 @@ struct mac_device_info {
const struct stmmac_ops *mac;
const struct stmmac_desc_ops *desc;
const struct stmmac_dma_ops *dma;
- const struct stmmac_ring_mode_ops *ring;
- const struct stmmac_chain_mode_ops *chain;
+ const struct stmmac_mode_ops *mode;
const struct stmmac_hwtimestamp *ptp;
struct mii_regs mii; /* MII register Addresses */
struct mac_link link;
@@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr,
void stmmac_set_mac(void __iomem *ioaddr, bool enable);
void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr);
-extern const struct stmmac_ring_mode_ops ring_mode_ops;
-extern const struct stmmac_chain_mode_ops chain_mode_ops;
+extern const struct stmmac_mode_ops ring_mode_ops;
+extern const struct stmmac_mode_ops chain_mode_ops;
#endif /* __COMMON_H__ */
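
Editor's note: merging stmmac_ring_mode_ops and stmmac_chain_mode_ops into a single stmmac_mode_ops means callers dispatch through one pointer and NULL-check the hooks that only one mode provides, as the stmmac_main.c hunks below do for set_16kib_bfsize and init_desc3. A standalone sketch of that dispatch with simplified stand-in names:

#include <stdio.h>
#include <stddef.h>

struct mode_ops {
	int (*set_16kib_bfsize)(int mtu);	/* ring mode only */
	void (*init_desc3)(void);		/* ring mode only */
};

static int ring_set_16kib_bfsize(int mtu)
{
	return mtu > 8192 ? 16384 : 0;
}

static const struct mode_ops ring_ops  = { ring_set_16kib_bfsize, NULL };
static const struct mode_ops chain_ops = { NULL, NULL };

static int pick_bfsize(const struct mode_ops *mode, int mtu)
{
	int bfsize = 0;

	if (mode->set_16kib_bfsize)		/* hook is optional */
		bfsize = mode->set_16kib_bfsize(mtu);
	return bfsize;
}

int main(void)
{
	printf("ring:  %d\n", pick_bfsize(&ring_ops, 9000));
	printf("chain: %d\n", pick_bfsize(&chain_ops, 9000));
	return 0;
}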
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
new file mode 100644
index 000000000000..552bbc17863c
--- /dev/null
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-sti.c
@@ -0,0 +1,330 @@
+/**
+ * dwmac-sti.c - STMicroelectronics DWMAC Specific Glue layer
+ *
+ * Copyright (C) 2003-2014 STMicroelectronics (R&D) Limited
+ * Author: Srinivas Kandagatla <srinivas.kandagatla@st.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/stmmac.h>
+#include <linux/phy.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/clk.h>
+#include <linux/of.h>
+#include <linux/of_net.h>
+
+/**
+ * STi GMAC glue logic.
+ * --------------------
+ *
+ * _
+ * | \
+ * --------|0 \ ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * phyclk | |___________________________________________
+ * | | | (phyclk-in)
+ * --------|1 / |
+ * int-clk |_ / |
+ * | _
+ * | | \
+ * |_______|1 \ ETH_SEL_TX_RETIME_CLK
+ * | |___________________________
+ * | | (tx-retime-clk)
+ * _______|0 /
+ * | |_ /
+ * _ |
+ * | \ |
+ * --------|0 \ |
+ * clk_125 | |__|
+ * | | ETH_SEL_TXCLK_NOT_CLK125
+ * --------|1 /
+ * txclk |_ /
+ *
+ *
+ * ETH_SEL_INTERNAL_NOTEXT_PHYCLK is valid only for RMII where PHY can
+ * generate 50MHz clock or MAC can generate it.
+ * This bit is configured by "st,ext-phyclk" property.
+ *
+ * ETH_SEL_TXCLK_NOT_CLK125 is only valid for gigabit modes, where the 125Mhz
+ * clock either comes from clk-125 pin or txclk pin. This configuration is
+ * totally driven by the board wiring. This bit is configured by
+ * "st,tx-retime-src" property.
+ *
+ * TXCLK configuration is different for different phy interface modes
+ * and changes according to link speed in modes like RGMII.
+ *
+ * Below table summarizes the clock requirement and clock sources for
+ * supported phy interface modes with link speeds.
+ * ________________________________________________
+ *| PHY_MODE | 1000 Mbit Link | 100 Mbit Link |
+ * ------------------------------------------------
+ *| MII | n/a | 25Mhz |
+ *| | | txclk |
+ * ------------------------------------------------
+ *| GMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | txclk |
+ * ------------------------------------------------
+ *| RGMII | 125Mhz | 25Mhz |
+ *| | clk-125/txclk | clkgen |
+ * ------------------------------------------------
+ *| RMII | n/a | 25Mhz |
+ *| | |clkgen/phyclk-in |
+ * ------------------------------------------------
+ *
+ * TX lines are always retimed with a clk, which can vary depending
+ * on the board configuration. Below is the table of these bits
+ * in eth configuration register depending on source of retime clk.
+ *
+ *---------------------------------------------------------------
+ * src | tx_rt_clk | int_not_ext_phyclk | txclk_n_clk125|
+ *---------------------------------------------------------------
+ * txclk | 0 | n/a | 1 |
+ *---------------------------------------------------------------
+ * ck_125| 0 | n/a | 0 |
+ *---------------------------------------------------------------
+ * phyclk| 1 | 0 | n/a |
+ *---------------------------------------------------------------
+ * clkgen| 1 | 1 | n/a |
+ *---------------------------------------------------------------
+ */
+
+ /* Register definition */
+
+ /* 3 bits [8:6]
+ * [6:6] ETH_SEL_TXCLK_NOT_CLK125
+ * [7:7] ETH_SEL_INTERNAL_NOTEXT_PHYCLK
+ * [8:8] ETH_SEL_TX_RETIME_CLK
+ *
+ */
+
+#define TX_RETIME_SRC_MASK GENMASK(8, 6)
+#define ETH_SEL_TX_RETIME_CLK BIT(8)
+#define ETH_SEL_INTERNAL_NOTEXT_PHYCLK BIT(7)
+#define ETH_SEL_TXCLK_NOT_CLK125 BIT(6)
+
+#define ENMII_MASK GENMASK(5, 5)
+#define ENMII BIT(5)
+
+/**
+ * 3 bits [4:2]
+ * 000-GMII/MII
+ * 001-RGMII
+ * 010-SGMII
+ * 100-RMII
+*/
+#define MII_PHY_SEL_MASK GENMASK(4, 2)
+#define ETH_PHY_SEL_RMII BIT(4)
+#define ETH_PHY_SEL_SGMII BIT(3)
+#define ETH_PHY_SEL_RGMII BIT(2)
+#define ETH_PHY_SEL_GMII 0x0
+#define ETH_PHY_SEL_MII 0x0
+
+#define IS_PHY_IF_MODE_RGMII(iface) (iface == PHY_INTERFACE_MODE_RGMII || \
+ iface == PHY_INTERFACE_MODE_RGMII_ID || \
+ iface == PHY_INTERFACE_MODE_RGMII_RXID || \
+ iface == PHY_INTERFACE_MODE_RGMII_TXID)
+
+#define IS_PHY_IF_MODE_GBIT(iface) (IS_PHY_IF_MODE_RGMII(iface) || \
+ iface == PHY_INTERFACE_MODE_GMII)
+
+struct sti_dwmac {
+ int interface;
+ bool ext_phyclk;
+ bool is_tx_retime_src_clk_125;
+ struct clk *clk;
+ int reg;
+ struct device *dev;
+ struct regmap *regmap;
+};
+
+static u32 phy_intf_sels[] = {
+ [PHY_INTERFACE_MODE_MII] = ETH_PHY_SEL_MII,
+ [PHY_INTERFACE_MODE_GMII] = ETH_PHY_SEL_GMII,
+ [PHY_INTERFACE_MODE_RGMII] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_RGMII_ID] = ETH_PHY_SEL_RGMII,
+ [PHY_INTERFACE_MODE_SGMII] = ETH_PHY_SEL_SGMII,
+ [PHY_INTERFACE_MODE_RMII] = ETH_PHY_SEL_RMII,
+};
+
+enum {
+ TX_RETIME_SRC_NA = 0,
+ TX_RETIME_SRC_TXCLK = 1,
+ TX_RETIME_SRC_CLK_125,
+ TX_RETIME_SRC_PHYCLK,
+ TX_RETIME_SRC_CLKGEN,
+};
+
+static const char *const tx_retime_srcs[] = {
+ [TX_RETIME_SRC_NA] = "",
+ [TX_RETIME_SRC_TXCLK] = "txclk",
+ [TX_RETIME_SRC_CLK_125] = "clk_125",
+ [TX_RETIME_SRC_PHYCLK] = "phyclk",
+ [TX_RETIME_SRC_CLKGEN] = "clkgen",
+};
+
+static u32 tx_retime_val[] = {
+ [TX_RETIME_SRC_TXCLK] = ETH_SEL_TXCLK_NOT_CLK125,
+ [TX_RETIME_SRC_CLK_125] = 0x0,
+ [TX_RETIME_SRC_PHYCLK] = ETH_SEL_TX_RETIME_CLK,
+ [TX_RETIME_SRC_CLKGEN] = ETH_SEL_TX_RETIME_CLK |
+ ETH_SEL_INTERNAL_NOTEXT_PHYCLK,
+};
+
+static void setup_retime_src(struct sti_dwmac *dwmac, u32 spd)
+{
+ u32 src = 0, freq = 0;
+
+ if (spd == SPEED_100) {
+ if (dwmac->interface == PHY_INTERFACE_MODE_MII ||
+ dwmac->interface == PHY_INTERFACE_MODE_GMII) {
+ src = TX_RETIME_SRC_TXCLK;
+ } else if (dwmac->interface == PHY_INTERFACE_MODE_RMII) {
+ if (dwmac->ext_phyclk) {
+ src = TX_RETIME_SRC_PHYCLK;
+ } else {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 50000000;
+ }
+
+ } else if (IS_PHY_IF_MODE_RGMII(dwmac->interface)) {
+ src = TX_RETIME_SRC_CLKGEN;
+ freq = 25000000;
+ }
+
+ if (src == TX_RETIME_SRC_CLKGEN && dwmac->clk)
+ clk_set_rate(dwmac->clk, freq);
+
+ } else if (spd == SPEED_1000) {
+ if (dwmac->is_tx_retime_src_clk_125)
+ src = TX_RETIME_SRC_CLK_125;
+ else
+ src = TX_RETIME_SRC_TXCLK;
+ }
+
+ regmap_update_bits(dwmac->regmap, dwmac->reg,
+ TX_RETIME_SRC_MASK, tx_retime_val[src]);
+}
+
+static void sti_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ if (dwmac->clk)
+ clk_disable_unprepare(dwmac->clk);
+}
+
+static void sti_fix_mac_speed(void *priv, unsigned int spd)
+{
+ struct sti_dwmac *dwmac = priv;
+
+ setup_retime_src(dwmac, spd);
+
+ return;
+}
+
+static int sti_dwmac_parse_data(struct sti_dwmac *dwmac,
+ struct platform_device *pdev)
+{
+ struct resource *res;
+ struct device *dev = &pdev->dev;
+ struct device_node *np = dev->of_node;
+ struct regmap *regmap;
+ int err;
+
+ if (!np)
+ return -EINVAL;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "sti-ethconf");
+ if (!res)
+ return -ENODATA;
+
+ regmap = syscon_regmap_lookup_by_phandle(np, "st,syscon");
+ if (IS_ERR(regmap))
+ return PTR_ERR(regmap);
+
+ dwmac->dev = dev;
+ dwmac->interface = of_get_phy_mode(np);
+ dwmac->regmap = regmap;
+ dwmac->reg = res->start;
+ dwmac->ext_phyclk = of_property_read_bool(np, "st,ext-phyclk");
+ dwmac->is_tx_retime_src_clk_125 = false;
+
+ if (IS_PHY_IF_MODE_GBIT(dwmac->interface)) {
+ const char *rs;
+
+ err = of_property_read_string(np, "st,tx-retime-src", &rs);
+ if (err < 0) {
+ dev_err(dev, "st,tx-retime-src not specified\n");
+ return err;
+ }
+
+ if (!strcasecmp(rs, "clk_125"))
+ dwmac->is_tx_retime_src_clk_125 = true;
+ }
+
+ dwmac->clk = devm_clk_get(dev, "sti-ethclk");
+
+ if (IS_ERR(dwmac->clk))
+ dwmac->clk = NULL;
+
+ return 0;
+}
+
+static int sti_dwmac_init(struct platform_device *pdev, void *priv)
+{
+ struct sti_dwmac *dwmac = priv;
+ struct regmap *regmap = dwmac->regmap;
+ int iface = dwmac->interface;
+ u32 reg = dwmac->reg;
+ u32 val, spd;
+
+ if (dwmac->clk)
+ clk_prepare_enable(dwmac->clk);
+
+ regmap_update_bits(regmap, reg, MII_PHY_SEL_MASK, phy_intf_sels[iface]);
+
+ val = (iface == PHY_INTERFACE_MODE_REVMII) ? 0 : ENMII;
+ regmap_update_bits(regmap, reg, ENMII_MASK, val);
+
+ if (IS_PHY_IF_MODE_GBIT(iface))
+ spd = SPEED_1000;
+ else
+ spd = SPEED_100;
+
+ setup_retime_src(dwmac, spd);
+
+ return 0;
+}
+
+static void *sti_dwmac_setup(struct platform_device *pdev)
+{
+ struct sti_dwmac *dwmac;
+ int ret;
+
+ dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL);
+ if (!dwmac)
+ return ERR_PTR(-ENOMEM);
+
+ ret = sti_dwmac_parse_data(dwmac, pdev);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to parse OF data\n");
+ return ERR_PTR(ret);
+ }
+
+ return dwmac;
+}
+
+const struct stmmac_of_data sti_gmac_data = {
+ .fix_mac_speed = sti_fix_mac_speed,
+ .setup = sti_dwmac_setup,
+ .init = sti_dwmac_init,
+ .exit = sti_dwmac_exit,
+};
diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
index a96c7c2f5f3f..650a4be6bce5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
+++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c
@@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p)
{
struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr;
- if (unlikely(priv->plat->has_gmac))
- /* Fill DES3 in case of RING mode */
- if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
- p->des3 = p->des2 + BUF_SIZE_8KiB;
+ /* Fill DES3 in case of RING mode */
+ if (priv->dma_buf_sz >= BUF_SIZE_8KiB)
+ p->des3 = p->des2 + BUF_SIZE_8KiB;
}
/* In ring mode we need to fill the desc3 because it is used as buffer */
@@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu)
return ret;
}
-const struct stmmac_ring_mode_ops ring_mode_ops = {
+const struct stmmac_mode_ops ring_mode_ops = {
.is_jumbo_frm = stmmac_is_jumbo_frm,
.jumbo_frm = stmmac_jumbo_frm,
.refill_desc3 = stmmac_refill_desc3,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index d9af26ed58ee..f9e60d7918c4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -133,6 +133,9 @@ bool stmmac_eee_init(struct stmmac_priv *priv);
#ifdef CONFIG_DWMAC_SUNXI
extern const struct stmmac_of_data sun7i_gmac_data;
#endif
+#ifdef CONFIG_DWMAC_STI
+extern const struct stmmac_of_data sti_gmac_data;
+#endif
extern struct platform_driver stmmac_pltfr_driver;
static inline int stmmac_register_platform(void)
{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index a2e7d2c96e36..8543e1cfd55e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -92,8 +92,8 @@ static int tc = TC_DEFAULT;
module_param(tc, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tc, "DMA threshold control value");
-#define DMA_BUFFER_SIZE BUF_SIZE_4KiB
-static int buf_sz = DMA_BUFFER_SIZE;
+#define DEFAULT_BUFSIZE 1536
+static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");
@@ -136,8 +136,8 @@ static void stmmac_verify_args(void)
dma_rxsize = DMA_RX_SIZE;
if (unlikely(dma_txsize < 0))
dma_txsize = DMA_TX_SIZE;
- if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB)))
- buf_sz = DMA_BUFFER_SIZE;
+ if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
+ buf_sz = DEFAULT_BUFSIZE;
if (unlikely(flow_ctrl > 1))
flow_ctrl = FLOW_AUTO;
else if (likely(flow_ctrl < 0))
@@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
/* MAC core supports the EEE feature. */
if (priv->dma_cap.eee) {
+ int tx_lpi_timer = priv->tx_lpi_timer;
+
/* Check if the PHY supports EEE */
- if (phy_init_eee(priv->phydev, 1))
+ if (phy_init_eee(priv->phydev, 1)) {
+ /* Handle the case where EEE can no longer be supported
+ * at run time (for example because the link partner
+ * capabilities have changed).
+ * In that case the driver disables its own timers.
+ */
+ if (priv->eee_active) {
+ pr_debug("stmmac: disable EEE\n");
+ del_timer_sync(&priv->eee_ctrl_timer);
+ priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
+ tx_lpi_timer);
+ }
+ priv->eee_active = 0;
goto out;
-
+ }
+ /* Activate the EEE and start timers */
if (!priv->eee_active) {
priv->eee_active = 1;
init_timer(&priv->eee_ctrl_timer);
@@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
priv->hw->mac->set_eee_timer(priv->ioaddr,
STMMAC_DEFAULT_LIT_LS,
- priv->tx_lpi_timer);
+ tx_lpi_timer);
} else
/* Set HW EEE according to the speed */
priv->hw->mac->set_eee_pls(priv->ioaddr,
priv->phydev->link);
- pr_info("stmmac: Energy-Efficient Ethernet initialized\n");
+ pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
ret = true;
}
@@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize)
ret = BUF_SIZE_8KiB;
else if (mtu >= BUF_SIZE_2KiB)
ret = BUF_SIZE_4KiB;
- else if (mtu >= DMA_BUFFER_SIZE)
+ else if (mtu > DEFAULT_BUFSIZE)
ret = BUF_SIZE_2KiB;
else
- ret = DMA_BUFFER_SIZE;
+ ret = DEFAULT_BUFSIZE;
return ret;
}
@@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
p->des2 = priv->rx_skbuff_dma[i];
- if ((priv->mode == STMMAC_RING_MODE) &&
+ if ((priv->hw->mode->init_desc3) &&
(priv->dma_buf_sz == BUF_SIZE_16KiB))
- priv->hw->ring->init_desc3(p);
+ priv->hw->mode->init_desc3(p);
return 0;
}
@@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev)
unsigned int bfsize = 0;
int ret = -ENOMEM;
- /* Set the max buffer size according to the DESC mode
- * and the MTU. Note that RING mode allows 16KiB bsize.
- */
- if (priv->mode == STMMAC_RING_MODE)
- bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu);
+ if (priv->hw->mode->set_16kib_bfsize)
+ bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu);
if (bfsize < BUF_SIZE_16KiB)
bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz);
@@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev)
/* Setup the chained descriptor addresses */
if (priv->mode == STMMAC_CHAIN_MODE) {
if (priv->extend_desc) {
- priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy,
- rxsize, 1);
- priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy,
- txsize, 1);
+ priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy,
+ rxsize, 1);
+ priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy,
+ txsize, 1);
} else {
- priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy,
- rxsize, 0);
- priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy,
- txsize, 0);
+ priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy,
+ rxsize, 0);
+ priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy,
+ txsize, 0);
}
}
@@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv)
DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = 0;
}
- priv->hw->ring->clean_desc3(priv, p);
+ priv->hw->mode->clean_desc3(priv, p);
if (likely(skb != NULL)) {
dev_kfree_skb(skb);
@@ -1705,7 +1717,7 @@ static int stmmac_open(struct net_device *dev)
priv->dma_rx_size = STMMAC_ALIGN(dma_rxsize);
priv->dma_buf_sz = STMMAC_ALIGN(buf_sz);
- alloc_dma_desc_resources(priv);
+ ret = alloc_dma_desc_resources(priv);
if (ret < 0) {
pr_err("%s: DMA descriptors allocation failed\n", __func__);
goto dma_desc_error;
@@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
int nfrags = skb_shinfo(skb)->nr_frags;
struct dma_desc *desc, *first;
unsigned int nopaged_len = skb_headlen(skb);
+ unsigned int enh_desc = priv->plat->enh_desc;
if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
if (!netif_queue_stopped(dev)) {
@@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
first = desc;
/* To program the descriptors according to the size of the frame */
- if (priv->mode == STMMAC_RING_MODE) {
- is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->ring->jumbo_frm(priv, skb,
- csum_insertion);
- } else {
- is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len,
- priv->plat->enh_desc);
- if (unlikely(is_jumbo))
- entry = priv->hw->chain->jumbo_frm(priv, skb,
- csum_insertion);
- }
+ if (enh_desc)
+ is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc);
+
if (likely(!is_jumbo)) {
desc->des2 = dma_map_single(priv->device, skb->data,
nopaged_len, DMA_TO_DEVICE);
priv->tx_skbuff_dma[entry] = desc->des2;
priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len,
csum_insertion, priv->mode);
- } else
+ } else {
desc = first;
+ entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion);
+ }
for (i = 0; i < nfrags; i++) {
const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
@@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
p->des2 = priv->rx_skbuff_dma[entry];
- priv->hw->ring->refill_desc3(priv, p);
+ priv->hw->mode->refill_desc3(priv, p);
if (netif_msg_rx_status(priv))
pr_debug("\trefill entry #%d\n", entry);
@@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
/* To use the chained or ring mode */
if (chain_mode) {
- priv->hw->chain = &chain_mode_ops;
+ priv->hw->mode = &chain_mode_ops;
pr_info(" Chain mode enabled\n");
priv->mode = STMMAC_CHAIN_MODE;
} else {
- priv->hw->ring = &ring_mode_ops;
+ priv->hw->mode = &ring_mode_ops;
pr_info(" Ring mode enabled\n");
priv->mode = STMMAC_RING_MODE;
}
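
The stmmac hunks above fold the separate ring/chain operation tables into a single hw->mode pointer that is chosen once in stmmac_hw_init(), so every later call site loses its per-mode branch. A minimal sketch of that pattern in plain C, using hypothetical demo_* names rather than the driver's real structures:

#include <stdbool.h>
#include <stdio.h>

/* One ops table per descriptor mode; callers only ever see "mode". */
struct demo_mode_ops {
	const char *name;
	bool (*is_jumbo)(unsigned int len);
};

static bool demo_ring_is_jumbo(unsigned int len)  { return len > 4096; }
static bool demo_chain_is_jumbo(unsigned int len) { return len > 8192; }

static const struct demo_mode_ops demo_ring_ops  = { "ring",  demo_ring_is_jumbo };
static const struct demo_mode_ops demo_chain_ops = { "chain", demo_chain_is_jumbo };

struct demo_hw {
	const struct demo_mode_ops *mode;	/* picked once at init time */
};

int main(void)
{
	struct demo_hw hw;
	int chain_mode = 0;			/* think: a module parameter */

	hw.mode = chain_mode ? &demo_chain_ops : &demo_ring_ops;

	/* Every later call site just uses hw.mode->..., no per-mode branch. */
	printf("%s mode: is_jumbo(6000) = %d\n",
	       hw.mode->name, hw.mode->is_jumbo(6000));
	return 0;
}

The selector runs once; all later paths stay identical no matter which table was picked, which is what lets the xmit and refill paths above drop their if/else blocks.
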
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 5884a7d2063b..8fb32a80f1c1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -33,6 +33,11 @@ static const struct of_device_id stmmac_dt_ids[] = {
#ifdef CONFIG_DWMAC_SUNXI
{ .compatible = "allwinner,sun7i-a20-gmac", .data = &sun7i_gmac_data},
#endif
+#ifdef CONFIG_DWMAC_STI
+ { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data},
+ { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data},
+#endif
/* SoC specific glue layers should come before generic bindings */
{ .compatible = "st,spear600-gmac"},
{ .compatible = "snps,dwmac-3.610"},
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index bde63e3af96f..7d6d8ec676c8 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -554,7 +554,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
* common for both the interface as the interface shares
* the same hardware resource.
*/
- for (i = 0; i <= priv->data.slaves; i++)
+ for (i = 0; i < priv->data.slaves; i++)
if (priv->slaves[i].ndev->flags & IFF_PROMISC)
flag = true;
@@ -578,7 +578,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
unsigned long timeout = jiffies + HZ;
/* Disable Learn for all ports */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i < priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 1);
cpsw_ale_control_set(ale, i,
@@ -606,7 +606,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
/* Enable Learn for all ports */
- for (i = 0; i <= priv->data.slaves; i++) {
+ for (i = 0; i < priv->data.slaves; i++) {
cpsw_ale_control_set(ale, i,
ALE_PORT_NOLEARN, 0);
cpsw_ale_control_set(ale, i,
@@ -1164,11 +1164,17 @@ static void cpsw_init_host_port(struct cpsw_priv *priv)
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
+ u32 slave_port;
+
+ slave_port = cpsw_get_slave_port(priv, slave->slave_num);
+
if (!slave->phy)
return;
phy_stop(slave->phy);
phy_disconnect(slave->phy);
slave->phy = NULL;
+ cpsw_ale_control_set(priv->ale, slave_port,
+ ALE_PORT_STATE, ALE_PORT_STATE_DISABLE);
}
static int cpsw_ndo_open(struct net_device *ndev)
@@ -1878,14 +1884,29 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
phyid = be32_to_cpup(parp+1);
mdio = of_find_device_by_node(mdio_node);
- snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
- PHY_ID_FMT, mdio->name, phyid);
+
+ if (strncmp(mdio->name, "gpio", 4) == 0) {
+ /* GPIO bitbang MDIO driver attached */
+ struct mii_bus *bus = dev_get_drvdata(&mdio->dev);
+
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, bus->id, phyid);
+ } else {
+ /* davinci MDIO driver attached */
+ snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
+ PHY_ID_FMT, mdio->name, phyid);
+ }
mac_addr = of_get_mac_address(slave_node);
if (mac_addr)
memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
slave_data->phy_if = of_get_phy_mode(slave_node);
+ if (slave_data->phy_if < 0) {
+ pr_err("Missing or malformed slave[%d] phy-mode property\n",
+ i);
+ return slave_data->phy_if;
+ }
if (data->dual_emac) {
if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
@@ -2208,10 +2229,6 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_ale_ret;
}
- if (cpts_register(&pdev->dev, priv->cpts,
- data->cpts_clock_mult, data->cpts_clock_shift))
- dev_err(priv->dev, "error registering cpts device\n");
-
cpsw_notice(priv, probe, "initialized device (regs %pa, irq %d)\n",
&ss_res->start, ndev->irq);
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index 364d0c7952c0..88ef27067bf2 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -355,7 +355,7 @@ int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
int i;
spin_lock_irqsave(&ctlr->lock, flags);
- if (ctlr->state != CPDMA_STATE_ACTIVE) {
+ if (ctlr->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&ctlr->lock, flags);
return -EINVAL;
}
@@ -891,7 +891,7 @@ int cpdma_chan_stop(struct cpdma_chan *chan)
unsigned timeout;
spin_lock_irqsave(&chan->lock, flags);
- if (chan->state != CPDMA_STATE_ACTIVE) {
+ if (chan->state == CPDMA_STATE_TEARDOWN) {
spin_unlock_irqrestore(&chan->lock, flags);
return -EINVAL;
}
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index cd9b164a0434..8f0e69ce07ca 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1532,9 +1532,9 @@ static int emac_dev_open(struct net_device *ndev)
struct device *emac_dev = &ndev->dev;
u32 cnt;
struct resource *res;
- int ret;
+ int q, m, ret;
+ int res_num = 0, irq_num = 0;
int i = 0;
- int k = 0;
struct emac_priv *priv = netdev_priv(ndev);
pm_runtime_get(&priv->pdev->dev);
@@ -1564,15 +1564,24 @@ static int emac_dev_open(struct net_device *ndev)
}
/* Request IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ,
+ res_num))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++) {
+ dev_err(emac_dev, "Request IRQ %d\n", irq_num);
+ if (request_irq(irq_num, emac_irq, 0, ndev->name,
+ ndev)) {
+ dev_err(emac_dev,
+ "DaVinci EMAC: request_irq() failed\n");
+ ret = -EBUSY;
- while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
- for (i = res->start; i <= res->end; i++) {
- if (devm_request_irq(&priv->pdev->dev, i, emac_irq,
- 0, ndev->name, ndev))
goto rollback;
+ }
}
- k++;
+ res_num++;
}
+ /* prepare counters for rollback in case of an error */
+ res_num--;
+ irq_num--;
/* Start/Enable EMAC hardware */
emac_hw_enable(priv);
@@ -1639,11 +1648,23 @@ static int emac_dev_open(struct net_device *ndev)
return 0;
-rollback:
-
- dev_err(emac_dev, "DaVinci EMAC: devm_request_irq() failed");
- ret = -EBUSY;
err:
+ emac_int_disable(priv);
+ napi_disable(&priv->napi);
+
+rollback:
+ for (q = res_num; q >= 0; q--) {
+ res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, q);
+ /* at the first iteration, irq_num is already set to the
+ * right value
+ */
+ if (q != res_num)
+ irq_num = res->end;
+
+ for (m = irq_num; m >= res->start; m--)
+ free_irq(m, ndev);
+ }
+ cpdma_ctlr_stop(priv->dma);
pm_runtime_put(&priv->pdev->dev);
return ret;
}
@@ -1659,6 +1680,9 @@ err:
*/
static int emac_dev_stop(struct net_device *ndev)
{
+ struct resource *res;
+ int i = 0;
+ int irq_num;
struct emac_priv *priv = netdev_priv(ndev);
struct device *emac_dev = &ndev->dev;
@@ -1674,6 +1698,13 @@ static int emac_dev_stop(struct net_device *ndev)
if (priv->phydev)
phy_disconnect(priv->phydev);
+ /* Free IRQ */
+ while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, i))) {
+ for (irq_num = res->start; irq_num <= res->end; irq_num++)
+ free_irq(irq_num, priv->ndev);
+ i++;
+ }
+
if (netif_msg_drv(priv))
dev_notice(emac_dev, "DaVinci EMAC: %s stopped\n", ndev->name);
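
The emac_dev_open()/emac_dev_stop() hunks above move from devm_request_irq() to plain request_irq() plus an explicit unwind. A minimal sketch of the request-all-then-rollback pattern for a platform device with several IRQ resources, assuming only the standard platform_get_resource()/request_irq()/free_irq() APIs and a hypothetical demo_ prefix; this illustrates the shape, not the driver's exact code:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int demo_request_all_irqs(struct platform_device *pdev,
				 irq_handler_t handler, void *cookie)
{
	struct resource *res;
	int num = 0, irq;

	while ((res = platform_get_resource(pdev, IORESOURCE_IRQ, num))) {
		for (irq = res->start; irq <= (int)res->end; irq++)
			if (request_irq(irq, handler, 0,
					dev_name(&pdev->dev), cookie))
				goto rollback;
		num++;
	}
	return 0;

rollback:
	/* In the failing resource only [res->start, irq - 1] succeeded. */
	for (irq--; irq >= (int)res->start; irq--)
		free_irq(irq, cookie);
	/* Earlier resources were requested completely; free them in full. */
	while (--num >= 0) {
		res = platform_get_resource(pdev, IORESOURCE_IRQ, num);
		for (irq = res->end; irq >= (int)res->start; irq--)
			free_irq(irq, cookie);
	}
	return -EBUSY;
}
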
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 023237a65720..17503da9f7a5 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2071,7 +2071,7 @@ static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
/* Return subqueue id on this core (one per core). */
static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return smp_processor_id();
}
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index ef312bc6b865..6ac20a6738f4 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -923,7 +923,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc) {
dev_err(&pdev->dev,
"32-bit PCI DMA addresses not supported by the card!?\n");
- goto err_out;
+ goto err_out_pci_disable;
}
/* sanity check */
@@ -931,7 +931,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
(pci_resource_len(pdev, 1) < io_size)) {
rc = -EIO;
dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
- goto err_out;
+ goto err_out_pci_disable;
}
pioaddr = pci_resource_start(pdev, 0);
@@ -942,7 +942,7 @@ static int rhine_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev = alloc_etherdev(sizeof(struct rhine_private));
if (!dev) {
rc = -ENOMEM;
- goto err_out;
+ goto err_out_pci_disable;
}
SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1084,6 +1084,8 @@ err_out_free_res:
pci_release_regions(pdev);
err_out_free_netdev:
free_netdev(dev);
+err_out_pci_disable:
+ pci_disable_device(pdev);
err_out:
return rc;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 1ec65feebb9e..4bfdf8c7ada0 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -26,6 +26,7 @@
#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
+#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/skbuff.h>
#include <linux/spinlock.h>
@@ -600,7 +601,8 @@ static void axienet_start_xmit_done(struct net_device *ndev)
size += status & XAXIDMA_BD_STS_ACTUAL_LEN_MASK;
packets++;
- lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM;
+ ++lp->tx_bd_ci;
+ lp->tx_bd_ci %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
status = cur_p->status;
}
@@ -686,7 +688,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_headlen(skb), DMA_TO_DEVICE);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
frag = &skb_shinfo(skb)->frags[ii];
cur_p->phys = dma_map_single(ndev->dev.parent,
@@ -702,7 +705,8 @@ static int axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
/* Start the transfer */
axienet_dma_out32(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
- lp->tx_bd_tail = ++lp->tx_bd_tail % TX_BD_NUM;
+ ++lp->tx_bd_tail;
+ lp->tx_bd_tail %= TX_BD_NUM;
return NETDEV_TX_OK;
}
@@ -774,7 +778,8 @@ static void axienet_recv(struct net_device *ndev)
cur_p->status = 0;
cur_p->sw_id_offset = (u32) new_skb;
- lp->rx_bd_ci = ++lp->rx_bd_ci % RX_BD_NUM;
+ ++lp->rx_bd_ci;
+ lp->rx_bd_ci %= RX_BD_NUM;
cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
}
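
The axienet hunks above split statements of the form lp->tx_bd_ci = ++lp->tx_bd_ci % TX_BD_NUM; into two statements. The original writes the same variable twice in one unsequenced expression, which is undefined behaviour and is reported by gcc's -Wsequence-point warning. A small standalone illustration of the well-defined ring-index advance (hypothetical names):

#include <stdio.h>

#define DEMO_BD_NUM 4

int main(void)
{
	unsigned int idx = 0;
	int i;

	for (i = 0; i < 6; i++) {
		/*
		 * Well defined: one modification of idx per statement.
		 * The old "idx = ++idx % DEMO_BD_NUM;" form stores to idx
		 * twice within a single unsequenced expression.
		 */
		++idx;
		idx %= DEMO_BD_NUM;
		printf("descriptor index -> %u\n", idx);
	}
	return 0;
}
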
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 7756118c2f0a..d6fce9750b95 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -88,8 +88,12 @@ static int netvsc_open(struct net_device *net)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *device_obj = net_device_ctx->device_ctx;
+ struct netvsc_device *nvdev;
+ struct rndis_device *rdev;
int ret = 0;
+ netif_carrier_off(net);
+
/* Open up the device */
ret = rndis_filter_open(device_obj);
if (ret != 0) {
@@ -99,6 +103,11 @@ static int netvsc_open(struct net_device *net)
netif_start_queue(net);
+ nvdev = hv_get_drvdata(device_obj);
+ rdev = nvdev->extension;
+ if (!rdev->link_state)
+ netif_carrier_on(net);
+
return ret;
}
@@ -229,23 +238,24 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct net_device *net;
struct net_device_context *ndev_ctx;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
net_device = hv_get_drvdata(device_obj);
+ rdev = net_device->extension;
+
+ rdev->link_state = status != 1;
+
net = net_device->ndev;
- if (!net) {
- netdev_err(net, "got link status but net device "
- "not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED)
return;
- }
+ ndev_ctx = netdev_priv(net);
if (status == 1) {
- netif_carrier_on(net);
- ndev_ctx = netdev_priv(net);
schedule_delayed_work(&ndev_ctx->dwork, 0);
schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
} else {
- netif_carrier_off(net);
+ schedule_delayed_work(&ndev_ctx->dwork, 0);
}
}
@@ -388,17 +398,35 @@ static const struct net_device_ops device_ops = {
* current context when receiving RNDIS_STATUS_MEDIA_CONNECT event. So, add
* another netif_notify_peers() into a delayed work, otherwise GARP packet
* will not be sent after quick migration, and cause network disconnection.
+ * Also, we update the carrier status here.
*/
-static void netvsc_send_garp(struct work_struct *w)
+static void netvsc_link_change(struct work_struct *w)
{
struct net_device_context *ndev_ctx;
struct net_device *net;
struct netvsc_device *net_device;
+ struct rndis_device *rdev;
+ bool notify;
+
+ rtnl_lock();
ndev_ctx = container_of(w, struct net_device_context, dwork.work);
net_device = hv_get_drvdata(ndev_ctx->device_ctx);
+ rdev = net_device->extension;
net = net_device->ndev;
- netdev_notify_peers(net);
+
+ if (rdev->link_state) {
+ netif_carrier_off(net);
+ notify = false;
+ } else {
+ netif_carrier_on(net);
+ notify = true;
+ }
+
+ rtnl_unlock();
+
+ if (notify)
+ netdev_notify_peers(net);
}
@@ -414,13 +442,12 @@ static int netvsc_probe(struct hv_device *dev,
if (!net)
return -ENOMEM;
- /* Set initial state */
netif_carrier_off(net);
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
hv_set_drvdata(dev, net);
- INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_send_garp);
+ INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
net->netdev_ops = &device_ops;
@@ -443,13 +470,13 @@ static int netvsc_probe(struct hv_device *dev,
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
- netif_carrier_on(net);
-
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
rndis_filter_device_remove(dev);
free_netdev(net);
+ } else {
+ schedule_delayed_work(&net_device_ctx->dwork, 0);
}
return ret;
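
The netvsc hunks above route all carrier changes through one delayed work item: the RNDIS status callback only records the new link state and schedules the work, and the work function toggles the carrier under rtnl_lock(), sending the gratuitous ARP only after dropping the lock. A minimal sketch of that shape with a hypothetical demo_ctx type (initialisation of the work item with INIT_DELAYED_WORK() is assumed to have happened at probe time):

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct net_device *ndev;
	bool link_down;			/* written by the status callback */
	struct delayed_work dwork;
};

static void demo_link_change(struct work_struct *w)
{
	struct demo_ctx *ctx = container_of(w, struct demo_ctx, dwork.work);
	bool notify;

	rtnl_lock();
	if (ctx->link_down) {
		netif_carrier_off(ctx->ndev);
		notify = false;
	} else {
		netif_carrier_on(ctx->ndev);
		notify = true;
	}
	rtnl_unlock();

	/* Announce the address (gratuitous ARP/ND) only outside the lock. */
	if (notify)
		netdev_notify_peers(ctx->ndev);
}

/* Status callback: record the new state, let the work item act on it. */
static void demo_status_cb(struct demo_ctx *ctx, bool link_down)
{
	ctx->link_down = link_down;
	schedule_delayed_work(&ctx->dwork, 0);
}
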
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 1084e5de3ceb..b54fd257652b 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -243,6 +243,22 @@ static int rndis_filter_send_request(struct rndis_device *dev,
return ret;
}
+static void rndis_set_link_state(struct rndis_device *rdev,
+ struct rndis_request *request)
+{
+ u32 link_status;
+ struct rndis_query_complete *query_complete;
+
+ query_complete = &request->response_msg.msg.query_complete;
+
+ if (query_complete->status == RNDIS_STATUS_SUCCESS &&
+ query_complete->info_buflen == sizeof(u32)) {
+ memcpy(&link_status, (void *)((unsigned long)query_complete +
+ query_complete->info_buf_offset), sizeof(u32));
+ rdev->link_state = link_status != 0;
+ }
+}
+
static void rndis_filter_receive_response(struct rndis_device *dev,
struct rndis_message *resp)
{
@@ -272,6 +288,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
sizeof(struct rndis_message) + RNDIS_EXT_LEN) {
memcpy(&request->response_msg, resp,
resp->msg_len);
+ if (request->request_msg.ndis_msg_type ==
+ RNDIS_MSG_QUERY && request->request_msg.msg.
+ query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS)
+ rndis_set_link_state(dev, request);
} else {
netdev_err(ndev,
"rndis response buffer overflow "
@@ -620,7 +640,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev)
ret = rndis_filter_query_device(dev,
RNDIS_OID_GEN_MEDIA_CONNECT_STATUS,
&link_status, &size);
- dev->link_state = (link_status != 0) ? true : false;
return ret;
}
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index ab31544bc254..a30258aad139 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -546,12 +546,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
int rc;
unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
if (lp->irq_busy) {
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
return -EBUSY;
}
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
might_sleep();
@@ -725,10 +725,11 @@ static void at86rf230_irqwork_level(struct work_struct *work)
static irqreturn_t at86rf230_isr(int irq, void *data)
{
struct at86rf230_local *lp = data;
+ unsigned long flags;
- spin_lock(&lp->lock);
+ spin_lock_irqsave(&lp->lock, flags);
lp->irq_busy = 1;
- spin_unlock(&lp->lock);
+ spin_unlock_irqrestore(&lp->lock, flags);
schedule_work(&lp->irqwork);
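
The at86rf230 hunks above switch lp->lock to the irqsave variants because the lock is taken both from process context and from the interrupt handler; holding it in process context without masking interrupts could deadlock against the ISR on the same CPU. A small sketch of the convention, using the real spinlock/IRQ APIs with hypothetical demo_ names:

#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct demo_dev {
	spinlock_t lock;
	int irq_busy;
};

static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_dev *d = data;
	unsigned long flags;

	spin_lock_irqsave(&d->lock, flags);
	d->irq_busy = 1;
	spin_unlock_irqrestore(&d->lock, flags);
	return IRQ_HANDLED;
}

/* Process context: interrupts must be masked while the lock is held,
 * otherwise the ISR above could spin on the same lock on this CPU. */
static int demo_try_xmit(struct demo_dev *d)
{
	unsigned long flags;
	int busy;

	spin_lock_irqsave(&d->lock, flags);
	busy = d->irq_busy;
	spin_unlock_irqrestore(&d->lock, flags);

	return busy ? -EBUSY : 0;
}
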
diff --git a/drivers/net/irda/Kconfig b/drivers/net/irda/Kconfig
index 2dc82f1d2e70..3da44d5d9149 100644
--- a/drivers/net/irda/Kconfig
+++ b/drivers/net/irda/Kconfig
@@ -210,13 +210,6 @@ config KINGSUN_DONGLE
To compile it as a module, choose M here: the module will be called
kingsun-sir.
-config EP7211_DONGLE
- tristate "Cirrus Logic clps711x I/R support"
- depends on IRTTY_SIR && ARCH_CLPS711X && IRDA
- help
- Say Y here if you want to build support for the Cirrus logic
- EP7211 chipset's infrared module.
-
config KSDAZZLE_DONGLE
tristate "KingSun Dazzle IrDA-USB dongle"
depends on IRDA && USB
diff --git a/drivers/net/irda/Makefile b/drivers/net/irda/Makefile
index dfc64537f62f..be8ab5b9a4a2 100644
--- a/drivers/net/irda/Makefile
+++ b/drivers/net/irda/Makefile
@@ -35,7 +35,6 @@ obj-$(CONFIG_MCP2120_DONGLE) += mcp2120-sir.o
obj-$(CONFIG_ACT200L_DONGLE) += act200l-sir.o
obj-$(CONFIG_MA600_DONGLE) += ma600-sir.o
obj-$(CONFIG_TOIM3232_DONGLE) += toim3232-sir.o
-obj-$(CONFIG_EP7211_DONGLE) += ep7211-sir.o
obj-$(CONFIG_KINGSUN_DONGLE) += kingsun-sir.o
obj-$(CONFIG_KSDAZZLE_DONGLE) += ksdazzle-sir.o
obj-$(CONFIG_KS959_DONGLE) += ks959-sir.o
diff --git a/drivers/net/irda/ep7211-sir.c b/drivers/net/irda/ep7211-sir.c
deleted file mode 100644
index 5fe1f4dd3369..000000000000
--- a/drivers/net/irda/ep7211-sir.c
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- * IR port driver for the Cirrus Logic CLPS711X processors
- *
- * Copyright 2001, Blue Mug Inc. All rights reserved.
- * Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-
-#include <mach/hardware.h>
-
-#include "sir-dev.h"
-
-static int clps711x_dongle_open(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn on the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon |= SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static int clps711x_dongle_close(struct sir_dev *dev)
-{
- unsigned int syscon;
-
- /* Turn off the SIR encoder. */
- syscon = clps_readl(SYSCON1);
- syscon &= ~SYSCON1_SIREN;
- clps_writel(syscon, SYSCON1);
-
- return 0;
-}
-
-static struct dongle_driver clps711x_dongle = {
- .owner = THIS_MODULE,
- .driver_name = "EP7211 IR driver",
- .type = IRDA_EP7211_DONGLE,
- .open = clps711x_dongle_open,
- .close = clps711x_dongle_close,
-};
-
-static int clps711x_sir_probe(struct platform_device *pdev)
-{
- return irda_register_dongle(&clps711x_dongle);
-}
-
-static int clps711x_sir_remove(struct platform_device *pdev)
-{
- return irda_unregister_dongle(&clps711x_dongle);
-}
-
-static struct platform_driver clps711x_sir_driver = {
- .driver = {
- .name = "sir-clps711x",
- .owner = THIS_MODULE,
- },
- .probe = clps711x_sir_probe,
- .remove = clps711x_sir_remove,
-};
-module_platform_driver(clps711x_sir_driver);
-
-MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
-MODULE_DESCRIPTION("EP7211 IR dongle driver");
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */
diff --git a/drivers/net/irda/irtty-sir.c b/drivers/net/irda/irtty-sir.c
index 177441afeb96..24b6dddd7f2f 100644
--- a/drivers/net/irda/irtty-sir.c
+++ b/drivers/net/irda/irtty-sir.c
@@ -522,7 +522,6 @@ static void irtty_close(struct tty_struct *tty)
sirdev_put_instance(priv->dev);
/* Stop tty */
- irtty_stop_receiver(tty, TRUE);
clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
if (tty->ops->stop)
tty->ops->stop(tty);
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 8433de4509c7..1831fb7cd017 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -506,6 +506,9 @@ static int macvlan_change_mtu(struct net_device *dev, int new_mtu)
static struct lock_class_key macvlan_netdev_xmit_lock_key;
static struct lock_class_key macvlan_netdev_addr_lock_key;
+#define ALWAYS_ON_FEATURES \
+ (NETIF_F_SG | NETIF_F_GEN_CSUM | NETIF_F_GSO_SOFTWARE | NETIF_F_LLTX)
+
#define MACVLAN_FEATURES \
(NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \
NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \
@@ -539,7 +542,7 @@ static int macvlan_init(struct net_device *dev)
dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
(lowerdev->state & MACVLAN_STATE_MASK);
dev->features = lowerdev->features & MACVLAN_FEATURES;
- dev->features |= NETIF_F_LLTX;
+ dev->features |= ALWAYS_ON_FEATURES;
dev->gso_max_size = lowerdev->gso_max_size;
dev->iflink = lowerdev->ifindex;
dev->hard_header_len = lowerdev->hard_header_len;
@@ -699,7 +702,7 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
features = netdev_increment_features(vlan->lowerdev->features,
features,
mask);
- features |= NETIF_F_LLTX;
+ features |= ALWAYS_ON_FEATURES;
return features;
}
@@ -879,14 +882,15 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
dev->priv_flags |= IFF_MACVLAN;
err = netdev_upper_dev_link(lowerdev, dev);
if (err)
- goto destroy_port;
-
+ goto unregister_netdev;
list_add_tail_rcu(&vlan->list, &port->vlans);
netif_stacked_transfer_operstate(lowerdev, dev);
return 0;
+unregister_netdev:
+ unregister_netdevice(dev);
destroy_port:
port->count -= 1;
if (!port->count)
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 547725fa8671..98e7cbf720a5 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -437,7 +437,10 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
if (on) {
gpio_num = gpio_tab[EXTTS0_GPIO + index];
evnt |= (gpio_num & EVNT_GPIO_MASK) << EVNT_GPIO_SHIFT;
- evnt |= EVNT_RISE;
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ evnt |= EVNT_FALL;
+ else
+ evnt |= EVNT_RISE;
}
ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
return 0;
@@ -1003,11 +1006,6 @@ static int dp83640_probe(struct phy_device *phydev)
} else
list_add_tail(&dp83640->list, &clock->phylist);
- if (clock->chosen && !list_empty(&clock->phylist))
- recalibrate(clock);
- else
- enable_broadcast(dp83640->phydev, clock->page, 1);
-
dp83640_clock_put(clock);
return 0;
@@ -1058,6 +1056,21 @@ static void dp83640_remove(struct phy_device *phydev)
kfree(dp83640);
}
+static int dp83640_config_init(struct phy_device *phydev)
+{
+ struct dp83640_private *dp83640 = phydev->priv;
+ struct dp83640_clock *clock = dp83640->clock;
+
+ if (clock->chosen && !list_empty(&clock->phylist))
+ recalibrate(clock);
+ else
+ enable_broadcast(phydev, clock->page, 1);
+
+ enable_status_frames(phydev, true);
+ ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+ return 0;
+}
+
static int dp83640_ack_interrupt(struct phy_device *phydev)
{
int err = phy_read(phydev, MII_DP83640_MISR);
@@ -1195,11 +1208,6 @@ static int dp83640_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
mutex_lock(&dp83640->clock->extreg_lock);
- if (dp83640->hwts_tx_en || dp83640->hwts_rx_en) {
- enable_status_frames(phydev, true);
- ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
- }
-
ext_write(0, phydev, PAGE5, PTP_TXCFG0, txcfg0);
ext_write(0, phydev, PAGE5, PTP_RXCFG0, rxcfg0);
@@ -1281,6 +1289,7 @@ static void dp83640_txtstamp(struct phy_device *phydev,
}
/* fall through */
case HWTSTAMP_TX_ON:
+ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
skb_queue_tail(&dp83640->tx_queue, skb);
schedule_work(&dp83640->ts_work);
break;
@@ -1330,6 +1339,7 @@ static struct phy_driver dp83640_driver = {
.flags = PHY_HAS_INTERRUPT,
.probe = dp83640_probe,
.remove = dp83640_remove,
+ .config_init = dp83640_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = dp83640_ack_interrupt,
diff --git a/drivers/net/phy/mdio-sun4i.c b/drivers/net/phy/mdio-sun4i.c
index bb88bc7d81fb..9367acc84fbb 100644
--- a/drivers/net/phy/mdio-sun4i.c
+++ b/drivers/net/phy/mdio-sun4i.c
@@ -170,6 +170,9 @@ static int sun4i_mdio_remove(struct platform_device *pdev)
}
static const struct of_device_id sun4i_mdio_dt_ids[] = {
+ { .compatible = "allwinner,sun4i-a10-mdio" },
+
+ /* Deprecated */
{ .compatible = "allwinner,sun4i-mdio" },
{ }
};
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 19c9eca0ef26..76d96b9ebcdb 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -164,9 +164,9 @@ static const struct phy_setting settings[] = {
* of that setting. Returns the index of the last setting if
* none of the others match.
*/
-static inline int phy_find_setting(int speed, int duplex)
+static inline unsigned int phy_find_setting(int speed, int duplex)
{
- int idx = 0;
+ unsigned int idx = 0;
while (idx < ARRAY_SIZE(settings) &&
(settings[idx].speed != speed || settings[idx].duplex != duplex))
@@ -185,7 +185,7 @@ static inline int phy_find_setting(int speed, int duplex)
* the mask in features. Returns the index of the last setting
* if nothing else matches.
*/
-static inline int phy_find_valid(int idx, u32 features)
+static inline unsigned int phy_find_valid(unsigned int idx, u32 features)
{
while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features))
idx++;
@@ -204,7 +204,7 @@ static inline int phy_find_valid(int idx, u32 features)
static void phy_sanitize_settings(struct phy_device *phydev)
{
u32 features = phydev->supported;
- int idx;
+ unsigned int idx;
/* Sanitize settings based on PHY capabilities */
if ((features & SUPPORTED_Autoneg) == 0)
@@ -954,7 +954,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
(phydev->interface == PHY_INTERFACE_MODE_RGMII))) {
int eee_lp, eee_cap, eee_adv;
u32 lp, cap, adv;
- int idx, status;
+ int status;
+ unsigned int idx;
/* Read phy status to properly get the right settings */
status = phy_read_status(phydev);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 4b03e63639b7..2f6989b1e0dc 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -683,10 +683,9 @@ EXPORT_SYMBOL(phy_detach);
int phy_suspend(struct phy_device *phydev)
{
struct phy_driver *phydrv = to_phy_driver(phydev->dev.driver);
- struct ethtool_wolinfo wol;
+ struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
/* If the device has WOL enabled, we cannot suspend the PHY */
- wol.cmd = ETHTOOL_GWOL;
phy_ethtool_get_wol(phydev, &wol);
if (wol.wolopts)
return -EBUSY;
@@ -719,7 +718,7 @@ int phy_resume(struct phy_device *phydev)
static int genphy_config_advert(struct phy_device *phydev)
{
u32 advertise;
- int oldadv, adv;
+ int oldadv, adv, bmsr;
int err, changed = 0;
/* Only allow advertising what this PHY supports */
@@ -744,26 +743,36 @@ static int genphy_config_advert(struct phy_device *phydev)
changed = 1;
}
+ bmsr = phy_read(phydev, MII_BMSR);
+ if (bmsr < 0)
+ return bmsr;
+
+ /* Per 802.3-2008, Section 22.2.4.2.16 (Extended status), all
+ * 1000 Mbit/s capable PHYs shall have the BMSR_ESTATEN bit set to a
+ * logical 1.
+ */
+ if (!(bmsr & BMSR_ESTATEN))
+ return changed;
+
/* Configure gigabit if it's supported */
+ adv = phy_read(phydev, MII_CTRL1000);
+ if (adv < 0)
+ return adv;
+
+ oldadv = adv;
+ adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
+
if (phydev->supported & (SUPPORTED_1000baseT_Half |
SUPPORTED_1000baseT_Full)) {
- adv = phy_read(phydev, MII_CTRL1000);
- if (adv < 0)
- return adv;
-
- oldadv = adv;
- adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
adv |= ethtool_adv_to_mii_ctrl1000_t(advertise);
-
- if (adv != oldadv) {
- err = phy_write(phydev, MII_CTRL1000, adv);
-
- if (err < 0)
- return err;
+ if (adv != oldadv)
changed = 1;
- }
}
+ err = phy_write(phydev, MII_CTRL1000, adv);
+ if (err < 0)
+ return err;
+
return changed;
}
@@ -906,6 +915,8 @@ int genphy_read_status(struct phy_device *phydev)
int err;
int lpa;
int lpagb = 0;
+ int common_adv;
+ int common_adv_gb = 0;
/* Update the link, but return if there was an error */
err = genphy_update_link(phydev);
@@ -927,7 +938,7 @@ int genphy_read_status(struct phy_device *phydev)
phydev->lp_advertising =
mii_stat1000_to_ethtool_lpa_t(lpagb);
- lpagb &= adv << 2;
+ common_adv_gb = lpagb & adv << 2;
}
lpa = phy_read(phydev, MII_LPA);
@@ -940,25 +951,25 @@ int genphy_read_status(struct phy_device *phydev)
if (adv < 0)
return adv;
- lpa &= adv;
+ common_adv = lpa & adv;
phydev->speed = SPEED_10;
phydev->duplex = DUPLEX_HALF;
phydev->pause = 0;
phydev->asym_pause = 0;
- if (lpagb & (LPA_1000FULL | LPA_1000HALF)) {
+ if (common_adv_gb & (LPA_1000FULL | LPA_1000HALF)) {
phydev->speed = SPEED_1000;
- if (lpagb & LPA_1000FULL)
+ if (common_adv_gb & LPA_1000FULL)
phydev->duplex = DUPLEX_FULL;
- } else if (lpa & (LPA_100FULL | LPA_100HALF)) {
+ } else if (common_adv & (LPA_100FULL | LPA_100HALF)) {
phydev->speed = SPEED_100;
- if (lpa & LPA_100FULL)
+ if (common_adv & LPA_100FULL)
phydev->duplex = DUPLEX_FULL;
} else
- if (lpa & LPA_10FULL)
+ if (common_adv & LPA_10FULL)
phydev->duplex = DUPLEX_FULL;
if (phydev->duplex == DUPLEX_FULL) {
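
The genphy_read_status() change above resolves speed and duplex from the intersection of the local and link-partner advertisements (common_adv and common_adv_gb) instead of overwriting lpa/lpagb in place. A small standalone sketch of that resolution step, reusing the LPA_* bit definitions; it is an illustration, not the kernel helper itself:

#include <stdio.h>
#include <linux/mii.h>	/* LPA_* advertisement bit definitions (uapi) */

/* Pick the best mode both ends agreed on.  "adv" and "lpa" are the local
 * and link-partner 10/100 advertisements, "gb_common" the pre-computed
 * intersection of the 1000BASE-T advertisements, all in LPA_* bit terms. */
static void demo_resolve(unsigned int adv, unsigned int lpa,
			 unsigned int gb_common)
{
	unsigned int common = adv & lpa;
	int speed = 10, full;

	if (gb_common & (LPA_1000FULL | LPA_1000HALF)) {
		speed = 1000;
		full = !!(gb_common & LPA_1000FULL);
	} else if (common & (LPA_100FULL | LPA_100HALF)) {
		speed = 100;
		full = !!(common & LPA_100FULL);
	} else {
		full = !!(common & LPA_10FULL);
	}

	printf("%d Mb/s, %s duplex\n", speed, full ? "full" : "half");
}

int main(void)
{
	/* Both ends agree on 100FULL; no common gigabit modes. */
	demo_resolve(LPA_100FULL | LPA_100HALF | LPA_10FULL,
		     LPA_100FULL | LPA_10HALF, 0);
	return 0;
}
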
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 28407426fd6f..c8624a8235ab 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1648,7 +1648,7 @@ static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
}
static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
/*
* This helper function exists to help dev_pick_tx get the correct
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 44c4db8450f0..26f8635b027d 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -366,7 +366,7 @@ static inline void tun_flow_save_rps_rxhash(struct tun_flow_entry *e, u32 hash)
* hope the rxq no. may help here.
*/
static u16 tun_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct tun_struct *tun = netdev_priv(dev);
struct tun_flow_entry *e;
@@ -1686,7 +1686,9 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
TUN_USER_FEATURES | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
dev->features = dev->hw_features;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX |
+ NETIF_F_HW_VLAN_STAG_TX);
INIT_LIST_HEAD(&tun->disabled);
err = tun_attach(tun, file, false);
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 6b638a066c1d..7e7269fd3707 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -292,6 +292,21 @@ config USB_NET_SR9700
This option adds support for CoreChip-sz SR9700 based USB 1.1
10/100 Ethernet adapters.
+config USB_NET_SR9800
+ tristate "CoreChip-sz SR9800 based USB 2.0 10/100 ethernet devices"
+ depends on USB_USBNET
+ select CRC32
+ ---help---
+ Say Y if you want to use a 100Mbps USB Ethernet device based
+ on the CoreChip-sz SR9800 chip.
+
+ This driver makes the adapter appear as a normal Ethernet interface,
+ typically on eth0, if it is the only ethernet device, or perhaps on
+ eth1, if you have a PCI or ISA ethernet card installed.
+
+ To compile this driver as a module, choose M here: the
+ module will be called sr9800.
+
config USB_NET_SMSC75XX
tristate "SMSC LAN75XX based USB 2.0 gigabit ethernet devices"
depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b17b5e88bbaf..e2797f1e1b31 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -11,10 +11,11 @@ obj-$(CONFIG_USB_HSO) += hso.o
obj-$(CONFIG_USB_NET_AX8817X) += asix.o
asix-y := asix_devices.o asix_common.o ax88172a.o
obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o
-obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o
+obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o
obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o
obj-$(CONFIG_USB_NET_DM9601) += dm9601.o
obj-$(CONFIG_USB_NET_SR9700) += sr9700.o
+obj-$(CONFIG_USB_NET_SR9800) += sr9800.o
obj-$(CONFIG_USB_NET_SMSC75XX) += smsc75xx.o
obj-$(CONFIG_USB_NET_SMSC95XX) += smsc95xx.o
obj-$(CONFIG_USB_NET_GL620A) += gl620a.o
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 9765a7d4766d..5d194093f3e1 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -917,7 +917,8 @@ static const struct driver_info ax88178_info = {
.status = asix_status,
.link_reset = ax88178_link_reset,
.reset = ax88178_reset,
- .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR |
+ FLAG_MULTI_PACKET,
.rx_fixup = asix_rx_fixup_common,
.tx_fixup = asix_tx_fixup,
};
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index d6f64dad05bc..054e59ca6946 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -1029,20 +1029,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
dev->mii.phy_id = 0x03;
dev->mii.supports_gmii = 1;
- if (usb_device_no_sg_constraint(dev->udev))
- dev->can_dma_sg = 1;
-
dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
NETIF_F_RXCSUM;
- if (dev->can_dma_sg) {
- dev->net->features |= NETIF_F_SG | NETIF_F_TSO;
- dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO;
- }
-
/* Enable checksum offload */
*tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP |
AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6;
@@ -1118,6 +1110,10 @@ static int ax88179_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u16 hdr_off;
u32 *pkt_hdr;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
skb_trim(skb, skb->len - 4);
memcpy(&rx_hdr, skb_tail_pointer(skb), 4);
le32_to_cpus(&rx_hdr);
@@ -1391,6 +1387,19 @@ static const struct driver_info ax88178a_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info dlink_dub1312_info = {
+ .description = "D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct driver_info sitecom_info = {
.description = "Sitecom USB 3.0 to Gigabit Adapter",
.bind = ax88179_bind,
@@ -1417,6 +1426,19 @@ static const struct driver_info samsung_info = {
.tx_fixup = ax88179_tx_fixup,
};
+static const struct driver_info lenovo_info = {
+ .description = "Lenovo OneLinkDock Gigabit LAN",
+ .bind = ax88179_bind,
+ .unbind = ax88179_unbind,
+ .status = ax88179_status,
+ .link_reset = ax88179_link_reset,
+ .reset = ax88179_reset,
+ .stop = ax88179_stop,
+ .flags = FLAG_ETHER | FLAG_FRAMING_AX,
+ .rx_fixup = ax88179_rx_fixup,
+ .tx_fixup = ax88179_tx_fixup,
+};
+
static const struct usb_device_id products[] = {
{
/* ASIX AX88179 10/100/1000 */
@@ -1427,6 +1449,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0b95, 0x178a),
.driver_info = (unsigned long)&ax88178a_info,
}, {
+ /* D-Link DUB-1312 USB 3.0 to Gigabit Ethernet Adapter */
+ USB_DEVICE(0x2001, 0x4a00),
+ .driver_info = (unsigned long)&dlink_dub1312_info,
+}, {
/* Sitecom USB 3.0 to Gigabit Adapter */
USB_DEVICE(0x0df6, 0x0072),
.driver_info = (unsigned long)&sitecom_info,
@@ -1434,6 +1460,10 @@ static const struct usb_device_id products[] = {
/* Samsung USB Ethernet Adapter */
USB_DEVICE(0x04e8, 0xa100),
.driver_info = (unsigned long)&samsung_info,
+}, {
+ /* Lenovo OneLinkDock Gigabit LAN */
+ USB_DEVICE(0x17ef, 0x304b),
+ .driver_info = (unsigned long)&lenovo_info,
},
{ },
};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 42e176912c8e..bd363b27e854 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -652,6 +652,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* Samsung USB Ethernet Adapters */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* WHITELIST!!!
*
* CDC Ether uses two interfaces, not necessarily consecutive.
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
index dbff290ed0e4..d350d2795e10 100644
--- a/drivers/net/usb/cdc_ncm.c
+++ b/drivers/net/usb/cdc_ncm.c
@@ -68,7 +68,6 @@ static struct usb_driver cdc_ncm_driver;
static int cdc_ncm_setup(struct usbnet *dev)
{
struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
- struct usb_cdc_ncm_ntb_parameters ncm_parm;
u32 val;
u8 flags;
u8 iface_no;
@@ -82,22 +81,22 @@ static int cdc_ncm_setup(struct usbnet *dev)
err = usbnet_read_cmd(dev, USB_CDC_GET_NTB_PARAMETERS,
USB_TYPE_CLASS | USB_DIR_IN
|USB_RECIP_INTERFACE,
- 0, iface_no, &ncm_parm,
- sizeof(ncm_parm));
+ 0, iface_no, &ctx->ncm_parm,
+ sizeof(ctx->ncm_parm));
if (err < 0) {
dev_err(&dev->intf->dev, "failed GET_NTB_PARAMETERS\n");
return err; /* GET_NTB_PARAMETERS is required */
}
/* read correct set of parameters according to device mode */
- ctx->rx_max = le32_to_cpu(ncm_parm.dwNtbInMaxSize);
- ctx->tx_max = le32_to_cpu(ncm_parm.dwNtbOutMaxSize);
- ctx->tx_remainder = le16_to_cpu(ncm_parm.wNdpOutPayloadRemainder);
- ctx->tx_modulus = le16_to_cpu(ncm_parm.wNdpOutDivisor);
- ctx->tx_ndp_modulus = le16_to_cpu(ncm_parm.wNdpOutAlignment);
+ ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
+ ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
+ ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
+ ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
+ ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
/* devices prior to NCM Errata shall set this field to zero */
- ctx->tx_max_datagrams = le16_to_cpu(ncm_parm.wNtbOutMaxDatagrams);
- ntb_fmt_supported = le16_to_cpu(ncm_parm.bmNtbFormatsSupported);
+ ctx->tx_max_datagrams = le16_to_cpu(ctx->ncm_parm.wNtbOutMaxDatagrams);
+ ntb_fmt_supported = le16_to_cpu(ctx->ncm_parm.bmNtbFormatsSupported);
/* there are some minor differences in NCM and MBIM defaults */
if (cdc_ncm_comm_intf_is_mbim(ctx->control->cur_altsetting)) {
@@ -146,7 +145,7 @@ static int cdc_ncm_setup(struct usbnet *dev)
}
/* inform device about NTB input size changes */
- if (ctx->rx_max != le32_to_cpu(ncm_parm.dwNtbInMaxSize)) {
+ if (ctx->rx_max != le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize)) {
__le32 dwNtbInMaxSize = cpu_to_le32(ctx->rx_max);
err = usbnet_write_cmd(dev, USB_CDC_SET_NTB_INPUT_SIZE,
@@ -162,14 +161,6 @@ static int cdc_ncm_setup(struct usbnet *dev)
dev_dbg(&dev->intf->dev, "Using default maximum transmit length=%d\n",
CDC_NCM_NTB_MAX_SIZE_TX);
ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
-
- /* Adding a pad byte here simplifies the handling in
- * cdc_ncm_fill_tx_frame, by making tx_max always
- * represent the real skb max size.
- */
- if (ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
- ctx->tx_max++;
-
}
/*
@@ -439,6 +430,10 @@ advance:
goto error2;
}
+ /* initialize data interface */
+ if (cdc_ncm_setup(dev))
+ goto error2;
+
/* configure data interface */
temp = usb_set_interface(dev->udev, iface_no, data_altsetting);
if (temp) {
@@ -453,12 +448,6 @@ advance:
goto error2;
}
- /* initialize data interface */
- if (cdc_ncm_setup(dev)) {
- dev_dbg(&intf->dev, "cdc_ncm_setup() failed\n");
- goto error2;
- }
-
usb_set_intfdata(ctx->data, dev);
usb_set_intfdata(ctx->control, dev);
@@ -475,6 +464,15 @@ advance:
dev->hard_mtu = ctx->tx_max;
dev->rx_urb_size = ctx->rx_max;
+ /* cdc_ncm_setup will override dwNtbOutMaxSize if it is
+ * outside the sane range. Adding a pad byte here if necessary
+ * simplifies the handling in cdc_ncm_fill_tx_frame, making
+ * tx_max always represent the real skb max size.
+ */
+ if (ctx->tx_max != le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize) &&
+ ctx->tx_max % usb_maxpacket(dev->udev, dev->out, 1) == 0)
+ ctx->tx_max++;
+
return 0;
error2:
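
The tx_max adjustment above adds the pad byte only when the driver overrode the device's dwNtbOutMaxSize (tx_max no longer matches it) and the resulting size is an exact multiple of the bulk-out packet size, so tx_max keeps representing the real maximum skb size. A tiny userspace illustration of just that condition, with made-up numbers:

#include <stdio.h>

/* Mirror the condition above: pad by one byte only when the driver's
 * tx_max no longer matches the device's dwNtbOutMaxSize and is an exact
 * multiple of the bulk-out maximum packet size. */
static unsigned int demo_adjust_tx_max(unsigned int tx_max,
				       unsigned int dev_ntb_out_max,
				       unsigned int max_packet)
{
	if (tx_max != dev_ntb_out_max && tx_max % max_packet == 0)
		tx_max++;
	return tx_max;
}

int main(void)
{
	/* Clamped 16 KiB NTB on a 512-byte high-speed bulk endpoint. */
	printf("%u\n", demo_adjust_tx_max(16384, 32768, 512)); /* 16385 */
	/* Device-provided size kept as-is, no pad byte added. */
	printf("%u\n", demo_adjust_tx_max(16384, 16384, 512)); /* 16384 */
	return 0;
}
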
diff --git a/drivers/net/usb/gl620a.c b/drivers/net/usb/gl620a.c
index e4a8a93fbaf7..1cc24e6f23e2 100644
--- a/drivers/net/usb/gl620a.c
+++ b/drivers/net/usb/gl620a.c
@@ -84,6 +84,10 @@ static int genelink_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
u32 size;
u32 count;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
header = (struct gl_header *) skb->data;
// get the packet count of the received skb
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 1a482344b3f5..660bd5ea9fc0 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -1201,16 +1201,18 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
struct hso_serial *serial = urb->context;
int status = urb->status;
+ D4("\n--- Got serial_read_bulk callback %02x ---", status);
+
/* sanity check */
if (!serial) {
D1("serial == NULL");
return;
- } else if (status) {
+ }
+ if (status) {
handle_usb_error(status, __func__, serial->parent);
return;
}
- D4("\n--- Got serial_read_bulk callback %02x ---", status);
D1("Actual length = %d\n", urb->actual_length);
DUMP1(urb->transfer_buffer, urb->actual_length);
@@ -1218,25 +1220,13 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
if (serial->port.count == 0)
return;
- if (status == 0) {
- if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
- fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
- /* Valid data, handle RX data */
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
- put_rxbuf_data_and_resubmit_bulk_urb(serial);
- spin_unlock(&serial->serial_lock);
- } else if (status == -ENOENT || status == -ECONNRESET) {
- /* Unlinked - check for throttled port. */
- D2("Port %d, successfully unlinked urb", serial->minor);
- spin_lock(&serial->serial_lock);
- serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
- hso_resubmit_rx_bulk_urb(serial, urb);
- spin_unlock(&serial->serial_lock);
- } else {
- D2("Port %d, status = %d for read urb", serial->minor, status);
- return;
- }
+ if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
+ fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
+ /* Valid data, handle RX data */
+ spin_lock(&serial->serial_lock);
+ serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
+ put_rxbuf_data_and_resubmit_bulk_urb(serial);
+ spin_unlock(&serial->serial_lock);
}
/*
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a305a7b2dae6..82d844a8ebd0 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -526,8 +526,9 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
u8 status;
- if (skb->len == 0) {
- dev_err(&dev->udev->dev, "unexpected empty rx frame\n");
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len) {
+ dev_err(&dev->udev->dev, "unexpected tiny rx frame\n");
return 0;
}
diff --git a/drivers/net/usb/net1080.c b/drivers/net/usb/net1080.c
index 0a85d9227775..4cbdb1307f3e 100644
--- a/drivers/net/usb/net1080.c
+++ b/drivers/net/usb/net1080.c
@@ -364,6 +364,10 @@ static int net1080_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
struct nc_trailer *trailer;
u16 hdr_len, packet_len;
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
if (!(skb->len & 0x01)) {
netdev_dbg(dev->net, "rx framesize %d range %d..%d mtu %d\n",
skb->len, dev->net->hard_header_len, dev->hard_mtu,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23bdd5b9274d..313cb6cd4848 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -80,10 +80,10 @@ static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
__be16 proto;
- /* usbnet rx_complete guarantees that skb->len is at least
- * hard_header_len, so we can inspect the dest address without
- * checking skb->len
- */
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
switch (skb->data[0] & 0xf0) {
case 0x40:
proto = htons(ETH_P_IP);
@@ -712,6 +712,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x19d2, 0x1255, 3)},
{QMI_FIXED_INTF(0x19d2, 0x1255, 4)},
{QMI_FIXED_INTF(0x19d2, 0x1256, 4)},
+ {QMI_FIXED_INTF(0x19d2, 0x1270, 5)}, /* ZTE MF667 */
{QMI_FIXED_INTF(0x19d2, 0x1401, 2)},
{QMI_FIXED_INTF(0x19d2, 0x1402, 2)}, /* ZTE MF60 */
{QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
@@ -723,6 +724,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1199, 0x68a2, 8)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x68a2, 19)}, /* Sierra Wireless MC7710 in QMI mode */
{QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
+ {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
{QMI_FIXED_INTF(0x1bbb, 0x011e, 4)}, /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
{QMI_FIXED_INTF(0x2357, 0x0201, 4)}, /* TP-LINK HSUPA Modem MA180 */
{QMI_FIXED_INTF(0x2357, 0x9000, 4)}, /* TP-LINK MA260 */
@@ -730,6 +732,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x1bc7, 0x1201, 2)}, /* Telit LE920 */
{QMI_FIXED_INTF(0x0b3c, 0xc005, 6)}, /* Olivetti Olicard 200 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
/* 4. Gobi 1000 devices */
{QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e8fac732c6f1..adb12f349a61 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -449,9 +449,6 @@ enum rtl8152_flags {
#define MCU_TYPE_PLA 0x0100
#define MCU_TYPE_USB 0x0000
-#define REALTEK_USB_DEVICE(vend, prod) \
- USB_DEVICE_INTERFACE_CLASS(vend, prod, USB_CLASS_VENDOR_SPEC)
-
struct rx_desc {
__le32 opts1;
#define RX_LEN_MASK 0x7fff
@@ -2273,22 +2270,21 @@ static int rtl8152_open(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
+ rtl8152_set_speed(tp, AUTONEG_ENABLE,
+ tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
+ DUPLEX_FULL);
+ tp->speed = 0;
+ netif_carrier_off(netdev);
+ netif_start_queue(netdev);
+ set_bit(WORK_ENABLE, &tp->flags);
res = usb_submit_urb(tp->intr_urb, GFP_KERNEL);
if (res) {
if (res == -ENODEV)
netif_device_detach(tp->netdev);
netif_warn(tp, ifup, netdev, "intr_urb submit failed: %d\n",
res);
- return res;
}
- rtl8152_set_speed(tp, AUTONEG_ENABLE,
- tp->mii.supports_gmii ? SPEED_1000 : SPEED_100,
- DUPLEX_FULL);
- tp->speed = 0;
- netif_carrier_off(netdev);
- netif_start_queue(netdev);
- set_bit(WORK_ENABLE, &tp->flags);
return res;
}
@@ -2298,8 +2294,8 @@ static int rtl8152_close(struct net_device *netdev)
struct r8152 *tp = netdev_priv(netdev);
int res = 0;
- usb_kill_urb(tp->intr_urb);
clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
netif_stop_queue(netdev);
tasklet_disable(&tp->tl);
@@ -2740,6 +2736,12 @@ static int rtl8152_probe(struct usb_interface *intf,
struct net_device *netdev;
int ret;
+ if (udev->actconfig->desc.bConfigurationValue != 1) {
+ usb_driver_set_configuration(udev, 1);
+ return -ENODEV;
+ }
+
+ usb_reset_device(udev);
netdev = alloc_etherdev(sizeof(struct r8152));
if (!netdev) {
dev_err(&intf->dev, "Out of memory\n");
@@ -2820,9 +2822,9 @@ static void rtl8152_disconnect(struct usb_interface *intf)
/* table of devices that work with this driver */
static struct usb_device_id rtl8152_table[] = {
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
- {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
- {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)},
+ {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)},
+ {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)},
{}
};
diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c
deleted file mode 100644
index f0a8791b7636..000000000000
--- a/drivers/net/usb/r815x.c
+++ /dev/null
@@ -1,248 +0,0 @@
-#include <linux/module.h>
-#include <linux/netdevice.h>
-#include <linux/mii.h>
-#include <linux/usb.h>
-#include <linux/usb/cdc.h>
-#include <linux/usb/usbnet.h>
-
-#define RTL815x_REQT_READ 0xc0
-#define RTL815x_REQT_WRITE 0x40
-#define RTL815x_REQ_GET_REGS 0x05
-#define RTL815x_REQ_SET_REGS 0x05
-
-#define MCU_TYPE_PLA 0x0100
-#define OCP_BASE 0xe86c
-#define BASE_MII 0xa400
-
-#define BYTE_EN_DWORD 0xff
-#define BYTE_EN_WORD 0x33
-#define BYTE_EN_BYTE 0x11
-
-#define R815x_PHY_ID 32
-#define REALTEK_VENDOR_ID 0x0bda
-
-
-static int pla_read_word(struct usb_device *udev, u16 index)
-{
- int ret;
- u8 shift = index & 2;
- __le32 *tmp;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- index &= ~3;
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out2;
-
- ret = __le32_to_cpu(*tmp);
- ret >>= (shift * 8);
- ret &= 0xffff;
-
-out2:
- kfree(tmp);
- return ret;
-}
-
-static int pla_write_word(struct usb_device *udev, u16 index, u32 data)
-{
- __le32 *tmp;
- u32 mask = 0xffff;
- u16 byen = BYTE_EN_WORD;
- u8 shift = index & 2;
- int ret;
-
- tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
- return -ENOMEM;
-
- data &= mask;
-
- if (shift) {
- byen <<= shift;
- mask <<= (shift * 8);
- data <<= (shift * 8);
- index &= ~3;
- }
-
- ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
- RTL815x_REQ_GET_REGS, RTL815x_REQT_READ,
- index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500);
- if (ret < 0)
- goto out3;
-
- data |= __le32_to_cpu(*tmp) & ~mask;
- *tmp = __cpu_to_le32(data);
-
- ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
- RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE,
- index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp),
- 500);
-
-out3:
- kfree(tmp);
- return ret;
-}
-
-static int ocp_reg_read(struct usbnet *dev, u16 addr)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_read_word(dev->udev, ocp_index);
-
-out:
- return ret;
-}
-
-static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data)
-{
- u16 ocp_base, ocp_index;
- int ret;
-
- ocp_base = addr & 0xf000;
- ret = pla_write_word(dev->udev, OCP_BASE, ocp_base);
- if (ret < 0)
- goto out1;
-
- ocp_index = (addr & 0x0fff) | 0xb000;
- ret = pla_write_word(dev->udev, ocp_index, data);
-
-out1:
- return ret;
-}
-
-static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg)
-{
- struct usbnet *dev = netdev_priv(netdev);
- int ret;
-
- if (phy_id != R815x_PHY_ID)
- return -EINVAL;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return -ENODEV;
-
- ret = ocp_reg_read(dev, BASE_MII + reg * 2);
-
- usb_autopm_put_interface(dev->intf);
- return ret;
-}
-
-static
-void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val)
-{
- struct usbnet *dev = netdev_priv(netdev);
-
- if (phy_id != R815x_PHY_ID)
- return;
-
- if (usb_autopm_get_interface(dev->intf) < 0)
- return;
-
- ocp_reg_write(dev, BASE_MII + reg * 2, val);
-
- usb_autopm_put_interface(dev->intf);
-}
-
-static int r8153_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 1;
-
- return status;
-}
-
-static int r8152_bind(struct usbnet *dev, struct usb_interface *intf)
-{
- int status;
-
- status = usbnet_cdc_bind(dev, intf);
- if (status < 0)
- return status;
-
- dev->mii.dev = dev->net;
- dev->mii.mdio_read = r815x_mdio_read;
- dev->mii.mdio_write = r815x_mdio_write;
- dev->mii.phy_id_mask = 0x3f;
- dev->mii.reg_num_mask = 0x1f;
- dev->mii.phy_id = R815x_PHY_ID;
- dev->mii.supports_gmii = 0;
-
- return status;
-}
-
-static const struct driver_info r8152_info = {
- .description = "RTL8152 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8152_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct driver_info r8153_info = {
- .description = "RTL8153 ECM Device",
- .flags = FLAG_ETHER | FLAG_POINTTOPOINT,
- .bind = r8153_bind,
- .unbind = usbnet_cdc_unbind,
- .status = usbnet_cdc_status,
- .manage_power = usbnet_manage_power,
-};
-
-static const struct usb_device_id products[] = {
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &r8152_info,
-},
-
-{
- USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM,
- USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
- .driver_info = (unsigned long) &r8153_info,
-},
-
- { }, /* END */
-};
-MODULE_DEVICE_TABLE(usb, products);
-
-static struct usb_driver r815x_driver = {
- .name = "r815x",
- .id_table = products,
- .probe = usbnet_probe,
- .disconnect = usbnet_disconnect,
- .suspend = usbnet_suspend,
- .resume = usbnet_resume,
- .reset_resume = usbnet_resume,
- .supports_autosuspend = 1,
- .disable_hub_initiated_lpm = 1,
-};
-
-module_usb_driver(r815x_driver);
-
-MODULE_AUTHOR("Hayes Wang");
-MODULE_DESCRIPTION("Realtek USB ECM device");
-MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c
index a48bc0f20c1a..524a47a28120 100644
--- a/drivers/net/usb/rndis_host.c
+++ b/drivers/net/usb/rndis_host.c
@@ -492,6 +492,10 @@ EXPORT_SYMBOL_GPL(rndis_unbind);
*/
int rndis_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
/* peripheral may have batched packets to us... */
while (likely(skb->len)) {
struct rndis_data_hdr *hdr = (void *)skb->data;
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index f17b9e02dd34..d9e7892262fa 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -2106,6 +2106,10 @@ static void smsc75xx_rx_csum_offload(struct usbnet *dev, struct sk_buff *skb,
static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 rx_cmd_a, rx_cmd_b, align_count, size;
struct sk_buff *ax_skb;
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 8dd54a0f7b29..424db65e4396 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1723,6 +1723,10 @@ static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
while (skb->len > 0) {
u32 header, align_count;
struct sk_buff *ax_skb;
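The same two-line guard is added to rndis_host, smsc75xx and smsc95xx (and appears in the new sr9800 driver below) because the usbnet core change further down in this patch stops dropping short URBs centrally. A hedged sketch of the contract every rx_fixup() now has to honour itself (names are generic, not from any one driver):

#include <linux/usb/usbnet.h>

/* Illustrative only: rx_fixup() must reject runt URBs on its own now. */
static int example_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
	/* usbnet no longer drops short buffers before calling us */
	if (skb->len < dev->net->hard_header_len)
		return 0;		/* 0 == error, buffer is discarded */

	/* ... driver-specific de-framing of the URB would follow ... */
	return 1;			/* 1 == frame(s) handled/queued */
}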
diff --git a/drivers/net/usb/sr9800.c b/drivers/net/usb/sr9800.c
new file mode 100644
index 000000000000..b94a0fbb8b3b
--- /dev/null
+++ b/drivers/net/usb/sr9800.c
@@ -0,0 +1,874 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * Based on asix_common.c, asix_devices.c
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/kmod.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/ethtool.h>
+#include <linux/workqueue.h>
+#include <linux/mii.h>
+#include <linux/usb.h>
+#include <linux/crc32.h>
+#include <linux/usb/usbnet.h>
+#include <linux/slab.h>
+#include <linux/if_vlan.h>
+
+#include "sr9800.h"
+
+static int sr_read_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_read_cmd(dev, cmd, SR_REQ_RD_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static int sr_write_cmd(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ int err;
+
+ err = usbnet_write_cmd(dev, cmd, SR_REQ_WR_REG, value, index,
+ data, size);
+ if ((err != size) && (err >= 0))
+ err = -EINVAL;
+
+ return err;
+}
+
+static void
+sr_write_cmd_async(struct usbnet *dev, u8 cmd, u16 value, u16 index,
+ u16 size, void *data)
+{
+ usbnet_write_cmd_async(dev, cmd, SR_REQ_WR_REG, value, index, data,
+ size);
+}
+
+static int sr_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
+{
+ int offset = 0;
+
+ /* This check is no longer done by usbnet */
+ if (skb->len < dev->net->hard_header_len)
+ return 0;
+
+ while (offset + sizeof(u32) < skb->len) {
+ struct sk_buff *sr_skb;
+ u16 size;
+ u32 header = get_unaligned_le32(skb->data + offset);
+
+ offset += sizeof(u32);
+ /* get the packet length */
+ size = (u16) (header & 0x7ff);
+ if (size != ((~header >> 16) & 0x07ff)) {
+ netdev_err(dev->net, "%s : Bad Header Length\n",
+ __func__);
+ return 0;
+ }
+
+ if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
+ (size + offset > skb->len)) {
+ netdev_err(dev->net, "%s : Bad RX Length %d\n",
+ __func__, size);
+ return 0;
+ }
+ sr_skb = netdev_alloc_skb_ip_align(dev->net, size);
+ if (!sr_skb)
+ return 0;
+
+ skb_put(sr_skb, size);
+ memcpy(sr_skb->data, skb->data + offset, size);
+ usbnet_skb_return(dev, sr_skb);
+
+ offset += (size + 1) & 0xfffe;
+ }
+
+ if (skb->len != offset) {
+ netdev_err(dev->net, "%s : Bad SKB Length %d\n", __func__,
+ skb->len);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct sk_buff *sr_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
+ gfp_t flags)
+{
+ int headroom = skb_headroom(skb);
+ int tailroom = skb_tailroom(skb);
+ u32 padbytes = 0xffff0000;
+ u32 packet_len;
+ int padlen;
+
+ padlen = ((skb->len + 4) % (dev->maxpacket - 1)) ? 0 : 4;
+
+ if ((!skb_cloned(skb)) && ((headroom + tailroom) >= (4 + padlen))) {
+ if ((headroom < 4) || (tailroom < padlen)) {
+ skb->data = memmove(skb->head + 4, skb->data,
+ skb->len);
+ skb_set_tail_pointer(skb, skb->len);
+ }
+ } else {
+ struct sk_buff *skb2;
+ skb2 = skb_copy_expand(skb, 4, padlen, flags);
+ dev_kfree_skb_any(skb);
+ skb = skb2;
+ if (!skb)
+ return NULL;
+ }
+
+ skb_push(skb, 4);
+ packet_len = (((skb->len - 4) ^ 0x0000ffff) << 16) + (skb->len - 4);
+ cpu_to_le32s(&packet_len);
+ skb_copy_to_linear_data(skb, &packet_len, sizeof(packet_len));
+
+ if (padlen) {
+ cpu_to_le32s(&padbytes);
+ memcpy(skb_tail_pointer(skb), &padbytes, sizeof(padbytes));
+ skb_put(skb, sizeof(padbytes));
+ }
+
+ return skb;
+}
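The 32-bit word prepended in sr_tx_fixup() carries the payload length twice: once directly and once as a 16-bit one's complement, which is exactly what sr_rx_fixup() above verifies with its 0x7ff masks. A hedged sketch of the packing on its own (the helper name is illustrative):

#include <linux/types.h>

/* Illustrative only: SR9800 framing word = length | ~length << 16. */
static inline u32 example_sr_tx_header(u16 len)
{
	/* e.g. len 0x003c yields 0xffc3003c */
	return ((u32)(u16)~len << 16) | len;
}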
+
+static void sr_status(struct usbnet *dev, struct urb *urb)
+{
+ struct sr9800_int_data *event;
+ int link;
+
+ if (urb->actual_length < 8)
+ return;
+
+ event = urb->transfer_buffer;
+ link = event->link & 0x01;
+ if (netif_carrier_ok(dev->net) != link) {
+ usbnet_link_change(dev, link, 1);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
+
+ return;
+}
+
+static inline int sr_set_sw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_SW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable software MII access\n");
+ return ret;
+}
+
+static inline int sr_set_hw_mii(struct usbnet *dev)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SET_HW_MII, 0x0000, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to enable hardware MII access\n");
+ return ret;
+}
+
+static inline int sr_get_phy_addr(struct usbnet *dev)
+{
+ u8 buf[2];
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_PHY_ID, 0, 0, 2, buf);
+ if (ret < 0) {
+ netdev_err(dev->net, "%s : Error reading PHYID register:%02x\n",
+ __func__, ret);
+ goto out;
+ }
+ netdev_dbg(dev->net, "%s : returning 0x%04x\n", __func__,
+ *((__le16 *)buf));
+
+ ret = buf[1];
+
+out:
+ return ret;
+}
+
+static int sr_sw_reset(struct usbnet *dev, u8 flags)
+{
+ int ret;
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_RESET, flags, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to send software reset:%02x\n",
+ ret);
+
+ return ret;
+}
+
+static u16 sr_read_rx_ctl(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_RX_CTL, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net, "Error reading RX_CTL register:%02x\n",
+ ret);
+ goto out;
+ }
+
+ ret = le16_to_cpu(v);
+out:
+ return ret;
+}
+
+static int sr_write_rx_ctl(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_RX_CTL, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write RX_CTL mode to 0x%04x:%02x\n",
+ mode, ret);
+
+ return ret;
+}
+
+static u16 sr_read_medium_status(struct usbnet *dev)
+{
+ __le16 v;
+ int ret;
+
+ ret = sr_read_cmd(dev, SR_CMD_READ_MEDIUM_STATUS, 0, 0, 2, &v);
+ if (ret < 0) {
+ netdev_err(dev->net,
+ "Error reading Medium Status register:%02x\n", ret);
+ return ret; /* TODO: callers not checking for error ret */
+ }
+
+ return le16_to_cpu(v);
+}
+
+static int sr_write_medium_mode(struct usbnet *dev, u16 mode)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : mode = 0x%04x\n", __func__, mode);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_MEDIUM_MODE, mode, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net,
+ "Failed to write Medium Mode mode to 0x%04x:%02x\n",
+ mode, ret);
+ return ret;
+}
+
+static int sr_write_gpio(struct usbnet *dev, u16 value, int sleep)
+{
+ int ret;
+
+ netdev_dbg(dev->net, "%s : value = 0x%04x\n", __func__, value);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_GPIOS, value, 0, 0, NULL);
+ if (ret < 0)
+ netdev_err(dev->net, "Failed to write GPIO value 0x%04x:%02x\n",
+ value, ret);
+ if (sleep)
+ msleep(sleep);
+
+ return ret;
+}
+
+/* SR9800 have a 16-bit RX_CTL value */
+static void sr_set_multicast(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 rx_ctl = SR_DEFAULT_RX_CTL;
+
+ if (net->flags & IFF_PROMISC) {
+ rx_ctl |= SR_RX_CTL_PRO;
+ } else if (net->flags & IFF_ALLMULTI ||
+ netdev_mc_count(net) > SR_MAX_MCAST) {
+ rx_ctl |= SR_RX_CTL_AMALL;
+ } else if (netdev_mc_empty(net)) {
+ /* just broadcast and directed */
+ } else {
+ /* We use the 20 byte dev->data
+ * for our 8 byte filter buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ struct netdev_hw_addr *ha;
+ u32 crc_bits;
+
+ memset(data->multi_filter, 0, SR_MCAST_FILTER_SIZE);
+
+ /* Build the multicast hash filter. */
+ netdev_for_each_mc_addr(ha, net) {
+ crc_bits = ether_crc(ETH_ALEN, ha->addr) >> 26;
+ data->multi_filter[crc_bits >> 3] |=
+ 1 << (crc_bits & 7);
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_MULTI_FILTER, 0, 0,
+ SR_MCAST_FILTER_SIZE, data->multi_filter);
+
+ rx_ctl |= SR_RX_CTL_AM;
+ }
+
+ sr_write_cmd_async(dev, SR_CMD_WRITE_RX_CTL, rx_ctl, 0, 0, NULL);
+}
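The hash used just above maps each multicast address to one of 64 bits in the 8-byte filter: the top six bits of the Ethernet CRC select the bit. That step in isolation, as a hedged sketch (the helper name is illustrative):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

/* Illustrative only: set the SR9800 multicast filter bit for one address. */
static void example_set_filter_bit(u8 *filter, const u8 *addr)
{
	u32 bit = ether_crc(ETH_ALEN, addr) >> 26;	/* 0..63 */

	filter[bit >> 3] |= 1 << (bit & 7);
}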
+
+static int sr_mdio_read(struct net_device *net, int phy_id, int loc)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res;
+
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_read_cmd(dev, SR_CMD_READ_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, returns=0x%04x\n", __func__,
+ phy_id, loc, le16_to_cpu(res));
+
+ return le16_to_cpu(res);
+}
+
+static void
+sr_mdio_write(struct net_device *net, int phy_id, int loc, int val)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 res = cpu_to_le16(val);
+
+ netdev_dbg(dev->net,
+ "%s : phy_id=0x%02x, loc=0x%02x, val=0x%04x\n", __func__,
+ phy_id, loc, val);
+ mutex_lock(&dev->phy_mutex);
+ sr_set_sw_mii(dev);
+ sr_write_cmd(dev, SR_CMD_WRITE_MII_REG, phy_id, (__u16)loc, 2, &res);
+ sr_set_hw_mii(dev);
+ mutex_unlock(&dev->phy_mutex);
+}
+
+/* Get the PHY Identifier from the PHYSID1 & PHYSID2 MII registers */
+static u32 sr_get_phyid(struct usbnet *dev)
+{
+ int phy_reg;
+ u32 phy_id;
+ int i;
+
+ /* Poll for the rare case the FW or phy isn't ready yet. */
+ for (i = 0; i < 100; i++) {
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID1);
+ if (phy_reg != 0 && phy_reg != 0xFFFF)
+ break;
+ mdelay(1);
+ }
+
+ if (phy_reg <= 0 || phy_reg == 0xFFFF)
+ return 0;
+
+ phy_id = (phy_reg & 0xffff) << 16;
+
+ phy_reg = sr_mdio_read(dev->net, dev->mii.phy_id, MII_PHYSID2);
+ if (phy_reg < 0)
+ return 0;
+
+ phy_id |= (phy_reg & 0xffff);
+
+ return phy_id;
+}
+
+static void
+sr_get_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt;
+
+ if (sr_read_cmd(dev, SR_CMD_READ_MONITOR_MODE, 0, 0, 1, &opt) < 0) {
+ wolinfo->supported = 0;
+ wolinfo->wolopts = 0;
+ return;
+ }
+ wolinfo->supported = WAKE_PHY | WAKE_MAGIC;
+ wolinfo->wolopts = 0;
+ if (opt & SR_MONITOR_LINK)
+ wolinfo->wolopts |= WAKE_PHY;
+ if (opt & SR_MONITOR_MAGIC)
+ wolinfo->wolopts |= WAKE_MAGIC;
+}
+
+static int
+sr_set_wol(struct net_device *net, struct ethtool_wolinfo *wolinfo)
+{
+ struct usbnet *dev = netdev_priv(net);
+ u8 opt = 0;
+
+ if (wolinfo->wolopts & WAKE_PHY)
+ opt |= SR_MONITOR_LINK;
+ if (wolinfo->wolopts & WAKE_MAGIC)
+ opt |= SR_MONITOR_MAGIC;
+
+ if (sr_write_cmd(dev, SR_CMD_WRITE_MONITOR_MODE,
+ opt, 0, 0, NULL) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sr_get_eeprom_len(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ return data->eeprom_len;
+}
+
+static int sr_get_eeprom(struct net_device *net,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ struct usbnet *dev = netdev_priv(net);
+ __le16 *ebuf = (__le16 *)data;
+ int ret;
+ int i;
+
+ /* Crude hack to ensure that we don't overwrite memory
+ * if an odd length is supplied
+ */
+ if (eeprom->len % 2)
+ return -EINVAL;
+
+ eeprom->magic = SR_EEPROM_MAGIC;
+
+ /* sr9800 returns 2 bytes from eeprom on read */
+ for (i = 0; i < eeprom->len / 2; i++) {
+ ret = sr_read_cmd(dev, SR_CMD_READ_EEPROM, eeprom->offset + i,
+ 0, 2, &ebuf[i]);
+ if (ret < 0)
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static void sr_get_drvinfo(struct net_device *net,
+ struct ethtool_drvinfo *info)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+
+ /* Inherit standard device info */
+ usbnet_get_drvinfo(net, info);
+ strncpy(info->driver, DRIVER_NAME, sizeof(info->driver));
+ strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
+ info->eedump_len = data->eeprom_len;
+}
+
+static u32 sr_get_link(struct net_device *net)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return mii_link_ok(&dev->mii);
+}
+
+static int sr_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
+{
+ struct usbnet *dev = netdev_priv(net);
+
+ return generic_mii_ioctl(&dev->mii, if_mii(rq), cmd, NULL);
+}
+
+static int sr_set_mac_address(struct net_device *net, void *p)
+{
+ struct usbnet *dev = netdev_priv(net);
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ struct sockaddr *addr = p;
+
+ if (netif_running(net))
+ return -EBUSY;
+ if (!is_valid_ether_addr(addr->sa_data))
+ return -EADDRNOTAVAIL;
+
+ memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
+
+ /* We use the 20 byte dev->data
+ * for our 6 byte mac buffer
+ * to avoid allocating memory that
+ * is tricky to free later
+ */
+ memcpy(data->mac_addr, addr->sa_data, ETH_ALEN);
+ sr_write_cmd_async(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+
+ return 0;
+}
+
+static const struct ethtool_ops sr9800_ethtool_ops = {
+ .get_drvinfo = sr_get_drvinfo,
+ .get_link = sr_get_link,
+ .get_msglevel = usbnet_get_msglevel,
+ .set_msglevel = usbnet_set_msglevel,
+ .get_wol = sr_get_wol,
+ .set_wol = sr_set_wol,
+ .get_eeprom_len = sr_get_eeprom_len,
+ .get_eeprom = sr_get_eeprom,
+ .get_settings = usbnet_get_settings,
+ .set_settings = usbnet_set_settings,
+ .nway_reset = usbnet_nway_reset,
+};
+
+static int sr9800_link_reset(struct usbnet *dev)
+{
+ struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+ u16 mode;
+
+ mii_check_media(&dev->mii, 1, 1);
+ mii_ethtool_gset(&dev->mii, &ecmd);
+ mode = SR9800_MEDIUM_DEFAULT;
+
+ if (ethtool_cmd_speed(&ecmd) != SPEED_100)
+ mode &= ~SR_MEDIUM_PS;
+
+ if (ecmd.duplex != DUPLEX_FULL)
+ mode &= ~SR_MEDIUM_FD;
+
+ netdev_dbg(dev->net, "%s : speed: %u duplex: %d mode: 0x%04x\n",
+ __func__, ethtool_cmd_speed(&ecmd), ecmd.duplex, mode);
+
+ sr_write_medium_mode(dev, mode);
+
+ return 0;
+}
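sr9800_link_reset() derives the medium-mode word from the MII result: start from SR9800_MEDIUM_DEFAULT and clear the 100 Mbit/s and full-duplex bits when the negotiated link does not use them. The mapping on its own, as a hedged sketch (the helper name is illustrative):

#include <linux/ethtool.h>

#include "sr9800.h"

/* Illustrative only: negotiated speed/duplex -> SR9800 medium-mode word. */
static u16 example_medium_mode(u32 speed, u8 duplex)
{
	u16 mode = SR9800_MEDIUM_DEFAULT;

	if (speed != SPEED_100)
		mode &= ~SR_MEDIUM_PS;
	if (duplex != DUPLEX_FULL)
		mode &= ~SR_MEDIUM_FD;

	return mode;
}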
+
+
+static int sr9800_set_default_mode(struct usbnet *dev)
+{
+ u16 rx_ctl;
+ int ret;
+
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_BMCR, BMCR_RESET);
+ sr_mdio_write(dev->net, dev->mii.phy_id, MII_ADVERTISE,
+ ADVERTISE_ALL | ADVERTISE_CSMA);
+ mii_nway_restart(&dev->mii);
+
+ ret = sr_write_medium_mode(dev, SR9800_MEDIUM_DEFAULT);
+ if (ret < 0)
+ goto out;
+
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_IPG012,
+ SR9800_IPG0_DEFAULT | SR9800_IPG1_DEFAULT,
+ SR9800_IPG2_DEFAULT, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Write IPG,IPG1,IPG2 failed: %d\n", ret);
+ goto out;
+ }
+
+ /* Set RX_CTL to default values with 2k buffer, and enable cactus */
+ ret = sr_write_rx_ctl(dev, SR_DEFAULT_RX_CTL);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after all initializations\n",
+ rx_ctl);
+
+ rx_ctl = sr_read_medium_status(dev);
+ netdev_dbg(dev->net, "Medium Status:0x%04x after all initializations\n",
+ rx_ctl);
+
+ return 0;
+out:
+ return ret;
+}
+
+static int sr9800_reset(struct usbnet *dev)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ int ret, embd_phy;
+ u16 rx_ctl;
+
+ ret = sr_write_gpio(dev,
+ SR_GPIO_RSE | SR_GPIO_GPO_2 | SR_GPIO_GPO2EN, 5);
+ if (ret < 0)
+ goto out;
+
+ embd_phy = ((sr_get_phy_addr(dev) & 0x1f) == 0x10 ? 1 : 0);
+
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ goto out;
+ }
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ if (embd_phy) {
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0)
+ goto out;
+ } else {
+ ret = sr_sw_reset(dev, SR_SWRESET_PRTE);
+ if (ret < 0)
+ goto out;
+ }
+
+ msleep(150);
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL | SR_SWRESET_PRL);
+ if (ret < 0)
+ goto out;
+
+ msleep(150);
+
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ /* Rewrite MAC address */
+ memcpy(data->mac_addr, dev->net->dev_addr, ETH_ALEN);
+ ret = sr_write_cmd(dev, SR_CMD_WRITE_NODE_ID, 0, 0, ETH_ALEN,
+ data->mac_addr);
+ if (ret < 0)
+ goto out;
+
+ return 0;
+
+out:
+ return ret;
+}
+
+static const struct net_device_ops sr9800_netdev_ops = {
+ .ndo_open = usbnet_open,
+ .ndo_stop = usbnet_stop,
+ .ndo_start_xmit = usbnet_start_xmit,
+ .ndo_tx_timeout = usbnet_tx_timeout,
+ .ndo_change_mtu = usbnet_change_mtu,
+ .ndo_set_mac_address = sr_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_do_ioctl = sr_ioctl,
+ .ndo_set_rx_mode = sr_set_multicast,
+};
+
+static int sr9800_phy_powerup(struct usbnet *dev)
+{
+ int ret;
+
+ /* set the embedded Ethernet PHY in power-down state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPPD | SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power down PHY : %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(600);
+
+ /* set the embedded Ethernet PHY in reset state */
+ ret = sr_sw_reset(dev, SR_SWRESET_CLEAR);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to power up PHY: %d\n", ret);
+ return ret;
+ }
+ msleep(20);
+
+ /* set the embedded Ethernet PHY in power-up state */
+ ret = sr_sw_reset(dev, SR_SWRESET_IPRL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Failed to reset PHY: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int sr9800_bind(struct usbnet *dev, struct usb_interface *intf)
+{
+ struct sr_data *data = (struct sr_data *)&dev->data;
+ u16 led01_mux, led23_mux;
+ int ret, embd_phy;
+ u32 phyid;
+ u16 rx_ctl;
+
+ data->eeprom_len = SR9800_EEPROM_LEN;
+
+ usbnet_get_endpoints(dev, intf);
+
+ /* LED Setting Rule :
+ * AABB:CCDD
+ * AA : MFA0(LED0)
+ * BB : MFA1(LED1)
+ * CC : MFA2(LED2), Reserved for SR9800
+ * DD : MFA3(LED3), Reserved for SR9800
+ */
+ led01_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_LINK;
+ led23_mux = (SR_LED_MUX_LINK_ACTIVE << 8) | SR_LED_MUX_TX_ACTIVE;
+ ret = sr_write_cmd(dev, SR_CMD_LED_MUX, led01_mux, led23_mux, 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "set LINK LED failed : %d\n", ret);
+ goto out;
+ }
+
+ /* Get the MAC address */
+ ret = sr_read_cmd(dev, SR_CMD_READ_NODE_ID, 0, 0, ETH_ALEN,
+ dev->net->dev_addr);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Failed to read MAC address: %d\n", ret);
+ return ret;
+ }
+ netdev_dbg(dev->net, "mac addr : %pM\n", dev->net->dev_addr);
+
+ /* Initialize MII structure */
+ dev->mii.dev = dev->net;
+ dev->mii.mdio_read = sr_mdio_read;
+ dev->mii.mdio_write = sr_mdio_write;
+ dev->mii.phy_id_mask = 0x1f;
+ dev->mii.reg_num_mask = 0x1f;
+ dev->mii.phy_id = sr_get_phy_addr(dev);
+
+ dev->net->netdev_ops = &sr9800_netdev_ops;
+ dev->net->ethtool_ops = &sr9800_ethtool_ops;
+
+ embd_phy = ((dev->mii.phy_id & 0x1f) == 0x10 ? 1 : 0);
+ /* Reset the PHY to normal operation mode */
+ ret = sr_write_cmd(dev, SR_CMD_SW_PHY_SELECT, embd_phy, 0, 0, NULL);
+ if (ret < 0) {
+ netdev_dbg(dev->net, "Select PHY #1 failed: %d\n", ret);
+ return ret;
+ }
+
+ /* Init PHY routine */
+ ret = sr9800_phy_powerup(dev);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x after software reset\n", rx_ctl);
+ ret = sr_write_rx_ctl(dev, 0x0000);
+ if (ret < 0)
+ goto out;
+
+ rx_ctl = sr_read_rx_ctl(dev);
+ netdev_dbg(dev->net, "RX_CTL is 0x%04x setting to 0x0000\n", rx_ctl);
+
+ /* Read PHYID register *AFTER* the PHY was reset properly */
+ phyid = sr_get_phyid(dev);
+ netdev_dbg(dev->net, "PHYID=0x%08x\n", phyid);
+
+ /* medium mode setting */
+ ret = sr9800_set_default_mode(dev);
+ if (ret < 0)
+ goto out;
+
+ if (dev->udev->speed == USB_SPEED_HIGH) {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_4K].size;
+ } else {
+ ret = sr_write_cmd(dev, SR_CMD_BULKIN_SIZE,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].byte_cnt,
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].threshold,
+ 0, NULL);
+ if (ret < 0) {
+ netdev_err(dev->net, "Reset RX_CTL failed: %d\n", ret);
+ goto out;
+ }
+ dev->rx_urb_size =
+ SR9800_BULKIN_SIZE[SR9800_MAX_BULKIN_2K].size;
+ }
+ netdev_dbg(dev->net, "%s : setting rx_urb_size with : %zu\n", __func__,
+ dev->rx_urb_size);
+ return 0;
+
+out:
+ return ret;
+}
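The tail of sr9800_bind() sizes the bulk-in aggregation buffer from the link speed: high-speed devices use the 4 KiB entry of SR9800_BULKIN_SIZE[], full-speed devices the 2 KiB one, and dev->rx_urb_size follows the chosen entry. A hedged sketch of just the selection (the helper name is illustrative):

#include <linux/usb.h>
#include <linux/usb/usbnet.h>

#include "sr9800.h"

/* Illustrative only: pick the SR9800_BULKIN_SIZE[] index for this link. */
static int example_bulkin_index(const struct usbnet *dev)
{
	return dev->udev->speed == USB_SPEED_HIGH ?
	       SR9800_MAX_BULKIN_4K : SR9800_MAX_BULKIN_2K;
}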
+
+static const struct driver_info sr9800_driver_info = {
+ .description = "CoreChip SR9800 USB 2.0 Ethernet",
+ .bind = sr9800_bind,
+ .status = sr_status,
+ .link_reset = sr9800_link_reset,
+ .reset = sr9800_reset,
+ .flags = DRIVER_FLAG,
+ .rx_fixup = sr_rx_fixup,
+ .tx_fixup = sr_tx_fixup,
+};
+
+static const struct usb_device_id products[] = {
+ {
+ USB_DEVICE(0x0fe6, 0x9800), /* SR9800 Device */
+ .driver_info = (unsigned long) &sr9800_driver_info,
+ },
+ {}, /* END */
+};
+
+MODULE_DEVICE_TABLE(usb, products);
+
+static struct usb_driver sr_driver = {
+ .name = DRIVER_NAME,
+ .id_table = products,
+ .probe = usbnet_probe,
+ .suspend = usbnet_suspend,
+ .resume = usbnet_resume,
+ .disconnect = usbnet_disconnect,
+ .supports_autosuspend = 1,
+};
+
+module_usb_driver(sr_driver);
+
+MODULE_AUTHOR("Liu Junliang <liujunliang_ljl@163.com>");
+MODULE_VERSION(DRIVER_VERSION);
+MODULE_DESCRIPTION("SR9800 USB 2.0 USB2NET Dev : http://www.corechip-sz.com");
+MODULE_LICENSE("GPL");
diff --git a/drivers/net/usb/sr9800.h b/drivers/net/usb/sr9800.h
new file mode 100644
index 000000000000..18f670251275
--- /dev/null
+++ b/drivers/net/usb/sr9800.h
@@ -0,0 +1,202 @@
+/* CoreChip-sz SR9800 one chip USB 2.0 Ethernet Devices
+ *
+ * Author : Liu Junliang <liujunliang_ljl@163.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#ifndef _SR9800_H
+#define _SR9800_H
+
+/* SR9800 spec. command table on Linux Platform */
+
+/* command : Software Station Management Control Reg */
+#define SR_CMD_SET_SW_MII 0x06
+/* command : PHY Read Reg */
+#define SR_CMD_READ_MII_REG 0x07
+/* command : PHY Write Reg */
+#define SR_CMD_WRITE_MII_REG 0x08
+/* command : Hardware Station Management Control Reg */
+#define SR_CMD_SET_HW_MII 0x0a
+/* command : SROM Read Reg */
+#define SR_CMD_READ_EEPROM 0x0b
+/* command : SROM Write Reg */
+#define SR_CMD_WRITE_EEPROM 0x0c
+/* command : SROM Write Enable Reg */
+#define SR_CMD_WRITE_ENABLE 0x0d
+/* command : SROM Write Disable Reg */
+#define SR_CMD_WRITE_DISABLE 0x0e
+/* command : RX Control Read Reg */
+#define SR_CMD_READ_RX_CTL 0x0f
+#define SR_RX_CTL_PRO (1 << 0)
+#define SR_RX_CTL_AMALL (1 << 1)
+#define SR_RX_CTL_SEP (1 << 2)
+#define SR_RX_CTL_AB (1 << 3)
+#define SR_RX_CTL_AM (1 << 4)
+#define SR_RX_CTL_AP (1 << 5)
+#define SR_RX_CTL_ARP (1 << 6)
+#define SR_RX_CTL_SO (1 << 7)
+#define SR_RX_CTL_RH1M (1 << 8)
+#define SR_RX_CTL_RH2M (1 << 9)
+#define SR_RX_CTL_RH3M (1 << 10)
+/* command : RX Control Write Reg */
+#define SR_CMD_WRITE_RX_CTL 0x10
+/* command : IPG0/IPG1/IPG2 Control Read Reg */
+#define SR_CMD_READ_IPG012 0x11
+/* command : IPG0/IPG1/IPG2 Control Write Reg */
+#define SR_CMD_WRITE_IPG012 0x12
+/* command : Node ID Read Reg */
+#define SR_CMD_READ_NODE_ID 0x13
+/* command : Node ID Write Reg */
+#define SR_CMD_WRITE_NODE_ID 0x14
+/* command : Multicast Filter Array Read Reg */
+#define SR_CMD_READ_MULTI_FILTER 0x15
+/* command : Multicast Filter Array Write Reg */
+#define SR_CMD_WRITE_MULTI_FILTER 0x16
+/* command : Eth/HomePNA PHY Address Reg */
+#define SR_CMD_READ_PHY_ID 0x19
+/* command : Medium Status Read Reg */
+#define SR_CMD_READ_MEDIUM_STATUS 0x1a
+#define SR_MONITOR_LINK (1 << 1)
+#define SR_MONITOR_MAGIC (1 << 2)
+#define SR_MONITOR_HSFS (1 << 4)
+/* command : Medium Status Write Reg */
+#define SR_CMD_WRITE_MEDIUM_MODE 0x1b
+#define SR_MEDIUM_GM (1 << 0)
+#define SR_MEDIUM_FD (1 << 1)
+#define SR_MEDIUM_AC (1 << 2)
+#define SR_MEDIUM_ENCK (1 << 3)
+#define SR_MEDIUM_RFC (1 << 4)
+#define SR_MEDIUM_TFC (1 << 5)
+#define SR_MEDIUM_JFE (1 << 6)
+#define SR_MEDIUM_PF (1 << 7)
+#define SR_MEDIUM_RE (1 << 8)
+#define SR_MEDIUM_PS (1 << 9)
+#define SR_MEDIUM_RSV (1 << 10)
+#define SR_MEDIUM_SBP (1 << 11)
+#define SR_MEDIUM_SM (1 << 12)
+/* command : Monitor Mode Status Read Reg */
+#define SR_CMD_READ_MONITOR_MODE 0x1c
+/* command : Monitor Mode Status Write Reg */
+#define SR_CMD_WRITE_MONITOR_MODE 0x1d
+/* command : GPIO Status Read Reg */
+#define SR_CMD_READ_GPIOS 0x1e
+#define SR_GPIO_GPO0EN (1 << 0) /* GPIO0 Output enable */
+#define SR_GPIO_GPO_0 (1 << 1) /* GPIO0 Output value */
+#define SR_GPIO_GPO1EN (1 << 2) /* GPIO1 Output enable */
+#define SR_GPIO_GPO_1 (1 << 3) /* GPIO1 Output value */
+#define SR_GPIO_GPO2EN (1 << 4) /* GPIO2 Output enable */
+#define SR_GPIO_GPO_2 (1 << 5) /* GPIO2 Output value */
+#define SR_GPIO_RESERVED (1 << 6) /* Reserved */
+#define SR_GPIO_RSE (1 << 7) /* Reload serial EEPROM */
+/* command : GPIO Status Write Reg */
+#define SR_CMD_WRITE_GPIOS 0x1f
+/* command : Eth PHY Power and Reset Control Reg */
+#define SR_CMD_SW_RESET 0x20
+#define SR_SWRESET_CLEAR 0x00
+#define SR_SWRESET_RR (1 << 0)
+#define SR_SWRESET_RT (1 << 1)
+#define SR_SWRESET_PRTE (1 << 2)
+#define SR_SWRESET_PRL (1 << 3)
+#define SR_SWRESET_BZ (1 << 4)
+#define SR_SWRESET_IPRL (1 << 5)
+#define SR_SWRESET_IPPD (1 << 6)
+/* command : Software Interface Selection Status Read Reg */
+#define SR_CMD_SW_PHY_STATUS 0x21
+/* command : Software Interface Selection Status Write Reg */
+#define SR_CMD_SW_PHY_SELECT 0x22
+/* command : BULK in Buffer Size Reg */
+#define SR_CMD_BULKIN_SIZE 0x2A
+/* command : LED_MUX Control Reg */
+#define SR_CMD_LED_MUX 0x70
+#define SR_LED_MUX_TX_ACTIVE (1 << 0)
+#define SR_LED_MUX_RX_ACTIVE (1 << 1)
+#define SR_LED_MUX_COLLISION (1 << 2)
+#define SR_LED_MUX_DUP_COL (1 << 3)
+#define SR_LED_MUX_DUP (1 << 4)
+#define SR_LED_MUX_SPEED (1 << 5)
+#define SR_LED_MUX_LINK_ACTIVE (1 << 6)
+#define SR_LED_MUX_LINK (1 << 7)
+
+/* Register Access Flags */
+#define SR_REQ_RD_REG (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+#define SR_REQ_WR_REG (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE)
+
+/* Multicast Filter Array size & Max Number */
+#define SR_MCAST_FILTER_SIZE 8
+#define SR_MAX_MCAST 64
+
+/* IPG0/1/2 Default Value */
+#define SR9800_IPG0_DEFAULT 0x15
+#define SR9800_IPG1_DEFAULT 0x0c
+#define SR9800_IPG2_DEFAULT 0x12
+
+/* Medium Status Default Mode */
+#define SR9800_MEDIUM_DEFAULT \
+ (SR_MEDIUM_FD | SR_MEDIUM_RFC | \
+ SR_MEDIUM_TFC | SR_MEDIUM_PS | \
+ SR_MEDIUM_AC | SR_MEDIUM_RE)
+
+/* RX Control Default Setting */
+#define SR_DEFAULT_RX_CTL \
+ (SR_RX_CTL_SO | SR_RX_CTL_AB | SR_RX_CTL_RH1M)
+
+/* EEPROM Magic Number & EEPROM Size */
+#define SR_EEPROM_MAGIC 0xdeadbeef
+#define SR9800_EEPROM_LEN 0xff
+
+/* SR9800 Driver Version and Driver Name */
+#define DRIVER_VERSION "11-Nov-2013"
+#define DRIVER_NAME "CoreChips"
+#define DRIVER_FLAG \
+ (FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR | FLAG_MULTI_PACKET)
+
+/* SR9800 BULKIN Buffer Size */
+#define SR9800_MAX_BULKIN_2K 0
+#define SR9800_MAX_BULKIN_4K 1
+#define SR9800_MAX_BULKIN_6K 2
+#define SR9800_MAX_BULKIN_8K 3
+#define SR9800_MAX_BULKIN_16K 4
+#define SR9800_MAX_BULKIN_20K 5
+#define SR9800_MAX_BULKIN_24K 6
+#define SR9800_MAX_BULKIN_32K 7
+
+static const struct { unsigned short size, byte_cnt, threshold; } SR9800_BULKIN_SIZE[] = {
+ /* 2k */
+ {2048, 0x8000, 0x8001},
+ /* 4k */
+ {4096, 0x8100, 0x8147},
+ /* 6k */
+ {6144, 0x8200, 0x81EB},
+ /* 8k */
+ {8192, 0x8300, 0x83D7},
+	/* 16k */
+ {16384, 0x8400, 0x851E},
+ /* 20k */
+ {20480, 0x8500, 0x8666},
+ /* 24k */
+ {24576, 0x8600, 0x87AE},
+ /* 32k */
+ {32768, 0x8700, 0x8A3D},
+};
+
+/* This structure cannot exceed sizeof(unsigned long [5]) AKA 20 bytes */
+struct sr_data {
+ u8 multi_filter[SR_MCAST_FILTER_SIZE];
+ u8 mac_addr[ETH_ALEN];
+ u8 phymode;
+ u8 ledmode;
+ u8 eeprom_len;
+};
+
+struct sr9800_int_data {
+ __le16 res1;
+ u8 link;
+ __le16 res2;
+ u8 status;
+ __le16 res3;
+} __packed;
+
+#endif /* _SR9800_H */
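struct sr_data above is parked inside struct usbnet's scratch area, which the comment documents as unsigned long [5] (20 bytes) but nothing enforces. A hedged sketch of how that constraint could be checked at compile time (the driver itself does not do this):

#include <linux/bug.h>

/* Illustrative only: fail the build if struct sr_data outgrows dev->data. */
static inline void example_sr_data_size_check(void)
{
	BUILD_BUG_ON(sizeof(struct sr_data) > sizeof(unsigned long [5]));
}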
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 4671da755e7b..dd10d5817d2a 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -542,17 +542,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
}
// else network stack removes extra byte if we forced a short packet
- if (skb->len) {
- /* all data was already cloned from skb inside the driver */
- if (dev->driver_info->flags & FLAG_MULTI_PACKET)
- dev_kfree_skb_any(skb);
- else
- usbnet_skb_return(dev, skb);
+ /* all data was already cloned from skb inside the driver */
+ if (dev->driver_info->flags & FLAG_MULTI_PACKET)
+ goto done;
+
+ if (skb->len < ETH_HLEN) {
+ dev->net->stats.rx_errors++;
+ dev->net->stats.rx_length_errors++;
+ netif_dbg(dev, rx_err, dev->net, "rx length %d\n", skb->len);
+ } else {
+ usbnet_skb_return(dev, skb);
return;
}
- netif_dbg(dev, rx_err, dev->net, "drop\n");
- dev->net->stats.rx_errors++;
done:
skb_queue_tail(&dev->done, skb);
}
@@ -574,13 +576,6 @@ static void rx_complete (struct urb *urb)
switch (urb_status) {
/* success */
case 0:
- if (skb->len < dev->net->hard_header_len) {
- state = rx_cleanup;
- dev->net->stats.rx_errors++;
- dev->net->stats.rx_length_errors++;
- netif_dbg(dev, rx_err, dev->net,
- "rx length %d\n", skb->len);
- }
break;
/* stalls need manual reset. this is rare ... except that
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 2ec2041b62d4..5b374370f71c 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -285,7 +285,8 @@ static void veth_setup(struct net_device *dev)
dev->ethtool_ops = &veth_ethtool_ops;
dev->features |= NETIF_F_LLTX;
dev->features |= VETH_FEATURES;
- dev->vlan_features = dev->features;
+ dev->vlan_features = dev->features &
+ ~(NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX);
dev->destructor = veth_dev_free;
dev->hw_features = VETH_FEATURES;
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d75f8edf4fb3..5632a99cbbd2 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1711,7 +1711,8 @@ static int virtnet_probe(struct virtio_device *vdev)
/* If we can receive ANY GSO packets, we must allocate large ones. */
if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
- virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) ||
+ virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
vi->big_packets = true;
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 3be786faaaec..0fa3b44f7342 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1762,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev)
{
struct vmxnet3_adapter *adapter = netdev_priv(netdev);
- if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
- vmxnet3_disable_all_intrs(adapter);
-
- vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
- vmxnet3_enable_all_intrs(adapter);
+ switch (adapter->intr.type) {
+#ifdef CONFIG_PCI_MSI
+ case VMXNET3_IT_MSIX: {
+ int i;
+ for (i = 0; i < adapter->num_rx_queues; i++)
+ vmxnet3_msix_rx(0, &adapter->rx_queue[i]);
+ break;
+ }
+#endif
+ case VMXNET3_IT_MSI:
+ default:
+ vmxnet3_intr(0, adapter->netdev);
+ break;
+ }
}
#endif /* CONFIG_NET_POLL_CONTROLLER */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 026a313c2d2d..1236812c7be6 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -469,7 +469,6 @@ static inline struct hlist_head *vxlan_fdb_head(struct vxlan_dev *vxlan,
/* Look up Ethernet address in forwarding table */
static struct vxlan_fdb *__vxlan_find_mac(struct vxlan_dev *vxlan,
const u8 *mac)
-
{
struct hlist_head *head = vxlan_fdb_head(vxlan, mac);
struct vxlan_fdb *f;
@@ -596,10 +595,8 @@ static struct sk_buff **vxlan_gro_receive(struct sk_buff **head, struct sk_buff
NAPI_GRO_CB(p)->same_flow = 0;
continue;
}
- goto found;
}
-found:
type = eh->h_proto;
rcu_read_lock();
@@ -1321,6 +1318,9 @@ static int arp_reduce(struct net_device *dev, struct sk_buff *skb)
neigh_release(n);
+ if (reply == NULL)
+ goto out;
+
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1342,15 +1342,103 @@ out:
}
#if IS_ENABLED(CONFIG_IPV6)
+
+static struct sk_buff *vxlan_na_create(struct sk_buff *request,
+ struct neighbour *n, bool isrouter)
+{
+ struct net_device *dev = request->dev;
+ struct sk_buff *reply;
+ struct nd_msg *ns, *na;
+ struct ipv6hdr *pip6;
+ u8 *daddr;
+ int na_olen = 8; /* opt hdr + ETH_ALEN for target */
+ int ns_olen;
+ int i, len;
+
+ if (dev == NULL)
+ return NULL;
+
+ len = LL_RESERVED_SPACE(dev) + sizeof(struct ipv6hdr) +
+ sizeof(*na) + na_olen + dev->needed_tailroom;
+ reply = alloc_skb(len, GFP_ATOMIC);
+ if (reply == NULL)
+ return NULL;
+
+ reply->protocol = htons(ETH_P_IPV6);
+ reply->dev = dev;
+ skb_reserve(reply, LL_RESERVED_SPACE(request->dev));
+ skb_push(reply, sizeof(struct ethhdr));
+ skb_set_mac_header(reply, 0);
+
+ ns = (struct nd_msg *)skb_transport_header(request);
+
+ daddr = eth_hdr(request)->h_source;
+ ns_olen = request->len - skb_transport_offset(request) - sizeof(*ns);
+ for (i = 0; i < ns_olen-1; i += (ns->opt[i+1]<<3)) {
+ if (ns->opt[i] == ND_OPT_SOURCE_LL_ADDR) {
+ daddr = ns->opt + i + sizeof(struct nd_opt_hdr);
+ break;
+ }
+ }
+
+ /* Ethernet header */
+ ether_addr_copy(eth_hdr(reply)->h_dest, daddr);
+ ether_addr_copy(eth_hdr(reply)->h_source, n->ha);
+ eth_hdr(reply)->h_proto = htons(ETH_P_IPV6);
+ reply->protocol = htons(ETH_P_IPV6);
+
+ skb_pull(reply, sizeof(struct ethhdr));
+ skb_set_network_header(reply, 0);
+ skb_put(reply, sizeof(struct ipv6hdr));
+
+ /* IPv6 header */
+
+ pip6 = ipv6_hdr(reply);
+ memset(pip6, 0, sizeof(struct ipv6hdr));
+ pip6->version = 6;
+ pip6->priority = ipv6_hdr(request)->priority;
+ pip6->nexthdr = IPPROTO_ICMPV6;
+ pip6->hop_limit = 255;
+ pip6->daddr = ipv6_hdr(request)->saddr;
+ pip6->saddr = *(struct in6_addr *)n->primary_key;
+
+ skb_pull(reply, sizeof(struct ipv6hdr));
+ skb_set_transport_header(reply, 0);
+
+ na = (struct nd_msg *)skb_put(reply, sizeof(*na) + na_olen);
+
+ /* Neighbor Advertisement */
+ memset(na, 0, sizeof(*na)+na_olen);
+ na->icmph.icmp6_type = NDISC_NEIGHBOUR_ADVERTISEMENT;
+ na->icmph.icmp6_router = isrouter;
+ na->icmph.icmp6_override = 1;
+ na->icmph.icmp6_solicited = 1;
+ na->target = ns->target;
+ ether_addr_copy(&na->opt[2], n->ha);
+ na->opt[0] = ND_OPT_TARGET_LL_ADDR;
+ na->opt[1] = na_olen >> 3;
+
+ na->icmph.icmp6_cksum = csum_ipv6_magic(&pip6->saddr,
+ &pip6->daddr, sizeof(*na)+na_olen, IPPROTO_ICMPV6,
+ csum_partial(na, sizeof(*na)+na_olen, 0));
+
+ pip6->payload_len = htons(sizeof(*na)+na_olen);
+
+ skb_push(reply, sizeof(struct ipv6hdr));
+
+ reply->ip_summed = CHECKSUM_UNNECESSARY;
+
+ return reply;
+}
+
static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
- struct neighbour *n;
- union vxlan_addr ipa;
+ struct nd_msg *msg;
const struct ipv6hdr *iphdr;
const struct in6_addr *saddr, *daddr;
- struct nd_msg *msg;
- struct inet6_dev *in6_dev = NULL;
+ struct neighbour *n;
+ struct inet6_dev *in6_dev;
in6_dev = __in6_dev_get(dev);
if (!in6_dev)
@@ -1363,19 +1451,20 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
saddr = &iphdr->saddr;
daddr = &iphdr->daddr;
- if (ipv6_addr_loopback(daddr) ||
- ipv6_addr_is_multicast(daddr))
- goto out;
-
msg = (struct nd_msg *)skb_transport_header(skb);
if (msg->icmph.icmp6_code != 0 ||
msg->icmph.icmp6_type != NDISC_NEIGHBOUR_SOLICITATION)
goto out;
- n = neigh_lookup(ipv6_stub->nd_tbl, daddr, dev);
+ if (ipv6_addr_loopback(daddr) ||
+ ipv6_addr_is_multicast(&msg->target))
+ goto out;
+
+ n = neigh_lookup(ipv6_stub->nd_tbl, &msg->target, dev);
if (n) {
struct vxlan_fdb *f;
+ struct sk_buff *reply;
if (!(n->nud_state & NUD_CONNECTED)) {
neigh_release(n);
@@ -1389,13 +1478,23 @@ static int neigh_reduce(struct net_device *dev, struct sk_buff *skb)
goto out;
}
- ipv6_stub->ndisc_send_na(dev, n, saddr, &msg->target,
- !!in6_dev->cnf.forwarding,
- true, false, false);
+ reply = vxlan_na_create(skb, n,
+ !!(f ? f->flags & NTF_ROUTER : 0));
+
neigh_release(n);
+
+ if (reply == NULL)
+ goto out;
+
+ if (netif_rx_ni(reply) == NET_RX_DROP)
+ dev->stats.rx_dropped++;
+
} else if (vxlan->flags & VXLAN_F_L3MISS) {
- ipa.sin6.sin6_addr = *daddr;
- ipa.sa.sa_family = AF_INET6;
+ union vxlan_addr ipa = {
+ .sin6.sin6_addr = msg->target,
+ .sa.sa_family = AF_INET6,
+ };
+
vxlan_ip_miss(dev, &ipa);
}
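vxlan_na_create() builds the neighbour advertisement by hand, so it also has to compute the ICMPv6 checksum itself: a pseudo-header sum over the IPv6 source and destination plus the folded sum of the message body. That step in isolation, as a hedged sketch (the helper name is illustrative):

#include <linux/ipv6.h>
#include <net/ip6_checksum.h>

/* Illustrative only: ICMPv6 checksum = pseudo-header + message body. */
static __sum16 example_icmpv6_csum(const struct ipv6hdr *ip6,
				   const void *body, int len)
{
	return csum_ipv6_magic(&ip6->saddr, &ip6->daddr, len,
			       IPPROTO_ICMPV6, csum_partial(body, len, 0));
}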
diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c
index 0d1c7592efa0..19f7cb2cdef3 100644
--- a/drivers/net/wan/dlci.c
+++ b/drivers/net/wan/dlci.c
@@ -71,12 +71,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
const void *saddr, unsigned len)
{
struct frhdr hdr;
- struct dlci_local *dlp;
unsigned int hlen;
char *dest;
- dlp = netdev_priv(dev);
-
hdr.control = FRAD_I_UI;
switch (type)
{
@@ -107,11 +104,9 @@ static int dlci_header(struct sk_buff *skb, struct net_device *dev,
static void dlci_receive(struct sk_buff *skb, struct net_device *dev)
{
- struct dlci_local *dlp;
struct frhdr *hdr;
int process, header;
- dlp = netdev_priv(dev);
if (!pskb_may_pull(skb, sizeof(*hdr))) {
netdev_notice(dev, "invalid data no header\n");
dev->stats.rx_errors++;
diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
index 8aa20df55e50..507d9a9ee69a 100644
--- a/drivers/net/wireless/ath/ar5523/ar5523.c
+++ b/drivers/net/wireless/ath/ar5523/ar5523.c
@@ -1764,7 +1764,7 @@ static struct usb_device_id ar5523_id_table[] = {
AR5523_DEVICE_UG(0x07d1, 0x3a07), /* D-Link / WUA-2340 rev A1 */
AR5523_DEVICE_UG(0x1690, 0x0712), /* Gigaset / AR5523 */
AR5523_DEVICE_UG(0x1690, 0x0710), /* Gigaset / SMCWUSBTG */
- AR5523_DEVICE_UG(0x129b, 0x160c), /* Gigaset / USB stick 108
+ AR5523_DEVICE_UG(0x129b, 0x160b), /* Gigaset / USB stick 108
(CyberTAN Technology) */
AR5523_DEVICE_UG(0x16ab, 0x7801), /* Globalsun / AR5523_1 */
AR5523_DEVICE_UX(0x16ab, 0x7811), /* Globalsun / AR5523_2 */
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index d6bc7cb61bfb..1a2973b7acf2 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -110,7 +110,7 @@ ath5k_hw_radio_revision(struct ath5k_hw *ah, enum ieee80211_band band)
ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
if (ah->ah_version == AR5K_AR5210) {
- srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
+ srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
} else {
srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 25243cbc07f0..b8daff78b9d1 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -5065,6 +5065,10 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
break;
}
}
+
+ if (is2GHz && !twiceMaxEdgePower)
+ twiceMaxEdgePower = 60;
+
return twiceMaxEdgePower;
}
diff --git a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
index 1cc13569b17b..1b6b4d0cfa97 100644
--- a/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9462_2p0_initvals.h
@@ -57,7 +57,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x00009e14, 0x37b95d5e, 0x37b9605e, 0x3236605e, 0x32365a5e},
{0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
- {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
+ {0x00009e20, 0x000003a5, 0x000003a5, 0x000003a5, 0x000003a5},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcf946220, 0xcf946220, 0xcfd5c782, 0xcfd5c282},
{0x00009e44, 0x62321e27, 0x62321e27, 0xfe291e27, 0xfe291e27},
@@ -96,7 +96,7 @@ static const u32 ar9462_2p0_baseband_postamble[][5] = {
{0x0000ae04, 0x001c0000, 0x001c0000, 0x001c0000, 0x00100000},
{0x0000ae18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
{0x0000ae1c, 0x0000019c, 0x0000019c, 0x0000019c, 0x0000019c},
- {0x0000ae20, 0x000001b5, 0x000001b5, 0x000001ce, 0x000001ce},
+ {0x0000ae20, 0x000001a6, 0x000001a6, 0x000001aa, 0x000001aa},
{0x0000b284, 0x00000000, 0x00000000, 0x00000550, 0x00000550},
};
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index 58da3468d1f0..99a203174f45 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -262,6 +262,8 @@ enum tid_aggr_state {
struct ath9k_htc_sta {
u8 index;
enum tid_aggr_state tid_state[ATH9K_HTC_MAX_TID];
+ struct work_struct rc_update_work;
+ struct ath9k_htc_priv *htc_priv;
};
#define ATH9K_HTC_RXBUF 256
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index f4e1de20d99c..c57d6b859c04 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -34,6 +34,10 @@ static int ath9k_htc_btcoex_enable;
module_param_named(btcoex_enable, ath9k_htc_btcoex_enable, int, 0444);
MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
#define CHAN2G(_freq, _idx) { \
.center_freq = (_freq), \
.hw_value = (_idx), \
@@ -725,12 +729,14 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_HAS_RATE_CONTROL |
IEEE80211_HW_RX_INCLUDES_FCS |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_MFP_CAPABLE |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 608d739d1378..c9254a61ca52 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -1270,18 +1270,50 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
mutex_unlock(&priv->mutex);
}
+static void ath9k_htc_sta_rc_update_work(struct work_struct *work)
+{
+ struct ath9k_htc_sta *ista =
+ container_of(work, struct ath9k_htc_sta, rc_update_work);
+ struct ieee80211_sta *sta =
+ container_of((void *)ista, struct ieee80211_sta, drv_priv);
+ struct ath9k_htc_priv *priv = ista->htc_priv;
+ struct ath_common *common = ath9k_hw_common(priv->ah);
+ struct ath9k_htc_target_rate trate;
+
+ mutex_lock(&priv->mutex);
+ ath9k_htc_ps_wakeup(priv);
+
+ memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
+ ath9k_htc_setup_rate(priv, sta, &trate);
+ if (!ath9k_htc_send_rate_cmd(priv, &trate))
+ ath_dbg(common, CONFIG,
+ "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
+ sta->addr, be32_to_cpu(trate.capflags));
+ else
+ ath_dbg(common, CONFIG,
+ "Unable to update supported rates for sta: %pM\n",
+ sta->addr);
+
+ ath9k_htc_ps_restore(priv);
+ mutex_unlock(&priv->mutex);
+}
+
static int ath9k_htc_sta_add(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
ret = ath9k_htc_add_station(priv, vif, sta);
- if (!ret)
+ if (!ret) {
+ INIT_WORK(&ista->rc_update_work, ath9k_htc_sta_rc_update_work);
+ ista->htc_priv = priv;
ath9k_htc_init_rate(priv, sta);
+ }
ath9k_htc_ps_restore(priv);
mutex_unlock(&priv->mutex);
@@ -1293,12 +1325,13 @@ static int ath9k_htc_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct ath9k_htc_priv *priv = hw->priv;
- struct ath9k_htc_sta *ista;
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
int ret;
+ cancel_work_sync(&ista->rc_update_work);
+
mutex_lock(&priv->mutex);
ath9k_htc_ps_wakeup(priv);
- ista = (struct ath9k_htc_sta *) sta->drv_priv;
htc_sta_drain(priv->htc, ista->index);
ret = ath9k_htc_remove_station(priv, vif, sta);
ath9k_htc_ps_restore(priv);
@@ -1311,28 +1344,12 @@ static void ath9k_htc_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u32 changed)
{
- struct ath9k_htc_priv *priv = hw->priv;
- struct ath_common *common = ath9k_hw_common(priv->ah);
- struct ath9k_htc_target_rate trate;
-
- mutex_lock(&priv->mutex);
- ath9k_htc_ps_wakeup(priv);
+ struct ath9k_htc_sta *ista = (struct ath9k_htc_sta *) sta->drv_priv;
- if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
- memset(&trate, 0, sizeof(struct ath9k_htc_target_rate));
- ath9k_htc_setup_rate(priv, sta, &trate);
- if (!ath9k_htc_send_rate_cmd(priv, &trate))
- ath_dbg(common, CONFIG,
- "Supported rates for sta: %pM updated, rate caps: 0x%X\n",
- sta->addr, be32_to_cpu(trate.capflags));
- else
- ath_dbg(common, CONFIG,
- "Unable to update supported rates for sta: %pM\n",
- sta->addr);
- }
+ if (!(changed & IEEE80211_RC_SUPP_RATES_CHANGED))
+ return;
- ath9k_htc_ps_restore(priv);
- mutex_unlock(&priv->mutex);
+ schedule_work(&ista->rc_update_work);
}
static int ath9k_htc_conf_tx(struct ieee80211_hw *hw,
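One subtlety in the hunk above: the new rc_update work handler takes priv->mutex, so ath9k_htc_sta_remove() cancels it before acquiring that mutex; cancelling under the lock could wait forever on a handler that is itself blocked on the same mutex. A hedged sketch of the safe ordering (the function name and elided steps are illustrative):

#include <linux/workqueue.h>

#include "htc.h"

/* Illustrative only: cancel work that takes priv->mutex before locking it. */
static void example_sta_teardown(struct ath9k_htc_priv *priv,
				 struct ath9k_htc_sta *ista)
{
	cancel_work_sync(&ista->rc_update_work);	/* may run the handler */

	mutex_lock(&priv->mutex);
	/* ... htc_sta_drain() and ath9k_htc_remove_station() go here ... */
	mutex_unlock(&priv->mutex);
}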
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index fbf43c05713f..9078a6c5a74e 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1316,7 +1316,7 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
if (AR_SREV_9300_20_OR_LATER(ah))
udelay(50);
else if (AR_SREV_9100(ah))
- udelay(10000);
+ mdelay(10);
else
udelay(100);
@@ -1534,7 +1534,7 @@ EXPORT_SYMBOL(ath9k_hw_check_nav);
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
- u32 reg;
+ u32 reg, last_val;
if (AR_SREV_9300(ah))
return !ath9k_hw_detect_mac_hang(ah);
@@ -1542,9 +1542,14 @@ bool ath9k_hw_check_alive(struct ath_hw *ah)
if (AR_SREV_9285_12_OR_LATER(ah))
return true;
+ last_val = REG_READ(ah, AR_OBS_BUS_1);
do {
reg = REG_READ(ah, AR_OBS_BUS_1);
+ if (reg != last_val)
+ return true;
+ udelay(1);
+ last_val = reg;
if ((reg & 0x7E7FFFEF) == 0x00702400)
continue;
@@ -2051,9 +2056,8 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah)
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
-
if (AR_SREV_9100(ah))
- udelay(10000);
+ mdelay(10);
else
udelay(50);
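The ath9k_hw_check_alive() change reads AR_OBS_BUS_1 twice per loop iteration: if the observed value moves between samples the MAC is still making progress, and only a value that stays stuck is compared against the known hang signature. A hedged sketch of the two-sample idea on its own (the helper name is illustrative):

#include <linux/delay.h>

#include "hw.h"

/* Illustrative only: two samples of an observation bus as a liveness hint. */
static bool example_obs_bus_moving(struct ath_hw *ah)
{
	u32 first = REG_READ(ah, AR_OBS_BUS_1);

	udelay(1);
	return REG_READ(ah, AR_OBS_BUS_1) != first;
}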
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c36de303c8f3..1fc2e5a26b52 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -57,6 +57,10 @@ static int ath9k_bt_ant_diversity;
module_param_named(bt_ant_diversity, ath9k_bt_ant_diversity, int, 0444);
MODULE_PARM_DESC(bt_ant_diversity, "Enable WLAN/BT RX antenna diversity");
+static int ath9k_ps_enable;
+module_param_named(ps_enable, ath9k_ps_enable, int, 0444);
+MODULE_PARM_DESC(ps_enable, "Enable WLAN PowerSave");
+
bool is_ath9k_unloaded;
/* We use the hw_value as an index into our private channel structure */
@@ -903,13 +907,15 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
IEEE80211_HW_SIGNAL_DBM |
- IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK |
IEEE80211_HW_SPECTRUM_MGMT |
IEEE80211_HW_REPORTS_TX_ACK_STATUS |
IEEE80211_HW_SUPPORTS_RC_TABLE |
IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
+ if (ath9k_ps_enable)
+ hw->flags |= IEEE80211_HW_SUPPORTS_PS;
+
if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index a0ebdd000fc2..82e340d3ec60 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -732,11 +732,18 @@ static struct ath_rxbuf *ath_get_next_rx_buf(struct ath_softc *sc,
return NULL;
/*
- * mark descriptor as zero-length and set the 'more'
- * flag to ensure that both buffers get discarded
+ * Re-check previous descriptor, in case it has been filled
+ * in the mean time.
*/
- rs->rs_datalen = 0;
- rs->rs_more = true;
+ ret = ath9k_hw_rxprocdesc(ah, ds, rs);
+ if (ret == -EINPROGRESS) {
+ /*
+ * mark descriptor as zero-length and set the 'more'
+ * flag to ensure that both buffers get discarded
+ */
+ rs->rs_datalen = 0;
+ rs->rs_more = true;
+ }
}
list_del(&bf->list);
@@ -985,32 +992,32 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_hdr *hdr;
bool discard_current = sc->rx.discard_next;
- int ret = 0;
/*
* Discard corrupt descriptors which are marked in
* ath_get_next_rx_buf().
*/
- sc->rx.discard_next = rx_stats->rs_more;
if (discard_current)
- return -EINVAL;
+ goto corrupt;
+
+ sc->rx.discard_next = false;
/*
* Discard zero-length packets.
*/
if (!rx_stats->rs_datalen) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
- /*
- * rs_status follows rs_datalen so if rs_datalen is too large
- * we can take a hint that hardware corrupted it, so ignore
- * those frames.
- */
+ /*
+ * rs_status follows rs_datalen so if rs_datalen is too large
+ * we can take a hint that hardware corrupted it, so ignore
+ * those frames.
+ */
if (rx_stats->rs_datalen > (common->rx_bufsize - ah->caps.rx_status_len)) {
RX_STAT_INC(rx_len_err);
- return -EINVAL;
+ goto corrupt;
}
/* Only use status info from the last fragment */
@@ -1024,10 +1031,8 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
* This is different from the other corrupt descriptor
* condition handled above.
*/
- if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC) {
- ret = -EINVAL;
- goto exit;
- }
+ if (rx_stats->rs_status & ATH9K_RXERR_CORRUPT_DESC)
+ goto corrupt;
hdr = (struct ieee80211_hdr *) (skb->data + ah->caps.rx_status_len);
@@ -1043,18 +1048,15 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
if (ath_process_fft(sc, hdr, rx_stats, rx_status->mactime))
RX_STAT_INC(rx_spectral);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
/*
* everything but the rate is checked here, the rate check is done
* separately to avoid doing two lookups for a rate for each frame.
*/
- if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (!ath9k_rx_accept(common, hdr, rx_status, rx_stats, decrypt_error))
+ return -EINVAL;
if (ath_is_mybeacon(common, hdr)) {
RX_STAT_INC(rx_beacons);
@@ -1064,15 +1066,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
/*
* This shouldn't happen, but have a safety check anyway.
*/
- if (WARN_ON(!ah->curchan)) {
- ret = -EINVAL;
- goto exit;
- }
+ if (WARN_ON(!ah->curchan))
+ return -EINVAL;
- if (ath9k_process_rate(common, hw, rx_stats, rx_status)) {
- ret =-EINVAL;
- goto exit;
- }
+ if (ath9k_process_rate(common, hw, rx_stats, rx_status))
+ return -EINVAL;
ath9k_process_rssi(common, hw, rx_stats, rx_status);
@@ -1087,9 +1085,11 @@ static int ath9k_rx_skb_preprocess(struct ath_softc *sc,
sc->rx.num_pkts++;
#endif
-exit:
- sc->rx.discard_next = false;
- return ret;
+ return 0;
+
+corrupt:
+ sc->rx.discard_next = rx_stats->rs_more;
+ return -EINVAL;
}
static void ath9k_rx_skb_postprocess(struct ath_common *common,
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 0a75e2f68c9d..55897d508a76 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1444,14 +1444,16 @@ void ath_tx_aggr_sleep(struct ieee80211_sta *sta, struct ath_softc *sc,
for (tidno = 0, tid = &an->tid[tidno];
tidno < IEEE80211_NUM_TIDS; tidno++, tid++) {
- if (!tid->sched)
- continue;
-
ac = tid->ac;
txq = ac->txq;
ath_txq_lock(sc, txq);
+ if (!tid->sched) {
+ ath_txq_unlock(sc, txq);
+ continue;
+ }
+
buffered = ath_tid_has_buffered(tid);
tid->sched = false;
@@ -2061,7 +2063,7 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
ATH_TXBUF_RESET(bf);
- if (tid) {
+ if (tid && ieee80211_is_data_present(hdr->frame_control)) {
fragno = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seqno = tid->seq_next;
hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT);
@@ -2184,14 +2186,15 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
txq->stopped = true;
}
+ if (txctl->an && ieee80211_is_data_present(hdr->frame_control))
+ tid = ath_get_skb_tid(sc, txctl->an, skb);
+
if (info->flags & IEEE80211_TX_CTL_PS_RESPONSE) {
ath_txq_unlock(sc, txq);
txq = sc->tx.uapsdq;
ath_txq_lock(sc, txq);
} else if (txctl->an &&
ieee80211_is_data_present(hdr->frame_control)) {
- tid = ath_get_skb_tid(sc, txctl->an, skb);
-
WARN_ON(tid->ac->txq != txctl->txq);
if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index 3e991897d7ca..ddaa9efd053d 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -457,7 +457,6 @@ struct brcmf_sdio {
u8 tx_hdrlen; /* sdio bus header length for tx packet */
bool txglom; /* host tx glomming enable flag */
- struct sk_buff *txglom_sgpad; /* scatter-gather padding buffer */
u16 head_align; /* buffer pointer alignment */
u16 sgentry_align; /* scatter-gather buffer alignment */
};
@@ -1944,19 +1943,21 @@ static int brcmf_sdio_txpkt_prep_sg(struct brcmf_sdio *bus,
if (lastfrm && chain_pad)
tail_pad += blksize - chain_pad;
if (skb_tailroom(pkt) < tail_pad && pkt->len > blksize) {
- pkt_pad = bus->txglom_sgpad;
- if (pkt_pad == NULL)
- brcmu_pkt_buf_get_skb(tail_pad + tail_chop);
+ pkt_pad = brcmu_pkt_buf_get_skb(tail_pad + tail_chop +
+ bus->head_align);
if (pkt_pad == NULL)
return -ENOMEM;
ret = brcmf_sdio_txpkt_hdalign(bus, pkt_pad);
- if (unlikely(ret < 0))
+ if (unlikely(ret < 0)) {
+ kfree_skb(pkt_pad);
return ret;
+ }
memcpy(pkt_pad->data,
pkt->data + pkt->len - tail_chop,
tail_chop);
*(u32 *)(pkt_pad->cb) = ALIGN_SKB_FLAG + tail_chop;
skb_trim(pkt, pkt->len - tail_chop);
+ skb_trim(pkt_pad, tail_pad + tail_chop);
__skb_queue_after(pktq, pkt, pkt_pad);
} else {
ntail = pkt->data_len + tail_pad -
@@ -2011,7 +2012,7 @@ brcmf_sdio_txpkt_prep(struct brcmf_sdio *bus, struct sk_buff_head *pktq,
return ret;
head_pad = (u16)ret;
if (head_pad)
- memset(pkt_next->data, 0, head_pad + bus->tx_hdrlen);
+ memset(pkt_next->data + bus->tx_hdrlen, 0, head_pad);
total_len += pkt_next->len;
@@ -3486,10 +3487,6 @@ static int brcmf_sdio_bus_preinit(struct device *dev)
bus->txglom = false;
value = 1;
pad_size = bus->sdiodev->func[2]->cur_blksize << 1;
- bus->txglom_sgpad = brcmu_pkt_buf_get_skb(pad_size);
- if (!bus->txglom_sgpad)
- brcmf_err("allocating txglom padding skb failed, reduced performance\n");
-
err = brcmf_iovar_data_set(bus->sdiodev->dev, "bus:rxglom",
&value, sizeof(u32));
if (err < 0) {
@@ -4053,7 +4050,6 @@ void brcmf_sdio_remove(struct brcmf_sdio *bus)
brcmf_sdio_chip_detach(&bus->ci);
}
- brcmu_pkt_buf_free_skb(bus->txglom_sgpad);
kfree(bus->rxbuf);
kfree(bus->hdrbuf);
kfree(bus);
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index d36e252d2ccb..596525528f50 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -147,7 +147,7 @@ static void ap_free_sta(struct ap_data *ap, struct sta_info *sta)
if (!sta->ap && sta->u.sta.challenge)
kfree(sta->u.sta.challenge);
- del_timer(&sta->timer);
+ del_timer_sync(&sta->timer);
#endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
kfree(sta);
diff --git a/drivers/net/wireless/hostap/hostap_proc.c b/drivers/net/wireless/hostap/hostap_proc.c
index aa7ad3a7a69b..4e5c0f8c9496 100644
--- a/drivers/net/wireless/hostap/hostap_proc.c
+++ b/drivers/net/wireless/hostap/hostap_proc.c
@@ -496,7 +496,7 @@ void hostap_init_proc(local_info_t *local)
void hostap_remove_proc(local_info_t *local)
{
- remove_proc_subtree(local->ddev->name, hostap_proc);
+ proc_remove(local->proc);
}
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index c24d1d3d55f6..73086c1629ca 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -696,6 +696,24 @@ static int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
return ret;
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* disabled by default */
+ return false;
+}
+
static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -717,7 +735,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ if (!iwl_enable_rx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Rx\n");
ret = iwl_sta_rx_agg_start(priv, sta, tid, *ssn);
@@ -729,7 +747,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
case IEEE80211_AMPDU_TX_START:
if (!priv->trans->ops->txq_enable)
break;
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ if (!iwl_enable_tx_ampdu(priv->cfg))
break;
IWL_DEBUG_HT(priv, "start Tx\n");
ret = iwlagn_tx_agg_start(priv, vif, sta, tid, ssn);
diff --git a/drivers/net/wireless/iwlwifi/dvm/sta.c b/drivers/net/wireless/iwlwifi/dvm/sta.c
index c0d070c5df5e..9cdd91cdf661 100644
--- a/drivers/net/wireless/iwlwifi/dvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/dvm/sta.c
@@ -590,6 +590,7 @@ void iwl_deactivate_station(struct iwl_priv *priv, const u8 sta_id,
sizeof(priv->tid_data[sta_id][tid]));
priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
+ priv->stations[sta_id].used &= ~IWL_STA_UCODE_INPROGRESS;
priv->num_stations--;
diff --git a/drivers/net/wireless/iwlwifi/dvm/tx.c b/drivers/net/wireless/iwlwifi/dvm/tx.c
index a6839dfcb82d..398dd096674c 100644
--- a/drivers/net/wireless/iwlwifi/dvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/dvm/tx.c
@@ -1291,8 +1291,6 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
struct iwl_compressed_ba_resp *ba_resp = (void *)pkt->data;
struct iwl_ht_agg *agg;
struct sk_buff_head reclaimed_skbs;
- struct ieee80211_tx_info *info;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id;
int tid;
@@ -1379,22 +1377,28 @@ int iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(priv->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
info = IEEE80211_SKB_CB(skb);
memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_resp->txed_2_done;
info->status.ampdu_len = ba_resp->txed;
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index c3728163be46..75103554cd63 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1286,7 +1286,7 @@ module_param_named(swcrypto, iwlwifi_mod_params.sw_crypto, int, S_IRUGO);
MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
module_param_named(11n_disable, iwlwifi_mod_params.disable_11n, uint, S_IRUGO);
MODULE_PARM_DESC(11n_disable,
- "disable 11n functionality, bitmap: 1: full, 2: agg TX, 4: agg RX");
+ "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size_8K, iwlwifi_mod_params.amsdu_size_8K,
int, S_IRUGO);
MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size (default 0)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-modparams.h b/drivers/net/wireless/iwlwifi/iwl-modparams.h
index 0a84ade7edac..b29075c3da8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-modparams.h
+++ b/drivers/net/wireless/iwlwifi/iwl-modparams.h
@@ -79,9 +79,12 @@ enum iwl_power_level {
IWL_POWER_NUM
};
-#define IWL_DISABLE_HT_ALL BIT(0)
-#define IWL_DISABLE_HT_TXAGG BIT(1)
-#define IWL_DISABLE_HT_RXAGG BIT(2)
+enum iwl_disable_11n {
+ IWL_DISABLE_HT_ALL = BIT(0),
+ IWL_DISABLE_HT_TXAGG = BIT(1),
+ IWL_DISABLE_HT_RXAGG = BIT(2),
+ IWL_ENABLE_HT_TXAGG = BIT(3),
+};
/**
* struct iwl_mod_params
@@ -90,7 +93,7 @@ enum iwl_power_level {
*
* @sw_crypto: using hardware encryption, default = 0
* @disable_11n: disable 11n capabilities, default = 0,
- * use IWL_DISABLE_HT_* constants
+ * use IWL_[DIS,EN]ABLE_HT_* constants
* @amsdu_size_8K: enable 8K amsdu size, default = 0
* @restart_fw: restart firmware, default = 1
* @wd_disable: enable stuck queue check, default = 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index f06f4cbe1317..725e954d8475 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -182,6 +182,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
for (ch_idx = 0; ch_idx < IWL_NUM_CHANNELS; ch_idx++) {
ch_flags = __le16_to_cpup(nvm_ch_flags + ch_idx);
+
+ if (ch_idx >= NUM_2GHZ_CHANNELS &&
+ !data->sku_cap_band_52GHz_enable)
+ ch_flags &= ~NVM_CHANNEL_VALID;
+
if (!(ch_flags & NVM_CHANNEL_VALID)) {
IWL_DEBUG_EEPROM(dev,
"Ch. %d Flags %x [%sGHz] - No traffic\n",
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 76cde6ce6551..18a895a949d4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -872,8 +872,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
- /* Rssi update while not associated ?! */
- if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
+ /*
+ * Rssi update while not associated - can happen since the statistics
+ * are handled asynchronously
+ */
+ if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
/* No BT - reports should be disabled */
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 73cbba7424f2..9426905de6b2 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -504,6 +504,7 @@ struct iwl_scan_offload_profile {
* @match_notify: clients waiting for match found notification
* @pass_match: clients waiting for the results
* @active_clients: active clients bitmap - enum scan_framework_client
+ * @any_beacon_notify: clients waiting for match notification without match
*/
struct iwl_scan_offload_profile_cfg {
struct iwl_scan_offload_profile profiles[IWL_SCAN_MAX_PROFILES];
@@ -512,7 +513,8 @@ struct iwl_scan_offload_profile_cfg {
u8 match_notify;
u8 pass_match;
u8 active_clients;
- u8 reserved[3];
+ u8 any_beacon_notify;
+ u8 reserved[2];
} __packed;
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c49b5073c251..c35b8661b395 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -246,7 +246,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
+ if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
@@ -328,6 +328,24 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
ieee80211_free_txskb(hw, skb);
}
+static inline bool iwl_enable_rx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG)
+ return false;
+ return true;
+}
+
+static inline bool iwl_enable_tx_ampdu(const struct iwl_cfg *cfg)
+{
+ if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
+ return false;
+ if (iwlwifi_mod_params.disable_11n & IWL_ENABLE_HT_TXAGG)
+ return true;
+
+ /* enabled by default */
+ return true;
+}
+
static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum ieee80211_ampdu_mlme_action action,
@@ -347,7 +365,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
switch (action) {
case IEEE80211_AMPDU_RX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_RXAGG) {
+ if (!iwl_enable_rx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
@@ -357,7 +375,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false);
break;
case IEEE80211_AMPDU_TX_START:
- if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG) {
+ if (!iwl_enable_tx_ampdu(mvm->cfg)) {
ret = -EINVAL;
break;
}
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index e4ead86f06d6..2b0ba1fc3c82 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -152,7 +152,7 @@ enum iwl_power_scheme {
IWL_POWER_SCHEME_LP
};
-#define IWL_CONN_MAX_LISTEN_INTERVAL 70
+#define IWL_CONN_MAX_LISTEN_INTERVAL 10
#define IWL_UAPSD_AC_INFO (IEEE80211_WMM_IE_STA_QOSINFO_AC_VO |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_VI |\
IEEE80211_WMM_IE_STA_QOSINFO_AC_BK |\
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 0e0007960612..742afc429c94 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -344,7 +344,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
iwl_mvm_scan_fill_ssids(cmd, req, basic_ssid ? 1 : 0);
- cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL);
+ cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
+ TX_CMD_FLG_BT_DIS);
cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
cmd->tx_cmd.rate_n_flags =
@@ -807,6 +808,8 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
profile_cfg->active_clients = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->pass_match = SCAN_CLIENT_SCHED_SCAN;
profile_cfg->match_notify = SCAN_CLIENT_SCHED_SCAN;
+ if (!req->n_match_sets || !req->match_sets[0].ssid.ssid_len)
+ profile_cfg->any_beacon_notify = SCAN_CLIENT_SCHED_SCAN;
for (i = 0; i < req->n_match_sets; i++) {
profile = &profile_cfg->profiles[i];
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index ec1812133235..3397f59cd4e4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -652,7 +652,7 @@ int iwl_mvm_send_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
- static const u8 *baddr = _baddr;
+ const u8 *baddr = _baddr;
lockdep_assert_held(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 90378c217bc7..76ee486039d7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -659,8 +659,14 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
+ /*
+ * sta can't be NULL; otherwise it'd mean that the sta has been freed in
+ * the firmware while we still have packets for it in the Tx queues.
+ */
+ if (WARN_ON_ONCE(!sta))
+ goto out;
- if (!IS_ERR_OR_NULL(sta)) {
+ if (!IS_ERR(sta)) {
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (tid != IWL_TID_NON_QOS) {
@@ -675,7 +681,6 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
spin_unlock_bh(&mvmsta->lock);
}
} else {
- sta = NULL;
mvmsta = NULL;
}
@@ -683,42 +688,38 @@ static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
* If the txq is not an AMPDU queue, there is no chance we freed
* several skbs. Check that out...
*/
- if (txq_id < mvm->first_agg_queue && !WARN_ON(skb_freed > 1) &&
- atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id])) {
- if (mvmsta) {
- /*
- * If there are no pending frames for this STA, notify
- * mac80211 that this station can go to sleep in its
- * STA table.
- */
- if (mvmsta->vif->type == NL80211_IFTYPE_AP)
- ieee80211_sta_block_awake(mvm->hw, sta, false);
- /*
- * We might very well have taken mvmsta pointer while
- * the station was being removed. The remove flow might
- * have seen a pending_frame (because we didn't take
- * the lock) even if now the queues are drained. So make
- * really sure now that this the station is not being
- * removed. If it is, run the drain worker to remove it.
- */
- spin_lock_bh(&mvmsta->lock);
- sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
- if (!sta || PTR_ERR(sta) == -EBUSY) {
- /*
- * Station disappeared in the meantime:
- * so we are draining.
- */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
- spin_unlock_bh(&mvmsta->lock);
- } else if (!mvmsta && PTR_ERR(sta) == -EBUSY) {
- /* Tx response without STA, so we are draining */
- set_bit(sta_id, mvm->sta_drained);
- schedule_work(&mvm->sta_drained_wk);
- }
+ if (txq_id >= mvm->first_agg_queue)
+ goto out;
+
+ /* We can't free more than one frame at once on a shared queue */
+ WARN_ON(skb_freed > 1);
+
+ /* If we still have frames from this STA, nothing to do here */
+ if (!atomic_sub_and_test(skb_freed, &mvm->pending_frames[sta_id]))
+ goto out;
+
+ if (mvmsta && mvmsta->vif->type == NL80211_IFTYPE_AP) {
+ /*
+ * If there are no pending frames for this STA, notify
+ * mac80211 that this station can go to sleep in its
+ * STA table.
+ * If mvmsta is not NULL, sta is valid.
+ */
+ ieee80211_sta_block_awake(mvm->hw, sta, false);
}
+ if (PTR_ERR(sta) == -EBUSY || PTR_ERR(sta) == -ENOENT) {
+ /*
+ * We are draining and this was the last packet - pre_rcu_remove
+ * has been called already. We might be after the
+ * synchronize_net already.
+ * Don't rely on iwl_mvm_rm_sta to see the empty Tx queues.
+ */
+ set_bit(sta_id, mvm->sta_drained);
+ schedule_work(&mvm->sta_drained_wk);
+ }
+
+out:
rcu_read_unlock();
}
@@ -821,16 +822,12 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
- struct ieee80211_tx_info *info;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
- struct ieee80211_hdr *hdr;
struct sk_buff *skb;
int sta_id, tid, freed;
-
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
-
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
@@ -887,22 +884,26 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
freed = 0;
skb_queue_walk(&reclaimed_skbs, skb) {
- hdr = (struct ieee80211_hdr *)skb->data;
+ struct ieee80211_hdr *hdr = (void *)skb->data;
+ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (ieee80211_is_data_qos(hdr->frame_control))
freed++;
else
WARN_ON_ONCE(1);
- info = IEEE80211_SKB_CB(skb);
iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
+ memset(&info->status, 0, sizeof(info->status));
+ /* Packet was transmitted successfully, failures come as single
+ * frames because before failing a frame the firmware transmits
+ * it without aggregation at least once.
+ */
+ info->flags |= IEEE80211_TX_STAT_ACK;
+
if (freed == 1) {
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
- info = IEEE80211_SKB_CB(skb);
- memset(&info->status, 0, sizeof(info->status));
- info->flags |= IEEE80211_TX_STAT_ACK;
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index a4a5e25623c3..86989df69356 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -411,6 +411,8 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
mvm->status, table.valid);
}
+ IWL_ERR(mvm, "Loaded firmware version: %s\n", mvm->fw->fw_version);
+
trace_iwlwifi_dev_ucode_error(trans->dev, table.error_id, table.tsf_low,
table.data1, table.data2, table.data3,
table.blink1, table.blink2, table.ilink1,
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3040924f5f3c..3872ead75488 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -359,20 +359,24 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
- {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x500A, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9012, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
{IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 32f75007a825..cb6d189bc3e6 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -621,7 +621,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
id = *pos++;
elen = *pos++;
left -= 2;
- if (elen > left || elen == 0) {
+ if (elen > left) {
lbs_deb_scan("scan response: invalid IE fmt\n");
goto done;
}
diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c
index 5e0eec4d71c7..5d9a8084665d 100644
--- a/drivers/net/wireless/mwifiex/11ac.c
+++ b/drivers/net/wireless/mwifiex/11ac.c
@@ -189,8 +189,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv,
vht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_vht_cap));
memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *)bss_desc->bcn_vht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_vht_cap,
le16_to_cpu(vht_cap->header.len));
mwifiex_fill_vht_cap_tlv(priv, vht_cap, bss_desc->bss_band);
diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c
index 6261f8c53d44..7db1a89fdd95 100644
--- a/drivers/net/wireless/mwifiex/11n.c
+++ b/drivers/net/wireless/mwifiex/11n.c
@@ -308,8 +308,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv,
ht_cap->header.len =
cpu_to_le16(sizeof(struct ieee80211_ht_cap));
memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header),
- (u8 *) bss_desc->bcn_ht_cap +
- sizeof(struct ieee_types_header),
+ (u8 *)bss_desc->bcn_ht_cap,
le16_to_cpu(ht_cap->header.len));
mwifiex_fill_cap_info(priv, radio_type, ht_cap);
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index 4d79761b9c87..9d3d2758ec35 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -748,7 +748,7 @@ static struct net_device_stats *mwifiex_get_stats(struct net_device *dev)
static u16
mwifiex_netdev_select_wmm_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
skb->priority = cfg80211_classify8021d(skb, NULL);
return mwifiex_1d_to_wmm_queue[skb->priority];
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 03688aa14e8a..7fe7b53fb17a 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -1211,6 +1211,12 @@ static int mwifiex_pcie_process_recv_data(struct mwifiex_adapter *adapter)
rd_index = card->rxbd_rdptr & reg->rx_mask;
skb_data = card->rx_buf_list[rd_index];
+ /* If skb allocation failed earlier for the Rx packet,
+ * rx_buf_list[rd_index] would have been left with a NULL.
+ */
+ if (!skb_data)
+ return -ENOMEM;
+
MWIFIEX_SKB_PACB(skb_data, &buf_pa);
pci_unmap_single(card->dev, buf_pa, MWIFIEX_RX_DATA_BUF_SIZE,
PCI_DMA_FROMDEVICE);
@@ -1525,6 +1531,14 @@ static int mwifiex_pcie_process_cmd_complete(struct mwifiex_adapter *adapter)
if (adapter->ps_state == PS_STATE_SLEEP_CFM) {
mwifiex_process_sleep_confirm_resp(adapter, skb->data,
skb->len);
+ mwifiex_pcie_enable_host_int(adapter);
+ if (mwifiex_write_reg(adapter,
+ PCIE_CPU_INT_EVENT,
+ CPU_INTR_SLEEP_CFM_DONE)) {
+ dev_warn(adapter->dev,
+ "Write register failed\n");
+ return -1;
+ }
while (reg->sleep_cookie && (count++ < 10) &&
mwifiex_pcie_ok_to_access_hw(adapter))
usleep_range(50, 60);
@@ -1993,23 +2007,9 @@ static void mwifiex_interrupt_status(struct mwifiex_adapter *adapter)
adapter->int_status |= pcie_ireg;
spin_unlock_irqrestore(&adapter->int_lock, flags);
- if (pcie_ireg & HOST_INTR_CMD_DONE) {
- if ((adapter->ps_state == PS_STATE_SLEEP_CFM) ||
- (adapter->ps_state == PS_STATE_SLEEP)) {
- mwifiex_pcie_enable_host_int(adapter);
- if (mwifiex_write_reg(adapter,
- PCIE_CPU_INT_EVENT,
- CPU_INTR_SLEEP_CFM_DONE)
- ) {
- dev_warn(adapter->dev,
- "Write register failed\n");
- return;
-
- }
- }
- } else if (!adapter->pps_uapsd_mode &&
- adapter->ps_state == PS_STATE_SLEEP &&
- mwifiex_pcie_ok_to_access_hw(adapter)) {
+ if (!adapter->pps_uapsd_mode &&
+ adapter->ps_state == PS_STATE_SLEEP &&
+ mwifiex_pcie_ok_to_access_hw(adapter)) {
/* Potentially for PCIe we could get other
* interrupts like shared. Don't change power
* state until cookie is set */
diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c
index 0a8a26e10f01..668547c2de84 100644
--- a/drivers/net/wireless/mwifiex/scan.c
+++ b/drivers/net/wireless/mwifiex/scan.c
@@ -2101,12 +2101,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv)
curr_bss->ht_info_offset);
if (curr_bss->bcn_vht_cap)
- curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_cap_offset);
+ curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_cap_offset);
if (curr_bss->bcn_vht_oper)
- curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf +
- curr_bss->vht_info_offset);
+ curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf +
+ curr_bss->vht_info_offset);
if (curr_bss->bcn_bss_co_2040)
curr_bss->bcn_bss_co_2040 =
diff --git a/drivers/net/wireless/mwifiex/usb.c b/drivers/net/wireless/mwifiex/usb.c
index e8ebbd4bc3cd..208748804a55 100644
--- a/drivers/net/wireless/mwifiex/usb.c
+++ b/drivers/net/wireless/mwifiex/usb.c
@@ -22,8 +22,6 @@
#define USB_VERSION "1.0"
-static const char usbdriver_name[] = "usb8xxx";
-
static struct mwifiex_if_ops usb_ops;
static struct semaphore add_remove_card_sem;
static struct usb_card_rec *usb_card;
@@ -527,13 +525,6 @@ static int mwifiex_usb_resume(struct usb_interface *intf)
MWIFIEX_BSS_ROLE_ANY),
MWIFIEX_ASYNC_CMD);
-#ifdef CONFIG_PM
- /* Resume handler may be called due to remote wakeup,
- * force to exit suspend anyway
- */
- usb_disable_autosuspend(card->udev);
-#endif /* CONFIG_PM */
-
return 0;
}
@@ -567,13 +558,12 @@ static void mwifiex_usb_disconnect(struct usb_interface *intf)
}
static struct usb_driver mwifiex_usb_driver = {
- .name = usbdriver_name,
+ .name = "mwifiex_usb",
.probe = mwifiex_usb_probe,
.disconnect = mwifiex_usb_disconnect,
.id_table = mwifiex_usb_table,
.suspend = mwifiex_usb_suspend,
.resume = mwifiex_usb_resume,
- .supports_autosuspend = 1,
};
static int mwifiex_usb_tx_init(struct mwifiex_adapter *adapter)
diff --git a/drivers/net/wireless/mwifiex/wmm.c b/drivers/net/wireless/mwifiex/wmm.c
index 13eaeed03898..981cf6e7c73b 100644
--- a/drivers/net/wireless/mwifiex/wmm.c
+++ b/drivers/net/wireless/mwifiex/wmm.c
@@ -559,7 +559,8 @@ mwifiex_clean_txrx(struct mwifiex_private *priv)
mwifiex_wmm_delete_all_ralist(priv);
memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
- if (priv->adapter->if_ops.clean_pcie_ring)
+ if (priv->adapter->if_ops.clean_pcie_ring &&
+ !priv->adapter->surprise_removed)
priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
spin_unlock_irqrestore(&priv->wmm.ra_list_spinlock, flags);
}
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index abc5f56f29fe..2f1cd929c6f6 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1877,6 +1877,11 @@ static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
EEPROM_MAC_ADDR_0));
/*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+ /*
* Initialize hw_mode information.
*/
spec->supported_bands = SUPPORT_BAND_2GHZ;
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9f16824cd1bc..d849d590de25 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1706,6 +1706,11 @@ static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
IEEE80211_HW_SUPPORTS_PS |
IEEE80211_HW_PS_NULLFUNC_STACK;
+ /*
+ * Disable powersaving as default.
+ */
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
rt2x00_eeprom_addr(rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index b8f5b06006c4..41d4a8167dc3 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -5460,14 +5460,15 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
rt2800_bbp_write(rt2x00dev, 68, 0x0b);
- rt2800_bbp_write(rt2x00dev, 69, 0x0d);
- rt2800_bbp_write(rt2x00dev, 70, 0x06);
+ rt2800_bbp_write(rt2x00dev, 69, 0x12);
rt2800_bbp_write(rt2x00dev, 73, 0x13);
rt2800_bbp_write(rt2x00dev, 75, 0x46);
rt2800_bbp_write(rt2x00dev, 76, 0x28);
rt2800_bbp_write(rt2x00dev, 77, 0x59);
+ rt2800_bbp_write(rt2x00dev, 70, 0x0a);
+
rt2800_bbp_write(rt2x00dev, 79, 0x13);
rt2800_bbp_write(rt2x00dev, 80, 0x05);
rt2800_bbp_write(rt2x00dev, 81, 0x33);
@@ -5510,7 +5511,6 @@ static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev)
if (rt2x00_rt(rt2x00dev, RT5392)) {
rt2800_bbp_write(rt2x00dev, 134, 0xd0);
rt2800_bbp_write(rt2x00dev, 135, 0xf6);
- rt2800_bbp_write(rt2x00dev, 148, 0x84);
}
rt2800_disable_unused_dac_adc(rt2x00dev);
@@ -7458,10 +7458,9 @@ static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
u32 reg;
/*
- * Disable powersaving as default on PCI devices.
+ * Disable powersaving as default.
*/
- if (rt2x00_is_pci(rt2x00dev) || rt2x00_is_soc(rt2x00dev))
- rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
/*
* Initialize all hw fields.
diff --git a/drivers/net/wireless/rtl818x/rtl8180/dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 8ec17aad0e52..3867d1470b36 100644
--- a/drivers/net/wireless/rtl818x/rtl8180/dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -107,6 +107,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
struct rtl8180_priv *priv = dev->priv;
unsigned int count = 32;
u8 signal, agc, sq;
+ dma_addr_t mapping;
while (count--) {
struct rtl8180_rx_desc *entry = &priv->rx_ring[priv->rx_idx];
@@ -128,6 +129,17 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
if (unlikely(!new_skb))
goto done;
+ mapping = pci_map_single(priv->pdev,
+ skb_tail_pointer(new_skb),
+ MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(new_skb);
+ dev_err(&priv->pdev->dev, "RX DMA map error\n");
+
+ goto done;
+ }
+
pci_unmap_single(priv->pdev,
*((dma_addr_t *)skb->cb),
MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
@@ -158,9 +170,7 @@ static void rtl8180_handle_rx(struct ieee80211_hw *dev)
skb = new_skb;
priv->rx_buf[priv->rx_idx] = skb;
- *((dma_addr_t *) skb->cb) =
- pci_map_single(priv->pdev, skb_tail_pointer(skb),
- MAX_RX_SIZE, PCI_DMA_FROMDEVICE);
+ *((dma_addr_t *) skb->cb) = mapping;
}
done:
@@ -266,6 +276,13 @@ static void rtl8180_tx(struct ieee80211_hw *dev,
mapping = pci_map_single(priv->pdev, skb->data,
skb->len, PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(priv->pdev, mapping)) {
+ kfree_skb(skb);
+ dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
+ return;
+
+ }
+
tx_flags = RTL818X_TX_DESC_FLAG_OWN | RTL818X_TX_DESC_FLAG_FS |
RTL818X_TX_DESC_FLAG_LS |
(ieee80211_get_tx_rate(dev, info)->hw_value << 24) |
diff --git a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 56aee067f324..a6ad79f61bf9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -15,6 +15,8 @@
#ifndef RTL8187_H
#define RTL8187_H
+#include <linux/cache.h>
+
#include "rtl818x.h"
#include "leds.h"
@@ -139,7 +141,10 @@ struct rtl8187_priv {
u8 aifsn[4];
u8 rfkill_mask;
struct {
- __le64 buf;
+ union {
+ __le64 buf;
+ u8 dummy1[L1_CACHE_BYTES];
+ } ____cacheline_aligned;
struct sk_buff_head queue;
} b_tx_status; /* This queue is used by both -b and non-b devices */
struct mutex io_mutex;
@@ -147,7 +152,8 @@ struct rtl8187_priv {
u8 bits8;
__le16 bits16;
__le32 bits32;
- } *io_dmabuf;
+ u8 dummy2[L1_CACHE_BYTES];
+ } *io_dmabuf ____cacheline_aligned;
bool rfkill_off;
u16 seqno;
};
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
index deedae3c5449..d1c0191a195b 100644
--- a/drivers/net/wireless/rtlwifi/ps.c
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -48,7 +48,7 @@ bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
/*<2> Enable Adapter */
if (rtlpriv->cfg->ops->hw_init(hw))
- return 1;
+ return false;
RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
/*<3> Enable Interrupt */
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index a82b30a1996c..2eb0b38384dd 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -937,14 +937,26 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
bool is92c;
int err;
u8 tmp_u1b;
+ unsigned long flags;
rtlpci->being_init_adapter = true;
+
+ /* Since this function can take a very long time (up to 350 ms)
+ * and can be called with irqs disabled, reenable the irqs
+ * to let the other devices continue being serviced.
+ *
+ * It is safe to do so since our own interrupts will only be enabled
+ * in a subsequent step.
+ */
+ local_save_flags(flags);
+ local_irq_enable();
+
rtlpriv->intf_ops->disable_aspm(hw);
rtstatus = _rtl92ce_init_mac(hw);
if (!rtstatus) {
RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Init MAC failed\n");
err = 1;
- return err;
+ goto exit;
}
err = rtl92c_download_fw(hw);
@@ -952,7 +964,7 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
"Failed to download FW. Init HW without FW now..\n");
err = 1;
- return err;
+ goto exit;
}
rtlhal->last_hmeboxnum = 0;
@@ -1032,6 +1044,8 @@ int rtl92ce_hw_init(struct ieee80211_hw *hw)
RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "under 1.5V\n");
}
rtl92c_dm_init(hw);
+exit:
+ local_irq_restore(flags);
rtlpci->being_init_adapter = false;
return err;
}
diff --git a/drivers/net/wireless/ti/wl1251/rx.c b/drivers/net/wireless/ti/wl1251/rx.c
index 123c4bb50e0a..cde0eaf99714 100644
--- a/drivers/net/wireless/ti/wl1251/rx.c
+++ b/drivers/net/wireless/ti/wl1251/rx.c
@@ -180,7 +180,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
/* The actual length doesn't include the target's alignment */
- skb->len = desc->length - PLCP_HEADER_LENGTH;
+ skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
fc = (u16 *)skb->data;
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4c76bcb9a879..ae413a2cbee7 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -143,11 +143,7 @@ struct xenvif {
char rx_irq_name[IFNAMSIZ+4]; /* DEVNAME-rx */
struct xen_netif_rx_back_ring rx;
struct sk_buff_head rx_queue;
- bool rx_queue_stopped;
- /* Set when the RX interrupt is triggered by the frontend.
- * The worker thread may need to wake the queue.
- */
- bool rx_event;
+ RING_IDX rx_last_skb_slots;
/* This array is allocated separately as it is large */
struct gnttab_copy *grant_copy_op;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index b9de31ea7fc4..301cc037fda8 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -100,7 +100,6 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
{
struct xenvif *vif = dev_id;
- vif->rx_event = true;
xenvif_kick_thread(vif);
return IRQ_HANDLED;
@@ -133,8 +132,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
/* If the skb is GSO then we'll also need an extra slot for the
* metadata.
*/
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso(skb))
min_slots_needed++;
/* If the skb can't possibly fit in the remaining slots
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 6b62c3eb8e18..438d0c09b7e6 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -240,7 +240,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
struct gnttab_copy *copy_gop;
struct xenvif_rx_meta *meta;
unsigned long bytes;
- int gso_type;
+ int gso_type = XEN_NETIF_GSO_TYPE_NONE;
/* Data must not cross a page boundary. */
BUG_ON(size + offset > PAGE_SIZE<<compound_order(page));
@@ -299,12 +299,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb,
}
/* Leave a gap for the GSO descriptor. */
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- else
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
+ }
if (*head && ((1 << gso_type) & vif->gso_mask))
vif->rx.req_cons++;
@@ -338,19 +338,15 @@ static int xenvif_gop_skb(struct sk_buff *skb,
int head = 1;
int old_meta_prod;
int gso_type;
- int gso_size;
old_meta_prod = npo->meta_prod;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
- gso_size = skb_shinfo(skb)->gso_size;
- } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
- gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
- gso_size = skb_shinfo(skb)->gso_size;
- } else {
- gso_type = XEN_NETIF_GSO_TYPE_NONE;
- gso_size = 0;
+ gso_type = XEN_NETIF_GSO_TYPE_NONE;
+ if (skb_is_gso(skb)) {
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV4;
+ else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ gso_type = XEN_NETIF_GSO_TYPE_TCPV6;
}
/* Set up a GSO prefix descriptor, if necessary */
@@ -358,7 +354,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
meta->size = 0;
meta->id = req->id;
}
@@ -368,7 +364,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
if ((1 << gso_type) & vif->gso_mask) {
meta->gso_type = gso_type;
- meta->gso_size = gso_size;
+ meta->gso_size = skb_shinfo(skb)->gso_size;
} else {
meta->gso_type = XEN_NETIF_GSO_TYPE_NONE;
meta->gso_size = 0;
@@ -476,7 +472,6 @@ static void xenvif_rx_action(struct xenvif *vif)
unsigned long offset;
struct skb_cb_overlay *sco;
bool need_to_notify = false;
- bool ring_full = false;
struct netrx_pending_operations npo = {
.copy = vif->grant_copy_op,
@@ -486,7 +481,7 @@ static void xenvif_rx_action(struct xenvif *vif)
skb_queue_head_init(&rxq);
while ((skb = skb_dequeue(&vif->rx_queue)) != NULL) {
- int max_slots_needed;
+ RING_IDX max_slots_needed;
int i;
/* We need a cheap worse case estimate for the number of
@@ -501,17 +496,19 @@ static void xenvif_rx_action(struct xenvif *vif)
size = skb_frag_size(&skb_shinfo(skb)->frags[i]);
max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE);
}
- if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
- skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
+ if (skb_is_gso(skb) &&
+ (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 ||
+ skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
max_slots_needed++;
/* If the skb may not fit then bail out now */
if (!xenvif_rx_ring_slots_available(vif, max_slots_needed)) {
skb_queue_head(&vif->rx_queue, skb);
need_to_notify = true;
- ring_full = true;
+ vif->rx_last_skb_slots = max_slots_needed;
break;
- }
+ } else
+ vif->rx_last_skb_slots = 0;
sco = (struct skb_cb_overlay *)skb->cb;
sco->meta_slots_used = xenvif_gop_skb(skb, &npo);
@@ -522,8 +519,6 @@ static void xenvif_rx_action(struct xenvif *vif)
BUG_ON(npo.meta_prod > ARRAY_SIZE(vif->meta));
- vif->rx_queue_stopped = !npo.copy_prod && ring_full;
-
if (!npo.copy_prod)
goto done;
@@ -1473,8 +1468,8 @@ static struct xen_netif_rx_response *make_rx_response(struct xenvif *vif,
static inline int rx_work_todo(struct xenvif *vif)
{
- return (!skb_queue_empty(&vif->rx_queue) && !vif->rx_queue_stopped) ||
- vif->rx_event;
+ return !skb_queue_empty(&vif->rx_queue) &&
+ xenvif_rx_ring_slots_available(vif, vif->rx_last_skb_slots);
}
static inline int tx_work_todo(struct xenvif *vif)
@@ -1560,8 +1555,6 @@ int xenvif_kthread(void *data)
if (!skb_queue_empty(&vif->rx_queue))
xenvif_rx_action(vif);
- vif->rx_event = false;
-
if (skb_queue_empty(&vif->rx_queue) &&
netif_queue_stopped(vif->dev))
xenvif_start_queue(vif);
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index ff04d4f95baa..e30d80033cbc 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -907,6 +907,7 @@ static int handle_incoming_queue(struct net_device *dev,
/* Ethernet work: Delayed to here as it peeks the header. */
skb->protocol = eth_type_trans(skb, dev);
+ skb_reset_network_header(skb);
if (checksum_setup(dev, skb)) {
kfree_skb(skb);
@@ -1832,7 +1833,6 @@ static void netback_changed(struct xenbus_device *dev,
case XenbusStateReconfiguring:
case XenbusStateReconfigured:
case XenbusStateUnknown:
- case XenbusStateClosed:
break;
case XenbusStateInitWait:
@@ -1847,6 +1847,10 @@ static void netback_changed(struct xenbus_device *dev,
netdev_notify_peers(netdev);
break;
+ case XenbusStateClosed:
+ if (dev->state == XenbusStateClosed)
+ break;
+ /* Missed the backend's CLOSING state -- fallthrough */
case XenbusStateClosing:
xenbus_frontend_closed(dev);
break;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index d3dd41c840f1..1a54f1ffaadb 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -99,11 +99,12 @@ static unsigned int of_bus_default_get_flags(const __be32 *addr)
static int of_bus_pci_match(struct device_node *np)
{
/*
+ * "pciex" is PCI Express
* "vci" is for the /chaos bridge on 1st-gen PCI powermacs
* "ht" is hypertransport
*/
- return !strcmp(np->type, "pci") || !strcmp(np->type, "vci") ||
- !strcmp(np->type, "ht");
+ return !strcmp(np->type, "pci") || !strcmp(np->type, "pciex") ||
+ !strcmp(np->type, "vci") || !strcmp(np->type, "ht");
}
static void of_bus_pci_count_cells(struct device_node *np,
diff --git a/drivers/of/base.c b/drivers/of/base.c
index ff85450d5683..89e888a78899 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -342,27 +342,72 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
}
EXPORT_SYMBOL(of_get_cpu_node);
-/** Checks if the given "compat" string matches one of the strings in
- * the device's "compatible" property
+/**
+ * __of_device_is_compatible() - Check if the node matches given constraints
+ * @device: pointer to node
+ * @compat: required compatible string, NULL or "" for any match
+ * @type: required device_type value, NULL or "" for any match
+ * @name: required node name, NULL or "" for any match
+ *
+ * Checks if the given @compat, @type and @name strings match the
+ * properties of the given @device. A constraints can be skipped by
+ * passing NULL or an empty string as the constraint.
+ *
+ * Returns 0 for no match, and a positive integer on match. The return
+ * value is a relative score with larger values indicating better
+ * matches. The score is weighted for the most specific compatible value
+ * to get the highest score. Matching type is next, followed by matching
+ * name. Practically speaking, this results in the following priority
+ * order for matches:
+ *
+ * 1. specific compatible && type && name
+ * 2. specific compatible && type
+ * 3. specific compatible && name
+ * 4. specific compatible
+ * 5. general compatible && type && name
+ * 6. general compatible && type
+ * 7. general compatible && name
+ * 8. general compatible
+ * 9. type && name
+ * 10. type
+ * 11. name
*/
static int __of_device_is_compatible(const struct device_node *device,
- const char *compat)
+ const char *compat, const char *type, const char *name)
{
- const char* cp;
- int cplen, l;
+ struct property *prop;
+ const char *cp;
+ int index = 0, score = 0;
+
+ /* Compatible match has highest priority */
+ if (compat && compat[0]) {
+ prop = __of_find_property(device, "compatible", NULL);
+ for (cp = of_prop_next_string(prop, NULL); cp;
+ cp = of_prop_next_string(prop, cp), index++) {
+ if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
+ score = INT_MAX/2 - (index << 2);
+ break;
+ }
+ }
+ if (!score)
+ return 0;
+ }
- cp = __of_get_property(device, "compatible", &cplen);
- if (cp == NULL)
- return 0;
- while (cplen > 0) {
- if (of_compat_cmp(cp, compat, strlen(compat)) == 0)
- return 1;
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
+ /* Matching type is better than matching name */
+ if (type && type[0]) {
+ if (!device->type || of_node_cmp(type, device->type))
+ return 0;
+ score += 2;
}
- return 0;
+ /* Matching name is a bit better than not */
+ if (name && name[0]) {
+ if (!device->name || of_node_cmp(name, device->name))
+ return 0;
+ score++;
+ }
+
+ return score;
}
/** Checks if the given "compat" string matches one of the strings in
@@ -375,7 +420,7 @@ int of_device_is_compatible(const struct device_node *device,
int res;
raw_spin_lock_irqsave(&devtree_lock, flags);
- res = __of_device_is_compatible(device, compat);
+ res = __of_device_is_compatible(device, compat, NULL, NULL);
raw_spin_unlock_irqrestore(&devtree_lock, flags);
return res;
}
@@ -681,10 +726,7 @@ struct device_node *of_find_compatible_node(struct device_node *from,
raw_spin_lock_irqsave(&devtree_lock, flags);
np = from ? from->allnext : of_allnodes;
for (; np; np = np->allnext) {
- if (type
- && !(np->type && (of_node_cmp(np->type, type) == 0)))
- continue;
- if (__of_device_is_compatible(np, compatible) &&
+ if (__of_device_is_compatible(np, compatible, type, NULL) &&
of_node_get(np))
break;
}
@@ -734,43 +776,22 @@ static
const struct of_device_id *__of_match_node(const struct of_device_id *matches,
const struct device_node *node)
{
- const char *cp;
- int cplen, l;
+ const struct of_device_id *best_match = NULL;
+ int score, best_score = 0;
if (!matches)
return NULL;
- cp = __of_get_property(node, "compatible", &cplen);
- do {
- const struct of_device_id *m = matches;
-
- /* Check against matches with current compatible string */
- while (m->name[0] || m->type[0] || m->compatible[0]) {
- int match = 1;
- if (m->name[0])
- match &= node->name
- && !strcmp(m->name, node->name);
- if (m->type[0])
- match &= node->type
- && !strcmp(m->type, node->type);
- if (m->compatible[0])
- match &= cp
- && !of_compat_cmp(m->compatible, cp,
- strlen(m->compatible));
- if (match)
- return m;
- m++;
- }
-
- /* Get node's next compatible string */
- if (cp) {
- l = strlen(cp) + 1;
- cp += l;
- cplen -= l;
+ for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
+ score = __of_device_is_compatible(node, matches->compatible,
+ matches->type, matches->name);
+ if (score > best_score) {
+ best_match = matches;
+ best_score = score;
}
- } while (cp && (cplen > 0));
+ }
- return NULL;
+ return best_match;
}
/**
@@ -778,10 +799,7 @@ const struct of_device_id *__of_match_node(const struct of_device_id *matches,
* @matches: array of of device match structures to search in
* @node: the of device structure to match against
*
- * Low level utility function used by device matching. Matching order
- * is to compare each of the node's compatibles with all given matches
- * first. This implies node's compatible is sorted from specific to
- * generic while matches can be in any order.
+ * Low level utility function used by device matching.
*/
const struct of_device_id *of_match_node(const struct of_device_id *matches,
const struct device_node *node)
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 875b7b6f0d2a..5b3c24f3cde5 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -24,7 +24,11 @@ MODULE_LICENSE("GPL");
static void of_set_phy_supported(struct phy_device *phydev, u32 max_speed)
{
- phydev->supported |= PHY_DEFAULT_FEATURES;
+ /* The default values for phydev->supported are provided by the PHY
+ * driver "features" member, we want to reset to sane defaults first
+ * before supporting higher speeds.
+ */
+ phydev->supported &= PHY_DEFAULT_FEATURES;
switch (max_speed) {
default:
@@ -44,7 +48,7 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
{
struct phy_device *phy;
bool is_c45;
- int rc, prev_irq;
+ int rc;
u32 max_speed = 0;
is_c45 = of_device_is_compatible(child,
@@ -54,12 +58,14 @@ static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *chi
if (!phy || IS_ERR(phy))
return 1;
- if (mdio->irq) {
- prev_irq = mdio->irq[addr];
- mdio->irq[addr] =
- irq_of_parse_and_map(child, 0);
- if (!mdio->irq[addr])
- mdio->irq[addr] = prev_irq;
+ rc = irq_of_parse_and_map(child, 0);
+ if (rc > 0) {
+ phy->irq = rc;
+ if (mdio->irq)
+ mdio->irq[addr] = rc;
+ } else {
+ if (mdio->irq)
+ phy->irq = mdio->irq[addr];
}
/* Associate the OF node with the device structure so it
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index e21012bde639..6643d1920985 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -300,6 +300,72 @@ static void __init of_selftest_parse_interrupts_extended(void)
of_node_put(np);
}
+static struct of_device_id match_node_table[] = {
+ { .data = "A", .name = "name0", }, /* Name alone is lowest priority */
+ { .data = "B", .type = "type1", }, /* followed by type alone */
+
+ { .data = "Ca", .name = "name2", .type = "type1", }, /* followed by both together */
+ { .data = "Cb", .name = "name2", }, /* Only match when type doesn't match */
+ { .data = "Cc", .name = "name2", .type = "type2", },
+
+ { .data = "E", .compatible = "compat3" },
+ { .data = "G", .compatible = "compat2", },
+ { .data = "H", .compatible = "compat2", .name = "name5", },
+ { .data = "I", .compatible = "compat2", .type = "type1", },
+ { .data = "J", .compatible = "compat2", .type = "type1", .name = "name8", },
+ { .data = "K", .compatible = "compat2", .name = "name9", },
+ {}
+};
+
+static struct {
+ const char *path;
+ const char *data;
+} match_node_tests[] = {
+ { .path = "/testcase-data/match-node/name0", .data = "A", },
+ { .path = "/testcase-data/match-node/name1", .data = "B", },
+ { .path = "/testcase-data/match-node/a/name2", .data = "Ca", },
+ { .path = "/testcase-data/match-node/b/name2", .data = "Cb", },
+ { .path = "/testcase-data/match-node/c/name2", .data = "Cc", },
+ { .path = "/testcase-data/match-node/name3", .data = "E", },
+ { .path = "/testcase-data/match-node/name4", .data = "G", },
+ { .path = "/testcase-data/match-node/name5", .data = "H", },
+ { .path = "/testcase-data/match-node/name6", .data = "G", },
+ { .path = "/testcase-data/match-node/name7", .data = "I", },
+ { .path = "/testcase-data/match-node/name8", .data = "J", },
+ { .path = "/testcase-data/match-node/name9", .data = "K", },
+};
+
+static void __init of_selftest_match_node(void)
+{
+ struct device_node *np;
+ const struct of_device_id *match;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(match_node_tests); i++) {
+ np = of_find_node_by_path(match_node_tests[i].path);
+ if (!np) {
+ selftest(0, "missing testcase node %s\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ match = of_match_node(match_node_table, np);
+ if (!match) {
+ selftest(0, "%s didn't match anything\n",
+ match_node_tests[i].path);
+ continue;
+ }
+
+ if (strcmp(match->data, match_node_tests[i].data) != 0) {
+ selftest(0, "%s got wrong match. expected %s, got %s\n",
+ match_node_tests[i].path, match_node_tests[i].data,
+ (const char *)match->data);
+ continue;
+ }
+ selftest(1, "passed");
+ }
+}
+
static int __init of_selftest(void)
{
struct device_node *np;
@@ -316,6 +382,7 @@ static int __init of_selftest(void)
of_selftest_property_match_string();
of_selftest_parse_interrupts();
of_selftest_parse_interrupts_extended();
+ of_selftest_match_node();
pr_info("end of selftest - %i passed, %i failed\n",
selftest_results.passed, selftest_results.failed);
return 0;
diff --git a/drivers/of/testcase-data/testcases.dtsi b/drivers/of/testcase-data/testcases.dtsi
new file mode 100644
index 000000000000..3a5b75a8e4d7
--- /dev/null
+++ b/drivers/of/testcase-data/testcases.dtsi
@@ -0,0 +1,3 @@
+#include "tests-phandle.dtsi"
+#include "tests-interrupts.dtsi"
+#include "tests-match.dtsi"
diff --git a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi b/drivers/of/testcase-data/tests-interrupts.dtsi
index c843720bd3e5..c843720bd3e5 100644
--- a/arch/arm/boot/dts/testcases/tests-interrupts.dtsi
+++ b/drivers/of/testcase-data/tests-interrupts.dtsi
diff --git a/drivers/of/testcase-data/tests-match.dtsi b/drivers/of/testcase-data/tests-match.dtsi
new file mode 100644
index 000000000000..c9e541129534
--- /dev/null
+++ b/drivers/of/testcase-data/tests-match.dtsi
@@ -0,0 +1,19 @@
+
+/ {
+ testcase-data {
+ match-node {
+ name0 { };
+ name1 { device_type = "type1"; };
+ a { name2 { device_type = "type1"; }; };
+ b { name2 { }; };
+ c { name2 { device_type = "type2"; }; };
+ name3 { compatible = "compat3"; };
+ name4 { compatible = "compat2", "compat3"; };
+ name5 { compatible = "compat2", "compat3"; };
+ name6 { compatible = "compat1", "compat2", "compat3"; };
+ name7 { compatible = "compat2"; device_type = "type1"; };
+ name8 { compatible = "compat2"; device_type = "type1"; };
+ name9 { compatible = "compat2"; };
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/testcases/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
index 0007d3cd7dc2..0007d3cd7dc2 100644
--- a/arch/arm/boot/dts/testcases/tests-phandle.dtsi
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 00660cc502c5..38901665c770 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res,
avail = *r;
pci_clip_resource_to_region(bus, &avail, region);
- if (!resource_size(&avail))
- continue;
/*
* "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index 13478ecd4113..0e79665afd44 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -60,14 +60,6 @@
#define PCIE_DEBUG_CTRL 0x1a60
#define PCIE_DEBUG_SOFT_RESET BIT(20)
-/*
- * This product ID is registered by Marvell, and used when the Marvell
- * SoC is not the root complex, but an endpoint on the PCIe bus. It is
- * therefore safe to re-use this PCI ID for our emulated PCI-to-PCI
- * bridge.
- */
-#define MARVELL_EMULATED_PCI_PCI_BRIDGE_ID 0x7846
-
/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
u16 vendor;
@@ -388,7 +380,8 @@ static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
bridge->class = PCI_CLASS_BRIDGE_PCI;
bridge->vendor = PCI_VENDOR_ID_MARVELL;
- bridge->device = MARVELL_EMULATED_PCI_PCI_BRIDGE_ID;
+ bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
+ bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
bridge->cache_line_size = 0x10;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index cd929aed3613..7c7a388c85ab 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -210,10 +210,29 @@ static void post_dock_fixups(acpi_handle not_used, u32 event, void *data)
}
}
+static void dock_event(acpi_handle handle, u32 type, void *data)
+{
+ struct acpiphp_context *context;
+
+ mutex_lock(&acpiphp_context_lock);
+ context = acpiphp_get_context(handle);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away) {
+ mutex_unlock(&acpiphp_context_lock);
+ return;
+ }
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ mutex_unlock(&acpiphp_context_lock);
+
+ hotplug_event(handle, type, data);
+
+ put_bridge(context->func.parent);
+}
static const struct acpi_dock_ops acpiphp_dock_ops = {
.fixup = post_dock_fixups,
- .handler = hotplug_event,
+ .handler = dock_event,
};
/* Check whether the PCI device is managed by native PCIe hotplug driver */
@@ -441,7 +460,9 @@ static void cleanup_bridge(struct acpiphp_bridge *bridge)
list_del(&bridge->list);
mutex_unlock(&bridge_mutex);
+ mutex_lock(&acpiphp_context_lock);
bridge->is_going_away = true;
+ mutex_unlock(&acpiphp_context_lock);
}
/**
@@ -709,6 +730,17 @@ static unsigned int get_slot_status(struct acpiphp_slot *slot)
return (unsigned int)sta;
}
+static inline bool device_status_valid(unsigned int sta)
+{
+ /*
+ * ACPI spec says that _STA may return bit 0 clear with bit 3 set
+ * if the device is valid but does not require a device driver to be
+ * loaded (Section 6.3.7 of ACPI 5.0A).
+ */
+ unsigned int mask = ACPI_STA_DEVICE_ENABLED | ACPI_STA_DEVICE_FUNCTIONING;
+ return (sta & mask) == mask;
+}
+
/**
* trim_stale_devices - remove PCI devices that are not responding.
* @dev: PCI device to start walking the hierarchy from.
@@ -724,7 +756,7 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+ alive = (ACPI_SUCCESS(status) && device_status_valid(sta))
|| acpiphp_no_hotplug(handle);
}
if (!alive) {
@@ -742,7 +774,7 @@ static void trim_stale_devices(struct pci_dev *dev)
/* The device is a bridge, so check the bus below it. */
pm_runtime_get_sync(&dev->dev);
- list_for_each_entry_safe(child, tmp, &bus->devices, bus_list)
+ list_for_each_entry_safe_reverse(child, tmp, &bus->devices, bus_list)
trim_stale_devices(child);
pm_runtime_put(&dev->dev);
@@ -771,10 +803,10 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
mutex_lock(&slot->crit_sect);
if (slot_no_hotplug(slot)) {
; /* do nothing */
- } else if (get_slot_status(slot) == ACPI_STA_ALL) {
+ } else if (device_status_valid(get_slot_status(slot))) {
/* remove stale devices if any */
- list_for_each_entry_safe(dev, tmp, &bus->devices,
- bus_list)
+ list_for_each_entry_safe_reverse(dev, tmp,
+ &bus->devices, bus_list)
if (PCI_SLOT(dev->devfn) == slot->device)
trim_stale_devices(dev);
@@ -805,7 +837,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
int i;
unsigned long type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
+ list_for_each_entry_safe_reverse(dev, tmp, &bus->devices, bus_list) {
for (i=0; i<PCI_BRIDGE_RESOURCES; i++) {
struct resource *res = &dev->resource[i];
if ((res->flags & type_mask) && !res->start &&
@@ -829,7 +861,11 @@ void acpiphp_check_host_bridge(acpi_handle handle)
bridge = acpiphp_handle_to_bridge(handle);
if (bridge) {
+ pci_lock_rescan_remove();
+
acpiphp_check_bridge(bridge);
+
+ pci_unlock_rescan_remove();
put_bridge(bridge);
}
}
@@ -852,6 +888,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_unlock(&acpiphp_context_lock);
+ pci_lock_rescan_remove();
acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
switch (type) {
@@ -905,6 +942,7 @@ static void hotplug_event(acpi_handle handle, u32 type, void *data)
break;
}
+ pci_unlock_rescan_remove();
if (bridge)
put_bridge(bridge);
}
@@ -915,11 +953,9 @@ static void hotplug_event_work(void *data, u32 type)
acpi_handle handle = context->handle;
acpi_scan_lock_acquire();
- pci_lock_rescan_remove();
hotplug_event(handle, type, context);
- pci_unlock_rescan_remove();
acpi_scan_lock_release();
acpi_evaluate_hotplug_ost(handle, type, ACPI_OST_SC_SUCCESS, NULL);
put_bridge(context->func.parent);
@@ -937,6 +973,7 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
{
struct acpiphp_context *context;
u32 ost_code = ACPI_OST_SC_SUCCESS;
+ acpi_status status;
switch (type) {
case ACPI_NOTIFY_BUS_CHECK:
@@ -972,13 +1009,20 @@ static void handle_hotplug_event(acpi_handle handle, u32 type, void *data)
mutex_lock(&acpiphp_context_lock);
context = acpiphp_get_context(handle);
- if (context && !WARN_ON(context->handle != handle)) {
- get_bridge(context->func.parent);
- acpiphp_put_context(context);
- acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (!context || WARN_ON(context->handle != handle)
+ || context->func.parent->is_going_away)
+ goto err_out;
+
+ get_bridge(context->func.parent);
+ acpiphp_put_context(context);
+ status = acpi_hotplug_execute(hotplug_event_work, context, type);
+ if (ACPI_SUCCESS(status)) {
mutex_unlock(&acpiphp_context_lock);
return;
}
+ put_bridge(context->func.parent);
+
+ err_out:
mutex_unlock(&acpiphp_context_lock);
ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 7a0fec6ce571..955ab7990c5b 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -545,9 +545,15 @@ static int populate_msi_sysfs(struct pci_dev *pdev)
return -ENOMEM;
list_for_each_entry(entry, &pdev->msi_list, list) {
char *name = kmalloc(20, GFP_KERNEL);
+ if (!name)
+ goto error_attrs;
+
msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL);
- if (!msi_dev_attr)
+ if (!msi_dev_attr) {
+ kfree(name);
goto error_attrs;
+ }
+
sprintf(name, "%d", entry->irq);
sysfs_attr_init(&msi_dev_attr->attr);
msi_dev_attr->attr.name = name;
@@ -589,6 +595,7 @@ error_attrs:
++count;
msi_attr = msi_attrs[count];
}
+ kfree(msi_attrs);
return ret;
}
@@ -959,7 +966,6 @@ EXPORT_SYMBOL(pci_disable_msi);
/**
* pci_msix_vec_count - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
-
* This function returns the number of device's MSI-X table entries and
* therefore the number of MSI-X vectors device is capable of sending.
* It returns a negative errno if the device is not capable of sending MSI-X
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 1febe90831b4..fdbc294821e6 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1181,6 +1181,8 @@ EXPORT_SYMBOL_GPL(pci_load_and_free_saved_state);
static int do_pci_enable_device(struct pci_dev *dev, int bars)
{
int err;
+ u16 cmd;
+ u8 pin;
err = pci_set_power_state(dev, PCI_D0);
if (err < 0 && err != -EIO)
@@ -1190,6 +1192,17 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars)
return err;
pci_fixup_device(pci_fixup_enable, dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ return 0;
+
+ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
+ if (pin) {
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_INTX_DISABLE)
+ pci_write_config_word(dev, PCI_COMMAND,
+ cmd & ~PCI_COMMAND_INTX_DISABLE);
+ }
+
return 0;
}
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index afa2354f6600..c7a551c2d5f1 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -5,7 +5,7 @@
menu "PHY Subsystem"
config GENERIC_PHY
- tristate "PHY Core"
+ bool "PHY Core"
help
Generic PHY support.
@@ -61,6 +61,7 @@ config PHY_EXYNOS_DP_VIDEO
config BCM_KONA_USB2_PHY
tristate "Broadcom Kona USB2 PHY Driver"
depends on GENERIC_PHY
+ depends on HAS_IOMEM
help
Enable this to support the Broadcom Kona USB 2.0 PHY.
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 645c867c1257..6c738376daff 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -162,6 +162,9 @@ int phy_init(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -173,6 +176,8 @@ int phy_init(struct phy *phy)
dev_err(&phy->dev, "phy init failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->init_count;
@@ -187,6 +192,9 @@ int phy_exit(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -212,6 +220,9 @@ int phy_power_on(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
return ret;
@@ -223,6 +234,8 @@ int phy_power_on(struct phy *phy)
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
goto out;
}
+ } else {
+ ret = 0; /* Override possible ret == -ENOTSUPP */
}
++phy->power_count;
mutex_unlock(&phy->mutex);
@@ -240,6 +253,9 @@ int phy_power_off(struct phy *phy)
{
int ret;
+ if (!phy)
+ return 0;
+
mutex_lock(&phy->mutex);
if (phy->power_count == 1 && phy->ops->power_off) {
ret = phy->ops->power_off(phy);
@@ -308,7 +324,7 @@ err0:
*/
void phy_put(struct phy *phy)
{
- if (IS_ERR(phy))
+ if (!phy || IS_ERR(phy))
return;
module_put(phy->ops->owner);
@@ -328,6 +344,9 @@ void devm_phy_put(struct device *dev, struct phy *phy)
{
int r;
+ if (!phy)
+ return;
+
r = devres_destroy(dev, devm_phy_release, devm_phy_match, phy);
dev_WARN_ONCE(dev, r, "couldn't find PHY resource\n");
}
@@ -389,17 +408,11 @@ struct phy *phy_get(struct device *dev, const char *string)
index = of_property_match_string(dev->of_node, "phy-names",
string);
phy = of_phy_get(dev, index);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
} else {
phy = phy_lookup(dev, string);
- if (IS_ERR(phy)) {
- dev_err(dev, "unable to find phy\n");
- return phy;
- }
}
+ if (IS_ERR(phy))
+ return phy;
if (!try_module_get(phy->ops->owner))
return ERR_PTR(-EPROBE_DEFER);
@@ -411,6 +424,27 @@ struct phy *phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(phy_get);
/**
+ * phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or the name of the controller
+ * port for non-dt case
+ *
+ * Returns the phy driver, after getting a refcount to it; or
+ * NULL if there is no such phy. The caller is responsible for
+ * calling phy_put() to release that count.
+ */
+struct phy *phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(phy_optional_get);
+
+/**
* devm_phy_get() - lookup and obtain a reference to a phy.
* @dev: device that requests this phy
* @string: the phy name as given in the dt data or phy device name
@@ -441,6 +475,30 @@ struct phy *devm_phy_get(struct device *dev, const char *string)
EXPORT_SYMBOL_GPL(devm_phy_get);
/**
+ * devm_phy_optional_get() - lookup and obtain a reference to an optional phy.
+ * @dev: device that requests this phy
+ * @string: the phy name as given in the dt data or phy device name
+ * for non-dt case
+ *
+ * Gets the phy using devm_phy_get(), and associates a device with it using
+ * devres. On driver detach, the release function is invoked on the devres
+ * data, then the devres data is freed. This differs from devm_phy_get() in
+ * that if the phy does not exist, it is not considered an error and
+ * -ENODEV will not be returned. Instead the NULL phy is returned,
+ * which can be passed to all other phy consumer calls.
+ */
+struct phy *devm_phy_optional_get(struct device *dev, const char *string)
+{
+ struct phy *phy = devm_phy_get(dev, string);
+
+ if (PTR_ERR(phy) == -ENODEV)
+ phy = NULL;
+
+ return phy;
+}
+EXPORT_SYMBOL_GPL(devm_phy_optional_get);
+
+/**
* phy_create() - create a new phy
* @dev: device that is creating the new phy
* @ops: function pointers for performing phy operations
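
The two *_optional_get() helpers added above pair with the new NULL checks in phy_init() and phy_power_on(): a missing optional PHY is reported as NULL rather than -ENODEV, and that NULL handle can then be passed to the other consumer calls as no-ops. A minimal, hypothetical probe() sketch (the driver and the "usb" phy name are invented for illustration):

#include <linux/phy/phy.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct phy *phy;
	int ret;

	/* NULL when no "usb" phy is described; real errors still propagate */
	phy = devm_phy_optional_get(&pdev->dev, "usb");
	if (IS_ERR(phy))
		return PTR_ERR(phy);		/* e.g. -EPROBE_DEFER */

	ret = phy_init(phy);			/* no-op if phy == NULL */
	if (ret)
		return ret;

	ret = phy_power_on(phy);		/* no-op if phy == NULL */
	if (ret)
		phy_exit(phy);

	return ret;
}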
diff --git a/drivers/phy/phy-exynos-dp-video.c b/drivers/phy/phy-exynos-dp-video.c
index 1dbe6ce7b2ce..0786fef842e7 100644
--- a/drivers/phy/phy-exynos-dp-video.c
+++ b/drivers/phy/phy-exynos-dp-video.c
@@ -76,10 +76,6 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
if (IS_ERR(state->regs))
return PTR_ERR(state->regs);
- phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(dev, &exynos_dp_video_phy_ops, NULL);
if (IS_ERR(phy)) {
dev_err(dev, "failed to create Display Port PHY\n");
@@ -87,6 +83,10 @@ static int exynos_dp_video_phy_probe(struct platform_device *pdev)
}
phy_set_drvdata(phy, state);
+ phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-exynos-mipi-video.c b/drivers/phy/phy-exynos-mipi-video.c
index 0c5efab11af1..7f139326a642 100644
--- a/drivers/phy/phy-exynos-mipi-video.c
+++ b/drivers/phy/phy-exynos-mipi-video.c
@@ -134,11 +134,6 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
dev_set_drvdata(dev, state);
spin_lock_init(&state->slock);
- phy_provider = devm_of_phy_provider_register(dev,
- exynos_mipi_video_phy_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
for (i = 0; i < EXYNOS_MIPI_PHYS_NUM; i++) {
struct phy *phy = devm_phy_create(dev,
&exynos_mipi_video_phy_ops, NULL);
@@ -152,6 +147,11 @@ static int exynos_mipi_video_phy_probe(struct platform_device *pdev)
phy_set_drvdata(phy, &state->phys[i]);
}
+ phy_provider = devm_of_phy_provider_register(dev,
+ exynos_mipi_video_phy_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
return 0;
}
diff --git a/drivers/phy/phy-mvebu-sata.c b/drivers/phy/phy-mvebu-sata.c
index d43786f62437..d70ecd6a1b3f 100644
--- a/drivers/phy/phy-mvebu-sata.c
+++ b/drivers/phy/phy-mvebu-sata.c
@@ -99,17 +99,17 @@ static int phy_mvebu_sata_probe(struct platform_device *pdev)
if (IS_ERR(priv->clk))
return PTR_ERR(priv->clk);
- phy_provider = devm_of_phy_provider_register(&pdev->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(&pdev->dev, &phy_mvebu_sata_ops, NULL);
if (IS_ERR(phy))
return PTR_ERR(phy);
phy_set_drvdata(phy, priv);
+ phy_provider = devm_of_phy_provider_register(&pdev->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
/* The boot loader may have left it on. Turn it off. */
phy_mvebu_sata_power_off(phy);
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index bfc5c337f99a..7699752fba11 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -177,11 +177,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy->phy.otg = otg;
phy->phy.type = USB_PHY_TYPE_USB2;
- phy_provider = devm_of_phy_provider_register(phy->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
control_node = of_parse_phandle(node, "ctrl-module", 0);
if (!control_node) {
dev_err(&pdev->dev, "Failed to get control device phandle\n");
@@ -214,6 +209,11 @@ static int omap_usb2_probe(struct platform_device *pdev)
phy_set_drvdata(generic_phy, phy);
+ phy_provider = devm_of_phy_provider_register(phy->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
if (IS_ERR(phy->wkupclk)) {
dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index daf65e68aaab..c3ace1db8136 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -695,11 +695,6 @@ static int twl4030_usb_probe(struct platform_device *pdev)
otg->set_host = twl4030_set_host;
otg->set_peripheral = twl4030_set_peripheral;
- phy_provider = devm_of_phy_provider_register(twl->dev,
- of_phy_simple_xlate);
- if (IS_ERR(phy_provider))
- return PTR_ERR(phy_provider);
-
phy = devm_phy_create(twl->dev, &ops, init_data);
if (IS_ERR(phy)) {
dev_dbg(&pdev->dev, "Failed to create PHY\n");
@@ -708,6 +703,11 @@ static int twl4030_usb_probe(struct platform_device *pdev)
phy_set_drvdata(phy, twl);
+ phy_provider = devm_of_phy_provider_register(twl->dev,
+ of_phy_simple_xlate);
+ if (IS_ERR(phy_provider))
+ return PTR_ERR(phy_provider);
+
/* init spinlock for workqueue */
spin_lock_init(&twl->lock);
diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig
index be361b7cd30f..1e4e69384baa 100644
--- a/drivers/pinctrl/Kconfig
+++ b/drivers/pinctrl/Kconfig
@@ -217,7 +217,7 @@ config PINCTRL_IMX28
select PINCTRL_MXS
config PINCTRL_MSM
- tristate
+ bool
select PINMUX
select PINCONF
select GENERIC_PINCONF
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 5ee61a470016..c0fe6091566a 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -851,7 +851,9 @@ static struct pinctrl *create_pinctrl(struct device *dev)
kref_init(&p->users);
/* Add the pinctrl handle to the global list */
+ mutex_lock(&pinctrl_list_mutex);
list_add_tail(&p->node, &pinctrl_list);
+ mutex_unlock(&pinctrl_list_mutex);
return p;
}
@@ -1642,8 +1644,10 @@ static void pinctrl_init_device_debugfs(struct pinctrl_dev *pctldev)
device_root, pctldev, &pinctrl_groups_ops);
debugfs_create_file("gpio-ranges", S_IFREG | S_IRUGO,
device_root, pctldev, &pinctrl_gpioranges_ops);
- pinmux_init_device_debugfs(device_root, pctldev);
- pinconf_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->pmxops)
+ pinmux_init_device_debugfs(device_root, pctldev);
+ if (pctldev->desc->confops)
+ pinconf_init_device_debugfs(device_root, pctldev);
}
static void pinctrl_remove_device_debugfs(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index 38c6f8b9790e..d990e33d8aa7 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -1286,22 +1286,22 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
switch (type) {
case IRQ_TYPE_EDGE_RISING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
case IRQ_TYPE_EDGE_FALLING:
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_ESR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_LOW:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_FELLSR);
break;
case IRQ_TYPE_LEVEL_HIGH:
- irq_set_handler(d->irq, handle_level_irq);
+ __irq_set_handler_locked(d->irq, handle_level_irq);
writel_relaxed(mask, pio + PIO_LSR);
writel_relaxed(mask, pio + PIO_REHLSR);
break;
@@ -1310,7 +1310,7 @@ static int alt_gpio_irq_type(struct irq_data *d, unsigned type)
* disable additional interrupt modes:
* fall back to default behavior
*/
- irq_set_handler(d->irq, handle_simple_irq);
+ __irq_set_handler_locked(d->irq, handle_simple_irq);
writel_relaxed(mask, pio + PIO_AIMDR);
return 0;
case IRQ_TYPE_NONE:
diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c
index 4669c53f99b0..eb2500212147 100644
--- a/drivers/pinctrl/pinctrl-capri.c
+++ b/drivers/pinctrl/pinctrl-capri.c
@@ -1435,7 +1435,7 @@ int __init capri_pinctrl_probe(struct platform_device *pdev)
}
static struct of_device_id capri_pinctrl_of_match[] = {
- { .compatible = "brcm,capri-pinctrl", },
+ { .compatible = "brcm,bcm11351-pinctrl", },
{ },
};
diff --git a/drivers/pinctrl/pinctrl-imx1-core.c b/drivers/pinctrl/pinctrl-imx1-core.c
index 17aecde1b51d..815384b377b5 100644
--- a/drivers/pinctrl/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/pinctrl-imx1-core.c
@@ -45,7 +45,7 @@ struct imx1_pinctrl {
#define MX1_DDIR 0x00
#define MX1_OCR 0x04
#define MX1_ICONFA 0x0c
-#define MX1_ICONFB 0x10
+#define MX1_ICONFB 0x14
#define MX1_GIUS 0x20
#define MX1_GPR 0x38
#define MX1_PUEN 0x40
@@ -97,13 +97,13 @@ static void imx1_write_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 old_val;
u32 new_val;
- dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
- reg, offset, value);
-
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
reg += 0x04;
+ dev_dbg(ipctl->dev, "write: register 0x%p offset %d value 0x%x\n",
+ reg, offset, value);
+
/* Get current state of pins */
old_val = readl(reg);
old_val &= mask;
@@ -139,7 +139,7 @@ static int imx1_read_2bit(struct imx1_pinctrl *ipctl, unsigned int pin_id,
u32 reg_offset)
{
void __iomem *reg = imx1_mem(ipctl, pin_id) + reg_offset;
- int offset = pin_id % 16;
+ int offset = (pin_id % 16) * 2;
/* Use the next register if the pin's port pin number is >=16 */
if (pin_id % 32 >= 16)
diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c
index 9ccf681dad2f..f9fabe9bf47d 100644
--- a/drivers/pinctrl/pinctrl-sunxi.c
+++ b/drivers/pinctrl/pinctrl-sunxi.c
@@ -14,6 +14,7 @@
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d,
spin_lock_irqsave(&pctl->lock, flags);
regval = readl(pctl->membase + reg);
- regval &= ~IRQ_CFG_IRQ_MASK;
+ regval &= ~(IRQ_CFG_IRQ_MASK << index);
writel(regval | (mode << index), pctl->membase + reg);
spin_unlock_irqrestore(&pctl->lock, flags);
@@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = {
static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
{
+ struct irq_chip *chip = irq_get_chip(irq);
struct sunxi_pinctrl *pctl = irq_get_handler_data(irq);
const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG);
@@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc)
if (reg) {
int irqoffset;
+ chained_irq_enter(chip, desc);
for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) {
int pin_irq = irq_find_mapping(pctl->domain, irqoffset);
generic_handle_irq(pin_irq);
}
+ chained_irq_exit(chip, desc);
}
}
diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h
index 01c494f8a14f..552b0e97077a 100644
--- a/drivers/pinctrl/pinctrl-sunxi.h
+++ b/drivers/pinctrl/pinctrl-sunxi.h
@@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin)
static inline u32 sunxi_irq_cfg_reg(u16 irq)
{
- u8 reg = irq / IRQ_CFG_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04;
return reg + IRQ_CFG_REG;
}
@@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq)
static inline u32 sunxi_irq_ctrl_reg(u16 irq)
{
- u8 reg = irq / IRQ_CTRL_IRQ_PER_REG;
+ u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04;
return reg + IRQ_CTRL_REG;
}
@@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq)
static inline u32 sunxi_irq_status_reg(u16 irq)
{
- u8 reg = irq / IRQ_STATUS_IRQ_PER_REG;
+ u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04;
return reg + IRQ_STATUS_REG;
}
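
The three sunxi helpers above previously returned a register index; multiplying by 0x04 turns that index into the byte offset of the corresponding 32-bit register before the IRQ_*_REG base is added. A standalone sketch of the corrected arithmetic, assuming eight interrupts per configuration register (the IRQ_CFG_IRQ_PER_REG value this driver defines elsewhere):

#include <stdint.h>
#include <stdio.h>

#define IRQ_CFG_IRQ_PER_REG	8	/* assumed, per pinctrl-sunxi.h */

static uint32_t irq_cfg_reg_offset(uint16_t irq)
{
	/* register index, then 4 bytes per 32-bit register */
	return (uint32_t)(irq / IRQ_CFG_IRQ_PER_REG) * 0x04;
}

int main(void)
{
	/* irqs 0..7 -> 0x00, 8..15 -> 0x04, 16..23 -> 0x08, ... */
	printf("irq 5  -> 0x%02x\n", (unsigned)irq_cfg_reg_offset(5));
	printf("irq 12 -> 0x%02x\n", (unsigned)irq_cfg_reg_offset(12));
	printf("irq 17 -> 0x%02x\n", (unsigned)irq_cfg_reg_offset(17));
	return 0;
}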
diff --git a/drivers/pinctrl/pinctrl-tegra.c b/drivers/pinctrl/pinctrl-tegra.c
index a2e93a2b5ff4..e767355ab0ad 100644
--- a/drivers/pinctrl/pinctrl-tegra.c
+++ b/drivers/pinctrl/pinctrl-tegra.c
@@ -645,7 +645,7 @@ int tegra_pinctrl_probe(struct platform_device *pdev,
GFP_KERNEL);
if (!pmx->regs) {
dev_err(&pdev->dev, "Can't alloc regs pointer\n");
- return -ENODEV;
+ return -ENOMEM;
}
for (i = 0; i < pmx->nbanks; i++) {
diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
index 77d103fe39d9..567d6918d50b 100644
--- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
+++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c
@@ -89,7 +89,8 @@ enum {
/* GPSR6 */
FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14,
- FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23,
+ FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19,
+ FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK,
FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0,
FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7,
FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17,
@@ -788,6 +789,7 @@ static const u16 pinmux_data[] = {
PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN),
PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC),
PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN),
+ PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK),
/* IPSR0 */
PINMUX_IPSR_DATA(IP0_0, D0),
@@ -3825,7 +3827,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = {
GP_6_11_FN, FN_IP13_25,
GP_6_10_FN, FN_IP13_24_23,
GP_6_9_FN, FN_IP13_22,
- 0, 0,
+ GP_6_8_FN, FN_SD1_CLK,
GP_6_7_FN, FN_IP13_21_19,
GP_6_6_FN, FN_IP13_18_16,
GP_6_5_FN, FN_IP13_15,
diff --git a/drivers/pinctrl/sirf/pinctrl-prima2.c b/drivers/pinctrl/sirf/pinctrl-prima2.c
index 37b42651d76a..dde0285544d6 100644
--- a/drivers/pinctrl/sirf/pinctrl-prima2.c
+++ b/drivers/pinctrl/sirf/pinctrl-prima2.c
@@ -413,7 +413,7 @@ static const struct sirfsoc_padmux ac97_padmux = {
.funcval = 0,
};
-static const unsigned ac97_pins[] = { 33, 34, 35, 36 };
+static const unsigned ac97_pins[] = { 43, 44, 45, 46 };
static const struct sirfsoc_muxmask spi1_muxmask[] = {
{
diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c
index a0d6152701cd..617a4916b50f 100644
--- a/drivers/pinctrl/sirf/pinctrl-sirf.c
+++ b/drivers/pinctrl/sirf/pinctrl-sirf.c
@@ -598,7 +598,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d)
{
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
- if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq))
+ if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE))
dev_err(bank->chip.gc.dev,
"unable to lock HW IRQ %lu for IRQ\n",
d->hwirq);
@@ -611,7 +611,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d)
struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d);
sirfsoc_gpio_irq_mask(d);
- gpio_unlock_as_irq(&bank->chip.gc, d->hwirq);
+ gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE);
}
static struct irq_chip sirfsoc_irq_chip = {
diff --git a/drivers/pinctrl/vt8500/pinctrl-wmt.c b/drivers/pinctrl/vt8500/pinctrl-wmt.c
index b28d1af9c232..9802b67040cc 100644
--- a/drivers/pinctrl/vt8500/pinctrl-wmt.c
+++ b/drivers/pinctrl/vt8500/pinctrl-wmt.c
@@ -276,7 +276,20 @@ static int wmt_pctl_dt_node_to_map_pull(struct wmt_pinctrl_data *data,
if (!configs)
return -ENOMEM;
- configs[0] = pull;
+ switch (pull) {
+ case 0:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ break;
+ case 1:
+ configs[0] = PIN_CONFIG_BIAS_PULL_DOWN;
+ break;
+ case 2:
+ configs[0] = PIN_CONFIG_BIAS_PULL_UP;
+ break;
+ default:
+ configs[0] = PIN_CONFIG_BIAS_DISABLE;
+ dev_err(data->dev, "invalid pull state %d - disabling\n", pull);
+ }
map->type = PIN_MAP_TYPE_CONFIGS_PIN;
map->data.configs.group_or_pin = data->groups[group];
diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c
index 167f3d00c916..66977ebf13b3 100644
--- a/drivers/pnp/pnpacpi/rsparser.c
+++ b/drivers/pnp/pnpacpi/rsparser.c
@@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
struct resource r = {0};
int i, flags;
- if (acpi_dev_resource_memory(res, &r)
- || acpi_dev_resource_io(res, &r)
- || acpi_dev_resource_address_space(res, &r)
+ if (acpi_dev_resource_address_space(res, &r)
|| acpi_dev_resource_ext_address_space(res, &r)) {
pnp_add_resource(dev, &r);
return AE_OK;
@@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res,
}
switch (res->type) {
+ case ACPI_RESOURCE_TYPE_MEMORY24:
+ case ACPI_RESOURCE_TYPE_MEMORY32:
+ case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
+ if (acpi_dev_resource_memory(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
+ case ACPI_RESOURCE_TYPE_IO:
+ case ACPI_RESOURCE_TYPE_FIXED_IO:
+ if (acpi_dev_resource_io(res, &r))
+ pnp_add_resource(dev, &r);
+ break;
case ACPI_RESOURCE_TYPE_DMA:
dma = &res->data.dma;
if (dma->channel_count > 0 && dma->channels[0] != (u8) -1)
diff --git a/drivers/power/ds2782_battery.c b/drivers/power/ds2782_battery.c
index 563174891c90..041f9b638d28 100644
--- a/drivers/power/ds2782_battery.c
+++ b/drivers/power/ds2782_battery.c
@@ -192,7 +192,7 @@ static int ds2786_get_voltage(struct ds278x_info *info, int *voltage_uV)
/*
* Voltage is measured in units of 1.22mV. The voltage is stored as
- * a 10-bit number plus sign, in the upper bits of a 16-bit register
+ * a 12-bit number plus sign, in the upper bits of a 16-bit register
*/
err = ds278x_read_reg16(info, DS278x_REG_VOLT_MSB, &raw);
if (err)
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 80edb7d8cb54..0b4cf9d63291 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -444,8 +444,6 @@ static int isp1704_charger_probe(struct platform_device *pdev)
ret = PTR_ERR(isp->phy);
goto fail0;
}
- if (!isp->phy)
- goto fail0;
isp->dev = &pdev->dev;
platform_set_drvdata(pdev, isp);
diff --git a/drivers/power/max17040_battery.c b/drivers/power/max17040_battery.c
index c7ff6d67f158..0fbac861080d 100644
--- a/drivers/power/max17040_battery.c
+++ b/drivers/power/max17040_battery.c
@@ -148,7 +148,7 @@ static void max17040_get_online(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (chip->pdata->battery_online)
+ if (chip->pdata && chip->pdata->battery_online)
chip->online = chip->pdata->battery_online();
else
chip->online = 1;
@@ -158,7 +158,8 @@ static void max17040_get_status(struct i2c_client *client)
{
struct max17040_chip *chip = i2c_get_clientdata(client);
- if (!chip->pdata->charger_online || !chip->pdata->charger_enable) {
+ if (!chip->pdata || !chip->pdata->charger_online
+ || !chip->pdata->charger_enable) {
chip->status = POWER_SUPPLY_STATUS_UNKNOWN;
return;
}
diff --git a/drivers/pwm/pwm-lp3943.c b/drivers/pwm/pwm-lp3943.c
index 8a843a04c224..a40b9c34e9ff 100644
--- a/drivers/pwm/pwm-lp3943.c
+++ b/drivers/pwm/pwm-lp3943.c
@@ -52,8 +52,10 @@ lp3943_pwm_request_map(struct lp3943_pwm *lp3943_pwm, int hwpwm)
offset = pwm_map->output[i];
/* Return an error if the pin is already assigned */
- if (test_and_set_bit(offset, &lp3943->pin_used))
+ if (test_and_set_bit(offset, &lp3943->pin_used)) {
+ kfree(pwm_map);
return ERR_PTR(-EBUSY);
+ }
}
return pwm_map;
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index b4b0d83f9ef6..7061ac0ad428 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -678,6 +678,7 @@ struct tsi721_bdma_chan {
struct list_head free_list;
dma_cookie_t completed_cookie;
struct tasklet_struct tasklet;
+ bool active;
};
#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
index 502663f5f7c6..91245f5dbe81 100644
--- a/drivers/rapidio/devices/tsi721_dma.c
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -206,8 +206,8 @@ void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
/* Disable BDMA channel interrupts */
iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
-
- tasklet_schedule(&bdma_chan->tasklet);
+ if (bdma_chan->active)
+ tasklet_schedule(&bdma_chan->tasklet);
}
#ifdef CONFIG_PCI_MSI
@@ -562,7 +562,7 @@ static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
}
#endif /* CONFIG_PCI_MSI */
- tasklet_enable(&bdma_chan->tasklet);
+ bdma_chan->active = true;
tsi721_bdma_interrupt_enable(bdma_chan, 1);
return bdma_chan->bd_num - 1;
@@ -576,9 +576,7 @@ err_out:
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
-#ifdef CONFIG_PCI_MSI
struct tsi721_device *priv = to_tsi721(dchan->device);
-#endif
LIST_HEAD(list);
dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
@@ -589,14 +587,25 @@ static void tsi721_free_chan_resources(struct dma_chan *dchan)
BUG_ON(!list_empty(&bdma_chan->active_list));
BUG_ON(!list_empty(&bdma_chan->queue));
- tasklet_disable(&bdma_chan->tasklet);
+ tsi721_bdma_interrupt_enable(bdma_chan, 0);
+ bdma_chan->active = false;
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector);
+ synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector);
+ } else
+#endif
+ synchronize_irq(priv->pdev->irq);
+
+ tasklet_kill(&bdma_chan->tasklet);
spin_lock_bh(&bdma_chan->lock);
list_splice_init(&bdma_chan->free_list, &list);
spin_unlock_bh(&bdma_chan->lock);
- tsi721_bdma_interrupt_enable(bdma_chan, 0);
-
#ifdef CONFIG_PCI_MSI
if (priv->flags & TSI721_USING_MSIX) {
free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
@@ -790,6 +799,7 @@ int tsi721_register_dma(struct tsi721_device *priv)
bdma_chan->dchan.cookie = 1;
bdma_chan->dchan.chan_id = i;
bdma_chan->id = i;
+ bdma_chan->active = false;
spin_lock_init(&bdma_chan->lock);
@@ -799,7 +809,6 @@ int tsi721_register_dma(struct tsi721_device *priv)
tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
(unsigned long)bdma_chan);
- tasklet_disable(&bdma_chan->tasklet);
list_add_tail(&bdma_chan->dchan.device_node,
&mport->dma.channels);
}
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c
index 77b46d0b37a6..e10febe9ec34 100644
--- a/drivers/regulator/ab3100.c
+++ b/drivers/regulator/ab3100.c
@@ -498,7 +498,7 @@ static int ab3100_regulator_register(struct platform_device *pdev,
struct ab3100_platform_data *plfdata,
struct regulator_init_data *init_data,
struct device_node *np,
- int id)
+ unsigned long id)
{
struct regulator_desc *desc;
struct ab3100_regulator *reg;
@@ -646,7 +646,7 @@ ab3100_regulator_of_probe(struct platform_device *pdev, struct device_node *np)
err = ab3100_regulator_register(
pdev, NULL, ab3100_regulator_matches[i].init_data,
ab3100_regulator_matches[i].of_node,
- (int) ab3100_regulator_matches[i].driver_data);
+ (unsigned long)ab3100_regulator_matches[i].driver_data);
if (err) {
ab3100_regulators_remove(pdev);
return err;
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index b38a6b669e8c..afca1bc24f26 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -953,6 +953,8 @@ static int machine_constraints_current(struct regulator_dev *rdev,
return 0;
}
+static int _regulator_do_enable(struct regulator_dev *rdev);
+
/**
* set_machine_constraints - sets regulator constraints
* @rdev: regulator source
@@ -1013,10 +1015,9 @@ static int set_machine_constraints(struct regulator_dev *rdev,
/* If the constraints say the regulator should be on at this point
* and we have control then make sure it is enabled.
*/
- if ((rdev->constraints->always_on || rdev->constraints->boot_on) &&
- ops->enable) {
- ret = ops->enable(rdev);
- if (ret < 0) {
+ if (rdev->constraints->always_on || rdev->constraints->boot_on) {
+ ret = _regulator_do_enable(rdev);
+ if (ret < 0 && ret != -EINVAL) {
rdev_err(rdev, "failed to enable\n");
goto out;
}
@@ -1272,6 +1273,8 @@ static struct regulator_dev *regulator_dev_lookup(struct device *dev,
if (r->dev.parent &&
node == r->dev.of_node)
return r;
+ *ret = -EPROBE_DEFER;
+ return NULL;
} else {
/*
* If we couldn't even get the node then it's
@@ -1312,7 +1315,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
struct regulator_dev *rdev;
struct regulator *regulator = ERR_PTR(-EPROBE_DEFER);
const char *devname = NULL;
- int ret = -EPROBE_DEFER;
+ int ret;
if (id == NULL) {
pr_err("get() with no identifier\n");
@@ -1322,6 +1325,11 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
if (dev)
devname = dev_name(dev);
+ if (have_full_constraints())
+ ret = -ENODEV;
+ else
+ ret = -EPROBE_DEFER;
+
mutex_lock(&regulator_list_mutex);
rdev = regulator_dev_lookup(dev, id, &ret);
@@ -1352,7 +1360,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
goto found;
/* Don't log an error when called from regulator_get_optional() */
} else if (!have_full_constraints() || exclusive) {
- dev_err(dev, "dummy supplies not allowed\n");
+ dev_warn(dev, "dummy supplies not allowed\n");
}
mutex_unlock(&regulator_list_mutex);
@@ -1900,8 +1908,6 @@ static int _regulator_do_disable(struct regulator_dev *rdev)
trace_regulator_disable_complete(rdev_get_name(rdev));
- _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
- NULL);
return 0;
}
@@ -1925,6 +1931,8 @@ static int _regulator_disable(struct regulator_dev *rdev)
rdev_err(rdev, "failed to disable\n");
return ret;
}
+ _notifier_call_chain(rdev, REGULATOR_EVENT_DISABLE,
+ NULL);
}
rdev->use_count = 0;
@@ -1977,20 +1985,16 @@ static int _regulator_force_disable(struct regulator_dev *rdev)
{
int ret = 0;
- /* force disable */
- if (rdev->desc->ops->disable) {
- /* ah well, who wants to live forever... */
- ret = rdev->desc->ops->disable(rdev);
- if (ret < 0) {
- rdev_err(rdev, "failed to force disable\n");
- return ret;
- }
- /* notify other consumers that power has been forced off */
- _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
- REGULATOR_EVENT_DISABLE, NULL);
+ ret = _regulator_do_disable(rdev);
+ if (ret < 0) {
+ rdev_err(rdev, "failed to force disable\n");
+ return ret;
}
- return ret;
+ _notifier_call_chain(rdev, REGULATOR_EVENT_FORCE_DISABLE |
+ REGULATOR_EVENT_DISABLE, NULL);
+
+ return 0;
}
/**
@@ -3623,23 +3627,18 @@ int regulator_suspend_finish(void)
mutex_lock(&regulator_list_mutex);
list_for_each_entry(rdev, &regulator_list, list) {
- struct regulator_ops *ops = rdev->desc->ops;
-
mutex_lock(&rdev->mutex);
- if ((rdev->use_count > 0 || rdev->constraints->always_on) &&
- ops->enable) {
- error = ops->enable(rdev);
+ if (rdev->use_count > 0 || rdev->constraints->always_on) {
+ error = _regulator_do_enable(rdev);
if (error)
ret = error;
} else {
if (!have_full_constraints())
goto unlock;
- if (!ops->disable)
- goto unlock;
if (!_regulator_is_enabled(rdev))
goto unlock;
- error = ops->disable(rdev);
+ error = _regulator_do_disable(rdev);
if (error)
ret = error;
}
@@ -3813,7 +3812,7 @@ static int __init regulator_init_complete(void)
ops = rdev->desc->ops;
c = rdev->constraints;
- if (!ops->disable || (c && c->always_on))
+ if (c && c->always_on)
continue;
mutex_lock(&rdev->mutex);
@@ -3834,7 +3833,7 @@ static int __init regulator_init_complete(void)
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
- ret = ops->disable(rdev);
+ ret = _regulator_do_disable(rdev);
if (ret != 0)
rdev_err(rdev, "couldn't disable: %d\n", ret);
} else {
diff --git a/drivers/regulator/da9055-regulator.c b/drivers/regulator/da9055-regulator.c
index 7f340206d329..b14ebdad5dd2 100644
--- a/drivers/regulator/da9055-regulator.c
+++ b/drivers/regulator/da9055-regulator.c
@@ -576,7 +576,9 @@ static int da9055_regulator_probe(struct platform_device *pdev)
/* Only LDO 5 and 6 has got the over current interrupt */
if (pdev->id == DA9055_ID_LDO5 || pdev->id == DA9055_ID_LDO6) {
irq = platform_get_irq_byname(pdev, "REGULATOR");
- irq = regmap_irq_get_virq(da9055->irq_data, irq);
+ if (irq < 0)
+ return irq;
+
ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
da9055_ldo5_6_oc_irq,
IRQF_TRIGGER_HIGH |
diff --git a/drivers/regulator/da9063-regulator.c b/drivers/regulator/da9063-regulator.c
index 56727eb745df..91e99a2c8dc1 100644
--- a/drivers/regulator/da9063-regulator.c
+++ b/drivers/regulator/da9063-regulator.c
@@ -1,3 +1,4 @@
+
/*
* Regulator driver for DA9063 PMIC series
*
@@ -60,7 +61,8 @@ struct da9063_regulator_info {
.desc.ops = &da9063_ldo_ops, \
.desc.min_uV = (min_mV) * 1000, \
.desc.uV_step = (step_mV) * 1000, \
- .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1), \
+ .desc.n_voltages = (((max_mV) - (min_mV))/(step_mV) + 1 \
+ + (DA9063_V##regl_name##_BIAS)), \
.desc.enable_reg = DA9063_REG_##regl_name##_CONT, \
.desc.enable_mask = DA9063_LDO_EN, \
.desc.vsel_reg = DA9063_REG_V##regl_name##_A, \
diff --git a/drivers/regulator/max14577.c b/drivers/regulator/max14577.c
index b1078ba3f393..e0619526708c 100644
--- a/drivers/regulator/max14577.c
+++ b/drivers/regulator/max14577.c
@@ -166,12 +166,14 @@ static int max14577_regulator_dt_parse_pdata(struct platform_device *pdev)
ret = of_regulator_match(&pdev->dev, np, max14577_regulator_matches,
MAX14577_REG_MAX);
- if (ret < 0) {
+ if (ret < 0)
dev_err(&pdev->dev, "Error parsing regulator init data: %d\n", ret);
- return ret;
- }
+ else
+ ret = 0;
- return 0;
+ of_node_put(np);
+
+ return ret;
}
static inline struct regulator_init_data *match_init_data(int index)
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index d9e557990577..cd0b9e35a56d 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -441,6 +441,7 @@ common_reg:
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
config.init_data = pdata->regulators[i].initdata;
+ config.of_node = pdata->regulators[i].reg_node;
} else {
config.init_data = rdata[i].init_data;
config.of_node = rdata[i].of_node;
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index d7164bb75d3e..d958dfa05125 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -535,7 +535,7 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
return -ENODEV;
}
- regulators_np = of_find_node_by_name(pmic_np, "regulators");
+ regulators_np = of_get_child_by_name(pmic_np, "regulators");
if (!regulators_np) {
dev_err(iodev->dev, "could not find regulators sub-node\n");
return -EINVAL;
@@ -591,6 +591,8 @@ static int s5m8767_pmic_dt_parse_pdata(struct platform_device *pdev,
rmode++;
}
+ of_node_put(regulators_np);
+
if (of_get_property(pmic_np, "s5m8767,pmic-buck2-uses-gpio-dvs", NULL)) {
pdata->buck2_gpiodvs = true;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 7afd373b9595..c4cde9c08f1f 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -580,10 +580,12 @@ static int s3c_rtc_suspend(struct device *dev)
clk_enable(rtc_clk);
/* save TICNT for anyone using periodic interrupts */
- ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
ticnt_en_save = readw(s3c_rtc_base + S3C2410_RTCCON);
ticnt_en_save &= S3C64XX_RTCCON_TICEN;
+ ticnt_save = readl(s3c_rtc_base + S3C2410_TICNT);
+ } else {
+ ticnt_save = readb(s3c_rtc_base + S3C2410_TICNT);
}
s3c_rtc_enable(pdev, 0);
@@ -605,10 +607,15 @@ static int s3c_rtc_resume(struct device *dev)
clk_enable(rtc_clk);
s3c_rtc_enable(pdev, 1);
- writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
- if (s3c_rtc_cpu_type == TYPE_S3C64XX && ticnt_en_save) {
- tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
- writew(tmp | ticnt_en_save, s3c_rtc_base + S3C2410_RTCCON);
+ if (s3c_rtc_cpu_type == TYPE_S3C64XX) {
+ writel(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
+ if (ticnt_en_save) {
+ tmp = readw(s3c_rtc_base + S3C2410_RTCCON);
+ writew(tmp | ticnt_en_save,
+ s3c_rtc_base + S3C2410_RTCCON);
+ }
+ } else {
+ writeb(ticnt_save, s3c_rtc_base + S3C2410_TICNT);
}
if (device_may_wakeup(dev) && wake_en) {
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f6b9188c5af5..9f0ea6cb6922 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -610,6 +610,7 @@ void chsc_chp_online(struct chp_id chpid)
css_wait_for_slow_path();
for_each_subchannel_staged(__s390_process_res_acc, NULL,
&link);
+ css_schedule_reprobe();
}
}
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 88e35d85d205..8ee88c4ebd83 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -342,8 +342,9 @@ static int cio_check_config(struct subchannel *sch, struct schib *schib)
*/
int cio_commit_config(struct subchannel *sch)
{
- struct schib schib;
int ccode, retry, ret = 0;
+ struct schib schib;
+ struct irb irb;
if (stsch_err(sch->schid, &schib) || !css_sch_is_valid(&schib))
return -ENODEV;
@@ -367,7 +368,10 @@ int cio_commit_config(struct subchannel *sch)
ret = -EAGAIN;
break;
case 1: /* status pending */
- return -EBUSY;
+ ret = -EBUSY;
+ if (tsch(sch->schid, &irb))
+ return ret;
+ break;
case 2: /* busy */
udelay(100); /* allow for recovery */
ret = -EBUSY;
@@ -403,7 +407,6 @@ EXPORT_SYMBOL_GPL(cio_update_schib);
*/
int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "ensch");
@@ -418,20 +421,14 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm)
sch->config.isc = sch->isc;
sch->config.intparm = intparm;
- for (retry = 0; retry < 3; retry++) {
+ ret = cio_commit_config(sch);
+ if (ret == -EIO) {
+ /*
+ * Got a program check in msch. Try without
+ * the concurrent sense bit the next time.
+ */
+ sch->config.csense = 0;
ret = cio_commit_config(sch);
- if (ret == -EIO) {
- /*
- * Got a program check in msch. Try without
- * the concurrent sense bit the next time.
- */
- sch->config.csense = 0;
- } else if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
}
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
@@ -444,7 +441,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel);
*/
int cio_disable_subchannel(struct subchannel *sch)
{
- int retry;
int ret;
CIO_TRACE_EVENT(2, "dissch");
@@ -456,16 +452,8 @@ int cio_disable_subchannel(struct subchannel *sch)
return -ENODEV;
sch->config.ena = 0;
+ ret = cio_commit_config(sch);
- for (retry = 0; retry < 3; retry++) {
- ret = cio_commit_config(sch);
- if (ret == -EBUSY) {
- struct irb irb;
- if (tsch(sch->schid, &irb) != 0)
- break;
- } else
- break;
- }
CIO_HEX_EVENT(2, &ret, sizeof(ret));
return ret;
}
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 8acaae18bd11..a563e4c00590 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -359,14 +359,12 @@ static inline int multicast_outbound(struct qdio_q *q)
#define need_siga_sync_out_after_pci(q) \
(unlikely(q->irq_ptr->siga_flag.sync_out_after_pci))
-#define for_each_input_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->input_qs[0]; \
- i < irq_ptr->nr_input_qs; \
- q = irq_ptr->input_qs[++i])
-#define for_each_output_queue(irq_ptr, q, i) \
- for (i = 0, q = irq_ptr->output_qs[0]; \
- i < irq_ptr->nr_output_qs; \
- q = irq_ptr->output_qs[++i])
+#define for_each_input_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_input_qs && \
+ ({ q = irq_ptr->input_qs[i]; 1; }); i++)
+#define for_each_output_queue(irq_ptr, q, i) \
+ for (i = 0; i < irq_ptr->nr_output_qs && \
+ ({ q = irq_ptr->output_qs[i]; 1; }); i++)
#define prev_buf(bufnr) \
((bufnr + QDIO_MAX_BUFFERS_MASK) & QDIO_MAX_BUFFERS_MASK)
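
The rewritten iteration macros above check the loop bound before touching the queue array, using a GNU statement expression so the assignment still happens inside the loop condition; the old form evaluated input_qs[++i] one step past the last valid queue on the final iteration. A standalone sketch of the same pattern (gcc/clang only, since ({ ... }) is a GNU extension):

#include <stdio.h>

/* Same shape as the qdio macros: bound check first, then assign q via a
 * statement expression that always evaluates to true. */
#define for_each_queue(qs, n, q, i) \
	for (i = 0; i < (n) && ({ q = (qs)[i]; 1; }); i++)

int main(void)
{
	int queues[4] = { 10, 20, 30, 40 };
	int nr = 3;			/* only the first three are valid */
	int q, i;

	for_each_queue(queues, nr, q, i)
		printf("queue %d = %d\n", i, q);
	return 0;
}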
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index c883a085c059..77466c4faabb 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -996,7 +996,7 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
}
}
- if (!pci_out_supported(q))
+ if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
return;
for_each_output_queue(irq_ptr, q, i) {
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index dc542e0a3055..0bc91e46395a 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -311,7 +311,7 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
} __packed * msg = ap_msg->message;
int rcblen = CEIL4(xcRB->request_control_blk_length);
- int replylen;
+ int replylen, req_sumlen, resp_sumlen;
char *req_data = ap_msg->message + sizeof(struct type6_hdr) + rcblen;
char *function_code;
@@ -321,12 +321,34 @@ static int XCRB_msg_to_type6CPRB_msgX(struct zcrypt_device *zdev,
xcRB->request_data_length;
if (ap_msg->length > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+
+ /* Overflow check:
+ the sum must be greater than (or equal to) the largest operand */
+ req_sumlen = CEIL4(xcRB->request_control_blk_length) +
+ xcRB->request_data_length;
+ if ((CEIL4(xcRB->request_control_blk_length) <=
+ xcRB->request_data_length) ?
+ (req_sumlen < xcRB->request_data_length) :
+ (req_sumlen < CEIL4(xcRB->request_control_blk_length))) {
+ return -EINVAL;
+ }
+
replylen = sizeof(struct type86_fmt2_msg) +
CEIL4(xcRB->reply_control_blk_length) +
xcRB->reply_data_length;
if (replylen > MSGTYPE06_MAX_MSG_SIZE)
return -EINVAL;
+ /* Overflow check:
+ the sum must be greater than (or equal to) the largest operand */
+ resp_sumlen = CEIL4(xcRB->reply_control_blk_length) +
+ xcRB->reply_data_length;
+ if ((CEIL4(xcRB->reply_control_blk_length) <= xcRB->reply_data_length) ?
+ (resp_sumlen < xcRB->reply_data_length) :
+ (resp_sumlen < CEIL4(xcRB->reply_control_blk_length))) {
+ return -EINVAL;
+ }
+
/* prepare type6 header */
msg->hdr = static_type6_hdrX;
memcpy(msg->hdr.agent_id , &(xcRB->agent_ID), sizeof(xcRB->agent_ID));
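
The two new checks above detect wrap-around in the request and reply length sums: for non-negative operands, a + b overflowed exactly when the sum ends up smaller than the larger operand. A standalone sketch of that test on unsigned values:

#include <stdbool.h>
#include <stdio.h>

static bool add_overflows(unsigned int a, unsigned int b)
{
	unsigned int sum = a + b;	/* wraps on overflow */

	/* compare against the larger operand, as in the checks above */
	return (a <= b) ? (sum < b) : (sum < a);
}

int main(void)
{
	printf("%d\n", add_overflows(100u, 200u));		/* 0: fits  */
	printf("%d\n", add_overflows(0xffffff00u, 0x200u));	/* 1: wraps */
	return 0;
}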
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c3a83df07894..795ed61a5496 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1660,7 +1660,6 @@ int qeth_qdio_clear_card(struct qeth_card *card, int use_halt)
QDIO_FLAG_CLEANUP_USING_CLEAR);
if (rc)
QETH_CARD_TEXT_(card, 3, "1err%d", rc);
- qdio_free(CARD_DDEV(card));
atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
break;
case QETH_QDIO_CLEANING:
@@ -2605,6 +2604,7 @@ static int qeth_mpc_initialize(struct qeth_card *card)
return 0;
out_qdio:
qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
+ qdio_free(CARD_DDEV(card));
return rc;
}
@@ -4906,9 +4906,11 @@ retry:
if (retries < 3)
QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
dev_name(&card->gdev->dev));
+ rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
rc = ccw_device_set_online(CARD_RDEV(card));
if (rc)
goto retriable;
@@ -4918,7 +4920,6 @@ retry:
rc = ccw_device_set_online(CARD_DDEV(card));
if (rc)
goto retriable;
- rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
retriable:
if (rc == -ERESTARTSYS) {
QETH_DBF_TEXT(SETUP, 2, "break1");
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 0710550093ce..908d82529ee9 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1091,6 +1091,7 @@ out_remove:
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -1132,6 +1133,7 @@ static int __qeth_l2_set_offline(struct ccwgroup_device *cgdev,
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -1194,6 +1196,7 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 0f430424c3b8..3524d34ff694 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3447,6 +3447,7 @@ out_remove:
ccw_device_set_offline(CARD_DDEV(card));
ccw_device_set_offline(CARD_WDEV(card));
ccw_device_set_offline(CARD_RDEV(card));
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_RECOVER)
card->state = CARD_STATE_RECOVER;
else
@@ -3493,6 +3494,7 @@ static int __qeth_l3_set_offline(struct ccwgroup_device *cgdev,
rc = (rc2) ? rc2 : rc3;
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ qdio_free(CARD_DDEV(card));
if (recover_flag == CARD_STATE_UP)
card->state = CARD_STATE_RECOVER;
/* let user_space know that device is offline */
@@ -3545,6 +3547,7 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev)
qeth_hw_trap(card, QETH_DIAGS_TRAP_DISARM);
qeth_qdio_clear_card(card, 0);
qeth_clear_qdio_buffers(card);
+ qdio_free(CARD_DDEV(card));
}
static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev)
diff --git a/drivers/sbus/char/jsflash.c b/drivers/sbus/char/jsflash.c
index 6b4678a7900a..4ccb5d869389 100644
--- a/drivers/sbus/char/jsflash.c
+++ b/drivers/sbus/char/jsflash.c
@@ -507,7 +507,6 @@ static int jsflash_init(void)
}
/* Let us be really paranoid for modifications to probing code. */
- /* extern enum sparc_cpu sparc_cpu_model; */ /* in <asm/system.h> */
if (sparc_cpu_model != sun4m) {
/* We must be on sun4m because we use MMU Bypass ASI. */
return -ENXIO;
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 1f375051483a..5642a9b250c2 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -325,7 +325,7 @@ static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
continue;
- if (abrt_task->sc->device->lun != abrt_task->sc->device->lun)
+ if (sc->device->lun != abrt_task->sc->device->lun)
continue;
/* Invalidate WRB Posted for this Task */
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index ed880891cb7c..e9279a8c1e1c 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -594,13 +594,13 @@ static void bnx2fc_free_mp_resc(struct bnx2fc_cmd *io_req)
mp_req->mp_resp_bd = NULL;
}
if (mp_req->req_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->req_buf,
mp_req->req_buf_dma);
mp_req->req_buf = NULL;
}
if (mp_req->resp_buf) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
mp_req->resp_buf,
mp_req->resp_buf_dma);
mp_req->resp_buf = NULL;
@@ -622,7 +622,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req->req_len = sizeof(struct fcp_cmnd);
io_req->data_xfer_len = mp_req->req_len;
- mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->req_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->req_buf_dma,
GFP_ATOMIC);
if (!mp_req->req_buf) {
@@ -631,7 +631,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
return FAILED;
}
- mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ mp_req->resp_buf = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&mp_req->resp_buf_dma,
GFP_ATOMIC);
if (!mp_req->resp_buf) {
@@ -639,8 +639,8 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
bnx2fc_free_mp_resc(io_req);
return FAILED;
}
- memset(mp_req->req_buf, 0, PAGE_SIZE);
- memset(mp_req->resp_buf, 0, PAGE_SIZE);
+ memset(mp_req->req_buf, 0, CNIC_PAGE_SIZE);
+ memset(mp_req->resp_buf, 0, CNIC_PAGE_SIZE);
/* Allocate and map mp_req_bd and mp_resp_bd */
sz = sizeof(struct fcoe_bd_ctx);
@@ -665,7 +665,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
mp_req_bd = mp_req->mp_req_bd;
mp_req_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_req_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_req_bd->buf_len = PAGE_SIZE;
+ mp_req_bd->buf_len = CNIC_PAGE_SIZE;
mp_req_bd->flags = 0;
/*
@@ -677,7 +677,7 @@ int bnx2fc_init_mp_req(struct bnx2fc_cmd *io_req)
addr = mp_req->resp_buf_dma;
mp_resp_bd->buf_addr_lo = (u32)addr & 0xffffffff;
mp_resp_bd->buf_addr_hi = (u32)((u64)addr >> 32);
- mp_resp_bd->buf_len = PAGE_SIZE;
+ mp_resp_bd->buf_len = CNIC_PAGE_SIZE;
mp_resp_bd->flags = 0;
return SUCCESS;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index 4d93177dfb53..d9bae5672273 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -673,7 +673,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map SQ */
tgt->sq_mem_size = tgt->max_sqes * BNX2FC_SQ_WQE_SIZE;
- tgt->sq_mem_size = (tgt->sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->sq_mem_size = (tgt->sq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->sq = dma_alloc_coherent(&hba->pcidev->dev, tgt->sq_mem_size,
&tgt->sq_dma, GFP_KERNEL);
@@ -686,7 +687,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map CQ */
tgt->cq_mem_size = tgt->max_cqes * BNX2FC_CQ_WQE_SIZE;
- tgt->cq_mem_size = (tgt->cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->cq_mem_size = (tgt->cq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->cq = dma_alloc_coherent(&hba->pcidev->dev, tgt->cq_mem_size,
&tgt->cq_dma, GFP_KERNEL);
@@ -699,7 +701,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map RQ and RQ PBL */
tgt->rq_mem_size = tgt->max_rqes * BNX2FC_RQ_WQE_SIZE;
- tgt->rq_mem_size = (tgt->rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_mem_size = (tgt->rq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_mem_size,
&tgt->rq_dma, GFP_KERNEL);
@@ -710,8 +713,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->rq, 0, tgt->rq_mem_size);
- tgt->rq_pbl_size = (tgt->rq_mem_size / PAGE_SIZE) * sizeof(void *);
- tgt->rq_pbl_size = (tgt->rq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ tgt->rq_pbl_size = (tgt->rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
+ tgt->rq_pbl_size = (tgt->rq_pbl_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->rq_pbl = dma_alloc_coherent(&hba->pcidev->dev, tgt->rq_pbl_size,
&tgt->rq_pbl_dma, GFP_KERNEL);
@@ -722,7 +726,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->rq_pbl, 0, tgt->rq_pbl_size);
- num_pages = tgt->rq_mem_size / PAGE_SIZE;
+ num_pages = tgt->rq_mem_size / CNIC_PAGE_SIZE;
page = tgt->rq_dma;
pbl = (u32 *)tgt->rq_pbl;
@@ -731,13 +735,13 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map XFERQ */
tgt->xferq_mem_size = tgt->max_sqes * BNX2FC_XFERQ_WQE_SIZE;
- tgt->xferq_mem_size = (tgt->xferq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->xferq_mem_size = (tgt->xferq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->xferq = dma_alloc_coherent(&hba->pcidev->dev, tgt->xferq_mem_size,
&tgt->xferq_dma, GFP_KERNEL);
@@ -750,8 +754,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map CONFQ & CONFQ PBL */
tgt->confq_mem_size = tgt->max_sqes * BNX2FC_CONFQ_WQE_SIZE;
- tgt->confq_mem_size = (tgt->confq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->confq_mem_size = (tgt->confq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->confq = dma_alloc_coherent(&hba->pcidev->dev, tgt->confq_mem_size,
&tgt->confq_dma, GFP_KERNEL);
@@ -763,9 +767,9 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
memset(tgt->confq, 0, tgt->confq_mem_size);
tgt->confq_pbl_size =
- (tgt->confq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (tgt->confq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
tgt->confq_pbl_size =
- (tgt->confq_pbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (tgt->confq_pbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
tgt->confq_pbl = dma_alloc_coherent(&hba->pcidev->dev,
tgt->confq_pbl_size,
@@ -777,7 +781,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
}
memset(tgt->confq_pbl, 0, tgt->confq_pbl_size);
- num_pages = tgt->confq_mem_size / PAGE_SIZE;
+ num_pages = tgt->confq_mem_size / CNIC_PAGE_SIZE;
page = tgt->confq_dma;
pbl = (u32 *)tgt->confq_pbl;
@@ -786,7 +790,7 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
pbl++;
*pbl = (u32)((u64)page >> 32);
pbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
/* Allocate and map ConnDB */
@@ -805,8 +809,8 @@ static int bnx2fc_alloc_session_resc(struct bnx2fc_hba *hba,
/* Allocate and map LCQ */
tgt->lcq_mem_size = (tgt->max_sqes + 8) * BNX2FC_SQ_WQE_SIZE;
- tgt->lcq_mem_size = (tgt->lcq_mem_size + (PAGE_SIZE - 1)) &
- PAGE_MASK;
+ tgt->lcq_mem_size = (tgt->lcq_mem_size + (CNIC_PAGE_SIZE - 1)) &
+ CNIC_PAGE_MASK;
tgt->lcq = dma_alloc_coherent(&hba->pcidev->dev, tgt->lcq_mem_size,
&tgt->lcq_dma, GFP_KERNEL);
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index e4cf23df4b4f..b87a1933f880 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -61,7 +61,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
* yield integral num of page buffers
*/
/* adjust SQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
if (hba->max_sqes < num_elements_per_pg)
hba->max_sqes = num_elements_per_pg;
else if (hba->max_sqes % num_elements_per_pg)
@@ -69,7 +69,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
~(num_elements_per_pg - 1);
/* adjust CQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_CQE_SIZE;
if (hba->max_cqes < num_elements_per_pg)
hba->max_cqes = num_elements_per_pg;
else if (hba->max_cqes % num_elements_per_pg)
@@ -77,7 +77,7 @@ static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
~(num_elements_per_pg - 1);
/* adjust RQ */
- num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+ num_elements_per_pg = CNIC_PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
if (hba->max_rqes < num_elements_per_pg)
hba->max_rqes = num_elements_per_pg;
else if (hba->max_rqes % num_elements_per_pg)
@@ -959,7 +959,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
/* SQ page table */
memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
- num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.sq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.sq_phys;
if (cnic_dev_10g)
@@ -973,7 +973,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -981,13 +981,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* RQ page table */
memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
- num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.rq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.rq_phys;
if (cnic_dev_10g)
@@ -1001,7 +1001,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1009,13 +1009,13 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
/* CQ page table */
memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
- num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+ num_pages = ep->qp.cq_mem_size / CNIC_PAGE_SIZE;
page = ep->qp.cq_phys;
if (cnic_dev_10g)
@@ -1029,7 +1029,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) ((u64) page >> 32);
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
} else {
/* PTE is written in big endian format for
* 5706/5708/5709 devices */
@@ -1037,7 +1037,7 @@ static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
ptbl++;
*ptbl = (u32) page;
ptbl++;
- page += PAGE_SIZE;
+ page += CNIC_PAGE_SIZE;
}
}
}
@@ -1064,11 +1064,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for SQ which is page aligned */
ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
ep->qp.sq_mem_size =
- (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.sq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.sq_pgtbl_size =
- (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.sq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.sq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
@@ -1101,11 +1101,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for CQ which is page aligned */
ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
ep->qp.cq_mem_size =
- (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.cq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.cq_pgtbl_size =
- (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.cq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.cq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
@@ -1144,11 +1144,11 @@ int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
/* Allocate page table memory for RQ which is page aligned */
ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
ep->qp.rq_mem_size =
- (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+ (ep->qp.rq_mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
ep->qp.rq_pgtbl_size =
- (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+ (ep->qp.rq_pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
ep->qp.rq_pgtbl_virt =
dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
@@ -1270,7 +1270,7 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
bnx2i_adjust_qp_size(hba);
iscsi_init.flags =
- ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+ (CNIC_PAGE_BITS - 8) << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
if (en_tcp_dack)
iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
iscsi_init.reserved0 = 0;
@@ -1288,15 +1288,15 @@ int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
iscsi_init.num_ccells_per_conn = hba->num_ccell;
iscsi_init.num_tasks_per_conn = hba->max_sqes;
- iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+ iscsi_init.sq_wqes_per_page = CNIC_PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
iscsi_init.sq_num_wqes = hba->max_sqes;
iscsi_init.cq_log_wqes_per_page =
- (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+ (u8) bnx2i_power_of2(CNIC_PAGE_SIZE / BNX2I_CQE_SIZE);
iscsi_init.cq_num_wqes = hba->max_cqes;
iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
- (PAGE_SIZE - 1)) / PAGE_SIZE;
+ (CNIC_PAGE_SIZE - 1)) / CNIC_PAGE_SIZE;
iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
iscsi_init.rq_num_wqes = hba->max_rqes;
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 854dad7d5b03..c8b0aff5bbd4 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -525,7 +525,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
struct iscsi_bd *mp_bdt;
u64 addr;
- hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&hba->mp_bd_dma, GFP_KERNEL);
if (!hba->mp_bd_tbl) {
printk(KERN_ERR "unable to allocate Middle Path BDT\n");
@@ -533,11 +533,12 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
goto out;
}
- hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
+ CNIC_PAGE_SIZE,
&hba->dummy_buf_dma, GFP_KERNEL);
if (!hba->dummy_buffer) {
printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
rc = -1;
@@ -548,7 +549,7 @@ static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
addr = (unsigned long) hba->dummy_buf_dma;
mp_bdt->buffer_addr_lo = addr & 0xffffffff;
mp_bdt->buffer_addr_hi = addr >> 32;
- mp_bdt->buffer_length = PAGE_SIZE;
+ mp_bdt->buffer_length = CNIC_PAGE_SIZE;
mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
ISCSI_BD_FIRST_IN_BD_CHAIN;
out:
@@ -565,12 +566,12 @@ out:
static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
{
if (hba->mp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->mp_bd_tbl, hba->mp_bd_dma);
hba->mp_bd_tbl = NULL;
}
if (hba->dummy_buffer) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
hba->dummy_buffer, hba->dummy_buf_dma);
hba->dummy_buffer = NULL;
}
@@ -934,14 +935,14 @@ static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
struct bnx2i_conn *bnx2i_conn)
{
if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.resp_bd_tbl,
bnx2i_conn->gen_pdu.resp_bd_dma);
bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
}
if (bnx2i_conn->gen_pdu.req_bd_tbl) {
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
@@ -998,13 +999,13 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
bnx2i_conn->gen_pdu.req_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
goto login_req_bd_tbl_failure;
bnx2i_conn->gen_pdu.resp_bd_tbl =
- dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_alloc_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
&bnx2i_conn->gen_pdu.resp_bd_dma,
GFP_KERNEL);
if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
@@ -1013,7 +1014,7 @@ static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
return 0;
login_resp_bd_tbl_failure:
- dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+ dma_free_coherent(&hba->pcidev->dev, CNIC_PAGE_SIZE,
bnx2i_conn->gen_pdu.req_bd_tbl,
bnx2i_conn->gen_pdu.req_bd_dma);
bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
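The bnx2fc and bnx2i hunks above replace PAGE_SIZE/PAGE_MASK with CNIC_PAGE_SIZE/CNIC_PAGE_MASK (and ISCSI_PAGE_SIZE_4K with CNIC_PAGE_BITS - 8) in every queue, page-table and buffer sizing calculation. The CNIC firmware interface counts in its own fixed page units, so the host page size must not leak into these values; on kernels built with a PAGE_SIZE larger than 4 KiB the old code produced oversized allocations and wrong page counts for the firmware. A sketch of the recurring round-up idiom, assuming CNIC_PAGE_SIZE is a power of two and CNIC_PAGE_MASK is ~(CNIC_PAGE_SIZE - 1):
    /* Round a byte count up to whole CNIC pages, then size the page table
     * the firmware walks (one pointer per CNIC page), also page aligned. */
    mem_size   = (mem_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;
    pgtbl_size = (mem_size / CNIC_PAGE_SIZE) * sizeof(void *);
    pgtbl_size = (pgtbl_size + (CNIC_PAGE_SIZE - 1)) & CNIC_PAGE_MASK;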
diff --git a/drivers/scsi/isci/host.h b/drivers/scsi/isci/host.h
index 4911310a38f5..22a9bb1abae1 100644
--- a/drivers/scsi/isci/host.h
+++ b/drivers/scsi/isci/host.h
@@ -311,9 +311,8 @@ static inline struct Scsi_Host *to_shost(struct isci_host *ihost)
}
#define for_each_isci_host(id, ihost, pdev) \
- for (id = 0, ihost = to_pci_info(pdev)->hosts[id]; \
- id < ARRAY_SIZE(to_pci_info(pdev)->hosts) && ihost; \
- ihost = to_pci_info(pdev)->hosts[++id])
+ for (id = 0; id < SCI_MAX_CONTROLLERS && \
+ (ihost = to_pci_info(pdev)->hosts[id]); id++)
static inline void wait_for_start(struct isci_host *ihost)
{
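The for_each_isci_host() rewrite bounds the loop with SCI_MAX_CONTROLLERS and only loads hosts[id] after the index check, whereas the old form executed ihost = to_pci_info(pdev)->hosts[++id] at the end of every pass, before the condition was re-evaluated, and so could read one slot past the array on the final iteration. A reduced sketch of the two loop shapes (the local hosts array is illustrative):
    /* old shape: hosts[++id] is dereferenced before the next bounds test */
    for (id = 0, ihost = hosts[id];
         id < ARRAY_SIZE(hosts) && ihost;
         ihost = hosts[++id])
            /* body */;
    /* new shape: the index is tested first, then the element is loaded */
    for (id = 0; id < SCI_MAX_CONTROLLERS && (ihost = hosts[id]); id++)
            /* body */;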
diff --git a/drivers/scsi/isci/port_config.c b/drivers/scsi/isci/port_config.c
index 85c77f6b802b..ac879745ef80 100644
--- a/drivers/scsi/isci/port_config.c
+++ b/drivers/scsi/isci/port_config.c
@@ -615,13 +615,6 @@ static void sci_apc_agent_link_up(struct isci_host *ihost,
SCIC_SDS_APC_WAIT_LINK_UP_NOTIFICATION);
} else {
/* the phy is already the part of the port */
- u32 port_state = iport->sm.current_state_id;
-
- /* if the PORT'S state is resetting then the link up is from
- * port hard reset in this case, we need to tell the port
- * that link up is recieved
- */
- BUG_ON(port_state != SCI_PORT_RESETTING);
port_agent->phy_ready_mask |= 1 << phy_index;
sci_port_link_up(iport, iphy);
}
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 0d30ca849e8f..5d6fda72d659 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -801,7 +801,7 @@ int isci_task_I_T_nexus_reset(struct domain_device *dev)
/* XXX: need to cleanup any ireqs targeting this
* domain_device
*/
- ret = TMF_RESP_FUNC_COMPLETE;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1fe95ef23e1..266724b6b899 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -2996,8 +2996,7 @@ struct qla_hw_data {
IS_QLA82XX(ha) || IS_QLA83XX(ha) || \
IS_QLA8044(ha))
#define IS_MSIX_NACK_CAPABLE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
-#define IS_NOPOLLING_TYPE(ha) ((IS_QLA25XX(ha) || IS_QLA81XX(ha) || \
- IS_QLA83XX(ha)) && (ha)->flags.msix_enabled)
+#define IS_NOPOLLING_TYPE(ha) (IS_QLA81XX(ha) && (ha)->flags.msix_enabled)
#define IS_FAC_REQUIRED(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_NOCACHE_VPD_TYPE(ha) (IS_QLA81XX(ha) || IS_QLA83XX(ha))
#define IS_ALOGIO_CAPABLE(ha) (IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 9bc86b9e86b1..0a1dcb43d18b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -2880,6 +2880,7 @@ static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT 2
+#define ATIO_VECTOR 2
int i, ret;
struct msix_entry *entries;
struct qla_msix_entry *qentry;
@@ -2936,34 +2937,47 @@ msix_failed:
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < ha->msix_count; i++) {
+ for (i = 0; i < 2; i++) {
qentry = &ha->msix_entries[i];
- if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
- ret = request_irq(qentry->vector,
- qla83xx_msix_entries[i].handler,
- 0, qla83xx_msix_entries[i].name, rsp);
- } else if (IS_P3P_TYPE(ha)) {
+ if (IS_P3P_TYPE(ha))
ret = request_irq(qentry->vector,
qla82xx_msix_entries[i].handler,
0, qla82xx_msix_entries[i].name, rsp);
- } else {
+ else
ret = request_irq(qentry->vector,
msix_entries[i].handler,
0, msix_entries[i].name, rsp);
- }
- if (ret) {
- ql_log(ql_log_fatal, vha, 0x00cb,
- "MSI-X: unable to register handler -- %x/%d.\n",
- qentry->vector, ret);
- qla24xx_disable_msix(ha);
- ha->mqenable = 0;
- goto msix_out;
- }
+ if (ret)
+ goto msix_register_fail;
qentry->have_irq = 1;
qentry->rsp = rsp;
rsp->msix = qentry;
}
+ /*
+ * If target mode is enabled, also request the vector for the ATIO
+ * queue.
+ */
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
+ qentry = &ha->msix_entries[ATIO_VECTOR];
+ ret = request_irq(qentry->vector,
+ qla83xx_msix_entries[ATIO_VECTOR].handler,
+ 0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
+ qentry->have_irq = 1;
+ qentry->rsp = rsp;
+ rsp->msix = qentry;
+ }
+
+msix_register_fail:
+ if (ret) {
+ ql_log(ql_log_fatal, vha, 0x00cb,
+ "MSI-X: unable to register handler -- %x/%d.\n",
+ qentry->vector, ret);
+ qla24xx_disable_msix(ha);
+ ha->mqenable = 0;
+ goto msix_out;
+ }
+
/* Enable MSI-X vector for response queue update for queue 0 */
if (IS_QLA83XX(ha)) {
if (ha->msixbase && ha->mqiobase &&
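The qla_isr.c rework splits MSI-X registration into two stages: vectors 0 and 1 for the base and response queues are always requested from the per-HBA handler tables, and vector 2 (the new ATIO_VECTOR define) is requested separately, only when target mode is enabled and the adapter is ATIO-MSIX capable. Previously one loop over ha->msix_count picked a handler table per iteration, which mixed the target-mode-only ATIO handler selection into the base-queue setup; failures from either stage now funnel through the common msix_register_fail path. A comment-only sketch of the resulting vector layout:
    /*
     * vector 0 -> base/default handler    (msix_entries[0] or the qla82xx table)
     * vector 1 -> response queue handler  (msix_entries[1] or the qla82xx table)
     * vector 2 -> ATIO queue handler      (qla83xx_msix_entries[ATIO_VECTOR]),
     *             requested only if QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)
     */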
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 9e80d61e5a3a..0cb73074c199 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -790,17 +790,32 @@ static inline int test_tgt_sess_count(struct qla_tgt *tgt)
}
/* Called by tcm_qla2xxx configfs code */
-void qlt_stop_phase1(struct qla_tgt *tgt)
+int qlt_stop_phase1(struct qla_tgt *tgt)
{
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = tgt->ha;
unsigned long flags;
+ mutex_lock(&qla_tgt_mutex);
+ if (!vha->fc_vport) {
+ struct Scsi_Host *sh = vha->host;
+ struct fc_host_attrs *fc_host = shost_to_fc_host(sh);
+ bool npiv_vports;
+
+ spin_lock_irqsave(sh->host_lock, flags);
+ npiv_vports = (fc_host->npiv_vports_inuse);
+ spin_unlock_irqrestore(sh->host_lock, flags);
+
+ if (npiv_vports) {
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
+ }
+ }
if (tgt->tgt_stop || tgt->tgt_stopped) {
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
"Already in tgt->tgt_stop or tgt_stopped state\n");
- dump_stack();
- return;
+ mutex_unlock(&qla_tgt_mutex);
+ return -EPERM;
}
ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
@@ -815,6 +830,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
qlt_clear_tgt_db(tgt, true);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
mutex_unlock(&vha->vha_tgt.tgt_mutex);
+ mutex_unlock(&qla_tgt_mutex);
flush_delayed_work(&tgt->sess_del_work);
@@ -841,6 +857,7 @@ void qlt_stop_phase1(struct qla_tgt *tgt)
/* Wait for sessions to clear out (just in case) */
wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
+ return 0;
}
EXPORT_SYMBOL(qlt_stop_phase1);
@@ -2595,8 +2612,6 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
return -ENOMEM;
}
- INIT_LIST_HEAD(&cmd->cmd_list);
-
memcpy(&cmd->atio, atio, sizeof(*atio));
cmd->state = QLA_TGT_STATE_NEW;
cmd->tgt = vha->vha_tgt.qla_tgt;
@@ -3187,7 +3202,8 @@ restart:
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
"SRR cmd %p (se_cmd %p, tag %d, op %x), "
"sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
- se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
+ se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
+ cmd->sg_cnt, cmd->offset);
qlt_handle_srr(vha, sctio, imm);
@@ -4183,6 +4199,9 @@ int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
+ if (base_vha->fc_vport)
+ return 0;
+
mutex_lock(&qla_tgt_mutex);
list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
mutex_unlock(&qla_tgt_mutex);
@@ -4196,6 +4215,10 @@ int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
if (!vha->vha_tgt.qla_tgt)
return 0;
+ if (vha->fc_vport) {
+ qlt_release(vha->vha_tgt.qla_tgt);
+ return 0;
+ }
mutex_lock(&qla_tgt_mutex);
list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
mutex_unlock(&qla_tgt_mutex);
@@ -4267,6 +4290,12 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
spin_unlock_irqrestore(&ha->hardware_lock, flags);
continue;
}
+ if (tgt->tgt_stop) {
+ pr_debug("MODE_TARGET in shutdown on qla2xxx(%d)\n",
+ host->host_no);
+ spin_unlock_irqrestore(&ha->hardware_lock, flags);
+ continue;
+ }
spin_unlock_irqrestore(&ha->hardware_lock, flags);
if (!scsi_host_get(host)) {
@@ -4281,12 +4310,11 @@ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
scsi_host_put(host);
continue;
}
- mutex_unlock(&qla_tgt_mutex);
-
rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
if (rc != 0)
scsi_host_put(host);
+ mutex_unlock(&qla_tgt_mutex);
return rc;
}
mutex_unlock(&qla_tgt_mutex);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 1d10eecad499..ce33d8c26406 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -855,7 +855,6 @@ struct qla_tgt_cmd {
uint16_t loop_id; /* to save extra sess dereferences */
struct qla_tgt *tgt; /* to save extra sess dereferences */
struct scsi_qla_host *vha;
- struct list_head cmd_list;
struct atio_from_isp atio;
};
@@ -1002,7 +1001,7 @@ extern void qlt_modify_vp_config(struct scsi_qla_host *,
extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
extern int qlt_mem_alloc(struct qla_hw_data *);
extern void qlt_mem_free(struct qla_hw_data *);
-extern void qlt_stop_phase1(struct qla_tgt *);
+extern int qlt_stop_phase1(struct qla_tgt *);
extern void qlt_stop_phase2(struct qla_tgt *);
extern irqreturn_t qla83xx_msix_atio_q(int, void *);
extern void qlt_83xx_iospace_config(struct qla_hw_data *);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 75a141bbe74d..788c4fe2b0c9 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -182,20 +182,6 @@ static int tcm_qla2xxx_npiv_parse_wwn(
return 0;
}
-static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
- u64 wwpn, u64 wwnn)
-{
- u8 b[8], b2[8];
-
- put_unaligned_be64(wwpn, b);
- put_unaligned_be64(wwnn, b2);
- return snprintf(buf, len,
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
- "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
- b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
- b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
-}
-
static char *tcm_qla2xxx_npiv_get_fabric_name(void)
{
return "qla2xxx_npiv";
@@ -227,15 +213,6 @@ static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
return lport->lport_naa_name;
}
-static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
-{
- struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
- struct tcm_qla2xxx_tpg, se_tpg);
- struct tcm_qla2xxx_lport *lport = tpg->lport;
-
- return &lport->lport_npiv_name[0];
-}
-
static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
{
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
@@ -941,15 +918,41 @@ static ssize_t tcm_qla2xxx_tpg_show_enable(
atomic_read(&tpg->lport_tpg_enabled));
}
+static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(base_vha);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
+static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
+{
+ struct tcm_qla2xxx_tpg *base_tpg = container_of(work,
+ struct tcm_qla2xxx_tpg, tpg_base_work);
+ struct se_portal_group *se_tpg = &base_tpg->se_tpg;
+ struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
+
+ if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
+ atomic_set(&base_tpg->lport_tpg_enabled, 0);
+ configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
+ &se_tpg->tpg_group.cg_item);
+ }
+ complete(&base_tpg->tpg_base_comp);
+}
+
static ssize_t tcm_qla2xxx_tpg_store_enable(
struct se_portal_group *se_tpg,
const char *page,
size_t count)
{
- struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
- struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
- struct tcm_qla2xxx_lport, lport_wwn);
- struct scsi_qla_host *vha = lport->qla_vha;
struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
struct tcm_qla2xxx_tpg, se_tpg);
unsigned long op;
@@ -964,19 +967,28 @@ static ssize_t tcm_qla2xxx_tpg_store_enable(
pr_err("Illegal value for tpg_enable: %lu\n", op);
return -EINVAL;
}
-
if (op) {
- atomic_set(&tpg->lport_tpg_enabled, 1);
- qlt_enable_vha(vha);
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_depend_tpg);
} else {
- if (!vha->vha_tgt.qla_tgt) {
- pr_err("struct qla_hw_data *vha->vha_tgt.qla_tgt is NULL\n");
- return -ENODEV;
- }
- atomic_set(&tpg->lport_tpg_enabled, 0);
- qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ INIT_WORK(&tpg->tpg_base_work, tcm_qla2xxx_undepend_tpg);
}
+ init_completion(&tpg->tpg_base_comp);
+ schedule_work(&tpg->tpg_base_work);
+ wait_for_completion(&tpg->tpg_base_comp);
+ if (op) {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return -ENODEV;
+ } else {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EPERM;
+ }
return count;
}
@@ -1053,11 +1065,64 @@ static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
/*
* Clear local TPG=1 pointer for non NPIV mode.
*/
- lport->tpg_1 = NULL;
-
+ lport->tpg_1 = NULL;
kfree(tpg);
}
+static ssize_t tcm_qla2xxx_npiv_tpg_show_enable(
+ struct se_portal_group *se_tpg,
+ char *page)
+{
+ return tcm_qla2xxx_tpg_show_enable(se_tpg, page);
+}
+
+static ssize_t tcm_qla2xxx_npiv_tpg_store_enable(
+ struct se_portal_group *se_tpg,
+ const char *page,
+ size_t count)
+{
+ struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
+ struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
+ struct tcm_qla2xxx_lport, lport_wwn);
+ struct scsi_qla_host *vha = lport->qla_vha;
+ struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
+ struct tcm_qla2xxx_tpg, se_tpg);
+ unsigned long op;
+ int rc;
+
+ rc = kstrtoul(page, 0, &op);
+ if (rc < 0) {
+ pr_err("kstrtoul() returned %d\n", rc);
+ return -EINVAL;
+ }
+ if ((op != 1) && (op != 0)) {
+ pr_err("Illegal value for tpg_enable: %lu\n", op);
+ return -EINVAL;
+ }
+ if (op) {
+ if (atomic_read(&tpg->lport_tpg_enabled))
+ return -EEXIST;
+
+ atomic_set(&tpg->lport_tpg_enabled, 1);
+ qlt_enable_vha(vha);
+ } else {
+ if (!atomic_read(&tpg->lport_tpg_enabled))
+ return count;
+
+ atomic_set(&tpg->lport_tpg_enabled, 0);
+ qlt_stop_phase1(vha->vha_tgt.qla_tgt);
+ }
+
+ return count;
+}
+
+TF_TPG_BASE_ATTR(tcm_qla2xxx_npiv, enable, S_IRUGO | S_IWUSR);
+
+static struct configfs_attribute *tcm_qla2xxx_npiv_tpg_attrs[] = {
+ &tcm_qla2xxx_npiv_tpg_enable.attr,
+ NULL,
+};
+
static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
struct se_wwn *wwn,
struct config_group *group,
@@ -1650,6 +1715,9 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
struct scsi_qla_host *npiv_vha;
struct tcm_qla2xxx_lport *lport =
(struct tcm_qla2xxx_lport *)target_lport_ptr;
+ struct tcm_qla2xxx_lport *base_lport =
+ (struct tcm_qla2xxx_lport *)base_vha->vha_tgt.target_lport_ptr;
+ struct tcm_qla2xxx_tpg *base_tpg;
struct fc_vport_identifiers vport_id;
if (!qla_tgt_mode_enabled(base_vha)) {
@@ -1657,6 +1725,13 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
return -EPERM;
}
+ if (!base_lport || !base_lport->tpg_1 ||
+ !atomic_read(&base_lport->tpg_1->lport_tpg_enabled)) {
+ pr_err("qla2xxx base_lport or tpg_1 not available\n");
+ return -EPERM;
+ }
+ base_tpg = base_lport->tpg_1;
+
memset(&vport_id, 0, sizeof(vport_id));
vport_id.port_name = npiv_wwpn;
vport_id.node_name = npiv_wwnn;
@@ -1675,7 +1750,6 @@ static int tcm_qla2xxx_lport_register_npiv_cb(struct scsi_qla_host *base_vha,
npiv_vha = (struct scsi_qla_host *)vport->dd_data;
npiv_vha->vha_tgt.target_lport_ptr = target_lport_ptr;
lport->qla_vha = npiv_vha;
-
scsi_host_get(npiv_vha->host);
return 0;
}
@@ -1714,8 +1788,6 @@ static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
}
lport->lport_npiv_wwpn = npiv_wwpn;
lport->lport_npiv_wwnn = npiv_wwnn;
- tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
- TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
sprintf(lport->lport_naa_name, "naa.%016llx", (unsigned long long) npiv_wwpn);
ret = tcm_qla2xxx_init_lport(lport);
@@ -1824,7 +1896,7 @@ static struct target_core_fabric_ops tcm_qla2xxx_ops = {
static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
.get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
.get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
- .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
+ .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
.tpg_get_tag = tcm_qla2xxx_get_tag,
.tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
.tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
@@ -1935,7 +2007,7 @@ static int tcm_qla2xxx_register_configfs(void)
*/
npiv_fabric->tf_cit_tmpl.tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_base_cit.ct_attrs =
- tcm_qla2xxx_tpg_attrs;
+ tcm_qla2xxx_npiv_tpg_attrs;
npiv_fabric->tf_cit_tmpl.tfc_tpg_attrib_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_param_cit.ct_attrs = NULL;
npiv_fabric->tf_cit_tmpl.tfc_tpg_np_base_cit.ct_attrs = NULL;
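The tcm_qla2xxx changes route base-TPG enable and disable through a work item: the store handler initializes tpg_base_work with tcm_qla2xxx_depend_tpg() or tcm_qla2xxx_undepend_tpg(), schedules it, waits on tpg_base_comp, and then reads lport_tpg_enabled to decide the return code. Taking configfs_depend_item() directly inside a configfs attribute store can deadlock against configfs itself, so the dependency is acquired and dropped from workqueue context; NPIV TPGs keep a direct enable path via the new tcm_qla2xxx_npiv_tpg_store_enable(), and NPIV lport registration now refuses to proceed unless the base lport's tpg_1 is enabled. A minimal sketch of the queue-and-wait pattern, with the work function name left generic:
    /* Sketch, assuming the tpg_base_work/tpg_base_comp fields added in the
     * header hunk; do_depend_work stands in for tcm_qla2xxx_depend_tpg(). */
    INIT_WORK(&tpg->tpg_base_work, do_depend_work);
    init_completion(&tpg->tpg_base_comp);
    schedule_work(&tpg->tpg_base_work);
    wait_for_completion(&tpg->tpg_base_comp);
    if (!atomic_read(&tpg->lport_tpg_enabled))
            return -ENODEV; /* the work item did not take the dependency */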
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 275d8b9a7a34..33aaac8c7d59 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -4,8 +4,6 @@
#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
-/* lenth of ASCII NPIV 'WWPN+WWNN' including pad */
-#define TCM_QLA2XXX_NPIV_NAMELEN 66
#include "qla_target.h"
@@ -43,6 +41,9 @@ struct tcm_qla2xxx_tpg {
struct tcm_qla2xxx_tpg_attrib tpg_attrib;
/* Returned by tcm_qla2xxx_make_tpg() */
struct se_portal_group se_tpg;
+ /* Items for dealing with configfs_depend_item */
+ struct completion tpg_base_comp;
+ struct work_struct tpg_base_work;
};
struct tcm_qla2xxx_fc_loopid {
@@ -62,8 +63,6 @@ struct tcm_qla2xxx_lport {
char lport_name[TCM_QLA2XXX_NAMELEN];
/* ASCII formatted naa WWPN for VPD page 83 etc */
char lport_naa_name[TCM_QLA2XXX_NAMELEN];
- /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
- char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
/* map for fc_port pointers in 24-bit FC Port ID space */
struct btree_head32 lport_fcport_map;
/* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 7bd7f0d5f050..62ec84b42e31 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1684,7 +1684,7 @@ u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
host_dev = scsi_get_device(shost);
if (host_dev && host_dev->dma_mask)
- bounce_limit = dma_max_pfn(host_dev) << PAGE_SHIFT;
+ bounce_limit = (u64)dma_max_pfn(host_dev) << PAGE_SHIFT;
return bounce_limit;
}
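The scsi_lib.c one-liner casts dma_max_pfn() to u64 before the PAGE_SHIFT shift. dma_max_pfn() returns an unsigned long, so on 32-bit builds the shift was performed in 32-bit arithmetic and the upper bits of the bounce limit were silently dropped for devices that can address 4 GiB or more; widening the PFN first preserves the full limit. A sketch of the difference, assuming a 32-bit unsigned long and PAGE_SHIFT == 12:
    unsigned long pfn = 0x100000;        /* 1M pages x 4 KiB = 4 GiB */
    u64 wrong = pfn << PAGE_SHIFT;       /* shifted as 32 bits: wraps to 0 */
    u64 right = (u64)pfn << PAGE_SHIFT;  /* 0x1_0000_0000, the intended limit */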
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 17d740427240..9969fa1ef7c4 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1419,6 +1419,9 @@ static void storvsc_device_destroy(struct scsi_device *sdevice)
{
struct stor_mem_pools *memp = sdevice->hostdata;
+ if (!memp)
+ return;
+
mempool_destroy(memp->request_mempool);
kmem_cache_destroy(memp->request_pool);
kfree(memp);
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ba9310bc9acb..2a1b0a7d9415 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -150,7 +150,7 @@ config SPI_BUTTERFLY
config SPI_CLPS711X
tristate "CLPS711X host SPI controller"
- depends on ARCH_CLPS711X
+ depends on ARCH_CLPS711X || COMPILE_TEST
help
This enables dedicated general purpose SPI/Microwire1-compatible
master mode interface (SSI1) for CLPS711X-based CPUs.
@@ -212,7 +212,6 @@ config SPI_IMX
tristate "Freescale i.MX SPI controllers"
depends on ARCH_MXC || COMPILE_TEST
select SPI_BITBANG
- default m if IMX_HAVE_PLATFORM_SPI_IMX
help
This enables using the Freescale i.MX SPI controllers in master
mode.
@@ -270,6 +269,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select SPI_BITBANG
+ select REGMAP_MMIO
depends on SOC_VF610 || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
@@ -307,7 +307,7 @@ config SPI_OMAP_UWIRE
config SPI_OMAP24XX
tristate "McSPI driver for OMAP"
- depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SH
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
depends on ARCH_OMAP2PLUS || COMPILE_TEST
help
SPI master controller for OMAP24XX and later Multichannel SPI
@@ -376,10 +376,23 @@ config SPI_PXA2XX_PCI
def_tristate SPI_PXA2XX && PCI
config SPI_RSPI
- tristate "Renesas RSPI controller"
+ tristate "Renesas RSPI/QSPI controller"
depends on (SUPERH && SH_DMAE_BASE) || ARCH_SHMOBILE
help
- SPI driver for Renesas RSPI blocks.
+ SPI driver for Renesas RSPI and QSPI blocks.
+
+config SPI_QUP
+ tristate "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_MSM_DT || (ARM && COMPILE_TEST)
+ help
+ The Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO)
+ for the serial peripheral interface (SPI) mini-core. SPI in master
+ mode supports up to 50MHz, up to four chip selects, programmable
+ data path from 4 bits to 32 bits and numerous protocol variants.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
config SPI_S3C24XX
tristate "Samsung S3C24XX series SPI"
@@ -416,7 +429,6 @@ config SPI_SH_MSIOF
tristate "SuperH MSIOF SPI controller"
depends on HAVE_CLK
depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
- select SPI_BITBANG
help
SPI driver for SuperH and SH Mobile MSIOF blocks.
@@ -546,7 +558,7 @@ config SPI_DW_MID_DMA
config SPI_DW_MMIO
tristate "Memory-mapped io interface driver for DW SPI core"
- depends on SPI_DESIGNWARE && HAVE_CLK
+ depends on SPI_DESIGNWARE
#
# There are lots of SPI device types, with sensors and memory
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 95af48d2d360..e598147b06ef 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -59,6 +59,7 @@ spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
spi-s3c24xx-hw-y := spi-s3c24xx.o
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
index 5d7deaf62867..5b5709a5c957 100644
--- a/drivers/spi/spi-altera.c
+++ b/drivers/spi/spi-altera.c
@@ -13,7 +13,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -200,7 +199,6 @@ static irqreturn_t altera_spi_irq(int irq, void *dev)
static int altera_spi_probe(struct platform_device *pdev)
{
- struct altera_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
struct altera_spi *hw;
struct spi_master *master;
struct resource *res;
@@ -214,6 +212,8 @@ static int altera_spi_probe(struct platform_device *pdev)
master->bus_num = pdev->id;
master->num_chipselect = 16;
master->mode_bits = SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->dev.of_node = pdev->dev.of_node;
hw = spi_master_get_devdata(master);
platform_set_drvdata(pdev, hw);
@@ -245,9 +245,6 @@ static int altera_spi_probe(struct platform_device *pdev)
if (err)
goto exit;
}
- /* find platform data */
- if (!platp)
- hw->bitbang.master->dev.of_node = pdev->dev.of_node;
/* register our spi controller */
err = spi_bitbang_start(&hw->bitbang);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
index 31534b51715a..3898b0b9ee77 100644
--- a/drivers/spi/spi-ath79.c
+++ b/drivers/spi/spi-ath79.c
@@ -14,7 +14,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -132,9 +131,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi)
flags = GPIOF_DIR_OUT;
if (spi->mode & SPI_CS_HIGH)
- flags |= GPIOF_INIT_HIGH;
- else
flags |= GPIOF_INIT_LOW;
+ else
+ flags |= GPIOF_INIT_HIGH;
status = gpio_request_one(cdata->gpio, flags,
dev_name(&spi->dev));
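The spi-ath79 hunk swaps the initial levels in ath79_spi_setup_cs(): a chip-select line must come up deasserted, so an active-high CS (SPI_CS_HIGH) is now requested at the low level and the default active-low CS at the high level; the old code asserted every chip select the moment its GPIO was requested. A sketch of the corrected mapping:
    /* Idle the CS line at request time: drive the inverse of its active level. */
    flags = GPIOF_DIR_OUT;
    flags |= (spi->mode & SPI_CS_HIGH) ? GPIOF_INIT_LOW : GPIOF_INIT_HIGH;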
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index b0842f751016..8005f9869481 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -9,7 +9,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -26,6 +25,7 @@
#include <linux/io.h>
#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
/* SPI register offsets */
#define SPI_CR 0x0000
@@ -993,13 +993,6 @@ static int atmel_spi_setup(struct spi_device *spi)
as = spi_master_get_devdata(spi->master);
- if (spi->chip_select > spi->master->num_chipselect) {
- dev_dbg(&spi->dev,
- "setup: invalid chipselect %u (%u defined)\n",
- spi->chip_select, spi->master->num_chipselect);
- return -EINVAL;
- }
-
/* see notes above re chipselect */
if (!atmel_spi_is_v2(as)
&& spi->chip_select == 0
@@ -1087,14 +1080,6 @@ static int atmel_spi_one_transfer(struct spi_master *master,
}
}
- if (xfer->bits_per_word > 8) {
- if (xfer->len % 2) {
- dev_dbg(&spi->dev,
- "buffer len should be 16 bits aligned\n");
- return -EINVAL;
- }
- }
-
/*
* DMA map early, for performance (empties dcache ASAP) and
* better fault reporting.
@@ -1221,9 +1206,6 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
dev_dbg(&spi->dev, "new message %p submitted for %s\n",
msg, dev_name(&spi->dev));
- if (unlikely(list_empty(&msg->transfers)))
- return -EINVAL;
-
atmel_spi_lock(as);
cs_activate(as, spi);
@@ -1244,10 +1226,10 @@ static int atmel_spi_transfer_one_message(struct spi_master *master,
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
dev_dbg(&spi->dev,
- " xfer %p: len %u tx %p/%08x rx %p/%08x\n",
+ " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
xfer, xfer->len,
- xfer->tx_buf, xfer->tx_dma,
- xfer->rx_buf, xfer->rx_dma);
+ xfer->tx_buf, &xfer->tx_dma,
+ xfer->rx_buf, &xfer->rx_dma);
}
msg_done:
@@ -1303,6 +1285,9 @@ static int atmel_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct atmel_spi *as;
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!regs)
return -ENXIO;
@@ -1455,8 +1440,19 @@ static int atmel_spi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
clk_disable_unprepare(as->clk);
+
+ pinctrl_pm_select_sleep_state(dev);
+
return 0;
}
@@ -1464,9 +1460,18 @@ static int atmel_spi_resume(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct atmel_spi *as = spi_master_get_devdata(master);
+ int ret;
+
+ pinctrl_pm_select_default_state(dev);
clk_prepare_enable(as->clk);
- return 0;
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+
+ return ret;
}
static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume);
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
index c4141c92bcff..aafb812d7ea6 100644
--- a/drivers/spi/spi-au1550.c
+++ b/drivers/spi/spi-au1550.c
@@ -55,8 +55,6 @@ struct au1550_spi {
volatile psc_spi_t __iomem *regs;
int irq;
- unsigned freq_max;
- unsigned freq_min;
unsigned len;
unsigned tx_count;
@@ -248,11 +246,8 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
hz = t->speed_hz;
}
- if (hz > spi->max_speed_hz || hz > hw->freq_max || hz < hw->freq_min) {
- dev_err(&spi->dev, "setupxfer: clock rate=%d out of range\n",
- hz);
+ if (!hz)
return -EINVAL;
- }
au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
@@ -287,23 +282,6 @@ static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
return 0;
}
-static int au1550_spi_setup(struct spi_device *spi)
-{
- struct au1550_spi *hw = spi_master_get_devdata(spi->master);
-
- if (spi->max_speed_hz == 0)
- spi->max_speed_hz = hw->freq_max;
- if (spi->max_speed_hz > hw->freq_max
- || spi->max_speed_hz < hw->freq_min)
- return -EINVAL;
- /*
- * NOTE: cannot change speed and other hw settings immediately,
- * otherwise sharing of spi bus is not possible,
- * so do not call setupxfer(spi, NULL) here
- */
- return 0;
-}
-
/*
* for dma spi transfers, we have to setup rx channel, otherwise there is
* no reliable way how to recognize that spi transfer is done
@@ -838,7 +816,6 @@ static int au1550_spi_probe(struct platform_device *pdev)
hw->bitbang.master = hw->master;
hw->bitbang.setup_transfer = au1550_spi_setupxfer;
hw->bitbang.chipselect = au1550_spi_chipsel;
- hw->bitbang.master->setup = au1550_spi_setup;
hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
if (hw->usedma) {
@@ -909,8 +886,9 @@ static int au1550_spi_probe(struct platform_device *pdev)
{
int min_div = (2 << 0) * (2 * (4 + 1));
int max_div = (2 << 3) * (2 * (63 + 1));
- hw->freq_max = hw->pdata->mainclk_hz / min_div;
- hw->freq_min = hw->pdata->mainclk_hz / (max_div + 1) + 1;
+ master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ master->min_speed_hz =
+ hw->pdata->mainclk_hz / (max_div + 1) + 1;
}
au1550_spi_setup_psc_as_spi(hw);
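The spi-au1550 hunks drop the driver's private freq_min/freq_max fields, the custom setup() range check and the per-transfer clock-rate rejection, publishing the limits through master->min_speed_hz and master->max_speed_hz instead so the SPI core can apply them; setupxfer() now only rejects a zero rate. A sketch of the limit derivation kept in probe, with mainclk_hz standing in for hw->pdata->mainclk_hz:
    int min_div = (2 << 0) * (2 * (4 + 1));   /* smallest divider -> highest rate */
    int max_div = (2 << 3) * (2 * (63 + 1));  /* largest divider  -> lowest rate  */
    master->max_speed_hz = mainclk_hz / min_div;
    master->min_speed_hz = mainclk_hz / (max_div + 1) + 1;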
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
index 8a89dd1f2654..69167456ec1e 100644
--- a/drivers/spi/spi-bcm2835.c
+++ b/drivers/spi/spi-bcm2835.c
@@ -315,7 +315,6 @@ static int bcm2835_spi_probe(struct platform_device *pdev)
master->mode_bits = BCM2835_SPI_MODE_BITS;
master->bits_per_word_mask = SPI_BPW_MASK(8);
- master->bus_num = -1;
master->num_chipselect = 3;
master->transfer_one_message = bcm2835_spi_transfer_one;
master->dev.of_node = pdev->dev.of_node;
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index b528f9fc8bc0..5a211e98383b 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -180,7 +180,7 @@ static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
while (pending > 0) {
int curr_step = min_t(int, step_size, pending);
- init_completion(&bs->done);
+ reinit_completion(&bs->done);
if (tx) {
memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
tx += curr_step;
@@ -369,6 +369,7 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
mutex_init(&bs->bus_mutex);
+ init_completion(&bs->done);
master->bus_num = HSSPI_BUS_NUM;
master->num_chipselect = 8;
@@ -453,9 +454,8 @@ static int bcm63xx_hsspi_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops bcm63xx_hsspi_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_hsspi_suspend, bcm63xx_hsspi_resume)
-};
+static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
+ bcm63xx_hsspi_resume);
static struct platform_driver bcm63xx_hsspi_driver = {
.driver = {
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
index 77286aef2adf..0250fa721cea 100644
--- a/drivers/spi/spi-bcm63xx.c
+++ b/drivers/spi/spi-bcm63xx.c
@@ -20,7 +20,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/module.h>
@@ -35,8 +34,6 @@
#include <bcm63xx_dev_spi.h>
-#define PFX KBUILD_MODNAME
-
#define BCM63XX_SPI_MAX_PREPEND 15
struct bcm63xx_spi {
@@ -169,7 +166,7 @@ static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
transfer_list);
}
- init_completion(&bs->done);
+ reinit_completion(&bs->done);
/* Fill in the Message control register */
msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
@@ -353,6 +350,7 @@ static int bcm63xx_spi_probe(struct platform_device *pdev)
}
bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
platform_set_drvdata(pdev, master);
bs->pdev = pdev;
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
index 38941e5920b5..f515c5e9db57 100644
--- a/drivers/spi/spi-bfin-sport.c
+++ b/drivers/spi/spi-bfin-sport.c
@@ -8,7 +8,6 @@
* Licensed under the GPL-2 or later.
*/
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/device.h>
diff --git a/drivers/spi/spi-bfin-v3.c b/drivers/spi/spi-bfin-v3.c
index 8f8598834b30..4089d0e0d84e 100644
--- a/drivers/spi/spi-bfin-v3.c
+++ b/drivers/spi/spi-bfin-v3.c
@@ -822,7 +822,8 @@ static int bfin_spi_probe(struct platform_device *pdev)
master->cleanup = bfin_spi_cleanup;
master->setup = bfin_spi_setup;
master->transfer_one_message = bfin_spi_transfer_one_message;
- master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
drv_data = spi_master_get_devdata(master);
drv_data->master = master;
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
index f0f195af75d4..55e57c3eb9bd 100644
--- a/drivers/spi/spi-bfin5xx.c
+++ b/drivers/spi/spi-bfin5xx.c
@@ -350,7 +350,6 @@ static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
{
struct bfin_spi_slave_data *chip = drv_data->cur_chip;
- struct spi_transfer *last_transfer;
unsigned long flags;
struct spi_message *msg;
@@ -362,9 +361,6 @@ static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
queue_work(drv_data->workqueue, &drv_data->pump_messages);
spin_unlock_irqrestore(&drv_data->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer, transfer_list);
-
msg->state = NULL;
if (!drv_data->cs_change)
@@ -1030,10 +1026,6 @@ static int bfin_spi_setup(struct spi_device *spi)
}
/* translate common spi framework into our register */
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST)) {
- dev_err(&spi->dev, "unsupported spi modes detected\n");
- goto error;
- }
if (spi->mode & SPI_CPOL)
chip->ctl_reg |= BIT_CTL_CPOL;
if (spi->mode & SPI_CPHA)
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
index bd222f6b677d..dc7d2c2d643e 100644
--- a/drivers/spi/spi-bitbang.c
+++ b/drivers/spi/spi-bitbang.c
@@ -16,7 +16,6 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -467,11 +466,9 @@ EXPORT_SYMBOL_GPL(spi_bitbang_start);
/**
* spi_bitbang_stop - stops the task providing spi communication
*/
-int spi_bitbang_stop(struct spi_bitbang *bitbang)
+void spi_bitbang_stop(struct spi_bitbang *bitbang)
{
spi_unregister_master(bitbang->master);
-
- return 0;
}
EXPORT_SYMBOL_GPL(spi_bitbang_stop);
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
index 8081f96bd1d5..ee4f91ccd8fd 100644
--- a/drivers/spi/spi-butterfly.c
+++ b/drivers/spi/spi-butterfly.c
@@ -309,7 +309,6 @@ done:
static void butterfly_detach(struct parport *p)
{
struct butterfly *pp;
- int status;
/* FIXME this global is ugly ... but, how to quickly get from
* the parport to the "struct butterfly" associated with it?
@@ -321,7 +320,7 @@ static void butterfly_detach(struct parport *p)
butterfly = NULL;
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&pp->bitbang);
+ spi_bitbang_stop(&pp->bitbang);
/* turn off VCC */
parport_write_data(pp->port, 0);
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
index 374ba4a48a9e..4cd62f636547 100644
--- a/drivers/spi/spi-clps711x.c
+++ b/drivers/spi/spi-clps711x.c
@@ -11,158 +11,125 @@
#include <linux/io.h>
#include <linux/clk.h>
-#include <linux/init.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
#include <linux/spi/spi.h>
#include <linux/platform_data/spi-clps711x.h>
-#include <mach/hardware.h>
-
#define DRIVER_NAME "spi-clps711x"
-struct spi_clps711x_data {
- struct completion done;
+#define SYNCIO_FRMLEN(x) ((x) << 8)
+#define SYNCIO_TXFRMEN (1 << 14)
+struct spi_clps711x_data {
+ void __iomem *syncio;
+ struct regmap *syscon;
+ struct regmap *syscon1;
struct clk *spi_clk;
- u32 max_speed_hz;
u8 *tx_buf;
u8 *rx_buf;
- int count;
+ unsigned int bpw;
int len;
-
- int chipselect[0];
};
static int spi_clps711x_setup(struct spi_device *spi)
{
- struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
-
/* We are expect that SPI-device is not selected */
- gpio_direction_output(hw->chipselect[spi->chip_select],
- !(spi->mode & SPI_CS_HIGH));
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
return 0;
}
-static void spi_clps711x_setup_mode(struct spi_device *spi)
-{
- /* Setup edge for transfer */
- if (spi->mode & SPI_CPHA)
- clps_writew(clps_readw(SYSCON3) | SYSCON3_ADCCKNSEN, SYSCON3);
- else
- clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCKNSEN, SYSCON3);
-}
-
-static int spi_clps711x_setup_xfer(struct spi_device *spi,
- struct spi_transfer *xfer)
+static void spi_clps711x_setup_xfer(struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- u32 speed = xfer->speed_hz ? : spi->max_speed_hz;
- u8 bpw = xfer->bits_per_word;
- struct spi_clps711x_data *hw = spi_master_get_devdata(spi->master);
-
- if (bpw != 8) {
- dev_err(&spi->dev, "Unsupported master bus width %i\n", bpw);
- return -EINVAL;
- }
+ struct spi_master *master = spi->master;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
/* Setup SPI frequency divider */
- if (!speed || (speed >= hw->max_speed_hz))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(3), SYSCON1);
- else if (speed >= (hw->max_speed_hz / 2))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(2), SYSCON1);
- else if (speed >= (hw->max_speed_hz / 8))
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(1), SYSCON1);
+ if (xfer->speed_hz >= master->max_speed_hz)
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(3));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 2))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(2));
+ else if (xfer->speed_hz >= (master->max_speed_hz / 8))
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(1));
else
- clps_writel((clps_readl(SYSCON1) & ~SYSCON1_ADCKSEL_MASK) |
- SYSCON1_ADCKSEL(0), SYSCON1);
-
- return 0;
+ regmap_update_bits(hw->syscon1, SYSCON_OFFSET,
+ SYSCON1_ADCKSEL_MASK, SYSCON1_ADCKSEL(0));
}
-static int spi_clps711x_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static int spi_clps711x_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
struct spi_clps711x_data *hw = spi_master_get_devdata(master);
- struct spi_transfer *xfer;
- int status = 0, cs = hw->chipselect[msg->spi->chip_select];
- u32 data;
-
- spi_clps711x_setup_mode(msg->spi);
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- if (spi_clps711x_setup_xfer(msg->spi, xfer)) {
- status = -EINVAL;
- goto out_xfr;
- }
+ struct spi_device *spi = msg->spi;
- gpio_set_value(cs, !!(msg->spi->mode & SPI_CS_HIGH));
-
- reinit_completion(&hw->done);
-
- hw->count = 0;
- hw->len = xfer->len;
- hw->tx_buf = (u8 *)xfer->tx_buf;
- hw->rx_buf = (u8 *)xfer->rx_buf;
-
- /* Initiate transfer */
- data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
- clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
-
- wait_for_completion(&hw->done);
+ /* Setup mode for transfer */
+ return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
+ (spi->mode & SPI_CPHA) ?
+ SYSCON3_ADCCKNSEN : 0);
+}
- if (xfer->delay_usecs)
- udelay(xfer->delay_usecs);
+static int spi_clps711x_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
- if (xfer->cs_change ||
- list_is_last(&xfer->transfer_list, &msg->transfers))
- gpio_set_value(cs, !(msg->spi->mode & SPI_CS_HIGH));
+ spi_clps711x_setup_xfer(spi, xfer);
- msg->actual_length += xfer->len;
- }
+ hw->len = xfer->len;
+ hw->bpw = xfer->bits_per_word;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
-out_xfr:
- msg->status = status;
- spi_finalize_current_message(master);
+ /* Initiate transfer */
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
- return 0;
+ return 1;
}
static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
{
- struct spi_clps711x_data *hw = (struct spi_clps711x_data *)dev_id;
- u32 data;
+ struct spi_master *master = dev_id;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
/* Handle RX */
- data = clps_readb(SYNCIO);
+ data = readb(hw->syncio);
if (hw->rx_buf)
- hw->rx_buf[hw->count] = (u8)data;
-
- hw->count++;
+ *hw->rx_buf++ = data;
/* Handle TX */
- if (hw->count < hw->len) {
- data = hw->tx_buf ? hw->tx_buf[hw->count] : 0;
- clps_writel(data | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN, SYNCIO);
+ if (--hw->len > 0) {
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
+ hw->syncio);
} else
- complete(&hw->done);
+ spi_finalize_current_transfer(master);
return IRQ_HANDLED;
}
static int spi_clps711x_probe(struct platform_device *pdev)
{
- int i, ret;
- struct spi_master *master;
struct spi_clps711x_data *hw;
struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ struct resource *res;
+ int i, irq, ret;
if (!pdata) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -174,33 +141,37 @@ static int spi_clps711x_probe(struct platform_device *pdev)
return -EINVAL;
}
- master = spi_alloc_master(&pdev->dev,
- sizeof(struct spi_clps711x_data) +
- sizeof(int) * pdata->num_chipselect);
- if (!master) {
- dev_err(&pdev->dev, "SPI allocating memory error\n");
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hw));
+ if (!master)
return -ENOMEM;
+
+ master->cs_gpios = devm_kzalloc(&pdev->dev, sizeof(int) *
+ pdata->num_chipselect, GFP_KERNEL);
+ if (!master->cs_gpios) {
+ ret = -ENOMEM;
+ goto err_out;
}
master->bus_num = pdev->id;
master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
- master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
master->num_chipselect = pdata->num_chipselect;
master->setup = spi_clps711x_setup;
- master->transfer_one_message = spi_clps711x_transfer_one_message;
+ master->prepare_message = spi_clps711x_prepare_message;
+ master->transfer_one = spi_clps711x_transfer_one;
hw = spi_master_get_devdata(master);
for (i = 0; i < master->num_chipselect; i++) {
- hw->chipselect[i] = pdata->chipselect[i];
- if (!gpio_is_valid(hw->chipselect[i])) {
- dev_err(&pdev->dev, "Invalid CS GPIO %i\n", i);
- ret = -EINVAL;
- goto err_out;
- }
- if (devm_gpio_request(&pdev->dev, hw->chipselect[i], NULL)) {
+ master->cs_gpios[i] = pdata->chipselect[i];
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
- ret = -EINVAL;
goto err_out;
}
}
@@ -211,29 +182,45 @@ static int spi_clps711x_probe(struct platform_device *pdev)
ret = PTR_ERR(hw->spi_clk);
goto err_out;
}
- hw->max_speed_hz = clk_get_rate(hw->spi_clk);
+ master->max_speed_hz = clk_get_rate(hw->spi_clk);
- init_completion(&hw->done);
platform_set_drvdata(pdev, master);
+ hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
+ if (IS_ERR(hw->syscon)) {
+ ret = PTR_ERR(hw->syscon);
+ goto err_out;
+ }
+
+ hw->syscon1 = syscon_regmap_lookup_by_pdevname("syscon.1");
+ if (IS_ERR(hw->syscon1)) {
+ ret = PTR_ERR(hw->syscon1);
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->syncio)) {
+ ret = PTR_ERR(hw->syncio);
+ goto err_out;
+ }
+
	/* Disable extended mode due to hardware problems */
- clps_writew(clps_readw(SYSCON3) & ~SYSCON3_ADCCON, SYSCON3);
+ regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
/* Clear possible pending interrupt */
- clps_readl(SYNCIO);
+ readl(hw->syncio);
- ret = devm_request_irq(&pdev->dev, IRQ_SSEOTI, spi_clps711x_isr, 0,
- dev_name(&pdev->dev), hw);
- if (ret) {
- dev_err(&pdev->dev, "Can't request IRQ\n");
+ ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
goto err_out;
- }
ret = devm_spi_register_master(&pdev->dev, master);
if (!ret) {
dev_info(&pdev->dev,
"SPI bus driver initialized. Master clock %u Hz\n",
- hw->max_speed_hz);
+ master->max_speed_hz);
return 0;
}
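
The clps711x conversion above follows the core transfer_one flow: the driver only starts the hardware and returns a positive value, and the interrupt handler reports completion through spi_finalize_current_transfer(). A minimal sketch of that contract, with hypothetical foo_* names standing in for the controller-specific pieces:

#include <linux/interrupt.h>
#include <linux/spi/spi.h>

struct foo_priv {
	unsigned int len;		/* frames left in the current transfer */
	const u8 *tx_buf;
	u8 *rx_buf;
};

/* Placeholders for the controller-specific register programming. */
static void foo_hw_start(struct foo_priv *hw);
static bool foo_hw_continue(struct foo_priv *hw);	/* true while frames remain */

static int foo_transfer_one(struct spi_master *master,
			    struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	struct foo_priv *hw = spi_master_get_devdata(master);

	hw->len = xfer->len;
	hw->tx_buf = xfer->tx_buf;
	hw->rx_buf = xfer->rx_buf;

	foo_hw_start(hw);	/* push the first frame */

	return 1;		/* > 0: transfer finishes asynchronously */
}

static irqreturn_t foo_isr(int irq, void *dev_id)
{
	struct spi_master *master = dev_id;
	struct foo_priv *hw = spi_master_get_devdata(master);

	if (!foo_hw_continue(hw))
		spi_finalize_current_transfer(master);

	return IRQ_HANDLED;
}

The core then takes care of the message bookkeeping (actual_length, chip-select sequencing, per-transfer delays), which is what lets the hand-rolled transfer_one_message loop be dropped.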
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
index cabed8f9119e..e2fa628e55e7 100644
--- a/drivers/spi/spi-coldfire-qspi.c
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -77,8 +77,6 @@ struct mcfqspi {
struct mcfqspi_cs_control *cs_control;
wait_queue_head_t waitq;
-
- struct device *dev;
};
static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
@@ -135,13 +133,13 @@ static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
{
- return (mcfqspi->cs_control && mcfqspi->cs_control->setup) ?
+ return (mcfqspi->cs_control->setup) ?
mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
}
static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
{
- if (mcfqspi->cs_control && mcfqspi->cs_control->teardown)
+ if (mcfqspi->cs_control->teardown)
mcfqspi->cs_control->teardown(mcfqspi->cs_control);
}
@@ -300,68 +298,45 @@ static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
}
}
-static int mcfqspi_transfer_one_message(struct spi_master *master,
- struct spi_message *msg)
+static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
{
- struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct spi_device *spi = msg->spi;
- struct spi_transfer *t;
- int status = 0;
-
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- bool cs_high = spi->mode & SPI_CS_HIGH;
- u16 qmr = MCFQSPI_QMR_MSTR;
-
- qmr |= t->bits_per_word << 10;
- if (spi->mode & SPI_CPHA)
- qmr |= MCFQSPI_QMR_CPHA;
- if (spi->mode & SPI_CPOL)
- qmr |= MCFQSPI_QMR_CPOL;
- if (t->speed_hz)
- qmr |= mcfqspi_qmr_baud(t->speed_hz);
- else
- qmr |= mcfqspi_qmr_baud(spi->max_speed_hz);
- mcfqspi_wr_qmr(mcfqspi, qmr);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+ if (enable)
mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+ else
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select, cs_high);
+}
- mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
- if (t->bits_per_word == 8)
- mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf,
- t->rx_buf);
- else
- mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
- t->rx_buf);
- mcfqspi_wr_qir(mcfqspi, 0);
-
- if (t->delay_usecs)
- udelay(t->delay_usecs);
- if (t->cs_change) {
- if (!list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- } else {
- if (list_is_last(&t->transfer_list, &msg->transfers))
- mcfqspi_cs_deselect(mcfqspi, spi->chip_select,
- cs_high);
- }
- msg->actual_length += t->len;
- }
- msg->status = status;
- spi_finalize_current_message(master);
-
- return status;
+static int mcfqspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ qmr |= t->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if (t->bits_per_word == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+ return 0;
}
static int mcfqspi_setup(struct spi_device *spi)
{
- if (spi->chip_select >= spi->master->num_chipselect) {
- dev_dbg(&spi->dev, "%d chip select is out of range\n",
- spi->chip_select);
- return -EINVAL;
- }
-
mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
spi->chip_select, spi->mode & SPI_CS_HIGH);
@@ -388,6 +363,11 @@ static int mcfqspi_probe(struct platform_device *pdev)
return -ENOENT;
}
+ if (!pdata->cs_control) {
+ dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
+ return -EINVAL;
+ }
+
master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
if (master == NULL) {
dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
@@ -436,12 +416,12 @@ static int mcfqspi_probe(struct platform_device *pdev)
}
init_waitqueue_head(&mcfqspi->waitq);
- mcfqspi->dev = &pdev->dev;
master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
master->setup = mcfqspi_setup;
- master->transfer_one_message = mcfqspi_transfer_one_message;
+ master->set_cs = mcfqspi_set_cs;
+ master->transfer_one = mcfqspi_transfer_one;
master->auto_runtime_pm = true;
platform_set_drvdata(pdev, master);
@@ -451,7 +431,7 @@ static int mcfqspi_probe(struct platform_device *pdev)
dev_dbg(&pdev->dev, "spi_register_master failed\n");
goto fail2;
}
- pm_runtime_enable(mcfqspi->dev);
+ pm_runtime_enable(&pdev->dev);
dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
@@ -473,9 +453,8 @@ static int mcfqspi_remove(struct platform_device *pdev)
{
struct spi_master *master = platform_get_drvdata(pdev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- pm_runtime_disable(mcfqspi->dev);
+ pm_runtime_disable(&pdev->dev);
/* disable the hardware (set the baud rate to 0) */
mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
@@ -490,8 +469,11 @@ static int mcfqspi_suspend(struct device *dev)
{
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ int ret;
- spi_master_suspend(master);
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
clk_disable(mcfqspi->clk);
@@ -503,18 +485,17 @@ static int mcfqspi_resume(struct device *dev)
struct spi_master *master = dev_get_drvdata(dev);
struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
- spi_master_resume(master);
-
clk_enable(mcfqspi->clk);
- return 0;
+ return spi_master_resume(master);
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int mcfqspi_runtime_suspend(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_disable(mcfqspi->clk);
@@ -523,7 +504,8 @@ static int mcfqspi_runtime_suspend(struct device *dev)
static int mcfqspi_runtime_resume(struct device *dev)
{
- struct mcfqspi *mcfqspi = dev_get_drvdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
clk_enable(mcfqspi->clk);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 5e7389faa2a0..50f750989258 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -802,8 +802,7 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
pdata = &dspi->pdata;
pdata->version = SPI_VERSION_1;
- match = of_match_device(of_match_ptr(davinci_spi_of_match),
- &pdev->dev);
+ match = of_match_device(davinci_spi_of_match, &pdev->dev);
if (!match)
return -ENODEV;
@@ -824,7 +823,6 @@ static int spi_davinci_get_pdata(struct platform_device *pdev,
return 0;
}
#else
-#define davinci_spi_of_match NULL
static struct davinci_spi_platform_data
*spi_davinci_get_pdata(struct platform_device *pdev,
struct davinci_spi *dspi)
@@ -864,10 +862,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
dspi = spi_master_get_devdata(master);
- if (dspi == NULL) {
- ret = -ENOENT;
- goto free_master;
- }
if (dev_get_platdata(&pdev->dev)) {
pdata = dev_get_platdata(&pdev->dev);
@@ -908,10 +902,6 @@ static int davinci_spi_probe(struct platform_device *pdev)
goto free_master;
dspi->bitbang.master = master;
- if (dspi->bitbang.master == NULL) {
- ret = -ENODEV;
- goto free_master;
- }
dspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(dspi->clk)) {
@@ -1040,7 +1030,7 @@ static struct platform_driver davinci_spi_driver = {
.driver = {
.name = "spi_davinci",
.owner = THIS_MODULE,
- .of_match_table = davinci_spi_of_match,
+ .of_match_table = of_match_ptr(davinci_spi_of_match),
},
.probe = davinci_spi_probe,
.remove = davinci_spi_remove,
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
index 9af56cdf1540..1492f5ee9aaa 100644
--- a/drivers/spi/spi-dw-mmio.c
+++ b/drivers/spi/spi-dw-mmio.c
@@ -66,7 +66,7 @@ static int dw_spi_mmio_probe(struct platform_device *pdev)
if (ret)
return ret;
- dws->bus_num = 0;
+ dws->bus_num = pdev->id;
dws->num_cs = 4;
dws->max_freq = clk_get_rate(dwsmmio->clk);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index bf98d63d92b3..712ac5629cd4 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -276,8 +276,7 @@ static void giveback(struct dw_spi *dws)
queue_work(dws->workqueue, &dws->pump_messages);
spin_unlock_irqrestore(&dws->lock, flags);
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
if (!last_transfer->cs_change && dws->cs_control)
@@ -439,12 +438,6 @@ static void pump_transfers(unsigned long data)
if (transfer->speed_hz != speed) {
speed = transfer->speed_hz;
- if (speed > dws->max_freq) {
- printk(KERN_ERR "MRST SPI0: unsupported"
- "freq: %dHz\n", speed);
- message->status = -EIO;
- goto early_exit;
- }
/* clk_div doesn't support odd number */
clk_div = dws->max_freq / speed;
@@ -671,12 +664,6 @@ static int dw_spi_setup(struct spi_device *spi)
return 0;
}
-static void dw_spi_cleanup(struct spi_device *spi)
-{
- struct chip_data *chip = spi_get_ctldata(spi);
- kfree(chip);
-}
-
static int init_queue(struct dw_spi *dws)
{
INIT_LIST_HEAD(&dws->queue);
@@ -806,9 +793,9 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
master->bus_num = dws->bus_num;
master->num_chipselect = dws->num_cs;
- master->cleanup = dw_spi_cleanup;
master->setup = dw_spi_setup;
master->transfer = dw_spi_transfer;
+ master->max_speed_hz = dws->max_freq;
/* Basic HW init */
spi_hw_init(dws);
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
index d4d3cc534792..be44a3eeb5e8 100644
--- a/drivers/spi/spi-efm32.c
+++ b/drivers/spi/spi-efm32.c
@@ -198,7 +198,7 @@ static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
efm32_spi_filltx(ddata);
- init_completion(&ddata->done);
+ reinit_completion(&ddata->done);
efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
@@ -287,17 +287,17 @@ static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
}
-static int efm32_spi_probe_dt(struct platform_device *pdev,
+static void efm32_spi_probe_dt(struct platform_device *pdev,
struct spi_master *master, struct efm32_spi_ddata *ddata)
{
struct device_node *np = pdev->dev.of_node;
u32 location;
int ret;
- if (!np)
- return 1;
-
- ret = of_property_read_u32(np, "location", &location);
+ ret = of_property_read_u32(np, "efm32,location", &location);
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
if (!ret) {
dev_dbg(&pdev->dev, "using location %u\n", location);
} else {
@@ -308,11 +308,6 @@ static int efm32_spi_probe_dt(struct platform_device *pdev,
}
ddata->pdata.location = location;
-
- /* spi core takes care about the bus number using an alias */
- master->bus_num = -1;
-
- return 0;
}
static int efm32_spi_probe(struct platform_device *pdev)
@@ -322,9 +317,14 @@ static int efm32_spi_probe(struct platform_device *pdev)
int ret;
struct spi_master *master;
struct device_node *np = pdev->dev.of_node;
- unsigned int num_cs, i;
+ int num_cs, i;
+
+ if (!np)
+ return -EINVAL;
num_cs = of_gpio_named_count(np, "cs-gpios");
+ if (num_cs < 0)
+ return num_cs;
master = spi_alloc_master(&pdev->dev,
sizeof(*ddata) + num_cs * sizeof(unsigned));
@@ -349,6 +349,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
spin_lock_init(&ddata->lock);
+ init_completion(&ddata->done);
ddata->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(ddata->clk)) {
@@ -415,23 +416,7 @@ static int efm32_spi_probe(struct platform_device *pdev)
goto err;
}
- ret = efm32_spi_probe_dt(pdev, master, ddata);
- if (ret > 0) {
- /* not created by device tree */
- const struct efm32_spi_pdata *pdata =
- dev_get_platdata(&pdev->dev);
-
- if (pdata)
- ddata->pdata = *pdata;
- else
- ddata->pdata.location =
- efm32_spi_get_configured_location(ddata);
-
- master->bus_num = pdev->id;
-
- } else if (ret < 0) {
- goto err_disable_clk;
- }
+ efm32_spi_probe_dt(pdev, master, ddata);
efm32_spi_write32(ddata, 0, REG_IEN);
efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
@@ -487,6 +472,9 @@ static int efm32_spi_remove(struct platform_device *pdev)
static const struct of_device_id efm32_spi_dt_ids[] = {
{
+ .compatible = "energymicro,efm32-spi",
+ }, {
+ /* doesn't follow the "vendor,device" scheme, don't use */
.compatible = "efm32,spi",
}, {
/* sentinel */
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
index 1bfaed6e4073..2f675d32df0e 100644
--- a/drivers/spi/spi-ep93xx.c
+++ b/drivers/spi/spi-ep93xx.c
@@ -73,8 +73,6 @@
* @clk: clock for the controller
* @regs_base: pointer to ioremap()'d registers
* @sspdr_phys: physical address of the SSPDR register
- * @min_rate: minimum clock rate (in Hz) supported by the controller
- * @max_rate: maximum clock rate (in Hz) supported by the controller
* @wait: wait here until given transfer is completed
* @current_msg: message that is currently processed (or %NULL if none)
* @tx: current byte in transfer to transmit
@@ -95,8 +93,6 @@ struct ep93xx_spi {
struct clk *clk;
void __iomem *regs_base;
unsigned long sspdr_phys;
- unsigned long min_rate;
- unsigned long max_rate;
struct completion wait;
struct spi_message *current_msg;
size_t tx;
@@ -199,9 +195,9 @@ static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
* @div_scr: pointer to return the scr divider
*/
static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
- unsigned long rate,
- u8 *div_cpsr, u8 *div_scr)
+ u32 rate, u8 *div_cpsr, u8 *div_scr)
{
+ struct spi_master *master = platform_get_drvdata(espi->pdev);
unsigned long spi_clk_rate = clk_get_rate(espi->clk);
int cpsr, scr;
@@ -210,7 +206,7 @@ static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
* controller. Note that minimum value is already checked in
* ep93xx_spi_transfer_one_message().
*/
- rate = clamp(rate, espi->min_rate, espi->max_rate);
+ rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
/*
* Calculate divisors so that we can get speed according the
@@ -735,13 +731,6 @@ static int ep93xx_spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct ep93xx_spi *espi = spi_master_get_devdata(master);
- struct spi_transfer *t;
-
- /* first validate each transfer */
- list_for_each_entry(t, &msg->transfers, transfer_list) {
- if (t->speed_hz < espi->min_rate)
- return -EINVAL;
- }
msg->state = NULL;
msg->status = 0;
@@ -917,8 +906,8 @@ static int ep93xx_spi_probe(struct platform_device *pdev)
* Calculate maximum and minimum supported clock rates
* for the controller.
*/
- espi->max_rate = clk_get_rate(espi->clk) / 2;
- espi->min_rate = clk_get_rate(espi->clk) / (254 * 256);
+ master->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
espi->pdev = pdev;
espi->sspdr_phys = res->start + SSPDR;
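
Advertising the controller limits through master->min_speed_hz and master->max_speed_hz, as the ep93xx change does, leaves a single place where the requested rate has to be bounded before a divider is derived. A sketch of that pattern, with foo-prefixed placeholder register math rather than the ep93xx cpsr/scr calculation:

#include <linux/clk.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>

static u32 foo_pick_rate(struct spi_master *master, struct clk *refclk,
			 u32 requested_hz, u32 *divider)
{
	unsigned long ref = clk_get_rate(refclk);
	u32 rate;

	/* Bound the request by what probe() published to the core. */
	rate = clamp(requested_hz, master->min_speed_hz, master->max_speed_hz);

	/* Round the divider up so the bus never runs faster than asked. */
	*divider = DIV_ROUND_UP(ref, rate);

	return ref / *divider;	/* rate actually programmed */
}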
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
index dd5bd468e962..09965f069a1c 100644
--- a/drivers/spi/spi-falcon.c
+++ b/drivers/spi/spi-falcon.c
@@ -312,9 +312,6 @@ static int falcon_sflash_setup(struct spi_device *spi)
unsigned int i;
unsigned long flags;
- if (spi->chip_select > 0)
- return -ENODEV;
-
spin_lock_irqsave(&ebu_lock, flags);
if (spi->max_speed_hz >= CLOCK_100M) {
@@ -422,9 +419,7 @@ static int falcon_sflash_probe(struct platform_device *pdev)
priv->master = master;
master->mode_bits = SPI_MODE_3;
- master->num_chipselect = 1;
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->bus_num = -1;
master->setup = falcon_sflash_setup;
master->prepare_transfer_hardware = falcon_sflash_prepare_xfer;
master->transfer_one_message = falcon_sflash_xfer_one;
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index ec79f726672a..d565eeee3bd8 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
+#include <linux/regmap.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/io.h>
@@ -108,11 +109,11 @@ struct fsl_dspi {
struct spi_bitbang bitbang;
struct platform_device *pdev;
- void __iomem *base;
+ struct regmap *regmap;
int irq;
- struct clk *clk;
+ struct clk *clk;
- struct spi_transfer *cur_transfer;
+ struct spi_transfer *cur_transfer;
struct chip_data *cur_chip;
size_t len;
void *tx;
@@ -123,24 +124,17 @@ struct fsl_dspi {
u8 cs;
u16 void_write_data;
- wait_queue_head_t waitq;
- u32 waitflags;
+ wait_queue_head_t waitq;
+ u32 waitflags;
};
static inline int is_double_byte_mode(struct fsl_dspi *dspi)
{
- return ((readl(dspi->base + SPI_CTAR(dspi->cs)) & SPI_FRAME_BITS_MASK)
- == SPI_FRAME_BITS(8)) ? 0 : 1;
-}
+ unsigned int val;
-static void set_bit_mode(struct fsl_dspi *dspi, unsigned char bits)
-{
- u32 temp;
+ regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
- temp = readl(dspi->base + SPI_CTAR(dspi->cs));
- temp &= ~SPI_FRAME_BITS_MASK;
- temp |= SPI_FRAME_BITS(bits);
- writel(temp, dspi->base + SPI_CTAR(dspi->cs));
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
}
static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
@@ -188,7 +182,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
*/
if (tx_word && (dspi->len == 1)) {
dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
- set_bit_mode(dspi, 8);
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
tx_word = 0;
}
@@ -238,7 +233,8 @@ static int dspi_transfer_write(struct fsl_dspi *dspi)
dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
}
- writel(dspi_pushr, dspi->base + SPI_PUSHR);
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
+
tx_count++;
}
@@ -253,17 +249,23 @@ static int dspi_transfer_read(struct fsl_dspi *dspi)
while ((dspi->rx < dspi->rx_end)
&& (rx_count < DSPI_FIFO_SIZE)) {
if (rx_word) {
+ unsigned int val;
+
if ((dspi->rx_end - dspi->rx) == 1)
break;
- d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
*(u16 *)dspi->rx = d;
dspi->rx += 2;
} else {
- d = SPI_POPR_RXDATA(readl(dspi->base + SPI_POPR));
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
*(u8 *)dspi->rx = d;
dspi->rx++;
@@ -295,13 +297,13 @@ static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
if (!dspi->tx)
dspi->dataflags |= TRAN_STATE_TX_VOID;
- writel(dspi->cur_chip->mcr_val, dspi->base + SPI_MCR);
- writel(dspi->cur_chip->ctar_val, dspi->base + SPI_CTAR(dspi->cs));
- writel(SPI_RSER_EOQFE, dspi->base + SPI_RSER);
+ regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs), dspi->cur_chip->ctar_val);
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
if (t->speed_hz)
- writel(dspi->cur_chip->ctar_val,
- dspi->base + SPI_CTAR(dspi->cs));
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ dspi->cur_chip->ctar_val);
dspi_transfer_write(dspi);
@@ -315,7 +317,9 @@ static int dspi_txrx_transfer(struct spi_device *spi, struct spi_transfer *t)
static void dspi_chipselect(struct spi_device *spi, int value)
{
struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
- u32 pushr = readl(dspi->base + SPI_PUSHR);
+ unsigned int pushr;
+
+ regmap_read(dspi->regmap, SPI_PUSHR, &pushr);
switch (value) {
case BITBANG_CS_ACTIVE:
@@ -326,7 +330,7 @@ static void dspi_chipselect(struct spi_device *spi, int value)
break;
}
- writel(pushr, dspi->base + SPI_PUSHR);
+ regmap_write(dspi->regmap, SPI_PUSHR, pushr);
}
static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
@@ -338,7 +342,8 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
/* Only alloc on first setup */
chip = spi_get_ctldata(spi);
if (chip == NULL) {
- chip = kcalloc(1, sizeof(struct chip_data), GFP_KERNEL);
+ chip = devm_kzalloc(&spi->dev, sizeof(struct chip_data),
+ GFP_KERNEL);
if (!chip)
return -ENOMEM;
}
@@ -349,7 +354,6 @@ static int dspi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
fmsz = spi->bits_per_word - 1;
} else {
pr_err("Invalid wordsize\n");
- kfree(chip);
return -ENODEV;
}
@@ -382,13 +386,15 @@ static irqreturn_t dspi_interrupt(int irq, void *dev_id)
{
struct fsl_dspi *dspi = (struct fsl_dspi *)dev_id;
- writel(SPI_SR_EOQF, dspi->base + SPI_SR);
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
dspi_transfer_read(dspi);
if (!dspi->len) {
if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
- set_bit_mode(dspi, 16);
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+
dspi->waitflags = 1;
wake_up_interruptible(&dspi->waitq);
} else {
@@ -420,7 +426,6 @@ static int dspi_suspend(struct device *dev)
static int dspi_resume(struct device *dev)
{
-
struct spi_master *master = dev_get_drvdata(dev);
struct fsl_dspi *dspi = spi_master_get_devdata(master);
@@ -431,8 +436,13 @@ static int dspi_resume(struct device *dev)
}
#endif /* CONFIG_PM_SLEEP */
-static const struct dev_pm_ops dspi_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(dspi_suspend, dspi_resume)
+static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+
+static struct regmap_config dspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
};
static int dspi_probe(struct platform_device *pdev)
@@ -441,6 +451,7 @@ static int dspi_probe(struct platform_device *pdev)
struct spi_master *master;
struct fsl_dspi *dspi;
struct resource *res;
+ void __iomem *base;
int ret = 0, cs_num, bus_num;
master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
@@ -475,12 +486,24 @@ static int dspi_probe(struct platform_device *pdev)
master->bus_num = bus_num;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dspi->base = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(dspi->base)) {
- ret = PTR_ERR(dspi->base);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
goto out_master_put;
}
+ dspi_regmap_config.lock_arg = dspi;
+ dspi_regmap_config.val_format_endian =
+ of_property_read_bool(np, "big-endian")
+ ? REGMAP_ENDIAN_BIG : REGMAP_ENDIAN_DEFAULT;
+ dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
+ &dspi_regmap_config);
+ if (IS_ERR(dspi->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(dspi->regmap));
+ return PTR_ERR(dspi->regmap);
+ }
+
dspi->irq = platform_get_irq(pdev, 0);
if (dspi->irq < 0) {
dev_err(&pdev->dev, "can't get platform irq\n");
@@ -504,7 +527,7 @@ static int dspi_probe(struct platform_device *pdev)
clk_prepare_enable(dspi->clk);
init_waitqueue_head(&dspi->waitq);
- platform_set_drvdata(pdev, dspi);
+ platform_set_drvdata(pdev, master);
ret = spi_bitbang_start(&dspi->bitbang);
if (ret != 0) {
@@ -525,7 +548,8 @@ out_master_put:
static int dspi_remove(struct platform_device *pdev)
{
- struct fsl_dspi *dspi = platform_get_drvdata(pdev);
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
/* Disconnect from the SPI framework */
spi_bitbang_stop(&dspi->bitbang);
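
Converting the register accessors to a regmap, as the fsl-dspi change does, turns open-coded read-modify-write sequences into regmap_update_bits() calls and lets the regmap core handle the clock around MMIO accesses. A minimal probe-time sketch under assumed FOO_* register names (the config values mirror the ones added above):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

#define FOO_CTRL		0x00	/* hypothetical control register */
#define FOO_CTRL_MODE_MASK	0x3
#define FOO_CTRL_MODE_16BIT	0x1

static const struct regmap_config foo_regmap_config = {
	.reg_bits = 32,
	.val_bits = 32,
	.reg_stride = 4,
};

static struct regmap *foo_map_regs(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return ERR_CAST(base);

	/* "foo" names the clock the regmap core enables around accesses */
	return devm_regmap_init_mmio_clk(&pdev->dev, "foo", base,
					 &foo_regmap_config);
}

/* Read-modify-write without disturbing the other bits in FOO_CTRL. */
static int foo_set_16bit(struct regmap *map)
{
	return regmap_update_bits(map, FOO_CTRL, FOO_CTRL_MODE_MASK,
				  FOO_CTRL_MODE_16BIT);
}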
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 428dc7a6b62e..6fb2b75df821 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -219,13 +219,8 @@ static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
unsigned int len = t->len;
- u8 bits_per_word;
int ret;
- bits_per_word = spi->bits_per_word;
- if (t->bits_per_word)
- bits_per_word = t->bits_per_word;
-
mpc8xxx_spi->len = t->len;
len = roundup(len, 4) / 4;
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
index 0b75f26158ab..e5d45fca3551 100644
--- a/drivers/spi/spi-fsl-lib.c
+++ b/drivers/spi/spi-fsl-lib.c
@@ -200,7 +200,7 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
const void *prop;
int ret = -ENOMEM;
- pinfo = kzalloc(sizeof(*pinfo), GFP_KERNEL);
+ pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
if (!pinfo)
return -ENOMEM;
@@ -215,15 +215,13 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->sysclk = get_brgfreq();
if (pdata->sysclk == -1) {
pdata->sysclk = fsl_get_sys_freq();
- if (pdata->sysclk == -1) {
- ret = -ENODEV;
- goto err;
- }
+ if (pdata->sysclk == -1)
+ return -ENODEV;
}
#else
ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
if (ret)
- goto err;
+ return ret;
#endif
prop = of_get_property(np, "mode", NULL);
@@ -237,8 +235,4 @@ int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
pdata->flags = SPI_CPM_MODE | SPI_CPM1;
return 0;
-
-err:
- kfree(pinfo);
- return ret;
}
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 119f7af94537..f35488ed62a9 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -239,12 +239,6 @@ static int fsl_spi_setup_transfer(struct spi_device *spi,
if (!bits_per_word)
bits_per_word = spi->bits_per_word;
- /* Make sure its a bit width we support [4..16, 32] */
- if ((bits_per_word < 4)
- || ((bits_per_word > 16) && (bits_per_word != 32))
- || (bits_per_word > mpc8xxx_spi->max_bits_per_word))
- return -EINVAL;
-
if (!hz)
hz = spi->max_speed_hz;
@@ -362,18 +356,28 @@ static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
static void fsl_spi_do_one_msg(struct spi_message *m)
{
struct spi_device *spi = m->spi;
- struct spi_transfer *t;
+ struct spi_transfer *t, *first;
unsigned int cs_change;
const int nsecs = 50;
int status;
- cs_change = 1;
- status = 0;
+ /* Don't allow changes if CS is active */
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
list_for_each_entry(t, &m->transfers, transfer_list) {
- if (t->bits_per_word || t->speed_hz) {
- /* Don't allow changes if CS is active */
+ if ((first->bits_per_word != t->bits_per_word) ||
+ (first->speed_hz != t->speed_hz)) {
status = -EINVAL;
+ dev_err(&spi->dev,
+ "bits_per_word/speed_hz should be same for the same SPI transfer\n");
+ return;
+ }
+ }
+ cs_change = 1;
+ status = -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
if (cs_change)
status = fsl_spi_setup_transfer(spi, t);
if (status < 0)
@@ -641,6 +645,10 @@ static struct spi_master * fsl_spi_probe(struct device *dev,
if (mpc8xxx_spi->type == TYPE_GRLIB)
fsl_spi_grlib_probe(dev);
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
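
Publishing the supported word sizes through bits_per_word_mask, as done above, moves the check that fsl_spi_setup_transfer() used to perform into the SPI core: bit (n - 1) of the mask means n-bit words are supported. A small sketch of composing such a mask for a controller with a runtime upper limit (hw_max_bits is a placeholder):

#include <linux/spi/spi.h>

static void foo_set_word_sizes(struct spi_master *master,
			       unsigned int hw_max_bits)
{
	/*
	 * 4..16-bit and 32-bit words in principle, further limited by
	 * what this particular controller revision can handle.
	 */
	master->bits_per_word_mask =
		(SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
		SPI_BPW_RANGE_MASK(1, hw_max_bits);
}

Transfers that request an unsupported bits_per_word are then rejected by the core before the driver sees them.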
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
index 7beeb29472ac..09823076df88 100644
--- a/drivers/spi/spi-gpio.c
+++ b/drivers/spi/spi-gpio.c
@@ -19,7 +19,6 @@
*/
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/of.h>
@@ -250,7 +249,7 @@ static int spi_gpio_setup(struct spi_device *spi)
/*
* ... otherwise, take it from spi->controller_data
*/
- cs = (unsigned int) spi->controller_data;
+ cs = (unsigned int)(uintptr_t) spi->controller_data;
}
if (!spi->controller_state) {
@@ -503,13 +502,12 @@ static int spi_gpio_remove(struct platform_device *pdev)
{
struct spi_gpio *spi_gpio;
struct spi_gpio_platform_data *pdata;
- int status;
spi_gpio = platform_get_drvdata(pdev);
pdata = dev_get_platdata(&pdev->dev);
/* stop() unregisters child devices too */
- status = spi_bitbang_stop(&spi_gpio->bitbang);
+ spi_bitbang_stop(&spi_gpio->bitbang);
if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
gpio_free(SPI_MISO_GPIO);
@@ -518,7 +516,7 @@ static int spi_gpio_remove(struct platform_device *pdev)
gpio_free(SPI_SCK_GPIO);
spi_master_put(spi_gpio->bitbang.master);
- return status;
+ return 0;
}
MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index a5474ef9d2a0..5daff2054ae4 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
@@ -741,7 +740,7 @@ static int spi_imx_transfer(struct spi_device *spi,
spi_imx->count = transfer->len;
spi_imx->txfifo = 0;
- init_completion(&spi_imx->xfer_done);
+ reinit_completion(&spi_imx->xfer_done);
spi_imx_push(spi_imx);
@@ -880,12 +879,12 @@ static int spi_imx_probe(struct platform_device *pdev)
spi_imx->irq = platform_get_irq(pdev, 0);
if (spi_imx->irq < 0) {
- ret = -EINVAL;
+ ret = spi_imx->irq;
goto out_master_put;
}
ret = devm_request_irq(&pdev->dev, spi_imx->irq, spi_imx_isr, 0,
- DRIVER_NAME, spi_imx);
+ dev_name(&pdev->dev), spi_imx);
if (ret) {
dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
goto out_master_put;
@@ -948,8 +947,8 @@ static int spi_imx_remove(struct platform_device *pdev)
spi_bitbang_stop(&spi_imx->bitbang);
writel(0, spi_imx->base + MXC_CSPICTRL);
- clk_disable_unprepare(spi_imx->clk_ipg);
- clk_disable_unprepare(spi_imx->clk_per);
+ clk_unprepare(spi_imx->clk_ipg);
+ clk_unprepare(spi_imx->clk_per);
spi_master_put(master);
return 0;
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
index 5032141eeeec..3822eef2ef9d 100644
--- a/drivers/spi/spi-mpc512x-psc.c
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/of_address.h>
@@ -466,10 +465,8 @@ static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
gpio_set_value(spi->cs_gpio, onoff);
}
-/* bus_num is used only for the case dev->platform_data == NULL */
static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
- u32 size, unsigned int irq,
- s16 bus_num)
+ u32 size, unsigned int irq)
{
struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
struct mpc512x_psc_spi *mps;
@@ -488,7 +485,6 @@ static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
if (pdata == NULL) {
mps->cs_control = mpc512x_spi_cs_control;
- master->bus_num = bus_num;
} else {
mps->cs_control = pdata->cs_control;
master->bus_num = pdata->bus_num;
@@ -574,7 +570,6 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
{
const u32 *regaddr_p;
u64 regaddr64, size64;
- s16 id = -1;
regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
if (!regaddr_p) {
@@ -583,16 +578,8 @@ static int mpc512x_psc_spi_of_probe(struct platform_device *op)
}
regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
- /* get PSC id (0..11, used by port_config) */
- id = of_alias_get_id(op->dev.of_node, "spi");
- if (id < 0) {
- dev_err(&op->dev, "no alias id for %s\n",
- op->dev.of_node->full_name);
- return id;
- }
-
return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
- irq_of_parse_and_map(op->dev.of_node, 0), id);
+ irq_of_parse_and_map(op->dev.of_node, 0));
}
static int mpc512x_psc_spi_of_remove(struct platform_device *op)
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
index 00ba910ab302..3d18d9351185 100644
--- a/drivers/spi/spi-mpc52xx-psc.c
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
index 7c675fe83101..aac2a5ddd964 100644
--- a/drivers/spi/spi-mpc52xx.c
+++ b/drivers/spi/spi-mpc52xx.c
@@ -12,7 +12,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/errno.h>
#include <linux/of_platform.h>
#include <linux/interrupt.h>
@@ -357,20 +356,6 @@ static void mpc52xx_spi_wq(struct work_struct *work)
* spi_master ops
*/
-static int mpc52xx_spi_setup(struct spi_device *spi)
-{
- if (spi->bits_per_word % 8)
- return -EINVAL;
-
- if (spi->mode & ~(SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST))
- return -EINVAL;
-
- if (spi->chip_select >= spi->master->num_chipselect)
- return -EINVAL;
-
- return 0;
-}
-
static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
{
struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
@@ -433,9 +418,9 @@ static int mpc52xx_spi_probe(struct platform_device *op)
goto err_alloc;
}
- master->setup = mpc52xx_spi_setup;
master->transfer = mpc52xx_spi_transfer;
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
master->dev.of_node = op->dev.of_node;
platform_set_drvdata(op, master);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
index 79e5aa2250c8..2884f0c2f5f0 100644
--- a/drivers/spi/spi-mxs.c
+++ b/drivers/spi/spi-mxs.c
@@ -29,7 +29,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_device.h>
@@ -371,7 +370,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
{
struct mxs_spi *spi = spi_master_get_devdata(master);
struct mxs_ssp *ssp = &spi->ssp;
- struct spi_transfer *t, *tmp_t;
+ struct spi_transfer *t;
unsigned int flag;
int status = 0;
@@ -381,7 +380,7 @@ static int mxs_spi_transfer_one(struct spi_master *master,
writel(mxs_spi_cs_to_reg(m->spi->chip_select),
ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
- list_for_each_entry_safe(t, tmp_t, &m->transfers, transfer_list) {
+ list_for_each_entry(t, &m->transfers, transfer_list) {
status = mxs_spi_setup_transfer(m->spi, t);
if (status)
@@ -473,7 +472,7 @@ static int mxs_spi_probe(struct platform_device *pdev)
iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq_err = platform_get_irq(pdev, 0);
if (irq_err < 0)
- return -EINVAL;
+ return irq_err;
base = devm_ioremap_resource(&pdev->dev, iores);
if (IS_ERR(base))
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
index 50406306bc20..16e30de650b0 100644
--- a/drivers/spi/spi-nuc900.c
+++ b/drivers/spi/spi-nuc900.c
@@ -9,7 +9,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -38,7 +37,9 @@
/* usi register bit */
#define ENINT (0x01 << 17)
#define ENFLG (0x01 << 16)
+#define SLEEP (0x0f << 12)
#define TXNUM (0x03 << 8)
+#define TXBITLEN (0x1f << 3)
#define TXNEG (0x01 << 2)
#define RXNEG (0x01 << 1)
#define LSB (0x01 << 10)
@@ -58,11 +59,8 @@ struct nuc900_spi {
unsigned char *rx;
struct clk *clk;
struct spi_master *master;
- struct spi_device *curdev;
- struct device *dev;
struct nuc900_spi_info *pdata;
spinlock_t lock;
- struct resource *res;
};
static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
@@ -119,19 +117,16 @@ static void nuc900_spi_chipsel(struct spi_device *spi, int value)
}
}
-static void nuc900_spi_setup_txnum(struct nuc900_spi *hw,
- unsigned int txnum)
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum)
{
unsigned int val;
unsigned long flags;
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM;
- if (!txnum)
- val &= ~TXNUM;
- else
+ if (txnum)
val |= txnum << 0x08;
__raw_writel(val, hw->regs + USI_CNT);
@@ -148,7 +143,7 @@ static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN;
val |= (txbitlen << 0x03);
@@ -287,12 +282,11 @@ static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
spin_lock_irqsave(&hw->lock, flags);
- val = __raw_readl(hw->regs + USI_CNT);
+ val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP;
if (sleep)
val |= (sleep << 12);
- else
- val &= ~(0x0f << 12);
+
__raw_writel(val, hw->regs + USI_CNT);
spin_unlock_irqrestore(&hw->lock, flags);
@@ -338,6 +332,7 @@ static int nuc900_spi_probe(struct platform_device *pdev)
{
struct nuc900_spi *hw;
struct spi_master *master;
+ struct resource *res;
int err = 0;
master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
@@ -349,7 +344,6 @@ static int nuc900_spi_probe(struct platform_device *pdev)
hw = spi_master_get_devdata(master);
hw->master = master;
hw->pdata = dev_get_platdata(&pdev->dev);
- hw->dev = &pdev->dev;
if (hw->pdata == NULL) {
dev_err(&pdev->dev, "No platform data supplied\n");
@@ -361,14 +355,16 @@ static int nuc900_spi_probe(struct platform_device *pdev)
init_completion(&hw->done);
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = hw->pdata->bus_num;
hw->bitbang.master = hw->master;
hw->bitbang.chipselect = nuc900_spi_chipsel;
hw->bitbang.txrx_bufs = nuc900_spi_txrx;
- hw->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hw->regs = devm_ioremap_resource(&pdev->dev, hw->res);
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(hw->regs)) {
err = PTR_ERR(hw->regs);
goto err_pdata;
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
index f7c896e2981e..8998d11c7238 100644
--- a/drivers/spi/spi-oc-tiny.c
+++ b/drivers/spi/spi-oc-tiny.c
@@ -15,7 +15,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <linux/module.h>
@@ -267,8 +266,6 @@ static int tiny_spi_probe(struct platform_device *pdev)
/* setup the state for the bitbang driver */
hw->bitbang.master = master;
- if (!hw->bitbang.master)
- return err;
hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
hw->bitbang.chipselect = tiny_spi_chipselect;
hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
index 67249a48b391..c5e2f718eebd 100644
--- a/drivers/spi/spi-octeon.c
+++ b/drivers/spi/spi-octeon.c
@@ -11,7 +11,6 @@
#include <linux/spi/spi.h>
#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/init.h>
#include <linux/io.h>
#include <linux/of.h>
@@ -33,13 +32,6 @@ struct octeon_spi {
u64 cs_enax;
};
-struct octeon_spi_setup {
- u32 max_speed_hz;
- u8 chip_select;
- u8 mode;
- u8 bits_per_word;
-};
-
static void octeon_spi_wait_ready(struct octeon_spi *p)
{
union cvmx_mpi_sts mpi_sts;
@@ -57,6 +49,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
struct spi_transfer *xfer,
bool last_xfer)
{
+ struct spi_device *spi = msg->spi;
union cvmx_mpi_cfg mpi_cfg;
union cvmx_mpi_tx mpi_tx;
unsigned int clkdiv;
@@ -68,18 +61,11 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
int len;
int i;
- struct octeon_spi_setup *msg_setup = spi_get_ctldata(msg->spi);
-
- speed_hz = msg_setup->max_speed_hz;
- mode = msg_setup->mode;
+ mode = spi->mode;
cpha = mode & SPI_CPHA;
cpol = mode & SPI_CPOL;
- if (xfer->speed_hz)
- speed_hz = xfer->speed_hz;
-
- if (speed_hz > OCTEON_SPI_MAX_CLOCK_HZ)
- speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+ speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
@@ -93,8 +79,8 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
mpi_cfg.s.cslate = cpha ? 1 : 0;
mpi_cfg.s.enable = 1;
- if (msg_setup->chip_select < 4)
- p->cs_enax |= 1ull << (12 + msg_setup->chip_select);
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
mpi_cfg.u64 |= p->cs_enax;
if (mpi_cfg.u64 != p->last_cfg) {
@@ -114,7 +100,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
mpi_tx.s.leavecs = 1;
mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
@@ -139,7 +125,7 @@ static int octeon_spi_do_transfer(struct octeon_spi *p,
}
mpi_tx.u64 = 0;
- mpi_tx.s.csid = msg_setup->chip_select;
+ mpi_tx.s.csid = spi->chip_select;
if (last_xfer)
mpi_tx.s.leavecs = xfer->cs_change;
else
@@ -169,17 +155,9 @@ static int octeon_spi_transfer_one_message(struct spi_master *master,
int status = 0;
struct spi_transfer *xfer;
- /*
- * We better have set the configuration via a call to .setup
- * before we get here.
- */
- if (spi_get_ctldata(msg->spi) == NULL) {
- status = -EINVAL;
- goto err;
- }
-
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- bool last_xfer = &xfer->transfer_list == msg->transfers.prev;
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
if (r < 0) {
status = r;
@@ -194,41 +172,6 @@ err:
return status;
}
-static struct octeon_spi_setup *octeon_spi_new_setup(struct spi_device *spi)
-{
- struct octeon_spi_setup *setup = kzalloc(sizeof(*setup), GFP_KERNEL);
- if (!setup)
- return NULL;
-
- setup->max_speed_hz = spi->max_speed_hz;
- setup->chip_select = spi->chip_select;
- setup->mode = spi->mode;
- setup->bits_per_word = spi->bits_per_word;
- return setup;
-}
-
-static int octeon_spi_setup(struct spi_device *spi)
-{
- struct octeon_spi_setup *new_setup;
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
-
- new_setup = octeon_spi_new_setup(spi);
- if (!new_setup)
- return -ENOMEM;
-
- spi_set_ctldata(spi, new_setup);
- kfree(old_setup);
-
- return 0;
-}
-
-static void octeon_spi_cleanup(struct spi_device *spi)
-{
- struct octeon_spi_setup *old_setup = spi_get_ctldata(spi);
- spi_set_ctldata(spi, NULL);
- kfree(old_setup);
-}
-
static int octeon_spi_probe(struct platform_device *pdev)
{
struct resource *res_mem;
@@ -257,8 +200,6 @@ static int octeon_spi_probe(struct platform_device *pdev)
p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
resource_size(res_mem));
- /* Dynamic bus numbering */
- master->bus_num = -1;
master->num_chipselect = 4;
master->mode_bits = SPI_CPHA |
SPI_CPOL |
@@ -266,10 +207,9 @@ static int octeon_spi_probe(struct platform_device *pdev)
SPI_LSB_FIRST |
SPI_3WIRE;
- master->setup = octeon_spi_setup;
- master->cleanup = octeon_spi_cleanup;
master->transfer_one_message = octeon_spi_transfer_one_message;
master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
master->dev.of_node = pdev->dev.of_node;
err = devm_spi_register_master(&pdev->dev, master);
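
With the per-device octeon_spi_setup ctldata gone, the transfer path above reads everything it needs from the spi_device and spi_transfer at transfer time, with xfer->speed_hz falling back to the device limit when it is zero. A sketch of deriving per-transfer configuration that way (struct foo_xfer_cfg and its fields are illustrative):

#include <linux/spi/spi.h>

struct foo_xfer_cfg {
	bool cpha;
	bool cpol;
	u8 cs;
	u32 speed_hz;
};

static void foo_fill_xfer_cfg(struct spi_device *spi,
			      struct spi_transfer *xfer,
			      struct foo_xfer_cfg *cfg)
{
	cfg->cpha = !!(spi->mode & SPI_CPHA);
	cfg->cpol = !!(spi->mode & SPI_CPOL);
	cfg->cs = spi->chip_select;
	/* A zero xfer->speed_hz means "use the device default". */
	cfg->speed_hz = xfer->speed_hz ?: spi->max_speed_hz;
}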
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
index 0d32054bfc0d..e7ffcded4e14 100644
--- a/drivers/spi/spi-omap-100k.c
+++ b/drivers/spi/spi-omap-100k.c
@@ -83,15 +83,11 @@
#define SPI_SHUTDOWN 1
struct omap1_spi100k {
- struct spi_master *master;
struct clk *ick;
struct clk *fck;
/* Virtual base address of the controller */
void __iomem *base;
-
- /* State of the SPI */
- unsigned int state;
};
struct omap1_spi100k_cs {
@@ -99,13 +95,6 @@ struct omap1_spi100k_cs {
int word_len;
};
-#define MOD_REG_BIT(val, mask, set) do { \
- if (set) \
- val |= mask; \
- else \
- val &= ~mask; \
-} while (0)
-
static void spi100k_enable_clock(struct spi_master *master)
{
unsigned int val;
@@ -139,7 +128,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
}
spi100k_enable_clock(master);
- writew( data , spi100k->base + SPI_TX_MSB);
+ writew(data , spi100k->base + SPI_TX_MSB);
writew(SPI_CTRL_SEN(0) |
SPI_CTRL_WORD_SIZE(len) |
@@ -147,7 +136,8 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
spi100k->base + SPI_CTRL);
/* Wait for bit ack send change */
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
udelay(1000);
spi100k_disable_clock(master);
@@ -155,7 +145,7 @@ static void spi100k_write_data(struct spi_master *master, int len, int data)
static int spi100k_read_data(struct spi_master *master, int len)
{
- int dataH,dataL;
+ int dataH, dataL;
struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
/* Always do at least 16 bits */
@@ -168,7 +158,8 @@ static int spi100k_read_data(struct spi_master *master, int len)
SPI_CTRL_RD,
spi100k->base + SPI_CTRL);
- while((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD);
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
udelay(1000);
dataL = readw(spi100k->base + SPI_RX_LSB);
@@ -204,12 +195,10 @@ static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
static unsigned
omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct omap1_spi100k *spi100k;
struct omap1_spi100k_cs *cs = spi->controller_state;
unsigned int count, c;
int word_len;
- spi100k = spi_master_get_devdata(spi->master);
count = xfer->len;
c = count;
word_len = cs->word_len;
@@ -221,12 +210,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=1;
+ c -= 1;
if (xfer->tx_buf != NULL)
spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
*rx++ = spi100k_read_data(spi->master, word_len);
- } while(c);
+ } while (c);
} else if (word_len <= 16) {
u16 *rx;
const u16 *tx;
@@ -234,12 +223,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=2;
+ c -= 2;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx++);
+ spi100k_write_data(spi->master, word_len, *tx++);
if (xfer->rx_buf != NULL)
- *rx++ = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
} else if (word_len <= 32) {
u32 *rx;
const u32 *tx;
@@ -247,12 +236,12 @@ omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
rx = xfer->rx_buf;
tx = xfer->tx_buf;
do {
- c-=4;
+ c -= 4;
if (xfer->tx_buf != NULL)
- spi100k_write_data(spi->master,word_len, *tx);
+ spi100k_write_data(spi->master, word_len, *tx);
if (xfer->rx_buf != NULL)
- *rx = spi100k_read_data(spi->master,word_len);
- } while(c);
+ *rx = spi100k_read_data(spi->master, word_len);
+ } while (c);
}
return count - c;
}
@@ -294,7 +283,7 @@ static int omap1_spi100k_setup(struct spi_device *spi)
spi100k = spi_master_get_devdata(spi->master);
if (!cs) {
- cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
if (!cs)
return -ENOMEM;
cs->base = spi100k->base + spi->chip_select * 0x14;
@@ -411,14 +400,14 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
if (!pdev->id)
return -EINVAL;
- master = spi_alloc_master(&pdev->dev, sizeof *spi100k);
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
if (master == NULL) {
dev_dbg(&pdev->dev, "master allocation failed\n");
return -ENOMEM;
}
if (pdev->id != -1)
- master->bus_num = pdev->id;
+ master->bus_num = pdev->id;
master->setup = omap1_spi100k_setup;
master->transfer_one_message = omap1_spi100k_transfer_one_message;
@@ -434,7 +423,6 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
spi100k = spi_master_get_devdata(master);
- spi100k->master = master;
/*
* The memory region base address is taken as the platform_data.
@@ -461,8 +449,6 @@ static int omap1_spi100k_probe(struct platform_device *pdev)
if (status < 0)
goto err;
- spi100k->state = SPI_RUNNING;
-
return status;
err:
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
index 9313fd3b413d..be2a2e108e2f 100644
--- a/drivers/spi/spi-omap-uwire.c
+++ b/drivers/spi/spi-omap-uwire.c
@@ -99,7 +99,6 @@ struct uwire_spi {
};
struct uwire_state {
- unsigned bits_per_word;
unsigned div1_idx;
};
@@ -210,9 +209,8 @@ static void uwire_chipselect(struct spi_device *spi, int value)
static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
{
- struct uwire_state *ust = spi->controller_state;
unsigned len = t->len;
- unsigned bits = ust->bits_per_word;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
unsigned bytes;
u16 val, w;
int status = 0;
@@ -220,10 +218,6 @@ static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
if (!t->tx_buf && !t->rx_buf)
return 0;
- /* Microwire doesn't read and write concurrently */
- if (t->tx_buf && t->rx_buf)
- return -EPERM;
-
w = spi->chip_select << 10;
w |= CS_CMD;
@@ -322,7 +316,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
struct uwire_state *ust = spi->controller_state;
struct uwire_spi *uwire;
unsigned flags = 0;
- unsigned bits;
unsigned hz;
unsigned long rate;
int div1_idx;
@@ -332,23 +325,6 @@ static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
uwire = spi_master_get_devdata(spi->master);
- if (spi->chip_select > 3) {
- pr_debug("%s: cs%d?\n", dev_name(&spi->dev), spi->chip_select);
- status = -ENODEV;
- goto done;
- }
-
- bits = spi->bits_per_word;
- if (t != NULL && t->bits_per_word)
- bits = t->bits_per_word;
-
- if (bits > 16) {
- pr_debug("%s: wordsize %d?\n", dev_name(&spi->dev), bits);
- status = -ENODEV;
- goto done;
- }
- ust->bits_per_word = bits;
-
/* mode 0..3, clock inverted separately;
* standard nCS signaling;
* don't treat DI=high as "not ready"
@@ -502,6 +478,7 @@ static int uwire_probe(struct platform_device *pdev)
status = PTR_ERR(uwire->ck);
dev_dbg(&pdev->dev, "no functional clock?\n");
spi_master_put(master);
+ iounmap(uwire_base);
return status;
}
clk_enable(uwire->ck);
@@ -515,7 +492,7 @@ static int uwire_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
-
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
master->flags = SPI_MASTER_HALF_DUPLEX;
master->bus_num = 2; /* "official" */
@@ -539,14 +516,13 @@ static int uwire_probe(struct platform_device *pdev)
static int uwire_remove(struct platform_device *pdev)
{
struct uwire_spi *uwire = platform_get_drvdata(pdev);
- int status;
// FIXME remove all child devices, somewhere ...
- status = spi_bitbang_stop(&uwire->bitbang);
+ spi_bitbang_stop(&uwire->bitbang);
uwire_off(uwire);
iounmap(uwire_base);
- return status;
+ return 0;
}
/* work with hotplug and coldplug */
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index a72127f08e39..2941c5b96ebc 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -22,7 +22,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/device.h>
@@ -45,6 +44,7 @@
#include <linux/platform_data/spi-omap2-mcspi.h>
#define OMAP2_MCSPI_MAX_FREQ 48000000
+#define OMAP2_MCSPI_MAX_DIVIDER 4096
#define OMAP2_MCSPI_MAX_FIFODEPTH 64
#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
#define SPI_AUTOSUSPEND_TIMEOUT 2000
@@ -89,6 +89,7 @@
#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
@@ -96,6 +97,7 @@
#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
@@ -149,7 +151,7 @@ struct omap2_mcspi_cs {
int word_len;
struct list_head node;
/* Context save and restore shadow register */
- u32 chconf0;
+ u32 chconf0, chctrl0;
};
static inline void mcspi_write_reg(struct spi_master *master,
@@ -230,10 +232,16 @@ static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
u32 l;
- l = enable ? OMAP2_MCSPI_CHCTRL_EN : 0;
- mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, l);
+ l = cs->chctrl0;
+ if (enable)
+ l |= OMAP2_MCSPI_CHCTRL_EN;
+ else
+ l &= ~OMAP2_MCSPI_CHCTRL_EN;
+ cs->chctrl0 = l;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
/* Flash post-writes */
mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
}
@@ -840,7 +848,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
struct omap2_mcspi_cs *cs = spi->controller_state;
struct omap2_mcspi *mcspi;
struct spi_master *spi_cntrl;
- u32 l = 0, div = 0;
+ u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
u8 word_len = spi->bits_per_word;
u32 speed_hz = spi->max_speed_hz;
@@ -856,7 +864,17 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
speed_hz = t->speed_hz;
speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
- div = omap2_mcspi_calc_divisor(speed_hz);
+ if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+ clkd = omap2_mcspi_calc_divisor(speed_hz);
+ speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+ clkg = 0;
+ } else {
+ div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+ speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+ }
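+
+ /*
+ * Illustrative numbers (not part of the original patch): with the
+ * 48 MHz reference clock, a requested speed of 10 MHz takes the
+ * one-clock-granularity path above: div = 5, so clkd = 4, extclk = 0,
+ * and the effective rate becomes 48 MHz / 5 = 9.6 MHz. Only requests
+ * below 48 MHz / 4096 (about 11.7 kHz) fall back to the legacy
+ * power-of-two divider.
+ */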
l = mcspi_cached_chconf0(spi);
@@ -885,7 +903,16 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
/* set clock divisor */
l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
- l |= div << 2;
+ l |= clkd << 2;
+
+ /* set clock granularity */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+ l |= clkg;
+ if (clkg) {
+ cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+ cs->chctrl0 |= extclk << 8;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ }
/* set SPI mode 0..3 */
if (spi->mode & SPI_CPOL)
@@ -900,7 +927,7 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
mcspi_write_chconf0(spi, l);
dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
- OMAP2_MCSPI_MAX_FREQ >> div,
+ speed_hz,
(spi->mode & SPI_CPHA) ? "trailing" : "leading",
(spi->mode & SPI_CPOL) ? "inverted" : "normal");
@@ -972,6 +999,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
cs->base = mcspi->base + spi->chip_select * 0x14;
cs->phys = mcspi->phys + spi->chip_select * 0x14;
cs->chconf0 = 0;
+ cs->chctrl0 = 0;
spi->controller_state = cs;
/* Link this to context save list */
list_add_tail(&cs->node, &ctx->cs);
@@ -1057,12 +1085,15 @@ static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
status = -EINVAL;
break;
}
- if (par_override || t->speed_hz || t->bits_per_word) {
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
par_override = 1;
status = omap2_mcspi_setup_transfer(spi, t);
if (status < 0)
break;
- if (!t->speed_hz && !t->bits_per_word)
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
par_override = 0;
}
if (cd && cd->cs_per_word) {
@@ -1176,16 +1207,12 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
m->actual_length = 0;
m->status = 0;
- /* reject invalid messages and transfers */
- if (list_empty(&m->transfers))
- return -EINVAL;
list_for_each_entry(t, &m->transfers, transfer_list) {
const void *tx_buf = t->tx_buf;
void *rx_buf = t->rx_buf;
unsigned len = t->len;
- if (t->speed_hz > OMAP2_MCSPI_MAX_FREQ
- || (len && !(rx_buf || tx_buf))) {
+ if ((len && !(rx_buf || tx_buf))) {
dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
t->speed_hz,
len,
@@ -1194,12 +1221,6 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
t->bits_per_word);
return -EINVAL;
}
- if (t->speed_hz && t->speed_hz < (OMAP2_MCSPI_MAX_FREQ >> 15)) {
- dev_dbg(mcspi->dev, "speed_hz %d below minimum %d Hz\n",
- t->speed_hz,
- OMAP2_MCSPI_MAX_FREQ >> 15);
- return -EINVAL;
- }
if (m->is_dma_mapped || len < DMA_MIN_BYTES)
continue;
@@ -1311,6 +1332,8 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
master->transfer_one_message = omap2_mcspi_transfer_one_message;
master->cleanup = omap2_mcspi_cleanup;
master->dev.of_node = node;
+ master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+ master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
platform_set_drvdata(pdev, master);
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 7f2121fe2622..d018a4aac3a1 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
@@ -43,8 +42,6 @@
struct orion_spi {
struct spi_master *master;
void __iomem *base;
- unsigned int max_speed;
- unsigned int min_speed;
struct clk *clk;
};
@@ -75,23 +72,6 @@ orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
writel(val, reg_addr);
}
-static int orion_spi_set_transfer_size(struct orion_spi *orion_spi, int size)
-{
- if (size == 16) {
- orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else if (size == 8) {
- orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
- ORION_SPI_IF_8_16_BIT_MODE);
- } else {
- pr_debug("Bad bits per word value %d (only 8 or 16 are allowed).\n",
- size);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
{
u32 tclk_hz;
@@ -170,7 +150,14 @@ orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
if (rc)
return rc;
- return orion_spi_set_transfer_size(orion_spi, bits_per_word);
+ if (bits_per_word == 16)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+
+ return 0;
}
static void orion_spi_set_cs(struct orion_spi *orion_spi, int enable)
@@ -260,11 +247,9 @@ orion_spi_write_read_16bit(struct spi_device *spi,
static unsigned int
orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
{
- struct orion_spi *orion_spi;
unsigned int count;
int word_len;
- orion_spi = spi_master_get_devdata(spi->master);
word_len = spi->bits_per_word;
count = xfer->len;
@@ -310,27 +295,6 @@ static int orion_spi_transfer_one_message(struct spi_master *master,
goto msg_done;
list_for_each_entry(t, &m->transfers, transfer_list) {
- /* make sure buffer length is even when working in 16
- * bit mode*/
- if ((t->bits_per_word == 16) && (t->len & 1)) {
- dev_err(&spi->dev,
- "message rejected : "
- "odd data length %d while in 16 bit mode\n",
- t->len);
- status = -EIO;
- goto msg_done;
- }
-
- if (t->speed_hz && t->speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev,
- "message rejected : "
- "device min speed (%d Hz) exceeds "
- "required transfer speed (%d Hz)\n",
- orion_spi->min_speed, t->speed_hz);
- status = -EIO;
- goto msg_done;
- }
-
if (par_override || t->speed_hz || t->bits_per_word) {
par_override = 1;
status = orion_spi_setup_transfer(spi, t);
@@ -375,28 +339,6 @@ static int orion_spi_reset(struct orion_spi *orion_spi)
return 0;
}
-static int orion_spi_setup(struct spi_device *spi)
-{
- struct orion_spi *orion_spi;
-
- orion_spi = spi_master_get_devdata(spi->master);
-
- if ((spi->max_speed_hz == 0)
- || (spi->max_speed_hz > orion_spi->max_speed))
- spi->max_speed_hz = orion_spi->max_speed;
-
- if (spi->max_speed_hz < orion_spi->min_speed) {
- dev_err(&spi->dev, "setup: requested speed too low %d Hz\n",
- spi->max_speed_hz);
- return -EINVAL;
- }
-
- /*
- * baudrate & width will be set orion_spi_setup_transfer
- */
- return 0;
-}
-
static int orion_spi_probe(struct platform_device *pdev)
{
struct spi_master *master;
@@ -425,9 +367,9 @@ static int orion_spi_probe(struct platform_device *pdev)
/* we support only mode 0, and no options */
master->mode_bits = SPI_CPHA | SPI_CPOL;
- master->setup = orion_spi_setup;
master->transfer_one_message = orion_spi_transfer_one_message;
master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
platform_set_drvdata(pdev, master);
@@ -443,8 +385,8 @@ static int orion_spi_probe(struct platform_device *pdev)
clk_prepare(spi->clk);
clk_enable(spi->clk);
tclk_hz = clk_get_rate(spi->clk);
- spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
- spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
+ master->max_speed_hz = DIV_ROUND_UP(tclk_hz, 4);
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, 30);
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
spi->base = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index 2789b452e711..51d99779682f 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -459,9 +459,8 @@ static void giveback(struct pl022 *pl022)
struct spi_transfer *last_transfer;
pl022->next_msg_cs_active = false;
- last_transfer = list_entry(pl022->cur_msg->transfers.prev,
- struct spi_transfer,
- transfer_list);
+ last_transfer = list_last_entry(&pl022->cur_msg->transfers,
+ struct spi_transfer, transfer_list);
/* Delay if requested before any change in chip select */
if (last_transfer->delay_usecs)
@@ -2109,8 +2108,6 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
GFP_KERNEL);
- pinctrl_pm_select_default_state(dev);
-
/*
* Bus Number Which has been Assigned to this SSP controller
* on this board
@@ -2183,13 +2180,7 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
goto err_no_clk;
}
- status = clk_prepare(pl022->clk);
- if (status) {
- dev_err(&adev->dev, "could not prepare SSP/SPI bus clock\n");
- goto err_clk_prep;
- }
-
- status = clk_enable(pl022->clk);
+ status = clk_prepare_enable(pl022->clk);
if (status) {
dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
goto err_no_clk_en;
@@ -2250,10 +2241,8 @@ static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
if (platform_info->enable_dma)
pl022_dma_remove(pl022);
err_no_irq:
- clk_disable(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
err_no_clk_en:
- clk_unprepare(pl022->clk);
- err_clk_prep:
err_no_clk:
err_no_ioremap:
amba_release_regions(adev);
@@ -2281,42 +2270,13 @@ pl022_remove(struct amba_device *adev)
if (pl022->master_info->enable_dma)
pl022_dma_remove(pl022);
- clk_disable(pl022->clk);
- clk_unprepare(pl022->clk);
+ clk_disable_unprepare(pl022->clk);
amba_release_regions(adev);
tasklet_disable(&pl022->pump_transfers);
return 0;
}
-#if defined(CONFIG_SUSPEND) || defined(CONFIG_PM_RUNTIME)
-/*
- * These two functions are used from both suspend/resume and
- * the runtime counterparts to handle external resources like
- * clocks, pins and regulators when going to sleep.
- */
-static void pl022_suspend_resources(struct pl022 *pl022, bool runtime)
-{
- clk_disable(pl022->clk);
-
- if (runtime)
- pinctrl_pm_select_idle_state(&pl022->adev->dev);
- else
- pinctrl_pm_select_sleep_state(&pl022->adev->dev);
-}
-
-static void pl022_resume_resources(struct pl022 *pl022, bool runtime)
-{
- /* First go to the default state */
- pinctrl_pm_select_default_state(&pl022->adev->dev);
- if (!runtime)
- /* Then let's idle the pins until the next transfer happens */
- pinctrl_pm_select_idle_state(&pl022->adev->dev);
-
- clk_enable(pl022->clk);
-}
-#endif
-
-#ifdef CONFIG_SUSPEND
+#ifdef CONFIG_PM_SLEEP
static int pl022_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
@@ -2328,8 +2288,13 @@ static int pl022_suspend(struct device *dev)
return ret;
}
- pm_runtime_get_sync(dev);
- pl022_suspend_resources(pl022, false);
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ spi_master_resume(pl022->master);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
dev_dbg(dev, "suspended\n");
return 0;
@@ -2340,8 +2305,9 @@ static int pl022_resume(struct device *dev)
struct pl022 *pl022 = dev_get_drvdata(dev);
int ret;
- pl022_resume_resources(pl022, false);
- pm_runtime_put(dev);
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "problem resuming\n");
/* Start the queue running */
ret = spi_master_resume(pl022->master);
@@ -2352,14 +2318,16 @@ static int pl022_resume(struct device *dev)
return ret;
}
-#endif /* CONFIG_PM */
+#endif
-#ifdef CONFIG_PM_RUNTIME
+#ifdef CONFIG_PM
static int pl022_runtime_suspend(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_suspend_resources(pl022, true);
+ clk_disable_unprepare(pl022->clk);
+ pinctrl_pm_select_idle_state(dev);
+
return 0;
}
@@ -2367,14 +2335,16 @@ static int pl022_runtime_resume(struct device *dev)
{
struct pl022 *pl022 = dev_get_drvdata(dev);
- pl022_resume_resources(pl022, true);
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(pl022->clk);
+
return 0;
}
#endif
static const struct dev_pm_ops pl022_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
- SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+ SET_PM_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
};
static struct vendor_data vendor_arm = {
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
index 5ee56726f8d0..80b8408ac3e3 100644
--- a/drivers/spi/spi-ppc4xx.c
+++ b/drivers/spi/spi-ppc4xx.c
@@ -24,7 +24,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
index 3c0b55125f1e..713af4806f26 100644
--- a/drivers/spi/spi-pxa2xx-dma.c
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -9,7 +9,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
index 2916efc7cfe5..e8a26f25d5c0 100644
--- a/drivers/spi/spi-pxa2xx-pxadma.c
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -18,7 +18,6 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index c702fc536a77..41185d0557fa 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -362,8 +362,7 @@ static void giveback(struct driver_data *drv_data)
drv_data->cur_msg = NULL;
drv_data->cur_transfer = NULL;
- last_transfer = list_entry(msg->transfers.prev,
- struct spi_transfer,
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
transfer_list);
/* Delay if requested before any change in chip select */
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
new file mode 100644
index 000000000000..b032e8885e24
--- /dev/null
+++ b/drivers/spi/spi-qup.c
@@ -0,0 +1,779 @@
+/*
+ * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE (1 << 8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+#define QUP_HW_VERSION_2_1_1 0x20010001
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+/* high speed mode is when the bus rate is greater than 26 MHz */
+#define SPI_HS_MIN_RATE 26000000
+#define SPI_MAX_RATE 50000000
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+struct spi_qup {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *cclk; /* core clock */
+ struct clk *iclk; /* interface clock */
+ int irq;
+ spinlock_t lock;
+
+ int in_fifo_sz;
+ int out_fifo_sz;
+ int in_blk_sz;
+ int out_blk_sz;
+
+ struct spi_transfer *xfer;
+ struct completion done;
+ int error;
+ int w_size; /* bytes per SPI word */
+ int tx_bytes;
+ int rx_bytes;
+};
+
+
+static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
+{
+ u32 opstate = readl_relaxed(controller->base + QUP_STATE);
+
+ return opstate & QUP_STATE_VALID;
+}
+
+static int spi_qup_set_state(struct spi_qup *controller, u32 state)
+{
+ unsigned long loop;
+ u32 cur_state;
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ if (loop)
+ dev_dbg(controller->dev, "invalid state for %ld,us %d\n",
+ loop, state);
+
+ cur_state = readl_relaxed(controller->base + QUP_STATE);
+ /*
+ * Per spec: for PAUSE_STATE to RESET_STATE, two writes
+ * of (b10) are required
+ */
+ if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
+ (state == QUP_STATE_RESET)) {
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ } else {
+ cur_state &= ~QUP_STATE_MASK;
+ cur_state |= state;
+ writel_relaxed(cur_state, controller->base + QUP_STATE);
+ }
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static void spi_qup_fifo_read(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ u8 *rx_buf = xfer->rx_buf;
+ u32 word, state;
+ int idx, shift, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->rx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (0 == (state & QUP_OP_IN_FIFO_NOT_EMPTY))
+ break;
+
+ word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+ if (!rx_buf) {
+ controller->rx_bytes += w_size;
+ continue;
+ }
+
+ for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
+ /*
+ * The data format depends on bytes per SPI word:
+ * 4 bytes: 0x12345678
+ * 2 bytes: 0x00001234
+ * 1 byte : 0x00000012
+ */
+ shift = BITS_PER_BYTE;
+ shift *= (w_size - idx - 1);
+ rx_buf[controller->rx_bytes] = word >> shift;
+ }
+ }
+}
+
+static void spi_qup_fifo_write(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ const u8 *tx_buf = xfer->tx_buf;
+ u32 word, state, data;
+ int idx, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->tx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (state & QUP_OP_OUT_FIFO_FULL)
+ break;
+
+ word = 0;
+ for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
+
+ if (!tx_buf) {
+ controller->tx_bytes += w_size;
+ break;
+ }
+
+ data = tx_buf[controller->tx_bytes];
+ word |= data << (BITS_PER_BYTE * (3 - idx));
+ }
+
+ writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
+ }
+}
+
+static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
+{
+ struct spi_qup *controller = dev_id;
+ struct spi_transfer *xfer;
+ u32 opflags, qup_err, spi_err;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&controller->lock, flags);
+ xfer = controller->xfer;
+ controller->xfer = NULL;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
+ spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
+ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
+ writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
+ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
+
+ if (!xfer) {
+ dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
+ qup_err, spi_err, opflags);
+ return IRQ_HANDLED;
+ }
+
+ if (qup_err) {
+ if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
+ dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
+ dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
+ dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
+ dev_warn(controller->dev, "INPUT_OVER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (spi_err) {
+ if (spi_err & SPI_ERROR_CLK_OVER_RUN)
+ dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_fifo_read(controller, xfer);
+
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_fifo_write(controller, xfer);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->error = error;
+ controller->xfer = xfer;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (controller->rx_bytes == xfer->len || error)
+ complete(&controller->done);
+
+ return IRQ_HANDLED;
+}
+
+
+/* set clock freq ... bits per word */
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ u32 config, iomode, mode;
+ int ret, n_words, w_size;
+
+ if (spi->mode & SPI_LOOP && xfer->len > controller->in_fifo_sz) {
+ dev_err(controller->dev, "too big size for loopback %d > %d\n",
+ xfer->len, controller->in_fifo_sz);
+ return -EIO;
+ }
+
+ ret = clk_set_rate(controller->cclk, xfer->speed_hz);
+ if (ret) {
+ dev_err(controller->dev, "fail to set frequency %d",
+ xfer->speed_hz);
+ return -EIO;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
+ dev_err(controller->dev, "cannot set RESET state\n");
+ return -EIO;
+ }
+
+ w_size = 4;
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+
+ n_words = xfer->len / w_size;
+ controller->w_size = w_size;
+
+ if (n_words <= controller->in_fifo_sz) {
+ mode = QUP_IO_M_MODE_FIFO;
+ writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
+ /* must be zero for FIFO */
+ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ } else {
+ mode = QUP_IO_M_MODE_BLOCK;
+ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ }
+
+ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
+ /* Set input and output transfer mode */
+ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+ iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+
+ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+
+ config = readl_relaxed(controller->base + SPI_CONFIG);
+
+ if (spi->mode & SPI_LOOP)
+ config |= SPI_CONFIG_LOOPBACK;
+ else
+ config &= ~SPI_CONFIG_LOOPBACK;
+
+ if (spi->mode & SPI_CPHA)
+ config &= ~SPI_CONFIG_INPUT_FIRST;
+ else
+ config |= SPI_CONFIG_INPUT_FIRST;
+
+ /*
+ * HS_MODE improves signal stability for spi-clk high rates,
+ * but is invalid in loop back mode.
+ */
+ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
+ config |= SPI_CONFIG_HS_MODE;
+ else
+ config &= ~SPI_CONFIG_HS_MODE;
+
+ writel_relaxed(config, controller->base + SPI_CONFIG);
+
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
+ config |= xfer->bits_per_word - 1;
+ config |= QUP_CONFIG_SPI_MODE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ writel_relaxed(0, controller->base + QUP_OPERATIONAL_MASK);
+ return 0;
+}
+
+static void spi_qup_set_cs(struct spi_device *spi, bool enable)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+
+ u32 iocontrol, mask;
+
+ iocontrol = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+ /* Disable auto CS toggle and use manual */
+ iocontrol &= ~SPI_IO_C_MX_CS_MODE;
+ iocontrol |= SPI_IO_C_FORCE_CS;
+
+ iocontrol &= ~SPI_IO_C_CS_SELECT_MASK;
+ iocontrol |= SPI_IO_C_CS_SELECT(spi->chip_select);
+
+ mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
+
+ if (enable)
+ iocontrol |= mask;
+ else
+ iocontrol &= ~mask;
+
+ writel_relaxed(iocontrol, controller->base + SPI_IO_CONTROL);
+}
+
+static int spi_qup_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ unsigned long timeout, flags;
+ int ret = -EIO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
+ timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
+ timeout = 100 * msecs_to_jiffies(timeout);
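+
+ /*
+ * Illustrative numbers (not part of the original patch): at 1 MHz the
+ * first step yields 1000 bits per millisecond, so a 100-byte (800-bit)
+ * transfer rounds up to 1 ms, and the final value is
+ * 100 * msecs_to_jiffies(1), i.e. a 100x margin over the theoretical
+ * transfer time.
+ */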
+
+ reinit_completion(&controller->done);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set RUN state\n");
+ goto exit;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_PAUSE)) {
+ dev_warn(controller->dev, "cannot set PAUSE state\n");
+ goto exit;
+ }
+
+ spi_qup_fifo_write(controller, xfer);
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set EXECUTE state\n");
+ goto exit;
+ }
+
+ if (!wait_for_completion_timeout(&controller->done, timeout))
+ ret = -ETIMEDOUT;
+exit:
+ spi_qup_set_state(controller, QUP_STATE_RESET);
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = NULL;
+ if (!ret)
+ ret = controller->error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+ return ret;
+}
+
+static int spi_qup_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *iclk, *cclk;
+ struct spi_qup *controller;
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+ u32 data, max_freq, iomode;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ cclk = devm_clk_get(dev, "core");
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
+
+ iclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(iclk))
+ return PTR_ERR(iclk);
+
+ /* This is an optional parameter */
+ if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
+ max_freq = SPI_MAX_RATE;
+
+ if (!max_freq || max_freq > SPI_MAX_RATE) {
+ dev_err(dev, "invalid clock frequency %d\n", max_freq);
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ return ret;
+ }
+
+ data = readl_relaxed(base + QUP_HW_VERSION);
+
+ if (data < QUP_HW_VERSION_2_1_1) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "v.%08x is not supported\n", data);
+ return -ENXIO;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->max_speed_hz = max_freq;
+ master->set_cs = spi_qup_set_cs;
+ master->transfer_one = spi_qup_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ controller = spi_master_get_devdata(master);
+
+ controller->dev = dev;
+ controller->base = base;
+ controller->iclk = iclk;
+ controller->cclk = cclk;
+ controller->irq = irq;
+
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->out_blk_sz = size * 16;
+ else
+ controller->out_blk_sz = 4;
+
+ size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->in_blk_sz = size * 16;
+ else
+ controller->in_blk_sz = 4;
+
+ size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
+ controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
+
+ size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
+ controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
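+
+ /*
+ * Illustrative decoding (not part of the original patch): a block-size
+ * field of 1 selects 16-byte blocks, and a FIFO-size field of 2
+ * multiplies that by (2 << 2) = 8, giving a 128-byte FIFO; a zero
+ * block-size field falls back to 4-byte blocks.
+ */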
+
+ dev_info(dev, "v.%08x IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ data, controller->in_blk_sz, controller->in_fifo_sz,
+ controller->out_blk_sz, controller->out_fifo_sz);
+
+ writel_relaxed(1, base + QUP_SW_RESET);
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+ goto error;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+ writel_relaxed(0, base + QUP_IO_M_MODES);
+ writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+ writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
+ base + SPI_ERROR_FLAGS_EN);
+
+ writel_relaxed(0, base + SPI_CONFIG);
+ writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
+
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+ goto error;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto error;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ return 0;
+
+error:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+}
+
+#ifdef CONFIG_PM_RUNTIME
+static int spi_qup_pm_suspend_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Enable clock auto-gating */
+ config = readl(controller->base + QUP_CONFIG);
+ config |= QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+
+static int spi_qup_pm_resume_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Disable clock auto-gating */
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+#endif /* CONFIG_PM_RUNTIME */
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_qup_suspend(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return 0;
+}
+
+static int spi_qup_resume(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int spi_qup_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct of_device_id spi_qup_dt_match[] = {
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
+
+static const struct dev_pm_ops spi_qup_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
+ SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
+ spi_qup_pm_resume_runtime,
+ NULL)
+};
+
+static struct platform_driver spi_qup_driver = {
+ .driver = {
+ .name = "spi_qup",
+ .owner = THIS_MODULE,
+ .pm = &spi_qup_dev_pm_ops,
+ .of_match_table = spi_qup_dt_match,
+ },
+ .probe = spi_qup_probe,
+ .remove = spi_qup_remove,
+};
+module_platform_driver(spi_qup_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_qup");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index 28987d9fcfe5..1fb0ad213324 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -1,7 +1,8 @@
/*
* SH RSPI driver
*
- * Copyright (C) 2012 Renesas Solutions Corp.
+ * Copyright (C) 2012, 2013 Renesas Solutions Corp.
+ * Copyright (C) 2014 Glider bvba
*
* Based on spi-sh.c:
* Copyright (C) 2011 Renesas Solutions Corp.
@@ -25,14 +26,14 @@
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
-#include <linux/list.h>
-#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
#include <linux/sh_dma.h>
#include <linux/spi/spi.h>
#include <linux/spi/rspi.h>
@@ -49,7 +50,7 @@
#define RSPI_SPCKD 0x0c /* Clock Delay Register */
#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
#define RSPI_SPND 0x0e /* Next-Access Delay Register */
-#define RSPI_SPCR2 0x0f /* Control Register 2 */
+#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
#define RSPI_SPCMD0 0x10 /* Command Register 0 */
#define RSPI_SPCMD1 0x12 /* Command Register 1 */
#define RSPI_SPCMD2 0x14 /* Command Register 2 */
@@ -58,16 +59,23 @@
#define RSPI_SPCMD5 0x1a /* Command Register 5 */
#define RSPI_SPCMD6 0x1c /* Command Register 6 */
#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
+#define RSPI_NUM_SPCMD 8
+#define RSPI_RZ_NUM_SPCMD 4
+#define QSPI_NUM_SPCMD 4
+
+/* RSPI on RZ only */
#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
-/*qspi only */
+/* QSPI only */
#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
/* SPCR - Control Register */
#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
@@ -104,7 +112,7 @@
#define SPSR_PERF 0x08 /* Parity Error Flag */
#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
-#define SPSR_OVRF 0x01 /* Overrun Error Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
/* SPSCR - Sequence Control Register */
#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
@@ -121,13 +129,13 @@
#define SPDCR_SPLWORD SPDCR_SPLW1
#define SPDCR_SPLBYTE SPDCR_SPLW0
#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
-#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
#define SPDCR_SLSEL1 0x08
#define SPDCR_SLSEL0 0x04
-#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select */
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
#define SPDCR_SPFC1 0x02
#define SPDCR_SPFC0 0x01
-#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) */
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
/* SPCKD - Clock Delay Register */
#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
@@ -151,7 +159,7 @@
#define SPCMD_LSBF 0x1000 /* LSB First */
#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
#define SPCMD_SPB_8_TO_16(bit) (((bit - 1) << 8) & SPCMD_SPB_MASK)
-#define SPCMD_SPB_8BIT 0x0000 /* qspi only */
+#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
#define SPCMD_SPB_16BIT 0x0100
#define SPCMD_SPB_20BIT 0x0000
#define SPCMD_SPB_24BIT 0x0100
@@ -170,8 +178,8 @@
#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
/* SPBFCR - Buffer Control Register */
-#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset (qspi only) */
-#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset (qspi only) */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
@@ -181,22 +189,21 @@ struct rspi_data {
void __iomem *addr;
u32 max_speed_hz;
struct spi_master *master;
- struct list_head queue;
- struct work_struct ws;
wait_queue_head_t wait;
- spinlock_t lock;
struct clk *clk;
- u8 spsr;
u16 spcmd;
+ u8 spsr;
+ u8 sppcr;
+ int rx_irq, tx_irq;
const struct spi_ops *ops;
/* for dmaengine */
struct dma_chan *chan_tx;
struct dma_chan *chan_rx;
- int irq;
unsigned dma_width_16bit:1;
unsigned dma_callbacked:1;
+ unsigned byte_access:1;
};
static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
@@ -224,34 +231,47 @@ static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
return ioread16(rspi->addr + offset);
}
+static void rspi_write_data(const struct rspi_data *rspi, u16 data)
+{
+ if (rspi->byte_access)
+ rspi_write8(rspi, data, RSPI_SPDR);
+ else /* 16 bit */
+ rspi_write16(rspi, data, RSPI_SPDR);
+}
+
+static u16 rspi_read_data(const struct rspi_data *rspi)
+{
+ if (rspi->byte_access)
+ return rspi_read8(rspi, RSPI_SPDR);
+ else /* 16 bit */
+ return rspi_read16(rspi, RSPI_SPDR);
+}
+
/* optional functions */
struct spi_ops {
- int (*set_config_register)(const struct rspi_data *rspi,
- int access_size);
- int (*send_pio)(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t);
- int (*receive_pio)(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t);
-
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ u16 mode_bits;
};
/*
- * functions for RSPI
+ * functions for RSPI on legacy SH
*/
-static int rspi_set_config_register(const struct rspi_data *rspi,
- int access_size)
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
{
int spbr;
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set 16-bit word access, 1 frame */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 0;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
@@ -262,8 +282,41 @@ static int rspi_set_config_register(const struct rspi_data *rspi,
rspi_write8(rspi, 0x00, RSPI_SPCR2);
/* Sets SPCMD */
- rspi_write16(rspi, SPCMD_SPB_8_TO_16(access_size) | rspi->spcmd,
- RSPI_SPCMD0);
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for RSPI on RZ
+ */
+static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets SPCMD */
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
/* Sets RSPI mode */
rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
@@ -274,21 +327,20 @@ static int rspi_set_config_register(const struct rspi_data *rspi,
/*
* functions for QSPI
*/
-static int qspi_set_config_register(const struct rspi_data *rspi,
- int access_size)
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
{
- u16 spcmd;
int spbr;
- /* Sets output mode(CMOS) and MOSI signal(from previous transfer) */
- rspi_write8(rspi, 0x00, RSPI_SPPCR);
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
/* Sets transfer bit rate */
spbr = clk_get_rate(rspi->clk) / (2 * rspi->max_speed_hz);
rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
- /* Sets number of frames to be used: 1 frame */
- rspi_write8(rspi, 0x00, RSPI_SPDCR);
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 1;
/* Sets RSPCK, SSL, next-access delay value */
rspi_write8(rspi, 0x00, RSPI_SPCKD);
@@ -297,13 +349,13 @@ static int qspi_set_config_register(const struct rspi_data *rspi,
/* Data Length Setting */
if (access_size == 8)
- spcmd = SPCMD_SPB_8BIT;
+ rspi->spcmd |= SPCMD_SPB_8BIT;
else if (access_size == 16)
- spcmd = SPCMD_SPB_16BIT;
+ rspi->spcmd |= SPCMD_SPB_16BIT;
else
- spcmd = SPCMD_SPB_32BIT;
+ rspi->spcmd |= SPCMD_SPB_32BIT;
- spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | rspi->spcmd | SPCMD_SPNDEN;
+ rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
/* Resets transfer data length */
rspi_write32(rspi, 0, QSPI_SPBMUL0);
@@ -314,9 +366,9 @@ static int qspi_set_config_register(const struct rspi_data *rspi,
rspi_write8(rspi, 0x00, QSPI_SPBFCR);
/* Sets SPCMD */
- rspi_write16(rspi, spcmd, RSPI_SPCMD0);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
- /* Enables SPI function in a master mode */
+ /* Enables SPI function in master mode */
rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
return 0;
@@ -340,6 +392,9 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
int ret;
rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (rspi->spsr & wait_mask)
+ return 0;
+
rspi_enable_irq(rspi, enable_bit);
ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
if (ret == 0 && !(rspi->spsr & wait_mask))
@@ -348,78 +403,39 @@ static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
return 0;
}
-static void rspi_assert_ssl(const struct rspi_data *rspi)
-{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
-}
-
-static void rspi_negate_ssl(const struct rspi_data *rspi)
+static int rspi_data_out(struct rspi_data *rspi, u8 data)
{
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return -ETIMEDOUT;
+ }
+ rspi_write_data(rspi, data);
+ return 0;
}
-static int rspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static int rspi_data_in(struct rspi_data *rspi)
{
- int remain = t->len;
- const u8 *data = t->tx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD,
- RSPI_SPCR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
+ u8 data;
- rspi_write16(rspi, *data, RSPI_SPDR);
- data++;
- remain--;
+ if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return -ETIMEDOUT;
}
-
- /* Waiting for the last transmission */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
-
- return 0;
+ data = rspi_read_data(rspi);
+ return data;
}
-static int qspi_send_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static int rspi_data_out_in(struct rspi_data *rspi, u8 data)
{
- int remain = t->len;
- const u8 *data = t->tx_buf;
-
- rspi_write8(rspi, SPBFCR_TXRST, QSPI_SPBFCR);
- rspi_write8(rspi, 0x00, QSPI_SPBFCR);
-
- while (remain > 0) {
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- rspi_write8(rspi, *data++, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- rspi_read8(rspi, RSPI_SPDR);
-
- remain--;
- }
+ int ret;
- /* Waiting for the last transmission */
- rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+ ret = rspi_data_out(rspi, data);
+ if (ret < 0)
+ return ret;
- return 0;
+ return rspi_data_in(rspi);
}
-#define send_pio(spi, mesg, t) spi->ops->send_pio(spi, mesg, t)
-
static void rspi_dma_complete(void *arg)
{
struct rspi_data *rspi = arg;
@@ -471,7 +487,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
struct scatterlist sg;
const void *buf = NULL;
struct dma_async_tx_descriptor *desc;
- unsigned len;
+ unsigned int len;
int ret = 0;
if (rspi->dma_width_16bit) {
@@ -509,7 +525,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ disable_irq(rspi->tx_irq);
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_TXMD, RSPI_SPCR);
rspi_enable_irq(rspi, SPCR_SPTIE);
@@ -528,7 +544,7 @@ static int rspi_send_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = -ETIMEDOUT;
rspi_disable_irq(rspi, SPCR_SPTIE);
- enable_irq(rspi->irq);
+ enable_irq(rspi->tx_irq);
end:
rspi_dma_unmap_sg(&sg, rspi->chan_tx, DMA_TO_DEVICE);
@@ -545,46 +561,17 @@ static void rspi_receive_init(const struct rspi_data *rspi)
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read16(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
if (spsr & SPSR_OVRF)
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
RSPI_SPSR);
}
-static int rspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
+static void rspi_rz_receive_init(const struct rspi_data *rspi)
{
- int remain = t->len;
- u8 *data;
-
rspi_receive_init(rspi);
-
- data = t->rx_buf;
- while (remain > 0) {
- rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD,
- RSPI_SPCR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write16(rspi, DUMMY_DATA, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 16 or 32-bit access only */
- *data = (u8)rspi_read16(rspi, RSPI_SPDR);
-
- data++;
- remain--;
- }
-
- return 0;
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
+ rspi_write8(rspi, 0, RSPI_SPBFCR);
}
static void qspi_receive_init(const struct rspi_data *rspi)
@@ -593,51 +580,17 @@ static void qspi_receive_init(const struct rspi_data *rspi)
spsr = rspi_read8(rspi, RSPI_SPSR);
if (spsr & SPSR_SPRF)
- rspi_read8(rspi, RSPI_SPDR); /* dummy read */
+ rspi_read_data(rspi); /* dummy read */
rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
- rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+ rspi_write8(rspi, 0, QSPI_SPBFCR);
}
-static int qspi_receive_pio(struct rspi_data *rspi, struct spi_message *mesg,
- struct spi_transfer *t)
-{
- int remain = t->len;
- u8 *data;
-
- qspi_receive_init(rspi);
-
- data = t->rx_buf;
- while (remain > 0) {
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: tx empty timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* dummy write for generate clock */
- rspi_write8(rspi, DUMMY_DATA, RSPI_SPDR);
-
- if (rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE) < 0) {
- dev_err(&rspi->master->dev,
- "%s: receive timeout\n", __func__);
- return -ETIMEDOUT;
- }
- /* SPDR allows 8, 16 or 32-bit access */
- *data++ = rspi_read8(rspi, RSPI_SPDR);
- remain--;
- }
-
- return 0;
-}
-
-#define receive_pio(spi, mesg, t) spi->ops->receive_pio(spi, mesg, t)
-
static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
{
struct scatterlist sg, sg_dummy;
void *dummy = NULL, *rx_buf = NULL;
struct dma_async_tx_descriptor *desc, *desc_dummy;
- unsigned len;
+ unsigned int len;
int ret = 0;
if (rspi->dma_width_16bit) {
@@ -695,7 +648,9 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
* DMAC needs SPTIE, but if SPTIE is set, this IRQ routine will be
* called. So, this driver disables the IRQ while DMA transfer.
*/
- disable_irq(rspi->irq);
+ disable_irq(rspi->tx_irq);
+ if (rspi->rx_irq != rspi->tx_irq)
+ disable_irq(rspi->rx_irq);
rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_TXMD, RSPI_SPCR);
rspi_enable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
@@ -718,7 +673,9 @@ static int rspi_receive_dma(struct rspi_data *rspi, struct spi_transfer *t)
ret = -ETIMEDOUT;
rspi_disable_irq(rspi, SPCR_SPTIE | SPCR_SPRIE);
- enable_irq(rspi->irq);
+ enable_irq(rspi->tx_irq);
+ if (rspi->rx_irq != rspi->tx_irq)
+ enable_irq(rspi->rx_irq);
end:
rspi_dma_unmap_sg(&sg, rspi->chan_rx, DMA_FROM_DEVICE);
@@ -746,56 +703,175 @@ static int rspi_is_dma(const struct rspi_data *rspi, struct spi_transfer *t)
return 0;
}
-static void rspi_work(struct work_struct *work)
+static int rspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
{
- struct rspi_data *rspi = container_of(work, struct rspi_data, ws);
- struct spi_message *mesg;
- struct spi_transfer *t;
- unsigned long flags;
- int ret;
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 spcr, data;
- while (1) {
- spin_lock_irqsave(&rspi->lock, flags);
- if (list_empty(&rspi->queue)) {
- spin_unlock_irqrestore(&rspi->lock, flags);
- break;
- }
- mesg = list_entry(rspi->queue.next, struct spi_message, queue);
- list_del_init(&mesg->queue);
- spin_unlock_irqrestore(&rspi->lock, flags);
-
- rspi_assert_ssl(rspi);
-
- list_for_each_entry(t, &mesg->transfers, transfer_list) {
- if (t->tx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_send_dma(rspi, t);
- else
- ret = send_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- if (t->rx_buf) {
- if (rspi_is_dma(rspi, t))
- ret = rspi_receive_dma(rspi, t);
- else
- ret = receive_pio(rspi, mesg, t);
- if (ret < 0)
- goto error;
- }
- mesg->actual_length += t->len;
+ rspi_receive_init(rspi);
+
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (rx_buf)
+ spcr &= ~SPCR_TXMD;
+ else
+ spcr |= SPCR_TXMD;
+ rspi_write8(rspi, spcr, RSPI_SPCR);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf) {
+ ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *rx_buf++ = ret;
}
- rspi_negate_ssl(rspi);
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (!rspi_is_dma(rspi, xfer))
+ return rspi_transfer_out_in(rspi, xfer);
+
+ if (xfer->tx_buf) {
+ ret = rspi_send_dma(rspi, xfer);
+ if (ret < 0)
+ return ret;
+ }
+ if (xfer->rx_buf)
+ return rspi_receive_dma(rspi, xfer);
+
+ return 0;
+}
+
+static int rspi_rz_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 data;
+
+ rspi_rz_receive_init(rspi);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out_in(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf)
+ *rx_buf++ = ret;
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int rspi_rz_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ return rspi_rz_transfer_out_in(rspi, xfer);
+}
+
+static int qspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int remain = xfer->len, ret;
+ const u8 *tx_buf = xfer->tx_buf;
+ u8 *rx_buf = xfer->rx_buf;
+ u8 data;
- mesg->status = 0;
- mesg->complete(mesg->context);
+ qspi_receive_init(rspi);
+
+ while (remain > 0) {
+ data = tx_buf ? *tx_buf++ : DUMMY_DATA;
+ ret = rspi_data_out_in(rspi, data);
+ if (ret < 0)
+ return ret;
+ if (rx_buf)
+ *rx_buf++ = ret;
+ remain--;
+ }
+
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+
+ return 0;
+}
+
+static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ const u8 *buf = xfer->tx_buf;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < xfer->len; i++) {
+ ret = rspi_data_out(rspi, *buf++);
+ if (ret < 0)
+ return ret;
}
- return;
+ /* Wait for the last transmission */
+ rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
-error:
- mesg->status = ret;
- mesg->complete(mesg->context);
+ return 0;
+}
+
+static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ u8 *buf = xfer->rx_buf;
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < xfer->len; i++) {
+ ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *buf++ = ret;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ if (spi->mode & SPI_LOOP) {
+ return qspi_transfer_out_in(rspi, xfer);
+ } else if (xfer->tx_buf && xfer->tx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Write */
+ return qspi_transfer_out(rspi, xfer);
+ } else if (xfer->rx_buf && xfer->rx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Read */
+ return qspi_transfer_in(rspi, xfer);
+ } else {
+ /* Single SPI Transfer */
+ return qspi_transfer_out_in(rspi, xfer);
+ }
}
static int rspi_setup(struct spi_device *spi)
@@ -810,32 +886,115 @@ static int rspi_setup(struct spi_device *spi)
if (spi->mode & SPI_CPHA)
rspi->spcmd |= SPCMD_CPHA;
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
set_config_register(rspi, 8);
return 0;
}
-static int rspi_transfer(struct spi_device *spi, struct spi_message *mesg)
+static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
{
- struct rspi_data *rspi = spi_master_get_devdata(spi->master);
- unsigned long flags;
+ if (xfer->tx_buf)
+ switch (xfer->tx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL;
+ default:
+ return 0;
+ }
+ if (xfer->rx_buf)
+ switch (xfer->rx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
- mesg->actual_length = 0;
- mesg->status = -EINPROGRESS;
+static int qspi_setup_sequencer(struct rspi_data *rspi,
+ const struct spi_message *msg)
+{
+ const struct spi_transfer *xfer;
+ unsigned int i = 0, len = 0;
+ u16 current_mode = 0xffff, mode;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ mode = qspi_transfer_mode(xfer);
+ if (mode == current_mode) {
+ len += xfer->len;
+ continue;
+ }
+
+ /* Transfer mode change */
+ if (i) {
+ /* Set transfer data length of previous transfer */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ }
- spin_lock_irqsave(&rspi->lock, flags);
- list_add_tail(&mesg->queue, &rspi->queue);
- schedule_work(&rspi->ws);
- spin_unlock_irqrestore(&rspi->lock, flags);
+ if (i >= QSPI_NUM_SPCMD) {
+ dev_err(&msg->spi->dev,
+ "Too many different transfer modes");
+ return -EINVAL;
+ }
+
+ /* Program transfer mode for this transfer */
+ rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
+ current_mode = mode;
+ len = xfer->len;
+ i++;
+ }
+ if (i) {
+ /* Set final transfer data length and sequence length */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ rspi_write8(rspi, i - 1, RSPI_SPSCR);
+ }
return 0;
}
-static void rspi_cleanup(struct spi_device *spi)
+static int rspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (msg->spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
+ /* Setup sequencer for messages with multiple transfer modes */
+ ret = qspi_setup_sequencer(rspi, msg);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable SPI function in master mode */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
+ return 0;
}
-static irqreturn_t rspi_irq(int irq, void *_sr)
+static int rspi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ /* Disable SPI function */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+
+ /* Reset sequencer for Single SPI Transfers */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ return 0;
+}
+
+static irqreturn_t rspi_irq_mux(int irq, void *_sr)
{
struct rspi_data *rspi = _sr;
u8 spsr;
@@ -857,6 +1016,36 @@ static irqreturn_t rspi_irq(int irq, void *_sr)
return ret;
}
+static irqreturn_t rspi_irq_rx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF) {
+ rspi_disable_irq(rspi, SPCR_SPRIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
+static irqreturn_t rspi_irq_tx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPTEF) {
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return 0;
+}
+
static int rspi_request_dma(struct rspi_data *rspi,
struct platform_device *pdev)
{
@@ -923,34 +1112,89 @@ static int rspi_remove(struct platform_device *pdev)
struct rspi_data *rspi = platform_get_drvdata(pdev);
rspi_release_dma(rspi);
- clk_disable(rspi->clk);
+ pm_runtime_disable(&pdev->dev);
return 0;
}
+static const struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .transfer_one = rspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+};
+
+static const struct spi_ops rspi_rz_ops = {
+ .set_config_register = rspi_rz_set_config_register,
+ .transfer_one = rspi_rz_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+};
+
+static const struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .transfer_one = qspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id rspi_of_match[] = {
+ /* RSPI on legacy SH */
+ { .compatible = "renesas,rspi", .data = &rspi_ops },
+ /* RSPI on RZ/A1H */
+ { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
+ /* QSPI on R-Car Gen2 */
+ { .compatible = "renesas,qspi", .data = &qspi_ops },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rspi_of_match);
+
+static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ u32 num_cs;
+ int error;
+
+ /* Parse DT properties */
+ error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (error) {
+ dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
+ return error;
+ }
+
+ master->num_chipselect = num_cs;
+ return 0;
+}
+#else
+#define rspi_of_match NULL
+static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int rspi_request_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, const char *suffix,
+ void *dev_id)
+{
+ const char *base = dev_name(dev);
+ size_t len = strlen(base) + strlen(suffix) + 2;
+ char *name = devm_kzalloc(dev, len, GFP_KERNEL);
+ if (!name)
+ return -ENOMEM;
+ snprintf(name, len, "%s:%s", base, suffix);
+ return devm_request_irq(dev, irq, handler, 0, name, dev_id);
+}
+
static int rspi_probe(struct platform_device *pdev)
{
struct resource *res;
struct spi_master *master;
struct rspi_data *rspi;
- int ret, irq;
- char clk_name[16];
- const struct rspi_plat_data *rspi_pd = dev_get_platdata(&pdev->dev);
+ int ret;
+ const struct of_device_id *of_id;
+ const struct rspi_plat_data *rspi_pd;
const struct spi_ops *ops;
- const struct platform_device_id *id_entry = pdev->id_entry;
-
- ops = (struct spi_ops *)id_entry->driver_data;
- /* ops parameter check */
- if (!ops->set_config_register) {
- dev_err(&pdev->dev, "there is no set_config_register\n");
- return -ENODEV;
- }
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- dev_err(&pdev->dev, "platform_get_irq error\n");
- return -ENODEV;
- }
master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
if (master == NULL) {
@@ -958,6 +1202,28 @@ static int rspi_probe(struct platform_device *pdev)
return -ENOMEM;
}
+ of_id = of_match_device(rspi_of_match, &pdev->dev);
+ if (of_id) {
+ ops = of_id->data;
+ ret = rspi_parse_dt(&pdev->dev, master);
+ if (ret)
+ goto error1;
+ } else {
+ ops = (struct spi_ops *)pdev->id_entry->driver_data;
+ rspi_pd = dev_get_platdata(&pdev->dev);
+ if (rspi_pd && rspi_pd->num_chipselect)
+ master->num_chipselect = rspi_pd->num_chipselect;
+ else
+ master->num_chipselect = 2; /* default */
+ };
+
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ ret = -ENODEV;
+ goto error1;
+ }
+
rspi = spi_master_get_devdata(master);
platform_set_drvdata(pdev, rspi);
rspi->ops = ops;
@@ -970,39 +1236,61 @@ static int rspi_probe(struct platform_device *pdev)
goto error1;
}
- snprintf(clk_name, sizeof(clk_name), "%s%d", id_entry->name, pdev->id);
- rspi->clk = devm_clk_get(&pdev->dev, clk_name);
+ rspi->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(rspi->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(rspi->clk);
goto error1;
}
- clk_enable(rspi->clk);
- INIT_LIST_HEAD(&rspi->queue);
- spin_lock_init(&rspi->lock);
- INIT_WORK(&rspi->ws, rspi_work);
- init_waitqueue_head(&rspi->wait);
+ pm_runtime_enable(&pdev->dev);
- if (rspi_pd && rspi_pd->num_chipselect)
- master->num_chipselect = rspi_pd->num_chipselect;
- else
- master->num_chipselect = 2; /* default */
+ init_waitqueue_head(&rspi->wait);
master->bus_num = pdev->id;
master->setup = rspi_setup;
- master->transfer = rspi_transfer;
- master->cleanup = rspi_cleanup;
- master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->auto_runtime_pm = true;
+ master->transfer_one = ops->transfer_one;
+ master->prepare_message = rspi_prepare_message;
+ master->unprepare_message = rspi_unprepare_message;
+ master->mode_bits = ops->mode_bits;
+ master->dev.of_node = pdev->dev.of_node;
+
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0) {
+ ret = platform_get_irq_byname(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+ rspi->rx_irq = rspi->tx_irq = ret;
+ } else {
+ rspi->rx_irq = ret;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ goto error2;
+ }
- ret = devm_request_irq(&pdev->dev, irq, rspi_irq, 0,
- dev_name(&pdev->dev), rspi);
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
+ "mux", rspi);
+ } else {
+ /* Multi-interrupt mode, only SPRI and SPTI are used */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
+ "rx", rspi);
+ if (!ret)
+ ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
+ rspi_irq_tx, "tx", rspi);
+ }
if (ret < 0) {
dev_err(&pdev->dev, "request_irq error\n");
goto error2;
}
- rspi->irq = irq;
ret = rspi_request_dma(rspi, pdev);
if (ret < 0) {
dev_err(&pdev->dev, "rspi_request_dma failed.\n");
@@ -1022,27 +1310,16 @@ static int rspi_probe(struct platform_device *pdev)
error3:
rspi_release_dma(rspi);
error2:
- clk_disable(rspi->clk);
+ pm_runtime_disable(&pdev->dev);
error1:
spi_master_put(master);
return ret;
}
-static struct spi_ops rspi_ops = {
- .set_config_register = rspi_set_config_register,
- .send_pio = rspi_send_pio,
- .receive_pio = rspi_receive_pio,
-};
-
-static struct spi_ops qspi_ops = {
- .set_config_register = qspi_set_config_register,
- .send_pio = qspi_send_pio,
- .receive_pio = qspi_receive_pio,
-};
-
static struct platform_device_id spi_driver_ids[] = {
{ "rspi", (kernel_ulong_t)&rspi_ops },
+ { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
{ "qspi", (kernel_ulong_t)&qspi_ops },
{},
};
@@ -1056,6 +1333,7 @@ static struct platform_driver rspi_driver = {
.driver = {
.name = "renesas_spi",
.owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(rspi_of_match),
},
};
module_platform_driver(rspi_driver);
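The rspi probe rework above lets one driver bind either through the device tree or through a legacy platform_device_id table, carrying the per-variant spi_ops in of_device_id .data respectively in platform_device_id driver_data. A minimal sketch of that dual-bind pattern, with hypothetical foo_* names and a single assumed callback:

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct foo_ops {
	int (*init)(struct platform_device *pdev);
};

static const struct foo_ops foo_v1_ops = {
	/* .init = foo_v1_init, per-variant hooks would go here */
};

static const struct of_device_id foo_of_match[] = {
	{ .compatible = "vendor,foo-v1", .data = &foo_v1_ops },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, foo_of_match);

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *of_id =
		of_match_device(foo_of_match, &pdev->dev);
	const struct foo_ops *ops;

	if (of_id)
		ops = of_id->data;	/* instantiated from the device tree */
	else				/* legacy board-file instantiation */
		ops = (const void *)pdev->id_entry->driver_data;

	return ops->init ? ops->init(pdev) : 0;
}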
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
index 746424aa5353..bed23384dfab 100644
--- a/drivers/spi/spi-s3c24xx.c
+++ b/drivers/spi/spi-s3c24xx.c
@@ -9,7 +9,6 @@
*
*/
-#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
@@ -123,25 +122,15 @@ static int s3c24xx_spi_update_state(struct spi_device *spi,
{
struct s3c24xx_spi *hw = to_hw(spi);
struct s3c24xx_spi_devstate *cs = spi->controller_state;
- unsigned int bpw;
unsigned int hz;
unsigned int div;
unsigned long clk;
- bpw = t ? t->bits_per_word : spi->bits_per_word;
hz = t ? t->speed_hz : spi->max_speed_hz;
- if (!bpw)
- bpw = 8;
-
if (!hz)
hz = spi->max_speed_hz;
- if (bpw != 8) {
- dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw);
- return -EINVAL;
- }
-
if (spi->mode != cs->mode) {
u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
@@ -544,6 +533,7 @@ static int s3c24xx_spi_probe(struct platform_device *pdev)
master->num_chipselect = hw->pdata->num_cs;
master->bus_num = pdata->bus_num;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
/* setup the state for the bitbang driver */
@@ -643,6 +633,11 @@ static int s3c24xx_spi_remove(struct platform_device *dev)
static int s3c24xx_spi_suspend(struct device *dev)
{
struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(hw->master);
+ if (ret)
+ return ret;
if (hw->pdata && hw->pdata->gpio_setup)
hw->pdata->gpio_setup(hw->pdata, 0);
@@ -656,7 +651,7 @@ static int s3c24xx_spi_resume(struct device *dev)
struct s3c24xx_spi *hw = dev_get_drvdata(dev);
s3c24xx_spi_initialsetup(hw);
- return 0;
+ return spi_master_resume(hw->master);
}
static const struct dev_pm_ops s3c24xx_spi_pmops = {
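The s3c24xx suspend/resume hooks above now bracket the existing gpio teardown and re-init with spi_master_suspend()/spi_master_resume(), which stop and restart the core's message queue so no transfer can race the power transition. A minimal sketch of that wiring, assuming drvdata holds the spi_master and using hypothetical foo_* names:

static int foo_spi_suspend(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);
	int ret;

	/* quiesce the message queue before powering the controller down */
	ret = spi_master_suspend(master);
	if (ret)
		return ret;

	/* controller/pinmux power-down would go here */
	return 0;
}

static int foo_spi_resume(struct device *dev)
{
	struct spi_master *master = dev_get_drvdata(dev);

	/* controller/pinmux re-initialisation would go here */
	return spi_master_resume(master);
}

static SIMPLE_DEV_PM_OPS(foo_spi_pm_ops, foo_spi_suspend, foo_spi_resume);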
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
index ae907dde1371..f19cd97855e8 100644
--- a/drivers/spi/spi-s3c64xx.c
+++ b/drivers/spi/spi-s3c64xx.c
@@ -34,10 +34,6 @@
#include <linux/platform_data/spi-s3c64xx.h>
-#ifdef CONFIG_S3C_DMA
-#include <mach/dma.h>
-#endif
-
#define MAX_SPI_PORTS 3
#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
@@ -200,9 +196,6 @@ struct s3c64xx_spi_driver_data {
unsigned cur_speed;
struct s3c64xx_spi_dma_data rx_dma;
struct s3c64xx_spi_dma_data tx_dma;
-#ifdef CONFIG_S3C_DMA
- struct samsung_dma_ops *ops;
-#endif
struct s3c64xx_spi_port_config *port_conf;
unsigned int port_id;
bool cs_gpio;
@@ -284,104 +277,8 @@ static void s3c64xx_spi_dmacb(void *data)
spin_unlock_irqrestore(&sdd->lock, flags);
}
-#ifdef CONFIG_S3C_DMA
-/* FIXME: remove this section once arch/arm/mach-s3c64xx uses dmaengine */
-
-static struct s3c2410_dma_client s3c64xx_spi_dma_client = {
- .name = "samsung-spi-dma",
-};
-
-static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
-{
- struct s3c64xx_spi_driver_data *sdd;
- struct samsung_dma_prep info;
- struct samsung_dma_config config;
-
- if (dma->direction == DMA_DEV_TO_MEM) {
- sdd = container_of((void *)dma,
- struct s3c64xx_spi_driver_data, rx_dma);
- config.direction = sdd->rx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config((enum dma_ch)sdd->rx_dma.ch, &config);
- } else {
- sdd = container_of((void *)dma,
- struct s3c64xx_spi_driver_data, tx_dma);
- config.direction = sdd->tx_dma.direction;
- config.fifo = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
- config.width = sdd->cur_bpw / 8;
- sdd->ops->config((enum dma_ch)sdd->tx_dma.ch, &config);
- }
-
- info.cap = DMA_SLAVE;
- info.len = len;
- info.fp = s3c64xx_spi_dmacb;
- info.fp_param = dma;
- info.direction = dma->direction;
- info.buf = buf;
-
- sdd->ops->prepare((enum dma_ch)dma->ch, &info);
- sdd->ops->trigger((enum dma_ch)dma->ch);
-}
-
-static int acquire_dma(struct s3c64xx_spi_driver_data *sdd)
-{
- struct samsung_dma_req req;
- struct device *dev = &sdd->pdev->dev;
-
- sdd->ops = samsung_dma_get_ops();
-
- req.cap = DMA_SLAVE;
- req.client = &s3c64xx_spi_dma_client;
-
- sdd->rx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
- sdd->rx_dma.dmach, &req, dev, "rx");
- sdd->tx_dma.ch = (struct dma_chan *)(unsigned long)sdd->ops->request(
- sdd->tx_dma.dmach, &req, dev, "tx");
-
- return 1;
-}
-
-static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /*
- * If DMA resource was not available during
- * probe, no need to continue with dma requests
- * else Acquire DMA channels
- */
- while (!is_polling(sdd) && !acquire_dma(sdd))
- usleep_range(10000, 11000);
-
- return 0;
-}
-
-static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
-
- /* Free DMA channels */
- if (!is_polling(sdd)) {
- sdd->ops->release((enum dma_ch)sdd->rx_dma.ch,
- &s3c64xx_spi_dma_client);
- sdd->ops->release((enum dma_ch)sdd->tx_dma.ch,
- &s3c64xx_spi_dma_client);
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
- struct s3c64xx_spi_dma_data *dma)
-{
- sdd->ops->stop((enum dma_ch)dma->ch);
-}
-#else
-
static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
- unsigned len, dma_addr_t buf)
+ struct sg_table *sgt)
{
struct s3c64xx_spi_driver_data *sdd;
struct dma_slave_config config;
@@ -407,8 +304,8 @@ static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
dmaengine_slave_config(dma->ch, &config);
}
- desc = dmaengine_prep_slave_single(dma->ch, buf, len,
- dma->direction, DMA_PREP_INTERRUPT);
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
desc->callback = s3c64xx_spi_dmacb;
desc->callback_param = dma;
@@ -437,6 +334,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
ret = -EBUSY;
goto out;
}
+ spi->dma_rx = sdd->rx_dma.ch;
sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
(void *)sdd->tx_dma.dmach, dev, "tx");
@@ -445,6 +343,7 @@ static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
ret = -EBUSY;
goto out_rx;
}
+ spi->dma_tx = sdd->tx_dma.ch;
}
ret = pm_runtime_get_sync(&sdd->pdev->dev);
@@ -477,12 +376,14 @@ static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
return 0;
}
-static void s3c64xx_spi_dma_stop(struct s3c64xx_spi_driver_data *sdd,
- struct s3c64xx_spi_dma_data *dma)
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
{
- dmaengine_terminate_all(dma->ch);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
}
-#endif
static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
struct spi_device *spi,
@@ -515,7 +416,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
chcfg |= S3C64XX_SPI_CH_TXCH_ON;
if (dma_mode) {
modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
- prepare_dma(&sdd->tx_dma, xfer->len, xfer->tx_dma);
+ prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
} else {
switch (sdd->cur_bpw) {
case 32:
@@ -547,7 +448,7 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
| S3C64XX_SPI_PACKET_CNT_EN,
regs + S3C64XX_SPI_PACKET_CNT);
- prepare_dma(&sdd->rx_dma, xfer->len, xfer->rx_dma);
+ prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
}
}
@@ -555,23 +456,6 @@ static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
}
-static inline void enable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi != NULL) { /* If last device toggled after mssg */
- if (sdd->tgl_spi != spi) { /* if last mssg on diff device */
- /* Deselect the last toggled device */
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio,
- spi->mode & SPI_CS_HIGH ? 0 : 1);
- }
- sdd->tgl_spi = NULL;
- }
-
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 1 : 0);
-}
-
static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
int timeout_ms)
{
@@ -593,112 +477,111 @@ static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
return RX_FIFO_LVL(status, sdd);
}
-static int wait_for_xfer(struct s3c64xx_spi_driver_data *sdd,
- struct spi_transfer *xfer, int dma_mode)
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
{
void __iomem *regs = sdd->regs;
unsigned long val;
+ u32 status;
int ms;
/* millisecs to xfer 'len' bytes @ 'cur_speed' */
ms = xfer->len * 8 * 1000 / sdd->cur_speed;
ms += 10; /* some tolerance */
- if (dma_mode) {
- val = msecs_to_jiffies(ms) + 10;
- val = wait_for_completion_timeout(&sdd->xfer_completion, val);
- } else {
- u32 status;
- val = msecs_to_loops(ms);
- do {
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+ /*
+ * If the previous xfer was completed within timeout, then
+ * proceed further else return -EIO.
+ * DmaTx returns after simply writing data in the FIFO,
+ * w/o waiting for real transmission on the bus to finish.
+ * DmaRx returns only after Dma read data from FIFO which
+ * needs bus transmission to finish, so we don't worry if
+ * Xfer involved Rx(with or without Tx).
+ */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
status = readl(regs + S3C64XX_SPI_STATUS);
- } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+ }
+
}
- if (dma_mode) {
- u32 status;
-
- /*
- * If the previous xfer was completed within timeout, then
- * proceed further else return -EIO.
- * DmaTx returns after simply writing data in the FIFO,
- * w/o waiting for real transmission on the bus to finish.
- * DmaRx returns only after Dma read data from FIFO which
- * needs bus transmission to finish, so we don't worry if
- * Xfer involved Rx(with or without Tx).
- */
- if (val && !xfer->rx_buf) {
- val = msecs_to_loops(10);
- status = readl(regs + S3C64XX_SPI_STATUS);
- while ((TX_FIFO_LVL(status, sdd)
- || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
- && --val) {
- cpu_relax();
- status = readl(regs + S3C64XX_SPI_STATUS);
- }
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
- }
+ return 0;
+}
- /* If timed out while checking rx/tx status return error */
- if (!val)
- return -EIO;
- } else {
- int loops;
- u32 cpy_len;
- u8 *buf;
-
- /* If it was only Tx */
- if (!xfer->rx_buf) {
- sdd->state &= ~TXBUSY;
- return 0;
- }
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
- /*
- * If the receive length is bigger than the controller fifo
- * size, calculate the loops and read the fifo as many times.
- * loops = length / max fifo size (calculated by using the
- * fifo mask).
- * For any size less than the fifo size the below code is
- * executed atleast once.
- */
- loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
- buf = xfer->rx_buf;
- do {
- /* wait for data to be received in the fifo */
- cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
- (loops ? ms : 0));
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
- switch (sdd->cur_bpw) {
- case 32:
- ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 4);
- break;
- case 16:
- ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len / 2);
- break;
- default:
- ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
- buf, cpy_len);
- break;
- }
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
- buf = buf + cpy_len;
- } while (loops--);
- sdd->state &= ~RXBUSY;
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
}
- return 0;
-}
+ /*
+ * If the receive length is bigger than the controller fifo
+ * size, calculate the loops and read the fifo as many times.
+ * loops = length / max fifo size (calculated by using the
+ * fifo mask).
+ * For any size less than the fifo size the below code is
+ * executed atleast once.
+ */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 4);
+ break;
+ case 16:
+ ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 2);
+ break;
+ default:
+ ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len);
+ break;
+ }
-static inline void disable_cs(struct s3c64xx_spi_driver_data *sdd,
- struct spi_device *spi)
-{
- if (sdd->tgl_spi == spi)
- sdd->tgl_spi = NULL;
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
- if (spi->cs_gpio >= 0)
- gpio_set_value(spi->cs_gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+ return 0;
}
static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
@@ -774,81 +657,6 @@ static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
-static int s3c64xx_spi_map_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (is_polling(sdd) || msg->is_dma_mapped)
- return 0;
-
- /* First mark all xfer unmapped */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- }
-
- /* Map until end or first fail */
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->tx_buf != NULL) {
- xfer->tx_dma = dma_map_single(dev,
- (void *)xfer->tx_buf, xfer->len,
- DMA_TO_DEVICE);
- if (dma_mapping_error(dev, xfer->tx_dma)) {
- dev_err(dev, "dma_map_single Tx failed\n");
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
-
- if (xfer->rx_buf != NULL) {
- xfer->rx_dma = dma_map_single(dev, xfer->rx_buf,
- xfer->len, DMA_FROM_DEVICE);
- if (dma_mapping_error(dev, xfer->rx_dma)) {
- dev_err(dev, "dma_map_single Rx failed\n");
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- xfer->tx_dma = XFER_DMAADDR_INVALID;
- xfer->rx_dma = XFER_DMAADDR_INVALID;
- return -ENOMEM;
- }
- }
- }
-
- return 0;
-}
-
-static void s3c64xx_spi_unmap_mssg(struct s3c64xx_spi_driver_data *sdd,
- struct spi_message *msg)
-{
- struct device *dev = &sdd->pdev->dev;
- struct spi_transfer *xfer;
-
- if (is_polling(sdd) || msg->is_dma_mapped)
- return;
-
- list_for_each_entry(xfer, &msg->transfers, transfer_list) {
-
- if (xfer->len <= ((FIFO_LVL_MASK(sdd) >> 1) + 1))
- continue;
-
- if (xfer->rx_buf != NULL
- && xfer->rx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->rx_dma,
- xfer->len, DMA_FROM_DEVICE);
-
- if (xfer->tx_buf != NULL
- && xfer->tx_dma != XFER_DMAADDR_INVALID)
- dma_unmap_single(dev, xfer->tx_dma,
- xfer->len, DMA_TO_DEVICE);
- }
-}
-
static int s3c64xx_spi_prepare_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -866,13 +674,6 @@ static int s3c64xx_spi_prepare_message(struct spi_master *master,
s3c64xx_spi_config(sdd);
}
- /* Map all the transfers if needed */
- if (s3c64xx_spi_map_mssg(sdd, msg)) {
- dev_err(&spi->dev,
- "Xfer: Unable to map message buffers!\n");
- return -ENOMEM;
- }
-
/* Configure feedback delay */
writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
@@ -896,13 +697,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
bpw = xfer->bits_per_word;
speed = xfer->speed_hz ? : spi->max_speed_hz;
- if (xfer->len % (bpw / 8)) {
- dev_err(&spi->dev,
- "Xfer length(%u) not a multiple of word size(%u)\n",
- xfer->len, bpw / 8);
- return -EIO;
- }
-
if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
sdd->cur_bpw = bpw;
sdd->cur_speed = speed;
@@ -929,7 +723,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
spin_unlock_irqrestore(&sdd->lock, flags);
- status = wait_for_xfer(sdd, xfer, use_dma);
+ if (use_dma)
+ status = wait_for_dma(sdd, xfer);
+ else
+ status = wait_for_pio(sdd, xfer);
if (status) {
dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
@@ -941,10 +738,10 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
if (use_dma) {
if (xfer->tx_buf != NULL
&& (sdd->state & TXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->tx_dma);
+ dmaengine_terminate_all(sdd->tx_dma.ch);
if (xfer->rx_buf != NULL
&& (sdd->state & RXBUSY))
- s3c64xx_spi_dma_stop(sdd, &sdd->rx_dma);
+ dmaengine_terminate_all(sdd->rx_dma.ch);
}
} else {
flush_fifo(sdd);
@@ -953,16 +750,6 @@ static int s3c64xx_spi_transfer_one(struct spi_master *master,
return status;
}
-static int s3c64xx_spi_unprepare_message(struct spi_master *master,
- struct spi_message *msg)
-{
- struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
-
- s3c64xx_spi_unmap_mssg(sdd, msg);
-
- return 0;
-}
-
static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
struct spi_device *spi)
{
@@ -1092,14 +879,12 @@ static int s3c64xx_spi_setup(struct spi_device *spi)
pm_runtime_put(&sdd->pdev->dev);
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
return 0;
setup_exit:
pm_runtime_put(&sdd->pdev->dev);
/* setup() returns with device de-selected */
writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
- disable_cs(sdd, spi);
gpio_free(cs->line);
spi_set_ctldata(spi, NULL);
@@ -1338,7 +1123,6 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
master->prepare_message = s3c64xx_spi_prepare_message;
master->transfer_one = s3c64xx_spi_transfer_one;
- master->unprepare_message = s3c64xx_spi_unprepare_message;
master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
master->num_chipselect = sci->num_cs;
master->dma_alignment = 8;
@@ -1347,6 +1131,8 @@ static int s3c64xx_spi_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->auto_runtime_pm = true;
+ if (!is_polling(sdd))
+ master->can_dma = s3c64xx_spi_can_dma;
sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
if (IS_ERR(sdd->regs)) {
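With master->can_dma set, the SPI core DMA-maps each transfer's buffers into xfer->tx_sg/rx_sg before transfer_one runs, which is why the s3c64xx conversion above can drop its own dma_map_single() bookkeeping and feed the scatterlist straight to dmaengine_prep_slave_sg(). A minimal sketch of the two halves, with hypothetical foo_* names and an assumed FIFO threshold:

static bool foo_spi_can_dma(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *xfer)
{
	/* have the core map only transfers too large for the FIFO (assumed) */
	return xfer->len > 64;
}

static void foo_spi_start_rx_dma(struct dma_chan *chan,
				 struct spi_transfer *xfer,
				 dma_async_tx_callback done, void *done_arg)
{
	struct dma_async_tx_descriptor *desc;

	/* xfer->rx_sg has already been mapped by the SPI core */
	desc = dmaengine_prep_slave_sg(chan, xfer->rx_sg.sgl, xfer->rx_sg.nents,
				       DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return;

	desc->callback = done;
	desc->callback_param = done_arg;
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
}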
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
index 121c2e1dea36..237f2e7a7179 100644
--- a/drivers/spi/spi-sc18is602.c
+++ b/drivers/spi/spi-sc18is602.c
@@ -183,17 +183,9 @@ static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
static int sc18is602_check_transfer(struct spi_device *spi,
struct spi_transfer *t, int tlen)
{
- uint32_t hz;
-
if (t && t->len + tlen > SC18IS602_BUFSIZ)
return -EINVAL;
- hz = spi->max_speed_hz;
- if (t && t->speed_hz)
- hz = t->speed_hz;
- if (hz == 0)
- return -EINVAL;
-
return 0;
}
@@ -205,22 +197,15 @@ static int sc18is602_transfer_one(struct spi_master *master,
struct spi_transfer *t;
int status = 0;
- /* SC18IS602 does not support CS2 */
- if (hw->id == sc18is602 && spi->chip_select == 2) {
- status = -ENXIO;
- goto error;
- }
-
hw->tlen = 0;
list_for_each_entry(t, &m->transfers, transfer_list) {
- u32 hz = t->speed_hz ? : spi->max_speed_hz;
bool do_transfer;
status = sc18is602_check_transfer(spi, t, hw->tlen);
if (status < 0)
break;
- status = sc18is602_setup_transfer(hw, hz, spi->mode);
+ status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
if (status < 0)
break;
@@ -238,7 +223,6 @@ static int sc18is602_transfer_one(struct spi_master *master,
if (t->delay_usecs)
udelay(t->delay_usecs);
}
-error:
m->status = status;
spi_finalize_current_message(master);
@@ -247,10 +231,13 @@ error:
static int sc18is602_setup(struct spi_device *spi)
{
- if (spi->mode & ~(SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST))
- return -EINVAL;
+ struct sc18is602 *hw = spi_master_get_devdata(spi->master);
- return sc18is602_check_transfer(spi, NULL, 0);
+ /* SC18IS602 does not support CS2 */
+ if (hw->id == sc18is602 && spi->chip_select == 2)
+ return -ENXIO;
+
+ return 0;
}
static int sc18is602_probe(struct i2c_client *client,
@@ -309,6 +296,8 @@ static int sc18is602_probe(struct i2c_client *client,
master->setup = sc18is602_setup;
master->transfer_one_message = sc18is602_transfer_one;
master->dev.of_node = np;
+ master->min_speed_hz = hw->freq / 128;
+ master->max_speed_hz = hw->freq / 4;
error = devm_spi_register_master(dev, master);
if (error)
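The sc18is602 simplification above works because the core now validates every transfer before the driver sees it: t->speed_hz is defaulted and checked against master->min_speed_hz/max_speed_hz, and t->bits_per_word is defaulted likewise, so a message handler can trust the values it is given. A minimal handler sketch under that assumption (hypothetical foo_* names, foo_do_transfer() assumed):

static int foo_transfer_one_message(struct spi_master *master,
				    struct spi_message *m)
{
	struct spi_transfer *t;
	int status = 0;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		/* t->speed_hz is already defaulted/clamped by the core */
		status = foo_do_transfer(m->spi, t);
		if (status)
			break;
	}

	m->status = status;
	spi_finalize_current_message(master);
	return status;
}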
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
index 82d2f922ffa0..9009456bdf4d 100644
--- a/drivers/spi/spi-sh-hspi.c
+++ b/drivers/spi/spi-sh-hspi.c
@@ -46,8 +46,6 @@
/* SPSR */
#define RXFL (1 << 2)
-#define hspi2info(h) (h->dev->platform_data)
-
struct hspi_priv {
void __iomem *addr;
struct spi_master *master;
@@ -113,14 +111,9 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
{
struct spi_device *spi = msg->spi;
struct device *dev = hspi->dev;
- u32 target_rate;
u32 spcr, idiv_clk;
u32 rate, best_rate, min, tmp;
- target_rate = t ? t->speed_hz : 0;
- if (!target_rate)
- target_rate = spi->max_speed_hz;
-
/*
* find best IDIV/CLKCx settings
*/
@@ -140,7 +133,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
rate /= (((idiv_clk & 0x1F) + 1) * 2);
/* save best settings */
- tmp = abs(target_rate - rate);
+ tmp = abs(t->speed_hz - rate);
if (tmp < min) {
min = tmp;
spcr = idiv_clk;
@@ -153,7 +146,7 @@ static void hspi_hw_setup(struct hspi_priv *hspi,
if (spi->mode & SPI_CPOL)
spcr |= 1 << 6;
- dev_dbg(dev, "speed %d/%d\n", target_rate, best_rate);
+ dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
hspi_write(hspi, SPCR, spcr);
hspi_write(hspi, SPSR, 0x0);
@@ -230,29 +223,6 @@ static int hspi_transfer_one_message(struct spi_master *master,
return ret;
}
-static int hspi_setup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- if (8 != spi->bits_per_word) {
- dev_err(dev, "bits_per_word should be 8\n");
- return -EIO;
- }
-
- dev_dbg(dev, "%s setup\n", spi->modalias);
-
- return 0;
-}
-
-static void hspi_cleanup(struct spi_device *spi)
-{
- struct hspi_priv *hspi = spi_master_get_devdata(spi->master);
- struct device *dev = hspi->dev;
-
- dev_dbg(dev, "%s cleanup\n", spi->modalias);
-}
-
static int hspi_probe(struct platform_device *pdev)
{
struct resource *res;
@@ -298,22 +268,23 @@ static int hspi_probe(struct platform_device *pdev)
pm_runtime_enable(&pdev->dev);
- master->num_chipselect = 1;
master->bus_num = pdev->id;
- master->setup = hspi_setup;
- master->cleanup = hspi_cleanup;
master->mode_bits = SPI_CPOL | SPI_CPHA;
master->dev.of_node = pdev->dev.of_node;
master->auto_runtime_pm = true;
master->transfer_one_message = hspi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
ret = devm_spi_register_master(&pdev->dev, master);
if (ret < 0) {
dev_err(&pdev->dev, "spi_register_master error.\n");
- goto error1;
+ goto error2;
}
return 0;
+ error2:
+ pm_runtime_disable(&pdev->dev);
error1:
clk_put(clk);
error0:
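The hspi cleanup above replaces the setup()-time bits_per_word check with master->bits_per_word_mask, which the core consults when validating each transfer, so the driver only declares the word sizes its hardware can handle. Sketched assignments (SPI_BPW_MASK(n) is simply BIT(n - 1)):

	/* 8-bit-only controller, as for hspi */
	master->bits_per_word_mask = SPI_BPW_MASK(8);

	/* a controller handling any width from 8 to 32 bits could instead use */
	master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);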
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 81cc02f5f9b0..e850d03e7190 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -15,59 +15,108 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/sh_msiof.h>
#include <linux/spi/spi.h>
-#include <linux/spi/spi_bitbang.h>
#include <asm/unaligned.h>
+
+struct sh_msiof_chipdata {
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 master_flags;
+};
+
struct sh_msiof_spi_priv {
- struct spi_bitbang bitbang; /* must be first for spi_bitbang.c */
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
+ const struct sh_msiof_chipdata *chipdata;
struct sh_msiof_spi_info *info;
struct completion done;
- unsigned long flags;
int tx_fifo_size;
int rx_fifo_size;
};
-#define TMDR1 0x00
-#define TMDR2 0x04
-#define TMDR3 0x08
-#define RMDR1 0x10
-#define RMDR2 0x14
-#define RMDR3 0x18
-#define TSCR 0x20
-#define RSCR 0x22
-#define CTR 0x28
-#define FCTR 0x30
-#define STR 0x40
-#define IER 0x44
-#define TDR1 0x48
-#define TDR2 0x4c
-#define TFDR 0x50
-#define RDR1 0x58
-#define RDR2 0x5c
-#define RFDR 0x60
-
-#define CTR_TSCKE (1 << 15)
-#define CTR_TFSE (1 << 14)
-#define CTR_TXE (1 << 9)
-#define CTR_RXE (1 << 8)
-
-#define STR_TEOF (1 << 23)
-#define STR_REOF (1 << 7)
+#define TMDR1 0x00 /* Transmit Mode Register 1 */
+#define TMDR2 0x04 /* Transmit Mode Register 2 */
+#define TMDR3 0x08 /* Transmit Mode Register 3 */
+#define RMDR1 0x10 /* Receive Mode Register 1 */
+#define RMDR2 0x14 /* Receive Mode Register 2 */
+#define RMDR3 0x18 /* Receive Mode Register 3 */
+#define TSCR 0x20 /* Transmit Clock Select Register */
+#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define CTR 0x28 /* Control Register */
+#define FCTR 0x30 /* FIFO Control Register */
+#define STR 0x40 /* Status Register */
+#define IER 0x44 /* Interrupt Enable Register */
+#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define TFDR 0x50 /* Transmit FIFO Data Register */
+#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define RFDR 0x60 /* Receive FIFO Data Register */
+
+/* TMDR1 and RMDR1 */
+#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
+#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
+#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+/* TMDR1 */
+#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+
+/* TMDR2 and RMDR2 */
+#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
+#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+
+/* TSCR and RSCR */
+#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS(i) (((i) - 1) << 8)
+#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0x0000
+#define SCR_BRDV_DIV_4 0x0001
+#define SCR_BRDV_DIV_8 0x0002
+#define SCR_BRDV_DIV_16 0x0003
+#define SCR_BRDV_DIV_32 0x0004
+#define SCR_BRDV_DIV_1 0x0007
+
+/* CTR */
+#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
+#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
+#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
+#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE 0x00000200 /* Transmit Enable */
+#define CTR_RXE 0x00000100 /* Receive Enable */
+
+/* STR and IER */
+#define STR_TEOF 0x00800000 /* Frame Transmission End */
+#define STR_REOF 0x00000080 /* Frame Reception End */
+
static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
{
@@ -131,22 +180,21 @@ static struct {
unsigned short div;
unsigned short scr;
} const sh_msiof_spi_clk_table[] = {
- { 1, 0x0007 },
- { 2, 0x0000 },
- { 4, 0x0001 },
- { 8, 0x0002 },
- { 16, 0x0003 },
- { 32, 0x0004 },
- { 64, 0x1f00 },
- { 128, 0x1f01 },
- { 256, 0x1f02 },
- { 512, 0x1f03 },
- { 1024, 0x1f04 },
+ { 1, SCR_BRPS( 1) | SCR_BRDV_DIV_1 },
+ { 2, SCR_BRPS( 1) | SCR_BRDV_DIV_2 },
+ { 4, SCR_BRPS( 1) | SCR_BRDV_DIV_4 },
+ { 8, SCR_BRPS( 1) | SCR_BRDV_DIV_8 },
+ { 16, SCR_BRPS( 1) | SCR_BRDV_DIV_16 },
+ { 32, SCR_BRPS( 1) | SCR_BRDV_DIV_32 },
+ { 64, SCR_BRPS(32) | SCR_BRDV_DIV_2 },
+ { 128, SCR_BRPS(32) | SCR_BRDV_DIV_4 },
+ { 256, SCR_BRPS(32) | SCR_BRDV_DIV_8 },
+ { 512, SCR_BRPS(32) | SCR_BRDV_DIV_16 },
+ { 1024, SCR_BRPS(32) | SCR_BRDV_DIV_32 },
};
static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
- unsigned long parent_rate,
- unsigned long spi_hz)
+ unsigned long parent_rate, u32 spi_hz)
{
unsigned long div = 1024;
size_t k;
@@ -164,7 +212,8 @@ static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_clk_table) - 1);
sh_msiof_write(p, TSCR, sh_msiof_spi_clk_table[k].scr);
- sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
+ if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ sh_msiof_write(p, RSCR, sh_msiof_spi_clk_table[k].scr);
}
static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
@@ -183,21 +232,25 @@ static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
*/
sh_msiof_write(p, FCTR, 0);
- tmp = 0;
- tmp |= !cs_high << 25;
- tmp |= lsb_first << 24;
- sh_msiof_write(p, TMDR1, 0xe0000005 | tmp);
- sh_msiof_write(p, RMDR1, 0x20000005 | tmp);
+ tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
+ tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
+ /* These bits are reserved if RX needs TX */
+ tmp &= ~0x0000ffff;
+ }
+ sh_msiof_write(p, RMDR1, tmp);
- tmp = 0xa0000000;
- tmp |= cpol << 30; /* TSCKIZ */
- tmp |= cpol << 28; /* RSCKIZ */
+ tmp = 0;
+ tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
+ tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
edge = cpol ^ !cpha;
- tmp |= edge << 27; /* TEDG */
- tmp |= edge << 26; /* REDG */
- tmp |= (tx_hi_z ? 2 : 0) << 22; /* TXDIZ */
+ tmp |= edge << CTR_TEDG_SHIFT;
+ tmp |= edge << CTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
sh_msiof_write(p, CTR, tmp);
}
@@ -205,12 +258,12 @@ static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
const void *tx_buf, void *rx_buf,
u32 bits, u32 words)
{
- u32 dr2 = ((bits - 1) << 24) | ((words - 1) << 16);
+ u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
- if (tx_buf)
+ if (tx_buf || (p->chipdata->master_flags & SPI_MASTER_MUST_TX))
sh_msiof_write(p, TMDR2, dr2);
else
- sh_msiof_write(p, TMDR2, dr2 | 1);
+ sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
if (rx_buf)
sh_msiof_write(p, RMDR2, dr2);
@@ -363,77 +416,45 @@ static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
}
-static int sh_msiof_spi_bits(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_spi_setup(struct spi_device *spi)
{
- int bits;
-
- bits = t ? t->bits_per_word : 0;
- if (!bits)
- bits = spi->bits_per_word;
- return bits;
-}
-
-static unsigned long sh_msiof_spi_hz(struct spi_device *spi,
- struct spi_transfer *t)
-{
- unsigned long hz;
-
- hz = t ? t->speed_hz : 0;
- if (!hz)
- hz = spi->max_speed_hz;
- return hz;
-}
+ struct device_node *np = spi->master->dev.of_node;
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
-static int sh_msiof_spi_setup_transfer(struct spi_device *spi,
- struct spi_transfer *t)
-{
- int bits;
+ if (!np) {
+ /*
+ * Use spi->controller_data for CS (same strategy as spi_gpio),
+ * if any. otherwise let HW control CS
+ */
+ spi->cs_gpio = (uintptr_t)spi->controller_data;
+ }
- /* noting to check hz values against since parent clock is disabled */
+ /* Configure pins before deasserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
- bits = sh_msiof_spi_bits(spi, t);
- if (bits < 8)
- return -EINVAL;
- if (bits > 32)
- return -EINVAL;
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
- return spi_bitbang_setup_transfer(spi, t);
+ return 0;
}
-static void sh_msiof_spi_chipselect(struct spi_device *spi, int is_on)
+static int sh_msiof_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
- int value;
-
- /* chip select is active low unless SPI_CS_HIGH is set */
- if (spi->mode & SPI_CS_HIGH)
- value = (is_on == BITBANG_CS_ACTIVE) ? 1 : 0;
- else
- value = (is_on == BITBANG_CS_ACTIVE) ? 0 : 1;
-
- if (is_on == BITBANG_CS_ACTIVE) {
- if (!test_and_set_bit(0, &p->flags)) {
- pm_runtime_get_sync(&p->pdev->dev);
- clk_enable(p->clk);
- }
-
- /* Configure pins before asserting CS */
- sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
- !!(spi->mode & SPI_CPHA),
- !!(spi->mode & SPI_3WIRE),
- !!(spi->mode & SPI_LSB_FIRST),
- !!(spi->mode & SPI_CS_HIGH));
- }
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ const struct spi_device *spi = msg->spi;
- /* use spi->controller data for CS (same strategy as spi_gpio) */
- gpio_set_value((uintptr_t)spi->controller_data, value);
-
- if (is_on == BITBANG_CS_INACTIVE) {
- if (test_and_clear_bit(0, &p->flags)) {
- clk_disable(p->clk);
- pm_runtime_put(&p->pdev->dev);
- }
- }
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
+ return 0;
}
static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
@@ -487,7 +508,7 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
/* clear status bits */
sh_msiof_reset_str(p);
- /* shut down frame, tx/tx and clock signals */
+ /* shut down frame, rx/tx and clock signals */
ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
ret = ret ? ret : sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
if (rx_buf)
@@ -505,9 +526,11 @@ static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
return ret;
}
-static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+static int sh_msiof_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
{
- struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
int bits;
@@ -517,7 +540,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
int n;
bool swab;
- bits = sh_msiof_spi_bits(spi, t);
+ bits = t->bits_per_word;
if (bits <= 8 && t->len > 15 && !(t->len & 3)) {
bits = 32;
@@ -567,8 +590,7 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
}
/* setup clocks (clock already enabled in chipselect()) */
- sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk),
- sh_msiof_spi_hz(spi, t));
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
/* transfer in fifo sized chunks */
words = t->len / bytes_per_word;
@@ -588,22 +610,36 @@ static int sh_msiof_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
words -= n;
}
- return bytes_done;
-}
-
-static u32 sh_msiof_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
- u32 word, u8 bits)
-{
- BUG(); /* unused but needed by bitbang code */
return 0;
}
+static const struct sh_msiof_chipdata sh_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .master_flags = 0,
+};
+
+static const struct sh_msiof_chipdata r8a779x_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .master_flags = SPI_MASTER_MUST_TX,
+};
+
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-msiof", .data = &sh_data },
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+
#ifdef CONFIG_OF
static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
{
struct sh_msiof_spi_info *info;
struct device_node *np = dev->of_node;
- u32 num_cs = 0;
+ u32 num_cs = 1;
info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
if (!info) {
@@ -633,6 +669,7 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
{
struct resource *r;
struct spi_master *master;
+ const struct of_device_id *of_id;
struct sh_msiof_spi_priv *p;
int i;
int ret;
@@ -646,10 +683,15 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
p = spi_master_get_devdata(master);
platform_set_drvdata(pdev, p);
- if (pdev->dev.of_node)
+
+ of_id = of_match_device(sh_msiof_match, &pdev->dev);
+ if (of_id) {
+ p->chipdata = of_id->data;
p->info = sh_msiof_spi_parse_dt(&pdev->dev);
- else
+ } else {
+ p->chipdata = (const void *)pdev->id_entry->driver_data;
p->info = dev_get_platdata(&pdev->dev);
+ }
if (!p->info) {
dev_err(&pdev->dev, "failed to obtain device info\n");
@@ -687,49 +729,40 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
goto err1;
}
- ret = clk_prepare(p->clk);
- if (ret < 0) {
- dev_err(&pdev->dev, "unable to prepare clock\n");
- goto err1;
- }
-
p->pdev = pdev;
pm_runtime_enable(&pdev->dev);
- /* The standard version of MSIOF use 64 word FIFOs */
- p->tx_fifo_size = 64;
- p->rx_fifo_size = 64;
-
/* Platform data may override FIFO sizes */
+ p->tx_fifo_size = p->chipdata->tx_fifo_size;
+ p->rx_fifo_size = p->chipdata->rx_fifo_size;
if (p->info->tx_fifo_override)
p->tx_fifo_size = p->info->tx_fifo_override;
if (p->info->rx_fifo_override)
p->rx_fifo_size = p->info->rx_fifo_override;
- /* init master and bitbang code */
+ /* init master code */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
- master->flags = 0;
+ master->flags = p->chipdata->master_flags;
master->bus_num = pdev->id;
+ master->dev.of_node = pdev->dev.of_node;
master->num_chipselect = p->info->num_chipselect;
- master->setup = spi_bitbang_setup;
- master->cleanup = spi_bitbang_cleanup;
-
- p->bitbang.master = master;
- p->bitbang.chipselect = sh_msiof_spi_chipselect;
- p->bitbang.setup_transfer = sh_msiof_spi_setup_transfer;
- p->bitbang.txrx_bufs = sh_msiof_spi_txrx;
- p->bitbang.txrx_word[SPI_MODE_0] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_1] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_2] = sh_msiof_spi_txrx_word;
- p->bitbang.txrx_word[SPI_MODE_3] = sh_msiof_spi_txrx_word;
-
- ret = spi_bitbang_start(&p->bitbang);
- if (ret == 0)
- return 0;
+ master->setup = sh_msiof_spi_setup;
+ master->prepare_message = sh_msiof_prepare_message;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->auto_runtime_pm = true;
+ master->transfer_one = sh_msiof_transfer_one;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto err2;
+ }
+ return 0;
+
+ err2:
pm_runtime_disable(&pdev->dev);
- clk_unprepare(p->clk);
err1:
spi_master_put(master);
return ret;
@@ -737,30 +770,22 @@ static int sh_msiof_spi_probe(struct platform_device *pdev)
static int sh_msiof_spi_remove(struct platform_device *pdev)
{
- struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
- int ret;
-
- ret = spi_bitbang_stop(&p->bitbang);
- if (!ret) {
- pm_runtime_disable(&pdev->dev);
- clk_unprepare(p->clk);
- spi_master_put(p->bitbang.master);
- }
- return ret;
+ pm_runtime_disable(&pdev->dev);
+ return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id sh_msiof_match[] = {
- { .compatible = "renesas,sh-msiof", },
- { .compatible = "renesas,sh-mobile-msiof", },
+static struct platform_device_id spi_driver_ids[] = {
+ { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
+ { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
{},
};
-MODULE_DEVICE_TABLE(of, sh_msiof_match);
-#endif
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
+ .id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
.owner = THIS_MODULE,
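The sh-msiof rework above drops the spi_bitbang layer in favour of the core message queue: setup() captures the per-device pin/CS configuration, prepare_message() reprograms it per message, and transfer_one() only moves a single spi_transfer while the core handles chip select, runtime PM and message completion. A minimal transfer_one sketch under those assumptions, with hypothetical foo_* names and helpers:

static int foo_transfer_one(struct spi_master *master, struct spi_device *spi,
			    struct spi_transfer *t)
{
	struct foo_priv *p = spi_master_get_devdata(master);

	/* the core has already asserted CS and validated speed/word size */
	foo_hw_set_clock(p, t->speed_hz);			/* hypothetical helper */
	return foo_hw_txrx(p, t->tx_buf, t->rx_buf, t->len);	/* hypothetical helper */
}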
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 38eb24df796c..8b44b71f5024 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -14,7 +14,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
@@ -109,7 +108,7 @@ static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
{
struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
- if (sp->info && sp->info->chip_select)
+ if (sp->info->chip_select)
(sp->info->chip_select)(sp->info, dev->chip_select, value);
}
@@ -131,6 +130,11 @@ static int sh_sci_spi_probe(struct platform_device *dev)
platform_set_drvdata(dev, sp);
sp->info = dev_get_platdata(&dev->dev);
+ if (!sp->info) {
+ dev_err(&dev->dev, "platform data is missing\n");
+ ret = -ENOENT;
+ goto err1;
+ }
/* setup spi bitbang adaptor */
sp->bitbang.master = master;
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
index 413c71843492..6be661e4c6e1 100644
--- a/drivers/spi/spi-tegra114.c
+++ b/drivers/spi/spi-tegra114.c
@@ -23,7 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -172,7 +171,6 @@ struct tegra_spi_data {
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -761,11 +759,6 @@ static int tegra_spi_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
-
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
@@ -1019,16 +1012,6 @@ static irqreturn_t tegra_spi_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
-static void tegra_spi_parse_dt(struct platform_device *pdev,
- struct tegra_spi_data *tspi)
-{
- struct device_node *np = pdev->dev.of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tspi->spi_max_frequency))
- tspi->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static struct of_device_id tegra_spi_of_match[] = {
{ .compatible = "nvidia,tegra114-spi", },
{}
@@ -1050,15 +1033,15 @@ static int tegra_spi_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
- /* Parse DT */
- tegra_spi_parse_dt(pdev, tspi);
+ if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
master->setup = tegra_spi_setup;
master->transfer_one_message = tegra_spi_transfer_one_message;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
master->auto_runtime_pm = true;
tspi->master = master;
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
index 08794977f21a..47869ea636e1 100644
--- a/drivers/spi/spi-tegra20-sflash.c
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -22,7 +22,6 @@
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -121,7 +120,6 @@ struct tegra_sflash_data {
struct reset_control *rst;
void __iomem *base;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -315,15 +313,6 @@ static int tegra_sflash_start_transfer_one(struct spi_device *spi,
return tegra_sflash_start_cpu_based_transfer(tsd, t);
}
-static int tegra_sflash_setup(struct spi_device *spi)
-{
- struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tsd->spi_max_frequency;
- return 0;
-}
-
static int tegra_sflash_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
@@ -430,15 +419,6 @@ static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
return handle_cpu_based_xfer(tsd);
}
-static void tegra_sflash_parse_dt(struct tegra_sflash_data *tsd)
-{
- struct device_node *np = tsd->dev->of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tsd->spi_max_frequency))
- tsd->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static struct of_device_id tegra_sflash_of_match[] = {
{ .compatible = "nvidia,tegra20-sflash", },
{}
@@ -467,11 +447,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
/* the spi->mode bits understood by this driver: */
master->mode_bits = SPI_CPOL | SPI_CPHA;
- master->setup = tegra_sflash_setup;
master->transfer_one_message = tegra_sflash_transfer_one_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
platform_set_drvdata(pdev, master);
tsd = spi_master_get_devdata(master);
@@ -479,7 +457,9 @@ static int tegra_sflash_probe(struct platform_device *pdev)
tsd->dev = &pdev->dev;
spin_lock_init(&tsd->lock);
- tegra_sflash_parse_dt(tsd);
+ if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
tsd->base = devm_ioremap_resource(&pdev->dev, r);
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index be3a069879c3..e3c1b93e45d1 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -23,7 +23,6 @@
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/err.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
@@ -171,7 +170,6 @@ struct tegra_slink_data {
void __iomem *base;
phys_addr_t phys;
unsigned irq;
- u32 spi_max_frequency;
u32 cur_speed;
struct spi_device *cur_spi;
@@ -761,10 +759,6 @@ static int tegra_slink_setup(struct spi_device *spi)
spi->mode & SPI_CPHA ? "" : "~",
spi->max_speed_hz);
- BUG_ON(spi->chip_select >= MAX_CHIP_SELECT);
-
- /* Set speed to the spi max fequency if spi device has not set */
- spi->max_speed_hz = spi->max_speed_hz ? : tspi->spi_max_frequency;
ret = pm_runtime_get_sync(tspi->dev);
if (ret < 0) {
dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
@@ -999,15 +993,6 @@ static irqreturn_t tegra_slink_isr(int irq, void *context_data)
return IRQ_WAKE_THREAD;
}
-static void tegra_slink_parse_dt(struct tegra_slink_data *tspi)
-{
- struct device_node *np = tspi->dev->of_node;
-
- if (of_property_read_u32(np, "spi-max-frequency",
- &tspi->spi_max_frequency))
- tspi->spi_max_frequency = 25000000; /* 25MHz */
-}
-
static const struct tegra_slink_chip_data tegra30_spi_cdata = {
.cs_hold_time = true,
};
@@ -1053,7 +1038,6 @@ static int tegra_slink_probe(struct platform_device *pdev)
master->unprepare_message = tegra_slink_unprepare_message;
master->auto_runtime_pm = true;
master->num_chipselect = MAX_CHIP_SELECT;
- master->bus_num = -1;
platform_set_drvdata(pdev, master);
tspi = spi_master_get_devdata(master);
@@ -1062,7 +1046,9 @@ static int tegra_slink_probe(struct platform_device *pdev)
tspi->chip_data = cdata;
spin_lock_init(&tspi->lock);
- tegra_slink_parse_dt(tspi);
+ if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!r) {
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
index 3d09265b5133..49ddfc7f12b1 100644
--- a/drivers/spi/spi-ti-qspi.c
+++ b/drivers/spi/spi-ti-qspi.c
@@ -429,13 +429,13 @@ static int ti_qspi_probe(struct platform_device *pdev)
master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
- master->bus_num = -1;
master->flags = SPI_MASTER_HALF_DUPLEX;
master->setup = ti_qspi_setup;
master->auto_runtime_pm = true;
master->transfer_one_message = ti_qspi_start_transfer_one;
master->dev.of_node = pdev->dev.of_node;
- master->bits_per_word_mask = BIT(32 - 1) | BIT(16 - 1) | BIT(8 - 1);
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
if (!of_property_read_u32(np, "num-cs", &num_cs))
master->num_chipselect = num_cs;
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
index 2e7f38c7a961..88eb57e858b3 100644
--- a/drivers/spi/spi-topcliff-pch.c
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -915,7 +915,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Tx DMA */
param = &dma->param_tx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */
+ param->chan_id = data->ch * 2; /* Tx = 0, 2 */
param->tx_reg = data->io_base_addr + PCH_SPDWR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -930,7 +930,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
/* Set Rx DMA */
param = &dma->param_rx;
param->dma_dev = &dma_dev->dev;
- param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */
+ param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
param->rx_reg = data->io_base_addr + PCH_SPDRR;
param->width = width;
chan = dma_request_channel(mask, pch_spi_filter, param);
@@ -1452,6 +1452,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
pch_spi_set_master_mode(master);
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
+
ret = spi_register_master(master);
if (ret != 0) {
dev_err(&plat_dev->dev,
@@ -1459,14 +1464,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev)
goto err_spi_register_master;
}
- if (use_dma) {
- dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
- pch_alloc_dma_buf(board_dat, data);
- }
-
return 0;
err_spi_register_master:
+ pch_free_dma_buf(board_dat, data);
free_irq(board_dat->pdev->irq, data);
err_request_irq:
pch_spi_free_resources(board_dat, data);
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
index 24c40b13dab1..350a76b7e8d4 100644
--- a/drivers/spi/spi-xcomm.c
+++ b/drivers/spi/spi-xcomm.c
@@ -8,7 +8,6 @@
*/
#include <linux/kernel.h>
-#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
index 6d4ce4615163..e6cd1112ae40 100644
--- a/drivers/spi/spi-xilinx.c
+++ b/drivers/spi/spi-xilinx.c
@@ -14,7 +14,6 @@
*/
#include <linux/module.h>
-#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/platform_device.h>
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 23756b0f9036..ffc1a2ebc4ca 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -24,6 +24,8 @@
#include <linux/device.h>
#include <linux/init.h>
#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
#include <linux/mutex.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
@@ -255,13 +257,12 @@ EXPORT_SYMBOL_GPL(spi_bus_type);
static int spi_drv_probe(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- struct spi_device *spi = to_spi_device(dev);
int ret;
- acpi_dev_pm_attach(&spi->dev, true);
- ret = sdrv->probe(spi);
+ acpi_dev_pm_attach(dev, true);
+ ret = sdrv->probe(to_spi_device(dev));
if (ret)
- acpi_dev_pm_detach(&spi->dev, true);
+ acpi_dev_pm_detach(dev, true);
return ret;
}
@@ -269,11 +270,10 @@ static int spi_drv_probe(struct device *dev)
static int spi_drv_remove(struct device *dev)
{
const struct spi_driver *sdrv = to_spi_driver(dev->driver);
- struct spi_device *spi = to_spi_device(dev);
int ret;
- ret = sdrv->remove(spi);
- acpi_dev_pm_detach(&spi->dev, true);
+ ret = sdrv->remove(to_spi_device(dev));
+ acpi_dev_pm_detach(dev, true);
return ret;
}
@@ -580,6 +580,169 @@ static void spi_set_cs(struct spi_device *spi, bool enable)
spi->master->set_cs(spi, !enable);
}
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ struct page *vm_page;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < sgs; i++) {
+ min = min_t(size_t, len, desc_len);
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_buf = page_address(vm_page) +
+ ((size_t)buf & ~PAGE_MASK);
+ } else {
+ sg_buf = buf;
+ }
+
+ sg_set_buf(&sgt->sgl[i], sg_buf, min);
+
+ buf += min;
+ len -= min;
+ }
+
+ ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ sgt->nents = ret;
+
+ return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ sg_free_table(sgt);
+ }
+}
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+ int ret;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ if (!master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (ret != 0) {
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE);
+ return ret;
+ }
+ }
+ }
+
+ master->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *tx_dev, *rx_dev;
+
+ if (!master->cur_msg_mapped || !master->can_dma)
+ return 0;
+
+ tx_dev = &master->dma_tx->dev->device;
+ rx_dev = &master->dma_rx->dev->device;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+
/*
* spi_transfer_one_message - Default implementation of transfer_one_message()
*
@@ -591,7 +754,6 @@ static int spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_transfer *xfer;
- bool cur_cs = true;
bool keep_cs = false;
int ret = 0;
@@ -627,8 +789,9 @@ static int spi_transfer_one_message(struct spi_master *master,
&msg->transfers)) {
keep_cs = true;
} else {
- cur_cs = !cur_cs;
- spi_set_cs(msg->spi, cur_cs);
+ spi_set_cs(msg->spi, false);
+ udelay(10);
+ spi_set_cs(msg->spi, true);
}
}
@@ -686,6 +849,10 @@ static void spi_pump_messages(struct kthread_work *work)
}
master->busy = false;
spin_unlock_irqrestore(&master->queue_lock, flags);
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+ master->dummy_tx = NULL;
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
@@ -752,14 +919,19 @@ static void spi_pump_messages(struct kthread_work *work)
master->cur_msg_prepared = true;
}
- ret = master->transfer_one_message(master, master->cur_msg);
+ ret = spi_map_msg(master, master->cur_msg);
if (ret) {
- dev_err(&master->dev,
- "failed to transfer one message from queue: %d\n", ret);
master->cur_msg->status = ret;
spi_finalize_current_message(master);
return;
}
+
+ ret = master->transfer_one_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to transfer one message from queue\n");
+ return;
+ }
}
static int spi_init_queue(struct spi_master *master)
@@ -841,6 +1013,8 @@ void spi_finalize_current_message(struct spi_master *master)
queue_kthread_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
+ spi_unmap_msg(master, mesg);
+
if (master->cur_msg_prepared && master->unprepare_message) {
ret = master->unprepare_message(master, mesg);
if (ret) {
@@ -894,7 +1068,7 @@ static int spi_stop_queue(struct spi_master *master)
*/
while ((!list_empty(&master->queue) || master->busy) && limit--) {
spin_unlock_irqrestore(&master->queue_lock, flags);
- msleep(10);
+ usleep_range(10000, 11000);
spin_lock_irqsave(&master->queue_lock, flags);
}
@@ -1374,6 +1548,8 @@ int spi_register_master(struct spi_master *master)
mutex_init(&master->bus_lock_mutex);
master->bus_lock_flag = 0;
init_completion(&master->xfer_completion);
+ if (!master->max_dma_len)
+ master->max_dma_len = INT_MAX;
/* register the device, then userspace will see it.
* registration fails if the bus ID is in use.
@@ -1599,6 +1775,9 @@ int spi_setup(struct spi_device *spi)
if (!spi->bits_per_word)
spi->bits_per_word = 8;
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = spi->master->max_speed_hz;
+
if (spi->master->setup)
status = spi->master->setup(spi);
@@ -1619,11 +1798,10 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
struct spi_transfer *xfer;
+ int w_size;
if (list_empty(&message->transfers))
return -EINVAL;
- if (!message->complete)
- return -EINVAL;
/* Half-duplex links include original MicroWire, and ones with
* only one data pin like SPI_3WIRE (switches direction) or where
@@ -1654,12 +1832,13 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
message->frame_length += xfer->len;
if (!xfer->bits_per_word)
xfer->bits_per_word = spi->bits_per_word;
- if (!xfer->speed_hz) {
+
+ if (!xfer->speed_hz)
xfer->speed_hz = spi->max_speed_hz;
- if (master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- xfer->speed_hz = master->max_speed_hz;
- }
+
+ if (master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
if (master->bits_per_word_mask) {
/* Only 32 bits fit in the mask */
@@ -1670,12 +1849,24 @@ static int __spi_validate(struct spi_device *spi, struct spi_message *message)
return -EINVAL;
}
+ /*
+ * SPI transfer length should be a multiple of the SPI word size,
+ * where the SPI word size is a power-of-two number of bytes
+ */
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+ else
+ w_size = 4;
+
+ /* No partial transfers accepted */
+ if (xfer->len % w_size)
+ return -EINVAL;
+
if (xfer->speed_hz && master->min_speed_hz &&
xfer->speed_hz < master->min_speed_hz)
return -EINVAL;
- if (xfer->speed_hz && master->max_speed_hz &&
- xfer->speed_hz > master->max_speed_hz)
- return -EINVAL;
if (xfer->tx_buf && !xfer->tx_nbits)
xfer->tx_nbits = SPI_NBITS_SINGLE;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index d7c6e36021e8..e3bc23bb5883 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -73,7 +73,8 @@ static DECLARE_BITMAP(minors, N_SPI_MINORS);
*/
#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
- | SPI_NO_CS | SPI_READY)
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
struct spidev_data {
dev_t devt;
@@ -265,6 +266,8 @@ static int spidev_message(struct spidev_data *spidev,
buf += k_tmp->len;
k_tmp->cs_change = !!u_tmp->cs_change;
+ k_tmp->tx_nbits = u_tmp->tx_nbits;
+ k_tmp->rx_nbits = u_tmp->rx_nbits;
k_tmp->bits_per_word = u_tmp->bits_per_word;
k_tmp->delay_usecs = u_tmp->delay_usecs;
k_tmp->speed_hz = u_tmp->speed_hz;
@@ -359,6 +362,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
retval = __put_user(spi->mode & SPI_MODE_MASK,
(__u8 __user *)arg);
break;
+ case SPI_IOC_RD_MODE32:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
case SPI_IOC_RD_LSB_FIRST:
retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
(__u8 __user *)arg);
@@ -372,9 +379,13 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
/* write requests */
case SPI_IOC_WR_MODE:
- retval = __get_user(tmp, (u8 __user *)arg);
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = __get_user(tmp, (u8 __user *)arg);
+ else
+ retval = __get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
retval = -EINVAL;
@@ -382,18 +393,18 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
tmp |= spi->mode & ~SPI_MODE_MASK;
- spi->mode = (u8)tmp;
+ spi->mode = (u16)tmp;
retval = spi_setup(spi);
if (retval < 0)
spi->mode = save;
else
- dev_dbg(&spi->dev, "spi mode %02x\n", tmp);
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
}
break;
case SPI_IOC_WR_LSB_FIRST:
retval = __get_user(tmp, (__u8 __user *)arg);
if (retval == 0) {
- u8 save = spi->mode;
+ u32 save = spi->mode;
if (tmp)
spi->mode |= SPI_LSB_FIRST;
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 23948f167012..713a97226787 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -295,21 +295,29 @@ static ssize_t ashmem_read(struct file *file, char __user *buf,
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0)
- goto out;
+ goto out_unlock;
if (!asma->file) {
ret = -EBADF;
- goto out;
+ goto out_unlock;
}
- ret = asma->file->f_op->read(asma->file, buf, len, pos);
- if (ret < 0)
- goto out;
+ mutex_unlock(&ashmem_mutex);
- /** Update backing file pos, since f_ops->read() doesn't */
- asma->file->f_pos = *pos;
+ /*
+ * asma and asma->file are used outside the lock here. We assume
+ * once asma->file is set it will never be changed, and will not
+ * be destroyed until all references to the file are dropped and
+ * ashmem_release is called.
+ */
+ ret = asma->file->f_op->read(asma->file, buf, len, pos);
+ if (ret >= 0) {
+ /** Update backing file pos, since f_ops->read() doesn't */
+ asma->file->f_pos = *pos;
+ }
+ return ret;
-out:
+out_unlock:
mutex_unlock(&ashmem_mutex);
return ret;
}
@@ -498,6 +506,7 @@ out:
static int set_name(struct ashmem_area *asma, void __user *name)
{
+ int len;
int ret = 0;
char local_name[ASHMEM_NAME_LEN];
@@ -510,21 +519,19 @@ static int set_name(struct ashmem_area *asma, void __user *name)
* variable that does not need protection and later copy the local
* variable to the structure member with lock held.
*/
- if (copy_from_user(local_name, name, ASHMEM_NAME_LEN))
- return -EFAULT;
-
+ len = strncpy_from_user(local_name, name, ASHMEM_NAME_LEN);
+ if (len < 0)
+ return len;
+ if (len == ASHMEM_NAME_LEN)
+ local_name[ASHMEM_NAME_LEN - 1] = '\0';
mutex_lock(&ashmem_mutex);
/* cannot change an existing mapping's name */
- if (unlikely(asma->file)) {
+ if (unlikely(asma->file))
ret = -EINVAL;
- goto out;
- }
- memcpy(asma->name + ASHMEM_NAME_PREFIX_LEN,
- local_name, ASHMEM_NAME_LEN);
- asma->name[ASHMEM_FULL_NAME_LEN-1] = '\0';
-out:
- mutex_unlock(&ashmem_mutex);
+ else
+ strcpy(asma->name + ASHMEM_NAME_PREFIX_LEN, local_name);
+ mutex_unlock(&ashmem_mutex);
return ret;
}
diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c
index eaec1dab7fe4..1432d956769c 100644
--- a/drivers/staging/android/binder.c
+++ b/drivers/staging/android/binder.c
@@ -2904,7 +2904,7 @@ static int binder_node_release(struct binder_node *node, int refs)
refs++;
if (!ref->death)
- goto out;
+ continue;
death++;
@@ -2917,7 +2917,6 @@ static int binder_node_release(struct binder_node *node, int refs)
BUG();
}
-out:
binder_debug(BINDER_DEBUG_DEAD_BINDER,
"node %d now dead, refs %d, death %d\n",
node->debug_id, refs, death);
diff --git a/drivers/staging/android/ion/compat_ion.c b/drivers/staging/android/ion/compat_ion.c
index af6cd370b30f..ee3a7380e53b 100644
--- a/drivers/staging/android/ion/compat_ion.c
+++ b/drivers/staging/android/ion/compat_ion.c
@@ -35,9 +35,14 @@ struct compat_ion_custom_data {
compat_ulong_t arg;
};
+struct compat_ion_handle_data {
+ compat_int_t handle;
+};
+
#define COMPAT_ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct compat_ion_allocation_data)
-#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
+#define COMPAT_ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, \
+ struct compat_ion_handle_data)
#define COMPAT_ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, \
struct compat_ion_custom_data)
@@ -64,6 +69,19 @@ static int compat_get_ion_allocation_data(
return err;
}
+static int compat_get_ion_handle_data(
+ struct compat_ion_handle_data __user *data32,
+ struct ion_handle_data __user *data)
+{
+ compat_int_t i;
+ int err;
+
+ err = get_user(i, &data32->handle);
+ err |= put_user(i, &data->handle);
+
+ return err;
+}
+
static int compat_put_ion_allocation_data(
struct compat_ion_allocation_data __user *data32,
struct ion_allocation_data __user *data)
@@ -132,8 +150,8 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
}
case COMPAT_ION_IOC_FREE:
{
- struct compat_ion_allocation_data __user *data32;
- struct ion_allocation_data __user *data;
+ struct compat_ion_handle_data __user *data32;
+ struct ion_handle_data __user *data;
int err;
data32 = compat_ptr(arg);
@@ -141,7 +159,7 @@ long compat_ion_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
if (data == NULL)
return -EFAULT;
- err = compat_get_ion_allocation_data(data32, data);
+ err = compat_get_ion_handle_data(data32, data);
if (err)
return err;
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 55b2002753f2..01cdc8aee898 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -17,9 +17,11 @@
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
+#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
+#include <linux/io.h>
#include "ion.h"
#include "ion_priv.h"
@@ -57,7 +59,7 @@ struct ion_platform_heap dummy_heaps[] = {
};
struct ion_platform_data dummy_ion_pdata = {
- .nr = 4,
+ .nr = ARRAY_SIZE(dummy_heaps),
.heaps = dummy_heaps,
};
@@ -69,7 +71,7 @@ static int __init ion_dummy_init(void)
heaps = kzalloc(sizeof(struct ion_heap *) * dummy_ion_pdata.nr,
GFP_KERNEL);
if (!heaps)
- return PTR_ERR(heaps);
+ return -ENOMEM;
/* Allocate a dummy carveout heap */
@@ -128,6 +130,7 @@ err:
}
return err;
}
+device_initcall(ion_dummy_init);
static void __exit ion_dummy_exit(void)
{
@@ -152,7 +155,4 @@ static void __exit ion_dummy_exit(void)
return;
}
-
-module_init(ion_dummy_init);
-module_exit(ion_dummy_exit);
-
+__exitcall(ion_dummy_exit);
diff --git a/drivers/staging/android/ion/ion_heap.c b/drivers/staging/android/ion/ion_heap.c
index 296c74f98dc0..37e64d51394c 100644
--- a/drivers/staging/android/ion/ion_heap.c
+++ b/drivers/staging/android/ion/ion_heap.c
@@ -243,12 +243,12 @@ int ion_heap_init_deferred_free(struct ion_heap *heap)
init_waitqueue_head(&heap->waitqueue);
heap->task = kthread_run(ion_heap_deferred_free, heap,
"%s", heap->name);
- sched_setscheduler(heap->task, SCHED_IDLE, &param);
if (IS_ERR(heap->task)) {
pr_err("%s: creating thread for deferred free failed\n",
__func__);
return PTR_RET(heap->task);
}
+ sched_setscheduler(heap->task, SCHED_IDLE, &param);
return 0;
}
diff --git a/drivers/staging/android/ion/ion_priv.h b/drivers/staging/android/ion/ion_priv.h
index d98673981cc4..fc2e4fccf69d 100644
--- a/drivers/staging/android/ion/ion_priv.h
+++ b/drivers/staging/android/ion/ion_priv.h
@@ -17,6 +17,7 @@
#ifndef _ION_PRIV_H
#define _ION_PRIV_H
+#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/kref.h>
#include <linux/mm_types.h>
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 7f0729130d65..9849f3963e75 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -124,6 +124,7 @@ static struct page_info *alloc_largest_available(struct ion_system_heap *heap,
info->page = page;
info->order = orders[i];
+ INIT_LIST_HEAD(&info->list);
return info;
}
kfree(info);
@@ -145,12 +146,15 @@ static int ion_system_heap_allocate(struct ion_heap *heap,
struct list_head pages;
struct page_info *info, *tmp_info;
int i = 0;
- long size_remaining = PAGE_ALIGN(size);
+ unsigned long size_remaining = PAGE_ALIGN(size);
unsigned int max_order = orders[0];
if (align > PAGE_SIZE)
return -EINVAL;
+ if (size / PAGE_SIZE > totalram_pages / 2)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&pages);
while (size_remaining > 0) {
info = alloc_largest_available(sys_heap, buffer, size_remaining,
diff --git a/drivers/staging/android/sw_sync.h b/drivers/staging/android/sw_sync.h
index 585040be5f18..5aaf71d6974b 100644
--- a/drivers/staging/android/sw_sync.h
+++ b/drivers/staging/android/sw_sync.h
@@ -35,10 +35,27 @@ struct sw_sync_pt {
u32 value;
};
+#if IS_ENABLED(CONFIG_SW_SYNC)
struct sw_sync_timeline *sw_sync_timeline_create(const char *name);
void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc);
struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj, u32 value);
+#else
+static inline struct sw_sync_timeline *sw_sync_timeline_create(const char *name)
+{
+ return NULL;
+}
+
+static inline void sw_sync_timeline_inc(struct sw_sync_timeline *obj, u32 inc)
+{
+}
+
+static inline struct sync_pt *sw_sync_pt_create(struct sw_sync_timeline *obj,
+ u32 value)
+{
+ return NULL;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
#endif /* __KERNEL __ */
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 38e5d3b5ed9b..3d05f662110b 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -79,27 +79,27 @@ static void sync_timeline_free(struct kref *kref)
container_of(kref, struct sync_timeline, kref);
unsigned long flags;
- if (obj->ops->release_obj)
- obj->ops->release_obj(obj);
-
spin_lock_irqsave(&sync_timeline_list_lock, flags);
list_del(&obj->sync_timeline_list);
spin_unlock_irqrestore(&sync_timeline_list_lock, flags);
+ if (obj->ops->release_obj)
+ obj->ops->release_obj(obj);
+
kfree(obj);
}
void sync_timeline_destroy(struct sync_timeline *obj)
{
obj->destroyed = true;
+ smp_wmb();
/*
- * If this is not the last reference, signal any children
- * that their parent is going away.
+ * signal any children that their parent is going away.
*/
+ sync_timeline_signal(obj);
- if (!kref_put(&obj->kref, sync_timeline_free))
- sync_timeline_signal(obj);
+ kref_put(&obj->kref, sync_timeline_free);
}
EXPORT_SYMBOL(sync_timeline_destroy);
diff --git a/drivers/staging/bcm/Bcmnet.c b/drivers/staging/bcm/Bcmnet.c
index 8dfdd2732bdc..95a2358267ba 100644
--- a/drivers/staging/bcm/Bcmnet.c
+++ b/drivers/staging/bcm/Bcmnet.c
@@ -40,7 +40,7 @@ static INT bcm_close(struct net_device *dev)
}
static u16 bcm_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return ClassifyPacket(netdev_priv(dev), skb);
}
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 246080316c90..5b15033a94bf 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -616,8 +616,6 @@ int comedi_auto_config(struct device *hardware_device,
ret = driver->auto_attach(dev, context);
if (ret >= 0)
ret = comedi_device_postconfig(dev);
- if (ret < 0)
- comedi_device_detach(dev);
mutex_unlock(&dev->mutex);
if (ret < 0) {
diff --git a/drivers/staging/comedi/drivers/adv_pci1710.c b/drivers/staging/comedi/drivers/adv_pci1710.c
index 593676cf706a..d9ad2c0fdda2 100644
--- a/drivers/staging/comedi/drivers/adv_pci1710.c
+++ b/drivers/staging/comedi/drivers/adv_pci1710.c
@@ -494,6 +494,7 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, chan, range, ofs;
chan = CR_CHAN(insn->chanspec);
@@ -509,11 +510,14 @@ static int pci171x_insn_write_ao(struct comedi_device *dev,
outw(devpriv->da_ranges, dev->iobase + PCI171x_DAREF);
ofs = PCI171x_DA1;
}
+ val = devpriv->ao_data[chan];
- for (n = 0; n < insn->n; n++)
- outw(data[n], dev->iobase + ofs);
+ for (n = 0; n < insn->n; n++) {
+ val = data[n];
+ outw(val, dev->iobase + ofs);
+ }
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
@@ -679,6 +683,7 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
struct comedi_insn *insn, unsigned int *data)
{
struct pci1710_private *devpriv = dev->private;
+ unsigned int val;
int n, rangereg, chan;
chan = CR_CHAN(insn->chanspec);
@@ -688,13 +693,15 @@ static int pci1720_insn_write_ao(struct comedi_device *dev,
outb(rangereg, dev->iobase + PCI1720_RANGE);
devpriv->da_ranges = rangereg;
}
+ val = devpriv->ao_data[chan];
for (n = 0; n < insn->n; n++) {
- outw(data[n], dev->iobase + PCI1720_DA0 + (chan << 1));
+ val = data[n];
+ outw(val, dev->iobase + PCI1720_DA0 + (chan << 1));
outb(0, dev->iobase + PCI1720_SYNCOUT); /* update outputs */
}
- devpriv->ao_data[chan] = data[n];
+ devpriv->ao_data[chan] = val;
return n;
}
diff --git a/drivers/staging/comedi/drivers/usbduxsigma.c b/drivers/staging/comedi/drivers/usbduxsigma.c
index 3beeb1254152..88c60b6020c4 100644
--- a/drivers/staging/comedi/drivers/usbduxsigma.c
+++ b/drivers/staging/comedi/drivers/usbduxsigma.c
@@ -48,6 +48,7 @@
#include <linux/usb.h>
#include <linux/fcntl.h>
#include <linux/compiler.h>
+#include <asm/unaligned.h>
#include "comedi_fc.h"
#include "../comedidev.h"
@@ -792,7 +793,8 @@ static int usbduxsigma_ai_insn_read(struct comedi_device *dev,
}
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf) + 1)));
+ val = be32_to_cpu(get_unaligned((uint32_t
+ *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
@@ -1357,7 +1359,7 @@ static int usbduxsigma_getstatusinfo(struct comedi_device *dev, int chan)
return ret;
/* 32 bits big endian from the A/D converter */
- val = be32_to_cpu(*((uint32_t *)((devpriv->insn_buf)+1)));
+ val = be32_to_cpu(get_unaligned((uint32_t *)(devpriv->insn_buf + 1)));
val &= 0x00ffffff; /* strip status byte */
val ^= 0x00800000; /* convert to unsigned */
diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c
index 4a08e16e42f7..79206cb3fb94 100644
--- a/drivers/staging/cxt1e1/linux.c
+++ b/drivers/staging/cxt1e1/linux.c
@@ -866,6 +866,8 @@ c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd)
_IOC_SIZE (iocmd));
#endif
iolen = _IOC_SIZE (iocmd);
+ if (iolen > sizeof(arg))
+ return -EFAULT;
data = ifr->ifr_data + sizeof (iocmd);
if (copy_from_user (&arg, data, iolen))
return -EFAULT;
diff --git a/drivers/staging/dgrp/dgrp_net_ops.c b/drivers/staging/dgrp/dgrp_net_ops.c
index 1f61b89eca44..33ac7fb88cbd 100644
--- a/drivers/staging/dgrp/dgrp_net_ops.c
+++ b/drivers/staging/dgrp/dgrp_net_ops.c
@@ -2232,177 +2232,6 @@ done:
return rtn;
}
-/*
- * Common Packet Handling code
- */
-
-static void handle_data_in_packet(struct nd_struct *nd, struct ch_struct *ch,
- long dlen, long plen, int n1, u8 *dbuf)
-{
- char *error;
- long n;
- long remain;
- u8 *buf;
- u8 *b;
-
- remain = nd->nd_remain;
- nd->nd_tx_work = 1;
-
- /*
- * Otherwise data should appear only when we are
- * in the CS_READY state.
- */
-
- if (ch->ch_state < CS_READY) {
- error = "Data received before RWIN established";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * Assure that the data received is within the
- * allowable window.
- */
-
- n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
-
- if (dlen > n) {
- error = "Receive data overrun";
- nd->nd_remain = 0;
- nd->nd_state = NS_SEND_ERROR;
- nd->nd_error = error;
- }
-
- /*
- * If we received 3 or less characters,
- * assume it is a human typing, and set RTIME
- * to 10 milliseconds.
- *
- * If we receive 10 or more characters,
- * assume its not a human typing, and set RTIME
- * to 100 milliseconds.
- */
-
- if (ch->ch_edelay != DGRP_RTIME) {
- if (ch->ch_rtime != ch->ch_edelay) {
- ch->ch_rtime = ch->ch_edelay;
- ch->ch_flag |= CH_PARAM;
- }
- } else if (dlen <= 3) {
- if (ch->ch_rtime != 10) {
- ch->ch_rtime = 10;
- ch->ch_flag |= CH_PARAM;
- }
- } else {
- if (ch->ch_rtime != DGRP_RTIME) {
- ch->ch_rtime = DGRP_RTIME;
- ch->ch_flag |= CH_PARAM;
- }
- }
-
- /*
- * If a portion of the packet is outside the
- * buffer, shorten the effective length of the
- * data packet to be the amount of data received.
- */
-
- if (remain < plen)
- dlen -= plen - remain;
-
- /*
- * Detect if receive flush is now complete.
- */
-
- if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
- ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
- ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
- ch->ch_flag &= ~CH_RX_FLUSH;
- }
-
- /*
- * If we are ready to receive, move the data into
- * the receive buffer.
- */
-
- ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
-
- if (ch->ch_state == CS_READY &&
- (ch->ch_tun.un_open_count != 0) &&
- (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
- (ch->ch_cflag & CF_CREAD) != 0 &&
- (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
- (ch->ch_send & RR_RX_FLUSH) == 0) {
-
- if (ch->ch_rin + dlen >= RBUF_MAX) {
- n = RBUF_MAX - ch->ch_rin;
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
-
- ch->ch_rin = 0;
- dbuf += n;
- dlen -= n;
- }
-
- memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
-
- ch->ch_rin += dlen;
-
-
- /*
- * If we are not in fastcook mode, or
- * if there is a fastcook thread
- * waiting for data, send the data to
- * the line discipline.
- */
-
- if ((ch->ch_flag & CH_FAST_READ) == 0 ||
- ch->ch_inwait != 0) {
- dgrp_input(ch);
- }
-
- /*
- * If there is a read thread waiting
- * in select, and we are in fastcook
- * mode, wake him up.
- */
-
- if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
- (ch->ch_flag & CH_FAST_READ) != 0)
- wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
-
- /*
- * Wake any thread waiting in the
- * fastcook loop.
- */
-
- if ((ch->ch_flag & CH_INPUT) != 0) {
- ch->ch_flag &= ~CH_INPUT;
- wake_up_interruptible(&ch->ch_flag_wait);
- }
- }
-
- /*
- * Fabricate and insert a data packet header to
- * preced the remaining data when it comes in.
- */
-
- if (remain < plen) {
- dlen = plen - remain;
- b = buf;
-
- b[0] = 0x90 + n1;
- put_unaligned_be16(dlen, b + 1);
-
- remain = 3;
- if (remain > 0 && b != buf)
- memcpy(buf, b, remain);
-
- nd->nd_remain = remain;
- return;
- }
-}
-
/**
* dgrp_receive() -- decode data packets received from the remote PortServer.
* @nd: pointer to a node structure
@@ -2477,8 +2306,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 1;
dbuf = b + 1;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 2-byte header data packet.
@@ -2492,8 +2320,7 @@ static void dgrp_receive(struct nd_struct *nd)
plen = dlen + 2;
dbuf = b + 2;
- handle_data_in_packet(nd, ch, dlen, plen, n1, dbuf);
- break;
+ goto data;
/*
* Process 3-byte header data packet.
@@ -2508,6 +2335,159 @@ static void dgrp_receive(struct nd_struct *nd)
dbuf = b + 3;
+ /*
+ * Common packet handling code.
+ */
+
+data:
+ nd->nd_tx_work = 1;
+
+ /*
+ * Otherwise data should appear only when we are
+ * in the CS_READY state.
+ */
+
+ if (ch->ch_state < CS_READY) {
+ error = "Data received before RWIN established";
+ goto prot_error;
+ }
+
+ /*
+ * Assure that the data received is within the
+ * allowable window.
+ */
+
+ n = (ch->ch_s_rwin - ch->ch_s_rin) & 0xffff;
+
+ if (dlen > n) {
+ error = "Receive data overrun";
+ goto prot_error;
+ }
+
+ /*
+ * If we received 3 or less characters,
+ * assume it is a human typing, and set RTIME
+ * to 10 milliseconds.
+ *
+ * If we receive 10 or more characters,
+ * assume it's not a human typing, and set RTIME
+ * to 100 milliseconds.
+ */
+
+ if (ch->ch_edelay != DGRP_RTIME) {
+ if (ch->ch_rtime != ch->ch_edelay) {
+ ch->ch_rtime = ch->ch_edelay;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else if (dlen <= 3) {
+ if (ch->ch_rtime != 10) {
+ ch->ch_rtime = 10;
+ ch->ch_flag |= CH_PARAM;
+ }
+ } else {
+ if (ch->ch_rtime != DGRP_RTIME) {
+ ch->ch_rtime = DGRP_RTIME;
+ ch->ch_flag |= CH_PARAM;
+ }
+ }
+
+ /*
+ * If a portion of the packet is outside the
+ * buffer, shorten the effective length of the
+ * data packet to be the amount of data received.
+ */
+
+ if (remain < plen)
+ dlen -= plen - remain;
+
+ /*
+ * Detect if receive flush is now complete.
+ */
+
+ if ((ch->ch_flag & CH_RX_FLUSH) != 0 &&
+ ((ch->ch_flush_seq - nd->nd_seq_out) & SEQ_MASK) >=
+ ((nd->nd_seq_in - nd->nd_seq_out) & SEQ_MASK)) {
+ ch->ch_flag &= ~CH_RX_FLUSH;
+ }
+
+ /*
+ * If we are ready to receive, move the data into
+ * the receive buffer.
+ */
+
+ ch->ch_s_rin = (ch->ch_s_rin + dlen) & 0xffff;
+
+ if (ch->ch_state == CS_READY &&
+ (ch->ch_tun.un_open_count != 0) &&
+ (ch->ch_tun.un_flag & UN_CLOSING) == 0 &&
+ (ch->ch_cflag & CF_CREAD) != 0 &&
+ (ch->ch_flag & (CH_BAUD0 | CH_RX_FLUSH)) == 0 &&
+ (ch->ch_send & RR_RX_FLUSH) == 0) {
+
+ if (ch->ch_rin + dlen >= RBUF_MAX) {
+ n = RBUF_MAX - ch->ch_rin;
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, n);
+
+ ch->ch_rin = 0;
+ dbuf += n;
+ dlen -= n;
+ }
+
+ memcpy(ch->ch_rbuf + ch->ch_rin, dbuf, dlen);
+
+ ch->ch_rin += dlen;
+
+
+ /*
+ * If we are not in fastcook mode, or
+ * if there is a fastcook thread
+ * waiting for data, send the data to
+ * the line discipline.
+ */
+
+ if ((ch->ch_flag & CH_FAST_READ) == 0 ||
+ ch->ch_inwait != 0) {
+ dgrp_input(ch);
+ }
+
+ /*
+ * If there is a read thread waiting
+ * in select, and we are in fastcook
+ * mode, wake him up.
+ */
+
+ if (waitqueue_active(&ch->ch_tun.un_tty->read_wait) &&
+ (ch->ch_flag & CH_FAST_READ) != 0)
+ wake_up_interruptible(&ch->ch_tun.un_tty->read_wait);
+
+ /*
+ * Wake any thread waiting in the
+ * fastcook loop.
+ */
+
+ if ((ch->ch_flag & CH_INPUT) != 0) {
+ ch->ch_flag &= ~CH_INPUT;
+
+ wake_up_interruptible(&ch->ch_flag_wait);
+ }
+ }
+
+ /*
+ * Fabricate and insert a data packet header to
+ * precede the remaining data when it comes in.
+ */
+
+ if (remain < plen) {
+ dlen = plen - remain;
+ b = buf;
+
+ b[0] = 0x90 + n1;
+ put_unaligned_be16(dlen, b + 1);
+
+ remain = 3;
+ goto done;
+ }
break;
/*
diff --git a/drivers/staging/gdm72xx/gdm_usb.c b/drivers/staging/gdm72xx/gdm_usb.c
index f8788bf0a7d3..cdeffe75496b 100644
--- a/drivers/staging/gdm72xx/gdm_usb.c
+++ b/drivers/staging/gdm72xx/gdm_usb.c
@@ -635,11 +635,14 @@ static int gdm_usb_probe(struct usb_interface *intf,
#endif /* CONFIG_WIMAX_GDM72XX_USB_PM */
ret = register_wimax_device(phy_dev, &intf->dev);
+ if (ret)
+ release_usb(udev);
out:
if (ret) {
kfree(phy_dev);
kfree(udev);
+ usb_put_dev(usbdev);
} else {
usb_set_intfdata(intf, phy_dev);
}
diff --git a/drivers/staging/iio/Documentation/iio_utils.h b/drivers/staging/iio/Documentation/iio_utils.h
index 35154d60faf6..c9fedb79e3a2 100644
--- a/drivers/staging/iio/Documentation/iio_utils.h
+++ b/drivers/staging/iio/Documentation/iio_utils.h
@@ -77,7 +77,6 @@ struct iio_channel_info {
uint64_t mask;
unsigned be;
unsigned is_signed;
- unsigned enabled;
unsigned location;
};
@@ -335,6 +334,7 @@ inline int build_channel_array(const char *device_dir,
while (ent = readdir(dp), ent != NULL) {
if (strcmp(ent->d_name + strlen(ent->d_name) - strlen("_en"),
"_en") == 0) {
+ int current_enabled = 0;
current = &(*ci_array)[count++];
ret = asprintf(&filename,
"%s/%s", scan_el_dir, ent->d_name);
@@ -350,10 +350,10 @@ inline int build_channel_array(const char *device_dir,
ret = -errno;
goto error_cleanup_array;
}
- fscanf(sysfsfp, "%u", &current->enabled);
+ fscanf(sysfsfp, "%u", &current_enabled);
fclose(sysfsfp);
- if (!current->enabled) {
+ if (!current_enabled) {
free(filename);
count--;
continue;
diff --git a/drivers/staging/iio/adc/ad799x_core.c b/drivers/staging/iio/adc/ad799x_core.c
index 5ea36410f716..5708ffc62aec 100644
--- a/drivers/staging/iio/adc/ad799x_core.c
+++ b/drivers/staging/iio/adc/ad799x_core.c
@@ -393,7 +393,7 @@ static const struct iio_event_spec ad799x_events[] = {
}, {
.type = IIO_EV_TYPE_THRESH,
.dir = IIO_EV_DIR_FALLING,
- .mask_separate = BIT(IIO_EV_INFO_VALUE),
+ .mask_separate = BIT(IIO_EV_INFO_VALUE) |
BIT(IIO_EV_INFO_ENABLE),
}, {
.type = IIO_EV_TYPE_THRESH,
@@ -409,7 +409,13 @@ static const struct iio_event_spec ad799x_events[] = {
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
.scan_index = (_index), \
- .scan_type = IIO_ST('u', _realbits, 16, 12 - (_realbits)), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_realbits), \
+ .storagebits = 16, \
+ .shift = 12 - (_realbits), \
+ .endianness = IIO_BE, \
+ }, \
.event_spec = _ev_spec, \
.num_event_specs = _num_ev_spec, \
}
@@ -588,7 +594,8 @@ static int ad799x_probe(struct i2c_client *client,
return 0;
error_free_irq:
- free_irq(client->irq, indio_dev);
+ if (client->irq > 0)
+ free_irq(client->irq, indio_dev);
error_cleanup_ring:
ad799x_ring_cleanup(indio_dev);
error_disable_reg:
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index df71669bb60e..514844efac75 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -757,6 +757,7 @@ static void mxs_lradc_finish_touch_event(struct mxs_lradc *lradc, bool valid)
}
/* if it is released, wait for the next touch via IRQ */
+ lradc->cur_plate = LRADC_TOUCH;
mxs_lradc_reg_clear(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ, LRADC_CTRL1);
mxs_lradc_reg_set(lradc, LRADC_CTRL1_TOUCH_DETECT_IRQ_EN, LRADC_CTRL1);
}
@@ -1035,8 +1036,6 @@ SHOW_SCALE_AVAILABLE_ATTR(4);
SHOW_SCALE_AVAILABLE_ATTR(5);
SHOW_SCALE_AVAILABLE_ATTR(6);
SHOW_SCALE_AVAILABLE_ATTR(7);
-SHOW_SCALE_AVAILABLE_ATTR(8);
-SHOW_SCALE_AVAILABLE_ATTR(9);
SHOW_SCALE_AVAILABLE_ATTR(10);
SHOW_SCALE_AVAILABLE_ATTR(11);
SHOW_SCALE_AVAILABLE_ATTR(12);
@@ -1053,8 +1052,6 @@ static struct attribute *mxs_lradc_attributes[] = {
&iio_dev_attr_in_voltage5_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage6_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage7_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage8_scale_available.dev_attr.attr,
- &iio_dev_attr_in_voltage9_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage10_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage11_scale_available.dev_attr.attr,
&iio_dev_attr_in_voltage12_scale_available.dev_attr.attr,
@@ -1613,7 +1610,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
* of the array.
*/
scale_uv = ((u64)lradc->vref_mv[i] * 100000000) >>
- (iio->channels[i].scan_type.realbits - s);
+ (LRADC_RESOLUTION - s);
lradc->scale_avail[i][s].nano =
do_div(scale_uv, 100000000) * 10;
lradc->scale_avail[i][s].integer = scale_uv;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index 0a4298b744e6..2b96665da8a2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -629,7 +629,7 @@ static int ad5933_register_ring_funcs_and_init(struct iio_dev *indio_dev)
struct iio_buffer *buffer;
buffer = iio_kfifo_allocate(indio_dev);
- if (buffer)
+ if (!buffer)
return -ENOMEM;
iio_device_attach_buffer(indio_dev, buffer);
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 09ef5fb8bae6..236ed66f116a 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,9 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_vblank_cleanup(imxdrm->drm);
- drm_kms_helper_poll_fini(imxdrm->drm);
- drm_mode_config_cleanup(imxdrm->drm);
+ drm_vblank_cleanup(drm);
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
return 0;
}
@@ -142,19 +142,19 @@ EXPORT_SYMBOL_GPL(imx_drm_crtc_panel_format);
int imx_drm_crtc_vblank_get(struct imx_drm_crtc *imx_drm_crtc)
{
- return drm_vblank_get(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ return drm_vblank_get(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_get);
void imx_drm_crtc_vblank_put(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_vblank_put(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_vblank_put(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_crtc_vblank_put);
void imx_drm_handle_vblank(struct imx_drm_crtc *imx_drm_crtc)
{
- drm_handle_vblank(imx_drm_crtc->imxdrm->drm, imx_drm_crtc->pipe);
+ drm_handle_vblank(imx_drm_crtc->crtc->dev, imx_drm_crtc->pipe);
}
EXPORT_SYMBOL_GPL(imx_drm_handle_vblank);
@@ -370,29 +370,6 @@ static void imx_drm_connector_unregister(
}
/*
- * register a crtc to the drm core
- */
-static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
-{
- struct imx_drm_device *imxdrm = __imx_drm_device();
- int ret;
-
- ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
- if (ret)
- return ret;
-
- drm_crtc_helper_add(imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
-
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
-
- drm_mode_group_reinit(imxdrm->drm);
-
- return 0;
-}
-
-/*
* Called by the CRTC driver when all CRTCs are registered. This
* puts all the pieces together and initializes the driver.
* Once this is called no more CRTCs can be registered since
@@ -424,15 +401,15 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
mutex_lock(&imxdrm->mutex);
- drm_kms_helper_poll_init(imxdrm->drm);
+ drm_kms_helper_poll_init(drm);
/* setup the grouping for the legacy output */
- ret = drm_mode_group_init_legacy_group(imxdrm->drm,
- &imxdrm->drm->primary->mode_group);
+ ret = drm_mode_group_init_legacy_group(drm,
+ &drm->primary->mode_group);
if (ret)
goto err_kms;
- ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
+ ret = drm_vblank_init(drm, MAX_CRTC);
if (ret)
goto err_kms;
@@ -441,7 +418,7 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
* by drm timer once a current process gives up ownership of
* vblank event.(after drm_vblank_put function is called)
*/
- imxdrm->drm->vblank_disable_allowed = true;
+ drm->vblank_disable_allowed = true;
if (!imx_drm_device_get()) {
ret = -EINVAL;
@@ -536,10 +513,18 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
*new_crtc = imx_drm_crtc;
- ret = imx_drm_crtc_register(imx_drm_crtc);
+ ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
goto err_register;
+ drm_crtc_helper_add(crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+
+ drm_crtc_init(imxdrm->drm, crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
+ drm_mode_group_reinit(imxdrm->drm);
+
imx_drm_update_possible_crtcs();
mutex_unlock(&imxdrm->mutex);
diff --git a/drivers/staging/imx-drm/imx-hdmi.c b/drivers/staging/imx-drm/imx-hdmi.c
index f3a1f5e2e492..62ce0e86f14b 100644
--- a/drivers/staging/imx-drm/imx-hdmi.c
+++ b/drivers/staging/imx-drm/imx-hdmi.c
@@ -16,6 +16,7 @@
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk.h>
+#include <linux/hdmi.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>
#include <linux/mfd/syscon/imx6q-iomuxc-gpr.h>
@@ -52,11 +53,6 @@ enum hdmi_datamap {
YCbCr422_12B = 0x12,
};
-enum hdmi_colorimetry {
- ITU601,
- ITU709,
-};
-
enum imx_hdmi_devtype {
IMX6Q_HDMI,
IMX6DL_HDMI,
@@ -489,12 +485,12 @@ static void imx_hdmi_update_csc_coeffs(struct imx_hdmi *hdmi)
if (is_color_space_conversion(hdmi)) {
if (hdmi->hdmi_data.enc_out_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_out_eitu601;
else
csc_coeff = &csc_coeff_rgb_out_eitu709;
} else if (hdmi->hdmi_data.enc_in_format == RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
csc_coeff = &csc_coeff_rgb_in_eitu601;
else
csc_coeff = &csc_coeff_rgb_in_eitu709;
@@ -1140,16 +1136,16 @@ static void hdmi_config_AVI(struct imx_hdmi *hdmi)
/* Set up colorimetry */
if (hdmi->hdmi_data.enc_out_format == XVYCC444) {
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_EXTENDED_INFO;
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
ext_colorimetry =
HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC709;
} else if (hdmi->hdmi_data.enc_out_format != RGB) {
- if (hdmi->hdmi_data.colorimetry == ITU601)
+ if (hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_601)
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_SMPTE;
- else /* hdmi->hdmi_data.colorimetry == ITU709 */
+ else /*hdmi->hdmi_data.colorimetry == HDMI_COLORIMETRY_ITU_709*/
colorimetry = HDMI_FC_AVICONF1_COLORIMETRY_ITUR;
ext_colorimetry = HDMI_FC_AVICONF2_EXT_COLORIMETRY_XVYCC601;
} else { /* Carries no data */
@@ -1379,9 +1375,9 @@ static int imx_hdmi_setup(struct imx_hdmi *hdmi, struct drm_display_mode *mode)
(hdmi->vic == 21) || (hdmi->vic == 22) ||
(hdmi->vic == 2) || (hdmi->vic == 3) ||
(hdmi->vic == 17) || (hdmi->vic == 18))
- hdmi->hdmi_data.colorimetry = ITU601;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_601;
else
- hdmi->hdmi_data.colorimetry = ITU709;
+ hdmi->hdmi_data.colorimetry = HDMI_COLORIMETRY_ITU_709;
if ((hdmi->vic == 10) || (hdmi->vic == 11) ||
(hdmi->vic == 12) || (hdmi->vic == 13) ||
diff --git a/drivers/staging/lustre/TODO b/drivers/staging/lustre/TODO
index 22742d6d62a8..0a2b6cb3775e 100644
--- a/drivers/staging/lustre/TODO
+++ b/drivers/staging/lustre/TODO
@@ -9,5 +9,6 @@
* Other minor misc cleanups...
Please send any patches to Greg Kroah-Hartman <greg@kroah.com>, Andreas Dilger
-<andreas.dilger@intel.com> and Peng Tao <tao.peng@emc.com>. CCing
-hpdd-discuss <hpdd-discuss@lists.01.org> would be great too.
+<andreas.dilger@intel.com>, Oleg Drokin <oleg.drokin@intel.com> and
+Peng Tao <tao.peng@emc.com>. CCing hpdd-discuss <hpdd-discuss@lists.01.org>
+would be great too.
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
index 596a15fc8996..037ae8a6d531 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_kernelcomm.h
@@ -61,6 +61,8 @@ struct kuc_hdr {
__u16 kuc_msglen; /* Including header */
} __attribute__((aligned(sizeof(__u64))));
+#define KUC_CHANGELOG_MSG_MAXSIZE (sizeof(struct kuc_hdr)+CR_MAXSIZE)
+
#define KUC_MAGIC 0x191C /*Lustre9etLinC */
#define KUC_FL_BLOCK 0x01 /* Wait for send */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index d0d942ced01a..dddccca120c9 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -120,7 +120,7 @@ do { \
do { \
LASSERT(!in_interrupt() || \
((size) <= LIBCFS_VMALLOC_SIZE && \
- ((mask) & GFP_ATOMIC)) != 0); \
+ ((mask) & __GFP_WAIT) == 0)); \
} while (0)
#define LIBCFS_ALLOC_POST(ptr, size) \
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 93648632ba26..6f58ead20393 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -529,7 +529,7 @@ kiblnd_kvaddr_to_page (unsigned long vaddr)
{
struct page *page;
- if (is_vmalloc_addr(vaddr)) {
+ if (is_vmalloc_addr((void *)vaddr)) {
page = vmalloc_to_page ((void *)vaddr);
LASSERT (page != NULL);
return page;
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
index 68a4f52ec998..b7b53b579c85 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_cb.c
@@ -924,7 +924,7 @@ ksocknal_launch_packet (lnet_ni_t *ni, ksock_tx_t *tx, lnet_process_id_t id)
int
ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
{
- int mpflag = 0;
+ int mpflag = 1;
int type = lntmsg->msg_type;
lnet_process_id_t target = lntmsg->msg_target;
unsigned int payload_niov = lntmsg->msg_niov;
@@ -993,8 +993,9 @@ ksocknal_send(lnet_ni_t *ni, void *private, lnet_msg_t *lntmsg)
/* The first fragment will be set later in pro_pack */
rc = ksocknal_launch_packet(ni, tx, target);
- if (lntmsg->msg_vmflush)
+ if (!mpflag)
cfs_memory_pressure_restore(mpflag);
+
if (rc == 0)
return (0);
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 6b6c0240e824..7893d83e131f 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -760,7 +760,8 @@ static inline void hsm_set_cl_error(int *flags, int error)
*flags |= (error << CLF_HSM_ERR_L);
}
-#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + sizeof(struct changelog_rec))
+#define CR_MAXSIZE cfs_size_round(2*NAME_MAX + 1 + \
+ sizeof(struct changelog_ext_rec))
struct changelog_rec {
__u16 cr_namelen;
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 22d0acc95bc5..52b7731bcc38 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -1086,7 +1086,7 @@ static int quotactl_ioctl(struct ll_sb_info *sbi, struct if_quotactl *qctl)
break;
case Q_GETQUOTA:
if (((type == USRQUOTA &&
- uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
+ !uid_eq(current_euid(), make_kuid(&init_user_ns, id))) ||
(type == GRPQUOTA &&
!in_egroup_p(make_kgid(&init_user_ns, id)))) &&
(!cfs_capable(CFS_CAP_SYS_ADMIN) ||
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index d1ad91c34ddc..83013927e131 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -1430,7 +1430,7 @@ static struct kuc_hdr *changelog_kuc_hdr(char *buf, int len, int flags)
{
struct kuc_hdr *lh = (struct kuc_hdr *)buf;
- LASSERT(len <= CR_MAXSIZE);
+ LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);
lh->kuc_magic = KUC_MAGIC;
lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
@@ -1503,7 +1503,7 @@ static int mdc_changelog_send_thread(void *csdata)
CDEBUG(D_CHANGELOG, "changelog to fp=%p start "LPU64"\n",
cs->cs_fp, cs->cs_startrec);
- OBD_ALLOC(cs->cs_buf, CR_MAXSIZE);
+ OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
if (cs->cs_buf == NULL)
GOTO(out, rc = -ENOMEM);
@@ -1540,7 +1540,7 @@ out:
if (ctxt)
llog_ctxt_put(ctxt);
if (cs->cs_buf)
- OBD_FREE(cs->cs_buf, CR_MAXSIZE);
+ OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
OBD_FREE_PTR(cs);
return rc;
}
diff --git a/drivers/staging/media/go7007/go7007-loader.c b/drivers/staging/media/go7007/go7007-loader.c
index 10bb41c2fb6d..eecb1f2a5574 100644
--- a/drivers/staging/media/go7007/go7007-loader.c
+++ b/drivers/staging/media/go7007/go7007-loader.c
@@ -59,7 +59,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
if (usbdev->descriptor.bNumConfigurations != 1) {
dev_err(&interface->dev, "can't handle multiple config\n");
- return -ENODEV;
+ goto failed2;
}
vendor = le16_to_cpu(usbdev->descriptor.idVendor);
@@ -108,6 +108,7 @@ static int go7007_loader_probe(struct usb_interface *interface,
return 0;
failed2:
+ usb_put_dev(usbdev);
dev_err(&interface->dev, "probe failed\n");
return -ENODEV;
}
@@ -115,6 +116,7 @@ failed2:
static void go7007_loader_disconnect(struct usb_interface *interface)
{
dev_info(&interface->dev, "disconnect\n");
+ usb_put_dev(interface_to_usbdev(interface));
usb_set_intfdata(interface, NULL);
}
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index eedffed17e39..31b269a5fff7 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -307,7 +307,7 @@ static netdev_tx_t xlr_net_start_xmit(struct sk_buff *skb,
}
static u16 xlr_net_select_queue(struct net_device *ndev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
return (u16)smp_processor_id();
}
@@ -892,6 +892,11 @@ static int xlr_setup_mdio(struct xlr_net_priv *priv,
priv->mii_bus->write = xlr_mii_write;
priv->mii_bus->parent = &pdev->dev;
priv->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+ if (priv->mii_bus->irq == NULL) {
+ pr_err("irq alloc failed\n");
+ mdiobus_free(priv->mii_bus);
+ return -ENOMEM;
+ }
priv->mii_bus->irq[priv->phy_addr] = priv->ndev->irq;
/* Scan only the enabled address */
diff --git a/drivers/staging/octeon-usb/octeon-hcd.c b/drivers/staging/octeon-usb/octeon-hcd.c
index 47e0a91238a1..5a001d9b4252 100644
--- a/drivers/staging/octeon-usb/octeon-hcd.c
+++ b/drivers/staging/octeon-usb/octeon-hcd.c
@@ -275,13 +275,6 @@ enum cvmx_usb_pipe_flags {
*/
#define MAX_TRANSFER_PACKETS ((1<<10)-1)
-enum {
- USB_CLOCK_TYPE_REF_12,
- USB_CLOCK_TYPE_REF_24,
- USB_CLOCK_TYPE_REF_48,
- USB_CLOCK_TYPE_CRYSTAL_12,
-};
-
/**
* Logical transactions may take numerous low level
* transactions, especially when splits are concerned. This
@@ -471,19 +464,6 @@ struct octeon_hcd {
/* Returns the IO address to push/pop stuff data from the FIFOs */
#define USB_FIFO_ADDRESS(channel, usb_index) (CVMX_USBCX_GOTGCTL(usb_index) + ((channel)+1)*0x1000)
-static int octeon_usb_get_clock_type(void)
-{
- switch (cvmx_sysinfo_get()->board_type) {
- case CVMX_BOARD_TYPE_BBGW_REF:
- case CVMX_BOARD_TYPE_LANAI2_A:
- case CVMX_BOARD_TYPE_LANAI2_U:
- case CVMX_BOARD_TYPE_LANAI2_G:
- case CVMX_BOARD_TYPE_UBNT_E100:
- return USB_CLOCK_TYPE_CRYSTAL_12;
- }
- return USB_CLOCK_TYPE_REF_48;
-}
-
/**
* Read a USB 32bit CSR. It performs the necessary address swizzle
* for 32bit CSRs and logs the value in a readable format if
@@ -582,37 +562,6 @@ static inline int __cvmx_usb_get_data_pid(struct cvmx_usb_pipe *pipe)
return 0; /* Data0 */
}
-
-/**
- * Return the number of USB ports supported by this Octeon
- * chip. If the chip doesn't support USB, or is not supported
- * by this API, a zero will be returned. Most Octeon chips
- * support one usb port, but some support two ports.
- * cvmx_usb_initialize() must be called on independent
- * struct cvmx_usb_state.
- *
- * Returns: Number of port, zero if usb isn't supported
- */
-static int cvmx_usb_get_num_ports(void)
-{
- int arch_ports = 0;
-
- if (OCTEON_IS_MODEL(OCTEON_CN56XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN52XX))
- arch_ports = 2;
- else if (OCTEON_IS_MODEL(OCTEON_CN50XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN31XX))
- arch_ports = 1;
- else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
- arch_ports = 1;
- else
- arch_ports = 0;
-
- return arch_ports;
-}
-
/**
* Initialize a USB port for use. This must be called before any
* other access to the Octeon USB port is made. The port starts
@@ -628,41 +577,16 @@ static int cvmx_usb_get_num_ports(void)
* Returns: 0 or a negative error code.
*/
static int cvmx_usb_initialize(struct cvmx_usb_state *usb,
- int usb_port_number)
+ int usb_port_number,
+ enum cvmx_usb_initialize_flags flags)
{
union cvmx_usbnx_clk_ctl usbn_clk_ctl;
union cvmx_usbnx_usbp_ctl_status usbn_usbp_ctl_status;
- enum cvmx_usb_initialize_flags flags = 0;
int i;
/* At first allow 0-1 for the usb port number */
if ((usb_port_number < 0) || (usb_port_number > 1))
return -EINVAL;
- /* For all chips except 52XX there is only one port */
- if (!OCTEON_IS_MODEL(OCTEON_CN52XX) && (usb_port_number > 0))
- return -EINVAL;
- /* Try to determine clock type automatically */
- if (octeon_usb_get_clock_type() == USB_CLOCK_TYPE_CRYSTAL_12) {
- /* Only 12 MHZ crystals are supported */
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
- } else {
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
-
- switch (octeon_usb_get_clock_type()) {
- case USB_CLOCK_TYPE_REF_12:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
- break;
- case USB_CLOCK_TYPE_REF_24:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
- break;
- case USB_CLOCK_TYPE_REF_48:
- flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
- break;
- default:
- return -EINVAL;
- break;
- }
- }
memset(usb, 0, sizeof(*usb));
usb->init_flags = flags;
@@ -3431,7 +3355,6 @@ static int octeon_usb_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
return 0;
}
-
static const struct hc_driver octeon_hc_driver = {
.description = "Octeon USB",
.product_desc = "Octeon Host Controller",
@@ -3448,15 +3371,74 @@ static const struct hc_driver octeon_hc_driver = {
.hub_control = octeon_usb_hub_control,
};
-
-static int octeon_usb_driver_probe(struct device *dev)
+static int octeon_usb_probe(struct platform_device *pdev)
{
int status;
- int usb_num = to_platform_device(dev)->id;
- int irq = platform_get_irq(to_platform_device(dev), 0);
+ int initialize_flags;
+ int usb_num;
+ struct resource *res_mem;
+ struct device_node *usbn_node;
+ int irq = platform_get_irq(pdev, 0);
+ struct device *dev = &pdev->dev;
struct octeon_hcd *priv;
struct usb_hcd *hcd;
unsigned long flags;
+ u32 clock_rate = 48000000;
+ bool is_crystal_clock = false;
+ const char *clock_type;
+ int i;
+
+ if (dev->of_node == NULL) {
+ dev_err(dev, "Error: empty of_node\n");
+ return -ENXIO;
+ }
+ usbn_node = dev->of_node->parent;
+
+ i = of_property_read_u32(usbn_node,
+ "refclk-frequency", &clock_rate);
+ if (i) {
+ dev_err(dev, "No USBN \"refclk-frequency\"\n");
+ return -ENXIO;
+ }
+ switch (clock_rate) {
+ case 12000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_12MHZ;
+ break;
+ case 24000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_24MHZ;
+ break;
+ case 48000000:
+ initialize_flags = CVMX_USB_INITIALIZE_FLAGS_CLOCK_48MHZ;
+ break;
+ default:
+ dev_err(dev, "Illebal USBN \"refclk-frequency\" %u\n", clock_rate);
+ return -ENXIO;
+
+ }
+
+ i = of_property_read_string(usbn_node,
+ "refclk-type", &clock_type);
+
+ if (!i && strcmp("crystal", clock_type) == 0)
+ is_crystal_clock = true;
+
+ if (is_crystal_clock)
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_XI;
+ else
+ initialize_flags |= CVMX_USB_INITIALIZE_FLAGS_CLOCK_XO_GND;
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res_mem == NULL) {
+ dev_err(dev, "found no memory resource\n");
+ return -ENXIO;
+ }
+ usb_num = (res_mem->start >> 44) & 1;
+
+ if (irq < 0) {
+ /* Defective device tree, but we know how to fix it. */
+ irq_hw_number_t hwirq = usb_num ? (1 << 6) + 17 : 56;
+ irq = irq_create_mapping(NULL, hwirq);
+ }
/*
* Set the DMA mask to 64bits so we get buffers already translated for
@@ -3465,6 +3447,26 @@ static int octeon_usb_driver_probe(struct device *dev)
dev->coherent_dma_mask = ~0;
dev->dma_mask = &dev->coherent_dma_mask;
+ /*
+ * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
+ * IOB priority registers. Under heavy network load USB
+ * hardware can be starved by the IOB causing a crash. Give
+ * it a priority boost if it has been waiting more than 400
+ * cycles to avoid this situation.
+ *
+ * Testing indicates that a cnt_val of 8192 is not sufficient,
+ * but no failures are seen with 4096. We choose a value of
+ * 400 to give a safety factor of 10.
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
+ union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
+
+ pri_cnt.u64 = 0;
+ pri_cnt.s.cnt_enb = 1;
+ pri_cnt.s.cnt_val = 400;
+ cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
+ }
+
hcd = usb_create_hcd(&octeon_hc_driver, dev, dev_name(dev));
if (!hcd) {
dev_dbg(dev, "Failed to allocate memory for HCD\n");
@@ -3478,7 +3480,7 @@ static int octeon_usb_driver_probe(struct device *dev)
tasklet_init(&priv->dequeue_tasklet, octeon_usb_urb_dequeue_work, (unsigned long)priv);
INIT_LIST_HEAD(&priv->dequeue_list);
- status = cvmx_usb_initialize(&priv->usb, usb_num);
+ status = cvmx_usb_initialize(&priv->usb, usb_num, initialize_flags);
if (status) {
dev_dbg(dev, "USB initialization failed with %d\n", status);
kfree(hcd);
@@ -3492,7 +3494,7 @@ static int octeon_usb_driver_probe(struct device *dev)
cvmx_usb_poll(&priv->usb);
spin_unlock_irqrestore(&priv->lock, flags);
- status = usb_add_hcd(hcd, irq, IRQF_SHARED);
+ status = usb_add_hcd(hcd, irq, 0);
if (status) {
dev_dbg(dev, "USB add HCD failed with %d\n", status);
kfree(hcd);
@@ -3500,14 +3502,15 @@ static int octeon_usb_driver_probe(struct device *dev)
}
device_wakeup_enable(hcd->self.controller);
- dev_dbg(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
+ dev_info(dev, "Registered HCD for port %d on irq %d\n", usb_num, irq);
return 0;
}
-static int octeon_usb_driver_remove(struct device *dev)
+static int octeon_usb_remove(struct platform_device *pdev)
{
int status;
+ struct device *dev = &pdev->dev;
struct usb_hcd *hcd = dev_get_drvdata(dev);
struct octeon_hcd *priv = hcd_to_octeon(hcd);
unsigned long flags;
@@ -3525,85 +3528,41 @@ static int octeon_usb_driver_remove(struct device *dev)
return 0;
}
-static struct device_driver octeon_usb_driver = {
- .name = "OcteonUSB",
- .bus = &platform_bus_type,
- .probe = octeon_usb_driver_probe,
- .remove = octeon_usb_driver_remove,
+static struct of_device_id octeon_usb_match[] = {
+ {
+ .compatible = "cavium,octeon-5750-usbc",
+ },
+ {},
};
+static struct platform_driver octeon_usb_driver = {
+ .driver = {
+ .name = "OcteonUSB",
+ .owner = THIS_MODULE,
+ .of_match_table = octeon_usb_match,
+ },
+ .probe = octeon_usb_probe,
+ .remove = octeon_usb_remove,
+};
-#define MAX_USB_PORTS 10
-static struct platform_device *pdev_glob[MAX_USB_PORTS];
-static int octeon_usb_registered;
-static int __init octeon_usb_module_init(void)
+static int __init octeon_usb_driver_init(void)
{
- int num_devices = cvmx_usb_get_num_ports();
- int device;
-
- if (usb_disabled() || num_devices == 0)
- return -ENODEV;
-
- if (driver_register(&octeon_usb_driver))
- return -ENOMEM;
-
- octeon_usb_registered = 1;
-
- /*
- * Only cn52XX and cn56XX have DWC_OTG USB hardware and the
- * IOB priority registers. Under heavy network load USB
- * hardware can be starved by the IOB causing a crash. Give
- * it a priority boost if it has been waiting more than 400
- * cycles to avoid this situation.
- *
- * Testing indicates that a cnt_val of 8192 is not sufficient,
- * but no failures are seen with 4096. We choose a value of
- * 400 to give a safety factor of 10.
- */
- if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN56XX)) {
- union cvmx_iob_n2c_l2c_pri_cnt pri_cnt;
-
- pri_cnt.u64 = 0;
- pri_cnt.s.cnt_enb = 1;
- pri_cnt.s.cnt_val = 400;
- cvmx_write_csr(CVMX_IOB_N2C_L2C_PRI_CNT, pri_cnt.u64);
- }
-
- for (device = 0; device < num_devices; device++) {
- struct resource irq_resource;
- struct platform_device *pdev;
- memset(&irq_resource, 0, sizeof(irq_resource));
- irq_resource.start = (device == 0) ? OCTEON_IRQ_USB0 : OCTEON_IRQ_USB1;
- irq_resource.end = irq_resource.start;
- irq_resource.flags = IORESOURCE_IRQ;
- pdev = platform_device_register_simple((char *)octeon_usb_driver.name, device, &irq_resource, 1);
- if (IS_ERR(pdev)) {
- driver_unregister(&octeon_usb_driver);
- octeon_usb_registered = 0;
- return PTR_ERR(pdev);
- }
- if (device < MAX_USB_PORTS)
- pdev_glob[device] = pdev;
+ if (usb_disabled())
+ return 0;
- }
- return 0;
+ return platform_driver_register(&octeon_usb_driver);
}
+module_init(octeon_usb_driver_init);
-static void __exit octeon_usb_module_cleanup(void)
+static void __exit octeon_usb_driver_exit(void)
{
- int i;
+ if (usb_disabled())
+ return;
- for (i = 0; i < MAX_USB_PORTS; i++)
- if (pdev_glob[i]) {
- platform_device_unregister(pdev_glob[i]);
- pdev_glob[i] = NULL;
- }
- if (octeon_usb_registered)
- driver_unregister(&octeon_usb_driver);
+ platform_driver_unregister(&octeon_usb_driver);
}
+module_exit(octeon_usb_driver_exit);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Cavium Networks <support@caviumnetworks.com>");
-MODULE_DESCRIPTION("Cavium Networks Octeon USB Host driver.");
-module_init(octeon_usb_module_init);
-module_exit(octeon_usb_module_cleanup);
+MODULE_AUTHOR("Cavium, Inc. <support@cavium.com>");
+MODULE_DESCRIPTION("Cavium Inc. OCTEON USB Host driver.");
diff --git a/drivers/staging/ozwpan/ozproto.c b/drivers/staging/ozwpan/ozproto.c
index cb060364dfe7..5d965cf06d59 100644
--- a/drivers/staging/ozwpan/ozproto.c
+++ b/drivers/staging/ozwpan/ozproto.c
@@ -668,8 +668,8 @@ void oz_binding_add(const char *net_dev)
if (binding) {
binding->ptype.type = __constant_htons(OZ_ETHERTYPE);
binding->ptype.func = oz_pkt_recv;
- memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
if (net_dev && *net_dev) {
+ memcpy(binding->name, net_dev, OZ_MAX_BINDING_LEN);
oz_dbg(ON, "Adding binding: %s\n", net_dev);
binding->ptype.dev =
dev_get_by_name(&init_net, net_dev);
@@ -680,6 +680,7 @@ void oz_binding_add(const char *net_dev)
}
} else {
oz_dbg(ON, "Binding to all netcards\n");
+ memset(binding->name, 0, OZ_MAX_BINDING_LEN);
binding->ptype.dev = NULL;
}
if (binding) {
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 153ec61493ab..96df62f95b6b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -912,12 +912,12 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
unsigned char *pbuf;
u32 wpa_ielen = 0;
u8 *pbssid = GetAddr3Ptr(pframe);
- u32 hidden_ssid = 0;
struct HT_info_element *pht_info = NULL;
struct rtw_ieee80211_ht_cap *pht_cap = NULL;
u32 bcn_channel;
unsigned short ht_cap_info;
unsigned char ht_info_infos_0;
+ int ssid_len;
if (is_client_associated_to_ap(Adapter) == false)
return true;
@@ -999,21 +999,15 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
}
/* checking SSID */
+ ssid_len = 0;
p = rtw_get_ie(bssid->IEs + _FIXED_IE_LENGTH_, _SSID_IE_, &len, bssid->IELength - _FIXED_IE_LENGTH_);
- if (p == NULL) {
- DBG_88E("%s marc: cannot find SSID for survey event\n", __func__);
- hidden_ssid = true;
- } else {
- hidden_ssid = false;
- }
-
- if ((NULL != p) && (false == hidden_ssid && (*(p + 1)))) {
- memcpy(bssid->Ssid.Ssid, (p + 2), *(p + 1));
- bssid->Ssid.SsidLength = *(p + 1);
- } else {
- bssid->Ssid.SsidLength = 0;
- bssid->Ssid.Ssid[0] = '\0';
+ if (p) {
+ ssid_len = *(p + 1);
+ if (ssid_len > NDIS_802_11_LENGTH_SSID)
+ ssid_len = 0;
}
+ memcpy(bssid->Ssid.Ssid, (p + 2), ssid_len);
+ bssid->Ssid.SsidLength = ssid_len;
RT_TRACE(_module_rtl871x_mlme_c_, _drv_info_, ("%s bssid.Ssid.Ssid:%s bssid.Ssid.SsidLength:%d "
"cur_network->network.Ssid.Ssid:%s len:%d\n", __func__, bssid->Ssid.Ssid,
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index dec992569476..4ad80ae1067f 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2500,7 +2500,7 @@ static int rtw_mp_ioctl_hdl(struct net_device *dev, struct iw_request_info *info
("rtw_mp_ioctl_hdl: subcode [%d], len[%d], buffer_len[%d]\r\n",
poidparam->subcode, poidparam->len, len));
- if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
+ if (poidparam->subcode >= ARRAY_SIZE(mp_ioctl_hdl)) {
RT_TRACE(_module_rtl871x_ioctl_os_c, _drv_err_, ("no matching drvext subcodes\r\n"));
ret = -EINVAL;
goto _rtw_mp_ioctl_hdl_exit;
@@ -3164,9 +3164,7 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
u8 *p2pie;
uint p2pielen = 0, attr_contentlen = 0;
u8 attr_content[100] = {0x00};
-
- u8 go_devadd_str[17 + 10] = {0x00};
- /* +10 is for the str "go_devadd =", we have to clear it at wrqu->data.pointer */
+ u8 go_devadd_str[17 + 12] = {};
/* Commented by Albert 20121209 */
/* The input data is the GO's interface address which the application wants to know its device address. */
@@ -3223,12 +3221,12 @@ static int rtw_p2p_get_go_device_address(struct net_device *dev,
spin_unlock_bh(&pmlmepriv->scanned_queue.lock);
if (!blnMatch)
- sprintf(go_devadd_str, "\n\ndev_add = NULL");
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add = NULL");
else
- sprintf(go_devadd_str, "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
+ snprintf(go_devadd_str, sizeof(go_devadd_str), "\n\ndev_add =%.2X:%.2X:%.2X:%.2X:%.2X:%.2X",
attr_content[0], attr_content[1], attr_content[2], attr_content[3], attr_content[4], attr_content[5]);
- if (copy_to_user(wrqu->data.pointer, go_devadd_str, 10 + 17))
+ if (copy_to_user(wrqu->data.pointer, go_devadd_str, sizeof(go_devadd_str)))
return -EFAULT;
return ret;
}
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 68f98fa114d2..7c9ee58f47bb 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -653,7 +653,7 @@ static unsigned int rtw_classify8021d(struct sk_buff *skb)
}
static u16 rtw_select_queue(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv, select_queue_fallback_t fallback)
{
struct adapter *padapter = rtw_netdev_priv(dev);
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 0a341d6ec51f..2f40ff5901d6 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -53,8 +53,9 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
{USB_DEVICE(USB_VENDER_ID_REALTEK, 0x0179)}, /* 8188ETV */
/*=== Customer ID ===*/
/****** 8188EUS ********/
- {USB_DEVICE(0x8179, 0x07B8)}, /* Abocom - Abocom */
+ {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
{USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
+ {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
{} /* Terminating entry */
};
diff --git a/drivers/staging/rtl8821ae/Kconfig b/drivers/staging/rtl8821ae/Kconfig
index 2aa5dac2f1df..abccc9dabd65 100644
--- a/drivers/staging/rtl8821ae/Kconfig
+++ b/drivers/staging/rtl8821ae/Kconfig
@@ -1,6 +1,6 @@
config R8821AE
tristate "RealTek RTL8821AE Wireless LAN NIC driver"
- depends on PCI && WLAN
+ depends on PCI && WLAN && MAC80211
depends on m
select WIRELESS_EXT
select WEXT_PRIV
diff --git a/drivers/staging/rtl8821ae/wifi.h b/drivers/staging/rtl8821ae/wifi.h
index cfe88a1efd55..76bef93ad70a 100644
--- a/drivers/staging/rtl8821ae/wifi.h
+++ b/drivers/staging/rtl8821ae/wifi.h
@@ -1414,7 +1414,7 @@ struct rtl_dm {
/*88e tx power tracking*/
- u8 bb_swing_idx_ofdm[2];
+ u8 bb_swing_idx_ofdm[MAX_RF_PATH];
u8 bb_swing_idx_ofdm_current;
u8 bb_swing_idx_ofdm_base[MAX_RF_PATH];
bool bb_swing_flag_Ofdm;
diff --git a/drivers/staging/usbip/userspace/libsrc/names.c b/drivers/staging/usbip/userspace/libsrc/names.c
index 3c8d28b771e0..81ff8522405c 100644
--- a/drivers/staging/usbip/userspace/libsrc/names.c
+++ b/drivers/staging/usbip/userspace/libsrc/names.c
@@ -169,14 +169,14 @@ static void *my_malloc(size_t size)
struct pool *p;
p = calloc(1, sizeof(struct pool));
- if (!p) {
- free(p);
+ if (!p)
return NULL;
- }
p->mem = calloc(1, size);
- if (!p->mem)
+ if (!p->mem) {
+ free(p);
return NULL;
+ }
p->next = pool_head;
pool_head = p;
diff --git a/drivers/staging/usbip/vhci_sysfs.c b/drivers/staging/usbip/vhci_sysfs.c
index 9b51586d11d9..0141bc34d5cc 100644
--- a/drivers/staging/usbip/vhci_sysfs.c
+++ b/drivers/staging/usbip/vhci_sysfs.c
@@ -149,7 +149,8 @@ static int valid_args(__u32 rhport, enum usb_device_speed speed)
case USB_SPEED_WIRELESS:
break;
default:
- pr_err("speed %d\n", speed);
+ pr_err("Failed attach request for unsupported USB speed: %s\n",
+ usb_speed_string(speed));
return -EINVAL;
}
diff --git a/drivers/staging/wlags49_h2/wl_wext.c b/drivers/staging/wlags49_h2/wl_wext.c
index 4a1ddaf5e00f..187fc060de26 100644
--- a/drivers/staging/wlags49_h2/wl_wext.c
+++ b/drivers/staging/wlags49_h2/wl_wext.c
@@ -1061,7 +1061,7 @@ static int wireless_set_essid(struct net_device *dev, struct iw_request_info *in
goto out;
}
- if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN + 1) {
+ if (data->flags != 0 && data->length > HCF_MAX_NAME_LEN) {
ret = -EINVAL;
goto out;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 7f1a7ce4b771..b83ec378d04f 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn)
spin_unlock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
iscsit_free_cmd(cmd, false);
}
}
@@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state
break;
case ISTATE_REMOVE:
spin_lock_bh(&conn->cmd_lock);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, false);
@@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)
spin_lock_bh(&conn->cmd_lock);
list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_increment_maxcmdsn(cmd, sess);
@@ -4196,6 +4196,10 @@ int iscsit_close_connection(
iscsit_stop_timers_for_cmds(conn);
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
iscsit_free_queue_reqs_for_conn(conn);
/*
diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c
index e048d6439f4a..cda4d80cfaef 100644
--- a/drivers/target/iscsi/iscsi_target_erl1.c
+++ b/drivers/target/iscsi/iscsi_target_erl1.c
@@ -507,7 +507,9 @@ int iscsit_handle_status_snack(
u32 last_statsn;
int found_cmd;
- if (conn->exp_statsn > begrun) {
+ if (!begrun) {
+ begrun = conn->exp_statsn;
+ } else if (conn->exp_statsn > begrun) {
pr_err("Got Status SNACK Begrun: 0x%08x, RunLength:"
" 0x%08x but already got ExpStatSN: 0x%08x on CID:"
" %hu.\n", begrun, runlength, conn->exp_statsn,
diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c
index 33be1fb1df32..4ca8fd2a70db 100644
--- a/drivers/target/iscsi/iscsi_target_erl2.c
+++ b/drivers/target/iscsi/iscsi_target_erl2.c
@@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)
list_for_each_entry_safe(cmd, cmd_tmp,
&cr->conn_recovery_cmd_list, i_conn_node) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
cmd->conn = NULL;
spin_unlock(&cr->conn_recovery_cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -216,7 +216,7 @@ int iscsit_remove_cmd_from_connection_recovery(
}
cr = cmd->cr;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
return --cr->cmd_count;
}
@@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)
if (!(cmd->cmd_flags & ICF_OOO_CMDSN))
continue;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
@@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
/*
* Only perform connection recovery on ISCSI_OP_SCSI_CMD or
* ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call
- * list_del(&cmd->i_conn_node); to release the command to the
+ * list_del_init(&cmd->i_conn_node); to release the command to the
* session pool and remove it from the connection's list.
*
* Also stop the DataOUT timer, which will be restarted after
@@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
" CID: %hu\n", cmd->iscsi_opcode,
cmd->init_task_tag, cmd->cmd_sn, conn->cid);
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
*/
if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd &&
iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) {
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_cmd(cmd, true);
spin_lock_bh(&conn->cmd_lock);
@@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)
cmd->sess = conn->sess;
- list_del(&cmd->i_conn_node);
+ list_del_init(&cmd->i_conn_node);
spin_unlock_bh(&conn->cmd_lock);
iscsit_free_all_datain_reqs(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 39761837608d..44a5471de00f 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) {
spin_lock(&tpg->tpg_state_lock);
- if (tpg->tpg_state == TPG_STATE_FREE) {
+ if (tpg->tpg_state != TPG_STATE_ACTIVE) {
spin_unlock(&tpg->tpg_state_lock);
continue;
}
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index 12da9b386169..c3d9df6aaf5f 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -500,7 +500,7 @@ static inline int core_alua_state_lba_dependent(
if (segment_mult) {
u64 tmp = lba;
- start_lba = sector_div(tmp, segment_size * segment_mult);
+ start_lba = do_div(tmp, segment_size * segment_mult);
last_lba = first_lba + segment_size - 1;
if (start_lba >= first_lba &&
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 2f5d77932c80..3013287a2aaa 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -2009,7 +2009,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
struct t10_reservation *pr_tmpl = &dev->t10_pr;
unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
sense_reason_t ret = TCM_NO_SENSE;
- int pr_holder = 0;
+ int pr_holder = 0, type;
if (!se_sess || !se_lun) {
pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
@@ -2131,6 +2131,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = TCM_RESERVATION_CONFLICT;
goto out;
}
+ type = pr_reg->pr_res_type;
spin_lock(&pr_tmpl->registration_lock);
/*
@@ -2161,6 +2162,7 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* Release the calling I_T Nexus registration now..
*/
__core_scsi3_free_registration(cmd->se_dev, pr_reg, NULL, 1);
+ pr_reg = NULL;
/*
* From spc4r17, section 5.7.11.3 Unregistering
@@ -2174,8 +2176,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
* RESERVATIONS RELEASED.
*/
if (pr_holder &&
- (pr_reg->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
- pr_reg->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
+ (type == PR_TYPE_WRITE_EXCLUSIVE_REGONLY ||
+ type == PR_TYPE_EXCLUSIVE_ACCESS_REGONLY)) {
list_for_each_entry(pr_reg_p,
&pr_tmpl->registration_list,
pr_reg_list) {
@@ -2194,7 +2196,8 @@ core_scsi3_emulate_pro_register(struct se_cmd *cmd, u64 res_key, u64 sa_res_key,
ret = core_scsi3_update_and_write_aptpl(dev, aptpl);
out:
- core_scsi3_put_pr_reg(pr_reg);
+ if (pr_reg)
+ core_scsi3_put_pr_reg(pr_reg);
return ret;
}
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index fa3cae393e13..77e6531fb0a1 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -1074,23 +1074,36 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read,
struct scatterlist *psg;
void *paddr, *addr;
unsigned int i, len, left;
+ unsigned int offset = sg_off;
left = sectors * dev->prot_length;
for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) {
+ unsigned int psg_len, copied = 0;
- len = min(psg->length, left);
paddr = kmap_atomic(sg_page(psg)) + psg->offset;
- addr = kmap_atomic(sg_page(sg)) + sg_off;
+ psg_len = min(left, psg->length);
+ while (psg_len) {
+ len = min(psg_len, sg->length - offset);
+ addr = kmap_atomic(sg_page(sg)) + sg->offset + offset;
- if (read)
- memcpy(paddr, addr, len);
- else
- memcpy(addr, paddr, len);
+ if (read)
+ memcpy(paddr + copied, addr, len);
+ else
+ memcpy(addr, paddr + copied, len);
- left -= len;
+ left -= len;
+ offset += len;
+ copied += len;
+ psg_len -= len;
+
+ if (offset >= sg->length) {
+ sg = sg_next(sg);
+ offset = 0;
+ }
+ kunmap_atomic(addr);
+ }
kunmap_atomic(paddr);
- kunmap_atomic(addr);
}
}
@@ -1155,7 +1168,7 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
{
struct se_device *dev = cmd->se_dev;
struct se_dif_v1_tuple *sdt;
- struct scatterlist *dsg;
+ struct scatterlist *dsg, *psg = sg;
sector_t sector = start;
void *daddr, *paddr;
int i, j, offset = sg_off;
@@ -1163,14 +1176,14 @@ sbc_dif_verify_read(struct se_cmd *cmd, sector_t start, unsigned int sectors,
for_each_sg(cmd->t_data_sg, dsg, cmd->t_data_nents, i) {
daddr = kmap_atomic(sg_page(dsg)) + dsg->offset;
- paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ paddr = kmap_atomic(sg_page(psg)) + sg->offset;
for (j = 0; j < dsg->length; j += dev->dev_attrib.block_size) {
- if (offset >= sg->length) {
+ if (offset >= psg->length) {
kunmap_atomic(paddr);
- sg = sg_next(sg);
- paddr = kmap_atomic(sg_page(sg)) + sg->offset;
+ psg = sg_next(psg);
+ paddr = kmap_atomic(sg_page(psg)) + psg->offset;
offset = 0;
}
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 43c5ca9878bc..3bebc71ea033 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -440,8 +440,8 @@ check_scsi_name:
padding = ((-scsi_target_len) & 3);
if (padding)
scsi_target_len += padding;
- if (scsi_name_len > 256)
- scsi_name_len = 256;
+ if (scsi_target_len > 256)
+ scsi_target_len = 256;
buf[off-1] = scsi_target_len;
off += scsi_target_len;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c50fd9f11aab..2956250b7225 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -669,9 +669,6 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
return;
}
- if (!success)
- cmd->transport_state |= CMD_T_FAILED;
-
/*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
@@ -681,7 +678,7 @@ void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete(&cmd->t_transport_stop_comp);
return;
- } else if (cmd->transport_state & CMD_T_FAILED) {
+ } else if (!success) {
INIT_WORK(&cmd->work, target_complete_failure_work);
} else {
INIT_WORK(&cmd->work, target_complete_ok_work);
@@ -1604,6 +1601,9 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_CHECK_CONDITION_ABORT_CMD:
case TCM_CHECK_CONDITION_UNIT_ATTENTION:
case TCM_CHECK_CONDITION_NOT_READY:
+ case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
+ case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 35c066489a19..5f88d767671e 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -136,6 +136,7 @@ config SPEAR_THERMAL
config RCAR_THERMAL
tristate "Renesas R-Car thermal driver"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ depends on HAS_IOMEM
help
Enable this to plug the R-Car thermal sensor driver into the Linux
thermal framework.
@@ -210,8 +211,16 @@ config ACPI_INT3403_THERMAL
tristate "ACPI INT3403 thermal driver"
depends on X86 && ACPI
help
- This driver uses ACPI INT3403 device objects. If present, it will
- register each INT3403 thermal sensor as a thermal zone.
+ Newer laptops and tablets that use ACPI may have thermal sensors
+ outside the core CPU/SOC for thermal safety reasons. These
+ temperature sensors are also exposed for the OS to use via the so
+ called INT3403 ACPI object. This driver will, on devices that have
+ such sensors, expose the temperature information from these sensors
+ to userspace via the normal thermal framework. This means that a wide
+ range of applications and GUI widgets can show this information to
+ the user or use this information for making decisions. For example,
+ the Intel Thermal Daemon can use this information to let the user
+ run the laptop without turning on the fans.
menu "Texas Instruments thermal drivers"
source "drivers/thermal/ti-soc-thermal/Kconfig"
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 338a88bf6662..71b0ec0c370d 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list);
static DEFINE_MUTEX(thermal_list_lock);
static DEFINE_MUTEX(thermal_governor_lock);
+static struct thermal_governor *def_governor;
+
static struct thermal_governor *__find_governor(const char *name)
{
struct thermal_governor *pos;
+ if (!name || !name[0])
+ return def_governor;
+
list_for_each_entry(pos, &thermal_governor_list, governor_list)
if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH))
return pos;
@@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor)
if (__find_governor(governor->name) == NULL) {
err = 0;
list_add(&governor->governor_list, &thermal_governor_list);
+ if (!def_governor && !strncmp(governor->name,
+ DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH))
+ def_governor = governor;
}
mutex_lock(&thermal_list_lock);
list_for_each_entry(pos, &thermal_tz_list, node) {
+ /*
+ * only thermal zones with specified tz->tzp->governor_name
+ * may run with tz->governor unset
+ */
if (pos->governor)
continue;
- if (pos->tzp)
- name = pos->tzp->governor_name;
- else
- name = DEFAULT_THERMAL_GOVERNOR;
+
+ name = pos->tzp->governor_name;
+
if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH))
pos->governor = governor;
}
@@ -342,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz)
static void handle_non_critical_trips(struct thermal_zone_device *tz,
int trip, enum thermal_trip_type trip_type)
{
- if (tz->governor)
- tz->governor->throttle(tz, trip);
+ tz->governor ? tz->governor->throttle(tz, trip) :
+ def_governor->throttle(tz, trip);
}
static void handle_critical_trips(struct thermal_zone_device *tz,
@@ -1107,7 +1118,7 @@ __thermal_cooling_device_register(struct device_node *np,
INIT_LIST_HEAD(&cdev->thermal_instances);
cdev->np = np;
cdev->ops = ops;
- cdev->updated = true;
+ cdev->updated = false;
cdev->device.class = &thermal_class;
cdev->devdata = devdata;
dev_set_name(&cdev->device, "cooling_device%d", cdev->id);
@@ -1533,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
if (tz->tzp)
tz->governor = __find_governor(tz->tzp->governor_name);
else
- tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR);
+ tz->governor = def_governor;
mutex_unlock(&thermal_governor_lock);
diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c
index 972e1c73722a..081fd7e6a9f0 100644
--- a/drivers/thermal/x86_pkg_temp_thermal.c
+++ b/drivers/thermal/x86_pkg_temp_thermal.c
@@ -68,6 +68,10 @@ struct phy_dev_entry {
struct thermal_zone_device *tzone;
};
+static const struct thermal_zone_params pkg_temp_tz_params = {
+ .no_hwmon = true,
+};
+
/* List maintaining number of package instances */
static LIST_HEAD(phy_dev_list);
static DEFINE_MUTEX(phy_dev_list_mutex);
@@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
int err;
u32 tj_max;
struct phy_dev_entry *phy_dev_entry;
- char buffer[30];
int thres_count;
u32 eax, ebx, ecx, edx;
u8 *temp;
@@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu)
phy_dev_entry->first_cpu = cpu;
phy_dev_entry->tj_max = tj_max;
phy_dev_entry->ref_cnt = 1;
- snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n",
- phy_dev_entry->phys_proc_id);
- phy_dev_entry->tzone = thermal_zone_device_register(buffer,
+ phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp",
thres_count,
(thres_count == MAX_NUMBER_OF_TRIPS) ?
0x03 : 0x01,
- phy_dev_entry, &tzone_ops, NULL, 0, 0);
+ phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0);
if (IS_ERR(phy_dev_entry->tzone)) {
err = PTR_ERR(phy_dev_entry->tzone);
goto err_ret_free;
diff --git a/drivers/tty/hvc/hvc_opal.c b/drivers/tty/hvc/hvc_opal.c
index 6496872e2e47..b01659bd4f7c 100644
--- a/drivers/tty/hvc/hvc_opal.c
+++ b/drivers/tty/hvc/hvc_opal.c
@@ -255,13 +255,7 @@ static int __init hvc_opal_init(void)
/* Register as a vio device to receive callbacks */
return platform_driver_register(&hvc_opal_driver);
}
-module_init(hvc_opal_init);
-
-static void __exit hvc_opal_exit(void)
-{
- platform_driver_unregister(&hvc_opal_driver);
-}
-module_exit(hvc_opal_exit);
+device_initcall(hvc_opal_init);
static void udbg_opal_putc(char c)
{
diff --git a/drivers/tty/hvc/hvc_rtas.c b/drivers/tty/hvc/hvc_rtas.c
index 0069bb86ba49..08c87920b74a 100644
--- a/drivers/tty/hvc/hvc_rtas.c
+++ b/drivers/tty/hvc/hvc_rtas.c
@@ -102,17 +102,7 @@ static int __init hvc_rtas_init(void)
return 0;
}
-module_init(hvc_rtas_init);
-
-/* This will tear down the tty portion of the driver */
-static void __exit hvc_rtas_exit(void)
-{
- /* Really the fun isn't over until the worker thread breaks down and
- * the tty cleans up */
- if (hvc_rtas_dev)
- hvc_remove(hvc_rtas_dev);
-}
-module_exit(hvc_rtas_exit);
+device_initcall(hvc_rtas_init);
/* This will happen prior to module init. There is no tty at this time? */
static int __init hvc_rtas_console_init(void)
diff --git a/drivers/tty/hvc/hvc_udbg.c b/drivers/tty/hvc/hvc_udbg.c
index 72228276fe31..9cf573d06a29 100644
--- a/drivers/tty/hvc/hvc_udbg.c
+++ b/drivers/tty/hvc/hvc_udbg.c
@@ -80,14 +80,7 @@ static int __init hvc_udbg_init(void)
return 0;
}
-module_init(hvc_udbg_init);
-
-static void __exit hvc_udbg_exit(void)
-{
- if (hvc_udbg_dev)
- hvc_remove(hvc_udbg_dev);
-}
-module_exit(hvc_udbg_exit);
+device_initcall(hvc_udbg_init);
static int __init hvc_udbg_console_init(void)
{
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index 636c9baad7a5..2dc2831840ca 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -561,18 +561,7 @@ static int __init xen_hvc_init(void)
#endif
return r;
}
-
-static void __exit xen_hvc_fini(void)
-{
- struct xencons_info *entry, *next;
-
- if (list_empty(&xenconsoles))
- return;
-
- list_for_each_entry_safe(entry, next, &xenconsoles, list) {
- xen_console_remove(entry);
- }
-}
+device_initcall(xen_hvc_init);
static int xen_cons_init(void)
{
@@ -598,10 +587,6 @@ static int xen_cons_init(void)
hvc_instantiate(HVC_COOKIE, 0, ops);
return 0;
}
-
-
-module_init(xen_hvc_init);
-module_exit(xen_hvc_fini);
console_initcall(xen_cons_init);
#ifdef CONFIG_EARLY_PRINTK
diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
index f34461c5f14e..2ebe47b78a3e 100644
--- a/drivers/tty/n_gsm.c
+++ b/drivers/tty/n_gsm.c
@@ -1090,6 +1090,7 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
{
unsigned int addr = 0;
unsigned int modem = 0;
+ unsigned int brk = 0;
struct gsm_dlci *dlci;
int len = clen;
u8 *dp = data;
@@ -1116,6 +1117,16 @@ static void gsm_control_modem(struct gsm_mux *gsm, u8 *data, int clen)
if (len == 0)
return;
}
+ len--;
+ if (len > 0) {
+ while (gsm_read_ea(&brk, *dp++) == 0) {
+ len--;
+ if (len == 0)
+ return;
+ }
+ modem <<= 7;
+ modem |= (brk & 0x7f);
+ }
tty = tty_port_tty_get(&dlci->port);
gsm_process_modem(tty, dlci, modem, clen);
if (tty) {
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index cb8017aa4434..d15624c1b751 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -817,8 +817,7 @@ static void process_echoes(struct tty_struct *tty)
struct n_tty_data *ldata = tty->disc_data;
size_t echoed;
- if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
- ldata->echo_mark == ldata->echo_tail)
+ if (ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
@@ -1244,7 +1243,8 @@ n_tty_receive_signal_char(struct tty_struct *tty, int signal, unsigned char c)
if (L_ECHO(tty)) {
echo_char(c, tty);
commit_echoes(tty);
- }
+ } else
+ process_echoes(tty);
isig(signal, tty);
return;
}
@@ -1274,7 +1274,7 @@ n_tty_receive_char_special(struct tty_struct *tty, unsigned char c)
if (I_IXON(tty)) {
if (c == START_CHAR(tty)) {
start_tty(tty);
- commit_echoes(tty);
+ process_echoes(tty);
return 0;
}
if (c == STOP_CHAR(tty)) {
@@ -1820,8 +1820,10 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old)
* Fix tty hang when I_IXON(tty) is cleared, but the tty
* been stopped by STOP_CHAR(tty) before it.
*/
- if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped)
+ if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) {
start_tty(tty);
+ process_echoes(tty);
+ }
/* The termios change make the tty ready for I/O */
if (waitqueue_active(&tty->write_wait))
@@ -1896,7 +1898,7 @@ err:
static inline int input_available_p(struct tty_struct *tty, int poll)
{
struct n_tty_data *ldata = tty->disc_data;
- int amt = poll && !TIME_CHAR(tty) ? MIN_CHAR(tty) : 1;
+ int amt = poll && !TIME_CHAR(tty) && MIN_CHAR(tty) ? MIN_CHAR(tty) : 1;
if (ldata->icanon && !L_EXTPROC(tty)) {
if (ldata->canon_head != ldata->read_tail)
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61ecd709a722..69932b7556cf 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -2433,6 +2433,24 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
serial_dl_write(up, quot);
/*
+ * XR17V35x UARTs have an extra fractional divisor register (DLD)
+ *
+ * We need to recalculate all of the registers, because DLM and DLL
+ * are already rounded to a whole integer.
+ *
+ * When recalculating we use a 32x clock instead of a 16x clock to
+ * allow 1-bit for rounding in the fractional part.
+ */
+ if (up->port.type == PORT_XR17V35X) {
+ unsigned int baud_x32 = (port->uartclk * 2) / baud;
+ u16 quot = baud_x32 / 32;
+ u8 quot_frac = DIV_ROUND_CLOSEST(baud_x32 % 32, 2);
+
+ serial_dl_write(up, quot);
+ serial_port_out(port, 0x2, quot_frac & 0xf);
+ }
+
+ /*
* LCR DLAB must be set to enable 64-byte FIFO mode. If the FCR
* is written without DLAB set, this mode will be disabled.
*/
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index faa64e646100..ed3113576740 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -391,7 +391,7 @@ static int dw8250_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
static int dw8250_suspend(struct device *dev)
{
struct dw8250_data *data = dev_get_drvdata(dev);
@@ -409,7 +409,7 @@ static int dw8250_resume(struct device *dev)
return 0;
}
-#endif /* CONFIG_PM */
+#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_RUNTIME
static int dw8250_runtime_suspend(struct device *dev)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 50228eed3b6f..0ff3e3624d4c 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -783,7 +783,8 @@ static int pci_netmos_9900_setup(struct serial_private *priv,
{
unsigned int bar;
- if ((priv->dev->subsystem_device & 0xff00) == 0x3000) {
+ if ((priv->dev->device != PCI_DEVICE_ID_NETMOS_9865) &&
+ (priv->dev->subsystem_device & 0xff00) == 0x3000) {
/* netmos apparently orders BARs by datasheet layout, so serial
* ports get BARs 0 and 3 (or 1 and 4 for memmapped)
*/
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index fa511ebab67c..77f035158d6c 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -738,9 +738,6 @@ static int serial_omap_startup(struct uart_port *port)
return retval;
}
disable_irq(up->wakeirq);
- } else {
- dev_info(up->port.dev, "no wakeirq for uart%d\n",
- up->port.line);
}
dev_dbg(up->port.dev, "serial_omap_startup+%d\n", up->port.line);
@@ -1604,8 +1601,11 @@ static int serial_omap_probe_rs485(struct uart_omap_port *up,
flags & SER_RS485_RTS_AFTER_SEND);
if (ret < 0)
return ret;
- } else
+ } else if (up->rts_gpio == -EPROBE_DEFER) {
+ return -EPROBE_DEFER;
+ } else {
up->rts_gpio = -EINVAL;
+ }
if (of_property_read_u32_array(np, "rs485-rts-delay",
rs485_delay, 2) == 0) {
@@ -1687,6 +1687,9 @@ static int serial_omap_probe(struct platform_device *pdev)
up->port.iotype = UPIO_MEM;
up->port.irq = uartirq;
up->wakeirq = wakeirq;
+ if (!up->wakeirq)
+ dev_info(up->port.dev, "no wakeirq for uart%d\n",
+ up->port.line);
up->port.regshift = 2;
up->port.fifosize = 64;
diff --git a/drivers/tty/serial/sirfsoc_uart.c b/drivers/tty/serial/sirfsoc_uart.c
index 49a2ffd101a7..b7bfe24d4ebc 100644
--- a/drivers/tty/serial/sirfsoc_uart.c
+++ b/drivers/tty/serial/sirfsoc_uart.c
@@ -542,8 +542,10 @@ static void sirfsoc_rx_tmo_process_tl(unsigned long param)
wr_regl(port, ureg->sirfsoc_rx_dma_io_ctrl,
rd_regl(port, ureg->sirfsoc_rx_dma_io_ctrl) |
SIRFUART_IO_MODE);
- sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
spin_unlock_irqrestore(&sirfport->rx_lock, flags);
+ spin_lock(&port->lock);
+ sirfsoc_uart_pio_rx_chars(port, 4 - sirfport->rx_io_count);
+ spin_unlock(&port->lock);
if (sirfport->rx_io_count == 4) {
spin_lock_irqsave(&sirfport->rx_lock, flags);
sirfport->rx_io_count = 0;
diff --git a/drivers/tty/serial/sunhv.c b/drivers/tty/serial/sunhv.c
index cf86e729532b..dc697cee248a 100644
--- a/drivers/tty/serial/sunhv.c
+++ b/drivers/tty/serial/sunhv.c
@@ -433,13 +433,10 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (port->sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&port->lock);
- } else
- spin_lock(&port->lock);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
while (n > 0) {
unsigned long ra = __pa(con_write_page);
@@ -470,8 +467,7 @@ static void sunhv_console_write_paged(struct console *con, const char *s, unsign
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static inline void sunhv_console_putchar(struct uart_port *port, char c)
@@ -492,7 +488,10 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
unsigned long flags;
int i, locked = 1;
- local_irq_save(flags);
+ if (port->sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&port->lock, flags);
+ else
+ spin_lock_irqsave(&port->lock, flags);
if (port->sysrq) {
locked = 0;
} else if (oops_in_progress) {
@@ -507,8 +506,7 @@ static void sunhv_console_write_bychar(struct console *con, const char *s, unsig
}
if (locked)
- spin_unlock(&port->lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&port->lock, flags);
}
static struct console sunhv_console = {
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 380fb5355cb2..5faa8e905e98 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -844,20 +844,16 @@ static void sunsab_console_write(struct console *con, const char *s, unsigned n)
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, n, sunsab_console_putchar);
sunsab_tec_wait(up);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int sunsab_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/serial/sunsu.c b/drivers/tty/serial/sunsu.c
index db79b76f5c8e..9a0f24f83720 100644
--- a/drivers/tty/serial/sunsu.c
+++ b/drivers/tty/serial/sunsu.c
@@ -1295,13 +1295,10 @@ static void sunsu_console_write(struct console *co, const char *s,
unsigned int ier;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
/*
* First save the UER then disable the interrupts
@@ -1319,8 +1316,7 @@ static void sunsu_console_write(struct console *co, const char *s,
serial_out(up, UART_IER, ier);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
/*
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index 45a8c6aa5837..a2c40ed287d2 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -1195,20 +1195,16 @@ sunzilog_console_write(struct console *con, const char *s, unsigned int count)
unsigned long flags;
int locked = 1;
- local_irq_save(flags);
- if (up->port.sysrq) {
- locked = 0;
- } else if (oops_in_progress) {
- locked = spin_trylock(&up->port.lock);
- } else
- spin_lock(&up->port.lock);
+ if (up->port.sysrq || oops_in_progress)
+ locked = spin_trylock_irqsave(&up->port.lock, flags);
+ else
+ spin_lock_irqsave(&up->port.lock, flags);
uart_console_write(&up->port, s, count, sunzilog_putchar);
udelay(2);
if (locked)
- spin_unlock(&up->port.lock);
- local_irq_restore(flags);
+ spin_unlock_irqrestore(&up->port.lock, flags);
}
static int __init sunzilog_console_setup(struct console *con, char *options)
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 61b1137d7e56..23b5d32954bf 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -1164,6 +1164,8 @@ static void csi_J(struct vc_data *vc, int vpar)
scr_memsetw(vc->vc_screenbuf, vc->vc_video_erase_char,
vc->vc_screenbuf_size >> 1);
set_origin(vc);
+ if (CON_IS_VISIBLE(vc))
+ update_screen(vc);
/* fall through */
case 2: /* erase whole display */
count = vc->vc_cols * vc->vc_rows;
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 80de2f88ed2c..4ab2cb62dfce 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -105,7 +105,7 @@ static int hw_ep_flush(struct ci_hdrc *ci, int num, int dir)
do {
/* flush any pending transfer */
- hw_write(ci, OP_ENDPTFLUSH, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTFLUSH, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTFLUSH, BIT(n)))
cpu_relax();
} while (hw_read(ci, OP_ENDPTSTAT, BIT(n)));
@@ -205,7 +205,7 @@ static int hw_ep_prime(struct ci_hdrc *ci, int num, int dir, int is_ctrl)
if (is_ctrl && dir == RX && hw_read(ci, OP_ENDPTSETUPSTAT, BIT(num)))
return -EAGAIN;
- hw_write(ci, OP_ENDPTPRIME, BIT(n), BIT(n));
+ hw_write(ci, OP_ENDPTPRIME, ~0, BIT(n));
while (hw_read(ci, OP_ENDPTPRIME, BIT(n)))
cpu_relax();
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 8d72f0c65937..062967c90b2a 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -717,6 +717,10 @@ int usb_get_configuration(struct usb_device *dev)
result = -ENOMEM;
goto err;
}
+
+ if (dev->quirks & USB_QUIRK_DELAY_INIT)
+ msleep(100);
+
result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno,
bigbuffer, length);
if (result < 0) {
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5d01558cef66..ab90a0156828 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -63,8 +63,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
dynid->id.idProduct = idProduct;
dynid->id.match_flags = USB_DEVICE_ID_MATCH_DEVICE;
if (fields > 2 && bInterfaceClass) {
- if (bInterfaceClass > 255)
- return -EINVAL;
+ if (bInterfaceClass > 255) {
+ retval = -EINVAL;
+ goto fail;
+ }
dynid->id.bInterfaceClass = (u8)bInterfaceClass;
dynid->id.match_flags |= USB_DEVICE_ID_MATCH_INT_CLASS;
@@ -73,17 +75,21 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (fields > 4) {
const struct usb_device_id *id = id_table;
- if (!id)
- return -ENODEV;
+ if (!id) {
+ retval = -ENODEV;
+ goto fail;
+ }
for (; id->match_flags; id++)
if (id->idVendor == refVendor && id->idProduct == refProduct)
break;
- if (id->match_flags)
+ if (id->match_flags) {
dynid->id.driver_info = id->driver_info;
- else
- return -ENODEV;
+ } else {
+ retval = -ENODEV;
+ goto fail;
+ }
}
spin_lock(&dynids->lock);
@@ -95,6 +101,10 @@ ssize_t usb_store_new_id(struct usb_dynids *dynids,
if (retval)
return retval;
return count;
+
+fail:
+ kfree(dynid);
+ return retval;
}
EXPORT_SYMBOL_GPL(usb_store_new_id);
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 199aaea6bfe0..2518c3250750 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1032,7 +1032,6 @@ static int register_root_hub(struct usb_hcd *hcd)
dev_name(&usb_dev->dev), retval);
return retval;
}
- usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
}
retval = usb_new_device (usb_dev);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index babba885978d..64ea21971be2 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -128,7 +128,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
return usb_get_intfdata(hdev->actconfig->interface[0]);
}
-int usb_device_supports_lpm(struct usb_device *udev)
+static int usb_device_supports_lpm(struct usb_device *udev)
{
/* USB 2.1 (and greater) devices indicate LPM support through
* their USB 2.0 Extended Capabilities BOS descriptor.
@@ -149,11 +149,6 @@ int usb_device_supports_lpm(struct usb_device *udev)
"Power management will be impacted.\n");
return 0;
}
-
- /* udev is root hub */
- if (!udev->parent)
- return 1;
-
if (udev->parent->lpm_capable)
return 1;
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 8f37063c0a49..739ee8e8bdfd 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Microsoft LifeCam-VX700 v2.0 */
{ USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* Logitech HD Pro Webcams C920 and C930e */
+ { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT },
+ { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index c49383669cd8..823857767a16 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -35,7 +35,6 @@ extern int usb_get_device_descriptor(struct usb_device *dev,
unsigned int size);
extern int usb_get_bos_descriptor(struct usb_device *dev);
extern void usb_release_bos_descriptor(struct usb_device *dev);
-extern int usb_device_supports_lpm(struct usb_device *udev);
extern char *usb_cache_string(struct usb_device *udev, int index);
extern int usb_set_configuration(struct usb_device *dev, int configuration);
extern int usb_choose_configuration(struct usb_device *udev);
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index 8565d87f94b4..1d129884cc39 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -216,7 +216,7 @@ static int dwc2_hs_phy_init(struct dwc2_hsotg *hsotg, bool select_phy)
int retval = 0;
if (!select_phy)
- return -ENODEV;
+ return 0;
usbcfg = readl(hsotg->regs + GUSBCFG);
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index f59484d43b35..4d918ed8d343 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -2565,25 +2565,14 @@ static void _dwc2_hcd_endpoint_reset(struct usb_hcd *hcd,
struct usb_host_endpoint *ep)
{
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- int is_control = usb_endpoint_xfer_control(&ep->desc);
- int is_out = usb_endpoint_dir_out(&ep->desc);
- int epnum = usb_endpoint_num(&ep->desc);
- struct usb_device *udev;
unsigned long flags;
dev_dbg(hsotg->dev,
"DWC OTG HCD EP RESET: bEndpointAddress=0x%02x\n",
ep->desc.bEndpointAddress);
- udev = to_usb_device(hsotg->dev);
-
spin_lock_irqsave(&hsotg->lock, flags);
-
- usb_settoggle(udev, epnum, is_out, 0);
- if (is_control)
- usb_settoggle(udev, epnum, !is_out, 0);
dwc2_hcd_endpoint_reset(hsotg, ep);
-
spin_unlock_irqrestore(&hsotg->lock, flags);
}
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index d01d0d3f2cf0..eaba547ce26b 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -124,6 +124,9 @@ static int dwc2_driver_probe(struct platform_device *dev)
int retval;
int irq;
+ if (usb_disabled())
+ return -ENODEV;
+
match = of_match_device(dwc2_of_match_table, &dev->dev);
if (match && match->data) {
params = match->data;
diff --git a/drivers/usb/gadget/bcm63xx_udc.c b/drivers/usb/gadget/bcm63xx_udc.c
index 888fbb43b338..e969eb809a85 100644
--- a/drivers/usb/gadget/bcm63xx_udc.c
+++ b/drivers/usb/gadget/bcm63xx_udc.c
@@ -360,24 +360,30 @@ static inline void usb_dma_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
bcm_writel(val, udc->iudma_regs + off);
}
-static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmac_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmac_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAC_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off)
+static inline u32 usb_dmas_readl(struct bcm63xx_udc *udc, u32 off, int chan)
{
- return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ return bcm_readl(udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
-static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off)
+static inline void usb_dmas_writel(struct bcm63xx_udc *udc, u32 val, u32 off,
+ int chan)
{
- bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off);
+ bcm_writel(val, udc->iudma_regs + IUDMA_DMAS_OFFSET + off +
+ (ENETDMA_CHAN_WIDTH * chan));
}
static inline void set_clocks(struct bcm63xx_udc *udc, bool is_enabled)
@@ -638,7 +644,7 @@ static void iudma_write(struct bcm63xx_udc *udc, struct iudma_ch *iudma,
} while (!last_bd);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_EN_MASK,
- ENETDMAC_CHANCFG_REG(iudma->ch_idx));
+ ENETDMAC_CHANCFG_REG, iudma->ch_idx);
}
/**
@@ -694,9 +700,9 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
bcm63xx_fifo_reset_ep(udc, max(0, iudma->ep_num));
/* stop DMA, then wait for the hardware to wrap up */
- usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG(ch_idx));
+ usb_dmac_writel(udc, 0, ENETDMAC_CHANCFG_REG, ch_idx);
- while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)) &
+ while (usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx) &
ENETDMAC_CHANCFG_EN_MASK) {
udelay(1);
@@ -713,10 +719,10 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
dev_warn(udc->dev, "forcibly halting IUDMA channel %d\n",
ch_idx);
usb_dmac_writel(udc, ENETDMAC_CHANCFG_BUFHALT_MASK,
- ENETDMAC_CHANCFG_REG(ch_idx));
+ ENETDMAC_CHANCFG_REG, ch_idx);
}
}
- usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG(ch_idx));
+ usb_dmac_writel(udc, ~0, ENETDMAC_IR_REG, ch_idx);
/* don't leave "live" HW-owned entries for the next guy to step on */
for (d = iudma->bd_ring; d <= iudma->end_bd; d++)
@@ -728,11 +734,11 @@ static void iudma_reset_channel(struct bcm63xx_udc *udc, struct iudma_ch *iudma)
/* set up IRQs, UBUS burst size, and BD base for this channel */
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IRMASK_REG(ch_idx));
- usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG(ch_idx));
+ ENETDMAC_IRMASK_REG, ch_idx);
+ usb_dmac_writel(udc, 8, ENETDMAC_MAXBURST_REG, ch_idx);
- usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG(ch_idx));
- usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG(ch_idx));
+ usb_dmas_writel(udc, iudma->bd_ring_dma, ENETDMAS_RSTART_REG, ch_idx);
+ usb_dmas_writel(udc, 0, ENETDMAS_SRAM2_REG, ch_idx);
}
/**
@@ -2035,7 +2041,7 @@ static irqreturn_t bcm63xx_udc_data_isr(int irq, void *dev_id)
spin_lock(&udc->lock);
usb_dmac_writel(udc, ENETDMAC_IR_BUFDONE_MASK,
- ENETDMAC_IR_REG(iudma->ch_idx));
+ ENETDMAC_IR_REG, iudma->ch_idx);
bep = iudma->bep;
rc = iudma_read(udc, iudma);
@@ -2175,18 +2181,18 @@ static int bcm63xx_iudma_dbg_show(struct seq_file *s, void *p)
seq_printf(s, " [ep%d]:\n",
max_t(int, iudma_defaults[ch_idx].ep_num, 0));
seq_printf(s, " cfg: %08x; irqstat: %08x; irqmask: %08x; maxburst: %08x\n",
- usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IR_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_IRMASK_REG(ch_idx)),
- usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG(ch_idx)));
+ usb_dmac_readl(udc, ENETDMAC_CHANCFG_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IR_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_IRMASK_REG, ch_idx),
+ usb_dmac_readl(udc, ENETDMAC_MAXBURST_REG, ch_idx));
- sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG(ch_idx));
- sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG(ch_idx));
+ sram2 = usb_dmas_readl(udc, ENETDMAS_SRAM2_REG, ch_idx);
+ sram3 = usb_dmas_readl(udc, ENETDMAS_SRAM3_REG, ch_idx);
seq_printf(s, " base: %08x; index: %04x_%04x; desc: %04x_%04x %08x\n",
- usb_dmas_readl(udc, ENETDMAS_RSTART_REG(ch_idx)),
+ usb_dmas_readl(udc, ENETDMAS_RSTART_REG, ch_idx),
sram2 >> 16, sram2 & 0xffff,
sram3 >> 16, sram3 & 0xffff,
- usb_dmas_readl(udc, ENETDMAS_SRAM4_REG(ch_idx)));
+ usb_dmas_readl(udc, ENETDMAS_SRAM4_REG, ch_idx));
seq_printf(s, " desc: %d/%d used", iudma->n_bds_used,
iudma->n_bds);
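
Editor's note: the bcm63xx_udc hunks above fold the per-channel stride into the DMA register accessors instead of baking it into each ENETDMAC_*_REG macro. A minimal user-space sketch of the same address arithmetic follows; the offset and stride values are made up here and merely stand in for the SoC-specific ENETDMA constants.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical values; the real ENETDMA offsets and channel width are SoC-specific. */
    #define IUDMA_DMAC_OFFSET    0x200
    #define ENETDMA_CHAN_WIDTH   0x10
    #define ENETDMAC_CHANCFG_REG 0x00
    #define ENETDMAC_IR_REG      0x04

    /* Compute the byte offset of a per-channel DMA controller register. */
    static uint32_t dmac_reg_offset(uint32_t reg, int chan)
    {
        return IUDMA_DMAC_OFFSET + reg + ENETDMA_CHAN_WIDTH * chan;
    }

    int main(void)
    {
        for (int chan = 0; chan < 4; chan++)
            printf("chan %d: CHANCFG at 0x%03x, IR at 0x%03x\n", chan,
                   (unsigned)dmac_reg_offset(ENETDMAC_CHANCFG_REG, chan),
                   (unsigned)dmac_reg_offset(ENETDMAC_IR_REG, chan));
        return 0;
    }
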
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 306a2b52125c..2b4334394076 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -585,7 +585,6 @@ static ssize_t ffs_epfile_io(struct file *file,
char __user *buf, size_t len, int read)
{
struct ffs_epfile *epfile = file->private_data;
- struct usb_gadget *gadget = epfile->ffs->gadget;
struct ffs_ep *ep;
char *data = NULL;
ssize_t ret, data_len;
@@ -622,6 +621,12 @@ static ssize_t ffs_epfile_io(struct file *file,
/* Allocate & copy */
if (!halt) {
/*
+ * if we _do_ wait above, the epfile->ffs->gadget might be NULL
+ * before the waiting completes, so do not assign to 'gadget' earlier
+ */
+ struct usb_gadget *gadget = epfile->ffs->gadget;
+
+ /*
* Controller may require buffer size to be aligned to
* maxpacketsize of an out endpoint.
*/
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index bf7a56b6d48a..69b76efd11e9 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -1157,7 +1157,7 @@ static int __init printer_bind_config(struct usb_configuration *c)
usb_gadget_set_selfpowered(gadget);
- if (gadget->is_otg) {
+ if (gadget_is_otg(gadget)) {
otg_descriptor.bmAttributes |= USB_OTG_HNP;
printer_cfg_driver.descriptors = otg_desc;
printer_cfg_driver.bmAttributes |= USB_CONFIG_ATT_WAKEUP;
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index f04b2c3154de..dd9678f85c58 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1629,7 +1629,7 @@ static void s3c2410_udc_reinit(struct s3c2410_udc *dev)
ep->ep.desc = NULL;
ep->halted = 0;
INIT_LIST_HEAD(&ep->queue);
- usb_ep_set_maxpacket_limit(&ep->ep, &ep->ep.maxpacket);
+ usb_ep_set_maxpacket_limit(&ep->ep, ep->ep.maxpacket);
}
}
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index 471142725ffe..81cda09b47e3 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -685,8 +685,15 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 status, masked_status, pcd_status = 0, cmd;
int bh;
+ unsigned long flags;
- spin_lock (&ehci->lock);
+ /*
+ * For threadirqs option we use spin_lock_irqsave() variant to prevent
+ * deadlock with ehci hrtimer callback, because hrtimer callbacks run
+ * in interrupt context even when threadirqs is specified. We can go
+ * back to spin_lock() variant when hrtimer callbacks become threaded.
+ */
+ spin_lock_irqsave(&ehci->lock, flags);
status = ehci_readl(ehci, &ehci->regs->status);
@@ -704,7 +711,7 @@ static irqreturn_t ehci_irq (struct usb_hcd *hcd)
/* Shared IRQ? */
if (!masked_status || unlikely(ehci->rh_state == EHCI_RH_HALTED)) {
- spin_unlock(&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
return IRQ_NONE;
}
@@ -815,7 +822,7 @@ dead:
if (bh)
ehci_work (ehci);
- spin_unlock (&ehci->lock);
+ spin_unlock_irqrestore(&ehci->lock, flags);
if (pcd_status)
usb_hcd_poll_rh_status(hcd);
return IRQ_HANDLED;
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 47b858fc50b2..7ae0c4d51741 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -238,6 +238,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
int port;
int mask;
int changed;
+ bool fs_idle_delay;
ehci_dbg(ehci, "suspend root hub\n");
@@ -272,6 +273,7 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
ehci->bus_suspended = 0;
ehci->owned_ports = 0;
changed = 0;
+ fs_idle_delay = false;
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *reg = &ehci->regs->port_status [port];
@@ -300,16 +302,32 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
}
if (t1 != t2) {
+ /*
+ * On some controllers, Wake-On-Disconnect will
+ * generate false wakeup signals until the bus
+ * switches over to full-speed idle. For their
+ * sake, add a delay if we need one.
+ */
+ if ((t2 & PORT_WKDISC_E) &&
+ ehci_port_speed(ehci, t2) ==
+ USB_PORT_STAT_HIGH_SPEED)
+ fs_idle_delay = true;
ehci_writel(ehci, t2, reg);
changed = 1;
}
}
+ spin_unlock_irq(&ehci->lock);
+
+ if ((changed && ehci->has_tdi_phy_lpm) || fs_idle_delay) {
+ /*
+ * Wait for HCD to enter low-power mode or for the bus
+ * to switch to full-speed idle.
+ */
+ usleep_range(5000, 5500);
+ }
if (changed && ehci->has_tdi_phy_lpm) {
- spin_unlock_irq(&ehci->lock);
- msleep(5); /* 5 ms for HCD to enter low-power mode */
spin_lock_irq(&ehci->lock);
-
port = HCS_N_PORTS(ehci->hcs_params);
while (port--) {
u32 __iomem *hostpc_reg = &ehci->regs->hostpc[port];
@@ -322,8 +340,8 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
port, (t3 & HOSTPC_PHCD) ?
"succeeded" : "failed");
}
+ spin_unlock_irq(&ehci->lock);
}
- spin_unlock_irq(&ehci->lock);
/* Apparently some devices need a >= 1-uframe delay here */
if (ehci->bus_suspended)
diff --git a/drivers/usb/host/xhci-dbg.c b/drivers/usb/host/xhci-dbg.c
index b016d38199f2..eb009a457fb5 100644
--- a/drivers/usb/host/xhci-dbg.c
+++ b/drivers/usb/host/xhci-dbg.c
@@ -203,12 +203,12 @@ void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num)
addr, (unsigned int)temp);
addr = &ir_set->erst_base;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_base = @%08llx\n",
addr, temp_64);
addr = &ir_set->erst_dequeue;
- temp_64 = readq(addr);
+ temp_64 = xhci_read_64(xhci, addr);
xhci_dbg(xhci, " %p: ir_set.erst_dequeue = @%08llx\n",
addr, temp_64);
}
@@ -412,7 +412,7 @@ void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci)
{
u64 val;
- val = readq(&xhci->op_regs->cmd_ring);
+ val = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
xhci_dbg(xhci, "// xHC command ring deq ptr low bits + flags = @%08x\n",
lower_32_bits(val));
xhci_dbg(xhci, "// xHC command ring deq ptr high bits = @%08x\n",
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 873c272b3ef5..bce4391a0e7d 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1958,7 +1958,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_warn(xhci, "WARN something wrong with SW event ring "
"dequeue ptr.\n");
/* Update HC event ring dequeue pointer */
- temp = readq(&xhci->ir_set->erst_dequeue);
+ temp = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp &= ERST_PTR_MASK;
/* Don't clear the EHB bit (which is RW1C) because
* there might be more events to service.
@@ -1967,7 +1967,7 @@ static void xhci_set_hc_event_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Write event ring dequeue pointer, "
"preserving EHB bit");
- writeq(((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
+ xhci_write_64(xhci, ((u64) deq & (u64) ~ERST_PTR_MASK) | temp,
&xhci->ir_set->erst_dequeue);
}
@@ -2269,7 +2269,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Device context base array address = 0x%llx (DMA), %p (virt)",
(unsigned long long)xhci->dcbaa->dma, xhci->dcbaa);
- writeq(dma, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr);
/*
* Initialize the ring segment pool. The ring must be a contiguous
@@ -2312,13 +2312,13 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
(unsigned long long)xhci->cmd_ring->first_seg->dma);
/* Set the address in the Command Ring Control register */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) |
xhci->cmd_ring->cycle_state;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%x", val);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
xhci_dbg_cmd_ptrs(xhci);
xhci->lpm_command = xhci_alloc_command(xhci, true, true, flags);
@@ -2396,10 +2396,10 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Set ERST base address for ir_set 0 = 0x%llx",
(unsigned long long)xhci->erst.erst_dma_addr);
- val_64 = readq(&xhci->ir_set->erst_base);
+ val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base);
val_64 &= ERST_PTR_MASK;
val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK);
- writeq(val_64, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base);
/* Set the event ring dequeue address */
xhci_set_hc_event_deq(xhci);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 3c898c12a06b..04f986d9234f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -142,6 +142,11 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"QUIRK: Resetting on resume");
xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
+ if (pdev->vendor == PCI_VENDOR_ID_RENESAS &&
+ pdev->device == 0x0015 &&
+ pdev->subsystem_vendor == PCI_VENDOR_ID_SAMSUNG &&
+ pdev->subsystem_device == 0xc0cd)
+ xhci->quirks |= XHCI_RESET_ON_RESUME;
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
}
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index a0b248c34526..0ed64eb68e48 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -307,13 +307,14 @@ static int xhci_abort_cmd_ring(struct xhci_hcd *xhci)
return 0;
}
- temp_64 = readq(&xhci->op_regs->cmd_ring);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
if (!(temp_64 & CMD_RING_RUNNING)) {
xhci_dbg(xhci, "Command ring had been stopped\n");
return 0;
}
xhci->cmd_ring_state = CMD_RING_STATE_ABORTED;
- writeq(temp_64 | CMD_RING_ABORT, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, temp_64 | CMD_RING_ABORT,
+ &xhci->op_regs->cmd_ring);
/* Section 4.6.1.2 of xHCI 1.0 spec says software should
* time the completion of all xHCI commands, including
@@ -2864,8 +2865,9 @@ hw_died:
/* Clear the event handler busy flag (RW1C);
* the event ring should be empty.
*/
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
- writeq(temp_64 | ERST_EHB, &xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64 | ERST_EHB,
+ &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
return IRQ_HANDLED;
@@ -2877,7 +2879,7 @@ hw_died:
*/
while (xhci_handle_event(xhci) > 0) {}
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
/* If necessary, update the HW's version of the event ring deq ptr. */
if (event_ring_deq != xhci->event_ring->dequeue) {
deq = xhci_trb_virt_to_dma(xhci->event_ring->deq_seg,
@@ -2892,7 +2894,7 @@ hw_died:
/* Clear the event handler busy flag (RW1C); event ring is empty. */
temp_64 |= ERST_EHB;
- writeq(temp_64, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, temp_64, &xhci->ir_set->erst_dequeue);
spin_unlock(&xhci->lock);
@@ -2965,58 +2967,8 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
}
while (1) {
- if (room_on_ring(xhci, ep_ring, num_trbs)) {
- union xhci_trb *trb = ep_ring->enqueue;
- unsigned int usable = ep_ring->enq_seg->trbs +
- TRBS_PER_SEGMENT - 1 - trb;
- u32 nop_cmd;
-
- /*
- * Section 4.11.7.1 TD Fragments states that a link
- * TRB must only occur at the boundary between
- * data bursts (eg 512 bytes for 480M).
- * While it is possible to split a large fragment
- * we don't know the size yet.
- * Simplest solution is to fill the trb before the
- * LINK with nop commands.
- */
- if (num_trbs == 1 || num_trbs <= usable || usable == 0)
- break;
-
- if (ep_ring->type != TYPE_BULK)
- /*
- * While isoc transfers might have a buffer that
- * crosses a 64k boundary it is unlikely.
- * Since we can't add NOPs without generating
- * gaps in the traffic just hope it never
- * happens at the end of the ring.
- * This could be fixed by writing a LINK TRB
- * instead of the first NOP - however the
- * TRB_TYPE_LINK_LE32() calls would all need
- * changing to check the ring length.
- */
- break;
-
- if (num_trbs >= TRBS_PER_SEGMENT) {
- xhci_err(xhci, "Too many fragments %d, max %d\n",
- num_trbs, TRBS_PER_SEGMENT - 1);
- return -EINVAL;
- }
-
- nop_cmd = cpu_to_le32(TRB_TYPE(TRB_TR_NOOP) |
- ep_ring->cycle_state);
- ep_ring->num_trbs_free -= usable;
- do {
- trb->generic.field[0] = 0;
- trb->generic.field[1] = 0;
- trb->generic.field[2] = 0;
- trb->generic.field[3] = nop_cmd;
- trb++;
- } while (--usable);
- ep_ring->enqueue = trb;
- if (room_on_ring(xhci, ep_ring, num_trbs))
- break;
- }
+ if (room_on_ring(xhci, ep_ring, num_trbs))
+ break;
if (ep_ring == xhci->cmd_ring) {
xhci_err(xhci, "Do not support expand command ring\n");
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index ad364394885a..924a6ccdb622 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -611,7 +611,7 @@ int xhci_run(struct usb_hcd *hcd)
xhci_dbg(xhci, "Event ring:\n");
xhci_debug_ring(xhci, xhci->event_ring);
xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
- temp_64 = readq(&xhci->ir_set->erst_dequeue);
+ temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
temp_64 &= ~ERST_PTR_MASK;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"ERST deq = 64'h%0lx", (long unsigned int) temp_64);
@@ -756,11 +756,11 @@ static void xhci_save_registers(struct xhci_hcd *xhci)
{
xhci->s3.command = readl(&xhci->op_regs->command);
xhci->s3.dev_nt = readl(&xhci->op_regs->dev_notification);
- xhci->s3.dcbaa_ptr = readq(&xhci->op_regs->dcbaa_ptr);
+ xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci->s3.config_reg = readl(&xhci->op_regs->config_reg);
xhci->s3.erst_size = readl(&xhci->ir_set->erst_size);
- xhci->s3.erst_base = readq(&xhci->ir_set->erst_base);
- xhci->s3.erst_dequeue = readq(&xhci->ir_set->erst_dequeue);
+ xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
+ xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
xhci->s3.irq_pending = readl(&xhci->ir_set->irq_pending);
xhci->s3.irq_control = readl(&xhci->ir_set->irq_control);
}
@@ -769,11 +769,11 @@ static void xhci_restore_registers(struct xhci_hcd *xhci)
{
writel(xhci->s3.command, &xhci->op_regs->command);
writel(xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
- writeq(xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
+ xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
writel(xhci->s3.config_reg, &xhci->op_regs->config_reg);
writel(xhci->s3.erst_size, &xhci->ir_set->erst_size);
- writeq(xhci->s3.erst_base, &xhci->ir_set->erst_base);
- writeq(xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
+ xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
+ xhci_write_64(xhci, xhci->s3.erst_dequeue, &xhci->ir_set->erst_dequeue);
writel(xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
writel(xhci->s3.irq_control, &xhci->ir_set->irq_control);
}
@@ -783,7 +783,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
u64 val_64;
/* step 2: initialize command ring buffer */
- val_64 = readq(&xhci->op_regs->cmd_ring);
+ val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
xhci->cmd_ring->dequeue) &
@@ -792,7 +792,7 @@ static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"// Setting command ring address to 0x%llx",
(long unsigned long) val_64);
- writeq(val_64, &xhci->op_regs->cmd_ring);
+ xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
/*
@@ -3842,7 +3842,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
if (ret) {
return ret;
}
- temp_64 = readq(&xhci->op_regs->dcbaa_ptr);
+ temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
"Op regs DCBAA ptr = %#016llx", temp_64);
xhci_dbg_trace(xhci, trace_xhci_dbg_address,
@@ -4730,8 +4730,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
struct device *dev = hcd->self.controller;
int retval;
- /* Limit the block layer scatter-gather lists to half a segment. */
- hcd->self.sg_tablesize = TRBS_PER_SEGMENT / 2;
+ /* Accept arbitrarily long scatter-gather lists */
+ hcd->self.sg_tablesize = ~0;
/* support to build packet from discontinuous buffers */
hcd->self.no_sg_constraint = 1;
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index f8416639bf31..58ed9d088e63 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -28,17 +28,6 @@
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
-/*
- * Registers should always be accessed with double word or quad word accesses.
- *
- * Some xHCI implementations may support 64-bit address pointers. Registers
- * with 64-bit address pointers should be written to with dword accesses by
- * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
- * xHCI implementations that do not support 64-bit address pointers will ignore
- * the high dword, and write order is irrelevant.
- */
-#include <asm-generic/io-64-nonatomic-lo-hi.h>
-
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
@@ -1279,7 +1268,7 @@ union xhci_trb {
* since the command ring is 64-byte aligned.
* It must also be greater than 16.
*/
-#define TRBS_PER_SEGMENT 256
+#define TRBS_PER_SEGMENT 64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS (TRBS_PER_SEGMENT - 3)
#define TRB_SEGMENT_SIZE (TRBS_PER_SEGMENT*16)
@@ -1614,6 +1603,34 @@ static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
#define xhci_warn_ratelimited(xhci, fmt, args...) \
dev_warn_ratelimited(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
+/*
+ * Registers should always be accessed with double word or quad word accesses.
+ *
+ * Some xHCI implementations may support 64-bit address pointers. Registers
+ * with 64-bit address pointers should be written to with dword accesses by
+ * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
+ * xHCI implementations that do not support 64-bit address pointers will ignore
+ * the high dword, and write order is irrelevant.
+ */
+static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
+ __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u64 val_lo = readl(ptr);
+ u64 val_hi = readl(ptr + 1);
+ return val_lo + (val_hi << 32);
+}
+static inline void xhci_write_64(struct xhci_hcd *xhci,
+ const u64 val, __le64 __iomem *regs)
+{
+ __u32 __iomem *ptr = (__u32 __iomem *) regs;
+ u32 val_lo = lower_32_bits(val);
+ u32 val_hi = upper_32_bits(val);
+
+ writel(val_lo, ptr);
+ writel(val_hi, ptr + 1);
+}
+
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
return xhci->quirks & XHCI_LINK_TRB_QUIRK;
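
Editor's note: the new xhci_read_64()/xhci_write_64() helpers above replace readq()/writeq() so 64-bit xHCI registers are always touched as two 32-bit accesses, low dword first. A hedged user-space sketch of the same lo/hi split over an ordinary buffer (real hardware would need readl()/writel() on an __iomem mapping, which this sketch only imitates):

    #include <stdint.h>
    #include <stdio.h>

    /* Read a 64-bit value as two 32-bit accesses, low word first. */
    static uint64_t read_64_lo_hi(const volatile uint32_t *reg)
    {
        uint64_t lo = reg[0];
        uint64_t hi = reg[1];
        return lo | (hi << 32);
    }

    /* Write a 64-bit value as two 32-bit accesses, low word first. */
    static void write_64_lo_hi(volatile uint32_t *reg, uint64_t val)
    {
        reg[0] = (uint32_t)val;
        reg[1] = (uint32_t)(val >> 32);
    }

    int main(void)
    {
        uint32_t fake_reg[2] = { 0, 0 };

        write_64_lo_hi(fake_reg, 0x1122334455667788ULL);
        printf("lo=0x%08x hi=0x%08x read back=0x%016llx\n",
               (unsigned)fake_reg[0], (unsigned)fake_reg[1],
               (unsigned long long)read_64_lo_hi(fake_reg));
        return 0;
    }
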
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index fc192ad9cc6a..239ad0b1ceb6 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -477,8 +477,11 @@ static irqreturn_t musb_stage0_irq(struct musb *musb, u8 int_usb,
musb->port1_status |=
(USB_PORT_STAT_C_SUSPEND << 16)
| MUSB_PORT_STAT_RESUME;
+ musb->rh_timer = jiffies
+ + msecs_to_jiffies(20);
schedule_delayed_work(
- &musb->finish_resume_work, 20);
+ &musb->finish_resume_work,
+ msecs_to_jiffies(20));
musb->xceiv->state = OTG_STATE_A_HOST;
musb->is_active = 1;
@@ -2157,11 +2160,19 @@ static void musb_restore_context(struct musb *musb)
void __iomem *musb_base = musb->mregs;
void __iomem *ep_target_regs;
void __iomem *epio;
+ u8 power;
musb_writew(musb_base, MUSB_FRAME, musb->context.frame);
musb_writeb(musb_base, MUSB_TESTMODE, musb->context.testmode);
musb_write_ulpi_buscontrol(musb->mregs, musb->context.busctl);
- musb_writeb(musb_base, MUSB_POWER, musb->context.power);
+
+ /* Don't affect SUSPENDM/RESUME bits in POWER reg */
+ power = musb_readb(musb_base, MUSB_POWER);
+ power &= MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME;
+ musb->context.power &= ~(MUSB_POWER_SUSPENDM | MUSB_POWER_RESUME);
+ power |= musb->context.power;
+ musb_writeb(musb_base, MUSB_POWER, power);
+
musb_writew(musb_base, MUSB_INTRTXE, musb->intrtxe);
musb_writew(musb_base, MUSB_INTRRXE, musb->intrrxe);
musb_writeb(musb_base, MUSB_INTRUSBE, musb->context.intrusbe);
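
Editor's note: the musb_restore_context() change above keeps the live SUSPENDM/RESUME bits and merges everything else from the saved POWER value. A hedged sketch of that mask-and-merge; the bit positions here are made up and only stand in for the real MUSB_POWER_* masks.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical bit positions; the real masks live in the musb register headers. */
    #define POWER_SUSPENDM  0x02
    #define POWER_RESUME    0x04

    static uint8_t merge_power(uint8_t live, uint8_t saved)
    {
        uint8_t keep = POWER_SUSPENDM | POWER_RESUME;

        /* Keep the live suspend/resume state, take everything else from the context. */
        return (live & keep) | (saved & (uint8_t)~keep);
    }

    int main(void)
    {
        uint8_t live = 0x04;    /* RESUME currently signalling */
        uint8_t saved = 0x63;   /* saved context with stale SUSPENDM/RESUME bits */

        printf("restored POWER = 0x%02x\n", merge_power(live, saved));
        return 0;
    }
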
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index ed455724017b..abb38c3833ef 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1183,6 +1183,9 @@ irqreturn_t musb_h_ep0_irq(struct musb *musb)
csr = MUSB_CSR0_H_STATUSPKT
| MUSB_CSR0_TXPKTRDY;
+ /* disable ping token in status phase */
+ csr |= MUSB_CSR0_H_DIS_PING;
+
/* flag status stage */
musb->ep0_stage = MUSB_EP0_STATUS;
diff --git a/drivers/usb/musb/musb_virthub.c b/drivers/usb/musb/musb_virthub.c
index eb634433ef09..e2d2d8c9891b 100644
--- a/drivers/usb/musb/musb_virthub.c
+++ b/drivers/usb/musb/musb_virthub.c
@@ -135,7 +135,8 @@ void musb_port_suspend(struct musb *musb, bool do_suspend)
/* later, GetPortStatus will stop RESUME signaling */
musb->port1_status |= MUSB_PORT_STAT_RESUME;
- schedule_delayed_work(&musb->finish_resume_work, 20);
+ schedule_delayed_work(&musb->finish_resume_work,
+ msecs_to_jiffies(20));
}
}
@@ -158,7 +159,6 @@ void musb_port_reset(struct musb *musb, bool do_reset)
*/
power = musb_readb(mbase, MUSB_POWER);
if (do_reset) {
-
/*
* If RESUME is set, we must make sure it stays minimum 20 ms.
* Then we must clear RESUME and wait a bit to let musb start
@@ -167,11 +167,22 @@ void musb_port_reset(struct musb *musb, bool do_reset)
* detected".
*/
if (power & MUSB_POWER_RESUME) {
- while (time_before(jiffies, musb->rh_timer))
- msleep(1);
+ long remain = (unsigned long) musb->rh_timer - jiffies;
+
+ if (musb->rh_timer > 0 && remain > 0) {
+ /* take into account the minimum delay after resume */
+ schedule_delayed_work(
+ &musb->deassert_reset_work, remain);
+ return;
+ }
+
musb_writeb(mbase, MUSB_POWER,
- power & ~MUSB_POWER_RESUME);
- msleep(1);
+ power & ~MUSB_POWER_RESUME);
+
+ /* Give the core 1 ms to clear MUSB_POWER_RESUME */
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(1));
+ return;
}
power &= 0xf0;
@@ -180,7 +191,8 @@ void musb_port_reset(struct musb *musb, bool do_reset)
musb->port1_status |= USB_PORT_STAT_RESET;
musb->port1_status &= ~USB_PORT_STAT_ENABLE;
- schedule_delayed_work(&musb->deassert_reset_work, 50);
+ schedule_delayed_work(&musb->deassert_reset_work,
+ msecs_to_jiffies(50));
} else {
dev_dbg(musb->controller, "root port reset stopped\n");
musb_writeb(mbase, MUSB_POWER,
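
Editor's note: the musb changes above stop passing raw millisecond counts to schedule_delayed_work(), which expects a delay in jiffies; on a HZ=100 kernel a literal 20 would mean 200 ms rather than 20 ms. A small sketch of the conversion arithmetic, using a simplified round-up version of msecs_to_jiffies() and hypothetical HZ values:

    #include <stdio.h>

    /* Simplified stand-in for the kernel's msecs_to_jiffies(), rounding up. */
    static unsigned long msecs_to_ticks(unsigned int ms, unsigned int hz)
    {
        return ((unsigned long)ms * hz + 999) / 1000;
    }

    int main(void)
    {
        unsigned int hz_values[] = { 100, 250, 1000 };

        for (unsigned i = 0; i < 3; i++) {
            unsigned int hz = hz_values[i];
            printf("HZ=%4u: 20 ms -> %lu ticks, raw 20 ticks -> %lu ms\n",
                   hz, msecs_to_ticks(20, hz), 20UL * 1000 / hz);
        }
        return 0;
    }
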
diff --git a/drivers/usb/musb/omap2430.c b/drivers/usb/musb/omap2430.c
index 2a408cdaf7b2..8aa59a2c5eb2 100644
--- a/drivers/usb/musb/omap2430.c
+++ b/drivers/usb/musb/omap2430.c
@@ -659,7 +659,6 @@ static int omap2430_runtime_suspend(struct device *dev)
OTG_INTERFSEL);
omap2430_low_level_exit(musb);
- phy_power_off(musb->phy);
}
return 0;
@@ -674,7 +673,6 @@ static int omap2430_runtime_resume(struct device *dev)
omap2430_low_level_init(musb);
musb_writel(musb->mregs, OTG_INTERFSEL,
musb->context.otg_interfsel);
- phy_power_on(musb->phy);
}
return 0;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 8546c8dccd51..d204f745ed05 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -159,32 +159,6 @@ put_3p3:
return rc;
}
-#ifdef CONFIG_PM_SLEEP
-#define USB_PHY_SUSP_DIG_VOL 500000
-static int msm_hsusb_config_vddcx(int high)
-{
- int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
- int min_vol;
- int ret;
-
- if (high)
- min_vol = USB_PHY_VDD_DIG_VOL_MIN;
- else
- min_vol = USB_PHY_SUSP_DIG_VOL;
-
- ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
- if (ret) {
- pr_err("%s: unable to set the voltage for regulator "
- "HSUSB_VDDCX\n", __func__);
- return ret;
- }
-
- pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
-
- return ret;
-}
-#endif
-
static int msm_hsusb_ldo_set_mode(int on)
{
int ret = 0;
@@ -440,7 +414,32 @@ static int msm_otg_reset(struct usb_phy *phy)
#define PHY_SUSPEND_TIMEOUT_USEC (500 * 1000)
#define PHY_RESUME_TIMEOUT_USEC (100 * 1000)
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
+
+#define USB_PHY_SUSP_DIG_VOL 500000
+static int msm_hsusb_config_vddcx(int high)
+{
+ int max_vol = USB_PHY_VDD_DIG_VOL_MAX;
+ int min_vol;
+ int ret;
+
+ if (high)
+ min_vol = USB_PHY_VDD_DIG_VOL_MIN;
+ else
+ min_vol = USB_PHY_SUSP_DIG_VOL;
+
+ ret = regulator_set_voltage(hsusb_vddcx, min_vol, max_vol);
+ if (ret) {
+ pr_err("%s: unable to set the voltage for regulator "
+ "HSUSB_VDDCX\n", __func__);
+ return ret;
+ }
+
+ pr_debug("%s: min_vol:%d max_vol:%d\n", __func__, min_vol, max_vol);
+
+ return ret;
+}
+
static int msm_otg_suspend(struct msm_otg *motg)
{
struct usb_phy *phy = &motg->phy;
@@ -1733,22 +1732,18 @@ static int msm_otg_pm_resume(struct device *dev)
}
#endif
-#ifdef CONFIG_PM
static const struct dev_pm_ops msm_otg_dev_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(msm_otg_pm_suspend, msm_otg_pm_resume)
SET_RUNTIME_PM_OPS(msm_otg_runtime_suspend, msm_otg_runtime_resume,
msm_otg_runtime_idle)
};
-#endif
static struct platform_driver msm_otg_driver = {
.remove = msm_otg_remove,
.driver = {
.name = DRIVER_NAME,
.owner = THIS_MODULE,
-#ifdef CONFIG_PM
.pm = &msm_otg_dev_pm_ops,
-#endif
},
};
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index e6f61e4361df..8afa813d690b 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -130,7 +130,7 @@ struct usb_phy *usb_get_phy(enum usb_phy_type type)
phy = __usb_find_phy(&phy_list, type);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver of type %s\n",
+ pr_debug("PHY: unable to find transceiver of type %s\n",
usb_phy_type_string(type));
goto err0;
}
@@ -228,7 +228,7 @@ struct usb_phy *usb_get_phy_dev(struct device *dev, u8 index)
phy = __usb_find_phy_dev(dev, &phy_bind_list, index);
if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- pr_err("unable to find transceiver\n");
+ dev_dbg(dev, "unable to find transceiver\n");
goto err0;
}
@@ -424,10 +424,8 @@ int usb_bind_phy(const char *dev_name, u8 index,
unsigned long flags;
phy_bind = kzalloc(sizeof(*phy_bind), GFP_KERNEL);
- if (!phy_bind) {
- pr_err("phy_bind(): No memory for phy_bind");
+ if (!phy_bind)
return -ENOMEM;
- }
phy_bind->dev_name = dev_name;
phy_bind->phy_dev_name = phy_dev_name;
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index ce0d7b0db012..44ab12986805 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -152,6 +152,7 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(FTDI_VID, FTDI_CANUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_CANDAPTER_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_NXTCAM_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_EV3CON_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_0_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_1_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SCS_DEVICE_2_PID) },
@@ -191,6 +192,8 @@ static const struct usb_device_id id_table_combined[] = {
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) },
{ USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_SPROG_II) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_LP101_PID) },
+ { USB_DEVICE(FTDI_VID, FTDI_TAGSYS_P200X_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_LENZ_LIUSB_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_632_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_XF_634_PID) },
@@ -904,6 +907,8 @@ static const struct usb_device_id id_table_combined[] = {
/* Crucible Devices */
{ USB_DEVICE(FTDI_VID, FTDI_CT_COMET_PID) },
{ USB_DEVICE(FTDI_VID, FTDI_Z3X_PID) },
+ /* Cressi Devices */
+ { USB_DEVICE(FTDI_VID, FTDI_CRESSI_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index a7019d1e3058..e599fbfcde5f 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -50,6 +50,7 @@
#define TI_XDS100V2_PID 0xa6d0
#define FTDI_NXTCAM_PID 0xABB8 /* NXTCam for Mindstorms NXT */
+#define FTDI_EV3CON_PID 0xABB9 /* Mindstorms EV3 Console Adapter */
/* US Interface Navigator (http://www.usinterface.com/) */
#define FTDI_USINT_CAT_PID 0xb810 /* Navigator CAT and 2nd PTT lines */
@@ -363,6 +364,12 @@
/* Sprog II (Andrew Crosland's SprogII DCC interface) */
#define FTDI_SPROG_II 0xF0C8
+/*
+ * Two of the Tagsys RFID Readers
+ */
+#define FTDI_TAGSYS_LP101_PID 0xF0E9 /* Tagsys L-P101 RFID */
+#define FTDI_TAGSYS_P200X_PID 0xF0EE /* Tagsys Medio P200x RFID */
+
/* an infrared receiver for user access control with IR tags */
#define FTDI_PIEGROUP_PID 0xF208 /* Product Id */
@@ -1313,3 +1320,9 @@
* Manufacturer: Smart GSM Team
*/
#define FTDI_Z3X_PID 0x0011
+
+/*
+ * Product: Cressi PC Interface
+ * Manufacturer: Cressi
+ */
+#define FTDI_CRESSI_PID 0x87d0
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 5c86f57e4afa..68fc9fe65936 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1362,7 +1362,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1267, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1268, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1269, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1270, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf5_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1271, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1272, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1273, 0xff, 0xff, 0xff) },
@@ -1525,7 +1526,8 @@ static const struct usb_device_id option_ids[] = {
/* Cinterion */
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index c65437cfd4a2..968a40201e5f 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -139,6 +139,9 @@ static const struct usb_device_id id_table[] = {
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 0)}, /* Sierra Wireless EM7700 Device Management */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 2)}, /* Sierra Wireless EM7700 NMEA */
{USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x901c, 3)}, /* Sierra Wireless EM7700 Modem */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 0)}, /* Netgear AirCard 340U Device Management */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 2)}, /* Netgear AirCard 340U NMEA */
+ {USB_DEVICE_INTERFACE_NUMBER(0x1199, 0x9051, 3)}, /* Netgear AirCard 340U Modem */
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index f112b079ddfc..fb79775447b0 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -71,7 +71,8 @@ DEVICE(hp4x, HP4X_IDS);
/* Suunto ANT+ USB Driver */
#define SUUNTO_IDS() \
- { USB_DEVICE(0x0fcf, 0x1008) }
+ { USB_DEVICE(0x0fcf, 0x1008) }, \
+ { USB_DEVICE(0x0fcf, 0x1009) } /* Dynastream ANT USB-m Stick */
DEVICE(suunto, SUUNTO_IDS);
/* Siemens USB/MPI adapter */
diff --git a/drivers/usb/storage/Kconfig b/drivers/usb/storage/Kconfig
index 8470e1b114f2..1dd0604d1911 100644
--- a/drivers/usb/storage/Kconfig
+++ b/drivers/usb/storage/Kconfig
@@ -18,7 +18,9 @@ config USB_STORAGE
This option depends on 'SCSI' support being enabled, but you
probably also need 'SCSI device support: SCSI disk support'
- (BLK_DEV_SD) for most USB storage devices.
+ (BLK_DEV_SD) for most USB storage devices. Some devices also
+ will require 'Probe all LUNs on each SCSI device'
+ (SCSI_MULTI_LUN).
To compile this driver as a module, choose M here: the
module will be called usb-storage.
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 18509e6c21ab..9d38ddc8da49 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -78,6 +78,8 @@ static const char* host_info(struct Scsi_Host *host)
static int slave_alloc (struct scsi_device *sdev)
{
+ struct us_data *us = host_to_us(sdev->host);
+
/*
* Set the INQUIRY transfer length to 36. We don't use any of
* the extra data and many devices choke if asked for more or
@@ -102,6 +104,10 @@ static int slave_alloc (struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ /* Tell the SCSI layer if we know there is more than one LUN */
+ if (us->protocol == USB_PR_BULK && us->max_lun > 0)
+ sdev->sdev_bflags |= BLIST_FORCELUN;
+
return 0;
}
diff --git a/drivers/usb/storage/unusual_cypress.h b/drivers/usb/storage/unusual_cypress.h
index 65a6a75066a8..82e8ed0324e3 100644
--- a/drivers/usb/storage/unusual_cypress.h
+++ b/drivers/usb/storage/unusual_cypress.h
@@ -31,7 +31,7 @@ UNUSUAL_DEV( 0x04b4, 0x6831, 0x0000, 0x9999,
"Cypress ISD-300LP",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
-UNUSUAL_DEV( 0x14cd, 0x6116, 0x0000, 0x0219,
+UNUSUAL_DEV( 0x14cd, 0x6116, 0x0160, 0x0160,
"Super Top",
"USB 2.0 SATA BRIDGE",
USB_SC_CYP_ATACB, USB_PR_DEVICE, NULL, 0),
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index ad06255c2ade..adbeb255616a 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1455,6 +1455,13 @@ UNUSUAL_DEV( 0x0f88, 0x042e, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_FIX_CAPACITY ),
+/* Reported by Moritz Moeller-Herrmann <moritz-kernel@moeller-herrmann.de> */
+UNUSUAL_DEV( 0x0fca, 0x8004, 0x0201, 0x0201,
+ "Research In Motion",
+ "BlackBerry Bold 9000",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64 ),
+
/* Reported by Michael Stattmann <michael@stattmann.com> */
UNUSUAL_DEV( 0x0fce, 0xd008, 0x0000, 0x0000,
"Sony Ericsson",
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 4fb7a8f83c8a..54af4e933695 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -186,12 +186,12 @@ static bool is_invalid_reserved_pfn(unsigned long pfn)
if (pfn_valid(pfn)) {
bool reserved;
struct page *tail = pfn_to_page(pfn);
- struct page *head = compound_trans_head(tail);
+ struct page *head = compound_head(tail);
reserved = !!(PageReserved(head));
if (head != tail) {
/*
* "head" is not a dangling pointer
- * (compound_trans_head takes care of that)
+ * (compound_head takes care of that)
* but the hugepage may have been split
* from under us (and we may not hold a
* reference count on the head page so it can
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 9a68409580d5..a0fa5de210cf 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -70,7 +70,12 @@ enum {
};
struct vhost_net_ubuf_ref {
- struct kref kref;
+ /* refcount follows semantics similar to kref:
+ * 0: object is released
+ * 1: no outstanding ubufs
+ * >1: outstanding ubufs
+ */
+ atomic_t refcount;
wait_queue_head_t wait;
struct vhost_virtqueue *vq;
};
@@ -116,14 +121,6 @@ static void vhost_net_enable_zcopy(int vq)
vhost_net_zcopy_mask |= 0x1 << vq;
}
-static void vhost_net_zerocopy_done_signal(struct kref *kref)
-{
- struct vhost_net_ubuf_ref *ubufs;
-
- ubufs = container_of(kref, struct vhost_net_ubuf_ref, kref);
- wake_up(&ubufs->wait);
-}
-
static struct vhost_net_ubuf_ref *
vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
{
@@ -134,21 +131,24 @@ vhost_net_ubuf_alloc(struct vhost_virtqueue *vq, bool zcopy)
ubufs = kmalloc(sizeof(*ubufs), GFP_KERNEL);
if (!ubufs)
return ERR_PTR(-ENOMEM);
- kref_init(&ubufs->kref);
+ atomic_set(&ubufs->refcount, 1);
init_waitqueue_head(&ubufs->wait);
ubufs->vq = vq;
return ubufs;
}
-static void vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
+static int vhost_net_ubuf_put(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
+ int r = atomic_sub_return(1, &ubufs->refcount);
+ if (unlikely(!r))
+ wake_up(&ubufs->wait);
+ return r;
}
static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
{
- kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
- wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
+ vhost_net_ubuf_put(ubufs);
+ wait_event(ubufs->wait, !atomic_read(&ubufs->refcount));
}
static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
@@ -306,23 +306,26 @@ static void vhost_zerocopy_callback(struct ubuf_info *ubuf, bool success)
{
struct vhost_net_ubuf_ref *ubufs = ubuf->ctx;
struct vhost_virtqueue *vq = ubufs->vq;
- int cnt = atomic_read(&ubufs->kref.refcount);
+ int cnt;
+
+ rcu_read_lock_bh();
/* set len to mark this desc buffers done DMA */
vq->heads[ubuf->desc].len = success ?
VHOST_DMA_DONE_LEN : VHOST_DMA_FAILED_LEN;
- vhost_net_ubuf_put(ubufs);
+ cnt = vhost_net_ubuf_put(ubufs);
/*
* Trigger polling thread if guest stopped submitting new buffers:
- * in this case, the refcount after decrement will eventually reach 1
- * so here it is 2.
+ * in this case, the refcount after decrement will eventually reach 1.
* We also trigger polling periodically after each 16 packets
* (the value 16 here is more or less arbitrary, it's tuned to trigger
* less than 10% of times).
*/
- if (cnt <= 2 || !(cnt % 16))
+ if (cnt <= 1 || !(cnt % 16))
vhost_poll_queue(&vq->poll);
+
+ rcu_read_unlock_bh();
}
/* Expects to be always run from workqueue - which acts as
@@ -420,7 +423,7 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = ubuf;
msg.msg_controllen = sizeof(ubuf);
ubufs = nvq->ubufs;
- kref_get(&ubufs->kref);
+ atomic_inc(&ubufs->refcount);
nvq->upend_idx = (nvq->upend_idx + 1) % UIO_MAXIOV;
} else {
msg.msg_control = NULL;
@@ -780,7 +783,7 @@ static void vhost_net_flush(struct vhost_net *n)
vhost_net_ubuf_put_and_wait(n->vqs[VHOST_NET_VQ_TX].ubufs);
mutex_lock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
n->tx_flush = false;
- kref_init(&n->vqs[VHOST_NET_VQ_TX].ubufs->kref);
+ atomic_set(&n->vqs[VHOST_NET_VQ_TX].ubufs->refcount, 1);
mutex_unlock(&n->vqs[VHOST_NET_VQ_TX].vq.mutex);
}
}
@@ -800,6 +803,8 @@ static int vhost_net_release(struct inode *inode, struct file *f)
fput(tx_sock->file);
if (rx_sock)
fput(rx_sock->file);
+ /* Make sure no callbacks are outstanding */
+ synchronize_rcu_bh();
/* We do an extra flush before freeing memory,
* since jobs can re-queue themselves. */
vhost_net_flush(n);
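
Editor's note: the vhost-net patch above replaces the kref with a bare atomic counter whose value 1 means "no outstanding zero-copy buffers" and whose final put (reaching 0) wakes any waiter. A hedged, single-threaded C11 sketch of that counting scheme (the real code pairs atomic_sub_return() with a wait queue):

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_int refcount;

    /* 1 == no outstanding buffers; each in-flight buffer adds one reference. */
    static void ubuf_get(void) { atomic_fetch_add(&refcount, 1); }

    /* Returns the new count; 0 means the object may be released and waiters woken. */
    static int ubuf_put(void)
    {
        int r = atomic_fetch_sub(&refcount, 1) - 1;
        if (r == 0)
            printf("count hit 0: wake up waiter\n");
        return r;
    }

    int main(void)
    {
        atomic_init(&refcount, 1);      /* base reference, nothing in flight */

        ubuf_get();                     /* submit a zero-copy buffer */
        printf("after completion, count=%d\n", ubuf_put());  /* back to 1 */

        /* put_and_wait: drop the base reference, then wait for 0 */
        if (ubuf_put() == 0)
            printf("all buffers done\n");
        return 0;
    }
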
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 0a025b8e2a12..e48d4a672580 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1001,6 +1001,12 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
break;
}
+ /* virtio-scsi spec requires byte 0 of the lun to be 1 */
+ if (unlikely(v_req.lun[0] != 1)) {
+ vhost_scsi_send_bad_target(vs, vq, head, out);
+ continue;
+ }
+
/* Extract the tpgt */
target = v_req.lun[1];
tpg = ACCESS_ONCE(vs_tpg[target]);
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 22262a3a0e2d..dade5b7699bc 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -364,7 +364,7 @@ config FB_SA1100
config FB_IMX
tristate "Freescale i.MX1/21/25/27 LCD support"
- depends on FB && IMX_HAVE_PLATFORM_IMX_FB
+ depends on FB && ARCH_MXC
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
diff --git a/drivers/video/exynos/Kconfig b/drivers/video/exynos/Kconfig
index 1129d0e9e640..75c8a8e7efc0 100644
--- a/drivers/video/exynos/Kconfig
+++ b/drivers/video/exynos/Kconfig
@@ -22,7 +22,8 @@ config EXYNOS_MIPI_DSI
config EXYNOS_LCD_S6E8AX0
bool "S6E8AX0 MIPI AMOLED LCD Driver"
- depends on (EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE && LCD_CLASS_DEVICE)
+ depends on EXYNOS_MIPI_DSI && BACKLIGHT_CLASS_DEVICE
+ depends on (LCD_CLASS_DEVICE = y)
default n
help
If you have an S6E8AX0 MIPI AMOLED LCD Panel, say Y to enable its
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index bbeb8dd7f108..77d6221618f4 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -2160,8 +2160,8 @@ static int dispc_ovl_calc_scaling_24xx(unsigned long pclk, unsigned long lclk,
*five_taps = false;
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*core_clk = dispc.feat->calc_core_clk(pclk, in_width,
in_height, out_width, out_height, mem_to_mem);
error = (in_width > maxsinglelinewidth || !*core_clk ||
@@ -2199,8 +2199,8 @@ static int dispc_ovl_calc_scaling_34xx(unsigned long pclk, unsigned long lclk,
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
do {
- in_height = DIV_ROUND_UP(height, *decim_y);
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_height = height / *decim_y;
+ in_width = width / *decim_x;
*five_taps = in_height > out_height;
if (in_width > maxsinglelinewidth)
@@ -2268,7 +2268,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
{
u16 in_width, in_width_max;
int decim_x_min = *decim_x;
- u16 in_height = DIV_ROUND_UP(height, *decim_y);
+ u16 in_height = height / *decim_y;
const int maxsinglelinewidth =
dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
@@ -2287,7 +2287,7 @@ static int dispc_ovl_calc_scaling_44xx(unsigned long pclk, unsigned long lclk,
return -EINVAL;
do {
- in_width = DIV_ROUND_UP(width, *decim_x);
+ in_width = width / *decim_x;
} while (*decim_x <= *x_predecim &&
in_width > maxsinglelinewidth && ++*decim_x);
@@ -2466,8 +2466,8 @@ static int dispc_ovl_setup_common(enum omap_plane plane,
if (r)
return r;
- in_width = DIV_ROUND_UP(in_width, x_predecim);
- in_height = DIV_ROUND_UP(in_height, y_predecim);
+ in_width = in_width / x_predecim;
+ in_height = in_height / y_predecim;
if (color_mode == OMAP_DSS_COLOR_YUV2 ||
color_mode == OMAP_DSS_COLOR_UYVY ||
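
Editor's note: the dispc.c hunks above switch the predecimated dimensions from DIV_ROUND_UP to truncating division, so the decimated size never exceeds what the hardware actually fetches. A small sketch of the numeric difference, with arbitrary example values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int height = 1080, decim_y = 7;   /* arbitrary example values */

        printf("height=%u decim_y=%u: round-up=%u, truncating=%u\n",
               height, decim_y,
               DIV_ROUND_UP(height, decim_y), height / decim_y);
        return 0;
    }
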
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index 7411f2674e16..23ef21ffc2c4 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -117,7 +117,7 @@ struct dpi_clk_calc_ctx {
/* outputs */
struct dsi_clock_info dsi_cinfo;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index efb9ee9e3c96..ba806c9e7f54 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -46,7 +46,7 @@ static struct {
struct sdi_clk_calc_ctx {
unsigned long pck_min, pck_max;
- unsigned long long fck;
+ unsigned long fck;
struct dispc_clock_info dispc_cinfo;
};
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index a06edbfa95ca..1b5d48c578e1 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -884,7 +884,7 @@ static ssize_t ca91cx42_master_read(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -938,7 +938,7 @@ static ssize_t ca91cx42_master_write(struct vme_master_resource *image,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
diff --git a/drivers/vme/bridges/vme_tsi148.c b/drivers/vme/bridges/vme_tsi148.c
index 16830d8b777c..9911cd5fddb5 100644
--- a/drivers/vme/bridges/vme_tsi148.c
+++ b/drivers/vme/bridges/vme_tsi148.c
@@ -1289,7 +1289,7 @@ static ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
*(u8 *)(buf + done) = ioread8(addr + done);
done += 1;
@@ -1371,7 +1371,7 @@ static ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
if (done == count)
goto out;
}
- if ((uintptr_t)addr & 0x2) {
+ if ((uintptr_t)(addr + done) & 0x2) {
if ((count - done) < 2) {
iowrite8(*(u8 *)(buf + done), addr + done);
done += 1;
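
Editor's note: both VME bridge fixes above test the alignment of the current transfer position (addr + done) rather than the window base, since after a leading byte copy it is the position, not the base, that decides whether a 16-bit access is legal. A small sketch of the difference, with a hypothetical unaligned base address:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t addr = 0x1001;   /* hypothetical unaligned window base */
        size_t done = 1;           /* one byte already copied to reach 2-byte alignment */

        printf("old check   (addr & 0x2)         = %#lx\n",
               (unsigned long)(addr & 0x2));
        printf("fixed check ((addr + done) & 0x2) = %#lx\n",
               (unsigned long)((addr + done) & 0x2));
        /* Only the second test reflects the alignment of the next transfer. */
        return 0;
    }
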
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index 4c4c566c52a3..79d25894343a 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -223,6 +223,7 @@ config SA1100_WATCHDOG
config DW_WATCHDOG
tristate "Synopsys DesignWare watchdog"
+ depends on HAS_IOMEM
help
Say Y here to include support for the Synopsys DesignWare
watchdog timer found in many chips.
diff --git a/drivers/watchdog/w83697hf_wdt.c b/drivers/watchdog/w83697hf_wdt.c
index aaf2995d37f4..68b45fc9ba6a 100644
--- a/drivers/watchdog/w83697hf_wdt.c
+++ b/drivers/watchdog/w83697hf_wdt.c
@@ -402,7 +402,7 @@ static int __init wdt_init(void)
if (!found) {
pr_err("No W83697HF/HG could be found\n");
- ret = -EIO;
+ ret = -ENODEV;
goto out;
}
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index d75c811bfa56..45e00afa7f2d 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -16,7 +16,6 @@ xen-pad-$(CONFIG_X86) += xen-acpi-pad.o
dom0-$(CONFIG_X86) += pcpu.o
obj-$(CONFIG_XEN_DOM0) += $(dom0-y)
obj-$(CONFIG_BLOCK) += biomerge.o
-obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += xen-balloon.o
obj-$(CONFIG_XEN_SELFBALLOONING) += xen-selfballoon.o
obj-$(CONFIG_XEN_DEV_EVTCHN) += xen-evtchn.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 4672e003c0ad..f4a9e3311297 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -862,6 +862,8 @@ int bind_evtchn_to_irq(unsigned int evtchn)
irq = ret;
goto out;
}
+ /* New interdomain events are bound to VCPU 0. */
+ bind_evtchn_to_cpu(evtchn, 0);
} else {
struct irq_info *info = info_for_irq(irq);
WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 34a2704fbc88..073b4a19a8b0 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -284,10 +284,8 @@ static int map_grant_pages(struct grant_map *map)
}
pr_debug("map %d+%d\n", map->index, map->count);
- err = gnttab_map_refs_userspace(map->map_ops,
- use_ptemod ? map->kmap_ops : NULL,
- map->pages,
- map->count);
+ err = gnttab_map_refs(map->map_ops, use_ptemod ? map->kmap_ops : NULL,
+ map->pages, map->count);
if (err)
return err;
@@ -317,10 +315,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
}
}
- err = gnttab_unmap_refs_userspace(map->unmap_ops + offset,
- use_ptemod ? map->kmap_ops + offset : NULL,
- map->pages + offset,
- pages);
+ err = gnttab_unmap_refs(map->unmap_ops + offset,
+ use_ptemod ? map->kmap_ops + offset : NULL, map->pages + offset,
+ pages);
if (err)
return err;
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 8ee13e2e45e2..b84e3ab839aa 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -928,17 +928,15 @@ void gnttab_batch_copy(struct gnttab_copy *batch, unsigned count)
}
EXPORT_SYMBOL_GPL(gnttab_batch_copy);
-int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count,
- bool m2p_override)
+ struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
pte_t *pte;
- unsigned long mfn, pfn;
+ unsigned long mfn;
- BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map_ops, count);
if (ret)
return ret;
@@ -957,12 +955,10 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
set_phys_to_machine(map_ops[i].host_addr >> PAGE_SHIFT,
map_ops[i].dev_bus_addr >> PAGE_SHIFT);
}
- return 0;
+ return ret;
}
- if (m2p_override &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
@@ -979,20 +975,8 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
} else {
mfn = PFN_DOWN(map_ops[i].dev_bus_addr);
}
- pfn = page_to_pfn(pages[i]);
-
- WARN_ON(PagePrivate(pages[i]));
- SetPagePrivate(pages[i]);
- set_page_private(pages[i], mfn);
-
- pages[i]->index = pfn_to_mfn(pfn);
- if (unlikely(!set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) {
- ret = -ENOMEM;
- goto out;
- }
- if (m2p_override)
- ret = m2p_add_override(mfn, pages[i], kmap_ops ?
- &kmap_ops[i] : NULL);
+ ret = m2p_add_override(mfn, pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
goto out;
}
@@ -1003,32 +987,15 @@ int __gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
return ret;
}
-
-int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_map_refs(map_ops, NULL, pages, count, false);
-}
EXPORT_SYMBOL_GPL(gnttab_map_refs);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_map_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_map_refs_userspace);
-
-int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count,
- bool m2p_override)
+ struct page **pages, unsigned int count)
{
int i, ret;
bool lazy = false;
- unsigned long pfn, mfn;
- BUG_ON(kmap_ops && !m2p_override);
ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
if (ret)
return ret;
@@ -1039,33 +1006,17 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
set_phys_to_machine(unmap_ops[i].host_addr >> PAGE_SHIFT,
INVALID_P2M_ENTRY);
}
- return 0;
+ return ret;
}
- if (m2p_override &&
- !in_interrupt() &&
- paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
+ if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
arch_enter_lazy_mmu_mode();
lazy = true;
}
for (i = 0; i < count; i++) {
- pfn = page_to_pfn(pages[i]);
- mfn = get_phys_to_machine(pfn);
- if (mfn == INVALID_P2M_ENTRY || !(mfn & FOREIGN_FRAME_BIT)) {
- ret = -EINVAL;
- goto out;
- }
-
- set_page_private(pages[i], INVALID_P2M_ENTRY);
- WARN_ON(!PagePrivate(pages[i]));
- ClearPagePrivate(pages[i]);
- set_phys_to_machine(pfn, pages[i]->index);
- if (m2p_override)
- ret = m2p_remove_override(pages[i],
- kmap_ops ?
- &kmap_ops[i] : NULL,
- mfn);
+ ret = m2p_remove_override(pages[i], kmap_ops ?
+ &kmap_ops[i] : NULL);
if (ret)
goto out;
}
@@ -1076,22 +1027,8 @@ int __gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
return ret;
}
-
-int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *map_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_unmap_refs(map_ops, NULL, pages, count, false);
-}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count)
-{
- return __gnttab_unmap_refs(map_ops, kmap_ops, pages, count, true);
-}
-EXPORT_SYMBOL_GPL(gnttab_unmap_refs_userspace);
-
static unsigned nr_status_frames(unsigned nr_grant_frames)
{
BUG_ON(grefs_per_grant_frame == 0);
diff --git a/drivers/xen/xencomm.c b/drivers/xen/xencomm.c
deleted file mode 100644
index 4793fc594549..000000000000
--- a/drivers/xen/xencomm.c
+++ /dev/null
@@ -1,219 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/mm.h>
-#include <linux/slab.h>
-#include <asm/page.h>
-#include <xen/xencomm.h>
-#include <xen/interface/xen.h>
-#include <asm/xen/xencomm.h> /* for xencomm_is_phys_contiguous() */
-
-static int xencomm_init(struct xencomm_desc *desc,
- void *buffer, unsigned long bytes)
-{
- unsigned long recorded = 0;
- int i = 0;
-
- while ((recorded < bytes) && (i < desc->nr_addrs)) {
- unsigned long vaddr = (unsigned long)buffer + recorded;
- unsigned long paddr;
- int offset;
- int chunksz;
-
- offset = vaddr % PAGE_SIZE; /* handle partial pages */
- chunksz = min(PAGE_SIZE - offset, bytes - recorded);
-
- paddr = xencomm_vtop(vaddr);
- if (paddr == ~0UL) {
- printk(KERN_DEBUG "%s: couldn't translate vaddr %lx\n",
- __func__, vaddr);
- return -EINVAL;
- }
-
- desc->address[i++] = paddr;
- recorded += chunksz;
- }
-
- if (recorded < bytes) {
- printk(KERN_DEBUG
- "%s: could only translate %ld of %ld bytes\n",
- __func__, recorded, bytes);
- return -ENOSPC;
- }
-
- /* mark remaining addresses invalid (just for safety) */
- while (i < desc->nr_addrs)
- desc->address[i++] = XENCOMM_INVALID;
-
- desc->magic = XENCOMM_MAGIC;
-
- return 0;
-}
-
-static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
- void *buffer, unsigned long bytes)
-{
- struct xencomm_desc *desc;
- unsigned long buffer_ulong = (unsigned long)buffer;
- unsigned long start = buffer_ulong & PAGE_MASK;
- unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
- unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
- unsigned long size = sizeof(*desc) +
- sizeof(desc->address[0]) * nr_addrs;
-
- /*
- * slab allocator returns at least sizeof(void*) aligned pointer.
- * When sizeof(*desc) > sizeof(void*), struct xencomm_desc might
- * cross page boundary.
- */
- if (sizeof(*desc) > sizeof(void *)) {
- unsigned long order = get_order(size);
- desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
- order);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs =
- ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
- sizeof(*desc->address);
- } else {
- desc = kmalloc(size, gfp_mask);
- if (desc == NULL)
- return NULL;
-
- desc->nr_addrs = nr_addrs;
- }
- return desc;
-}
-
-void xencomm_free(struct xencomm_handle *desc)
-{
- if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
- struct xencomm_desc *desc__ = (struct xencomm_desc *)desc;
- if (sizeof(*desc__) > sizeof(void *)) {
- unsigned long size = sizeof(*desc__) +
- sizeof(desc__->address[0]) * desc__->nr_addrs;
- unsigned long order = get_order(size);
- free_pages((unsigned long)__va(desc), order);
- } else
- kfree(__va(desc));
- }
-}
-
-static int xencomm_create(void *buffer, unsigned long bytes,
- struct xencomm_desc **ret, gfp_t gfp_mask)
-{
- struct xencomm_desc *desc;
- int rc;
-
- pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
-
- if (bytes == 0) {
- /* don't create a descriptor; Xen recognizes NULL. */
- BUG_ON(buffer != NULL);
- *ret = NULL;
- return 0;
- }
-
- BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
-
- desc = xencomm_alloc(gfp_mask, buffer, bytes);
- if (!desc) {
- printk(KERN_DEBUG "%s failure\n", "xencomm_alloc");
- return -ENOMEM;
- }
-
- rc = xencomm_init(desc, buffer, bytes);
- if (rc) {
- printk(KERN_DEBUG "%s failure: %d\n", "xencomm_init", rc);
- xencomm_free((struct xencomm_handle *)__pa(desc));
- return rc;
- }
-
- *ret = desc;
- return 0;
-}
-
-static struct xencomm_handle *xencomm_create_inline(void *ptr)
-{
- unsigned long paddr;
-
- BUG_ON(!xencomm_is_phys_contiguous((unsigned long)ptr));
-
- paddr = (unsigned long)xencomm_pa(ptr);
- BUG_ON(paddr & XENCOMM_INLINE_FLAG);
- return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
-}
-
-/* "mini" routine, for stack-based communications: */
-static int xencomm_create_mini(void *buffer,
- unsigned long bytes, struct xencomm_mini *xc_desc,
- struct xencomm_desc **ret)
-{
- int rc = 0;
- struct xencomm_desc *desc;
- BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
-
- desc = (void *)xc_desc;
-
- desc->nr_addrs = XENCOMM_MINI_ADDRS;
-
- rc = xencomm_init(desc, buffer, bytes);
- if (!rc)
- *ret = desc;
-
- return rc;
-}
-
-struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
-{
- int rc;
- struct xencomm_desc *desc;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
-
- if (rc || desc == NULL)
- return NULL;
-
- return xencomm_pa(desc);
-}
-
-struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
- struct xencomm_mini *xc_desc)
-{
- int rc;
- struct xencomm_desc *desc = NULL;
-
- if (xencomm_is_phys_contiguous((unsigned long)ptr))
- return xencomm_create_inline(ptr);
-
- rc = xencomm_create_mini(ptr, bytes, xc_desc,
- &desc);
-
- if (rc)
- return NULL;
-
- return xencomm_pa(desc);
-}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 0bad24ddc2e7..4f70f383132c 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -114,6 +114,14 @@ void bio_integrity_free(struct bio *bio)
}
EXPORT_SYMBOL(bio_integrity_free);
+static inline unsigned int bip_integrity_vecs(struct bio_integrity_payload *bip)
+{
+ if (bip->bip_slab == BIO_POOL_NONE)
+ return BIP_INLINE_VECS;
+
+ return bvec_nr_vecs(bip->bip_slab);
+}
+
/**
* bio_integrity_add_page - Attach integrity metadata
* @bio: bio to update
@@ -129,7 +137,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
struct bio_integrity_payload *bip = bio->bi_integrity;
struct bio_vec *iv;
- if (bip->bip_vcnt >= bvec_nr_vecs(bip->bip_slab)) {
+ if (bip->bip_vcnt >= bip_integrity_vecs(bip)) {
printk(KERN_ERR "%s: bip_vec full\n", __func__);
return 0;
}
@@ -226,7 +234,8 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
}
EXPORT_SYMBOL(bio_integrity_tag_size);
-int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
+static int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len,
+ int set)
{
struct bio_integrity_payload *bip = bio->bi_integrity;
struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
@@ -449,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio)
struct blk_integrity_exchg bix;
struct bio_vec *bv;
sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
- unsigned int sectors, total, ret;
+ unsigned int sectors, ret = 0;
void *prot_buf = bio->bi_integrity->bip_buf;
int i;
- ret = total = 0;
bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
bix.sector_size = bi->sector_size;
@@ -475,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio)
sectors = bv->bv_len / bi->sector_size;
sector += sectors;
prot_buf += sectors * bi->tuple_size;
- total += sectors * bi->tuple_size;
- BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
kunmap_atomic(kaddr);
}
diff --git a/fs/bio.c b/fs/bio.c
index 75c49a382239..8754e7b6eb49 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -611,7 +611,6 @@ EXPORT_SYMBOL(bio_clone_fast);
struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
struct bio_set *bs)
{
- unsigned nr_iovecs = 0;
struct bvec_iter iter;
struct bio_vec bv;
struct bio *bio;
@@ -638,10 +637,7 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
* __bio_clone_fast() anyways.
*/
- bio_for_each_segment(bv, bio_src, iter)
- nr_iovecs++;
-
- bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
+ bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
if (!bio)
return NULL;
@@ -650,9 +646,18 @@ struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
+ if (bio->bi_rw & REQ_DISCARD)
+ goto integrity_clone;
+
+ if (bio->bi_rw & REQ_WRITE_SAME) {
+ bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
+ goto integrity_clone;
+ }
+
bio_for_each_segment(bv, bio_src, iter)
bio->bi_io_vec[bio->bi_vcnt++] = bv;
+integrity_clone:
if (bio_integrity(bio_src)) {
int ret;
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 49a62b4dda3b..0e8388e72d8d 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -92,11 +92,11 @@
#include <linux/slab.h>
#include <linux/buffer_head.h>
#include <linux/mutex.h>
-#include <linux/crc32c.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include "ctree.h"
#include "disk-io.h"
+#include "hash.h"
#include "transaction.h"
#include "extent_io.h"
#include "volumes.h"
@@ -1823,7 +1823,7 @@ static int btrfsic_test_for_metadata(struct btrfsic_state *state,
size_t sublen = i ? PAGE_CACHE_SIZE :
(PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
- crc = crc32c(crc, data, sublen);
+ crc = btrfs_crc32c(crc, data, sublen);
}
btrfs_csum_final(crc, csum);
if (memcmp(csum, h->csum, state->csum_size))
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index e2600cdb6c25..b01fb6c527e3 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1010,6 +1010,8 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
bytes = min(bytes, working_bytes);
kaddr = kmap_atomic(page_out);
memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
+ if (*pg_index == (vcnt - 1) && *pg_offset == 0)
+ memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
kunmap_atomic(kaddr);
flush_dcache_page(page_out);
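
The hunk above zero-fills whatever remains of the last destination page so stale bytes past the decompressed data are never exposed. A minimal userspace sketch of that copy-then-clear-the-tail pattern (PAGE_SZ and the helper name are made-up stand-ins, not the btrfs code):

/* Sketch: copy a payload into fixed-size "pages" and zero the unused
 * tail of the last one, so no stale bytes leak past the real data. */
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 16          /* stand-in for PAGE_CACHE_SIZE */

static void copy_to_pages(const char *src, size_t len,
                          char pages[][PAGE_SZ], size_t npages)
{
    for (size_t i = 0; i < npages; i++) {
        size_t off = i * PAGE_SZ;
        size_t bytes = len > off ? (len - off < PAGE_SZ ? len - off : PAGE_SZ) : 0;

        memcpy(pages[i], src + off, bytes);
        if (i == npages - 1)                 /* last page: clear the tail */
            memset(pages[i] + bytes, 0, PAGE_SZ - bytes);
    }
}

int main(void)
{
    char pages[2][PAGE_SZ];

    memset(pages, 'X', sizeof(pages));       /* simulate stale contents */
    copy_to_pages("hello, compressed", 18, pages, 2);
    printf("last byte of tail: %d\n", pages[1][PAGE_SZ - 1]);  /* 0 */
    return 0;
}

The same idea applies whenever a fixed-size buffer is only partially filled from variable-length input.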
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 0e69295d0031..81ea55314b1f 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -26,7 +26,6 @@
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
-#include <linux/crc32c.h>
#include <linux/slab.h>
#include <linux/migrate.h>
#include <linux/ratelimit.h>
@@ -35,6 +34,7 @@
#include <asm/unaligned.h>
#include "ctree.h"
#include "disk-io.h"
+#include "hash.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
@@ -244,7 +244,7 @@ out:
u32 btrfs_csum_data(char *data, u32 seed, size_t len)
{
- return crc32c(seed, data, len);
+ return btrfs_crc32c(seed, data, len);
}
void btrfs_csum_final(u32 crc, char *result)
@@ -3839,7 +3839,6 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
rb_erase(&ref->rb_node, &head->ref_root);
atomic_dec(&delayed_refs->num_entries);
btrfs_put_delayed_ref(ref);
- cond_resched_lock(&head->lock);
}
if (head->must_insert_reserved)
pin_bytes = true;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 9c9ecc93ae2c..32312e09f0f5 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2385,6 +2385,7 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
spin_unlock(&delayed_refs->lock);
locked_ref = NULL;
cond_resched();
+ count++;
continue;
}
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5c4ab9c18940..d3d44486290b 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -2629,7 +2629,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
EXTENT_DEFRAG, 1, cached_state);
if (ret) {
u64 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
- if (last_snapshot >= BTRFS_I(inode)->generation)
+ if (0 && last_snapshot >= BTRFS_I(inode)->generation)
/* the inode is shared */
new = record_old_file_extents(inode, ordered_extent);
@@ -5154,7 +5154,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry,
return ERR_CAST(inode);
}
- return d_splice_alias(inode, dentry);
+ return d_materialise_unique(dentry, inode);
}
unsigned char btrfs_filetype_table[] = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b0134892dc70..a6d8efa46bfe 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3537,20 +3537,6 @@ out:
return ret;
}
-static long btrfs_ioctl_global_rsv(struct btrfs_root *root, void __user *arg)
-{
- struct btrfs_block_rsv *block_rsv = &root->fs_info->global_block_rsv;
- u64 reserved;
-
- spin_lock(&block_rsv->lock);
- reserved = block_rsv->reserved;
- spin_unlock(&block_rsv->lock);
-
- if (arg && copy_to_user(arg, &reserved, sizeof(reserved)))
- return -EFAULT;
- return 0;
-}
-
/*
* there are many ways the trans_start and trans_end ioctls can lead
* to deadlocks. They should only be used by applications that
@@ -4525,7 +4511,7 @@ static int btrfs_ioctl_set_fslabel(struct file *file, void __user *arg)
spin_lock(&root->fs_info->super_lock);
strcpy(super_block->label, label);
spin_unlock(&root->fs_info->super_lock);
- ret = btrfs_end_transaction(trans, root);
+ ret = btrfs_commit_transaction(trans, root);
out_unlock:
mnt_drop_write_file(file);
@@ -4668,7 +4654,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
if (ret)
return ret;
- trans = btrfs_start_transaction(root, 1);
+ trans = btrfs_start_transaction(root, 0);
if (IS_ERR(trans))
return PTR_ERR(trans);
@@ -4689,7 +4675,7 @@ static int btrfs_ioctl_set_features(struct file *file, void __user *arg)
btrfs_set_super_incompat_flags(super_block, newflags);
spin_unlock(&root->fs_info->super_lock);
- return btrfs_end_transaction(trans, root);
+ return btrfs_commit_transaction(trans, root);
}
long btrfs_ioctl(struct file *file, unsigned int
@@ -4757,8 +4743,6 @@ long btrfs_ioctl(struct file *file, unsigned int
return btrfs_ioctl_logical_to_ino(root, argp);
case BTRFS_IOC_SPACE_INFO:
return btrfs_ioctl_space_info(root, argp);
- case BTRFS_IOC_GLOBAL_RSV:
- return btrfs_ioctl_global_rsv(root, argp);
case BTRFS_IOC_SYNC: {
int ret;
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 730dce395858..9dde9717c1b9 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -24,12 +24,12 @@
#include <linux/xattr.h>
#include <linux/posix_acl_xattr.h>
#include <linux/radix-tree.h>
-#include <linux/crc32c.h>
#include <linux/vmalloc.h>
#include <linux/string.h>
#include "send.h"
#include "backref.h"
+#include "hash.h"
#include "locking.h"
#include "disk-io.h"
#include "btrfs_inode.h"
@@ -620,7 +620,7 @@ static int send_cmd(struct send_ctx *sctx)
hdr->len = cpu_to_le32(sctx->send_size - sizeof(*hdr));
hdr->crc = 0;
- crc = crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
+ crc = btrfs_crc32c(0, (unsigned char *)sctx->send_buf, sctx->send_size);
hdr->crc = cpu_to_le32(crc);
ret = write_buf(sctx->send_filp, sctx->send_buf, sctx->send_size,
@@ -1332,6 +1332,16 @@ verbose_printk(KERN_DEBUG "btrfs: find_extent_clone: data_offset=%llu, "
}
if (cur_clone_root) {
+ if (compressed != BTRFS_COMPRESS_NONE) {
+ /*
+ * Offsets given by iterate_extent_inodes() are relative
+ * to the start of the extent, we need to add logical
+ * offset from the file extent item.
+ * (See why at backref.c:check_extent_in_eb())
+ */
+ cur_clone_root->offset += btrfs_file_extent_offset(eb,
+ fi);
+ }
*found = cur_clone_root;
ret = 0;
} else {
@@ -2774,8 +2784,6 @@ static int add_waiting_dir_move(struct send_ctx *sctx, u64 ino)
return 0;
}
-#ifdef CONFIG_BTRFS_ASSERT
-
static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
{
struct rb_node *n = sctx->waiting_dir_moves.rb_node;
@@ -2796,8 +2804,6 @@ static int del_waiting_dir_move(struct send_ctx *sctx, u64 ino)
return -ENOENT;
}
-#endif
-
static int add_pending_dir_move(struct send_ctx *sctx, u64 parent_ino)
{
struct rb_node **p = &sctx->pending_dir_moves.rb_node;
@@ -2902,7 +2908,9 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
}
sctx->send_progress = sctx->cur_ino + 1;
- ASSERT(del_waiting_dir_move(sctx, pm->ino) == 0);
+ ret = del_waiting_dir_move(sctx, pm->ino);
+ ASSERT(ret == 0);
+
ret = get_cur_path(sctx, pm->ino, pm->gen, to_path);
if (ret < 0)
goto out;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c02f63356895..d04db817be5c 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -566,7 +566,7 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
kfree(num);
if (info->max_inline) {
- info->max_inline = max_t(u64,
+ info->max_inline = min_t(u64,
info->max_inline,
root->sectorsize);
}
@@ -855,6 +855,7 @@ static struct dentry *get_default_root(struct super_block *sb,
struct btrfs_path *path;
struct btrfs_key location;
struct inode *inode;
+ struct dentry *dentry;
u64 dir_id;
int new = 0;
@@ -925,7 +926,13 @@ setup_root:
return dget(sb->s_root);
}
- return d_obtain_alias(inode);
+ dentry = d_obtain_alias(inode);
+ if (!IS_ERR(dentry)) {
+ spin_lock(&dentry->d_lock);
+ dentry->d_flags &= ~DCACHE_DISCONNECTED;
+ spin_unlock(&dentry->d_lock);
+ }
+ return dentry;
}
static int btrfs_fill_super(struct super_block *sb,
@@ -1996,7 +2003,7 @@ static void __exit exit_btrfs_fs(void)
btrfs_hash_exit();
}
-module_init(init_btrfs_fs)
+late_initcall(init_btrfs_fs);
module_exit(exit_btrfs_fs)
MODULE_LICENSE("GPL");
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 782374d8fd19..865f4cf9a769 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -578,8 +578,14 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
return -ENOMEM;
list_for_each_entry(dev, &fs_devices->devices, dev_list) {
- struct hd_struct *disk = dev->bdev->bd_part;
- struct kobject *disk_kobj = &part_to_dev(disk)->kobj;
+ struct hd_struct *disk;
+ struct kobject *disk_kobj;
+
+ if (!dev->bdev)
+ continue;
+
+ disk = dev->bdev->bd_part;
+ disk_kobj = &part_to_dev(disk)->kobj;
error = sysfs_create_link(fs_info->device_dir_kobj,
disk_kobj, disk_kobj->name);
diff --git a/fs/buffer.c b/fs/buffer.c
index 651dba10b9c2..27265a8b43c1 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -654,14 +654,16 @@ EXPORT_SYMBOL(mark_buffer_dirty_inode);
static void __set_page_dirty(struct page *page,
struct address_space *mapping, int warn)
{
- spin_lock_irq(&mapping->tree_lock);
+ unsigned long flags;
+
+ spin_lock_irqsave(&mapping->tree_lock, flags);
if (page->mapping) { /* Race with truncate? */
WARN_ON_ONCE(warn && !PageUptodate(page));
account_page_dirtied(page, mapping);
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
}
diff --git a/fs/ceph/acl.c b/fs/ceph/acl.c
index 4c2d452c4bfc..21887d63dad5 100644
--- a/fs/ceph/acl.c
+++ b/fs/ceph/acl.c
@@ -54,11 +54,6 @@ static inline struct posix_acl *ceph_get_cached_acl(struct inode *inode,
return acl;
}
-void ceph_forget_all_cached_acls(struct inode *inode)
-{
- forget_all_cached_acls(inode);
-}
-
struct posix_acl *ceph_get_acl(struct inode *inode, int type)
{
int size;
@@ -160,11 +155,7 @@ int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type)
goto out_dput;
}
- if (value)
- ret = __ceph_setxattr(dentry, name, value, size, 0);
- else
- ret = __ceph_removexattr(dentry, name);
-
+ ret = __ceph_setxattr(dentry, name, value, size, 0);
if (ret) {
if (new_mode != old_mode) {
newattrs.ia_mode = old_mode;
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 6da4df84ba30..45eda6d7a40c 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -100,6 +100,14 @@ static unsigned fpos_off(loff_t p)
return p & 0xffffffff;
}
+static int fpos_cmp(loff_t l, loff_t r)
+{
+ int v = ceph_frag_compare(fpos_frag(l), fpos_frag(r));
+ if (v)
+ return v;
+ return (int)(fpos_off(l) - fpos_off(r));
+}
+
/*
* When possible, we try to satisfy a readdir by peeking at the
* dcache. We make this work by carefully ordering dentries on
@@ -156,7 +164,7 @@ more:
if (!d_unhashed(dentry) && dentry->d_inode &&
ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
- ctx->pos <= di->offset)
+ fpos_cmp(ctx->pos, di->offset) <= 0)
break;
dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
dentry->d_name.len, dentry->d_name.name, di->offset,
@@ -695,9 +703,8 @@ static int ceph_mknod(struct inode *dir, struct dentry *dentry,
ceph_mdsc_put_request(req);
if (!err)
- err = ceph_init_acl(dentry, dentry->d_inode, dir);
-
- if (err)
+ ceph_init_acl(dentry, dentry->d_inode, dir);
+ else
d_drop(dentry);
return err;
}
@@ -735,7 +742,9 @@ static int ceph_symlink(struct inode *dir, struct dentry *dentry,
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
- if (err)
+ if (!err)
+ ceph_init_acl(dentry, dentry->d_inode, dir);
+ else
d_drop(dentry);
return err;
}
@@ -776,7 +785,9 @@ static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
out:
- if (err < 0)
+ if (!err)
+ ceph_init_acl(dentry, dentry->d_inode, dir);
+ else
d_drop(dentry);
return err;
}
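
The fpos_cmp() added above orders readdir positions by their fragment first and only then by the 32-bit offset, instead of comparing the packed 64-bit value directly. A small userspace sketch of that two-key comparison (the names and the exact bit split are illustrative, not the ceph helpers; the sketch returns a strict -1/0/1 rather than a difference):

/* Two-key position compare: the high 32 bits (the "frag") dominate,
 * the low 32 bits (the offset) break ties. */
#include <stdint.h>
#include <stdio.h>

static uint32_t pos_frag(uint64_t p) { return (uint32_t)(p >> 32); }
static uint32_t pos_off(uint64_t p)  { return (uint32_t)(p & 0xffffffff); }

static int pos_cmp(uint64_t l, uint64_t r)
{
    if (pos_frag(l) != pos_frag(r))
        return pos_frag(l) < pos_frag(r) ? -1 : 1;
    if (pos_off(l) != pos_off(r))
        return pos_off(l) < pos_off(r) ? -1 : 1;
    return 0;
}

int main(void)
{
    uint64_t a = ((uint64_t)1 << 32) | 5;   /* frag 1, off 5 */
    uint64_t b = ((uint64_t)2 << 32) | 0;   /* frag 2, off 0 */

    printf("%d %d %d\n", pos_cmp(a, b), pos_cmp(b, a), pos_cmp(a, a));  /* -1 1 0 */
    return 0;
}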
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index dfd2ce3419f8..09c7afe32e49 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -286,6 +286,7 @@ int ceph_atomic_open(struct inode *dir, struct dentry *dentry,
} else {
dout("atomic_open finish_open on dn %p\n", dn);
if (req->r_op == CEPH_MDS_OP_CREATE && req->r_reply_info.has_create_ino) {
+ ceph_init_acl(dentry, dentry->d_inode, dir);
*opened |= FILE_CREATED;
}
err = finish_open(file, dentry, ceph_open, opened);
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 2df963f1cf5a..10a4ccbf38da 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -144,7 +144,11 @@ enum {
Opt_ino32,
Opt_noino32,
Opt_fscache,
- Opt_nofscache
+ Opt_nofscache,
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ Opt_acl,
+#endif
+ Opt_noacl
};
static match_table_t fsopt_tokens = {
@@ -172,6 +176,10 @@ static match_table_t fsopt_tokens = {
{Opt_noino32, "noino32"},
{Opt_fscache, "fsc"},
{Opt_nofscache, "nofsc"},
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ {Opt_acl, "acl"},
+#endif
+ {Opt_noacl, "noacl"},
{-1, NULL}
};
@@ -271,6 +279,14 @@ static int parse_fsopt_token(char *c, void *private)
case Opt_nofscache:
fsopt->flags &= ~CEPH_MOUNT_OPT_FSCACHE;
break;
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ case Opt_acl:
+ fsopt->sb_flags |= MS_POSIXACL;
+ break;
+#endif
+ case Opt_noacl:
+ fsopt->sb_flags &= ~MS_POSIXACL;
+ break;
default:
BUG_ON(token);
}
@@ -438,6 +454,13 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
else
seq_puts(m, ",nofsc");
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ if (fsopt->sb_flags & MS_POSIXACL)
+ seq_puts(m, ",acl");
+ else
+ seq_puts(m, ",noacl");
+#endif
+
if (fsopt->wsize)
seq_printf(m, ",wsize=%d", fsopt->wsize);
if (fsopt->rsize != CEPH_RSIZE_DEFAULT)
@@ -819,9 +842,6 @@ static int ceph_set_super(struct super_block *s, void *data)
s->s_flags = fsc->mount_options->sb_flags;
s->s_maxbytes = 1ULL << 40; /* temp value until we get mdsmap */
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
- s->s_flags |= MS_POSIXACL;
-#endif
s->s_xattr = ceph_xattr_handlers;
s->s_fs_info = fsc;
@@ -911,6 +931,10 @@ static struct dentry *ceph_mount(struct file_system_type *fs_type,
struct ceph_options *opt = NULL;
dout("ceph_mount\n");
+
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ flags |= MS_POSIXACL;
+#endif
err = parse_mount_options(&fsopt, &opt, flags, data, dev_name, &path);
if (err < 0) {
res = ERR_PTR(err);
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 19793b56d0a7..d8801a95b685 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
+#include <linux/posix_acl.h>
#include <linux/ceph/libceph.h>
@@ -743,7 +744,11 @@ extern const struct xattr_handler *ceph_xattr_handlers[];
struct posix_acl *ceph_get_acl(struct inode *, int);
int ceph_set_acl(struct inode *inode, struct posix_acl *acl, int type);
int ceph_init_acl(struct dentry *, struct inode *, struct inode *);
-void ceph_forget_all_cached_acls(struct inode *inode);
+
+static inline void ceph_forget_all_cached_acls(struct inode *inode)
+{
+ forget_all_cached_acls(inode);
+}
#else
diff --git a/fs/ceph/xattr.c b/fs/ceph/xattr.c
index 898b6565ad3e..a55ec37378c6 100644
--- a/fs/ceph/xattr.c
+++ b/fs/ceph/xattr.c
@@ -12,6 +12,9 @@
#define XATTR_CEPH_PREFIX "ceph."
#define XATTR_CEPH_PREFIX_LEN (sizeof (XATTR_CEPH_PREFIX) - 1)
+static int __remove_xattr(struct ceph_inode_info *ci,
+ struct ceph_inode_xattr *xattr);
+
/*
* List of handlers for synthetic system.* attributes. Other
* attributes are handled directly.
@@ -319,8 +322,7 @@ static struct ceph_vxattr *ceph_match_vxattr(struct inode *inode,
static int __set_xattr(struct ceph_inode_info *ci,
const char *name, int name_len,
const char *val, int val_len,
- int dirty,
- int should_free_name, int should_free_val,
+ int flags, int update_xattr,
struct ceph_inode_xattr **newxattr)
{
struct rb_node **p;
@@ -349,12 +351,31 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr = NULL;
}
+ if (update_xattr) {
+ int err = 0;
+ if (xattr && (flags & XATTR_CREATE))
+ err = -EEXIST;
+ else if (!xattr && (flags & XATTR_REPLACE))
+ err = -ENODATA;
+ if (err) {
+ kfree(name);
+ kfree(val);
+ return err;
+ }
+ if (update_xattr < 0) {
+ if (xattr)
+ __remove_xattr(ci, xattr);
+ kfree(name);
+ return 0;
+ }
+ }
+
if (!xattr) {
new = 1;
xattr = *newxattr;
xattr->name = name;
xattr->name_len = name_len;
- xattr->should_free_name = should_free_name;
+ xattr->should_free_name = update_xattr;
ci->i_xattrs.count++;
dout("__set_xattr count=%d\n", ci->i_xattrs.count);
@@ -364,7 +385,7 @@ static int __set_xattr(struct ceph_inode_info *ci,
if (xattr->should_free_val)
kfree((void *)xattr->val);
- if (should_free_name) {
+ if (update_xattr) {
kfree((void *)name);
name = xattr->name;
}
@@ -379,8 +400,8 @@ static int __set_xattr(struct ceph_inode_info *ci,
xattr->val = "";
xattr->val_len = val_len;
- xattr->dirty = dirty;
- xattr->should_free_val = (val && should_free_val);
+ xattr->dirty = update_xattr;
+ xattr->should_free_val = (val && update_xattr);
if (new) {
rb_link_node(&xattr->node, parent, p);
@@ -442,7 +463,7 @@ static int __remove_xattr(struct ceph_inode_info *ci,
struct ceph_inode_xattr *xattr)
{
if (!xattr)
- return -EOPNOTSUPP;
+ return -ENODATA;
rb_erase(&xattr->node, &ci->i_xattrs.index);
@@ -588,7 +609,7 @@ start:
p += len;
err = __set_xattr(ci, name, namelen, val, len,
- 0, 0, 0, &xattrs[numattr]);
+ 0, 0, &xattrs[numattr]);
if (err < 0)
goto bad;
@@ -850,6 +871,9 @@ static int ceph_sync_setxattr(struct dentry *dentry, const char *name,
dout("setxattr value=%.*s\n", (int)size, value);
+ if (!value)
+ flags |= CEPH_XATTR_REMOVE;
+
/* do request */
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETXATTR,
USE_AUTH_MDS);
@@ -892,7 +916,7 @@ int __ceph_setxattr(struct dentry *dentry, const char *name,
struct ceph_inode_info *ci = ceph_inode(inode);
int issued;
int err;
- int dirty;
+ int dirty = 0;
int name_len = strlen(name);
int val_len = size;
char *newname = NULL;
@@ -953,12 +977,14 @@ retry:
goto retry;
}
- err = __set_xattr(ci, newname, name_len, newval,
- val_len, 1, 1, 1, &xattr);
+ err = __set_xattr(ci, newname, name_len, newval, val_len,
+ flags, value ? 1 : -1, &xattr);
- dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
- ci->i_xattrs.dirty = true;
- inode->i_ctime = CURRENT_TIME;
+ if (!err) {
+ dirty = __ceph_mark_dirty_caps(ci, CEPH_CAP_XATTR_EXCL);
+ ci->i_xattrs.dirty = true;
+ inode->i_ctime = CURRENT_TIME;
+ }
spin_unlock(&ci->i_ceph_lock);
if (dirty)
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 8f9b4f710d4a..7ff866dbb89e 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -865,8 +865,8 @@ static int build_sec_desc(struct cifs_ntsd *pntsd, struct cifs_ntsd *pnntsd,
return rc;
}
-static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
- __u16 fid, u32 *pacllen)
+struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
+ const struct cifs_fid *cifsfid, u32 *pacllen)
{
struct cifs_ntsd *pntsd = NULL;
unsigned int xid;
@@ -877,7 +877,8 @@ static struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *cifs_sb,
return ERR_CAST(tlink);
xid = get_xid();
- rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), fid, &pntsd, pacllen);
+ rc = CIFSSMBGetCIFSACL(xid, tlink_tcon(tlink), cifsfid->netfid, &pntsd,
+ pacllen);
free_xid(xid);
cifs_put_tlink(tlink);
@@ -946,7 +947,7 @@ struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *cifs_sb,
if (!open_file)
return get_cifs_acl_by_path(cifs_sb, path, pacllen);
- pntsd = get_cifs_acl_by_fid(cifs_sb, open_file->fid.netfid, pacllen);
+ pntsd = get_cifs_acl_by_fid(cifs_sb, &open_file->fid, pacllen);
cifsFileInfo_put(open_file);
return pntsd;
}
@@ -1006,19 +1007,31 @@ out:
/* Translate the CIFS ACL (similar to NTFS ACL) for a file into mode bits */
int
cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
- struct inode *inode, const char *path, const __u16 *pfid)
+ struct inode *inode, const char *path,
+ const struct cifs_fid *pfid)
{
struct cifs_ntsd *pntsd = NULL;
u32 acllen = 0;
int rc = 0;
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct cifs_tcon *tcon;
cifs_dbg(NOISY, "converting ACL to mode for %s\n", path);
- if (pfid)
- pntsd = get_cifs_acl_by_fid(cifs_sb, *pfid, &acllen);
- else
- pntsd = get_cifs_acl(cifs_sb, inode, path, &acllen);
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
+ if (pfid && (tcon->ses->server->ops->get_acl_by_fid))
+ pntsd = tcon->ses->server->ops->get_acl_by_fid(cifs_sb, pfid,
+ &acllen);
+ else if (tcon->ses->server->ops->get_acl)
+ pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
+ &acllen);
+ else {
+ cifs_put_tlink(tlink);
+ return -EOPNOTSUPP;
+ }
/* if we can retrieve the ACL, now parse Access Control Entries, ACEs */
if (IS_ERR(pntsd)) {
rc = PTR_ERR(pntsd);
@@ -1030,6 +1043,8 @@ cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
cifs_dbg(VFS, "parse sec desc failed rc = %d\n", rc);
}
+ cifs_put_tlink(tlink);
+
return rc;
}
@@ -1043,15 +1058,30 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
__u32 secdesclen = 0;
struct cifs_ntsd *pntsd = NULL; /* acl obtained from server */
struct cifs_ntsd *pnntsd = NULL; /* modified acl to be sent to server */
+ struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
+ struct tcon_link *tlink = cifs_sb_tlink(cifs_sb);
+ struct cifs_tcon *tcon;
+
+ if (IS_ERR(tlink))
+ return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
cifs_dbg(NOISY, "set ACL from mode for %s\n", path);
/* Get the security descriptor */
- pntsd = get_cifs_acl(CIFS_SB(inode->i_sb), inode, path, &secdesclen);
+
+ if (tcon->ses->server->ops->get_acl == NULL) {
+ cifs_put_tlink(tlink);
+ return -EOPNOTSUPP;
+ }
+
+ pntsd = tcon->ses->server->ops->get_acl(cifs_sb, inode, path,
+ &secdesclen);
if (IS_ERR(pntsd)) {
rc = PTR_ERR(pntsd);
cifs_dbg(VFS, "%s: error %d getting sec desc\n", __func__, rc);
- goto out;
+ cifs_put_tlink(tlink);
+ return rc;
}
/*
@@ -1064,6 +1094,7 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
pnntsd = kmalloc(secdesclen, GFP_KERNEL);
if (!pnntsd) {
kfree(pntsd);
+ cifs_put_tlink(tlink);
return -ENOMEM;
}
@@ -1072,14 +1103,18 @@ id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64 nmode,
cifs_dbg(NOISY, "build_sec_desc rc: %d\n", rc);
+ if (tcon->ses->server->ops->set_acl == NULL)
+ rc = -EOPNOTSUPP;
+
if (!rc) {
/* Set the security descriptor */
- rc = set_cifs_acl(pnntsd, secdesclen, inode, path, aclflag);
+ rc = tcon->ses->server->ops->set_acl(pnntsd, secdesclen, inode,
+ path, aclflag);
cifs_dbg(NOISY, "set_cifs_acl rc: %d\n", rc);
}
+ cifs_put_tlink(tlink);
kfree(pnntsd);
kfree(pntsd);
-out:
return rc;
}
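
The cifsacl.c changes above route ACL fetching and setting through per-dialect function pointers and fall back to -EOPNOTSUPP when a hook is missing. A compact userspace sketch of that optional-op-table pattern (the struct and function names here are invented for illustration):

/* Optional op table: protocol-specific hooks live in a struct of
 * function pointers; callers report EOPNOTSUPP when a hook is absent. */
#include <errno.h>
#include <stdio.h>

struct fs_ops {
    int (*get_acl)(const char *path);   /* may be NULL */
    int (*set_acl)(const char *path);   /* may be NULL */
};

static int do_get_acl(const struct fs_ops *ops, const char *path)
{
    if (!ops->get_acl)
        return -EOPNOTSUPP;              /* dialect doesn't support ACLs */
    return ops->get_acl(path);
}

static int v1_get_acl(const char *path) { printf("v1 ACL for %s\n", path); return 0; }

int main(void)
{
    struct fs_ops v1 = { .get_acl = v1_get_acl };
    struct fs_ops v2 = { 0 };            /* no ACL support wired up */

    printf("%d %d\n", do_get_acl(&v1, "/x"), do_get_acl(&v2, "/x"));
    return 0;
}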
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index a245d1809ed8..c0f3718b77a8 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -323,7 +323,8 @@ struct smb_version_operations {
/* async read from the server */
int (*async_readv)(struct cifs_readdata *);
/* async write to the server */
- int (*async_writev)(struct cifs_writedata *);
+ int (*async_writev)(struct cifs_writedata *,
+ void (*release)(struct kref *));
/* sync read from the server */
int (*sync_read)(const unsigned int, struct cifsFileInfo *,
struct cifs_io_parms *, unsigned int *, char **,
@@ -395,6 +396,12 @@ struct smb_version_operations {
int (*set_EA)(const unsigned int, struct cifs_tcon *, const char *,
const char *, const void *, const __u16,
const struct nls_table *, int);
+ struct cifs_ntsd * (*get_acl)(struct cifs_sb_info *, struct inode *,
+ const char *, u32 *);
+ struct cifs_ntsd * (*get_acl_by_fid)(struct cifs_sb_info *,
+ const struct cifs_fid *, u32 *);
+ int (*set_acl)(struct cifs_ntsd *, __u32, struct inode *, const char *,
+ int);
};
struct smb_version_values {
@@ -506,7 +513,7 @@ struct cifs_mnt_data {
static inline unsigned int
get_rfc1002_length(void *buf)
{
- return be32_to_cpu(*((__be32 *)buf));
+ return be32_to_cpu(*((__be32 *)buf)) & 0xffffff;
}
static inline void
@@ -1064,7 +1071,7 @@ struct cifs_writedata {
unsigned int pagesz;
unsigned int tailsz;
unsigned int nr_pages;
- struct page *pages[1];
+ struct page *pages[];
};
/*
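
get_rfc1002_length() now masks the header word with 0xffffff because only the low 24 bits of the 4-byte, big-endian transport header carry the payload length. A standalone sketch of the same masking (the frame layout shown is a simplified assumption, not the full RFC 1002 definition):

/* Pull the 24-bit payload length out of a 4-byte big-endian header
 * whose top byte is not part of the length. */
#include <stdint.h>
#include <stdio.h>

static uint32_t frame_length(const unsigned char hdr[4])
{
    uint32_t word = ((uint32_t)hdr[0] << 24) | ((uint32_t)hdr[1] << 16) |
                    ((uint32_t)hdr[2] << 8)  |  (uint32_t)hdr[3];

    return word & 0xffffff;              /* drop the type byte */
}

int main(void)
{
    unsigned char hdr[4] = { 0x85, 0x00, 0x01, 0x2c };  /* type 0x85, len 0x12c */

    printf("len=%u\n", frame_length(hdr));               /* 300 */
    return 0;
}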
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 79e6e9a93a8c..acc4ee8ed075 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -151,7 +151,7 @@ extern struct inode *cifs_iget(struct super_block *sb,
extern int cifs_get_inode_info(struct inode **inode, const char *full_path,
FILE_ALL_INFO *data, struct super_block *sb,
- int xid, const __u16 *fid);
+ int xid, const struct cifs_fid *fid);
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
struct super_block *sb, unsigned int xid);
@@ -162,11 +162,13 @@ extern int cifs_rename_pending_delete(const char *full_path,
const unsigned int xid);
extern int cifs_acl_to_fattr(struct cifs_sb_info *cifs_sb,
struct cifs_fattr *fattr, struct inode *inode,
- const char *path, const __u16 *pfid);
+ const char *path, const struct cifs_fid *pfid);
extern int id_mode_to_cifs_acl(struct inode *inode, const char *path, __u64,
kuid_t, kgid_t);
extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
const char *, u32 *);
+extern struct cifs_ntsd *get_cifs_acl_by_fid(struct cifs_sb_info *,
+ const struct cifs_fid *, u32 *);
extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
const char *, int);
@@ -488,7 +490,8 @@ void cifs_readdata_release(struct kref *refcount);
int cifs_async_readv(struct cifs_readdata *rdata);
int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
-int cifs_async_writev(struct cifs_writedata *wdata);
+int cifs_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref));
void cifs_writev_complete(struct work_struct *work);
struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages,
work_func_t complete);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 4d881c35eeca..f3264bd7a83d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1910,7 +1910,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
do {
server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata);
+ rc = server->ops->async_writev(wdata, cifs_writedata_release);
} while (rc == -EAGAIN);
for (i = 0; i < wdata->nr_pages; i++) {
@@ -1962,15 +1962,9 @@ cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
{
struct cifs_writedata *wdata;
- /* this would overflow */
- if (nr_pages == 0) {
- cifs_dbg(VFS, "%s: called with nr_pages == 0!\n", __func__);
- return NULL;
- }
-
/* writedata + number of page pointers */
wdata = kzalloc(sizeof(*wdata) +
- sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
+ sizeof(struct page *) * nr_pages, GFP_NOFS);
if (wdata != NULL) {
kref_init(&wdata->refcount);
INIT_LIST_HEAD(&wdata->list);
@@ -2031,7 +2025,8 @@ cifs_writev_callback(struct mid_q_entry *mid)
/* cifs_async_writev - send an async write, and set up mid to handle result */
int
-cifs_async_writev(struct cifs_writedata *wdata)
+cifs_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref))
{
int rc = -EACCES;
WRITE_REQ *smb = NULL;
@@ -2105,7 +2100,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
if (rc == 0)
cifs_stats_inc(&tcon->stats.cifs_stats.num_writes);
else
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, release);
async_writev_out:
cifs_small_buf_release(smb);
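
With pages[1] replaced by a flexible array member, cifs_writedata_alloc() can size the header and the page-pointer array in one allocation, without the old nr_pages - 1 arithmetic or the nr_pages == 0 special case. A userspace sketch of that allocation shape (illustrative names, calloc in place of kzalloc):

/* Flexible array member sized in a single allocation. */
#include <stdio.h>
#include <stdlib.h>

struct writeback {
    unsigned int nr_pages;
    void *pages[];                       /* flexible array member */
};

static struct writeback *writeback_alloc(unsigned int nr_pages)
{
    /* header plus exactly nr_pages pointers; nr_pages == 0 is fine too */
    struct writeback *wb = calloc(1, sizeof(*wb) + nr_pages * sizeof(wb->pages[0]));

    if (wb)
        wb->nr_pages = nr_pages;
    return wb;
}

int main(void)
{
    struct writeback *wb = writeback_alloc(4);

    if (!wb)
        return 1;
    printf("allocated %u page slots\n", wb->nr_pages);
    free(wb);
    return 0;
}

Because sizeof(*wb) excludes the flexible member, the zero-page case degenerates to a plain header allocation instead of overflowing.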
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index d3a6796caa5a..3db0c5fd9a11 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -378,7 +378,7 @@ cifs_create_get_file_info:
xid);
else {
rc = cifs_get_inode_info(&newinode, full_path, buf, inode->i_sb,
- xid, &fid->netfid);
+ xid, fid);
if (newinode) {
if (server->ops->set_lease_key)
server->ops->set_lease_key(newinode, fid);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 853d6d1cc822..834fce759d80 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -244,7 +244,7 @@ cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb,
xid);
else
rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
- xid, &fid->netfid);
+ xid, fid);
out:
kfree(buf);
@@ -2043,7 +2043,8 @@ retry:
}
wdata->pid = wdata->cfile->pid;
server = tlink_tcon(wdata->cfile->tlink)->ses->server;
- rc = server->ops->async_writev(wdata);
+ rc = server->ops->async_writev(wdata,
+ cifs_writedata_release);
} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);
for (i = 0; i < nr_pages; ++i)
@@ -2331,9 +2332,20 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
}
static void
-cifs_uncached_writev_complete(struct work_struct *work)
+cifs_uncached_writedata_release(struct kref *refcount)
{
int i;
+ struct cifs_writedata *wdata = container_of(refcount,
+ struct cifs_writedata, refcount);
+
+ for (i = 0; i < wdata->nr_pages; i++)
+ put_page(wdata->pages[i]);
+ cifs_writedata_release(refcount);
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
struct cifs_writedata *wdata = container_of(work,
struct cifs_writedata, work);
struct inode *inode = wdata->cfile->dentry->d_inode;
@@ -2347,12 +2359,7 @@ cifs_uncached_writev_complete(struct work_struct *work)
complete(&wdata->done);
- if (wdata->result != -EAGAIN) {
- for (i = 0; i < wdata->nr_pages; i++)
- put_page(wdata->pages[i]);
- }
-
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
/* attempt to send write to server, retry on any -EAGAIN errors */
@@ -2370,7 +2377,8 @@ cifs_uncached_retry_writev(struct cifs_writedata *wdata)
if (rc != 0)
continue;
}
- rc = server->ops->async_writev(wdata);
+ rc = server->ops->async_writev(wdata,
+ cifs_uncached_writedata_release);
} while (rc == -EAGAIN);
return rc;
@@ -2381,7 +2389,7 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
unsigned long nr_segs, loff_t *poffset)
{
unsigned long nr_pages, i;
- size_t copied, len, cur_len;
+ size_t bytes, copied, len, cur_len;
ssize_t total_written = 0;
loff_t offset;
struct iov_iter it;
@@ -2436,14 +2444,45 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
save_len = cur_len;
for (i = 0; i < nr_pages; i++) {
- copied = min_t(const size_t, cur_len, PAGE_SIZE);
+ bytes = min_t(const size_t, cur_len, PAGE_SIZE);
copied = iov_iter_copy_from_user(wdata->pages[i], &it,
- 0, copied);
+ 0, bytes);
cur_len -= copied;
iov_iter_advance(&it, copied);
+ /*
+ * If we didn't copy as much as we expected, then that
+ * may mean we trod into an unmapped area. Stop copying
+ * at that point. On the next pass through the big
+ * loop, we'll likely end up getting a zero-length
+ * write and bailing out of it.
+ */
+ if (copied < bytes)
+ break;
}
cur_len = save_len - cur_len;
+ /*
+ * If we have no data to send, then that probably means that
+ * the copy above failed altogether. That's most likely because
+ * the address in the iovec was bogus. Set the rc to -EFAULT,
+ * free anything we allocated and bail out.
+ */
+ if (!cur_len) {
+ for (i = 0; i < nr_pages; i++)
+ put_page(wdata->pages[i]);
+ kfree(wdata);
+ rc = -EFAULT;
+ break;
+ }
+
+ /*
+ * i + 1 now represents the number of pages we actually used in
+ * the copy phase above. Bring nr_pages down to that, and free
+ * any pages that we didn't use.
+ */
+ for ( ; nr_pages > i + 1; nr_pages--)
+ put_page(wdata->pages[nr_pages - 1]);
+
wdata->sync_mode = WB_SYNC_ALL;
wdata->nr_pages = nr_pages;
wdata->offset = (__u64)offset;
@@ -2454,7 +2493,8 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
wdata->tailsz = cur_len - ((nr_pages - 1) * PAGE_SIZE);
rc = cifs_uncached_retry_writev(wdata);
if (rc) {
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount,
+ cifs_uncached_writedata_release);
break;
}
@@ -2496,7 +2536,7 @@ restart_loop:
}
}
list_del_init(&wdata->list);
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, cifs_uncached_writedata_release);
}
if (total_written > 0)
@@ -2539,31 +2579,19 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov,
struct cifsInodeInfo *cinode = CIFS_I(inode);
struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
ssize_t rc = -EACCES;
+ loff_t lock_pos = pos;
- BUG_ON(iocb->ki_pos != pos);
-
+ if (file->f_flags & O_APPEND)
+ lock_pos = i_size_read(inode);
/*
* We need to hold the sem to be sure nobody modifies lock list
* with a brlock that prevents writing.
*/
down_read(&cinode->lock_sem);
- if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs),
+ if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs),
server->vals->exclusive_lock_type, NULL,
- CIFS_WRITE_OP)) {
- mutex_lock(&inode->i_mutex);
- rc = __generic_file_aio_write(iocb, iov, nr_segs,
- &iocb->ki_pos);
- mutex_unlock(&inode->i_mutex);
- }
-
- if (rc > 0) {
- ssize_t err;
-
- err = generic_write_sync(file, pos, rc);
- if (err < 0 && rc > 0)
- rc = err;
- }
-
+ CIFS_WRITE_OP))
+ rc = generic_file_aio_write(iocb, iov, nr_segs, pos);
up_read(&cinode->lock_sem);
return rc;
}
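
The uncached write path above now copies page by page, stops at the first short copy, fails with -EFAULT only when nothing at all was copied, and releases the pages it never used. A simplified userspace sketch of that loop (copy_some() merely stands in for iov_iter_copy_from_user() and "faults" after a preset number of bytes):

/* Fill fixed-size chunks from a source that may copy less than asked,
 * stop at the first short copy, and trim the chunk count to what was
 * actually filled. */
#include <stdio.h>
#include <string.h>

#define CHUNK 8

static size_t copy_some(char *dst, size_t want, const char *src,
                        size_t *pos, size_t avail)
{
    size_t can = *pos < avail ? avail - *pos : 0;
    size_t n = want < can ? want : can;

    memcpy(dst, src + *pos, n);
    *pos += n;
    return n;
}

int main(void)
{
    const char *src = "0123456789abcdefXXXX";
    char chunks[4][CHUNK];
    size_t pos = 0, total = 0, used = 0;

    for (size_t i = 0; i < 4; i++) {
        size_t got = copy_some(chunks[i], CHUNK, src, &pos, 13);

        total += got;
        if (got)
            used = i + 1;
        if (got < CHUNK)                 /* short copy: stop here */
            break;
    }
    if (!total)
        return 1;                        /* nothing copied: bail out (-EFAULT) */
    printf("copied %zu bytes into %zu chunk(s)\n", total, used);
    return 0;
}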
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9cb9679d7357..aadc2b68678b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -527,10 +527,15 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path,
return PTR_ERR(tlink);
tcon = tlink_tcon(tlink);
- rc = CIFSSMBQAllEAs(xid, tcon, path, "SETFILEBITS",
- ea_value, 4 /* size of buf */, cifs_sb->local_nls,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ if (tcon->ses->server->ops->query_all_EAs == NULL) {
+ cifs_put_tlink(tlink);
+ return -EOPNOTSUPP;
+ }
+
+ rc = tcon->ses->server->ops->query_all_EAs(xid, tcon, path,
+ "SETFILEBITS", ea_value, 4 /* size of buf */,
+ cifs_sb->local_nls,
+ cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
cifs_put_tlink(tlink);
if (rc < 0)
return (int)rc;
@@ -672,7 +677,7 @@ cgfi_exit:
int
cifs_get_inode_info(struct inode **inode, const char *full_path,
FILE_ALL_INFO *data, struct super_block *sb, int xid,
- const __u16 *fid)
+ const struct cifs_fid *fid)
{
bool validinum = false;
__u16 srchflgs;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index 9ac5bfc9cc56..526fb89f9230 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -1067,6 +1067,15 @@ struct smb_version_operations smb1_operations = {
.query_mf_symlink = cifs_query_mf_symlink,
.create_mf_symlink = cifs_create_mf_symlink,
.is_read_op = cifs_is_read_op,
+#ifdef CONFIG_CIFS_XATTR
+ .query_all_EAs = CIFSSMBQAllEAs,
+ .set_EA = CIFSSMBSetEA,
+#endif /* CIFS_XATTR */
+#ifdef CONFIG_CIFS_ACL
+ .get_acl = get_cifs_acl,
+ .get_acl_by_fid = get_cifs_acl_by_fid,
+ .set_acl = set_cifs_acl,
+#endif /* CIFS_ACL */
};
struct smb_version_values smb1_values = {
diff --git a/fs/cifs/smb2glob.h b/fs/cifs/smb2glob.h
index c38350851b08..bc0bb9c34f72 100644
--- a/fs/cifs/smb2glob.h
+++ b/fs/cifs/smb2glob.h
@@ -57,4 +57,7 @@
#define SMB2_CMACAES_SIZE (16)
#define SMB3_SIGNKEY_SIZE (16)
+/* Maximum buffer size value we can send with 1 credit */
+#define SMB2_MAX_BUFFER_SIZE 65536
+
#endif /* _SMB2_GLOB_H */
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 757da3e54d3d..192f51a12cf1 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -182,11 +182,8 @@ smb2_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified wsize, or default */
wsize = volume_info->wsize ? volume_info->wsize : CIFS_DEFAULT_IOSIZE;
wsize = min_t(unsigned int, wsize, server->max_write);
- /*
- * limit write size to 2 ** 16, because we don't support multicredit
- * requests now.
- */
- wsize = min_t(unsigned int, wsize, 2 << 15);
+ /* set it to the maximum buffer size value we can send with 1 credit */
+ wsize = min_t(unsigned int, wsize, SMB2_MAX_BUFFER_SIZE);
return wsize;
}
@@ -200,11 +197,8 @@ smb2_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info)
/* start with specified rsize, or default */
rsize = volume_info->rsize ? volume_info->rsize : CIFS_DEFAULT_IOSIZE;
rsize = min_t(unsigned int, rsize, server->max_read);
- /*
- * limit write size to 2 ** 16, because we don't support multicredit
- * requests now.
- */
- rsize = min_t(unsigned int, rsize, 2 << 15);
+ /* set it to the maximum buffer size value we can send with 1 credit */
+ rsize = min_t(unsigned int, rsize, SMB2_MAX_BUFFER_SIZE);
return rsize;
}
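
Both negotiate helpers above reduce to the same clamp: start from the requested size, never exceed what the server advertises, and never exceed the fixed single-credit ceiling. A tiny sketch of that computation (the default and ceiling values are placeholders):

#include <stdio.h>

#define MAX_BUFFER_SIZE 65536u           /* one-credit ceiling */

static unsigned int negotiate_size(unsigned int requested, unsigned int server_max)
{
    unsigned int size = requested ? requested : 1048576u;  /* default */

    if (size > server_max)
        size = server_max;
    if (size > MAX_BUFFER_SIZE)
        size = MAX_BUFFER_SIZE;
    return size;
}

int main(void)
{
    printf("%u %u\n", negotiate_size(0, 8388608), negotiate_size(32768, 8388608));
    return 0;
}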
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 2013234b73ad..860344701067 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -413,7 +413,9 @@ SMB2_negotiate(const unsigned int xid, struct cifs_ses *ses)
/* SMB2 only has an extended negflavor */
server->negflavor = CIFS_NEGFLAVOR_EXTENDED;
- server->maxBuf = le32_to_cpu(rsp->MaxTransactSize);
+ /* set it to the maximum buffer size value we can send with 1 credit */
+ server->maxBuf = min_t(unsigned int, le32_to_cpu(rsp->MaxTransactSize),
+ SMB2_MAX_BUFFER_SIZE);
server->max_read = le32_to_cpu(rsp->MaxReadSize);
server->max_write = le32_to_cpu(rsp->MaxWriteSize);
/* BB Do we need to validate the SecurityMode? */
@@ -1890,7 +1892,8 @@ smb2_writev_callback(struct mid_q_entry *mid)
/* smb2_async_writev - send an async write, and set up mid to handle result */
int
-smb2_async_writev(struct cifs_writedata *wdata)
+smb2_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref))
{
int rc = -EACCES;
struct smb2_write_req *req = NULL;
@@ -1938,7 +1941,7 @@ smb2_async_writev(struct cifs_writedata *wdata)
smb2_writev_callback, wdata, 0);
if (rc) {
- kref_put(&wdata->refcount, cifs_writedata_release);
+ kref_put(&wdata->refcount, release);
cifs_stats_fail_inc(tcon, SMB2_WRITE_HE);
}
diff --git a/fs/cifs/smb2proto.h b/fs/cifs/smb2proto.h
index 93adc64666f3..0ce48db20a65 100644
--- a/fs/cifs/smb2proto.h
+++ b/fs/cifs/smb2proto.h
@@ -123,7 +123,8 @@ extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
extern int smb2_async_readv(struct cifs_readdata *rdata);
extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, char **buf, int *buf_type);
-extern int smb2_async_writev(struct cifs_writedata *wdata);
+extern int smb2_async_writev(struct cifs_writedata *wdata,
+ void (*release)(struct kref *kref));
extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
unsigned int *nbytes, struct kvec *iov, int n_vec);
extern int SMB2_echo(struct TCP_Server_Info *server);
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b37570952846..18cd5650a5fc 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -270,6 +270,26 @@ cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx,
iov->iov_len = rqst->rq_pagesz;
}
+static unsigned long
+rqst_len(struct smb_rqst *rqst)
+{
+ unsigned int i;
+ struct kvec *iov = rqst->rq_iov;
+ unsigned long buflen = 0;
+
+ /* total up iov array first */
+ for (i = 0; i < rqst->rq_nvec; i++)
+ buflen += iov[i].iov_len;
+
+ /* add in the page array if there is one */
+ if (rqst->rq_npages) {
+ buflen += rqst->rq_pagesz * (rqst->rq_npages - 1);
+ buflen += rqst->rq_tailsz;
+ }
+
+ return buflen;
+}
+
static int
smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
{
@@ -277,6 +297,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
struct kvec *iov = rqst->rq_iov;
int n_vec = rqst->rq_nvec;
unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base);
+ unsigned long send_length;
unsigned int i;
size_t total_len = 0, sent;
struct socket *ssocket = server->ssocket;
@@ -285,6 +306,14 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst)
if (ssocket == NULL)
return -ENOTSOCK;
+ /* sanity check send length */
+ send_length = rqst_len(rqst);
+ if (send_length != smb_buf_length + 4) {
+ WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n",
+ send_length, smb_buf_length);
+ return -EIO;
+ }
+
cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length);
dump_smb(iov[0].iov_base, iov[0].iov_len);
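
rqst_len() totals the kvec array plus the page array (full pages at rq_pagesz and a possibly shorter tail), and smb_send_rqst() refuses to send when that total disagrees with the length the RFC1002 header claims. A userspace sketch of the same accounting (a simplified request layout, not the cifs structures):

/* Total the iovec array and the page array, then compare against the
 * length a header field claims (plus 4 bytes of framing). */
#include <stdio.h>
#include <sys/uio.h>

struct rqst {
    struct iovec *iov;
    unsigned int nvec;
    unsigned int npages;
    unsigned int pagesz;
    unsigned int tailsz;                 /* bytes used in the last page */
};

static unsigned long rqst_total(const struct rqst *r)
{
    unsigned long len = 0;

    for (unsigned int i = 0; i < r->nvec; i++)
        len += r->iov[i].iov_len;
    if (r->npages) {
        len += (unsigned long)r->pagesz * (r->npages - 1);
        len += r->tailsz;
    }
    return len;
}

int main(void)
{
    char hdr[64], body[100];
    struct iovec iov[2] = {
        { .iov_base = hdr,  .iov_len = sizeof(hdr)  },
        { .iov_base = body, .iov_len = sizeof(body) },
    };
    struct rqst r = { iov, 2, 3, 4096, 500 };
    unsigned long claimed = 64 + 100 + 2 * 4096 + 500 - 4;  /* header field */

    printf("%s\n", rqst_total(&r) == claimed + 4 ? "ok" : "length mismatch");
    return 0;
}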
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c
index 95c43bb20335..5ac836a86b18 100644
--- a/fs/cifs/xattr.c
+++ b/fs/cifs/xattr.c
@@ -176,8 +176,12 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name,
rc = -ENOMEM;
} else {
memcpy(pacl, ea_value, value_size);
- rc = set_cifs_acl(pacl, value_size,
- direntry->d_inode, full_path, CIFS_ACL_DACL);
+ if (pTcon->ses->server->ops->set_acl)
+ rc = pTcon->ses->server->ops->set_acl(pacl,
+ value_size, direntry->d_inode,
+ full_path, CIFS_ACL_DACL);
+ else
+ rc = -EOPNOTSUPP;
if (rc == 0) /* force revalidate of the inode */
CIFS_I(direntry->d_inode)->time = 0;
kfree(pacl);
@@ -323,8 +327,11 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name,
u32 acllen;
struct cifs_ntsd *pacl;
- pacl = get_cifs_acl(cifs_sb, direntry->d_inode,
- full_path, &acllen);
+ if (pTcon->ses->server->ops->get_acl == NULL)
+ goto get_ea_exit; /* rc already EOPNOTSUPP */
+
+ pacl = pTcon->ses->server->ops->get_acl(cifs_sb,
+ direntry->d_inode, full_path, &acllen);
if (IS_ERR(pacl)) {
rc = PTR_ERR(pacl);
cifs_dbg(VFS, "%s: error %zd getting sec desc\n",
diff --git a/fs/exec.c b/fs/exec.c
index e1529b4c79b1..3d78fccdd723 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -748,11 +748,10 @@ EXPORT_SYMBOL(setup_arg_pages);
#endif /* CONFIG_MMU */
-struct file *open_exec(const char *name)
+static struct file *do_open_exec(struct filename *name)
{
struct file *file;
int err;
- struct filename tmp = { .name = name };
static const struct open_flags open_exec_flags = {
.open_flag = O_LARGEFILE | O_RDONLY | __FMODE_EXEC,
.acc_mode = MAY_EXEC | MAY_OPEN,
@@ -760,7 +759,7 @@ struct file *open_exec(const char *name)
.lookup_flags = LOOKUP_FOLLOW,
};
- file = do_filp_open(AT_FDCWD, &tmp, &open_exec_flags);
+ file = do_filp_open(AT_FDCWD, name, &open_exec_flags);
if (IS_ERR(file))
goto out;
@@ -784,6 +783,12 @@ exit:
fput(file);
return ERR_PTR(err);
}
+
+struct file *open_exec(const char *name)
+{
+ struct filename tmp = { .name = name };
+ return do_open_exec(&tmp);
+}
EXPORT_SYMBOL(open_exec);
int kernel_read(struct file *file, loff_t offset,
@@ -1162,7 +1167,7 @@ int prepare_bprm_creds(struct linux_binprm *bprm)
return -ENOMEM;
}
-void free_bprm(struct linux_binprm *bprm)
+static void free_bprm(struct linux_binprm *bprm)
{
free_arg_pages(bprm);
if (bprm->cred) {
@@ -1432,7 +1437,7 @@ static int exec_binprm(struct linux_binprm *bprm)
/*
* sys_execve() executes a new program.
*/
-static int do_execve_common(const char *filename,
+static int do_execve_common(struct filename *filename,
struct user_arg_ptr argv,
struct user_arg_ptr envp)
{
@@ -1441,6 +1446,9 @@ static int do_execve_common(const char *filename,
struct files_struct *displaced;
int retval;
+ if (IS_ERR(filename))
+ return PTR_ERR(filename);
+
/*
* We move the actual failure in case of RLIMIT_NPROC excess from
* set*uid() to execve() because too many poorly written programs
@@ -1473,7 +1481,7 @@ static int do_execve_common(const char *filename,
check_unsafe_exec(bprm);
current->in_execve = 1;
- file = open_exec(filename);
+ file = do_open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
goto out_unmark;
@@ -1481,8 +1489,7 @@ static int do_execve_common(const char *filename,
sched_exec();
bprm->file = file;
- bprm->filename = filename;
- bprm->interp = filename;
+ bprm->filename = bprm->interp = filename->name;
retval = bprm_mm_init(bprm);
if (retval)
@@ -1523,6 +1530,7 @@ static int do_execve_common(const char *filename,
acct_update_integrals(current);
task_numa_free(current);
free_bprm(bprm);
+ putname(filename);
if (displaced)
put_files_struct(displaced);
return retval;
@@ -1544,10 +1552,11 @@ out_files:
if (displaced)
reset_files_struct(displaced);
out_ret:
+ putname(filename);
return retval;
}
-int do_execve(const char *filename,
+int do_execve(struct filename *filename,
const char __user *const __user *__argv,
const char __user *const __user *__envp)
{
@@ -1557,7 +1566,7 @@ int do_execve(const char *filename,
}
#ifdef CONFIG_COMPAT
-static int compat_do_execve(const char *filename,
+static int compat_do_execve(struct filename *filename,
const compat_uptr_t __user *__argv,
const compat_uptr_t __user *__envp)
{
@@ -1607,25 +1616,13 @@ SYSCALL_DEFINE3(execve,
const char __user *const __user *, argv,
const char __user *const __user *, envp)
{
- struct filename *path = getname(filename);
- int error = PTR_ERR(path);
- if (!IS_ERR(path)) {
- error = do_execve(path->name, argv, envp);
- putname(path);
- }
- return error;
+ return do_execve(getname(filename), argv, envp);
}
#ifdef CONFIG_COMPAT
asmlinkage long compat_sys_execve(const char __user * filename,
const compat_uptr_t __user * argv,
const compat_uptr_t __user * envp)
{
- struct filename *path = getname(filename);
- int error = PTR_ERR(path);
- if (!IS_ERR(path)) {
- error = compat_do_execve(path->name, argv, envp);
- putname(path);
- }
- return error;
+ return compat_do_execve(getname(filename), argv, envp);
}
#endif
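
After this refactor do_execve() takes the struct filename returned by getname() directly: an error pointer is detected inside do_execve_common(), and putname() runs on every exit path, so the syscall wrappers shrink to a single call. A userspace sketch of that consume-and-clean-up pattern, with ERR_PTR/IS_ERR re-created purely for illustration (this is not the kernel code):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ERR_PTR(e)   ((void *)(intptr_t)(e))
#define PTR_ERR(p)   ((int)(intptr_t)(p))
#define IS_ERR(p)    ((uintptr_t)(p) >= (uintptr_t)-4095)

static char *getname_sketch(const char *user)
{
    if (!user)
        return ERR_PTR(-EFAULT);
    return strdup(user);
}

/* consumes 'name': checks for an error pointer and frees it on all paths */
static int do_exec_sketch(char *name)
{
    int ret;

    if (IS_ERR(name))
        return PTR_ERR(name);
    ret = printf("exec %s\n", name) < 0 ? -EIO : 0;
    free(name);
    return ret;
}

int main(void)
{
    printf("%d\n", do_exec_sketch(getname_sketch("/bin/true")));
    printf("%d\n", do_exec_sketch(getname_sketch(NULL)));   /* -EFAULT */
    return 0;
}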
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ece55565b9cd..d3a534fdc5ff 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -771,6 +771,8 @@ do { \
if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime)) \
(einode)->xtime.tv_sec = \
(signed)le32_to_cpu((raw_inode)->xtime); \
+ else \
+ (einode)->xtime.tv_sec = 0; \
if (EXT4_FITS_IN_INODE(raw_inode, einode, xtime ## _extra)) \
ext4_decode_extra_time(&(einode)->xtime, \
raw_inode->xtime ## _extra); \
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 10cff4736b11..74bc2d549c58 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3906,6 +3906,7 @@ ext4_ext_handle_uninitialized_extents(handle_t *handle, struct inode *inode,
} else
err = ret;
map->m_flags |= EXT4_MAP_MAPPED;
+ map->m_pblk = newblock;
if (allocated > map->m_len)
allocated = map->m_len;
map->m_len = allocated;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 43e64f6022eb..1a5073959f32 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -152,7 +152,7 @@ ext4_file_dio_write(struct kiocb *iocb, const struct iovec *iov,
if (ret > 0) {
ssize_t err;
- err = generic_write_sync(file, pos, ret);
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
if (err < 0 && ret > 0)
ret = err;
}
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6bea80614d77..a2a837f00407 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -140,7 +140,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
handle = ext4_journal_start(inode_bl, EXT4_HT_MOVE_EXTENTS, 2);
if (IS_ERR(handle)) {
err = -EINVAL;
- goto swap_boot_out;
+ goto journal_err_out;
}
/* Protect extent tree against block allocations via delalloc */
@@ -198,6 +198,7 @@ static long swap_inode_boot_loader(struct super_block *sb,
ext4_double_up_write_data_sem(inode, inode_bl);
+journal_err_out:
ext4_inode_resume_unlocked_dio(inode);
ext4_inode_resume_unlocked_dio(inode_bl);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c5adbb318a90..f3b84cd9de56 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -243,6 +243,7 @@ static int ext4_alloc_group_tables(struct super_block *sb,
ext4_group_t group;
ext4_group_t last_group;
unsigned overhead;
+ __u16 uninit_mask = (flexbg_size > 1) ? ~EXT4_BG_BLOCK_UNINIT : ~0;
BUG_ON(flex_gd->count == 0 || group_data == NULL);
@@ -266,7 +267,7 @@ next_group:
src_group++;
for (; src_group <= last_group; src_group++) {
overhead = ext4_group_overhead_blocks(sb, src_group);
- if (overhead != 0)
+ if (overhead == 0)
last_blk += group_data[src_group - group].blocks_count;
else
break;
@@ -280,8 +281,7 @@ next_group:
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
group_data[group].free_blocks_count--;
- if (flexbg_size > 1)
- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ flex_gd->bg_flags[group] &= uninit_mask;
}
/* Allocate inode bitmaps */
@@ -292,22 +292,30 @@ next_group:
group = ext4_get_group_number(sb, start_blk - 1);
group -= group_data[0].group;
group_data[group].free_blocks_count--;
- if (flexbg_size > 1)
- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ flex_gd->bg_flags[group] &= uninit_mask;
}
/* Allocate inode tables */
for (; it_index < flex_gd->count; it_index++) {
- if (start_blk + EXT4_SB(sb)->s_itb_per_group > last_blk)
+ unsigned int itb = EXT4_SB(sb)->s_itb_per_group;
+ ext4_fsblk_t next_group_start;
+
+ if (start_blk + itb > last_blk)
goto next_group;
group_data[it_index].inode_table = start_blk;
- group = ext4_get_group_number(sb, start_blk - 1);
+ group = ext4_get_group_number(sb, start_blk);
+ next_group_start = ext4_group_first_block_no(sb, group + 1);
group -= group_data[0].group;
- group_data[group].free_blocks_count -=
- EXT4_SB(sb)->s_itb_per_group;
- if (flexbg_size > 1)
- flex_gd->bg_flags[group] &= ~EXT4_BG_BLOCK_UNINIT;
+ if (start_blk + itb > next_group_start) {
+ flex_gd->bg_flags[group + 1] &= uninit_mask;
+ overhead = start_blk + itb - next_group_start;
+ group_data[group + 1].free_blocks_count -= overhead;
+ itb -= overhead;
+ }
+
+ group_data[group].free_blocks_count -= itb;
+ flex_gd->bg_flags[group] &= uninit_mask;
start_blk += EXT4_SB(sb)->s_itb_per_group;
}
@@ -401,7 +409,7 @@ static int set_flexbg_block_bitmap(struct super_block *sb, handle_t *handle,
start = ext4_group_first_block_no(sb, group);
group -= flex_gd->groups[0].group;
- count2 = sb->s_blocksize * 8 - (block - start);
+ count2 = EXT4_BLOCKS_PER_GROUP(sb) - (block - start);
if (count2 > count)
count2 = count;
@@ -620,7 +628,7 @@ handle_ib:
if (err)
goto out;
count = group_table_count[j];
- start = group_data[i].block_bitmap;
+ start = (&group_data[i].block_bitmap)[j];
block = start;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1f7784de05b6..710fed2377d4 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3695,16 +3695,22 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
for (i = 0; i < 4; i++)
sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
sbi->s_def_hash_version = es->s_def_hash_version;
- i = le32_to_cpu(es->s_flags);
- if (i & EXT2_FLAGS_UNSIGNED_HASH)
- sbi->s_hash_unsigned = 3;
- else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
+ if (EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_DIR_INDEX)) {
+ i = le32_to_cpu(es->s_flags);
+ if (i & EXT2_FLAGS_UNSIGNED_HASH)
+ sbi->s_hash_unsigned = 3;
+ else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
#ifdef __CHAR_UNSIGNED__
- es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
- sbi->s_hash_unsigned = 3;
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
+ sbi->s_hash_unsigned = 3;
#else
- es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
+ if (!(sb->s_flags & MS_RDONLY))
+ es->s_flags |=
+ cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
#endif
+ }
}
/* Handle clustersize */
diff --git a/fs/file.c b/fs/file.c
index 771578b33fb6..60a45e9f5323 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -34,7 +34,7 @@ static void *alloc_fdmem(size_t size)
* vmalloc() if the allocation size will be considered "large" by the VM.
*/
if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
- void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN);
+ void *data = kmalloc(size, GFP_KERNEL|__GFP_NOWARN|__GFP_NORETRY);
if (data != NULL)
return data;
}
@@ -683,35 +683,65 @@ EXPORT_SYMBOL(fget_raw);
* The fput_needed flag returned by fget_light should be passed to the
* corresponding fput_light.
*/
-struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed)
+static unsigned long __fget_light(unsigned int fd, fmode_t mask)
{
struct files_struct *files = current->files;
struct file *file;
- *fput_needed = 0;
if (atomic_read(&files->count) == 1) {
file = __fcheck_files(files, fd);
- if (file && (file->f_mode & mask))
- file = NULL;
+ if (!file || unlikely(file->f_mode & mask))
+ return 0;
+ return (unsigned long)file;
} else {
file = __fget(fd, mask);
- if (file)
- *fput_needed = 1;
+ if (!file)
+ return 0;
+ return FDPUT_FPUT | (unsigned long)file;
}
-
- return file;
}
-struct file *fget_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget(unsigned int fd)
{
- return __fget_light(fd, FMODE_PATH, fput_needed);
+ return __fget_light(fd, FMODE_PATH);
}
-EXPORT_SYMBOL(fget_light);
+EXPORT_SYMBOL(__fdget);
-struct file *fget_raw_light(unsigned int fd, int *fput_needed)
+unsigned long __fdget_raw(unsigned int fd)
{
- return __fget_light(fd, 0, fput_needed);
+ return __fget_light(fd, 0);
+}
+
+unsigned long __fdget_pos(unsigned int fd)
+{
+ struct files_struct *files = current->files;
+ struct file *file;
+ unsigned long v;
+
+ if (atomic_read(&files->count) == 1) {
+ file = __fcheck_files(files, fd);
+ v = 0;
+ } else {
+ file = __fget(fd, 0);
+ v = FDPUT_FPUT;
+ }
+ if (!file)
+ return 0;
+
+ if (file->f_mode & FMODE_ATOMIC_POS) {
+ if (file_count(file) > 1) {
+ v |= FDPUT_POS_UNLOCK;
+ mutex_lock(&file->f_pos_lock);
+ }
+ }
+ return v | (unsigned long)file;
}
+/*
+ * We only lock f_pos if we have threads or if the file might be
+ * shared with another process. In both cases we'll have an elevated
+ * file count (done either by fdget() or by fork()).
+ */
+
void set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
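The reworked __fget_light()/__fdget()/__fdget_pos() helpers above return the struct file pointer and the FDPUT_* flags packed into one unsigned long, relying on struct file being at least 4-byte aligned so the two low bits are free. A minimal caller-side sketch of unpacking that word; fd_word_file(), fd_word_flags() and example_use() are illustrative names, not the kernel's real wrappers (those are fdget()/fdget_pos() in include/linux/file.h and may differ in detail):

#include <linux/file.h>
#include <linux/errno.h>

static inline struct file *fd_word_file(unsigned long v)
{
	return (struct file *)(v & ~3UL);	/* strip FDPUT_* flag bits */
}

static inline unsigned int fd_word_flags(unsigned long v)
{
	return v & 3;	/* FDPUT_FPUT and/or FDPUT_POS_UNLOCK */
}

static int example_use(unsigned int fd)
{
	unsigned long v = __fdget(fd);
	struct file *file = fd_word_file(v);

	if (!file)
		return -EBADF;
	/* ... operate on file ... */
	if (fd_word_flags(v) & FDPUT_FPUT)
		fput(file);
	return 0;
}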
diff --git a/fs/file_table.c b/fs/file_table.c
index 5fff9030be34..5b24008ea4f6 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -135,6 +135,7 @@ struct file *get_empty_filp(void)
atomic_long_set(&f->f_count, 1);
rwlock_init(&f->f_owner.lock);
spin_lock_init(&f->f_lock);
+ mutex_init(&f->f_pos_lock);
eventpoll_init_file(f);
/* f->f_version: 0 */
return f;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index e0259a163f98..d754e3cf99a8 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -40,18 +40,13 @@
struct wb_writeback_work {
long nr_pages;
struct super_block *sb;
- /*
- * Write only inodes dirtied before this time. Don't forget to set
- * older_than_this_is_set when you set this.
- */
- unsigned long older_than_this;
+ unsigned long *older_than_this;
enum writeback_sync_modes sync_mode;
unsigned int tagged_writepages:1;
unsigned int for_kupdate:1;
unsigned int range_cyclic:1;
unsigned int for_background:1;
unsigned int for_sync:1; /* sync(2) WB_SYNC_ALL writeback */
- unsigned int older_than_this_is_set:1;
enum wb_reason reason; /* why was writeback initiated? */
struct list_head list; /* pending work list */
@@ -252,10 +247,10 @@ static int move_expired_inodes(struct list_head *delaying_queue,
int do_sb_sort = 0;
int moved = 0;
- WARN_ON_ONCE(!work->older_than_this_is_set);
while (!list_empty(delaying_queue)) {
inode = wb_inode(delaying_queue->prev);
- if (inode_dirtied_after(inode, work->older_than_this))
+ if (work->older_than_this &&
+ inode_dirtied_after(inode, *work->older_than_this))
break;
list_move(&inode->i_wb_list, &tmp);
moved++;
@@ -742,8 +737,6 @@ static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
.sync_mode = WB_SYNC_NONE,
.range_cyclic = 1,
.reason = reason,
- .older_than_this = jiffies,
- .older_than_this_is_set = 1,
};
spin_lock(&wb->list_lock);
@@ -802,13 +795,12 @@ static long wb_writeback(struct bdi_writeback *wb,
{
unsigned long wb_start = jiffies;
long nr_pages = work->nr_pages;
+ unsigned long oldest_jif;
struct inode *inode;
long progress;
- if (!work->older_than_this_is_set) {
- work->older_than_this = jiffies;
- work->older_than_this_is_set = 1;
- }
+ oldest_jif = jiffies;
+ work->older_than_this = &oldest_jif;
spin_lock(&wb->list_lock);
for (;;) {
@@ -842,10 +834,10 @@ static long wb_writeback(struct bdi_writeback *wb,
* safe.
*/
if (work->for_kupdate) {
- work->older_than_this = jiffies -
+ oldest_jif = jiffies -
msecs_to_jiffies(dirty_expire_interval * 10);
} else if (work->for_background)
- work->older_than_this = jiffies;
+ oldest_jif = jiffies;
trace_writeback_start(wb->bdi, work);
if (list_empty(&wb->b_io))
@@ -1357,21 +1349,18 @@ EXPORT_SYMBOL(try_to_writeback_inodes_sb);
/**
* sync_inodes_sb - sync sb inode pages
- * @sb: the superblock
- * @older_than_this: timestamp
+ * @sb: the superblock
*
* This function writes and waits on any dirty inode belonging to this
- * superblock that has been dirtied before given timestamp.
+ * super_block.
*/
-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
+void sync_inodes_sb(struct super_block *sb)
{
DECLARE_COMPLETION_ONSTACK(done);
struct wb_writeback_work work = {
.sb = sb,
.sync_mode = WB_SYNC_ALL,
.nr_pages = LONG_MAX,
- .older_than_this = older_than_this,
- .older_than_this_is_set = 1,
.range_cyclic = 0,
.done = &done,
.reason = WB_REASON_SYNC,
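The wb_writeback_work changes above replace the older_than_this value plus older_than_this_is_set flag with a plain pointer, where NULL means "no cutoff" and a non-NULL pointer refers to a timestamp on the caller's stack. A small sketch of that optional-cutoff idiom; expired() is an illustrative helper, not a function from this file:

#include <linux/jiffies.h>
#include <linux/types.h>

static bool expired(unsigned long dirtied_when,
		    const unsigned long *older_than_this)
{
	/* NULL cutoff: everything qualifies, mirroring the removed flag */
	if (!older_than_this)
		return true;
	return time_before_eq(dirtied_when, *older_than_this);
}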
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
index e1959efad64f..b5ebc2d7d80d 100644
--- a/fs/fscache/object-list.c
+++ b/fs/fscache/object-list.c
@@ -50,6 +50,8 @@ void fscache_objlist_add(struct fscache_object *obj)
struct fscache_object *xobj;
struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+ ASSERT(RB_EMPTY_NODE(&obj->objlist_link));
+
write_lock(&fscache_object_list_lock);
while (*p) {
@@ -75,6 +77,9 @@ void fscache_objlist_add(struct fscache_object *obj)
*/
void fscache_objlist_remove(struct fscache_object *obj)
{
+ if (RB_EMPTY_NODE(&obj->objlist_link))
+ return;
+
write_lock(&fscache_object_list_lock);
BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index 53d35c504240..d3b4539f1651 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -314,6 +314,9 @@ void fscache_object_init(struct fscache_object *object,
object->cache = cache;
object->cookie = cookie;
object->parent = NULL;
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+ RB_CLEAR_NODE(&object->objlist_link);
+#endif
object->oob_event_mask = 0;
for (t = object->oob_table; t->events; t++)
diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c
index 968ce411db53..32602c667b4a 100644
--- a/fs/hfsplus/catalog.c
+++ b/fs/hfsplus/catalog.c
@@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry,
folder = &entry->folder;
memset(folder, 0, sizeof(*folder));
folder->type = cpu_to_be16(HFSPLUS_FOLDER);
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags))
+ folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT);
folder->id = cpu_to_be32(inode->i_ino);
HFSPLUS_I(inode)->create_date =
folder->create_date =
@@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid,
return hfs_brec_find(fd, hfs_find_rec_by_key);
}
+static void hfsplus_subfolders_inc(struct inode *dir)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+ /*
+ * Increment subfolder count. Note, the value is only meaningful
+ * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+ */
+ HFSPLUS_I(dir)->subfolders++;
+ }
+}
+
+static void hfsplus_subfolders_dec(struct inode *dir)
+{
+ struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb);
+
+ if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) {
+ /*
+ * Decrement subfolder count. Note, the value is only meaningful
+ * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set.
+ *
+ * Check for zero. Some subfolders may have been created
+ * by an implementation ignorant of this counter.
+ */
+ if (HFSPLUS_I(dir)->subfolders)
+ HFSPLUS_I(dir)->subfolders--;
+ }
+}
+
int hfsplus_create_cat(u32 cnid, struct inode *dir,
struct qstr *str, struct inode *inode)
{
@@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir,
goto err1;
dir->i_size++;
+ if (S_ISDIR(inode->i_mode))
+ hfsplus_subfolders_inc(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
@@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str)
goto out;
dir->i_size--;
+ if (type == HFSPLUS_FOLDER)
+ hfsplus_subfolders_dec(dir);
dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC;
hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY);
@@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid,
hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset,
src_fd.entrylength);
+ type = be16_to_cpu(entry.type);
/* create new dir entry with the data from the old entry */
hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name);
@@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid,
if (err)
goto out;
dst_dir->i_size++;
+ if (type == HFSPLUS_FOLDER)
+ hfsplus_subfolders_inc(dst_dir);
dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC;
/* finally remove the old entry */
@@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid,
if (err)
goto out;
src_dir->i_size--;
+ if (type == HFSPLUS_FOLDER)
+ hfsplus_subfolders_dec(src_dir);
src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC;
/* remove old thread entry */
diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h
index 08846425b67f..62d571eb69ba 100644
--- a/fs/hfsplus/hfsplus_fs.h
+++ b/fs/hfsplus/hfsplus_fs.h
@@ -242,6 +242,7 @@ struct hfsplus_inode_info {
*/
sector_t fs_blocks;
u8 userflags; /* BSD user file flags */
+ u32 subfolders; /* Subfolder count (HFSX only) */
struct list_head open_dir_list;
loff_t phys_size;
diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h
index 8ffb3a8ffe75..5a126828d85e 100644
--- a/fs/hfsplus/hfsplus_raw.h
+++ b/fs/hfsplus/hfsplus_raw.h
@@ -261,7 +261,7 @@ struct hfsplus_cat_folder {
struct DInfo user_info;
struct DXInfo finder_info;
__be32 text_encoding;
- u32 reserved;
+ __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. */
} __packed;
/* HFS file info (stolen from hfs.h) */
@@ -301,11 +301,13 @@ struct hfsplus_cat_file {
struct hfsplus_fork_raw rsrc_fork;
} __packed;
-/* File attribute bits */
+/* File and folder flag bits */
#define HFSPLUS_FILE_LOCKED 0x0001
#define HFSPLUS_FILE_THREAD_EXISTS 0x0002
#define HFSPLUS_XATTR_EXISTS 0x0004
#define HFSPLUS_ACL_EXISTS 0x0008
+#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count
+ * (HFSX only) */
/* HFS+ catalog thread (part of a cat_entry) */
struct hfsplus_cat_thread {
diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c
index fa929f325f87..a4f45bd88a63 100644
--- a/fs/hfsplus/inode.c
+++ b/fs/hfsplus/inode.c
@@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode)
hip->extent_state = 0;
hip->flags = 0;
hip->userflags = 0;
+ hip->subfolders = 0;
memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec));
memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec));
hip->alloc_blocks = 0;
@@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd)
inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date);
HFSPLUS_I(inode)->create_date = folder->create_date;
HFSPLUS_I(inode)->fs_blocks = 0;
+ if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+ HFSPLUS_I(inode)->subfolders =
+ be32_to_cpu(folder->subfolders);
+ }
inode->i_op = &hfsplus_dir_inode_operations;
inode->i_fop = &hfsplus_dir_operations;
} else if (type == HFSPLUS_FILE) {
@@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode)
folder->content_mod_date = hfsp_ut2mt(inode->i_mtime);
folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime);
folder->valence = cpu_to_be32(inode->i_size - 2);
+ if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) {
+ folder->subfolders =
+ cpu_to_be32(HFSPLUS_I(inode)->subfolders);
+ }
hfs_bnode_write(fd.bnode, &entry, fd.entryoffset,
sizeof(struct hfsplus_cat_folder));
} else if (HFSPLUS_IS_RSRC(inode)) {
diff --git a/fs/hfsplus/options.c b/fs/hfsplus/options.c
index 968eab5bc1f5..68537e8b7a09 100644
--- a/fs/hfsplus/options.c
+++ b/fs/hfsplus/options.c
@@ -75,7 +75,7 @@ int hfsplus_parse_options_remount(char *input, int *force)
int token;
if (!input)
- return 0;
+ return 1;
while ((p = strsep(&input, ",")) != NULL) {
if (!*p)
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 8360674c85bc..60bb365f54a5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -514,11 +514,13 @@ int jbd2_journal_start_reserved(handle_t *handle, unsigned int type,
* similarly constrained call sites
*/
ret = start_this_handle(journal, handle, GFP_NOFS);
- if (ret < 0)
+ if (ret < 0) {
jbd2_journal_free_reserved(handle);
+ return ret;
+ }
handle->h_type = type;
handle->h_line_no = line_no;
- return ret;
+ return 0;
}
EXPORT_SYMBOL(jbd2_journal_start_reserved);
diff --git a/fs/jfs/acl.c b/fs/jfs/acl.c
index e973b85d6afd..5a8ea16eedbc 100644
--- a/fs/jfs/acl.c
+++ b/fs/jfs/acl.c
@@ -86,6 +86,8 @@ static int __jfs_set_acl(tid_t tid, struct inode *inode, int type,
rc = posix_acl_equiv_mode(acl, &inode->i_mode);
if (rc < 0)
return rc;
+ inode->i_ctime = CURRENT_TIME;
+ mark_inode_dirty(inode);
if (rc == 0)
acl = NULL;
break;
diff --git a/fs/jfs/xattr.c b/fs/jfs/xattr.c
index 3bd5ee45f7b3..46325d5c34fc 100644
--- a/fs/jfs/xattr.c
+++ b/fs/jfs/xattr.c
@@ -854,9 +854,6 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
int rc;
tid_t tid;
- if ((rc = can_set_xattr(inode, name, value, value_len)))
- return rc;
-
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
@@ -865,6 +862,9 @@ int jfs_setxattr(struct dentry *dentry, const char *name, const void *value,
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_setxattr(dentry, name, value, value_len, flags);
+ if ((rc = can_set_xattr(inode, name, value, value_len)))
+ return rc;
+
if (value == NULL) { /* empty EA, do not remove */
value = "";
value_len = 0;
@@ -1034,9 +1034,6 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
int rc;
tid_t tid;
- if ((rc = can_set_xattr(inode, name, NULL, 0)))
- return rc;
-
/*
* If this is a request for a synthetic attribute in the system.*
* namespace use the generic infrastructure to resolve a handler
@@ -1045,6 +1042,9 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN))
return generic_removexattr(dentry, name);
+ if ((rc = can_set_xattr(inode, name, NULL, 0)))
+ return rc;
+
tid = txBegin(inode->i_sb, 0);
mutex_lock(&ji->commit_mutex);
rc = __jfs_setxattr(tid, dentry->d_inode, name, NULL, 0, XATTR_REPLACE);
@@ -1061,7 +1061,7 @@ int jfs_removexattr(struct dentry *dentry, const char *name)
* attributes are handled directly.
*/
const struct xattr_handler *jfs_xattr_handlers[] = {
-#ifdef JFS_POSIX_ACL
+#ifdef CONFIG_JFS_POSIX_ACL
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
#endif
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 5104cf5d25c5..bd6e18be6e1a 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -187,19 +187,23 @@ static void kernfs_deactivate(struct kernfs_node *kn)
kn->u.completion = (void *)&wait;
- rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
+ if (kn->flags & KERNFS_LOCKDEP)
+ rwsem_acquire(&kn->dep_map, 0, 0, _RET_IP_);
/* atomic_add_return() is a mb(), put_active() will always see
* the updated kn->u.completion.
*/
v = atomic_add_return(KN_DEACTIVATED_BIAS, &kn->active);
if (v != KN_DEACTIVATED_BIAS) {
- lock_contended(&kn->dep_map, _RET_IP_);
+ if (kn->flags & KERNFS_LOCKDEP)
+ lock_contended(&kn->dep_map, _RET_IP_);
wait_for_completion(&wait);
}
- lock_acquired(&kn->dep_map, _RET_IP_);
- rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ if (kn->flags & KERNFS_LOCKDEP) {
+ lock_acquired(&kn->dep_map, _RET_IP_);
+ rwsem_release(&kn->dep_map, 1, _RET_IP_);
+ }
}
/**
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index 0d6ce895a9ee..0f4152defe7b 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -94,6 +94,7 @@ const void *kernfs_super_ns(struct super_block *sb)
* @fs_type: file_system_type of the fs being mounted
* @flags: mount flags specified for the mount
* @root: kernfs_root of the hierarchy being mounted
+ * @new_sb_created: tell the caller if we allocated a new superblock
* @ns: optional namespace tag of the mount
*
* This is to be called from each kernfs user's file_system_type->mount()
@@ -104,7 +105,8 @@ const void *kernfs_super_ns(struct super_block *sb)
* The return value can be passed to the vfs layer verbatim.
*/
struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, const void *ns)
+ struct kernfs_root *root, bool *new_sb_created,
+ const void *ns)
{
struct super_block *sb;
struct kernfs_super_info *info;
@@ -122,6 +124,10 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
kfree(info);
if (IS_ERR(sb))
return ERR_CAST(sb);
+
+ if (new_sb_created)
+ *new_sb_created = !sb->s_root;
+
if (!sb->s_root) {
error = kernfs_fill_super(sb);
if (error) {
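With the extra @new_sb_created argument, kernfs users can tell a first mount of a hierarchy from a remount of an existing superblock. A hedged sketch of a ->mount() callback using it for one-time setup; example_mount and example_root are placeholder names, not symbols from this patch:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/kernfs.h>

static struct kernfs_root *example_root;	/* created elsewhere */

static struct dentry *example_mount(struct file_system_type *fs_type,
				    int flags, const char *dev_name,
				    void *data)
{
	bool new_sb = false;
	struct dentry *root;

	root = kernfs_mount_ns(fs_type, flags, example_root, &new_sb, NULL);
	if (!IS_ERR(root) && new_sb) {
		/* superblock was freshly allocated: one-time init here */
	}
	return root;
}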
diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c
index e066a3902973..ab798a88ec1d 100644
--- a/fs/lockd/svclock.c
+++ b/fs/lockd/svclock.c
@@ -779,6 +779,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
struct nlm_file *file = block->b_file;
struct nlm_lock *lock = &block->b_call->a_args.lock;
int error;
+ loff_t fl_start, fl_end;
dprintk("lockd: grant blocked lock %p\n", block);
@@ -796,9 +797,16 @@ nlmsvc_grant_blocked(struct nlm_block *block)
}
/* Try the lock operation again */
+ /* vfs_lock_file() can mangle fl_start and fl_end, but we need
+ * them unchanged for the GRANT_MSG
+ */
lock->fl.fl_flags |= FL_SLEEP;
+ fl_start = lock->fl.fl_start;
+ fl_end = lock->fl.fl_end;
error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL);
lock->fl.fl_flags &= ~FL_SLEEP;
+ lock->fl.fl_start = fl_start;
+ lock->fl.fl_end = fl_end;
switch (error) {
case 0:
diff --git a/fs/namei.c b/fs/namei.c
index d580df2e6804..2f730ef9b4b3 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -196,6 +196,7 @@ recopy:
goto error;
result->uptr = filename;
+ result->aname = NULL;
audit_getname(result);
return result;
@@ -210,6 +211,35 @@ getname(const char __user * filename)
return getname_flags(filename, 0, NULL);
}
+/*
+ * The "getname_kernel()" interface doesn't do pathnames longer
+ * than EMBEDDED_NAME_MAX. Deal with it - you're a kernel user.
+ */
+struct filename *
+getname_kernel(const char * filename)
+{
+ struct filename *result;
+ char *kname;
+ int len;
+
+ len = strlen(filename);
+ if (len >= EMBEDDED_NAME_MAX)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ result = __getname();
+ if (unlikely(!result))
+ return ERR_PTR(-ENOMEM);
+
+ kname = (char *)result + sizeof(*result);
+ result->name = kname;
+ result->uptr = NULL;
+ result->aname = NULL;
+ result->separate = false;
+
+ strlcpy(kname, filename, EMBEDDED_NAME_MAX);
+ return result;
+}
+
#ifdef CONFIG_AUDITSYSCALL
void putname(struct filename *name)
{
@@ -1854,7 +1884,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
nd->path = f.file->f_path;
if (flags & LOOKUP_RCU) {
- if (f.need_put)
+ if (f.flags & FDPUT_FPUT)
*fp = f.file;
nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
rcu_read_lock();
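getname_kernel() above wraps an in-kernel path string in a struct filename (limited to EMBEDDED_NAME_MAX) so it can be handed to code that otherwise takes a user-copied name; the result is released with putname() as usual. A minimal usage sketch; the function name and the path are illustrative only:

#include <linux/fs.h>
#include <linux/err.h>

static int example_lookup(void)
{
	struct filename *name;

	name = getname_kernel("/dev/console");	/* illustrative path */
	if (IS_ERR(name))
		return PTR_ERR(name);
	/* ... pass "name" to code expecting a struct filename ... */
	putname(name);
	return 0;
}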
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index ef792f29f831..5d8ccecf5f5c 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode,
rcu_read_lock();
delegation = rcu_dereference(NFS_I(inode)->delegation);
+ if (delegation == NULL)
+ goto out_enoent;
- if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) {
- rcu_read_unlock();
- return -ENOENT;
- }
+ if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid))
+ goto out_enoent;
nfs_mark_return_delegation(server, delegation);
rcu_read_unlock();
nfs_delegation_run_state_manager(clp);
return 0;
+out_enoent:
+ rcu_read_unlock();
+ return -ENOENT;
}
static struct inode *
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index be38b573495a..4a48fe4b84b6 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1846,6 +1846,11 @@ int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
GFP_KERNEL)) {
SetPageUptodate(page);
unlock_page(page);
+ /*
+ * add_to_page_cache_lru() grabs an extra page refcount.
+ * Drop it here to avoid leaking this page later.
+ */
+ page_cache_release(page);
} else
__free_page(page);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 28a0a3cbd3b7..360114ae8b82 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -164,17 +164,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
nfs_fscache_invalidate(inode);
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_LABEL
| NFS_INO_INVALID_DATA
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_PAGECACHE;
} else
nfsi->cache_validity |= NFS_INO_INVALID_ATTR
- | NFS_INO_INVALID_LABEL
| NFS_INO_INVALID_ACCESS
| NFS_INO_INVALID_ACL
| NFS_INO_REVAL_PAGECACHE;
+ nfs_zap_label_cache_locked(nfsi);
}
void nfs_zap_caches(struct inode *inode)
@@ -266,6 +265,13 @@ nfs_init_locked(struct inode *inode, void *opaque)
}
#ifdef CONFIG_NFS_V4_SECURITY_LABEL
+static void nfs_clear_label_invalid(struct inode *inode)
+{
+ spin_lock(&inode->i_lock);
+ NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_LABEL;
+ spin_unlock(&inode->i_lock);
+}
+
void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
struct nfs4_label *label)
{
@@ -283,6 +289,7 @@ void nfs_setsecurity(struct inode *inode, struct nfs_fattr *fattr,
__func__,
(char *)label->label,
label->len, error);
+ nfs_clear_label_invalid(inode);
}
}
@@ -1648,7 +1655,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
inode->i_blocks = fattr->du.nfs2.blocks;
/* Update attrtimeo value if we're out of the unstable period */
- if (invalid & (NFS_INO_INVALID_ATTR|NFS_INO_INVALID_LABEL)) {
+ if (invalid & NFS_INO_INVALID_ATTR) {
nfs_inc_stats(inode, NFSIOS_ATTRINVALIDATE);
nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
nfsi->attrtimeo_timestamp = now;
@@ -1661,7 +1668,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
}
}
invalid &= ~NFS_INO_INVALID_ATTR;
- invalid &= ~NFS_INO_INVALID_LABEL;
/* Don't invalidate the data if we were to blame */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
|| S_ISLNK(inode->i_mode)))
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 8b5cc04a8611..b46cf5a67329 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -176,7 +176,8 @@ extern struct nfs_server *nfs4_create_server(
extern struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *,
struct nfs_fh *);
extern int nfs4_update_server(struct nfs_server *server, const char *hostname,
- struct sockaddr *sap, size_t salen);
+ struct sockaddr *sap, size_t salen,
+ struct net *net);
extern void nfs_free_server(struct nfs_server *server);
extern struct nfs_server *nfs_clone_server(struct nfs_server *,
struct nfs_fh *,
@@ -279,9 +280,18 @@ static inline void nfs4_label_free(struct nfs4_label *label)
}
return;
}
+
+static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+{
+ if (nfs_server_capable(&nfsi->vfs_inode, NFS_CAP_SECURITY_LABEL))
+ nfsi->cache_validity |= NFS_INO_INVALID_LABEL;
+}
#else
static inline struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags) { return NULL; }
static inline void nfs4_label_free(void *label) {}
+static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
+{
+}
#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
/* proc.c */
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 9a5ca03fa539..871d6eda8dba 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -80,7 +80,7 @@ struct posix_acl *nfs3_get_acl(struct inode *inode, int type)
}
if (res.acl_access != NULL) {
- if (posix_acl_equiv_mode(res.acl_access, NULL) ||
+ if ((posix_acl_equiv_mode(res.acl_access, NULL) == 0) ||
res.acl_access->a_count == 0) {
posix_acl_release(res.acl_access);
res.acl_access = NULL;
@@ -113,7 +113,7 @@ getout:
return ERR_PTR(status);
}
-int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+static int __nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
struct posix_acl *dfacl)
{
struct nfs_server *server = NFS_SERVER(inode);
@@ -198,6 +198,15 @@ out:
return status;
}
+int nfs3_proc_setacls(struct inode *inode, struct posix_acl *acl,
+ struct posix_acl *dfacl)
+{
+ int ret;
+ ret = __nfs3_proc_setacls(inode, acl, dfacl);
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
+
+}
+
int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
{
struct posix_acl *alloc = NULL, *dfacl = NULL;
@@ -225,7 +234,7 @@ int nfs3_set_acl(struct inode *inode, struct posix_acl *acl, int type)
if (IS_ERR(alloc))
goto fail;
}
- status = nfs3_proc_setacls(inode, acl, dfacl);
+ status = __nfs3_proc_setacls(inode, acl, dfacl);
posix_acl_release(alloc);
return status;
@@ -233,25 +242,6 @@ fail:
return PTR_ERR(alloc);
}
-int nfs3_proc_set_default_acl(struct inode *dir, struct inode *inode,
- umode_t mode)
-{
- struct posix_acl *default_acl, *acl;
- int error;
-
- error = posix_acl_create(dir, &mode, &default_acl, &acl);
- if (error)
- return (error == -EOPNOTSUPP) ? 0 : error;
-
- error = nfs3_proc_setacls(inode, acl, default_acl);
-
- if (acl)
- posix_acl_release(acl);
- if (default_acl)
- posix_acl_release(default_acl);
- return error;
-}
-
const struct xattr_handler *nfs3_xattr_handlers[] = {
&posix_acl_access_xattr_handler,
&posix_acl_default_xattr_handler,
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index aa9bc973f36a..a462ef0fb5d6 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -18,6 +18,7 @@
#include <linux/lockd/bind.h>
#include <linux/nfs_mount.h>
#include <linux/freezer.h>
+#include <linux/xattr.h>
#include "iostat.h"
#include "internal.h"
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index dbb3e1f30c68..0e46d3d1b6cc 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -170,7 +170,7 @@ void nfs41_shutdown_client(struct nfs_client *clp)
void nfs40_shutdown_client(struct nfs_client *clp)
{
if (clp->cl_slot_tbl) {
- nfs4_release_slot_table(clp->cl_slot_tbl);
+ nfs4_shutdown_slot_table(clp->cl_slot_tbl);
kfree(clp->cl_slot_tbl);
}
}
@@ -1135,6 +1135,7 @@ static int nfs_probe_destination(struct nfs_server *server)
* @hostname: new end-point's hostname
* @sap: new end-point's socket address
* @salen: size of "sap"
+ * @net: net namespace
*
* The nfs_server must be quiescent before this function is invoked.
* Either its session is drained (NFSv4.1+), or its transport is
@@ -1143,13 +1144,13 @@ static int nfs_probe_destination(struct nfs_server *server)
* Returns zero on success, or a negative errno value.
*/
int nfs4_update_server(struct nfs_server *server, const char *hostname,
- struct sockaddr *sap, size_t salen)
+ struct sockaddr *sap, size_t salen, struct net *net)
{
struct nfs_client *clp = server->nfs_client;
struct rpc_clnt *clnt = server->client;
struct xprt_create xargs = {
.ident = clp->cl_proto,
- .net = &init_net,
+ .net = net,
.dstaddr = sap,
.addrlen = salen,
.servername = hostname,
@@ -1189,7 +1190,7 @@ int nfs4_update_server(struct nfs_server *server, const char *hostname,
error = nfs4_set_client(server, hostname, sap, salen, buf,
clp->cl_rpcclient->cl_auth->au_flavor,
clp->cl_proto, clnt->cl_timeout,
- clp->cl_minorversion, clp->cl_net);
+ clp->cl_minorversion, net);
nfs_put_client(clp);
if (error != 0) {
nfs_server_insert_lists(server);
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 12c8132ad408..b9a35c05b60f 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -324,8 +324,9 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
&rdata->res.seq_res,
task))
return;
- nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
- rdata->args.lock_context, FMODE_READ);
+ if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context,
+ rdata->args.lock_context, FMODE_READ) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void filelayout_read_call_done(struct rpc_task *task, void *data)
@@ -435,8 +436,9 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
&wdata->res.seq_res,
task))
return;
- nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
- wdata->args.lock_context, FMODE_WRITE);
+ if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context,
+ wdata->args.lock_context, FMODE_WRITE) == -EIO)
+ rpc_exit(task, -EIO); /* lost lock, terminate I/O */
}
static void filelayout_write_call_done(struct rpc_task *task, void *data)
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 4e7f05d3e9db..3d5dbf80d46a 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -121,9 +121,8 @@ static int nfs4_validate_fspath(struct dentry *dentry,
}
static size_t nfs_parse_server_name(char *string, size_t len,
- struct sockaddr *sa, size_t salen, struct nfs_server *server)
+ struct sockaddr *sa, size_t salen, struct net *net)
{
- struct net *net = rpc_net_ns(server->client);
ssize_t ret;
ret = rpc_pton(net, string, len, sa, salen);
@@ -223,6 +222,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
const struct nfs4_fs_location *location)
{
const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+ struct net *net = rpc_net_ns(NFS_SB(mountdata->sb)->client);
struct vfsmount *mnt = ERR_PTR(-ENOENT);
char *mnt_path;
unsigned int maxbuflen;
@@ -248,8 +248,7 @@ static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
continue;
mountdata->addrlen = nfs_parse_server_name(buf->data, buf->len,
- mountdata->addr, addr_bufsize,
- NFS_SB(mountdata->sb));
+ mountdata->addr, addr_bufsize, net);
if (mountdata->addrlen == 0)
continue;
@@ -419,6 +418,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
const struct nfs4_fs_location *location)
{
const size_t addr_bufsize = sizeof(struct sockaddr_storage);
+ struct net *net = rpc_net_ns(server->client);
struct sockaddr *sap;
unsigned int s;
size_t salen;
@@ -440,7 +440,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
continue;
salen = nfs_parse_server_name(buf->data, buf->len,
- sap, addr_bufsize, server);
+ sap, addr_bufsize, net);
if (salen == 0)
continue;
rpc_set_port(sap, NFS_PORT);
@@ -450,7 +450,7 @@ static int nfs4_try_replacing_one_location(struct nfs_server *server,
if (hostname == NULL)
break;
- error = nfs4_update_server(server, hostname, sap, salen);
+ error = nfs4_update_server(server, hostname, sap, salen, net);
kfree(hostname);
if (error == 0)
break;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 42da6af77587..450bfedbe2f4 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1620,15 +1620,15 @@ static void nfs4_open_confirm_prepare(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
- nfs40_setup_sequence(data->o_arg.server, &data->o_arg.seq_args,
- &data->o_res.seq_res, task);
+ nfs40_setup_sequence(data->o_arg.server, &data->c_arg.seq_args,
+ &data->c_res.seq_res, task);
}
static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
{
struct nfs4_opendata *data = calldata;
- nfs40_sequence_done(task, &data->o_res.seq_res);
+ nfs40_sequence_done(task, &data->c_res.seq_res);
data->rpc_status = task->tk_status;
if (data->rpc_status == 0) {
@@ -1686,7 +1686,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
};
int status;
- nfs4_init_sequence(&data->o_arg.seq_args, &data->o_res.seq_res, 1);
+ nfs4_init_sequence(&data->c_arg.seq_args, &data->c_res.seq_res, 1);
kref_get(&data->kref);
data->rpc_done = 0;
data->rpc_status = 0;
@@ -2398,13 +2398,16 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) {
/* Use that stateid */
- } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) {
+ } else if (truncate && state != NULL) {
struct nfs_lockowner lockowner = {
.l_owner = current->files,
.l_pid = current->tgid,
};
- nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
- &lockowner);
+ if (!nfs4_valid_open_stateid(state))
+ return -EBADF;
+ if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE,
+ &lockowner) == -EIO)
+ return -EBADF;
} else
nfs4_stateid_copy(&arg.stateid, &zero_stateid);
@@ -4011,8 +4014,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid,
{
nfs4_stateid current_stateid;
- if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode))
- return false;
+ /* If the current stateid represents a lost lock, then exit */
+ if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO)
+ return true;
return nfs4_stateid_match(stateid, &current_stateid);
}
@@ -5828,8 +5832,7 @@ struct nfs_release_lockowner_data {
struct nfs4_lock_state *lsp;
struct nfs_server *server;
struct nfs_release_lockowner_args args;
- struct nfs4_sequence_args seq_args;
- struct nfs4_sequence_res seq_res;
+ struct nfs_release_lockowner_res res;
unsigned long timestamp;
};
@@ -5837,7 +5840,7 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata
{
struct nfs_release_lockowner_data *data = calldata;
nfs40_setup_sequence(data->server,
- &data->seq_args, &data->seq_res, task);
+ &data->args.seq_args, &data->res.seq_res, task);
data->timestamp = jiffies;
}
@@ -5846,7 +5849,7 @@ static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata)
struct nfs_release_lockowner_data *data = calldata;
struct nfs_server *server = data->server;
- nfs40_sequence_done(task, &data->seq_res);
+ nfs40_sequence_done(task, &data->res.seq_res);
switch (task->tk_status) {
case 0:
@@ -5887,7 +5890,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
data = kmalloc(sizeof(*data), GFP_NOFS);
if (!data)
return -ENOMEM;
- nfs4_init_sequence(&data->seq_args, &data->seq_res, 0);
data->lsp = lsp;
data->server = server;
data->args.lock_owner.clientid = server->nfs_client->cl_clientid;
@@ -5895,6 +5897,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st
data->args.lock_owner.s_dev = server->s_dev;
msg.rpc_argp = &data->args;
+ msg.rpc_resp = &data->res;
+ nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0);
rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data);
return 0;
}
diff --git a/fs/nfs/nfs4session.c b/fs/nfs/nfs4session.c
index cf883c7ae053..e799dc3c3b1d 100644
--- a/fs/nfs/nfs4session.c
+++ b/fs/nfs/nfs4session.c
@@ -231,14 +231,23 @@ out:
return ret;
}
+/*
+ * nfs4_release_slot_table - release all slot table entries
+ */
+static void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+{
+ nfs4_shrink_slot_table(tbl, 0);
+}
+
/**
- * nfs4_release_slot_table - release resources attached to a slot table
+ * nfs4_shutdown_slot_table - release resources attached to a slot table
* @tbl: slot table to shut down
*
*/
-void nfs4_release_slot_table(struct nfs4_slot_table *tbl)
+void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl)
{
- nfs4_shrink_slot_table(tbl, 0);
+ nfs4_release_slot_table(tbl);
+ rpc_destroy_wait_queue(&tbl->slot_tbl_waitq);
}
/**
@@ -422,7 +431,7 @@ void nfs41_update_target_slotid(struct nfs4_slot_table *tbl,
spin_unlock(&tbl->slot_tbl_lock);
}
-static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+static void nfs4_release_session_slot_tables(struct nfs4_session *session)
{
nfs4_release_slot_table(&session->fc_slot_table);
nfs4_release_slot_table(&session->bc_slot_table);
@@ -450,7 +459,7 @@ int nfs4_setup_session_slot_tables(struct nfs4_session *ses)
if (status && tbl->slots == NULL)
/* Fore and back channel share a connection so get
* both slot tables or neither */
- nfs4_destroy_session_slot_tables(ses);
+ nfs4_release_session_slot_tables(ses);
return status;
}
@@ -470,6 +479,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
return session;
}
+static void nfs4_destroy_session_slot_tables(struct nfs4_session *session)
+{
+ nfs4_shutdown_slot_table(&session->fc_slot_table);
+ nfs4_shutdown_slot_table(&session->bc_slot_table);
+}
+
void nfs4_destroy_session(struct nfs4_session *session)
{
struct rpc_xprt *xprt;
diff --git a/fs/nfs/nfs4session.h b/fs/nfs/nfs4session.h
index 232306100651..b34ada9bc6a2 100644
--- a/fs/nfs/nfs4session.h
+++ b/fs/nfs/nfs4session.h
@@ -74,7 +74,7 @@ enum nfs4_session_state {
extern int nfs4_setup_slot_table(struct nfs4_slot_table *tbl,
unsigned int max_reqs, const char *queue);
-extern void nfs4_release_slot_table(struct nfs4_slot_table *tbl);
+extern void nfs4_shutdown_slot_table(struct nfs4_slot_table *tbl);
extern struct nfs4_slot *nfs4_alloc_slot(struct nfs4_slot_table *tbl);
extern void nfs4_free_slot(struct nfs4_slot_table *tbl, struct nfs4_slot *slot);
extern void nfs4_slot_tbl_drain_complete(struct nfs4_slot_table *tbl);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index e5be72518bd7..0deb32105ccf 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -974,9 +974,6 @@ static int nfs4_copy_lock_stateid(nfs4_stateid *dst,
else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) {
nfs4_stateid_copy(dst, &lsp->ls_stateid);
ret = 0;
- smp_rmb();
- if (!list_empty(&lsp->ls_seqid.list))
- ret = -EWOULDBLOCK;
}
spin_unlock(&state->state_lock);
nfs4_put_lock_state(lsp);
@@ -984,10 +981,9 @@ out:
return ret;
}
-static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
+static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
{
const nfs4_stateid *src;
- int ret;
int seq;
do {
@@ -996,12 +992,7 @@ static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state)
if (test_bit(NFS_OPEN_STATE, &state->flags))
src = &state->open_stateid;
nfs4_stateid_copy(dst, src);
- ret = 0;
- smp_rmb();
- if (!list_empty(&state->owner->so_seqid.list))
- ret = -EWOULDBLOCK;
} while (read_seqretry(&state->seqlock, seq));
- return ret;
}
/*
@@ -1015,15 +1006,19 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state,
if (ret == -EIO)
/* A lost lock - don't even consider delegations */
goto out;
- if (nfs4_copy_delegation_stateid(dst, state->inode, fmode))
+ /* returns true if delegation stateid found and copied */
+ if (nfs4_copy_delegation_stateid(dst, state->inode, fmode)) {
+ ret = 0;
goto out;
+ }
if (ret != -ENOENT)
/* nfs4_copy_delegation_stateid() didn't over-write
* dst, so it still has the lock stateid which we now
* choose to use.
*/
goto out;
- ret = nfs4_copy_open_stateid(dst, state);
+ nfs4_copy_open_stateid(dst, state);
+ ret = 0;
out:
if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41))
dst->seqid = 0;
diff --git a/fs/nfsd/nfs4acl.c b/fs/nfsd/nfs4acl.c
index d3a587144222..d190e33d0ec2 100644
--- a/fs/nfsd/nfs4acl.c
+++ b/fs/nfsd/nfs4acl.c
@@ -151,17 +151,15 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
if (IS_ERR(pacl))
return PTR_ERR(pacl);
- /* allocate for worst case: one (deny, allow) pair each: */
- size += 2 * pacl->a_count;
}
+ /* allocate for worst case: one (deny, allow) pair each: */
+ size += 2 * pacl->a_count;
if (S_ISDIR(inode->i_mode)) {
flags = NFS4_ACL_DIR;
dpacl = get_acl(inode, ACL_TYPE_DEFAULT);
if (dpacl)
size += 2 * dpacl->a_count;
- } else {
- dpacl = NULL;
}
*acl = nfs4_acl_new(size);
@@ -170,8 +168,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
goto out;
}
- if (pacl)
- _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
+ _posix_to_nfsv4_one(pacl, *acl, flags & ~NFS4_ACL_TYPE_DEFAULT);
if (dpacl)
_posix_to_nfsv4_one(dpacl, *acl, flags | NFS4_ACL_TYPE_DEFAULT);
diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c
index 0b9ff4395e6a..abc8cbcfe90e 100644
--- a/fs/notify/dnotify/dnotify.c
+++ b/fs/notify/dnotify/dnotify.c
@@ -86,7 +86,7 @@ static int dnotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
struct dnotify_mark *dn_mark;
struct dnotify_struct *dn;
diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c
index 0e792f5e3147..dc638f786d5c 100644
--- a/fs/notify/fanotify/fanotify.c
+++ b/fs/notify/fanotify/fanotify.c
@@ -147,7 +147,7 @@ static int fanotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *fanotify_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
int ret = 0;
struct fanotify_event_info *event;
@@ -192,10 +192,12 @@ static int fanotify_handle_event(struct fsnotify_group *group,
ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge);
if (ret) {
- BUG_ON(mask & FAN_ALL_PERM_EVENTS);
+ /* Permission events shouldn't be merged */
+ BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS);
/* Our event wasn't used in the end. Free it. */
fsnotify_destroy_event(group, fsn_event);
- ret = 0;
+
+ return 0;
}
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index b6175fa11bf8..287a22c04149 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -698,6 +698,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
struct fsnotify_group *group;
int f_flags, fd;
struct user_struct *user;
+ struct fanotify_event_info *oevent;
pr_debug("%s: flags=%d event_f_flags=%d\n",
__func__, flags, event_f_flags);
@@ -730,8 +731,20 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
group->fanotify_data.user = user;
atomic_inc(&user->fanotify_listeners);
+ oevent = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL);
+ if (unlikely(!oevent)) {
+ fd = -ENOMEM;
+ goto out_destroy_group;
+ }
+ group->overflow_event = &oevent->fse;
+ fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+ oevent->tgid = get_pid(task_tgid(current));
+ oevent->path.mnt = NULL;
+ oevent->path.dentry = NULL;
+
group->fanotify_data.f_flags = event_f_flags;
#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+ oevent->response = 0;
mutex_init(&group->fanotify_data.access_mutex);
init_waitqueue_head(&group->fanotify_data.access_waitq);
INIT_LIST_HEAD(&group->fanotify_data.access_list);
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 1d4e1ea2f37c..9d3e9c50066a 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -179,7 +179,7 @@ static int send_to_group(struct inode *to_tell,
return group->ops->handle_event(group, to_tell, inode_mark,
vfsmount_mark, mask, data, data_is,
- file_name);
+ file_name, cookie);
}
/*
diff --git a/fs/notify/group.c b/fs/notify/group.c
index ee674fe2cec7..ad1995980456 100644
--- a/fs/notify/group.c
+++ b/fs/notify/group.c
@@ -55,6 +55,13 @@ void fsnotify_destroy_group(struct fsnotify_group *group)
/* clear the notification queue of all events */
fsnotify_flush_notify(group);
+ /*
+ * Destroy overflow event (we cannot use fsnotify_destroy_event() as
+ * that deliberately ignores overflow events.
+ */
+ if (group->overflow_event)
+ group->ops->free_event(group->overflow_event);
+
fsnotify_put_group(group);
}
@@ -99,7 +106,6 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)
INIT_LIST_HEAD(&group->marks_list);
group->ops = ops;
- fsnotify_init_event(&group->overflow_event, NULL, FS_Q_OVERFLOW);
return group;
}
diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h
index 485eef3f4407..ed855ef6f077 100644
--- a/fs/notify/inotify/inotify.h
+++ b/fs/notify/inotify/inotify.h
@@ -27,6 +27,6 @@ extern int inotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name);
+ const unsigned char *file_name, u32 cookie);
extern const struct fsnotify_ops inotify_fsnotify_ops;
diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c
index d5ee56348bb8..43ab1e1a07a2 100644
--- a/fs/notify/inotify/inotify_fsnotify.c
+++ b/fs/notify/inotify/inotify_fsnotify.c
@@ -67,7 +67,7 @@ int inotify_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
struct inotify_inode_mark *i_mark;
struct inotify_event_info *event;
@@ -103,6 +103,7 @@ int inotify_handle_event(struct fsnotify_group *group,
fsn_event = &event->fse;
fsnotify_init_event(fsn_event, inode, mask);
event->wd = i_mark->wd;
+ event->sync_cookie = cookie;
event->name_len = len;
if (len)
strcpy(event->name, file_name);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index 497395c8274b..78a2ca3966c3 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -495,7 +495,7 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,
/* Queue ignore event for the watch */
inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED,
- NULL, FSNOTIFY_EVENT_NONE, NULL);
+ NULL, FSNOTIFY_EVENT_NONE, NULL, 0);
i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark);
/* remove this mark from the idr */
@@ -633,11 +633,23 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod
static struct fsnotify_group *inotify_new_group(unsigned int max_events)
{
struct fsnotify_group *group;
+ struct inotify_event_info *oevent;
group = fsnotify_alloc_group(&inotify_fsnotify_ops);
if (IS_ERR(group))
return group;
+ oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL);
+ if (unlikely(!oevent)) {
+ fsnotify_destroy_group(group);
+ return ERR_PTR(-ENOMEM);
+ }
+ group->overflow_event = &oevent->fse;
+ fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW);
+ oevent->wd = -1;
+ oevent->sync_cookie = 0;
+ oevent->name_len = 0;
+
group->max_events = max_events;
spin_lock_init(&group->inotify_data.idr_lock);
diff --git a/fs/notify/notification.c b/fs/notify/notification.c
index 18b3c4427dca..1e58402171a5 100644
--- a/fs/notify/notification.c
+++ b/fs/notify/notification.c
@@ -80,7 +80,8 @@ void fsnotify_destroy_event(struct fsnotify_group *group,
/*
* Add an event to the group notification queue. The group can later pull this
* event off the queue to deal with. The function returns 0 if the event was
- * added to the queue, 1 if the event was merged with some other queued event.
+ * added to the queue, 1 if the event was merged with some other queued event,
+ * 2 if the queue of events has overflown.
*/
int fsnotify_add_notify_event(struct fsnotify_group *group,
struct fsnotify_event *event,
@@ -95,10 +96,14 @@ int fsnotify_add_notify_event(struct fsnotify_group *group,
mutex_lock(&group->notification_mutex);
if (group->q_len >= group->max_events) {
+ ret = 2;
/* Queue overflow event only if it isn't already queued */
- if (list_empty(&group->overflow_event.list))
- event = &group->overflow_event;
- ret = 1;
+ if (!list_empty(&group->overflow_event->list)) {
+ mutex_unlock(&group->notification_mutex);
+ return ret;
+ }
+ event = group->overflow_event;
+ goto queue;
}
if (!list_empty(list) && merge) {
@@ -109,6 +114,7 @@ int fsnotify_add_notify_event(struct fsnotify_group *group,
}
}
+queue:
group->q_len++;
list_add_tail(&event->list, list);
mutex_unlock(&group->notification_mutex);
@@ -132,7 +138,11 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
event = list_first_entry(&group->notification_list,
struct fsnotify_event, list);
- list_del(&event->list);
+ /*
+ * We need to init list head for the case of overflow event so that
+ * check in fsnotify_add_notify_events() works
+ */
+ list_del_init(&event->list);
group->q_len--;
return event;
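fsnotify_add_notify_event() now returns 0 when the event was queued, 1 when it was merged into an already queued event and 2 when the queue overflowed; in the non-zero cases the caller still owns the event it allocated, as the fanotify hunk above shows. A hedged sketch of a backend honoring that contract; example_queue_event() is an illustrative name, not one of the in-tree backends:

#include <linux/fsnotify_backend.h>

static int example_queue_event(struct fsnotify_group *group,
			       struct fsnotify_event *event)
{
	int ret;

	/* No merge callback here, so non-zero still means "not queued". */
	ret = fsnotify_add_notify_event(group, event, NULL);
	if (ret) {
		/* merged (1) or overflow (2): free our unused event */
		fsnotify_destroy_event(group, event);
	}
	return 0;
}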
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index ea4ba9daeb47..db9bd8a31725 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2134,7 +2134,7 @@ static ssize_t ntfs_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
ret = ntfs_file_aio_write_nolock(iocb, iov, nr_segs, &iocb->ki_pos);
mutex_unlock(&inode->i_mutex);
if (ret > 0) {
- int err = generic_write_sync(file, pos, ret);
+ int err = generic_write_sync(file, iocb->ki_pos - ret, ret);
if (err < 0)
ret = err;
}
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 8750ae1b8636..e2edff38be52 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -4742,6 +4742,7 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
enum ocfs2_alloc_restarted *reason_ret)
{
int status = 0, err = 0;
+ int need_free = 0;
int free_extents;
enum ocfs2_alloc_restarted reason = RESTART_NONE;
u32 bit_off, num_bits;
@@ -4796,7 +4797,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ need_free = 1;
+ goto bail;
}
block = ocfs2_clusters_to_blocks(osb->sb, bit_off);
@@ -4807,7 +4809,8 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
num_bits, flags, meta_ac);
if (status < 0) {
mlog_errno(status);
- goto leave;
+ need_free = 1;
+ goto bail;
}
ocfs2_journal_dirty(handle, et->et_root_bh);
@@ -4821,6 +4824,19 @@ int ocfs2_add_clusters_in_btree(handle_t *handle,
reason = RESTART_TRANS;
}
+bail:
+ if (need_free) {
+ if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+ bit_off, num_bits);
+ else
+ ocfs2_free_clusters(handle,
+ data_ac->ac_inode,
+ data_ac->ac_bh,
+ ocfs2_clusters_to_blocks(osb->sb, bit_off),
+ num_bits);
+ }
+
leave:
if (reason_ret)
*reason_ret = reason;
@@ -6805,6 +6821,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, i, has_data, num_pages = 0;
+ int need_free = 0;
+ u32 bit_off, num;
handle_t *handle;
u64 uninitialized_var(block);
struct ocfs2_inode_info *oi = OCFS2_I(inode);
@@ -6850,7 +6868,6 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
if (has_data) {
- u32 bit_off, num;
unsigned int page_end;
u64 phys;
@@ -6886,6 +6903,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
ret = ocfs2_grab_eof_pages(inode, 0, end, pages, &num_pages);
if (ret) {
mlog_errno(ret);
+ need_free = 1;
goto out_commit;
}
@@ -6896,6 +6914,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
ret = ocfs2_read_inline_data(inode, pages[0], di_bh);
if (ret) {
mlog_errno(ret);
+ need_free = 1;
goto out_commit;
}
@@ -6927,6 +6946,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
ret = ocfs2_insert_extent(handle, &et, 0, block, 1, 0, NULL);
if (ret) {
mlog_errno(ret);
+ need_free = 1;
goto out_commit;
}
@@ -6938,6 +6958,18 @@ out_commit:
dquot_free_space_nodirty(inode,
ocfs2_clusters_to_bytes(osb->sb, 1));
+ if (need_free) {
+ if (data_ac->ac_which == OCFS2_AC_USE_LOCAL)
+ ocfs2_free_local_alloc_bits(osb, handle, data_ac,
+ bit_off, num);
+ else
+ ocfs2_free_clusters(handle,
+ data_ac->ac_inode,
+ data_ac->ac_bh,
+ ocfs2_clusters_to_blocks(osb->sb, bit_off),
+ num);
+ }
+
ocfs2_commit_trans(osb, handle);
out_unlock:
@@ -7126,7 +7158,7 @@ int ocfs2_truncate_inline(struct inode *inode, struct buffer_head *di_bh,
if (end > i_size_read(inode))
end = i_size_read(inode);
- BUG_ON(start >= end);
+ BUG_ON(start > end);
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) ||
!(le16_to_cpu(di->i_dyn_features) & OCFS2_INLINE_DATA_FL) ||
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index d77d71ead8d1..51632c40e896 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -185,6 +185,9 @@ static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
file->f_path.dentry->d_name.name,
(unsigned long long)datasync);
+ if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
+ return -EROFS;
+
err = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (err)
return err;
@@ -474,11 +477,6 @@ static int ocfs2_truncate_file(struct inode *inode,
goto bail;
}
- /* lets handle the simple truncate cases before doing any more
- * cluster locking. */
- if (new_i_size == le64_to_cpu(fe->i_size))
- goto bail;
-
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ocfs2_resv_discard(&osb->osb_la_resmap,
@@ -718,7 +716,8 @@ leave:
* While a write will already be ordering the data, a truncate will not.
* Thus, we need to explicitly order the zeroed pages.
*/
-static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
+static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
+ struct buffer_head *di_bh)
{
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
handle_t *handle = NULL;
@@ -735,7 +734,14 @@ static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode)
}
ret = ocfs2_jbd2_file_inode(handle, inode);
- if (ret < 0)
+ if (ret < 0) {
+ mlog_errno(ret);
+ goto out;
+ }
+
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret)
mlog_errno(ret);
out:
@@ -751,7 +757,7 @@ out:
* to be too fragile to do exactly what we need without us having to
* worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
- u64 abs_to)
+ u64 abs_to, struct buffer_head *di_bh)
{
struct address_space *mapping = inode->i_mapping;
struct page *page;
@@ -759,6 +765,7 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
handle_t *handle = NULL;
int ret = 0;
unsigned zero_from, zero_to, block_start, block_end;
+ struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
BUG_ON(abs_from >= abs_to);
BUG_ON(abs_to > (((u64)index + 1) << PAGE_CACHE_SHIFT));
@@ -801,7 +808,8 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
}
if (!handle) {
- handle = ocfs2_zero_start_ordered_transaction(inode);
+ handle = ocfs2_zero_start_ordered_transaction(inode,
+ di_bh);
if (IS_ERR(handle)) {
ret = PTR_ERR(handle);
handle = NULL;
@@ -818,8 +826,22 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
ret = 0;
}
- if (handle)
+ if (handle) {
+ /*
+ * fs-writeback will release dirty pages without the page lock when
+ * their offset is beyond the inode size; the release happens in
+ * block_write_full_page_endio().
+ */
+ i_size_write(inode, abs_to);
+ inode->i_blocks = ocfs2_inode_sector_count(inode);
+ di->i_size = cpu_to_le64((u64)i_size_read(inode));
+ inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
+ di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
+ di->i_mtime_nsec = di->i_ctime_nsec;
+ ocfs2_journal_dirty(handle, di_bh);
ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+ }
out_unlock:
unlock_page(page);
@@ -915,7 +937,7 @@ out:
* has made sure that the entire range needs zeroing.
*/
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
- u64 range_end)
+ u64 range_end, struct buffer_head *di_bh)
{
int rc = 0;
u64 next_pos;
@@ -931,7 +953,7 @@ static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
next_pos = (zero_pos & PAGE_CACHE_MASK) + PAGE_CACHE_SIZE;
if (next_pos > range_end)
next_pos = range_end;
- rc = ocfs2_write_zero_page(inode, zero_pos, next_pos);
+ rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
if (rc < 0) {
mlog_errno(rc);
break;
@@ -977,7 +999,7 @@ int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
range_end = zero_to_size;
ret = ocfs2_zero_extend_range(inode, range_start,
- range_end);
+ range_end, di_bh);
if (ret) {
mlog_errno(ret);
break;
@@ -1145,14 +1167,14 @@ int ocfs2_setattr(struct dentry *dentry, struct iattr *attr)
goto bail_unlock_rw;
}
- if (size_change && attr->ia_size != i_size_read(inode)) {
+ if (size_change) {
status = inode_newsize_ok(inode, attr->ia_size);
if (status)
goto bail_unlock;
inode_dio_wait(inode);
- if (i_size_read(inode) > attr->ia_size) {
+ if (i_size_read(inode) >= attr->ia_size) {
if (ocfs2_should_order_data(inode)) {
status = ocfs2_begin_ordered_truncate(inode,
attr->ia_size);
@@ -2371,8 +2393,8 @@ out_dio:
if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) ||
((file->f_flags & O_DIRECT) && !direct_io)) {
- ret = filemap_fdatawrite_range(file->f_mapping, pos,
- pos + count - 1);
+ ret = filemap_fdatawrite_range(file->f_mapping, *ppos,
+ *ppos + count - 1);
if (ret < 0)
written = ret;
@@ -2385,8 +2407,8 @@ out_dio:
}
if (!ret)
- ret = filemap_fdatawait_range(file->f_mapping, pos,
- pos + count - 1);
+ ret = filemap_fdatawait_range(file->f_mapping, *ppos,
+ *ppos + count - 1);
}
/*
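
Ordering matters in the zero-extend path above: the zeroed page is only safe from fs-writeback once the new size is visible in both the VFS inode and the journalled dinode, so the update happens inside the ordered handle before commit. A condensed sketch of that commit-side sequence, assuming di_bh was made journal-writable in ocfs2_zero_start_ordered_transaction():

        if (handle) {
                /* publish the new size before the transaction commits */
                i_size_write(inode, abs_to);
                inode->i_blocks = ocfs2_inode_sector_count(inode);
                di->i_size = cpu_to_le64(i_size_read(inode));
                inode->i_mtime = inode->i_ctime = CURRENT_TIME;
                di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
                di->i_mtime_nsec = di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
                ocfs2_journal_dirty(handle, di_bh);
                ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
        }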
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index cd5496b7a0a3..044013455621 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -781,6 +781,48 @@ bail:
return status;
}
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 bit_off,
+ u32 num_bits)
+{
+ int status, start;
+ u32 clear_bits;
+ struct inode *local_alloc_inode;
+ void *bitmap;
+ struct ocfs2_dinode *alloc;
+ struct ocfs2_local_alloc *la;
+
+ BUG_ON(ac->ac_which != OCFS2_AC_USE_LOCAL);
+
+ local_alloc_inode = ac->ac_inode;
+ alloc = (struct ocfs2_dinode *) osb->local_alloc_bh->b_data;
+ la = OCFS2_LOCAL_ALLOC(alloc);
+
+ bitmap = la->la_bitmap;
+ start = bit_off - le32_to_cpu(la->la_bm_off);
+ clear_bits = num_bits;
+
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(local_alloc_inode),
+ osb->local_alloc_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto bail;
+ }
+
+ while (clear_bits--)
+ ocfs2_clear_bit(start++, bitmap);
+
+ le32_add_cpu(&alloc->id1.bitmap1.i_used, -num_bits);
+ ocfs2_journal_dirty(handle, osb->local_alloc_bh);
+
+bail:
+ return status;
+}
+
static u32 ocfs2_local_alloc_count_bits(struct ocfs2_dinode *alloc)
{
u32 count;
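
The bookkeeping in ocfs2_free_local_alloc_bits() is window-relative. A small worked example with assumed values (not taken from the patch):

        /*
         * Assume la->la_bm_off = 1000 (first cluster covered by the window),
         * bit_off = 1012 and num_bits = 4. Then:
         *
         *   start = bit_off - la_bm_off = 12
         *
         * so bits 12..15 of la->la_bitmap are cleared and
         * id1.bitmap1.i_used drops by 4.
         */
        start = bit_off - le32_to_cpu(la->la_bm_off);
        while (num_bits--)
                ocfs2_clear_bit(start++, bitmap);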
diff --git a/fs/ocfs2/localalloc.h b/fs/ocfs2/localalloc.h
index 1be9b5864460..44a7d1fb2dec 100644
--- a/fs/ocfs2/localalloc.h
+++ b/fs/ocfs2/localalloc.h
@@ -55,6 +55,12 @@ int ocfs2_claim_local_alloc_bits(struct ocfs2_super *osb,
u32 *bit_off,
u32 *num_bits);
+int ocfs2_free_local_alloc_bits(struct ocfs2_super *osb,
+ handle_t *handle,
+ struct ocfs2_alloc_context *ac,
+ u32 bit_off,
+ u32 num_bits);
+
void ocfs2_local_alloc_seen_free_bits(struct ocfs2_super *osb,
unsigned int num_clusters);
void ocfs2_la_enable_worker(struct work_struct *work);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index f4d609be9400..3683643f3f0e 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -664,6 +664,7 @@ static int ocfs2_link(struct dentry *old_dentry,
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
struct ocfs2_dir_lookup_result lookup = { NULL, };
sigset_t oldset;
+ u64 old_de_ino;
trace_ocfs2_link((unsigned long long)OCFS2_I(inode)->ip_blkno,
old_dentry->d_name.len, old_dentry->d_name.name,
@@ -686,6 +687,22 @@ static int ocfs2_link(struct dentry *old_dentry,
goto out;
}
+ err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+ old_dentry->d_name.len, &old_de_ino);
+ if (err) {
+ err = -ENOENT;
+ goto out;
+ }
+
+ /*
+ * Check whether another node removed the source inode while we
+ * were in the VFS.
+ */
+ if (old_de_ino != OCFS2_I(inode)->ip_blkno) {
+ err = -ENOENT;
+ goto out;
+ }
+
err = ocfs2_check_dir_for_entry(dir, dentry->d_name.name,
dentry->d_name.len);
if (err)
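
The added lookup closes a cross-node race: between the VFS resolving old_dentry and ocfs2 taking the directory cluster lock, another node may have unlinked or replaced the name. Condensed form of the guard introduced above:

        err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
                                         old_dentry->d_name.len, &old_de_ino);
        /* name is gone, or it now points at a different inode */
        if (err || old_de_ino != OCFS2_I(inode)->ip_blkno) {
                err = -ENOENT;
                goto out;
        }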
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index aaa50611ec66..d7b5108789e2 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -717,6 +717,12 @@ static int ocfs2_release_dquot(struct dquot *dquot)
*/
if (status < 0)
mlog_errno(status);
+ /*
+ * Clear dq_off so that we search for the structure in the quota file the
+ * next time we acquire it. The structure might be deleted and reallocated
+ * elsewhere by another node while our dquot structure is on the freelist.
+ */
+ dquot->dq_off = 0;
clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);
out_trans:
ocfs2_commit_trans(osb, handle);
@@ -756,16 +762,17 @@ static int ocfs2_acquire_dquot(struct dquot *dquot)
status = ocfs2_lock_global_qf(info, 1);
if (status < 0)
goto out;
- if (!test_bit(DQ_READ_B, &dquot->dq_flags)) {
- status = ocfs2_qinfo_lock(info, 0);
- if (status < 0)
- goto out_dq;
- status = qtree_read_dquot(&info->dqi_gi, dquot);
- ocfs2_qinfo_unlock(info, 0);
- if (status < 0)
- goto out_dq;
- }
- set_bit(DQ_READ_B, &dquot->dq_flags);
+ status = ocfs2_qinfo_lock(info, 0);
+ if (status < 0)
+ goto out_dq;
+ /*
+ * We always want to read the dquot structure from disk because we don't
+ * know what happened to it while it was on the freelist.
+ */
+ status = qtree_read_dquot(&info->dqi_gi, dquot);
+ ocfs2_qinfo_unlock(info, 0);
+ if (status < 0)
+ goto out_dq;
OCFS2_DQUOT(dquot)->dq_use_count++;
OCFS2_DQUOT(dquot)->dq_origspace = dquot->dq_dqb.dqb_curspace;
diff --git a/fs/ocfs2/quota_local.c b/fs/ocfs2/quota_local.c
index 2e4344be3b96..2001862bf2b1 100644
--- a/fs/ocfs2/quota_local.c
+++ b/fs/ocfs2/quota_local.c
@@ -1303,10 +1303,6 @@ int ocfs2_local_release_dquot(handle_t *handle, struct dquot *dquot)
ocfs2_journal_dirty(handle, od->dq_chunk->qc_headerbh);
out:
- /* Clear the read bit so that next time someone uses this
- * dquot he reads fresh info from disk and allocates local
- * dquot structure */
- clear_bit(DQ_READ_B, &dquot->dq_flags);
return status;
}
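
Taken together, the two quota hunks stop trusting anything cached in a dquot that has been through the freelist: release forgets the on-disk offset, and acquire always re-reads the structure under the qinfo lock, so a structure deleted and reallocated by another node is picked up correctly. In condensed form:

        /* release side: forget where the structure lived on disk */
        dquot->dq_off = 0;
        clear_bit(DQ_ACTIVE_B, &dquot->dq_flags);

        /* acquire side: always refresh from disk */
        status = ocfs2_qinfo_lock(info, 0);
        if (status < 0)
                goto out_dq;
        status = qtree_read_dquot(&info->dqi_gi, dquot);
        ocfs2_qinfo_unlock(info, 0);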
diff --git a/fs/open.c b/fs/open.c
index 4b3e1edf2fe4..b9ed8b25c108 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -705,6 +705,10 @@ static int do_dentry_open(struct file *f,
return 0;
}
+ /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */
+ if (S_ISREG(inode->i_mode))
+ f->f_mode |= FMODE_ATOMIC_POS;
+
f->f_op = fops_get(inode->i_fop);
if (unlikely(WARN_ON(!f->f_op))) {
error = -ENODEV;
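
FMODE_ATOMIC_POS marks regular files so the read/write/lseek paths (see the fdget_pos() changes in fs/read_write.c later in this diff) can serialize updates to the shared file position, as POSIX.1-2008 XSI 2.9.7 requires. A small userspace sketch of the case the lock covers, two threads writing through one descriptor; with an atomic position the file ends up exactly 2 * COUNT bytes long with no record overwritten (build with cc -pthread):

#include <fcntl.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define COUNT 10000

static int fd;

static void *writer(void *arg)
{
        const char *tag = arg;

        for (int i = 0; i < COUNT; i++)
                if (write(fd, tag, 1) != 1) {   /* both threads share f_pos */
                        perror("write");
                        exit(1);
                }
        return NULL;
}

int main(void)
{
        pthread_t a, b;

        fd = open("pos-test.dat", O_CREAT | O_TRUNC | O_WRONLY, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }
        pthread_create(&a, NULL, writer, "A");
        pthread_create(&b, NULL, writer, "B");
        pthread_join(a, NULL);
        pthread_join(b, NULL);
        /* with an atomic file position this prints 2 * COUNT */
        printf("final offset: %lld\n", (long long)lseek(fd, 0, SEEK_CUR));
        close(fd);
        return 0;
}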
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 38bae5a0ea25..11c54fd51e16 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -521,8 +521,11 @@ posix_acl_chmod(struct inode *inode, umode_t mode)
return -EOPNOTSUPP;
acl = get_acl(inode, ACL_TYPE_ACCESS);
- if (IS_ERR_OR_NULL(acl))
+ if (IS_ERR_OR_NULL(acl)) {
+ if (acl == ERR_PTR(-EOPNOTSUPP))
+ return 0;
return PTR_ERR(acl);
+ }
ret = __posix_acl_chmod(&acl, GFP_KERNEL, mode);
if (ret)
@@ -544,14 +547,15 @@ posix_acl_create(struct inode *dir, umode_t *mode,
goto no_acl;
p = get_acl(dir, ACL_TYPE_DEFAULT);
- if (IS_ERR(p))
+ if (IS_ERR(p)) {
+ if (p == ERR_PTR(-EOPNOTSUPP))
+ goto apply_umask;
return PTR_ERR(p);
-
- if (!p) {
- *mode &= ~current_umask();
- goto no_acl;
}
+ if (!p)
+ goto apply_umask;
+
*acl = posix_acl_clone(p, GFP_NOFS);
if (!*acl)
return -ENOMEM;
@@ -575,6 +579,8 @@ posix_acl_create(struct inode *dir, umode_t *mode,
}
return 0;
+apply_umask:
+ *mode &= ~current_umask();
no_acl:
*default_acl = NULL;
*acl = NULL;
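
When get_acl() reports -EOPNOTSUPP for the default ACL, or there simply is none, mode selection falls back to the classic umask rule now shared through the apply_umask label. A quick worked example with assumed values:

        /* assumed: requested mode 0666, process umask 022 */
        umode_t mode = 0666;

        mode &= ~0022;          /* current_umask() == 0022 */
        /* mode is now 0644 and no ACLs are attached to the new inode */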
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 51507065263b..b9760628e1fd 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path)
if (rc)
goto out_mmput;
+ rc = -ENOENT;
down_read(&mm->mmap_sem);
vma = find_exact_vma(mm, vm_start, vm_end);
if (vma && vma->vm_file) {
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 02174a610315..e647c55275d9 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -121,9 +121,8 @@ u64 stable_page_flags(struct page *page)
* just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
* to make sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page) &&
- (PageLRU(compound_trans_head(page)) ||
- PageAnon(compound_trans_head(page))))
+ else if (PageTransCompound(page) && (PageLRU(compound_head(page)) ||
+ PageAnon(compound_head(page))))
u |= 1 << KPF_THP;
/*
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 2ca7ba047f04..88d4585b30f1 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -468,17 +468,24 @@ static int __init update_note_header_size_elf64(const Elf64_Ehdr *ehdr_ptr)
return rc;
}
nhdr_ptr = notes_section;
- while (real_sz < max_sz) {
- if (nhdr_ptr->n_namesz == 0)
- break;
+ while (nhdr_ptr->n_namesz != 0) {
sz = sizeof(Elf64_Nhdr) +
((nhdr_ptr->n_namesz + 3) & ~3) +
((nhdr_ptr->n_descsz + 3) & ~3);
+ if ((real_sz + sz) > max_sz) {
+ pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+ nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+ break;
+ }
real_sz += sz;
nhdr_ptr = (Elf64_Nhdr*)((char*)nhdr_ptr + sz);
}
kfree(notes_section);
phdr_ptr->p_memsz = real_sz;
+ if (real_sz == 0) {
+ pr_warn("Warning: Zero PT_NOTE entries found\n");
+ return -EINVAL;
+ }
}
return 0;
@@ -648,17 +655,24 @@ static int __init update_note_header_size_elf32(const Elf32_Ehdr *ehdr_ptr)
return rc;
}
nhdr_ptr = notes_section;
- while (real_sz < max_sz) {
- if (nhdr_ptr->n_namesz == 0)
- break;
+ while (nhdr_ptr->n_namesz != 0) {
sz = sizeof(Elf32_Nhdr) +
((nhdr_ptr->n_namesz + 3) & ~3) +
((nhdr_ptr->n_descsz + 3) & ~3);
+ if ((real_sz + sz) > max_sz) {
+ pr_warn("Warning: Exceeded p_memsz, dropping PT_NOTE entry n_namesz=0x%x, n_descsz=0x%x\n",
+ nhdr_ptr->n_namesz, nhdr_ptr->n_descsz);
+ break;
+ }
real_sz += sz;
nhdr_ptr = (Elf32_Nhdr*)((char*)nhdr_ptr + sz);
}
kfree(notes_section);
phdr_ptr->p_memsz = real_sz;
+ if (real_sz == 0) {
+ pr_warn("Warning: Zero PT_NOTE entries found\n");
+ return -EINVAL;
+ }
}
return 0;
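
Both the Elf64 and Elf32 loops now compute the full 4-byte-padded size of a note before accepting it, so corrupted n_namesz/n_descsz values can no longer walk past the PT_NOTE segment. A minimal userspace-style sketch of the same bounds-checked walk (walk_notes is a hypothetical name; an extra header-fits check is added for the standalone case):

#include <elf.h>
#include <stdio.h>

/* Returns the number of bytes occupied by well-formed notes in the buffer. */
static size_t walk_notes(const void *notes, size_t max_sz)
{
        const Elf64_Nhdr *nhdr = notes;
        size_t real_sz = 0;

        while (real_sz + sizeof(*nhdr) <= max_sz && nhdr->n_namesz != 0) {
                size_t sz = sizeof(*nhdr) +
                            ((nhdr->n_namesz + 3) & ~3UL) +
                            ((nhdr->n_descsz + 3) & ~3UL);

                if (real_sz + sz > max_sz) {
                        fprintf(stderr, "note exceeds segment, dropping it\n");
                        break;
                }
                real_sz += sz;
                nhdr = (const Elf64_Nhdr *)((const char *)nhdr + sz);
        }
        return real_sz;
}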
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 831d49a4111f..cfc8dcc16043 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -581,9 +581,17 @@ int dquot_scan_active(struct super_block *sb,
dqstats_inc(DQST_LOOKUPS);
dqput(old_dquot);
old_dquot = dquot;
- ret = fn(dquot, priv);
- if (ret < 0)
- goto out;
+ /*
+ * ->release_dquot() can be racing with us. Our reference
+ * protects us from new calls to it so just wait for any
+ * outstanding call and recheck the DQ_ACTIVE_B bit after that.
+ */
+ wait_on_dquot(dquot);
+ if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
+ ret = fn(dquot, priv);
+ if (ret < 0)
+ goto out;
+ }
spin_lock(&dq_list_lock);
/* We are safe to continue now because our dquot could not
* be moved out of the inuse list while we hold the reference */
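
The scan now tolerates a racing ->release_dquot(): the held reference keeps new release calls away, so waiting on the dquot and re-testing DQ_ACTIVE_B is enough to know whether the callback still applies. Condensed:

        wait_on_dquot(dquot);
        /* release may have finished while we waited; skip inactive dquots */
        if (test_bit(DQ_ACTIVE_B, &dquot->dq_flags)) {
                ret = fn(dquot, priv);
                if (ret < 0)
                        goto out;
        }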
diff --git a/fs/read_write.c b/fs/read_write.c
index edc5746a902a..54e19b9392dc 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -264,10 +264,22 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence)
}
EXPORT_SYMBOL(vfs_llseek);
+static inline struct fd fdget_pos(int fd)
+{
+ return __to_fd(__fdget_pos(fd));
+}
+
+static inline void fdput_pos(struct fd f)
+{
+ if (f.flags & FDPUT_POS_UNLOCK)
+ mutex_unlock(&f.file->f_pos_lock);
+ fdput(f);
+}
+
SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
{
off_t retval;
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
if (!f.file)
return -EBADF;
@@ -278,7 +290,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence)
if (res != (loff_t)retval)
retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */
}
- fdput(f);
+ fdput_pos(f);
return retval;
}
@@ -498,7 +510,7 @@ static inline void file_pos_write(struct file *file, loff_t pos)
SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -506,7 +518,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
ret = vfs_read(f.file, buf, count, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
return ret;
}
@@ -514,7 +526,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count)
SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
size_t, count)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -522,7 +534,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf,
ret = vfs_write(f.file, buf, count, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
return ret;
@@ -797,7 +809,7 @@ EXPORT_SYMBOL(vfs_writev);
SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -805,7 +817,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
ret = vfs_readv(f.file, vec, vlen, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
if (ret > 0)
@@ -817,7 +829,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec,
SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
unsigned long, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret = -EBADF;
if (f.file) {
@@ -825,7 +837,7 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec,
ret = vfs_writev(f.file, vec, vlen, &pos);
if (ret >= 0)
file_pos_write(f.file, pos);
- fdput(f);
+ fdput_pos(f);
}
if (ret > 0)
@@ -968,7 +980,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
const struct compat_iovec __user *,vec,
compat_ulong_t, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret;
loff_t pos;
@@ -978,7 +990,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd,
ret = compat_readv(f.file, vec, vlen, &pos);
if (ret >= 0)
f.file->f_pos = pos;
- fdput(f);
+ fdput_pos(f);
return ret;
}
@@ -1035,7 +1047,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
const struct compat_iovec __user *, vec,
compat_ulong_t, vlen)
{
- struct fd f = fdget(fd);
+ struct fd f = fdget_pos(fd);
ssize_t ret;
loff_t pos;
@@ -1045,7 +1057,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd,
ret = compat_writev(f.file, vec, vlen, &pos);
if (ret >= 0)
f.file->f_pos = pos;
- fdput(f);
+ fdput_pos(f);
return ret;
}
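
Every position-using syscall in this file now follows the same bracket: fdget_pos() wraps __fdget_pos(), which is expected to take f_pos_lock and set FDPUT_POS_UNLOCK when the position needs protecting, and fdput_pos() undoes both. A sketch of the pattern for a hypothetical pos-using syscall body (fd, buf and count as in sys_read):

        struct fd f = fdget_pos(fd);            /* may take f_pos_lock */
        ssize_t ret = -EBADF;

        if (f.file) {
                loff_t pos = file_pos_read(f.file);

                ret = vfs_read(f.file, buf, count, &pos);
                if (ret >= 0)
                        file_pos_write(f.file, pos);    /* still serialized */
                fdput_pos(f);                   /* unlock + fdput */
        }
        return ret;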
diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
index 2b7882b508db..9a3c68cf6026 100644
--- a/fs/reiserfs/do_balan.c
+++ b/fs/reiserfs/do_balan.c
@@ -324,23 +324,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
switch (flag) {
case M_INSERT: /* insert item into L[0] */
- if (item_pos == tb->lnum[0] - 1
- && tb->lbytes != -1) {
+ if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* part of new item falls into L[0] */
int new_item_len;
int version;
- ret_val =
- leaf_shift_left(tb, tb->lnum[0] - 1,
- -1);
+ ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, -1);
/* Calculate item length to insert to S[0] */
- new_item_len =
- ih_item_len(ih) - tb->lbytes;
+ new_item_len = ih_item_len(ih) - tb->lbytes;
/* Calculate and check item length to insert to L[0] */
- put_ih_item_len(ih,
- ih_item_len(ih) -
- new_item_len);
+ put_ih_item_len(ih, ih_item_len(ih) - new_item_len);
RFALSE(ih_item_len(ih) <= 0,
"PAP-12080: there is nothing to insert into L[0]: ih_item_len=%d",
@@ -349,30 +343,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* Insert new item into L[0] */
buffer_info_init_left(tb, &bi);
leaf_insert_into_buf(&bi,
- n + item_pos -
- ret_val, ih, body,
- zeros_num >
- ih_item_len(ih) ?
- ih_item_len(ih) :
- zeros_num);
+ n + item_pos - ret_val, ih, body,
+ zeros_num > ih_item_len(ih) ? ih_item_len(ih) : zeros_num);
version = ih_version(ih);
/* Calculate key component, item length and body to insert into S[0] */
- set_le_ih_k_offset(ih,
- le_ih_k_offset(ih) +
- (tb->
- lbytes <<
- (is_indirect_le_ih
- (ih) ? tb->tb_sb->
- s_blocksize_bits -
- UNFM_P_SHIFT :
- 0)));
+ set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
+ (tb-> lbytes << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
put_ih_item_len(ih, new_item_len);
if (tb->lbytes > zeros_num) {
- body +=
- (tb->lbytes - zeros_num);
+ body += (tb->lbytes - zeros_num);
zeros_num = 0;
} else
zeros_num -= tb->lbytes;
@@ -383,15 +365,10 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
} else {
/* new item in whole falls into L[0] */
/* Shift lnum[0]-1 items to L[0] */
- ret_val =
- leaf_shift_left(tb, tb->lnum[0] - 1,
- tb->lbytes);
+ ret_val = leaf_shift_left(tb, tb->lnum[0] - 1, tb->lbytes);
/* Insert new item into L[0] */
buffer_info_init_left(tb, &bi);
- leaf_insert_into_buf(&bi,
- n + item_pos -
- ret_val, ih, body,
- zeros_num);
+ leaf_insert_into_buf(&bi, n + item_pos - ret_val, ih, body, zeros_num);
tb->insert_size[0] = 0;
zeros_num = 0;
}
@@ -399,264 +376,117 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
case M_PASTE: /* append item in L[0] */
- if (item_pos == tb->lnum[0] - 1
- && tb->lbytes != -1) {
+ if (item_pos == tb->lnum[0] - 1 && tb->lbytes != -1) {
/* we must shift the part of the appended item */
- if (is_direntry_le_ih
- (B_N_PITEM_HEAD(tbS0, item_pos))) {
+ if (is_direntry_le_ih(B_N_PITEM_HEAD(tbS0, item_pos))) {
RFALSE(zeros_num,
"PAP-12090: invalid parameter in case of a directory");
/* directory item */
if (tb->lbytes > pos_in_item) {
/* new directory entry falls into L[0] */
- struct item_head
- *pasted;
- int l_pos_in_item =
- pos_in_item;
+ struct item_head *pasted;
+ int l_pos_in_item = pos_in_item;
/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 entries from given directory item */
- ret_val =
- leaf_shift_left(tb,
- tb->
- lnum
- [0],
- tb->
- lbytes
- -
- 1);
- if (ret_val
- && !item_pos) {
- pasted =
- B_N_PITEM_HEAD
- (tb->L[0],
- B_NR_ITEMS
- (tb->
- L[0]) -
- 1);
- l_pos_in_item +=
- I_ENTRY_COUNT
- (pasted) -
- (tb->
- lbytes -
- 1);
+ ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes-1);
+ if (ret_val && !item_pos) {
+ pasted = B_N_PITEM_HEAD(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1);
+ l_pos_in_item += I_ENTRY_COUNT(pasted) - (tb->lbytes -1);
}
/* Append given directory entry to directory item */
buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer
- (&bi,
- n + item_pos -
- ret_val,
- l_pos_in_item,
- tb->insert_size[0],
- body, zeros_num);
+ leaf_paste_in_buffer(&bi, n + item_pos - ret_val, l_pos_in_item, tb->insert_size[0], body, zeros_num);
/* previous string prepared space for pasting new entry, following string pastes this entry */
/* when we have merge directory item, pos_in_item has been changed too */
/* paste new directory entry. 1 is entry number */
- leaf_paste_entries(&bi,
- n +
- item_pos
- -
- ret_val,
- l_pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)
- body,
- body
- +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, n + item_pos - ret_val, l_pos_in_item,
+ 1, (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
tb->insert_size[0] = 0;
} else {
/* new directory item doesn't fall into L[0] */
/* Shift lnum[0]-1 items in whole. Shift lbytes directory entries from directory item number lnum[0] */
- leaf_shift_left(tb,
- tb->
- lnum[0],
- tb->
- lbytes);
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
}
/* Calculate new position to append in item body */
pos_in_item -= tb->lbytes;
} else {
/* regular object */
- RFALSE(tb->lbytes <= 0,
- "PAP-12095: there is nothing to shift to L[0]. lbytes=%d",
- tb->lbytes);
- RFALSE(pos_in_item !=
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)),
+ RFALSE(tb->lbytes <= 0, "PAP-12095: there is nothing to shift to L[0]. lbytes=%d", tb->lbytes);
+ RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),
"PAP-12100: incorrect position to paste: item_len=%d, pos_in_item=%d",
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)),
- pos_in_item);
+ ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)),pos_in_item);
if (tb->lbytes >= pos_in_item) {
/* appended item will be in L[0] in whole */
int l_n;
/* this bytes number must be appended to the last item of L[h] */
- l_n =
- tb->lbytes -
- pos_in_item;
+ l_n = tb->lbytes - pos_in_item;
/* Calculate new insert_size[0] */
- tb->insert_size[0] -=
- l_n;
+ tb->insert_size[0] -= l_n;
- RFALSE(tb->
- insert_size[0] <=
- 0,
+ RFALSE(tb->insert_size[0] <= 0,
"PAP-12105: there is nothing to paste into L[0]. insert_size=%d",
- tb->
- insert_size[0]);
- ret_val =
- leaf_shift_left(tb,
- tb->
- lnum
- [0],
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0,
- item_pos)));
+ tb->insert_size[0]);
+ ret_val = leaf_shift_left(tb, tb->lnum[0], ih_item_len
+ (B_N_PITEM_HEAD(tbS0, item_pos)));
/* Append to body of item in L[0] */
buffer_info_init_left(tb, &bi);
leaf_paste_in_buffer
- (&bi,
- n + item_pos -
- ret_val,
- ih_item_len
- (B_N_PITEM_HEAD
- (tb->L[0],
- n + item_pos -
- ret_val)), l_n,
- body,
- zeros_num >
- l_n ? l_n :
- zeros_num);
+ (&bi, n + item_pos - ret_val, ih_item_len
+ (B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val)),
+ l_n, body,
+ zeros_num > l_n ? l_n : zeros_num);
/* 0-th item in S0 can be only of DIRECT type when l_n != 0 */
{
int version;
- int temp_l =
- l_n;
-
- RFALSE
- (ih_item_len
- (B_N_PITEM_HEAD
- (tbS0,
- 0)),
+ int temp_l = l_n;
+
+ RFALSE(ih_item_len(B_N_PITEM_HEAD(tbS0, 0)),
"PAP-12106: item length must be 0");
- RFALSE
- (comp_short_le_keys
- (B_N_PKEY
- (tbS0, 0),
- B_N_PKEY
- (tb->L[0],
- n +
- item_pos
- -
- ret_val)),
+ RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY
+ (tb->L[0], n + item_pos - ret_val)),
"PAP-12107: items must be of the same file");
if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val))) {
- temp_l =
- l_n
- <<
- (tb->
- tb_sb->
- s_blocksize_bits
- -
- UNFM_P_SHIFT);
+ temp_l = l_n << (tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT);
}
/* update key of first item in S0 */
- version =
- ih_version
- (B_N_PITEM_HEAD
- (tbS0, 0));
- set_le_key_k_offset
- (version,
- B_N_PKEY
- (tbS0, 0),
- le_key_k_offset
- (version,
- B_N_PKEY
- (tbS0,
- 0)) +
- temp_l);
+ version = ih_version(B_N_PITEM_HEAD(tbS0, 0));
+ set_le_key_k_offset(version, B_N_PKEY(tbS0, 0),
+ le_key_k_offset(version,B_N_PKEY(tbS0, 0)) + temp_l);
/* update left delimiting key */
- set_le_key_k_offset
- (version,
- B_N_PDELIM_KEY
- (tb->
- CFL[0],
- tb->
- lkey[0]),
- le_key_k_offset
- (version,
- B_N_PDELIM_KEY
- (tb->
- CFL[0],
- tb->
- lkey[0]))
- + temp_l);
+ set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
+ le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0])) + temp_l);
}
/* Calculate new body, position in item and insert_size[0] */
if (l_n > zeros_num) {
- body +=
- (l_n -
- zeros_num);
+ body += (l_n - zeros_num);
zeros_num = 0;
} else
- zeros_num -=
- l_n;
+ zeros_num -= l_n;
pos_in_item = 0;
- RFALSE
- (comp_short_le_keys
- (B_N_PKEY(tbS0, 0),
- B_N_PKEY(tb->L[0],
- B_NR_ITEMS
- (tb->
- L[0]) -
- 1))
- ||
- !op_is_left_mergeable
- (B_N_PKEY(tbS0, 0),
- tbS0->b_size)
- ||
- !op_is_left_mergeable
- (B_N_PDELIM_KEY
- (tb->CFL[0],
- tb->lkey[0]),
- tbS0->b_size),
+ RFALSE(comp_short_le_keys(B_N_PKEY(tbS0, 0), B_N_PKEY(tb->L[0], B_NR_ITEMS(tb->L[0]) - 1))
+ || !op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)
+ || !op_is_left_mergeable(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]), tbS0->b_size),
"PAP-12120: item must be merge-able with left neighboring item");
} else { /* only part of the appended item will be in L[0] */
/* Calculate position in item for append in S[0] */
- pos_in_item -=
- tb->lbytes;
+ pos_in_item -= tb->lbytes;
- RFALSE(pos_in_item <= 0,
- "PAP-12125: no place for paste. pos_in_item=%d",
- pos_in_item);
+ RFALSE(pos_in_item <= 0, "PAP-12125: no place for paste. pos_in_item=%d", pos_in_item);
/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
- leaf_shift_left(tb,
- tb->
- lnum[0],
- tb->
- lbytes);
+ leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
}
}
} else { /* appended item will be in L[0] in whole */
@@ -665,52 +495,30 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
if (!item_pos && op_is_left_mergeable(B_N_PKEY(tbS0, 0), tbS0->b_size)) { /* if we paste into first item of S[0] and it is left mergable */
/* then increment pos_in_item by the size of the last item in L[0] */
- pasted =
- B_N_PITEM_HEAD(tb->L[0],
- n - 1);
+ pasted = B_N_PITEM_HEAD(tb->L[0], n - 1);
if (is_direntry_le_ih(pasted))
- pos_in_item +=
- ih_entry_count
- (pasted);
+ pos_in_item += ih_entry_count(pasted);
else
- pos_in_item +=
- ih_item_len(pasted);
+ pos_in_item += ih_item_len(pasted);
}
/* Shift lnum[0] - 1 items in whole. Shift lbytes - 1 byte from item number lnum[0] */
- ret_val =
- leaf_shift_left(tb, tb->lnum[0],
- tb->lbytes);
+ ret_val = leaf_shift_left(tb, tb->lnum[0], tb->lbytes);
/* Append to body of item in L[0] */
buffer_info_init_left(tb, &bi);
- leaf_paste_in_buffer(&bi,
- n + item_pos -
- ret_val,
+ leaf_paste_in_buffer(&bi, n + item_pos - ret_val,
pos_in_item,
tb->insert_size[0],
body, zeros_num);
/* if appended item is directory, paste entry */
- pasted =
- B_N_PITEM_HEAD(tb->L[0],
- n + item_pos -
- ret_val);
+ pasted = B_N_PITEM_HEAD(tb->L[0], n + item_pos - ret_val);
if (is_direntry_le_ih(pasted))
- leaf_paste_entries(&bi,
- n +
- item_pos -
- ret_val,
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, n + item_pos - ret_val,
+ pos_in_item, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE,
+ tb->insert_size[0]);
/* if appended item is indirect item, put unformatted node into un list */
if (is_indirect_le_ih(pasted))
set_ih_free_space(pasted, 0);
@@ -722,13 +530,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
reiserfs_panic(tb->tb_sb, "PAP-12130",
"lnum > 0: unexpected mode: "
" %s(%d)",
- (flag ==
- M_DELETE) ? "DELETE" : ((flag ==
- M_CUT)
- ? "CUT"
- :
- "UNKNOWN"),
- flag);
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
}
} else {
/* new item doesn't fall into L[0] */
@@ -748,14 +550,12 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
case M_INSERT: /* insert item */
if (n - tb->rnum[0] < item_pos) { /* new item or its part falls to R[0] */
if (item_pos == n - tb->rnum[0] + 1 && tb->rbytes != -1) { /* part of new item falls into R[0] */
- loff_t old_key_comp, old_len,
- r_zeros_number;
+ loff_t old_key_comp, old_len, r_zeros_number;
const char *r_body;
int version;
loff_t offset;
- leaf_shift_right(tb, tb->rnum[0] - 1,
- -1);
+ leaf_shift_right(tb, tb->rnum[0] - 1, -1);
version = ih_version(ih);
/* Remember key component and item length */
@@ -763,29 +563,17 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
old_len = ih_item_len(ih);
/* Calculate key component and item length to insert into R[0] */
- offset =
- le_ih_k_offset(ih) +
- ((old_len -
- tb->
- rbytes) << (is_indirect_le_ih(ih)
- ? tb->tb_sb->
- s_blocksize_bits -
- UNFM_P_SHIFT : 0));
+ offset = le_ih_k_offset(ih) + ((old_len - tb->rbytes) << (is_indirect_le_ih(ih) ? tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT : 0));
set_le_ih_k_offset(ih, offset);
put_ih_item_len(ih, tb->rbytes);
/* Insert part of the item into R[0] */
buffer_info_init_right(tb, &bi);
if ((old_len - tb->rbytes) > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + (old_len -
- tb->rbytes) -
- zeros_num;
+ r_body = body + (old_len - tb->rbytes) - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - (old_len -
- tb->rbytes);
+ r_zeros_number = zeros_num - (old_len - tb->rbytes);
zeros_num -= r_zeros_number;
}
@@ -798,25 +586,18 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* Calculate key component and item length to insert into S[0] */
set_le_ih_k_offset(ih, old_key_comp);
- put_ih_item_len(ih,
- old_len - tb->rbytes);
+ put_ih_item_len(ih, old_len - tb->rbytes);
tb->insert_size[0] -= tb->rbytes;
} else { /* whole new item falls into R[0] */
/* Shift rnum[0]-1 items to R[0] */
- ret_val =
- leaf_shift_right(tb,
- tb->rnum[0] - 1,
- tb->rbytes);
+ ret_val = leaf_shift_right(tb, tb->rnum[0] - 1, tb->rbytes);
/* Insert new item into R[0] */
buffer_info_init_right(tb, &bi);
- leaf_insert_into_buf(&bi,
- item_pos - n +
- tb->rnum[0] - 1,
- ih, body,
- zeros_num);
+ leaf_insert_into_buf(&bi, item_pos - n + tb->rnum[0] - 1,
+ ih, body, zeros_num);
if (item_pos - n + tb->rnum[0] - 1 == 0) {
replace_key(tb, tb->CFR[0],
@@ -841,200 +622,97 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
RFALSE(zeros_num,
"PAP-12145: invalid parameter in case of a directory");
- entry_count =
- I_ENTRY_COUNT(B_N_PITEM_HEAD
- (tbS0,
- item_pos));
+ entry_count = I_ENTRY_COUNT(B_N_PITEM_HEAD
+ (tbS0, item_pos));
if (entry_count - tb->rbytes <
pos_in_item)
/* new directory entry falls into R[0] */
{
int paste_entry_position;
- RFALSE(tb->rbytes - 1 >=
- entry_count
- || !tb->
- insert_size[0],
+ RFALSE(tb->rbytes - 1 >= entry_count || !tb-> insert_size[0],
"PAP-12150: no enough of entries to shift to R[0]: rbytes=%d, entry_count=%d",
- tb->rbytes,
- entry_count);
+ tb->rbytes, entry_count);
/* Shift rnum[0]-1 items in whole. Shift rbytes-1 directory entries from directory item number rnum[0] */
- leaf_shift_right(tb,
- tb->
- rnum
- [0],
- tb->
- rbytes
- - 1);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes - 1);
/* Paste given directory entry to directory item */
- paste_entry_position =
- pos_in_item -
- entry_count +
- tb->rbytes - 1;
+ paste_entry_position = pos_in_item - entry_count + tb->rbytes - 1;
buffer_info_init_right(tb, &bi);
- leaf_paste_in_buffer
- (&bi, 0,
- paste_entry_position,
- tb->insert_size[0],
- body, zeros_num);
+ leaf_paste_in_buffer(&bi, 0, paste_entry_position, tb->insert_size[0], body, zeros_num);
/* paste entry */
- leaf_paste_entries(&bi,
- 0,
- paste_entry_position,
- 1,
- (struct
- reiserfs_de_head
- *)
- body,
- body
- +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
-
- if (paste_entry_position
- == 0) {
+ leaf_paste_entries(&bi, 0, paste_entry_position, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
+
+ if (paste_entry_position == 0) {
/* change delimiting keys */
- replace_key(tb,
- tb->
- CFR
- [0],
- tb->
- rkey
- [0],
- tb->
- R
- [0],
- 0);
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0],0);
}
tb->insert_size[0] = 0;
pos_in_item++;
} else { /* new directory entry doesn't fall into R[0] */
- leaf_shift_right(tb,
- tb->
- rnum
- [0],
- tb->
- rbytes);
+ leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
}
} else { /* regular object */
- int n_shift, n_rem,
- r_zeros_number;
+ int n_shift, n_rem, r_zeros_number;
const char *r_body;
/* Calculate number of bytes which must be shifted from appended item */
- if ((n_shift =
- tb->rbytes -
- tb->insert_size[0]) < 0)
+ if ((n_shift = tb->rbytes - tb->insert_size[0]) < 0)
n_shift = 0;
- RFALSE(pos_in_item !=
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)),
+ RFALSE(pos_in_item != ih_item_len
+ (B_N_PITEM_HEAD(tbS0, item_pos)),
"PAP-12155: invalid position to paste. ih_item_len=%d, pos_in_item=%d",
- pos_in_item,
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos)));
-
- leaf_shift_right(tb,
- tb->rnum[0],
- n_shift);
+ pos_in_item, ih_item_len
+ (B_N_PITEM_HEAD(tbS0, item_pos)));
+
+ leaf_shift_right(tb, tb->rnum[0], n_shift);
/* Calculate number of bytes which must remain in body after appending to R[0] */
- if ((n_rem =
- tb->insert_size[0] -
- tb->rbytes) < 0)
+ if ((n_rem = tb->insert_size[0] - tb->rbytes) < 0)
n_rem = 0;
{
int version;
- unsigned long temp_rem =
- n_rem;
-
- version =
- ih_version
- (B_N_PITEM_HEAD
- (tb->R[0], 0));
- if (is_indirect_le_key
- (version,
- B_N_PKEY(tb->R[0],
- 0))) {
- temp_rem =
- n_rem <<
- (tb->tb_sb->
- s_blocksize_bits
- -
- UNFM_P_SHIFT);
+ unsigned long temp_rem = n_rem;
+
+ version = ih_version(B_N_PITEM_HEAD(tb->R[0], 0));
+ if (is_indirect_le_key(version, B_N_PKEY(tb->R[0], 0))) {
+ temp_rem = n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT);
}
- set_le_key_k_offset
- (version,
- B_N_PKEY(tb->R[0],
- 0),
- le_key_k_offset
- (version,
- B_N_PKEY(tb->R[0],
- 0)) +
- temp_rem);
- set_le_key_k_offset
- (version,
- B_N_PDELIM_KEY(tb->
- CFR
- [0],
- tb->
- rkey
- [0]),
- le_key_k_offset
- (version,
- B_N_PDELIM_KEY
- (tb->CFR[0],
- tb->rkey[0])) +
- temp_rem);
+ set_le_key_k_offset(version, B_N_PKEY(tb->R[0], 0),
+ le_key_k_offset(version, B_N_PKEY(tb->R[0], 0)) + temp_rem);
+ set_le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]),
+ le_key_k_offset(version, B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0])) + temp_rem);
}
/* k_offset (B_N_PKEY(tb->R[0],0)) += n_rem;
k_offset (B_N_PDELIM_KEY(tb->CFR[0],tb->rkey[0])) += n_rem;*/
- do_balance_mark_internal_dirty
- (tb, tb->CFR[0], 0);
+ do_balance_mark_internal_dirty(tb, tb->CFR[0], 0);
/* Append part of body into R[0] */
buffer_info_init_right(tb, &bi);
if (n_rem > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + n_rem -
- zeros_num;
+ r_body = body + n_rem - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - n_rem;
- zeros_num -=
- r_zeros_number;
+ r_zeros_number = zeros_num - n_rem;
+ zeros_num -= r_zeros_number;
}
- leaf_paste_in_buffer(&bi, 0,
- n_shift,
- tb->
- insert_size
- [0] -
- n_rem,
- r_body,
- r_zeros_number);
-
- if (is_indirect_le_ih
- (B_N_PITEM_HEAD
- (tb->R[0], 0))) {
+ leaf_paste_in_buffer(&bi, 0, n_shift,
+ tb->insert_size[0] - n_rem,
+ r_body, r_zeros_number);
+
+ if (is_indirect_le_ih(B_N_PITEM_HEAD(tb->R[0], 0))) {
#if 0
RFALSE(n_rem,
"PAP-12160: paste more than one unformatted node pointer");
#endif
- set_ih_free_space
- (B_N_PITEM_HEAD
- (tb->R[0], 0), 0);
+ set_ih_free_space(B_N_PITEM_HEAD(tb->R[0], 0), 0);
}
tb->insert_size[0] = n_rem;
if (!n_rem)
@@ -1044,58 +722,28 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
struct item_head *pasted;
- ret_val =
- leaf_shift_right(tb, tb->rnum[0],
- tb->rbytes);
+ ret_val = leaf_shift_right(tb, tb->rnum[0], tb->rbytes);
/* append item in R[0] */
if (pos_in_item >= 0) {
buffer_info_init_right(tb, &bi);
- leaf_paste_in_buffer(&bi,
- item_pos -
- n +
- tb->
- rnum[0],
- pos_in_item,
- tb->
- insert_size
- [0], body,
- zeros_num);
+ leaf_paste_in_buffer(&bi, item_pos - n + tb->rnum[0], pos_in_item,
+ tb->insert_size[0], body, zeros_num);
}
/* paste new entry, if item is directory item */
- pasted =
- B_N_PITEM_HEAD(tb->R[0],
- item_pos - n +
- tb->rnum[0]);
- if (is_direntry_le_ih(pasted)
- && pos_in_item >= 0) {
- leaf_paste_entries(&bi,
- item_pos -
- n +
- tb->rnum[0],
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ pasted = B_N_PITEM_HEAD(tb->R[0], item_pos - n + tb->rnum[0]);
+ if (is_direntry_le_ih(pasted) && pos_in_item >= 0) {
+ leaf_paste_entries(&bi, item_pos - n + tb->rnum[0],
+ pos_in_item, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
if (!pos_in_item) {
- RFALSE(item_pos - n +
- tb->rnum[0],
+ RFALSE(item_pos - n + tb->rnum[0],
"PAP-12165: directory item must be first item of node when pasting is in 0th position");
/* update delimiting keys */
- replace_key(tb,
- tb->CFR[0],
- tb->rkey[0],
- tb->R[0],
- 0);
+ replace_key(tb, tb->CFR[0], tb->rkey[0], tb->R[0], 0);
}
}
@@ -1111,22 +759,16 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
default: /* cases d and t */
reiserfs_panic(tb->tb_sb, "PAP-12175",
"rnum > 0: unexpected mode: %s(%d)",
- (flag ==
- M_DELETE) ? "DELETE" : ((flag ==
- M_CUT) ? "CUT"
- : "UNKNOWN"),
- flag);
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
}
}
/* tb->rnum[0] > 0 */
RFALSE(tb->blknum[0] > 3,
- "PAP-12180: blknum can not be %d. It must be <= 3",
- tb->blknum[0]);
+ "PAP-12180: blknum can not be %d. It must be <= 3", tb->blknum[0]);
RFALSE(tb->blknum[0] < 0,
- "PAP-12185: blknum can not be %d. It must be >= 0",
- tb->blknum[0]);
+ "PAP-12185: blknum can not be %d. It must be >= 0", tb->blknum[0]);
/* if while adding to a node we discover that it is possible to split
it in two, and merge the left part into the left neighbor and the
@@ -1177,8 +819,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
if (n - snum[i] < item_pos) { /* new item or it's part falls to first new node S_new[i] */
if (item_pos == n - snum[i] + 1 && sbytes[i] != -1) { /* part of new item falls into S_new[i] */
- int old_key_comp, old_len,
- r_zeros_number;
+ int old_key_comp, old_len, r_zeros_number;
const char *r_body;
int version;
@@ -1192,15 +833,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
old_len = ih_item_len(ih);
/* Calculate key component and item length to insert into S_new[i] */
- set_le_ih_k_offset(ih,
- le_ih_k_offset(ih) +
- ((old_len -
- sbytes[i]) <<
- (is_indirect_le_ih
- (ih) ? tb->tb_sb->
- s_blocksize_bits -
- UNFM_P_SHIFT :
- 0)));
+ set_le_ih_k_offset(ih, le_ih_k_offset(ih) +
+ ((old_len - sbytes[i]) << (is_indirect_le_ih(ih) ? tb->tb_sb-> s_blocksize_bits - UNFM_P_SHIFT : 0)));
put_ih_item_len(ih, sbytes[i]);
@@ -1209,39 +843,29 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
if ((old_len - sbytes[i]) > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + (old_len -
- sbytes[i]) -
- zeros_num;
+ r_body = body + (old_len - sbytes[i]) - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - (old_len -
- sbytes[i]);
+ r_zeros_number = zeros_num - (old_len - sbytes[i]);
zeros_num -= r_zeros_number;
}
- leaf_insert_into_buf(&bi, 0, ih, r_body,
- r_zeros_number);
+ leaf_insert_into_buf(&bi, 0, ih, r_body, r_zeros_number);
/* Calculate key component and item length to insert into S[i] */
set_le_ih_k_offset(ih, old_key_comp);
- put_ih_item_len(ih,
- old_len - sbytes[i]);
+ put_ih_item_len(ih, old_len - sbytes[i]);
tb->insert_size[0] -= sbytes[i];
} else { /* whole new item falls into S_new[i] */
/* Shift snum[0] - 1 items to S_new[i] (sbytes[i] of split item) */
leaf_move_items(LEAF_FROM_S_TO_SNEW, tb,
- snum[i] - 1, sbytes[i],
- S_new[i]);
+ snum[i] - 1, sbytes[i], S_new[i]);
/* Insert new item into S_new[i] */
buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_insert_into_buf(&bi,
- item_pos - n +
- snum[i] - 1, ih,
- body, zeros_num);
+ leaf_insert_into_buf(&bi, item_pos - n + snum[i] - 1,
+ ih, body, zeros_num);
zeros_num = tb->insert_size[0] = 0;
}
@@ -1268,150 +892,73 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
int entry_count;
- entry_count =
- ih_entry_count(aux_ih);
+ entry_count = ih_entry_count(aux_ih);
- if (entry_count - sbytes[i] <
- pos_in_item
- && pos_in_item <=
- entry_count) {
+ if (entry_count - sbytes[i] < pos_in_item && pos_in_item <= entry_count) {
/* new directory entry falls into S_new[i] */
- RFALSE(!tb->
- insert_size[0],
- "PAP-12215: insert_size is already 0");
- RFALSE(sbytes[i] - 1 >=
- entry_count,
+ RFALSE(!tb->insert_size[0], "PAP-12215: insert_size is already 0");
+ RFALSE(sbytes[i] - 1 >= entry_count,
"PAP-12220: there are no so much entries (%d), only %d",
- sbytes[i] - 1,
- entry_count);
+ sbytes[i] - 1, entry_count);
/* Shift snum[i]-1 items in whole. Shift sbytes[i] directory entries from directory item number snum[i] */
- leaf_move_items
- (LEAF_FROM_S_TO_SNEW,
- tb, snum[i],
- sbytes[i] - 1,
- S_new[i]);
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], sbytes[i] - 1, S_new[i]);
/* Paste given directory entry to directory item */
buffer_info_init_bh(tb, &bi, S_new[i]);
- leaf_paste_in_buffer
- (&bi, 0,
- pos_in_item -
- entry_count +
- sbytes[i] - 1,
- tb->insert_size[0],
- body, zeros_num);
+ leaf_paste_in_buffer(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1,
+ tb->insert_size[0], body, zeros_num);
/* paste new directory entry */
- leaf_paste_entries(&bi,
- 0,
- pos_in_item
- -
- entry_count
- +
- sbytes
- [i] -
- 1, 1,
- (struct
- reiserfs_de_head
- *)
- body,
- body
- +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, 0, pos_in_item - entry_count + sbytes[i] - 1, 1,
+ (struct reiserfs_de_head *) body,
+ body + DEH_SIZE, tb->insert_size[0]);
tb->insert_size[0] = 0;
pos_in_item++;
} else { /* new directory entry doesn't fall into S_new[i] */
- leaf_move_items
- (LEAF_FROM_S_TO_SNEW,
- tb, snum[i],
- sbytes[i],
- S_new[i]);
+ leaf_move_items(LEAF_FROM_S_TO_SNEW,tb, snum[i], sbytes[i], S_new[i]);
}
} else { /* regular object */
- int n_shift, n_rem,
- r_zeros_number;
+ int n_shift, n_rem, r_zeros_number;
const char *r_body;
- RFALSE(pos_in_item !=
- ih_item_len
- (B_N_PITEM_HEAD
- (tbS0, item_pos))
- || tb->insert_size[0] <=
- 0,
+ RFALSE(pos_in_item != ih_item_len(B_N_PITEM_HEAD(tbS0, item_pos)) || tb->insert_size[0] <= 0,
"PAP-12225: item too short or insert_size <= 0");
/* Calculate number of bytes which must be shifted from appended item */
- n_shift =
- sbytes[i] -
- tb->insert_size[0];
+ n_shift = sbytes[i] - tb->insert_size[0];
if (n_shift < 0)
n_shift = 0;
- leaf_move_items
- (LEAF_FROM_S_TO_SNEW, tb,
- snum[i], n_shift,
- S_new[i]);
+ leaf_move_items(LEAF_FROM_S_TO_SNEW, tb, snum[i], n_shift, S_new[i]);
/* Calculate number of bytes which must remain in body after append to S_new[i] */
- n_rem =
- tb->insert_size[0] -
- sbytes[i];
+ n_rem = tb->insert_size[0] - sbytes[i];
if (n_rem < 0)
n_rem = 0;
/* Append part of body into S_new[0] */
buffer_info_init_bh(tb, &bi, S_new[i]);
if (n_rem > zeros_num) {
r_zeros_number = 0;
- r_body =
- body + n_rem -
- zeros_num;
+ r_body = body + n_rem - zeros_num;
} else {
r_body = body;
- r_zeros_number =
- zeros_num - n_rem;
- zeros_num -=
- r_zeros_number;
+ r_zeros_number = zeros_num - n_rem;
+ zeros_num -= r_zeros_number;
}
- leaf_paste_in_buffer(&bi, 0,
- n_shift,
- tb->
- insert_size
- [0] -
- n_rem,
- r_body,
- r_zeros_number);
+ leaf_paste_in_buffer(&bi, 0, n_shift,
+ tb->insert_size[0] - n_rem,
+ r_body, r_zeros_number);
{
struct item_head *tmp;
- tmp =
- B_N_PITEM_HEAD(S_new
- [i],
- 0);
+ tmp = B_N_PITEM_HEAD(S_new[i], 0);
if (is_indirect_le_ih
(tmp)) {
- set_ih_free_space
- (tmp, 0);
- set_le_ih_k_offset
- (tmp,
- le_ih_k_offset
- (tmp) +
- (n_rem <<
- (tb->
- tb_sb->
- s_blocksize_bits
- -
- UNFM_P_SHIFT)));
+ set_ih_free_space(tmp, 0);
+ set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + (n_rem << (tb->tb_sb->s_blocksize_bits - UNFM_P_SHIFT)));
} else {
- set_le_ih_k_offset
- (tmp,
- le_ih_k_offset
- (tmp) +
- n_rem);
+ set_le_ih_k_offset(tmp, le_ih_k_offset(tmp) + n_rem);
}
}
@@ -1426,8 +973,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
struct item_head *pasted;
#ifdef CONFIG_REISERFS_CHECK
- struct item_head *ih_check =
- B_N_PITEM_HEAD(tbS0, item_pos);
+ struct item_head *ih_check = B_N_PITEM_HEAD(tbS0, item_pos);
if (!is_direntry_le_ih(ih_check)
&& (pos_in_item != ih_item_len(ih_check)
@@ -1439,8 +985,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
"to ih_item_len");
#endif /* CONFIG_REISERFS_CHECK */
- leaf_mi =
- leaf_move_items(LEAF_FROM_S_TO_SNEW,
+ leaf_mi = leaf_move_items(LEAF_FROM_S_TO_SNEW,
tb, snum[i],
sbytes[i],
S_new[i]);
@@ -1452,30 +997,19 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* paste into item */
buffer_info_init_bh(tb, &bi, S_new[i]);
leaf_paste_in_buffer(&bi,
- item_pos - n +
- snum[i],
+ item_pos - n + snum[i],
pos_in_item,
tb->insert_size[0],
body, zeros_num);
- pasted =
- B_N_PITEM_HEAD(S_new[i],
- item_pos - n +
- snum[i]);
+ pasted = B_N_PITEM_HEAD(S_new[i], item_pos - n + snum[i]);
if (is_direntry_le_ih(pasted)) {
leaf_paste_entries(&bi,
- item_pos -
- n + snum[i],
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
+ item_pos - n + snum[i],
+ pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE,
+ tb->insert_size[0]
);
}
@@ -1495,11 +1029,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
default: /* cases d and t */
reiserfs_panic(tb->tb_sb, "PAP-12245",
"blknum > 2: unexpected mode: %s(%d)",
- (flag ==
- M_DELETE) ? "DELETE" : ((flag ==
- M_CUT) ? "CUT"
- : "UNKNOWN"),
- flag);
+ (flag == M_DELETE) ? "DELETE" : ((flag == M_CUT) ? "CUT" : "UNKNOWN"), flag);
}
memcpy(insert_key + i, B_N_PKEY(S_new[i], 0), KEY_SIZE);
@@ -1524,9 +1054,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
/* If we insert the first key change the delimiting key */
if (item_pos == 0) {
if (tb->CFL[0]) /* can be 0 in reiserfsck */
- replace_key(tb, tb->CFL[0], tb->lkey[0],
- tbS0, 0);
-
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
}
break;
@@ -1536,53 +1064,27 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
pasted = B_N_PITEM_HEAD(tbS0, item_pos);
/* when directory, may be new entry already pasted */
if (is_direntry_le_ih(pasted)) {
- if (pos_in_item >= 0 &&
- pos_in_item <=
- ih_entry_count(pasted)) {
+ if (pos_in_item >= 0 && pos_in_item <= ih_entry_count(pasted)) {
RFALSE(!tb->insert_size[0],
"PAP-12260: insert_size is 0 already");
/* prepare space */
buffer_info_init_tbS0(tb, &bi);
- leaf_paste_in_buffer(&bi,
- item_pos,
- pos_in_item,
- tb->
- insert_size
- [0], body,
+ leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
+ tb->insert_size[0], body,
zeros_num);
/* paste entry */
- leaf_paste_entries(&bi,
- item_pos,
- pos_in_item,
- 1,
- (struct
- reiserfs_de_head
- *)body,
- body +
- DEH_SIZE,
- tb->
- insert_size
- [0]
- );
+ leaf_paste_entries(&bi, item_pos, pos_in_item, 1,
+ (struct reiserfs_de_head *)body,
+ body + DEH_SIZE,
+ tb->insert_size[0]);
if (!item_pos && !pos_in_item) {
- RFALSE(!tb->CFL[0]
- || !tb->L[0],
+ RFALSE(!tb->CFL[0] || !tb->L[0],
"PAP-12270: CFL[0]/L[0] must be specified");
- if (tb->CFL[0]) {
- replace_key(tb,
- tb->
- CFL
- [0],
- tb->
- lkey
- [0],
- tbS0,
- 0);
-
- }
+ if (tb->CFL[0])
+ replace_key(tb, tb->CFL[0], tb->lkey[0], tbS0, 0);
}
tb->insert_size[0] = 0;
}
@@ -1593,13 +1095,8 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
"PAP-12275: insert size must not be %d",
tb->insert_size[0]);
buffer_info_init_tbS0(tb, &bi);
- leaf_paste_in_buffer(&bi,
- item_pos,
- pos_in_item,
- tb->
- insert_size
- [0], body,
- zeros_num);
+ leaf_paste_in_buffer(&bi, item_pos, pos_in_item,
+ tb->insert_size[0], body, zeros_num);
if (is_indirect_le_ih(pasted)) {
#if 0
@@ -1611,8 +1108,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
tb->
insert_size[0]);
#endif
- set_ih_free_space
- (pasted, 0);
+ set_ih_free_space(pasted, 0);
}
tb->insert_size[0] = 0;
}
@@ -1620,8 +1116,7 @@ static int balance_leaf(struct tree_balance *tb, struct item_head *ih, /* item h
else {
if (tb->insert_size[0]) {
print_cur_tb("12285");
- reiserfs_panic(tb->
- tb_sb,
+ reiserfs_panic(tb->tb_sb,
"PAP-12285",
"insert_size "
"must be 0 "
diff --git a/fs/sync.c b/fs/sync.c
index f15537452231..b28d1dd10e8b 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -27,11 +27,10 @@
* wait == 1 case since in that case write_inode() functions do
* sync_dirty_buffer() and thus effectively write one block at a time.
*/
-static int __sync_filesystem(struct super_block *sb, int wait,
- unsigned long start)
+static int __sync_filesystem(struct super_block *sb, int wait)
{
if (wait)
- sync_inodes_sb(sb, start);
+ sync_inodes_sb(sb);
else
writeback_inodes_sb(sb, WB_REASON_SYNC);
@@ -48,7 +47,6 @@ static int __sync_filesystem(struct super_block *sb, int wait,
int sync_filesystem(struct super_block *sb)
{
int ret;
- unsigned long start = jiffies;
/*
* We need to be protected against the filesystem going from
@@ -62,17 +60,17 @@ int sync_filesystem(struct super_block *sb)
if (sb->s_flags & MS_RDONLY)
return 0;
- ret = __sync_filesystem(sb, 0, start);
+ ret = __sync_filesystem(sb, 0);
if (ret < 0)
return ret;
- return __sync_filesystem(sb, 1, start);
+ return __sync_filesystem(sb, 1);
}
EXPORT_SYMBOL_GPL(sync_filesystem);
static void sync_inodes_one_sb(struct super_block *sb, void *arg)
{
if (!(sb->s_flags & MS_RDONLY))
- sync_inodes_sb(sb, *((unsigned long *)arg));
+ sync_inodes_sb(sb);
}
static void sync_fs_one_sb(struct super_block *sb, void *arg)
@@ -104,10 +102,9 @@ static void fdatawait_one_bdev(struct block_device *bdev, void *arg)
SYSCALL_DEFINE0(sync)
{
int nowait = 0, wait = 1;
- unsigned long start = jiffies;
wakeup_flusher_threads(0, WB_REASON_SYNC);
- iterate_supers(sync_inodes_one_sb, &start);
+ iterate_supers(sync_inodes_one_sb, NULL);
iterate_supers(sync_fs_one_sb, &nowait);
iterate_supers(sync_fs_one_sb, &wait);
iterate_bdevs(fdatawrite_one_bdev, NULL);
@@ -222,23 +219,6 @@ SYSCALL_DEFINE1(fdatasync, unsigned int, fd)
return do_fsync(fd, 1);
}
-/**
- * generic_write_sync - perform syncing after a write if file / inode is sync
- * @file: file to which the write happened
- * @pos: offset where the write started
- * @count: length of the write
- *
- * This is just a simple wrapper about our general syncing function.
- */
-int generic_write_sync(struct file *file, loff_t pos, loff_t count)
-{
- if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
- return 0;
- return vfs_fsync_range(file, pos, pos + count - 1,
- (file->f_flags & __O_SYNC) ? 0 : 1);
-}
-EXPORT_SYMBOL(generic_write_sync);
-
/*
* sys_sync_file_range() permits finely controlled syncing over a segment of
* a file in the range offset .. (offset+nbytes-1) inclusive. If nbytes is
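
generic_write_sync() disappears from fs/sync.c here but is still called by the udf and xfs hunks below, so it is presumably relocated elsewhere in this series rather than dropped. What does change for callers is the offset: they now pass the start of the range actually written, recomputed from the post-write iocb position, as in this condensed form of the udf/xfs pattern:

        written = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
        if (written > 0) {
                ssize_t err;

                /* sync exactly the range that was just written */
                err = generic_write_sync(file, iocb->ki_pos - written, written);
                if (err < 0)
                        written = err;
        }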
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 6211230814fd..3eaf5c6622eb 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -27,6 +27,7 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
{
struct dentry *root;
void *ns;
+ bool new_sb;
if (!(flags & MS_KERNMOUNT)) {
if (!capable(CAP_SYS_ADMIN) && !fs_fully_visible(fs_type))
@@ -37,8 +38,8 @@ static struct dentry *sysfs_mount(struct file_system_type *fs_type,
}
ns = kobj_ns_grab_current(KOBJ_NS_TYPE_NET);
- root = kernfs_mount_ns(fs_type, flags, sysfs_root, ns);
- if (IS_ERR(root))
+ root = kernfs_mount_ns(fs_type, flags, sysfs_root, &new_sb, ns);
+ if (IS_ERR(root) || !new_sb)
kobj_ns_drop(KOBJ_NS_TYPE_NET, ns);
return root;
}
diff --git a/fs/udf/file.c b/fs/udf/file.c
index c02a27a19c6d..1037637957c7 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -144,6 +144,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
size_t count = iocb->ki_nbytes;
struct udf_inode_info *iinfo = UDF_I(inode);
+ mutex_lock(&inode->i_mutex);
down_write(&iinfo->i_data_sem);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
if (file->f_flags & O_APPEND)
@@ -156,6 +157,7 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
pos + count)) {
err = udf_expand_file_adinicb(inode);
if (err) {
+ mutex_unlock(&inode->i_mutex);
udf_debug("udf_expand_adinicb: err=%d\n", err);
return err;
}
@@ -169,9 +171,17 @@ static ssize_t udf_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
} else
up_write(&iinfo->i_data_sem);
- retval = generic_file_aio_write(iocb, iov, nr_segs, ppos);
- if (retval > 0)
+ retval = __generic_file_aio_write(iocb, iov, nr_segs, &iocb->ki_pos);
+ mutex_unlock(&inode->i_mutex);
+
+ if (retval > 0) {
+ ssize_t err;
+
mark_inode_dirty(inode);
+ err = generic_write_sync(file, iocb->ki_pos - retval, retval);
+ if (err < 0)
+ retval = err;
+ }
return retval;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 062b7925bca0..982ce05c87ed 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -265,6 +265,7 @@ int udf_expand_file_adinicb(struct inode *inode)
.nr_to_write = 1,
};
+ WARN_ON_ONCE(!mutex_is_locked(&inode->i_mutex));
if (!iinfo->i_lenAlloc) {
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 2e7989e3a2d6..64b48eade91d 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -799,7 +799,7 @@ xfs_file_aio_write(
XFS_STATS_ADD(xs_write_bytes, ret);
/* Handle various SYNC-type writes */
- err = generic_write_sync(file, pos, ret);
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
if (err < 0)
ret = err;
}
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index f35d5c953ff9..9ddfb8190ca1 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -705,7 +705,6 @@ xfs_setattr_size(
{
struct xfs_mount *mp = ip->i_mount;
struct inode *inode = VFS_I(ip);
- int mask = iattr->ia_valid;
xfs_off_t oldsize, newsize;
struct xfs_trans *tp;
int error;
@@ -726,8 +725,8 @@ xfs_setattr_size(
ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
ASSERT(S_ISREG(ip->i_d.di_mode));
- ASSERT((mask & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
- ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
+ ASSERT((iattr->ia_valid & (ATTR_UID|ATTR_GID|ATTR_ATIME|ATTR_ATIME_SET|
+ ATTR_MTIME_SET|ATTR_KILL_PRIV|ATTR_TIMES_SET)) == 0);
oldsize = inode->i_size;
newsize = iattr->ia_size;
@@ -736,7 +735,7 @@ xfs_setattr_size(
* Short circuit the truncate case for zero length files.
*/
if (newsize == 0 && oldsize == 0 && ip->i_d.di_nextents == 0) {
- if (!(mask & (ATTR_CTIME|ATTR_MTIME)))
+ if (!(iattr->ia_valid & (ATTR_CTIME|ATTR_MTIME)))
return 0;
/*
@@ -824,10 +823,11 @@ xfs_setattr_size(
* these flags set. For all other operations the VFS set these flags
* explicitly if it wants a timestamp update.
*/
- if (newsize != oldsize && (!(mask & (ATTR_CTIME | ATTR_MTIME)))) {
+ if (newsize != oldsize &&
+ !(iattr->ia_valid & (ATTR_CTIME | ATTR_MTIME))) {
iattr->ia_ctime = iattr->ia_mtime =
current_fs_time(inode->i_sb);
- mask |= ATTR_CTIME | ATTR_MTIME;
+ iattr->ia_valid |= ATTR_CTIME | ATTR_MTIME;
}
/*
@@ -863,9 +863,9 @@ xfs_setattr_size(
xfs_inode_clear_eofblocks_tag(ip);
}
- if (mask & ATTR_MODE)
+ if (iattr->ia_valid & ATTR_MODE)
xfs_setattr_mode(ip, iattr);
- if (mask & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
+ if (iattr->ia_valid & (ATTR_ATIME|ATTR_CTIME|ATTR_MTIME))
xfs_setattr_time(ip, iattr);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index cdebd832c3db..4ef6fdbced78 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -205,16 +205,25 @@ xlog_cil_insert_format_items(
/*
* We 64-bit align the length of each iovec so that the start
* of the next one is naturally aligned. We'll need to
- * account for that slack space here.
+ * account for that slack space here. Then round nbytes up
+ * to 64-bit alignment so that the initial buffer alignment is
+ * easy to calculate and verify.
*/
nbytes += niovecs * sizeof(uint64_t);
+ nbytes = round_up(nbytes, sizeof(uint64_t));
/* grab the old item if it exists for reservation accounting */
old_lv = lip->li_lv;
- /* calc buffer size */
- buf_size = sizeof(struct xfs_log_vec) + nbytes +
- niovecs * sizeof(struct xfs_log_iovec);
+ /*
+ * The data buffer needs to start 64-bit aligned, so round up
+ * that space to ensure we can align it appropriately and not
+ * overrun the buffer.
+ */
+ buf_size = nbytes +
+ round_up((sizeof(struct xfs_log_vec) +
+ niovecs * sizeof(struct xfs_log_iovec)),
+ sizeof(uint64_t));
/* compare to existing item size */
if (lip->li_lv && buf_size <= lip->li_lv->lv_size) {
@@ -251,6 +260,8 @@ xlog_cil_insert_format_items(
/* The allocated data region lies beyond the iovec region */
lv->lv_buf_len = 0;
lv->lv_buf = (char *)lv + buf_size - nbytes;
+ ASSERT(IS_ALIGNED((unsigned long)lv->lv_buf, sizeof(uint64_t)));
+
lip->li_ops->iop_format(lip, lv);
insert:
ASSERT(lv->lv_buf_len <= nbytes);
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 02df7b408a26..f96c05669a9e 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -282,22 +282,29 @@ xfs_readsb(
struct xfs_sb *sbp = &mp->m_sb;
int error;
int loud = !(flags & XFS_MFSI_QUIET);
+ const struct xfs_buf_ops *buf_ops;
ASSERT(mp->m_sb_bp == NULL);
ASSERT(mp->m_ddev_targp != NULL);
/*
+ * For the initial read, we must guess at the sector
+ * size based on the block device. It's enough to
+ * get the sb_sectsize out of the superblock and
+ * then reread with the proper length.
+ * We don't verify it yet, because it may not be complete.
+ */
+ sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
+ buf_ops = NULL;
+
+ /*
* Allocate a (locked) buffer to hold the superblock.
* This will be kept around at all times to optimize
* access to the superblock.
*/
- sector_size = xfs_getsize_buftarg(mp->m_ddev_targp);
-
reread:
bp = xfs_buf_read_uncached(mp->m_ddev_targp, XFS_SB_DADDR,
- BTOBB(sector_size), 0,
- loud ? &xfs_sb_buf_ops
- : &xfs_sb_quiet_buf_ops);
+ BTOBB(sector_size), 0, buf_ops);
if (!bp) {
if (loud)
xfs_warn(mp, "SB buffer read failed");
@@ -328,12 +335,13 @@ reread:
}
/*
- * If device sector size is smaller than the superblock size,
- * re-read the superblock so the buffer is correctly sized.
+ * Re-read the superblock so the buffer is correctly sized,
+ * and properly verified.
*/
- if (sector_size < sbp->sb_sectsize) {
+ if (buf_ops == NULL) {
xfs_buf_relse(bp);
sector_size = sbp->sb_sectsize;
+ buf_ops = loud ? &xfs_sb_buf_ops : &xfs_sb_quiet_buf_ops;
goto reread;
}
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index b7c9aea77f8f..1e116794bb66 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -295,8 +295,7 @@ xfs_mount_validate_sb(
sbp->sb_dblocks == 0 ||
sbp->sb_dblocks > XFS_MAX_DBLOCKS(sbp) ||
sbp->sb_dblocks < XFS_MIN_DBLOCKS(sbp))) {
- XFS_CORRUPTION_ERROR("SB sanity check failed",
- XFS_ERRLEVEL_LOW, mp, sbp);
+ xfs_notice(mp, "SB sanity check failed");
return XFS_ERROR(EFSCORRUPTED);
}
@@ -611,10 +610,10 @@ xfs_sb_read_verify(
XFS_SB_VERSION_5) ||
dsb->sb_crc != 0)) {
- if (!xfs_verify_cksum(bp->b_addr, be16_to_cpu(dsb->sb_sectsize),
+ if (!xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
offsetof(struct xfs_sb, sb_crc))) {
/* Only fail bad secondaries on a known V5 filesystem */
- if (bp->b_bn != XFS_SB_DADDR &&
+ if (bp->b_bn == XFS_SB_DADDR ||
xfs_sb_version_hascrc(&mp->m_sb)) {
error = EFSCORRUPTED;
goto out_error;
@@ -625,7 +624,7 @@ xfs_sb_read_verify(
out_error:
if (error) {
- if (error != EWRONGFS)
+ if (error == EFSCORRUPTED)
XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW,
mp, bp->b_addr);
xfs_buf_ioerror(bp, error);
@@ -644,7 +643,6 @@ xfs_sb_quiet_read_verify(
{
struct xfs_dsb *dsb = XFS_BUF_TO_SBP(bp);
-
if (dsb->sb_magicnum == cpu_to_be32(XFS_SB_MAGIC)) {
/* XFS filesystem, verify noisily! */
xfs_sb_read_verify(bp);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index f317488263dd..d971f4932b5d 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -913,7 +913,7 @@ xfs_flush_inodes(
struct super_block *sb = mp->m_super;
if (down_read_trylock(&sb->s_umount)) {
- sync_inodes_sb(sb, jiffies);
+ sync_inodes_sb(sb);
up_read(&sb->s_umount);
}
}
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 8e4f41d9af4d..34c7bdc06014 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -701,6 +701,18 @@ static inline pte_t pte_mknuma(pte_t pte)
}
#endif
+#ifndef ptep_set_numa
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t ptent = *ptep;
+
+ ptent = pte_mknuma(ptent);
+ set_pte_at(mm, addr, ptep, ptent);
+ return;
+}
+#endif
+
#ifndef pmd_mknuma
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
@@ -708,6 +720,18 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
return pmd_clear_flags(pmd, _PAGE_PRESENT);
}
#endif
+
+#ifndef pmdp_set_numa
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ pmd_t pmd = *pmdp;
+
+ pmd = pmd_mknuma(pmd);
+ set_pmd_at(mm, addr, pmdp, pmd);
+ return;
+}
+#endif
#else
extern int pte_numa(pte_t pte);
extern int pmd_numa(pmd_t pmd);
@@ -715,6 +739,8 @@ extern pte_t pte_mknonnuma(pte_t pte);
extern pmd_t pmd_mknonnuma(pmd_t pmd);
extern pte_t pte_mknuma(pte_t pte);
extern pmd_t pmd_mknuma(pmd_t pmd);
+extern void ptep_set_numa(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
+extern void pmdp_set_numa(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp);
#endif /* CONFIG_ARCH_USES_NUMA_PROT_NONE */
#else
static inline int pmd_numa(pmd_t pmd)
@@ -742,10 +768,23 @@ static inline pte_t pte_mknuma(pte_t pte)
return pte;
}
+static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ return;
+}
+
+
static inline pmd_t pmd_mknuma(pmd_t pmd)
{
return pmd;
}
+
+static inline void pmdp_set_numa(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp)
+{
+ return;
+}
#endif /* CONFIG_NUMA_BALANCING */
#endif /* CONFIG_MMU */
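
A minimal sketch of how a caller such as change_pte_range() might use the new helper instead of open-coding pte_mknuma() plus set_pte_at(); this assumes CONFIG_NUMA_BALANCING and that the pte lock is held, and the wrapper name mark_pte_numa is hypothetical:

#include <linux/mm.h>
#include <asm/pgtable.h>

static void mark_pte_numa(struct vm_area_struct *vma, unsigned long addr,
                          pte_t *pte)
{
        /* ptep_set_numa() reads *pte, applies pte_mknuma() and writes the
         * result back with set_pte_at() in one place */
        if (!pte_numa(*pte))
                ptep_set_numa(vma->vm_mm, addr, pte);
}
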
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 04086c5be930..04a7f31301f8 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -199,6 +199,9 @@ int drm_err(const char *func, const char *format, ...);
#define DRM_INFO(fmt, ...) \
printk(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+#define DRM_INFO_ONCE(fmt, ...) \
+ printk_once(KERN_INFO "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
/**
* Debug output.
*
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 71727b6210ae..8f3dee097579 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -907,6 +907,9 @@ struct drm_mode_config {
/* whether async page flip is supported or not */
bool async_page_flip;
+
+ /* cursor size */
+ uint32_t cursor_width, cursor_height;
};
#define obj_to_crtc(x) container_of(x, struct drm_crtc, base)
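
A sketch of how a DRM driver's mode-setting init might fill in the new cursor size fields; the function name and the 64x64 values are illustrative only:

#include <drm/drmP.h>
#include <drm/drm_crtc.h>

static void mydrv_mode_config_init(struct drm_device *dev)
{
        drm_mode_config_init(dev);

        /* advertise the hardware cursor dimensions to userspace */
        dev->mode_config.cursor_width = 64;
        dev->mode_config.cursor_height = 64;
}
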
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index d1f61bfe0ebe..49a828425fa2 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -29,6 +29,8 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_memory.h>
+struct device;
+
/**
* Initialize pool allocator.
*/
diff --git a/include/dt-bindings/clock/tegra124-car.h b/include/dt-bindings/clock/tegra124-car.h
index a1116a3b54ef..8c1603b10665 100644
--- a/include/dt-bindings/clock/tegra124-car.h
+++ b/include/dt-bindings/clock/tegra124-car.h
@@ -36,10 +36,10 @@
#define TEGRA124_CLK_PWM 17
#define TEGRA124_CLK_I2S2 18
/* 20 (register bit affects vi and vi_sensor) */
-#define TEGRA124_CLK_GR_2D 21
+/* 21 */
#define TEGRA124_CLK_USBD 22
#define TEGRA124_CLK_ISP 23
-#define TEGRA124_CLK_GR_3D 24
+/* 24 */
/* 25 */
#define TEGRA124_CLK_DISP2 26
#define TEGRA124_CLK_DISP1 27
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index be85127bfed3..f27000f55a83 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add
return 0;
}
+static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write)
+{
+ return -ENXIO;
+}
+
static inline int kvm_vgic_init(struct kvm *kvm)
{
return 0;
diff --git a/include/linux/audit.h b/include/linux/audit.h
index aa865a9a4c4f..ec1464df4c60 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -43,6 +43,7 @@ struct mq_attr;
struct mqstat;
struct audit_watch;
struct audit_tree;
+struct sk_buff;
struct audit_krule {
int vers_ops;
@@ -463,7 +464,7 @@ extern int audit_filter_user(int type);
extern int audit_filter_type(int type);
extern int audit_rule_change(int type, __u32 portid, int seq,
void *data, size_t datasz);
-extern int audit_list_rules_send(__u32 portid, int seq);
+extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
extern u32 audit_enabled;
#else /* CONFIG_AUDIT */
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index fd8bf3219ef7..b4a745d7d9a9 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -115,7 +115,6 @@ extern int copy_strings_kernel(int argc, const char *const *argv,
extern int prepare_bprm_creds(struct linux_binprm *bprm);
extern void install_exec_creds(struct linux_binprm *bprm);
extern void set_binfmt(struct linux_binfmt *new);
-extern void free_bprm(struct linux_binprm *);
extern ssize_t read_code(struct file *, unsigned long, loff_t, size_t);
#endif /* _LINUX_BINFMTS_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 70654521dab6..5a4d39b4686b 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -250,6 +250,17 @@ static inline unsigned bio_segments(struct bio *bio)
struct bio_vec bv;
struct bvec_iter iter;
+ /*
+ * We special case discard/write same, because they interpret bi_size
+ * differently:
+ */
+
+ if (bio->bi_rw & REQ_DISCARD)
+ return 1;
+
+ if (bio->bi_rw & REQ_WRITE_SAME)
+ return 1;
+
bio_for_each_segment(bv, bio, iter)
segs++;
@@ -332,6 +343,7 @@ extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set *fs_bio_set;
+unsigned int bio_integrity_tag_size(struct bio *bio);
static inline struct bio *bio_alloc(gfp_t gfp_mask, unsigned int nr_iovecs)
{
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 161b23105b1e..2ff2e8d982be 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -83,6 +83,8 @@ struct blk_mq_ops {
*/
rq_timed_out_fn *timeout;
+ softirq_done_fn *complete;
+
/*
* Override for hctx allocations (should probably go)
*/
@@ -119,11 +121,11 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
-void blk_mq_insert_request(struct request_queue *, struct request *, bool);
+void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, bool reserved);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
@@ -131,7 +133,15 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
-void blk_mq_end_io(struct request *rq, int error);
+bool blk_mq_end_io_partial(struct request *rq, int error,
+ unsigned int nr_bytes);
+static inline void blk_mq_end_io(struct request *rq, int error)
+{
+ bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
+ BUG_ON(!done);
+}
+
+void blk_mq_complete_request(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
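
A sketch of how a blk-mq driver might use the partial-completion interface shown above: blk_mq_end_io() finishes the whole request, while blk_mq_end_io_partial() returns true when bytes remain outstanding. The driver name is hypothetical:

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void mydrv_complete(struct request *rq, int error,
                           unsigned int bytes_done)
{
        if (bytes_done == blk_rq_bytes(rq)) {
                /* whole request done; the inline wrapper asserts that
                 * nothing is left outstanding */
                blk_mq_end_io(rq, error);
        } else if (blk_mq_end_io_partial(rq, error, bytes_done)) {
                /* bytes remain; the driver completes the rest later */
        }
}
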
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8678c4322b44..4afa4f8f6090 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -98,7 +98,7 @@ struct request {
struct list_head queuelist;
union {
struct call_single_data csd;
- struct work_struct mq_flush_data;
+ struct work_struct mq_flush_work;
};
struct request_queue *q;
@@ -448,13 +448,8 @@ struct request_queue {
unsigned long flush_pending_since;
struct list_head flush_queue[2];
struct list_head flush_data_in_flight;
- union {
- struct request flush_rq;
- struct {
- spinlock_t mq_flush_lock;
- struct work_struct mq_flush_work;
- };
- };
+ struct request *flush_rq;
+ spinlock_t mq_flush_lock;
struct mutex sysfs_lock;
diff --git a/include/linux/can/skb.h b/include/linux/can/skb.h
index 2f0543f7510c..f9bbbb472663 100644
--- a/include/linux/can/skb.h
+++ b/include/linux/can/skb.h
@@ -11,7 +11,9 @@
#define CAN_SKB_H
#include <linux/types.h>
+#include <linux/skbuff.h>
#include <linux/can.h>
+#include <net/sock.h>
/*
* The struct can_skb_priv is used to transport additional information along
@@ -42,4 +44,40 @@ static inline void can_skb_reserve(struct sk_buff *skb)
skb_reserve(skb, sizeof(struct can_skb_priv));
}
+static inline void can_skb_destructor(struct sk_buff *skb)
+{
+ sock_put(skb->sk);
+}
+
+static inline void can_skb_set_owner(struct sk_buff *skb, struct sock *sk)
+{
+ if (sk) {
+ sock_hold(sk);
+ skb->destructor = can_skb_destructor;
+ skb->sk = sk;
+ }
+}
+
+/*
+ * returns an unshared skb owned by the original sock to be echo'ed back
+ */
+static inline struct sk_buff *can_create_echo_skb(struct sk_buff *skb)
+{
+ if (skb_shared(skb)) {
+ struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
+
+ if (likely(nskb)) {
+ can_skb_set_owner(nskb, skb->sk);
+ consume_skb(skb);
+ return nskb;
+ } else {
+ kfree_skb(skb);
+ return NULL;
+ }
+ }
+
+ /* we can assume to have an unshared skb with proper owner */
+ return skb;
+}
+
#endif /* CAN_SKB_H */
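
A sketch of how a CAN driver's ndo_start_xmit might use can_create_echo_skb() before queueing the frame for local echo; the driver name and echo index are hypothetical:

#include <linux/netdevice.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>

static netdev_tx_t mycan_start_xmit(struct sk_buff *skb,
                                    struct net_device *dev)
{
        /* make sure we own an unshared skb with skb->sk set, so the echo
         * frame stays attributed to the sending socket */
        skb = can_create_echo_skb(skb);
        if (!skb)
                return NETDEV_TX_OK;

        /* hand the skb to the echo machinery, then start the hardware
         * transmission here ... */
        can_put_echo_skb(skb, dev, 0);

        return NETDEV_TX_OK;
}
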
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 2623cffc73a1..25bfb0eff772 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -373,8 +373,9 @@ extern const char *ceph_mds_op_name(int op);
/*
* Ceph setxattr request flags.
*/
-#define CEPH_XATTR_CREATE 1
-#define CEPH_XATTR_REPLACE 2
+#define CEPH_XATTR_CREATE (1 << 0)
+#define CEPH_XATTR_REPLACE (1 << 1)
+#define CEPH_XATTR_REMOVE (1 << 31)
union ceph_mds_request_args {
struct {
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 5c097596104b..9450f025fe0c 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -166,6 +166,8 @@ struct cgroup {
*
* The ID of the root cgroup is always 0, and a new cgroup
* will be assigned with a smallest available ID.
+ *
+ * Allocating/Removing ID must be protected by cgroup_mutex.
*/
int id;
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 092b64168d7f..4a21a872dbbd 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -245,6 +245,10 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate,
void omap2_init_clk_clkdm(struct clk_hw *clk);
unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
unsigned long parent_rate);
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate);
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *prate);
int omap2_clkops_enable_clkdm(struct clk_hw *hw);
void omap2_clkops_disable_clkdm(struct clk_hw *hw);
int omap2_clk_disable_autoidle_all(void);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index ded429966c1f..2507fd2a1eb4 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -75,11 +75,7 @@
*
* (asm goto is automatically volatile - the naming reflects this.)
*/
-#if GCC_VERSION <= 40801
-# define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
-#else
-# define asm_volatile_goto(x...) do { asm goto(x); } while (0)
-#endif
+#define asm_volatile_goto(x...) do { asm goto(x); asm (""); } while (0)
#ifdef CONFIG_ARCH_USE_BUILTIN_BSWAP
#if GCC_VERSION >= 40400
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index dfac5ed31120..f886985a28b2 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -171,7 +171,7 @@ struct dma_buf *dma_buf_export_named(void *priv, const struct dma_buf_ops *ops,
size_t size, int flags, const char *);
#define dma_buf_export(priv, ops, size, flags) \
- dma_buf_export_named(priv, ops, size, flags, __FILE__)
+ dma_buf_export_named(priv, ops, size, flags, KBUILD_MODNAME)
int dma_buf_fd(struct dma_buf *dmabuf, int flags);
struct dma_buf *dma_buf_get(int fd);
diff --git a/include/linux/file.h b/include/linux/file.h
index cbacf4faf447..4d69123377a2 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed)
struct fd {
struct file *file;
- int need_put;
+ unsigned int flags;
};
+#define FDPUT_FPUT 1
+#define FDPUT_POS_UNLOCK 2
static inline void fdput(struct fd fd)
{
- if (fd.need_put)
+ if (fd.flags & FDPUT_FPUT)
fput(fd.file);
}
extern struct file *fget(unsigned int fd);
-extern struct file *fget_light(unsigned int fd, int *fput_needed);
+extern struct file *fget_raw(unsigned int fd);
+extern unsigned long __fdget(unsigned int fd);
+extern unsigned long __fdget_raw(unsigned int fd);
+extern unsigned long __fdget_pos(unsigned int fd);
-static inline struct fd fdget(unsigned int fd)
+static inline struct fd __to_fd(unsigned long v)
{
- int b;
- struct file *f = fget_light(fd, &b);
- return (struct fd){f,b};
+ return (struct fd){(struct file *)(v & ~3),v & 3};
}
-extern struct file *fget_raw(unsigned int fd);
-extern struct file *fget_raw_light(unsigned int fd, int *fput_needed);
+static inline struct fd fdget(unsigned int fd)
+{
+ return __to_fd(__fdget(fd));
+}
static inline struct fd fdget_raw(unsigned int fd)
{
- int b;
- struct file *f = fget_raw_light(fd, &b);
- return (struct fd){f,b};
+ return __to_fd(__fdget_raw(fd));
}
extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
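
The unsigned long returned by __fdget() packs the struct file pointer into the upper bits and the FDPUT_* flags into the low two bits, which __to_fd() splits apart again (struct file is forced to at least 4-byte alignment elsewhere in this series). A toy sketch of the encoding with hypothetical helper names:

#include <linux/file.h>

static unsigned long pack_fd(struct file *file, unsigned int flags)
{
        /* low two bits hold FDPUT_FPUT / FDPUT_POS_UNLOCK */
        return (unsigned long)file | (flags & 3);
}

static struct fd unpack_fd(unsigned long v)
{
        /* __to_fd() masks off the flag bits to recover the pointer and
         * keeps them as fd.flags */
        return __to_fd(v);
}
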
diff --git a/include/linux/firewire.h b/include/linux/firewire.h
index 5d7782e42b8f..c3683bdf28fe 100644
--- a/include/linux/firewire.h
+++ b/include/linux/firewire.h
@@ -200,6 +200,7 @@ struct fw_device {
unsigned irmc:1;
unsigned bc_implemented:2;
+ work_func_t workfn;
struct delayed_work work;
struct fw_attribute_group attribute_group;
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 09f553c59813..23b2a35d712e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* File is opened with O_PATH; almost nothing can be done with it */
#define FMODE_PATH ((__force fmode_t)0x4000)
+/* File needs atomic accesses to f_pos */
+#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
@@ -780,13 +783,14 @@ struct file {
const struct file_operations *f_op;
/*
- * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR.
+ * Protects f_ep_links, f_flags.
* Must not be taken from IRQ context.
*/
spinlock_t f_lock;
atomic_long_t f_count;
unsigned int f_flags;
fmode_t f_mode;
+ struct mutex f_pos_lock;
loff_t f_pos;
struct fown_struct f_owner;
const struct cred *f_cred;
@@ -808,7 +812,7 @@ struct file {
#ifdef CONFIG_DEBUG_WRITECOUNT
unsigned long f_mnt_write_state;
#endif
-};
+} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */
struct file_handle {
__u32 handle_bytes;
@@ -2079,6 +2083,7 @@ extern struct file * dentry_open(const struct path *, int, const struct cred *);
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname(const char __user *);
+extern struct filename *getname_kernel(const char *);
enum {
FILE_CREATED = 1,
@@ -2273,7 +2278,13 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int vfs_fsync_range(struct file *file, loff_t start, loff_t end,
int datasync);
extern int vfs_fsync(struct file *file, int datasync);
-extern int generic_write_sync(struct file *file, loff_t pos, loff_t count);
+static inline int generic_write_sync(struct file *file, loff_t pos, loff_t count)
+{
+ if (!(file->f_flags & O_DSYNC) && !IS_SYNC(file->f_mapping->host))
+ return 0;
+ return vfs_fsync_range(file, pos, pos + count - 1,
+ (file->f_flags & __O_SYNC) ? 0 : 1);
+}
extern void emergency_sync(void);
extern void emergency_remount(void);
#ifdef CONFIG_BLOCK
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 3d286ff49ab0..64cf3ef50696 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -99,7 +99,7 @@ struct fsnotify_ops {
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name);
+ const unsigned char *file_name, u32 cookie);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event)(struct fsnotify_event *event);
@@ -160,7 +160,7 @@ struct fsnotify_group {
struct fasync_struct *fsn_fa; /* async notification */
- struct fsnotify_event overflow_event; /* Event we queue when the
+ struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 4e4cc28623ad..4cdb3a17bcb5 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -495,10 +495,6 @@ enum {
FILTER_TRACE_FN,
};
-#define EVENT_STORAGE_SIZE 128
-extern struct mutex event_storage_mutex;
-extern char event_storage[EVENT_STORAGE_SIZE];
-
extern int trace_event_raw_init(struct ftrace_event_call *call);
extern int trace_define_field(struct ftrace_event_call *call, const char *type,
const char *name, int offset, int size,
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 0437439bc047..39b81dc7d01a 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -123,6 +123,10 @@ struct vm_area_struct;
__GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \
__GFP_NO_KSWAPD)
+/*
+ * GFP_THISNODE does not perform any reclaim, you most likely want to
+ * use __GFP_THISNODE to allocate from a given node without fallback!
+ */
#ifdef CONFIG_NUMA
#define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 4d34dbbbad4d..7a8144fef406 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -4,8 +4,6 @@
#include <linux/err.h>
#include <linux/kernel.h>
-#ifdef CONFIG_GPIOLIB
-
struct device;
struct gpio_chip;
@@ -18,6 +16,8 @@ struct gpio_chip;
*/
struct gpio_desc;
+#ifdef CONFIG_GPIOLIB
+
/* Acquire and dispose GPIOs */
struct gpio_desc *__must_check gpiod_get(struct device *dev,
const char *con_id);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index db512014e061..b826239bdce0 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -157,46 +157,6 @@ static inline int hpage_nr_pages(struct page *page)
return HPAGE_PMD_NR;
return 1;
}
-/*
- * compound_trans_head() should be used instead of compound_head(),
- * whenever the "page" passed as parameter could be the tail of a
- * transparent hugepage that could be undergoing a
- * __split_huge_page_refcount(). The page structure layout often
- * changes across releases and it makes extensive use of unions. So if
- * the page structure layout will change in a way that
- * page->first_page gets clobbered by __split_huge_page_refcount, the
- * implementation making use of smp_rmb() will be required.
- *
- * Currently we define compound_trans_head as compound_head, because
- * page->private is in the same union with page->first_page, and
- * page->private isn't clobbered. However this also means we're
- * currently leaving dirt into the page->private field of anonymous
- * pages resulting from a THP split, instead of setting page->private
- * to zero like for every other page that has PG_private not set. But
- * anonymous pages don't use page->private so this is not a problem.
- */
-#if 0
-/* This will be needed if page->private will be clobbered in split_huge_page */
-static inline struct page *compound_trans_head(struct page *page)
-{
- if (PageTail(page)) {
- struct page *head;
- head = page->first_page;
- smp_rmb();
- /*
- * head may be a dangling pointer.
- * __split_huge_page_refcount clears PageTail before
- * overwriting first_page, so if PageTail is still
- * there it means the head pointer isn't dangling.
- */
- if (PageTail(page))
- return head;
- }
- return page;
-}
-#else
-#define compound_trans_head(page) compound_head(page)
-#endif
extern int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pmd_t pmd, pmd_t *pmdp);
@@ -226,7 +186,6 @@ static inline int split_huge_page(struct page *page)
do { } while (0)
#define split_huge_page_pmd_mm(__mm, __address, __pmd) \
do { } while (0)
-#define compound_trans_head(page) compound_head(page)
static inline int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 15da677478dd..344883dce584 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -875,7 +875,7 @@ struct vmbus_channel_relid_released {
struct vmbus_channel_initiate_contact {
struct vmbus_channel_message_header header;
u32 vmbus_version_requested;
- u32 padding2;
+ u32 target_vcpu; /* The VCPU the host should respond to */
u64 interrupt_page;
u64 monitor_page1;
u64 monitor_page2;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 0053adde0ed9..a2678d35b5a2 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -158,6 +158,11 @@ devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
devname, dev_id);
}
+extern int __must_check
+devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id);
+
extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
/*
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index e7831d203737..35e7eca4e33b 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -118,9 +118,7 @@ extern int mq_init_ns(struct ipc_namespace *ns);
* the new maximum will handle anyone else. I may have to revisit this
* in the future.
*/
-#define MIN_QUEUESMAX 1
#define DFLT_QUEUESMAX 256
-#define HARD_QUEUESMAX 1024
#define MIN_MSGMAX 1
#define DFLT_MSG 10U
#define DFLT_MSGMAX 10
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 5be9f0228a3b..d267623c28cf 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -249,7 +249,8 @@ void kernfs_notify(struct kernfs_node *kn);
const void *kernfs_super_ns(struct super_block *sb);
struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, const void *ns);
+ struct kernfs_root *root, bool *new_sb_created,
+ const void *ns);
void kernfs_kill_sb(struct super_block *sb);
void kernfs_init(void);
@@ -317,7 +318,7 @@ static inline const void *kernfs_super_ns(struct super_block *sb)
static inline struct dentry *
kernfs_mount_ns(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root, const void *ns)
+ struct kernfs_root *root, bool *new_sb_created, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline void kernfs_kill_sb(struct super_block *sb) { }
@@ -368,9 +369,9 @@ static inline int kernfs_remove_by_name(struct kernfs_node *parent,
static inline struct dentry *
kernfs_mount(struct file_system_type *fs_type, int flags,
- struct kernfs_root *root)
+ struct kernfs_root *root, bool *new_sb_created)
{
- return kernfs_mount_ns(fs_type, flags, root, NULL);
+ return kernfs_mount_ns(fs_type, flags, root, new_sb_created, NULL);
}
#endif /* __LINUX_KERNFS_H */
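
A sketch of a mount helper using the new_sb_created flag added to kernfs_mount(), so one-time superblock setup only runs when a fresh superblock was actually created; the surrounding filesystem type is hypothetical:

#include <linux/fs.h>
#include <linux/err.h>
#include <linux/kernfs.h>

static struct dentry *myfs_mount(struct file_system_type *fs_type, int flags,
                                 struct kernfs_root *root)
{
        struct dentry *dentry;
        bool new_sb;

        dentry = kernfs_mount(fs_type, flags, root, &new_sb);
        if (IS_ERR(dentry))
                return dentry;

        /* perform one-time superblock setup only for a new sb */
        if (new_sb) {
                /* ... */
        }
        return dentry;
}
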
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h
index ad1ae7f345ad..78c76cd4d37b 100644
--- a/include/linux/mfd/max8997-private.h
+++ b/include/linux/mfd/max8997-private.h
@@ -387,7 +387,7 @@ struct max8997_dev {
struct i2c_client *muic; /* slave addr 0x4a */
struct mutex iolock;
- int type;
+ unsigned long type;
struct platform_device *battery; /* battery control (not fuel gauge) */
int irq;
diff --git a/include/linux/mfd/max8998-private.h b/include/linux/mfd/max8998-private.h
index 4ecb24b4b863..d68ada502ff3 100644
--- a/include/linux/mfd/max8998-private.h
+++ b/include/linux/mfd/max8998-private.h
@@ -163,7 +163,7 @@ struct max8998_dev {
int ono;
u8 irq_masks_cur[MAX8998_NUM_IRQ_REGS];
u8 irq_masks_cache[MAX8998_NUM_IRQ_REGS];
- int type;
+ unsigned long type;
bool wakeup;
};
diff --git a/include/linux/mfd/tps65217.h b/include/linux/mfd/tps65217.h
index a5a7f0130e96..54b5458ec084 100644
--- a/include/linux/mfd/tps65217.h
+++ b/include/linux/mfd/tps65217.h
@@ -252,7 +252,7 @@ struct tps65217_board {
struct tps65217 {
struct device *dev;
struct tps65217_board *pdata;
- unsigned int id;
+ unsigned long id;
struct regulator_desc desc[TPS65217_NUM_REGULATOR];
struct regulator_dev *rdev[TPS65217_NUM_REGULATOR];
struct regmap *regmap;
@@ -263,7 +263,7 @@ static inline struct tps65217 *dev_to_tps65217(struct device *dev)
return dev_get_drvdata(dev);
}
-static inline int tps65217_chip_id(struct tps65217 *tps65217)
+static inline unsigned long tps65217_chip_id(struct tps65217 *tps65217)
{
return tps65217->id;
}
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 554548cd3dd4..130bc8d77fa5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -38,8 +38,10 @@
#include <linux/pci.h>
#include <linux/spinlock_types.h>
#include <linux/semaphore.h>
+#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/radix-tree.h>
+
#include <linux/mlx5/device.h>
#include <linux/mlx5/doorbell.h>
@@ -227,6 +229,7 @@ struct mlx5_uuar_info {
* protect uuar allocation data structs
*/
struct mutex lock;
+ u32 ver;
};
struct mlx5_bf {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f28f46eade6a..c1b7414c7bef 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -175,7 +175,7 @@ extern unsigned int kobjsize(const void *objp);
* Special vmas that are non-mergable, non-mlock()able.
* Note: mm/huge_memory.c VM_NO_THP depends on this definition.
*/
-#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
+#define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP | VM_MIXEDMAP)
/*
* mapping from the currently active vm_flags protection bits (the
@@ -399,8 +399,18 @@ static inline void compound_unlock_irqrestore(struct page *page,
static inline struct page *compound_head(struct page *page)
{
- if (unlikely(PageTail(page)))
- return page->first_page;
+ if (unlikely(PageTail(page))) {
+ struct page *head = page->first_page;
+
+ /*
+ * page->first_page may be a dangling pointer to an old
+ * compound page, so recheck that it is still a tail
+ * page before returning.
+ */
+ smp_rmb();
+ if (likely(PageTail(page)))
+ return head;
+ }
return page;
}
@@ -757,7 +767,7 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
{
- return xchg(&page->_last_cpupid, cpupid);
+ return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
static inline int page_cpupid_last(struct page *page)
@@ -766,7 +776,7 @@ static inline int page_cpupid_last(struct page *page)
}
static inline void page_cpupid_reset_last(struct page *page)
{
- page->_last_cpupid = -1;
+ page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
static inline int page_cpupid_last(struct page *page)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 5f2052c83154..9b61b9bf81ac 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone)
/*
* The NUMA zonelists are doubled because we need zonelists that restrict the
- * allocations to a single node for GFP_THISNODE.
+ * allocations to a single node for __GFP_THISNODE.
*
* [0] : Zonelist with fallback
- * [1] : No fallback (GFP_THISNODE)
+ * [1] : No fallback (__GFP_THISNODE)
*/
#define MAX_ZONELISTS 2
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 440a02ee6f92..e8eeebd49a98 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -752,6 +752,9 @@ struct netdev_phys_port_id {
unsigned char id_len;
};
+typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
+ struct sk_buff *skb);
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -783,7 +786,7 @@ struct netdev_phys_port_id {
* Required can not be NULL.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv);
+ * void *accel_priv, select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1005,7 +1008,8 @@ struct net_device_ops {
struct net_device *dev);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv);
+ void *accel_priv,
+ select_queue_fallback_t fallback);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
void (*ndo_set_rx_mode)(struct net_device *dev);
@@ -1551,7 +1555,6 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
void *accel_priv);
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb);
/*
* Net namespace inlines
@@ -2276,6 +2279,26 @@ static inline void netdev_reset_queue(struct net_device *dev_queue)
}
/**
+ * netdev_cap_txqueue - check if selected tx queue exceeds device queues
+ * @dev: network device
+ * @queue_index: given tx queue index
+ *
+ * Returns 0 if given tx queue index >= number of device tx queues,
+ * otherwise returns the originally passed tx queue index.
+ */
+static inline u16 netdev_cap_txqueue(struct net_device *dev, u16 queue_index)
+{
+ if (unlikely(queue_index >= dev->real_num_tx_queues)) {
+ net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
+ dev->name, queue_index,
+ dev->real_num_tx_queues);
+ return 0;
+ }
+
+ return queue_index;
+}
+
+/**
* netif_running - test if up
* @dev: network device
*
@@ -3068,7 +3091,12 @@ void netdev_change_features(struct net_device *dev);
void netif_stacked_transfer_operstate(const struct net_device *rootdev,
struct net_device *dev);
-netdev_features_t netif_skb_features(struct sk_buff *skb);
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ const struct net_device *dev);
+static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
+{
+ return netif_skb_dev_features(skb, skb->dev);
+}
static inline bool net_gso_ok(netdev_features_t features, int gso_type)
{
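
A sketch of a driver's ndo_select_queue under the new signature, deferring to the core's fallback and clamping the result with netdev_cap_txqueue(); the driver name is hypothetical:

#include <linux/netdevice.h>

static u16 mydrv_select_queue(struct net_device *dev, struct sk_buff *skb,
                              void *accel_priv,
                              select_queue_fallback_t fallback)
{
        u16 queue;

        /* let the core pick a queue unless the driver has its own policy */
        queue = fallback(dev, skb);

        /* netdev_cap_txqueue() warns and returns 0 if the index is out
         * of range for the device */
        return netdev_cap_txqueue(dev, queue);
}
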
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 3ccfcecf8999..5624e4e2763c 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -379,12 +379,14 @@ struct nfs_openres {
* Arguments to the open_confirm call.
*/
struct nfs_open_confirmargs {
+ struct nfs4_sequence_args seq_args;
const struct nfs_fh * fh;
nfs4_stateid * stateid;
struct nfs_seqid * seqid;
};
struct nfs_open_confirmres {
+ struct nfs4_sequence_res seq_res;
nfs4_stateid stateid;
struct nfs_seqid * seqid;
};
@@ -465,9 +467,14 @@ struct nfs_lockt_res {
};
struct nfs_release_lockowner_args {
+ struct nfs4_sequence_args seq_args;
struct nfs_lowner lock_owner;
};
+struct nfs_release_lockowner_res {
+ struct nfs4_sequence_res seq_res;
+};
+
struct nfs4_delegreturnargs {
struct nfs4_sequence_args seq_args;
const struct nfs_fh *fhandle;
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 26ebcf41c213..69ae03f6eb15 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -80,13 +80,14 @@ struct nvme_dev {
struct dma_pool *prp_small_pool;
int instance;
int queue_count;
- int db_stride;
+ u32 db_stride;
u32 ctrl_config;
struct msix_entry *entry;
struct nvme_bar __iomem *bar;
struct list_head namespaces;
struct kref kref;
struct miscdevice miscdev;
+ struct work_struct reset_work;
char name[12];
char serial[20];
char model[40];
@@ -94,6 +95,8 @@ struct nvme_dev {
u32 max_hw_sectors;
u32 stripe_size;
u16 oncs;
+ u16 abort_limit;
+ u8 initialized;
};
/*
@@ -165,6 +168,7 @@ int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
struct sg_io_hdr;
int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
+int nvme_sg_io32(struct nvme_ns *ns, unsigned long arg);
int nvme_sg_get_version_num(int __user *ip);
#endif /* _LINUX_NVME_H */
diff --git a/include/linux/of.h b/include/linux/of.h
index 70c64ba17fa5..435cb995904d 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -169,35 +169,15 @@ static inline const char *of_node_full_name(const struct device_node *np)
extern struct device_node *of_find_node_by_name(struct device_node *from,
const char *name);
-#define for_each_node_by_name(dn, name) \
- for (dn = of_find_node_by_name(NULL, name); dn; \
- dn = of_find_node_by_name(dn, name))
extern struct device_node *of_find_node_by_type(struct device_node *from,
const char *type);
-#define for_each_node_by_type(dn, type) \
- for (dn = of_find_node_by_type(NULL, type); dn; \
- dn = of_find_node_by_type(dn, type))
extern struct device_node *of_find_compatible_node(struct device_node *from,
const char *type, const char *compat);
-#define for_each_compatible_node(dn, type, compatible) \
- for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
- dn = of_find_compatible_node(dn, type, compatible))
extern struct device_node *of_find_matching_node_and_match(
struct device_node *from,
const struct of_device_id *matches,
const struct of_device_id **match);
-static inline struct device_node *of_find_matching_node(
- struct device_node *from,
- const struct of_device_id *matches)
-{
- return of_find_matching_node_and_match(from, matches, NULL);
-}
-#define for_each_matching_node(dn, matches) \
- for (dn = of_find_matching_node(NULL, matches); dn; \
- dn = of_find_matching_node(dn, matches))
-#define for_each_matching_node_and_match(dn, matches, match) \
- for (dn = of_find_matching_node_and_match(NULL, matches, match); \
- dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_get_parent(const struct device_node *node);
@@ -209,43 +189,11 @@ extern struct device_node *of_get_next_available_child(
extern struct device_node *of_get_child_by_name(const struct device_node *node,
const char *name);
-#define for_each_child_of_node(parent, child) \
- for (child = of_get_next_child(parent, NULL); child != NULL; \
- child = of_get_next_child(parent, child))
-
-#define for_each_available_child_of_node(parent, child) \
- for (child = of_get_next_available_child(parent, NULL); child != NULL; \
- child = of_get_next_available_child(parent, child))
-
-static inline int of_get_child_count(const struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_child_of_node(np, child)
- num++;
-
- return num;
-}
-
-static inline int of_get_available_child_count(const struct device_node *np)
-{
- struct device_node *child;
- int num = 0;
-
- for_each_available_child_of_node(np, child)
- num++;
-
- return num;
-}
/* cache lookup */
extern struct device_node *of_find_next_cache_node(const struct device_node *);
extern struct device_node *of_find_node_with_property(
struct device_node *from, const char *prop_name);
-#define for_each_node_with_property(dn, prop_name) \
- for (dn = of_find_node_with_property(NULL, prop_name); dn; \
- dn = of_find_node_with_property(dn, prop_name))
extern struct property *of_find_property(const struct device_node *np,
const char *name,
@@ -367,42 +315,53 @@ static inline struct device_node *of_find_node_by_name(struct device_node *from,
return NULL;
}
-static inline struct device_node *of_get_parent(const struct device_node *node)
+static inline struct device_node *of_find_node_by_type(struct device_node *from,
+ const char *type)
{
return NULL;
}
-static inline bool of_have_populated_dt(void)
+static inline struct device_node *of_find_matching_node_and_match(
+ struct device_node *from,
+ const struct of_device_id *matches,
+ const struct of_device_id **match)
{
- return false;
+ return NULL;
}
-/* Kill an unused variable warning on a device_node pointer */
-static inline void __of_use_dn(const struct device_node *np)
+static inline struct device_node *of_get_parent(const struct device_node *node)
{
+ return NULL;
}
-#define for_each_child_of_node(parent, child) \
- while (__of_use_dn(parent), __of_use_dn(child), 0)
+static inline struct device_node *of_get_next_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
-#define for_each_available_child_of_node(parent, child) \
- while (0)
+static inline struct device_node *of_get_next_available_child(
+ const struct device_node *node, struct device_node *prev)
+{
+ return NULL;
+}
-static inline struct device_node *of_get_child_by_name(
- const struct device_node *node,
- const char *name)
+static inline struct device_node *of_find_node_with_property(
+ struct device_node *from, const char *prop_name)
{
return NULL;
}
-static inline int of_get_child_count(const struct device_node *np)
+static inline bool of_have_populated_dt(void)
{
- return 0;
+ return false;
}
-static inline int of_get_available_child_count(const struct device_node *np)
+static inline struct device_node *of_get_child_by_name(
+ const struct device_node *node,
+ const char *name)
{
- return 0;
+ return NULL;
}
static inline int of_device_is_compatible(const struct device_node *device,
@@ -569,6 +528,13 @@ extern int of_node_to_nid(struct device_node *np);
static inline int of_node_to_nid(struct device_node *device) { return 0; }
#endif
+static inline struct device_node *of_find_matching_node(
+ struct device_node *from,
+ const struct of_device_id *matches)
+{
+ return of_find_matching_node_and_match(from, matches, NULL);
+}
+
/**
* of_property_read_bool - Find a property
* @np: device node from which the property value is to be read.
@@ -618,6 +584,55 @@ static inline int of_property_read_u32(const struct device_node *np,
s; \
s = of_prop_next_string(prop, s))
+#define for_each_node_by_name(dn, name) \
+ for (dn = of_find_node_by_name(NULL, name); dn; \
+ dn = of_find_node_by_name(dn, name))
+#define for_each_node_by_type(dn, type) \
+ for (dn = of_find_node_by_type(NULL, type); dn; \
+ dn = of_find_node_by_type(dn, type))
+#define for_each_compatible_node(dn, type, compatible) \
+ for (dn = of_find_compatible_node(NULL, type, compatible); dn; \
+ dn = of_find_compatible_node(dn, type, compatible))
+#define for_each_matching_node(dn, matches) \
+ for (dn = of_find_matching_node(NULL, matches); dn; \
+ dn = of_find_matching_node(dn, matches))
+#define for_each_matching_node_and_match(dn, matches, match) \
+ for (dn = of_find_matching_node_and_match(NULL, matches, match); \
+ dn; dn = of_find_matching_node_and_match(dn, matches, match))
+
+#define for_each_child_of_node(parent, child) \
+ for (child = of_get_next_child(parent, NULL); child != NULL; \
+ child = of_get_next_child(parent, child))
+#define for_each_available_child_of_node(parent, child) \
+ for (child = of_get_next_available_child(parent, NULL); child != NULL; \
+ child = of_get_next_available_child(parent, child))
+
+#define for_each_node_with_property(dn, prop_name) \
+ for (dn = of_find_node_with_property(NULL, prop_name); dn; \
+ dn = of_find_node_with_property(dn, prop_name))
+
+static inline int of_get_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
+static inline int of_get_available_child_count(const struct device_node *np)
+{
+ struct device_node *child;
+ int num = 0;
+
+ for_each_available_child_of_node(np, child)
+ num++;
+
+ return num;
+}
+
#if defined(CONFIG_PROC_FS) && defined(CONFIG_PROC_DEVICETREE)
extern void proc_device_tree_add_node(struct device_node *, struct proc_dir_entry *);
extern void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop);
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 8d7dd6768cb7..ef370210ffb2 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -78,11 +78,13 @@ static inline int of_device_uevent_modalias(struct device *dev,
static inline void of_device_node_put(struct device *dev) { }
-static inline const struct of_device_id *of_match_device(
+static inline const struct of_device_id *__of_match_device(
const struct of_device_id *matches, const struct device *dev)
{
return NULL;
}
+#define of_match_device(matches, dev) \
+ __of_match_device(of_match_ptr(matches), (dev))
static inline struct device_node *of_cpu_device_node_get(int cpu)
{
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index e464b4e987e8..d1fe1a761047 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -228,9 +228,9 @@ PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
PAGEFLAG(MappedToDisk, mappedtodisk)
-/* PG_readahead is only used for file reads; PG_reclaim is only for writes */
+/* PG_readahead is only used for reads; PG_reclaim is only for writes */
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
-PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
+PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
#ifdef CONFIG_HIGHMEM
/*
diff --git a/include/linux/pci.h b/include/linux/pci.h
index fb57c892b214..33aa2caf0f0c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1169,8 +1169,23 @@ void msi_remove_pci_irq_vectors(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
int pci_enable_msi_range(struct pci_dev *dev, int minvec, int maxvec);
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{
+ int rc = pci_enable_msi_range(dev, nvec, nvec);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
int minvec, int maxvec);
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int rc = pci_enable_msix_range(dev, entries, nvec, nvec);
+ if (rc < 0)
+ return rc;
+ return 0;
+}
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
static inline int pci_enable_msi_block(struct pci_dev *dev, int nvec)
@@ -1189,9 +1204,14 @@ static inline int pci_msi_enabled(void) { return 0; }
static inline int pci_enable_msi_range(struct pci_dev *dev, int minvec,
int maxvec)
{ return -ENOSYS; }
+static inline int pci_enable_msi_exact(struct pci_dev *dev, int nvec)
+{ return -ENOSYS; }
static inline int pci_enable_msix_range(struct pci_dev *dev,
struct msix_entry *entries, int minvec, int maxvec)
{ return -ENOSYS; }
+static inline int pci_enable_msix_exact(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{ return -ENOSYS; }
#endif
#ifdef CONFIG_PCIEPORTBUS
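
A sketch of requesting an exact number of MSI-X vectors with the new helpers, falling back to a single MSI vector on failure; the device/driver specifics are hypothetical:

#include <linux/pci.h>

static int mydrv_setup_irqs(struct pci_dev *pdev,
                            struct msix_entry *entries, int nvec)
{
        int err;

        err = pci_enable_msix_exact(pdev, entries, nvec);
        if (!err)
                return nvec;    /* got exactly nvec vectors */

        /* could not get all of them; fall back to one MSI vector */
        err = pci_enable_msi_exact(pdev, 1);
        if (err)
                return err;
        return 1;
}
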
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index e273e5ac19c9..3f83459dbb20 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -146,7 +146,9 @@ static inline void phy_set_bus_width(struct phy *phy, int bus_width)
phy->attrs.bus_width = bus_width;
}
struct phy *phy_get(struct device *dev, const char *string);
+struct phy *phy_optional_get(struct device *dev, const char *string);
struct phy *devm_phy_get(struct device *dev, const char *string);
+struct phy *devm_phy_optional_get(struct device *dev, const char *string);
void phy_put(struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_simple_xlate(struct device *dev,
@@ -232,11 +234,23 @@ static inline struct phy *phy_get(struct device *dev, const char *string)
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *phy_optional_get(struct device *dev,
+ const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline struct phy *devm_phy_get(struct device *dev, const char *string)
{
return ERR_PTR(-ENOSYS);
}
+static inline struct phy *devm_phy_optional_get(struct device *dev,
+ const char *string)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
static inline void phy_put(struct phy *phy)
{
}
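
A sketch of a probe path using the new optional-get variant, assuming it returns NULL rather than an error when no PHY is described for the device; the consumer name "usb-phy" and the helper are hypothetical:

#include <linux/err.h>
#include <linux/phy/phy.h>

static int mydrv_probe_phy(struct device *dev)
{
        struct phy *phy;

        phy = devm_phy_optional_get(dev, "usb-phy");
        if (IS_ERR(phy))
                return PTR_ERR(phy);    /* real error, not just "absent" */

        if (phy)
                return phy_init(phy);   /* only if a PHY was described */
        return 0;
}
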
diff --git a/include/linux/platform_data/spi-s3c64xx.h b/include/linux/platform_data/spi-s3c64xx.h
index 8447f634c7f5..d3889b98a1a1 100644
--- a/include/linux/platform_data/spi-s3c64xx.h
+++ b/include/linux/platform_data/spi-s3c64xx.h
@@ -1,5 +1,4 @@
-/* linux/arch/arm/plat-samsung/include/plat/s3c64xx-spi.h
- *
+/*
* Copyright (C) 2009 Samsung Electronics Ltd.
* Jaswinder Singh <jassi.brar@samsung.com>
*
@@ -8,8 +7,8 @@
* published by the Free Software Foundation.
*/
-#ifndef __S3C64XX_PLAT_SPI_H
-#define __S3C64XX_PLAT_SPI_H
+#ifndef __SPI_S3C64XX_H
+#define __SPI_S3C64XX_H
#include <linux/dmaengine.h>
@@ -68,4 +67,4 @@ extern int s3c64xx_spi2_cfg_gpio(void);
extern struct s3c64xx_spi_info s3c64xx_spi0_pdata;
extern struct s3c64xx_spi_info s3c64xx_spi1_pdata;
extern struct s3c64xx_spi_info s3c64xx_spi2_pdata;
-#endif /* __S3C64XX_PLAT_SPI_H */
+#endif /*__SPI_S3C64XX_H */
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 16c9a62fa1c0..2a5897a4afbc 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -26,9 +26,13 @@
#ifdef CONFIG_PM
extern int pm_generic_runtime_suspend(struct device *dev);
extern int pm_generic_runtime_resume(struct device *dev);
+extern int pm_runtime_force_suspend(struct device *dev);
+extern int pm_runtime_force_resume(struct device *dev);
#else
static inline int pm_generic_runtime_suspend(struct device *dev) { return 0; }
static inline int pm_generic_runtime_resume(struct device *dev) { return 0; }
+static inline int pm_runtime_force_suspend(struct device *dev) { return 0; }
+static inline int pm_runtime_force_resume(struct device *dev) { return 0; }
#endif
#ifdef CONFIG_PM_RUNTIME
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1da693d51255..b66c2110cb1f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -250,8 +250,7 @@ struct rmap_walk_control {
int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
- int (*file_nonlinear)(struct page *, struct address_space *,
- struct vm_area_struct *vma);
+ int (*file_nonlinear)(struct page *, struct address_space *, void *arg);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 68a0e84463a0..a781dec1cd0b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -128,6 +128,7 @@ struct bio_list;
struct fs_struct;
struct perf_event_context;
struct blk_plug;
+struct filename;
/*
* List of flags we want to share for kernel threads,
@@ -2311,7 +2312,7 @@ extern void do_group_exit(int);
extern int allow_signal(int);
extern int disallow_signal(int);
-extern int do_execve(const char *,
+extern int do_execve(struct filename *,
const char __user * const __user *,
const char __user * const __user *);
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
diff --git a/include/linux/security.h b/include/linux/security.h
index 5623a7f965b7..2fc42d191f79 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -1040,6 +1040,7 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
* Allocate a security structure to the xp->security field; the security
* field is initialized to NULL when the xfrm_policy is allocated.
* Return 0 if operation was successful (memory to allocate, legal context)
+ * @gfp is to specify the context for the allocation
* @xfrm_policy_clone_security:
* @old_ctx contains an existing xfrm_sec_ctx.
* @new_ctxp contains a new xfrm_sec_ctx being cloned from old.
@@ -1683,7 +1684,7 @@ struct security_operations {
#ifdef CONFIG_SECURITY_NETWORK_XFRM
int (*xfrm_policy_alloc_security) (struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx);
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
int (*xfrm_policy_clone_security) (struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctx);
void (*xfrm_policy_free_security) (struct xfrm_sec_ctx *ctx);
int (*xfrm_policy_delete_security) (struct xfrm_sec_ctx *ctx);
@@ -2859,7 +2860,8 @@ static inline void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx);
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx, gfp_t gfp);
int security_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp);
void security_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
@@ -2877,7 +2879,9 @@ void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl);
#else /* CONFIG_SECURITY_NETWORK_XFRM */
-static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+static inline int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
return 0;
}
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index f589c9af8cbf..5e1e6f2d98c2 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2725,7 +2725,7 @@ static inline void nf_reset(struct sk_buff *skb)
static inline void nf_reset_trace(struct sk_buff *skb)
{
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
skb->nf_trace = 0;
#endif
}
@@ -2742,6 +2742,9 @@ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src)
dst->nf_bridge = src->nf_bridge;
nf_bridge_get(src->nf_bridge);
#endif
+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE) || defined(CONFIG_NF_TABLES)
+ dst->nf_trace = src->nf_trace;
+#endif
}
static inline void nf_copy(struct sk_buff *dst, const struct sk_buff *src)
@@ -2916,5 +2919,22 @@ static inline bool skb_head_is_locked(const struct sk_buff *skb)
{
return !skb->head_frag || skb_cloned(skb);
}
+
+/**
+ * skb_gso_network_seglen - Return length of individual segments of a gso packet
+ *
+ * @skb: GSO skb
+ *
+ * skb_gso_network_seglen is used to determine the real size of the
+ * individual segments, including Layer3 (IP, IPv6) and L4 headers (TCP/UDP).
+ *
+ * The MAC/L2 header is not accounted for.
+ */
+static inline unsigned int skb_gso_network_seglen(const struct sk_buff *skb)
+{
+ unsigned int hdr_len = skb_transport_header(skb) -
+ skb_network_header(skb);
+ return hdr_len + skb_gso_transport_seglen(skb);
+}
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
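
A sketch of how forwarding code can use skb_gso_network_seglen() to decide whether a GSO packet's individual segments would exceed an egress MTU; the helper name pkt_exceeds_mtu is hypothetical:

#include <linux/skbuff.h>

static bool pkt_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
{
        if (skb->len <= mtu)
                return false;

        /* a GSO packet fits if each resegmented frame (L3 + L4 headers
         * plus payload) fits within the MTU */
        if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
                return false;

        return true;
}
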
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 9260abdd67df..b5b2df60299e 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
*
* %GFP_NOWAIT - Allocation will not sleep.
*
- * %GFP_THISNODE - Allocate node-local memory only.
+ * %__GFP_THISNODE - Allocate node-local memory only.
*
* %GFP_DMA - Allocation suitable for DMA.
* Should only be used for kmalloc() caches. Otherwise, use a
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 3834f43f9993..6ae004e437ea 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -188,6 +188,9 @@ static inline void kick_all_cpus_sync(void) { }
*/
extern void arch_disable_smp_support(void);
+extern void arch_enable_nonboot_cpus_begin(void);
+extern void arch_enable_nonboot_cpus_end(void);
+
void smp_setup_processor_id(void);
#endif /* __LINUX_SMP_H */
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index a1d4ca290862..36c86ef51ff3 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -24,6 +24,9 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/completion.h>
+#include <linux/scatterlist.h>
+
+struct dma_chan;
/*
* INTERFACES between SPI master-side drivers and SPI infrastructure.
@@ -266,6 +269,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @auto_runtime_pm: the core should ensure a runtime PM reference is held
* while the hardware is prepared, using the parent
* device for the spidev
+ * @max_dma_len: Maximum length of a DMA transfer for the device.
* @prepare_transfer_hardware: a message will soon arrive from the queue
* so the subsystem requests the driver to prepare the transfer hardware
* by issuing this call
@@ -273,7 +277,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* message while queuing transfers that arrive in the meantime. When the
* driver is finished with this message, it must call
* spi_finalize_current_message() so the subsystem can issue the next
- * transfer
+ * message
* @unprepare_transfer_hardware: there are currently no more messages on the
* queue so the subsystem notifies the driver that it may relax the
* hardware by issuing this call
@@ -287,7 +291,10 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* - return 1 if the transfer is still in progress. When
* the driver is finished with this transfer it must
* call spi_finalize_current_transfer() so the subsystem
- * can issue the next transfer
+ * can issue the next transfer. Note: transfer_one and
+ * transfer_one_message are mutually exclusive; when both
+ * are set, the generic subsystem does not call your
+ * transfer_one callback.
* @unprepare_message: undo any work done by prepare_message().
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
@@ -345,6 +352,8 @@ struct spi_master {
#define SPI_MASTER_HALF_DUPLEX BIT(0) /* can't do full duplex */
#define SPI_MASTER_NO_RX BIT(1) /* can't do buffer read */
#define SPI_MASTER_NO_TX BIT(2) /* can't do buffer write */
+#define SPI_MASTER_MUST_RX BIT(3) /* requires rx */
+#define SPI_MASTER_MUST_TX BIT(4) /* requires tx */
/* lock and mutex for SPI bus locking */
spinlock_t bus_lock_spinlock;
@@ -387,6 +396,17 @@ struct spi_master {
void (*cleanup)(struct spi_device *spi);
/*
+ * Used to enable core support for DMA handling. If can_dma()
+ * exists and returns true then the transfer will be mapped
+ * prior to transfer_one() being called. The driver should
+ * not modify or store xfer, and dma_tx and dma_rx must be set
+ * while the device is prepared.
+ */
+ bool (*can_dma)(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer);
+
+ /*
* These hooks are for drivers that want to use the generic
* master transfer queueing mechanism. If these are used, the
* transfer() function above must NOT be specified by the driver.
@@ -404,7 +424,9 @@ struct spi_master {
bool rt;
bool auto_runtime_pm;
bool cur_msg_prepared;
+ bool cur_msg_mapped;
struct completion xfer_completion;
+ size_t max_dma_len;
int (*prepare_transfer_hardware)(struct spi_master *master);
int (*transfer_one_message)(struct spi_master *master,
@@ -425,6 +447,14 @@ struct spi_master {
/* gpio chip select */
int *cs_gpios;
+
+ /* DMA channels for use with core dmaengine helpers */
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+
+ /* dummy data for full duplex devices */
+ void *dummy_rx;
+ void *dummy_tx;
};
static inline void *spi_master_get_devdata(struct spi_master *master)
@@ -509,6 +539,8 @@ extern struct spi_master *spi_busnum_to_master(u16 busnum);
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
* @transfer_list: transfers are sequenced through @spi_message.transfers
+ * @tx_sg: Scatterlist for transmit, currently not for client use
+ * @rx_sg: Scatterlist for receive, currently not for client use
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -576,6 +608,8 @@ struct spi_transfer {
dma_addr_t tx_dma;
dma_addr_t rx_dma;
+ struct sg_table tx_sg;
+ struct sg_table rx_sg;
unsigned cs_change:1;
unsigned tx_nbits:3;
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index daebaba886aa..85578d4be034 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -42,6 +42,6 @@ extern int spi_bitbang_setup_transfer(struct spi_device *spi,
/* start or stop queue processing */
extern int spi_bitbang_start(struct spi_bitbang *spi);
-extern int spi_bitbang_stop(struct spi_bitbang *spi);
+extern void spi_bitbang_stop(struct spi_bitbang *spi);
#endif /* __SPI_BITBANG_H */
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 40ed9e9a77e5..a747a77ea584 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -281,13 +281,15 @@ asmlinkage long sys_sched_setscheduler(pid_t pid, int policy,
asmlinkage long sys_sched_setparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_setattr(pid_t pid,
- struct sched_attr __user *attr);
+ struct sched_attr __user *attr,
+ unsigned int flags);
asmlinkage long sys_sched_getscheduler(pid_t pid);
asmlinkage long sys_sched_getparam(pid_t pid,
struct sched_param __user *param);
asmlinkage long sys_sched_getattr(pid_t pid,
struct sched_attr __user *attr,
- unsigned int size);
+ unsigned int size,
+ unsigned int flags);
asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long __user *user_mask_ptr);
asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index accc497f8d72..7159a0a933df 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -60,6 +60,12 @@ struct tp_module {
unsigned int num_tracepoints;
struct tracepoint * const *tracepoints_ptrs;
};
+bool trace_module_has_bad_taint(struct module *mod);
+#else
+static inline bool trace_module_has_bad_taint(struct module *mod)
+{
+ return false;
+}
#endif /* CONFIG_MODULES */
struct tracepoint_iter {
diff --git a/include/linux/usb.h b/include/linux/usb.h
index c716da18c668..7f6eb859873e 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1265,8 +1265,6 @@ typedef void (*usb_complete_t)(struct urb *);
* @sg: scatter gather buffer list, the buffer size of each element in
* the list (except the last) must be divisible by the endpoint's
* max packet size if no_sg_constraint isn't set in 'struct usb_bus'
- * (FIXME: scatter-gather under xHCI is broken for periodic transfers.
- * Do not use urb->sg for interrupt endpoints for now, only bulk.)
* @num_mapped_sgs: (internal) number of mapped sg entries
* @num_sgs: number of entries in the sg list
* @transfer_buffer_length: How big is transfer_buffer. The transfer may
diff --git a/include/linux/usb/cdc_ncm.h b/include/linux/usb/cdc_ncm.h
index c3fa80745996..2c14d9cdd57a 100644
--- a/include/linux/usb/cdc_ncm.h
+++ b/include/linux/usb/cdc_ncm.h
@@ -88,6 +88,7 @@
#define cdc_ncm_data_intf_is_mbim(x) ((x)->desc.bInterfaceProtocol == USB_CDC_MBIM_PROTO_NTB)
struct cdc_ncm_ctx {
+ struct usb_cdc_ncm_ntb_parameters ncm_parm;
struct hrtimer tx_timer;
struct tasklet_struct bh;
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index c557c6d096de..3a712e2e7d76 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -71,12 +71,14 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
THP_ZERO_PAGE_ALLOC,
THP_ZERO_PAGE_ALLOC_FAILED,
#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
NR_TLB_REMOTE_FLUSH, /* cpu tried to flush others' tlbs */
NR_TLB_REMOTE_FLUSH_RECEIVED,/* cpu received ipi for flush */
-#endif
+#endif /* CONFIG_SMP */
NR_TLB_LOCAL_FLUSH_ALL,
NR_TLB_LOCAL_FLUSH_ONE,
+#endif /* CONFIG_DEBUG_TLBFLUSH */
NR_VM_EVENT_ITEMS
};
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index a67b38415768..67ce70c8279b 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -83,6 +83,14 @@ static inline void vm_events_fold_cpu(int cpu)
#define count_vm_numa_events(x, y) do { (void)(y); } while (0)
#endif /* CONFIG_NUMA_BALANCING */
+#ifdef CONFIG_DEBUG_TLBFLUSH
+#define count_vm_tlb_event(x) count_vm_event(x)
+#define count_vm_tlb_events(x, y) count_vm_events(x, y)
+#else
+#define count_vm_tlb_event(x) do {} while (0)
+#define count_vm_tlb_events(x, y) do { (void)(y); } while (0)
+#endif
+
#define __count_zone_vm_events(item, zone, delta) \
__count_vm_events(item##_NORMAL - ZONE_NORMAL + \
zone_idx(zone), delta)
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 594521ba0d43..704f4f652d0a 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -419,10 +419,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
static struct lock_class_key __key; \
const char *__lock_name; \
\
- if (__builtin_constant_p(fmt)) \
- __lock_name = (fmt); \
- else \
- __lock_name = #fmt; \
+ __lock_name = #fmt#args; \
\
__alloc_workqueue_key((fmt), (flags), (max_active), \
&__key, __lock_name, ##args); \
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index fc0e4320aa6d..021b8a319b9e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -97,7 +97,7 @@ void writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
int try_to_writeback_inodes_sb(struct super_block *, enum wb_reason reason);
int try_to_writeback_inodes_sb_nr(struct super_block *, unsigned long nr,
enum wb_reason reason);
-void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this);
+void sync_inodes_sb(struct super_block *);
void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
void inode_wait_for_writeback(struct inode *inode);
diff --git a/include/net/datalink.h b/include/net/datalink.h
index deb7ca75db48..93cb18f729b5 100644
--- a/include/net/datalink.h
+++ b/include/net/datalink.h
@@ -15,4 +15,6 @@ struct datalink_proto {
struct list_head node;
};
+struct datalink_proto *make_EII_client(void);
+void destroy_EII_client(struct datalink_proto *dl);
#endif
diff --git a/include/net/dn.h b/include/net/dn.h
index ccc15588d108..913b73d239f5 100644
--- a/include/net/dn.h
+++ b/include/net/dn.h
@@ -200,6 +200,8 @@ static inline void dn_sk_ports_copy(struct flowidn *fld, struct dn_scp *scp)
}
unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu);
+void dn_register_sysctl(void);
+void dn_unregister_sysctl(void);
#define DN_MENUVER_ACC 0x01
#define DN_MENUVER_USR 0x02
diff --git a/include/net/dn_route.h b/include/net/dn_route.h
index b409ad6b8d7a..55df9939bca2 100644
--- a/include/net/dn_route.h
+++ b/include/net/dn_route.h
@@ -20,6 +20,8 @@ int dn_route_output_sock(struct dst_entry __rcu **pprt, struct flowidn *,
struct sock *sk, int flags);
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb);
void dn_rt_cache_flush(int delay);
+int dn_route_rcv(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pt, struct net_device *orig_dev);
/* Masks for flags field */
#define DN_RT_F_PID 0x07 /* Mask for packet type */
diff --git a/include/net/ethoc.h b/include/net/ethoc.h
index 96f3789b27bc..2a2d6bb34eb8 100644
--- a/include/net/ethoc.h
+++ b/include/net/ethoc.h
@@ -16,6 +16,7 @@
struct ethoc_platform_data {
u8 hwaddr[IFHWADDRLEN];
s8 phy_id;
+ u32 eth_clkfreq;
};
#endif /* !LINUX_NET_ETHOC_H */
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 48ed75c21260..e77c10405d51 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -129,6 +129,7 @@ int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
struct ip_tunnel_parm *p);
void ip_tunnel_setup(struct net_device *dev, int net_id);
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t);
/* Extract dsfield from inner protocol */
static inline u8 ip_tunnel_get_dsfield(const struct iphdr *iph,
diff --git a/include/net/ipx.h b/include/net/ipx.h
index 9e9e35465baf..0143180fecc9 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -140,6 +140,17 @@ static __inline__ void ipxitf_hold(struct ipx_interface *intrfc)
}
void ipxitf_down(struct ipx_interface *intrfc);
+struct ipx_interface *ipxitf_find_using_net(__be32 net);
+int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb, char *node);
+__be16 ipx_cksum(struct ipxhdr *packet, int length);
+int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
+ unsigned char *node);
+void ipxrtr_del_routes(struct ipx_interface *intrfc);
+int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
+ struct iovec *iov, size_t len, int noblock);
+int ipxrtr_route_skb(struct sk_buff *skb);
+struct ipx_route *ipxrtr_lookup(__be32 net);
+int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
static __inline__ void ipxitf_put(struct ipx_interface *intrfc)
{
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index da68c9a90ac5..991dcd94cbbf 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -162,6 +162,14 @@ extern struct list_head net_namespace_list;
struct net *get_net_ns_by_pid(pid_t pid);
struct net *get_net_ns_by_fd(int pid);
+#ifdef CONFIG_SYSCTL
+void ipx_register_sysctl(void);
+void ipx_unregister_sysctl(void);
+#else
+#define ipx_register_sysctl()
+#define ipx_unregister_sysctl()
+#endif
+
#ifdef CONFIG_NET_NS
void __put_net(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 01ea6eed1bb1..b2ac6246b7e0 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -284,6 +284,8 @@ extern unsigned int nf_conntrack_max;
extern unsigned int nf_conntrack_hash_rnd;
void init_nf_conntrack_hash_rnd(void);
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl);
+
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 57c8ff7955df..e7e14ffe0f6a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -252,6 +252,7 @@ void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
* @owner: module reference
* @policy: netlink attribute policy
* @maxattr: highest netlink attribute number
+ * @family: address family for AF-specific types
*/
struct nft_expr_type {
const struct nft_expr_ops *(*select_ops)(const struct nft_ctx *,
@@ -262,6 +263,7 @@ struct nft_expr_type {
struct module *owner;
const struct nla_policy *policy;
unsigned int maxattr;
+ u8 family;
};
/**
@@ -320,7 +322,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
* struct nft_rule - nf_tables rule
*
* @list: used internally
- * @rcu_head: used internally for rcu
* @handle: rule handle
* @genmask: generation mask
* @dlen: length of expression data
@@ -328,7 +329,6 @@ static inline void *nft_expr_priv(const struct nft_expr *expr)
*/
struct nft_rule {
struct list_head list;
- struct rcu_head rcu_head;
u64 handle:46,
genmask:2,
dlen:16;
@@ -389,7 +389,6 @@ enum nft_chain_flags {
*
* @rules: list of rules in the chain
* @list: used internally
- * @rcu_head: used internally
* @net: net namespace that this chain belongs to
* @table: table that this chain belongs to
* @handle: chain handle
@@ -401,7 +400,6 @@ enum nft_chain_flags {
struct nft_chain {
struct list_head rules;
struct list_head list;
- struct rcu_head rcu_head;
struct net *net;
struct nft_table *table;
u64 handle;
@@ -529,6 +527,9 @@ void nft_unregister_expr(struct nft_expr_type *);
#define MODULE_ALIAS_NFT_CHAIN(family, name) \
MODULE_ALIAS("nft-chain-" __stringify(family) "-" name)
+#define MODULE_ALIAS_NFT_AF_EXPR(family, name) \
+ MODULE_ALIAS("nft-expr-" __stringify(family) "-" name)
+
#define MODULE_ALIAS_NFT_EXPR(name) \
MODULE_ALIAS("nft-expr-" name)
diff --git a/include/net/netfilter/nft_reject.h b/include/net/netfilter/nft_reject.h
new file mode 100644
index 000000000000..36b0da2d55bb
--- /dev/null
+++ b/include/net/netfilter/nft_reject.h
@@ -0,0 +1,25 @@
+#ifndef _NFT_REJECT_H_
+#define _NFT_REJECT_H_
+
+struct nft_reject {
+ enum nft_reject_types type:8;
+ u8 icmp_code;
+};
+
+extern const struct nla_policy nft_reject_policy[];
+
+int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[]);
+
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr);
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt);
+
+#endif
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index d992ca3145fe..6ee76c804893 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1653,17 +1653,6 @@ struct sctp_association {
/* This is the last advertised value of rwnd over a SACK chunk. */
__u32 a_rwnd;
- /* Number of bytes by which the rwnd has slopped. The rwnd is allowed
- * to slop over a maximum of the association's frag_point.
- */
- __u32 rwnd_over;
-
- /* Keeps treack of rwnd pressure. This happens when we have
- * a window, but not recevie buffer (i.e small packets). This one
- * is releases slowly (1 PMTU at a time ).
- */
- __u32 rwnd_press;
-
/* This is the sndbuf size in use for the association.
* This corresponds to the sndbuf size for the association,
* as specified in the sk->sndbuf.
@@ -1892,8 +1881,7 @@ void sctp_assoc_update(struct sctp_association *old,
__u32 sctp_association_get_next_tsn(struct sctp_association *);
void sctp_assoc_sync_pmtu(struct sock *, struct sctp_association *);
-void sctp_assoc_rwnd_increase(struct sctp_association *, unsigned int);
-void sctp_assoc_rwnd_decrease(struct sctp_association *, unsigned int);
+void sctp_assoc_rwnd_update(struct sctp_association *, bool);
void sctp_assoc_set_primary(struct sctp_association *,
struct sctp_transport *);
void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
diff --git a/include/net/sock.h b/include/net/sock.h
index 5c3f7c3624aa..b9586a137cad 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1488,6 +1488,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
*/
#define sock_owned_by_user(sk) ((sk)->sk_lock.owned)
+static inline void sock_release_ownership(struct sock *sk)
+{
+ sk->sk_lock.owned = 0;
+}
+
/*
* Macro so as to not evaluate some arguments when
* lockdep is not enabled.
@@ -2186,7 +2191,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
{
#define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \
(1UL << SOCK_RCVTSTAMP) | \
- (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \
(1UL << SOCK_TIMESTAMPING_SOFTWARE) | \
(1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \
(1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 56fc366da6d5..743accec6c76 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -480,20 +480,21 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
#ifdef CONFIG_SYN_COOKIES
#include <linux/ktime.h>
-/* Syncookies use a monotonic timer which increments every 64 seconds.
+/* Syncookies use a monotonic timer which increments every 60 seconds.
* This counter is used both as a hash input and partially encoded into
* the cookie value. A cookie is only validated further if the delta
* between the current counter value and the encoded one is less than this,
- * i.e. a sent cookie is valid only at most for 128 seconds (or less if
+ * i.e. a sent cookie is valid only at most for 2*60 seconds (or less if
* the counter advances immediately after a cookie is generated).
*/
#define MAX_SYNCOOKIE_AGE 2
static inline u32 tcp_cookie_time(void)
{
- struct timespec now;
- getnstimeofday(&now);
- return now.tv_sec >> 6; /* 64 seconds granularity */
+ u64 val = get_jiffies_64();
+
+ do_div(val, 60 * HZ);
+ return val;
}
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
@@ -1303,7 +1304,8 @@ struct tcp_fastopen_request {
/* Fast Open cookie. Size 0 means a cookie request */
struct tcp_fastopen_cookie cookie;
struct msghdr *data; /* data in MSG_FASTOPEN */
- u16 copied; /* queued in tcp_connect() */
+ size_t size;
+ int copied; /* queued in tcp_connect() */
};
void tcp_free_fastopen_req(struct tcp_sock *tp);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index afa5730fb3bd..fb5654a8ca3c 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -1648,6 +1648,11 @@ static inline int xfrm_aevent_is_on(struct net *net)
}
#endif
+static inline int aead_len(struct xfrm_algo_aead *alg)
+{
+ return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
+}
+
static inline int xfrm_alg_len(const struct xfrm_algo *alg)
{
return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
@@ -1686,6 +1691,12 @@ static inline int xfrm_replay_clone(struct xfrm_state *x,
return 0;
}
+static inline struct xfrm_algo_aead *xfrm_algo_aead_clone(struct xfrm_algo_aead *orig)
+{
+ return kmemdup(orig, aead_len(orig), GFP_KERNEL);
+}
+
+
static inline struct xfrm_algo *xfrm_algo_clone(struct xfrm_algo *orig)
{
return kmemdup(orig, xfrm_alg_len(orig), GFP_KERNEL);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 8d4a1c06f7e4..6793f32ccb58 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -226,7 +226,8 @@ enum ib_port_cap_flags {
IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
IB_PORT_BOOT_MGMT_SUP = 1 << 23,
IB_PORT_LINK_LATENCY_SUP = 1 << 24,
- IB_PORT_CLIENT_REG_SUP = 1 << 25
+ IB_PORT_CLIENT_REG_SUP = 1 << 25,
+ IB_PORT_IP_BASED_GIDS = 1 << 26
};
enum ib_port_width {
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 68d92e36facd..6e89ef6c11c1 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -449,14 +449,22 @@ void snd_soc_dapm_debugfs_init(struct snd_soc_dapm_context *dapm,
/* dapm audio pin control and status */
int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin);
+int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_get_pin_status(struct snd_soc_dapm_context *dapm,
const char *pin);
int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm);
int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
const char *pin);
+int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin);
int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
const char *pin);
void snd_soc_dapm_auto_nc_codec_pins(struct snd_soc_codec *codec);
diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h
index ae5a17111968..4483fadfa68d 100644
--- a/include/target/iscsi/iscsi_transport.h
+++ b/include/target/iscsi/iscsi_transport.h
@@ -12,6 +12,7 @@ struct iscsit_transport {
int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *);
int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *);
void (*iscsit_free_np)(struct iscsi_np *);
+ void (*iscsit_wait_conn)(struct iscsi_conn *);
void (*iscsit_free_conn)(struct iscsi_conn *);
int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *);
int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32);
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index c9c791209cd1..1772fadcff62 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -525,7 +525,6 @@ struct se_cmd {
#define CMD_T_COMPLETE (1 << 2)
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
-#define CMD_T_FAILED (1 << 6)
#define CMD_T_DEV_ACTIVE (1 << 7)
#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 9e9475c85de5..e5bf9a76f169 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -42,7 +42,6 @@ TRACE_EVENT(pstate_sample,
u32 state,
u64 mperf,
u64 aperf,
- u32 energy,
u32 freq
),
@@ -51,7 +50,6 @@ TRACE_EVENT(pstate_sample,
state,
mperf,
aperf,
- energy,
freq
),
@@ -61,7 +59,6 @@ TRACE_EVENT(pstate_sample,
__field(u32, state)
__field(u64, mperf)
__field(u64, aperf)
- __field(u32, energy)
__field(u32, freq)
),
@@ -72,17 +69,15 @@ TRACE_EVENT(pstate_sample,
__entry->state = state;
__entry->mperf = mperf;
__entry->aperf = aperf;
- __entry->energy = energy;
__entry->freq = freq;
),
- TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu energy=%lu freq=%lu ",
+ TP_printk("core_busy=%lu scaled=%lu state=%lu mperf=%llu aperf=%llu freq=%lu ",
(unsigned long)__entry->core_busy,
(unsigned long)__entry->scaled_busy,
(unsigned long)__entry->state,
(unsigned long long)__entry->mperf,
(unsigned long long)__entry->aperf,
- (unsigned long)__entry->energy,
(unsigned long)__entry->freq
)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ddc179b7a105..1fef3e6e9436 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
),
TP_fast_assign(
- __entry->client_id = clnt->cl_clid;
+ __entry->client_id = clnt ? clnt->cl_clid : -1;
__entry->task_id = task->tk_pid;
__entry->action = action;
__entry->runstate = task->tk_runstate;
@@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running,
__entry->flags = task->tk_flags;
),
- TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf",
+ TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf",
__entry->task_id, __entry->client_id,
__entry->flags,
__entry->runstate,
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index c7bbbe794e65..464ea82e10db 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -287,11 +287,11 @@ TRACE_EVENT(writeback_queue_io,
__field(int, reason)
),
TP_fast_assign(
- unsigned long older_than_this = work->older_than_this;
+ unsigned long *older_than_this = work->older_than_this;
strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
- __entry->older = older_than_this;
+ __entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
- (jiffies - older_than_this) * 1000 / HZ : -1;
+ (jiffies - *older_than_this) * 1000 / HZ : -1;
__entry->moved = moved;
__entry->reason = work->reason;
),
diff --git a/include/trace/ftrace.h b/include/trace/ftrace.h
index 1a8b28db3775..1ee19a24cc5f 100644
--- a/include/trace/ftrace.h
+++ b/include/trace/ftrace.h
@@ -310,15 +310,12 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef __array
#define __array(type, item, len) \
do { \
- mutex_lock(&event_storage_mutex); \
+ char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- snprintf(event_storage, sizeof(event_storage), \
- "%s[%d]", #type, len); \
- ret = trace_define_field(event_call, event_storage, #item, \
+ ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), FILTER_OTHER); \
- mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a20a9b4d3871..dde8041f40d2 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -692,9 +692,13 @@ __SC_COMP(__NR_process_vm_writev, sys_process_vm_writev, \
__SYSCALL(__NR_kcmp, sys_kcmp)
#define __NR_finit_module 273
__SYSCALL(__NR_finit_module, sys_finit_module)
+#define __NR_sched_setattr 274
+__SYSCALL(__NR_sched_setattr, sys_sched_setattr)
+#define __NR_sched_getattr 275
+__SYSCALL(__NR_sched_getattr, sys_sched_getattr)
#undef __NR_syscalls
-#define __NR_syscalls 274
+#define __NR_syscalls 276
/*
* All syscalls below here should go away really,
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 3c9a833992e8..b06c8ed68707 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -619,6 +619,8 @@ struct drm_gem_open {
#define DRM_PRIME_CAP_EXPORT 0x2
#define DRM_CAP_TIMESTAMP_MONOTONIC 0x6
#define DRM_CAP_ASYNC_PAGE_FLIP 0x7
+#define DRM_CAP_CURSOR_WIDTH 0x8
+#define DRM_CAP_CURSOR_HEIGHT 0x9
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 9971c560ed9a..87792a5fee3b 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -87,6 +87,7 @@
#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
#define DRM_VMW_PARAM_3D_CAPS_SIZE 8
#define DRM_VMW_PARAM_MAX_MOB_MEMORY 9
+#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
/**
* struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 1b8a0f4c9590..b4d69092fbdb 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -558,7 +558,6 @@ static inline char *btrfs_err_str(enum btrfs_err_code err_code)
#define BTRFS_IOC_DEFAULT_SUBVOL _IOW(BTRFS_IOCTL_MAGIC, 19, __u64)
#define BTRFS_IOC_SPACE_INFO _IOWR(BTRFS_IOCTL_MAGIC, 20, \
struct btrfs_ioctl_space_args)
-#define BTRFS_IOC_GLOBAL_RSV _IOR(BTRFS_IOCTL_MAGIC, 20, __u64)
#define BTRFS_IOC_START_SYNC _IOR(BTRFS_IOCTL_MAGIC, 24, __u64)
#define BTRFS_IOC_WAIT_SYNC _IOW(BTRFS_IOCTL_MAGIC, 22, __u64)
#define BTRFS_IOC_SNAP_CREATE_V2 _IOW(BTRFS_IOCTL_MAGIC, 23, \
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 633b93cac1ed..e9a1d2d973b6 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -128,22 +128,13 @@ struct in6_flowlabel_req {
* IPV6 extension headers
*/
#if __UAPI_DEF_IPPROTO_V6
-enum {
- IPPROTO_HOPOPTS = 0, /* IPv6 hop-by-hop options */
-#define IPPROTO_HOPOPTS IPPROTO_HOPOPTS
- IPPROTO_ROUTING = 43, /* IPv6 routing header */
-#define IPPROTO_ROUTING IPPROTO_ROUTING
- IPPROTO_FRAGMENT = 44, /* IPv6 fragmentation header */
-#define IPPROTO_FRAGMENT IPPROTO_FRAGMENT
- IPPROTO_ICMPV6 = 58, /* ICMPv6 */
-#define IPPROTO_ICMPV6 IPPROTO_ICMPV6
- IPPROTO_NONE = 59, /* IPv6 no next header */
-#define IPPROTO_NONE IPPROTO_NONE
- IPPROTO_DSTOPTS = 60, /* IPv6 destination options */
-#define IPPROTO_DSTOPTS IPPROTO_DSTOPTS
- IPPROTO_MH = 135, /* IPv6 mobility header */
-#define IPPROTO_MH IPPROTO_MH
-};
+#define IPPROTO_HOPOPTS 0 /* IPv6 hop-by-hop options */
+#define IPPROTO_ROUTING 43 /* IPv6 routing header */
+#define IPPROTO_FRAGMENT 44 /* IPv6 fragmentation header */
+#define IPPROTO_ICMPV6 58 /* ICMPv6 */
+#define IPPROTO_NONE 59 /* IPv6 no next header */
+#define IPPROTO_DSTOPTS 60 /* IPv6 destination options */
+#define IPPROTO_MH 135 /* IPv6 mobility header */
#endif /* __UAPI_DEF_IPPROTO_V6 */
/*
diff --git a/include/uapi/linux/mic_ioctl.h b/include/uapi/linux/mic_ioctl.h
index 7fabba5059cf..feb0b4c0814c 100644
--- a/include/uapi/linux/mic_ioctl.h
+++ b/include/uapi/linux/mic_ioctl.h
@@ -39,7 +39,7 @@ struct mic_copy_desc {
#else
struct iovec *iov;
#endif
- int iovcnt;
+ __u32 iovcnt;
__u8 vr_idx;
__u8 update_used;
__u32 out_len;
diff --git a/include/uapi/linux/nvme.h b/include/uapi/linux/nvme.h
index 989c04e0c563..e5ab62201119 100644
--- a/include/uapi/linux/nvme.h
+++ b/include/uapi/linux/nvme.h
@@ -350,6 +350,16 @@ struct nvme_delete_queue {
__u32 rsvd11[5];
};
+struct nvme_abort_cmd {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[9];
+ __le16 sqid;
+ __u16 cid;
+ __u32 rsvd11[5];
+};
+
struct nvme_download_firmware {
__u8 opcode;
__u8 flags;
@@ -384,6 +394,7 @@ struct nvme_command {
struct nvme_download_firmware dlfw;
struct nvme_format_cmd format;
struct nvme_dsm_cmd dsm;
+ struct nvme_abort_cmd abort;
};
};
diff --git a/include/uapi/linux/spi/spidev.h b/include/uapi/linux/spi/spidev.h
index 52d9ed01855f..dd5f21e75805 100644
--- a/include/uapi/linux/spi/spidev.h
+++ b/include/uapi/linux/spi/spidev.h
@@ -42,6 +42,10 @@
#define SPI_LOOP 0x20
#define SPI_NO_CS 0x40
#define SPI_READY 0x80
+#define SPI_TX_DUAL 0x100
+#define SPI_TX_QUAD 0x200
+#define SPI_RX_DUAL 0x400
+#define SPI_RX_QUAD 0x800
/*---------------------------------------------------------------------------*/
@@ -92,7 +96,9 @@ struct spi_ioc_transfer {
__u16 delay_usecs;
__u8 bits_per_word;
__u8 cs_change;
- __u32 pad;
+ __u8 tx_nbits;
+ __u8 rx_nbits;
+ __u16 pad;
/* If the contents of 'struct spi_ioc_transfer' ever change
* incompatibly, then the ioctl number (currently 0) must change;
@@ -110,7 +116,7 @@ struct spi_ioc_transfer {
#define SPI_IOC_MESSAGE(N) _IOW(SPI_IOC_MAGIC, 0, char[SPI_MSGSIZE(N)])
-/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) */
+/* Read / Write of SPI mode (SPI_MODE_0..SPI_MODE_3) (limited to 8 bits) */
#define SPI_IOC_RD_MODE _IOR(SPI_IOC_MAGIC, 1, __u8)
#define SPI_IOC_WR_MODE _IOW(SPI_IOC_MAGIC, 1, __u8)
@@ -126,6 +132,10 @@ struct spi_ioc_transfer {
#define SPI_IOC_RD_MAX_SPEED_HZ _IOR(SPI_IOC_MAGIC, 4, __u32)
#define SPI_IOC_WR_MAX_SPEED_HZ _IOW(SPI_IOC_MAGIC, 4, __u32)
+/* Read / Write of the SPI mode field */
+#define SPI_IOC_RD_MODE32 _IOR(SPI_IOC_MAGIC, 5, __u32)
+#define SPI_IOC_WR_MODE32 _IOW(SPI_IOC_MAGIC, 5, __u32)
+
#endif /* SPIDEV_H */
diff --git a/include/uapi/xen/Kbuild b/include/uapi/xen/Kbuild
index 61257cb14653..5c459628e8c7 100644
--- a/include/uapi/xen/Kbuild
+++ b/include/uapi/xen/Kbuild
@@ -1,3 +1,5 @@
# UAPI Header export list
header-y += evtchn.h
+header-y += gntalloc.h
+header-y += gntdev.h
header-y += privcmd.h
diff --git a/include/xen/gntalloc.h b/include/uapi/xen/gntalloc.h
index 76bd58065f4f..76bd58065f4f 100644
--- a/include/xen/gntalloc.h
+++ b/include/uapi/xen/gntalloc.h
diff --git a/include/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 5304bd3c84c5..5304bd3c84c5 100644
--- a/include/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 7ad033dbc845..a5af2a26d94f 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,15 +191,11 @@ void gnttab_free_auto_xlat_frames(void);
#define gnttab_map_vaddr(map) ((void *)(map.host_virt_addr))
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
+ struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
-int gnttab_map_refs_userspace(struct gnttab_map_grant_ref *map_ops,
- struct gnttab_map_grant_ref *kmap_ops,
- struct page **pages, unsigned int count);
int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
+ struct gnttab_map_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
-int gnttab_unmap_refs_userspace(struct gnttab_unmap_grant_ref *unmap_ops,
- struct gnttab_map_grant_ref *kunmap_ops,
- struct page **pages, unsigned int count);
/* Perform a batch of grant map/copy operations. Retry every batch slot
* for which the hypervisor returns GNTST_eagain. This is typically due
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index ae665ac59c36..32ec05a6572f 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -113,13 +113,13 @@ typedef uint64_t blkif_sector_t;
* it's less than the number provided by the backend. The indirect_grefs field
* in blkif_request_indirect should be filled by the frontend with the
* grant references of the pages that are holding the indirect segments.
- * This pages are filled with an array of blkif_request_segment_aligned
- * that hold the information about the segments. The number of indirect
- * pages to use is determined by the maximum number of segments
- * a indirect request contains. Every indirect page can contain a maximum
- * of 512 segments (PAGE_SIZE/sizeof(blkif_request_segment_aligned)),
- * so to calculate the number of indirect pages to use we have to do
- * ceil(indirect_segments/512).
+ * These pages are filled with an array of blkif_request_segment that hold the
+ * information about the segments. The number of indirect pages to use is
+ * determined by the number of segments an indirect request contains. Every
+ * indirect page can contain a maximum of
+ * (PAGE_SIZE / sizeof(struct blkif_request_segment)) segments, so to
+ * calculate the number of indirect pages to use we have to do
+ * ceil(indirect_segments / (PAGE_SIZE / sizeof(struct blkif_request_segment))).
*
* If a backend does not recognize BLKIF_OP_INDIRECT, it should *not*
* create the "feature-max-indirect-segments" node!
@@ -135,13 +135,12 @@ typedef uint64_t blkif_sector_t;
#define BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST 8
-struct blkif_request_segment_aligned {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- uint16_t _pad; /* padding to make it 8 bytes, so it's cache-aligned */
-} __attribute__((__packed__));
+struct blkif_request_segment {
+ grant_ref_t gref; /* reference to I/O buffer frame */
+ /* @first_sect: first sector in frame to transfer (inclusive). */
+ /* @last_sect: last sector in frame to transfer (inclusive). */
+ uint8_t first_sect, last_sect;
+};
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
@@ -151,12 +150,7 @@ struct blkif_request_rw {
#endif
uint64_t id; /* private guest value, echoed in resp */
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment {
- grant_ref_t gref; /* reference to I/O buffer frame */
- /* @first_sect: first sector in frame to transfer (inclusive). */
- /* @last_sect: last sector in frame to transfer (inclusive). */
- uint8_t first_sect, last_sect;
- } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
} __attribute__((__packed__));
struct blkif_request_discard {
diff --git a/include/xen/interface/xencomm.h b/include/xen/interface/xencomm.h
deleted file mode 100644
index ac45e0712afa..000000000000
--- a/include/xen/interface/xencomm.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/*
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to
- * deal in the Software without restriction, including without limitation the
- * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
- * sell copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
- * DEALINGS IN THE SOFTWARE.
- *
- * Copyright (C) IBM Corp. 2006
- */
-
-#ifndef _XEN_XENCOMM_H_
-#define _XEN_XENCOMM_H_
-
-/* A xencomm descriptor is a scatter/gather list containing physical
- * addresses corresponding to a virtually contiguous memory area. The
- * hypervisor translates these physical addresses to machine addresses to copy
- * to and from the virtually contiguous area.
- */
-
-#define XENCOMM_MAGIC 0x58434F4D /* 'XCOM' */
-#define XENCOMM_INVALID (~0UL)
-
-struct xencomm_desc {
- uint32_t magic;
- uint32_t nr_addrs; /* the number of entries in address[] */
- uint64_t address[0];
-};
-
-#endif /* _XEN_XENCOMM_H_ */
diff --git a/include/xen/xencomm.h b/include/xen/xencomm.h
deleted file mode 100644
index e43b039be112..000000000000
--- a/include/xen/xencomm.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Copyright (C) IBM Corp. 2006
- *
- * Authors: Hollis Blanchard <hollisb@us.ibm.com>
- * Jerone Young <jyoung5@us.ibm.com>
- */
-
-#ifndef _LINUX_XENCOMM_H_
-#define _LINUX_XENCOMM_H_
-
-#include <xen/interface/xencomm.h>
-
-#define XENCOMM_MINI_ADDRS 3
-struct xencomm_mini {
- struct xencomm_desc _desc;
- uint64_t address[XENCOMM_MINI_ADDRS];
-};
-
-/* To avoid additionnal virt to phys conversion, an opaque structure is
- presented. */
-struct xencomm_handle;
-
-extern void xencomm_free(struct xencomm_handle *desc);
-extern struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes);
-extern struct xencomm_handle *__xencomm_map_no_alloc(void *ptr,
- unsigned long bytes, struct xencomm_mini *xc_area);
-
-#if 0
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- struct xencomm_mini xc_desc ## _base[(n)] \
- __attribute__((__aligned__(sizeof(struct xencomm_mini)))); \
- struct xencomm_mini *xc_desc = &xc_desc ## _base[0];
-#else
-/*
- * gcc bug workaround:
- * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=16660
- * gcc doesn't handle properly stack variable with
- * __attribute__((__align__(sizeof(struct xencomm_mini))))
- */
-#define XENCOMM_MINI_ALIGNED(xc_desc, n) \
- unsigned char xc_desc ## _base[((n) + 1 ) * \
- sizeof(struct xencomm_mini)]; \
- struct xencomm_mini *xc_desc = (struct xencomm_mini *) \
- ((unsigned long)xc_desc ## _base + \
- (sizeof(struct xencomm_mini) - \
- ((unsigned long)xc_desc ## _base) % \
- sizeof(struct xencomm_mini)));
-#endif
-#define xencomm_map_no_alloc(ptr, bytes) \
- ({ XENCOMM_MINI_ALIGNED(xc_desc, 1); \
- __xencomm_map_no_alloc(ptr, bytes, xc_desc); })
-
-/* provided by architecture code: */
-extern unsigned long xencomm_vtop(unsigned long vaddr);
-
-static inline void *xencomm_pa(void *ptr)
-{
- return (void *)xencomm_vtop((unsigned long)ptr);
-}
-
-#define xen_guest_handle(hnd) ((hnd).p)
-
-#endif /* _LINUX_XENCOMM_H_ */
diff --git a/init/main.c b/init/main.c
index 2fd9cef70ee8..9c7fd4c9249f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -561,7 +561,6 @@ asmlinkage void __init start_kernel(void)
init_timers();
hrtimers_init();
softirq_init();
- acpi_early_init();
timekeeping_init();
time_init();
sched_clock_postinit();
@@ -613,6 +612,7 @@ asmlinkage void __init start_kernel(void)
calibrate_delay();
pidmap_init();
anon_vma_init();
+ acpi_early_init();
#ifdef CONFIG_X86
if (efi_enabled(EFI_RUNTIME_SERVICES))
efi_enter_virtual_mode();
@@ -812,7 +812,7 @@ void __init load_default_modules(void)
static int run_init_process(const char *init_filename)
{
argv_init[0] = init_filename;
- return do_execve(init_filename,
+ return do_execve(getname_kernel(init_filename),
(const char __user *const __user *)argv_init,
(const char __user *const __user *)envp_init);
}
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 383d638340b8..5bb8bfe67149 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -22,6 +22,16 @@ static void *get_mq(ctl_table *table)
return which;
}
+static int proc_mq_dointvec(ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct ctl_table mq_table;
+ memcpy(&mq_table, table, sizeof(mq_table));
+ mq_table.data = get_mq(table);
+
+ return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
+}
+
static int proc_mq_dointvec_minmax(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
{
@@ -33,12 +43,10 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
lenp, ppos);
}
#else
+#define proc_mq_dointvec NULL
#define proc_mq_dointvec_minmax NULL
#endif
-static int msg_queues_limit_min = MIN_QUEUESMAX;
-static int msg_queues_limit_max = HARD_QUEUESMAX;
-
static int msg_max_limit_min = MIN_MSGMAX;
static int msg_max_limit_max = HARD_MSGMAX;
@@ -51,9 +59,7 @@ static ctl_table mq_sysctls[] = {
.data = &init_ipc_ns.mq_queues_max,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = proc_mq_dointvec_minmax,
- .extra1 = &msg_queues_limit_min,
- .extra2 = &msg_queues_limit_max,
+ .proc_handler = proc_mq_dointvec,
},
{
.procname = "msg_max",
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index ccf1f9fd263a..c3b31179122c 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -433,9 +433,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
error = -EACCES;
goto out_unlock;
}
- if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
- (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
- !capable(CAP_SYS_RESOURCE))) {
+
+ if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
+ !capable(CAP_SYS_RESOURCE)) {
error = -ENOSPC;
goto out_unlock;
}
diff --git a/ipc/msg.c b/ipc/msg.c
index 245db1140ad6..649853105a5d 100644
--- a/ipc/msg.c
+++ b/ipc/msg.c
@@ -901,6 +901,8 @@ long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgfl
return -EINVAL;
if (msgflg & MSG_COPY) {
+ if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT))
+ return -EINVAL;
copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
if (IS_ERR(copy))
return PTR_ERR(copy);
diff --git a/kernel/audit.c b/kernel/audit.c
index 34c5a2310fbf..3392d3e0254a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -182,7 +182,7 @@ struct audit_buffer {
struct audit_reply {
__u32 portid;
- pid_t pid;
+ struct net *net;
struct sk_buff *skb;
};
@@ -500,7 +500,7 @@ int audit_send_list(void *_dest)
{
struct audit_netlink_list *dest = _dest;
struct sk_buff *skb;
- struct net *net = get_net_ns_by_pid(dest->pid);
+ struct net *net = dest->net;
struct audit_net *aunet = net_generic(net, audit_net_id);
/* wait for parent to finish and send an ACK */
@@ -510,6 +510,7 @@ int audit_send_list(void *_dest)
while ((skb = __skb_dequeue(&dest->q)) != NULL)
netlink_unicast(aunet->nlsk, skb, dest->portid, 0);
+ put_net(net);
kfree(dest);
return 0;
@@ -543,7 +544,7 @@ out_kfree_skb:
static int audit_send_reply_thread(void *arg)
{
struct audit_reply *reply = (struct audit_reply *)arg;
- struct net *net = get_net_ns_by_pid(reply->pid);
+ struct net *net = reply->net;
struct audit_net *aunet = net_generic(net, audit_net_id);
mutex_lock(&audit_cmd_mutex);
@@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg)
/* Ignore failure. It'll only happen if the sender goes away,
because our timeout is set to infinite. */
netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0);
+ put_net(net);
kfree(reply);
return 0;
}
/**
* audit_send_reply - send an audit reply message via netlink
- * @portid: netlink port to which to send reply
+ * @request_skb: skb of request we are replying to (used to target the reply)
* @seq: sequence number
* @type: audit message type
* @done: done (last) flag
@@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg)
* Allocates an skb, builds the netlink message, and sends it to the port id.
* No failure notifications.
*/
-static void audit_send_reply(__u32 portid, int seq, int type, int done,
+static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done,
int multi, const void *payload, int size)
{
+ u32 portid = NETLINK_CB(request_skb).portid;
+ struct net *net = sock_net(NETLINK_CB(request_skb).sk);
struct sk_buff *skb;
struct task_struct *tsk;
struct audit_reply *reply = kmalloc(sizeof(struct audit_reply),
@@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done,
if (!skb)
goto out;
+ reply->net = get_net(net);
reply->portid = portid;
- reply->pid = task_pid_vnr(current);
reply->skb = skb;
tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply");
@@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb)
seq = nlmsg_hdr(skb)->nlmsg_seq;
- audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
- &af, sizeof(af));
+ audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af));
return 0;
}
@@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
s.backlog = skb_queue_len(&audit_skb_queue);
s.version = AUDIT_VERSION_LATEST;
s.backlog_wait_time = audit_backlog_wait_time;
- audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
- &s, sizeof(s));
+ audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s));
break;
}
case AUDIT_SET: {
@@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
seq, data, nlmsg_len(nlh));
break;
case AUDIT_LIST_RULES:
- err = audit_list_rules_send(NETLINK_CB(skb).portid, seq);
+ err = audit_list_rules_send(skb, seq);
break;
case AUDIT_TRIM:
audit_trim_trees();
@@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
memcpy(sig_data->ctx, ctx, len);
security_release_secctx(ctx, len);
}
- audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO,
- 0, 0, sig_data, sizeof(*sig_data) + len);
+ audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0,
+ sig_data, sizeof(*sig_data) + len);
kfree(sig_data);
break;
case AUDIT_TTY_GET: {
@@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
s.log_passwd = tsk->signal->audit_tty_log_passwd;
spin_unlock(&tsk->sighand->siglock);
- audit_send_reply(NETLINK_CB(skb).portid, seq,
- AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
+ audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
break;
}
case AUDIT_TTY_SET: {
diff --git a/kernel/audit.h b/kernel/audit.h
index 57cc64d67718..8df132214606 100644
--- a/kernel/audit.h
+++ b/kernel/audit.h
@@ -247,7 +247,7 @@ extern void audit_panic(const char *message);
struct audit_netlink_list {
__u32 portid;
- pid_t pid;
+ struct net *net;
struct sk_buff_head q;
};
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 67ccf0e7cca9..135944a7b28a 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -916,7 +916,7 @@ static int audit_tree_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *file_name)
+ const unsigned char *file_name, u32 cookie)
{
return 0;
}
diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
index 2596fac5dcb4..70b4554d2fbe 100644
--- a/kernel/audit_watch.c
+++ b/kernel/audit_watch.c
@@ -471,7 +471,7 @@ static int audit_watch_handle_event(struct fsnotify_group *group,
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, void *data, int data_type,
- const unsigned char *dname)
+ const unsigned char *dname, u32 cookie)
{
struct inode *inode;
struct audit_parent *parent;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 14a78cca384e..92062fd6cc8c 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -29,6 +29,8 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
#include "audit.h"
/*
@@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data,
/**
* audit_list_rules_send - list the audit rules
- * @portid: target portid for netlink audit messages
+ * @request_skb: skb of request we are replying to (used to target the reply)
* @seq: netlink audit message sequence (serial) number
*/
-int audit_list_rules_send(__u32 portid, int seq)
+int audit_list_rules_send(struct sk_buff *request_skb, int seq)
{
+ u32 portid = NETLINK_CB(request_skb).portid;
+ struct net *net = sock_net(NETLINK_CB(request_skb).sk);
struct task_struct *tsk;
struct audit_netlink_list *dest;
int err = 0;
@@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq)
dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
if (!dest)
return -ENOMEM;
+ dest->net = get_net(net);
dest->portid = portid;
- dest->pid = task_pid_vnr(current);
skb_queue_head_init(&dest->q);
mutex_lock(&audit_filter_mutex);
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 10176cd5956a..7aef2f4b6c64 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1719,7 +1719,7 @@ void audit_putname(struct filename *name)
struct audit_context *context = current->audit_context;
BUG_ON(!context);
- if (!context->in_syscall) {
+ if (!name->aname || !context->in_syscall) {
#if AUDIT_DEBUG == 2
printk(KERN_ERR "%s:%d(:%d): final_putname(%p)\n",
__FILE__, __LINE__, context->serial, name);
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2f46ba37f72..0c753ddd223b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -886,7 +886,9 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
* per-subsystem and moved to css->id so that lookups are
* successful until the target css is released.
*/
+ mutex_lock(&cgroup_mutex);
idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ mutex_unlock(&cgroup_mutex);
cgrp->id = -1;
call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
@@ -1566,10 +1568,10 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
mutex_lock(&cgroup_mutex);
mutex_lock(&cgroup_root_mutex);
- root_cgrp->id = idr_alloc(&root->cgroup_idr, root_cgrp,
- 0, 1, GFP_KERNEL);
- if (root_cgrp->id < 0)
+ ret = idr_alloc(&root->cgroup_idr, root_cgrp, 0, 1, GFP_KERNEL);
+ if (ret < 0)
goto unlock_drop;
+ root_cgrp->id = ret;
/* Check for name clashes with existing mounts */
ret = -EBUSY;
@@ -2763,10 +2765,7 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
*/
update_before = cgroup_serial_nr_next;
- mutex_unlock(&cgroup_mutex);
-
/* add/rm files for all cgroups created before */
- rcu_read_lock();
css_for_each_descendant_pre(css, cgroup_css(root, ss)) {
struct cgroup *cgrp = css->cgroup;
@@ -2775,23 +2774,19 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
inode = cgrp->dentry->d_inode;
dget(cgrp->dentry);
- rcu_read_unlock();
-
dput(prev);
prev = cgrp->dentry;
+ mutex_unlock(&cgroup_mutex);
mutex_lock(&inode->i_mutex);
mutex_lock(&cgroup_mutex);
if (cgrp->serial_nr < update_before && !cgroup_is_dead(cgrp))
ret = cgroup_addrm_files(cgrp, cfts, is_add);
- mutex_unlock(&cgroup_mutex);
mutex_unlock(&inode->i_mutex);
-
- rcu_read_lock();
if (ret)
break;
}
- rcu_read_unlock();
+ mutex_unlock(&cgroup_mutex);
dput(prev);
deactivate_super(sb);
return ret;
@@ -2910,9 +2905,14 @@ static void cgroup_enable_task_cg_lists(void)
* We should check if the process is exiting, otherwise
* it will race with cgroup_exit() in that the list
* entry won't be deleted though the process has exited.
+ * Do it while holding siglock so that we don't end up
+ * racing against cgroup_exit().
*/
+ spin_lock_irq(&p->sighand->siglock);
if (!(p->flags & PF_EXITING) && list_empty(&p->cg_list))
list_add(&p->cg_list, &task_css_set(p)->tasks);
+ spin_unlock_irq(&p->sighand->siglock);
+
task_unlock(p);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
@@ -4112,17 +4112,17 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
err = percpu_ref_init(&css->refcnt, css_release);
if (err)
- goto err_free;
+ goto err_free_css;
init_css(css, ss, cgrp);
err = cgroup_populate_dir(cgrp, 1 << ss->subsys_id);
if (err)
- goto err_free;
+ goto err_free_percpu_ref;
err = online_css(css);
if (err)
- goto err_free;
+ goto err_clear_dir;
dget(cgrp->dentry);
css_get(css->parent);
@@ -4138,8 +4138,11 @@ static int create_css(struct cgroup *cgrp, struct cgroup_subsys *ss)
return 0;
-err_free:
+err_clear_dir:
+ cgroup_clear_dir(css->cgroup, 1 << css->ss->subsys_id);
+err_free_percpu_ref:
percpu_ref_cancel_init(&css->refcnt);
+err_free_css:
ss->css_free(css);
return err;
}
@@ -4158,7 +4161,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
struct cgroup *cgrp;
struct cgroup_name *name;
struct cgroupfs_root *root = parent->root;
- int ssid, err = 0;
+ int ssid, err;
struct cgroup_subsys *ss;
struct super_block *sb = root->sb;
@@ -4168,19 +4171,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
return -ENOMEM;
name = cgroup_alloc_name(dentry);
- if (!name)
+ if (!name) {
+ err = -ENOMEM;
goto err_free_cgrp;
+ }
rcu_assign_pointer(cgrp->name, name);
/*
- * Temporarily set the pointer to NULL, so idr_find() won't return
- * a half-baked cgroup.
- */
- cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
- if (cgrp->id < 0)
- goto err_free_name;
-
- /*
* Only live parents can have children. Note that the liveliness
* check isn't strictly necessary because cgroup_mkdir() and
* cgroup_rmdir() are fully synchronized by i_mutex; however, do it
@@ -4189,7 +4186,17 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
*/
if (!cgroup_lock_live_group(parent)) {
err = -ENODEV;
- goto err_free_id;
+ goto err_free_name;
+ }
+
+ /*
+ * Temporarily set the pointer to NULL, so idr_find() won't return
+ * a half-baked cgroup.
+ */
+ cgrp->id = idr_alloc(&root->cgroup_idr, NULL, 1, 0, GFP_KERNEL);
+ if (cgrp->id < 0) {
+ err = -ENOMEM;
+ goto err_unlock;
}
/* Grab a reference on the superblock so the hierarchy doesn't
@@ -4221,7 +4228,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
*/
err = cgroup_create_file(dentry, S_IFDIR | mode, sb);
if (err < 0)
- goto err_unlock;
+ goto err_free_id;
lockdep_assert_held(&dentry->d_inode->i_mutex);
cgrp->serial_nr = cgroup_serial_nr_next++;
@@ -4257,12 +4264,12 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
return 0;
-err_unlock:
- mutex_unlock(&cgroup_mutex);
- /* Release the reference count that we took on the superblock */
- deactivate_super(sb);
err_free_id:
idr_remove(&root->cgroup_idr, cgrp->id);
+ /* Release the reference count that we took on the superblock */
+ deactivate_super(sb);
+err_unlock:
+ mutex_unlock(&cgroup_mutex);
err_free_name:
kfree(rcu_dereference_raw(cgrp->name));
err_free_cgrp:
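Note on the error paths above: create_css() and cgroup_create() now use one label per setup step (err_clear_dir, err_free_percpu_ref, err_free_css, err_free_id, err_unlock, err_free_name, err_free_cgrp) so each failure point unwinds exactly what has already been set up, in reverse order. A minimal user-space sketch of the same goto-unwind idiom, with hypothetical setup steps standing in for the cgroup ones:

/* Illustrative only: per-step error labels, newest step undone first. */
#include <stdlib.h>

struct obj { char *buf; char *name; };

static struct obj *obj_create(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return NULL;		/* nothing to undo yet */
	o->buf = malloc(64);
	if (!o->buf)
		goto err_free_obj;	/* undo step 1 only */
	o->name = malloc(16);
	if (!o->name)
		goto err_free_buf;	/* undo step 2, then step 1 */
	return o;

err_free_buf:
	free(o->buf);
err_free_obj:
	free(o);
	return NULL;
}

int main(void)
{
	struct obj *o = obj_create();

	if (!o)
		return 1;
	free(o->name);
	free(o->buf);
	free(o);
	return 0;
}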
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 4410ac6a55f1..e6b1b66afe52 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
* Temporarilly set tasks mems_allowed to target nodes of migration,
* so that the migration code can allocate pages on these nodes.
*
- * Call holding cpuset_mutex, so current's cpuset won't change
- * during this call, as manage_mutex holds off any cpuset_attach()
- * calls. Therefore we don't need to take task_lock around the
- * call to guarantee_online_mems(), as we know no one is changing
- * our task's cpuset.
- *
* While the mm_struct we are migrating is typically from some
* other task, the task_struct mems_allowed that we are hacking
* is for our current task, which must allocate new pages for that
@@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
+ rcu_read_lock();
mems_cs = effective_nodemask_cpuset(task_cs(tsk));
guarantee_online_mems(mems_cs, &tsk->mems_allowed);
+ rcu_read_unlock();
}
/*
@@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
task_lock(current);
cs = nearest_hardwall_ancestor(task_cs(current));
+ allowed = node_isset(node, cs->mems_allowed);
task_unlock(current);
- allowed = node_isset(node, cs->mems_allowed);
mutex_unlock(&callback_mutex);
return allowed;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 56003c6edfd3..fa0b2d4ad83c 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7856,14 +7856,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
static void __perf_event_exit_context(void *__info)
{
struct perf_event_context *ctx = __info;
- struct perf_event *event, *tmp;
+ struct perf_event *event;
perf_pmu_rotate_stop(ctx->pmu);
- list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
- __perf_remove_from_context(event);
- list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+ rcu_read_lock();
+ list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
__perf_remove_from_context(event);
+ rcu_read_unlock();
}
static void perf_event_exit_cpu_context(int cpu)
@@ -7887,11 +7887,11 @@ static void perf_event_exit_cpu(int cpu)
{
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
+ perf_event_exit_cpu_context(cpu);
+
mutex_lock(&swhash->hlist_mutex);
swevent_hlist_release(swhash);
mutex_unlock(&swhash->hlist_mutex);
-
- perf_event_exit_cpu_context(cpu);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
diff --git a/kernel/futex.c b/kernel/futex.c
index 44a1261cb9ff..08ec814ad9d2 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -234,6 +234,7 @@ static const struct futex_q futex_q_init = {
* waiting on a futex.
*/
struct futex_hash_bucket {
+ atomic_t waiters;
spinlock_t lock;
struct plist_head chain;
} ____cacheline_aligned_in_smp;
@@ -253,22 +254,37 @@ static inline void futex_get_mm(union futex_key *key)
smp_mb__after_atomic_inc();
}
-static inline bool hb_waiters_pending(struct futex_hash_bucket *hb)
+/*
+ * Reflects a new waiter being added to the waitqueue.
+ */
+static inline void hb_waiters_inc(struct futex_hash_bucket *hb)
{
#ifdef CONFIG_SMP
+ atomic_inc(&hb->waiters);
/*
- * Tasks trying to enter the critical region are most likely
- * potential waiters that will be added to the plist. Ensure
- * that wakers won't miss to-be-slept tasks in the window between
- * the wait call and the actual plist_add.
+ * Full barrier (A), see the ordering comment above.
*/
- if (spin_is_locked(&hb->lock))
- return true;
- smp_rmb(); /* Make sure we check the lock state first */
+ smp_mb__after_atomic_inc();
+#endif
+}
+
+/*
+ * Reflects a waiter being removed from the waitqueue by wakeup
+ * paths.
+ */
+static inline void hb_waiters_dec(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+ atomic_dec(&hb->waiters);
+#endif
+}
- return !plist_head_empty(&hb->chain);
+static inline int hb_waiters_pending(struct futex_hash_bucket *hb)
+{
+#ifdef CONFIG_SMP
+ return atomic_read(&hb->waiters);
#else
- return true;
+ return 1;
#endif
}
@@ -954,6 +970,7 @@ static void __unqueue_futex(struct futex_q *q)
hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock);
plist_del(&q->list, &hb->chain);
+ hb_waiters_dec(hb);
}
/*
@@ -1257,7 +1274,9 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
*/
if (likely(&hb1->chain != &hb2->chain)) {
plist_del(&q->list, &hb1->chain);
+ hb_waiters_dec(hb1);
plist_add(&q->list, &hb2->chain);
+ hb_waiters_inc(hb2);
q->lock_ptr = &hb2->lock;
}
get_futex_key_refs(key2);
@@ -1600,6 +1619,17 @@ static inline struct futex_hash_bucket *queue_lock(struct futex_q *q)
struct futex_hash_bucket *hb;
hb = hash_futex(&q->key);
+
+ /*
+ * Increment the counter before taking the lock so that
+ * a potential waker won't miss a to-be-slept task that is
+ * waiting for the spinlock. This is safe as all queue_lock()
+ * users end up calling queue_me(). Similarly, for housekeeping,
+ * decrement the counter at queue_unlock() when some error has
+ * occurred and we don't end up adding the task to the list.
+ */
+ hb_waiters_inc(hb);
+
q->lock_ptr = &hb->lock;
spin_lock(&hb->lock); /* implies MB (A) */
@@ -1611,6 +1641,7 @@ queue_unlock(struct futex_hash_bucket *hb)
__releases(&hb->lock)
{
spin_unlock(&hb->lock);
+ hb_waiters_dec(hb);
}
/**
@@ -2342,6 +2373,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* Unqueue the futex_q and determine which it was.
*/
plist_del(&q->list, &hb->chain);
+ hb_waiters_dec(hb);
/* Handle spurious wakeups gracefully */
ret = -EWOULDBLOCK;
@@ -2875,6 +2907,7 @@ static int __init futex_init(void)
futex_cmpxchg_enabled = 1;
for (i = 0; i < futex_hashsize; i++) {
+ atomic_set(&futex_queues[i].waiters, 0);
plist_head_init(&futex_queues[i].chain);
spin_lock_init(&futex_queues[i].lock);
}
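The futex change above replaces the old spin_is_locked() heuristic with an explicit per-bucket waiter count: hb_waiters_inc() is called before the waiter takes the bucket lock in queue_lock(), hb_waiters_dec() on every removal path (unqueue, requeue, queue_unlock), and hb_waiters_pending() lets the wake side skip buckets that have no waiters. A hedged user-space sketch of the same count-before-lock idea in C11; the bucket and function names are made up and this is not the kernel implementation:

/* Sketch of the waiter-counting idea; needs -pthread to link. */
#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>

struct bucket {
	atomic_int waiters;		/* like futex_hash_bucket::waiters */
	pthread_mutex_t lock;
};

static void waiter_enqueue(struct bucket *b)
{
	/* bump the count before taking the lock, so a concurrent waker
	 * sees us even while we are still waiting for the lock */
	atomic_fetch_add(&b->waiters, 1);
	pthread_mutex_lock(&b->lock);
	/* ... add ourselves to the wait list here ... */
	pthread_mutex_unlock(&b->lock);
}

static void waiter_dequeue(struct bucket *b)
{
	pthread_mutex_lock(&b->lock);
	/* ... remove ourselves from the wait list here ... */
	pthread_mutex_unlock(&b->lock);
	atomic_fetch_sub(&b->waiters, 1);
}

static int wake_one(struct bucket *b)
{
	if (atomic_load(&b->waiters) == 0)
		return 0;		/* cheap skip, like hb_waiters_pending() */
	pthread_mutex_lock(&b->lock);
	/* ... wake the first queued waiter, if any ... */
	pthread_mutex_unlock(&b->lock);
	return 1;
}

int main(void)
{
	struct bucket b = { .waiters = 0, .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("woke: %d\n", wake_one(&b));	/* 0: no waiters, lock skipped */
	waiter_enqueue(&b);
	printf("woke: %d\n", wake_one(&b));	/* 1: lock taken, list checked */
	waiter_dequeue(&b);
	return 0;
}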
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index 4a1fef09f658..07cbdfea9ae2 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -40,6 +40,7 @@ config IRQ_EDGE_EOI_HANDLER
# Generic configurable interrupt chip implementation
config GENERIC_IRQ_CHIP
bool
+ select IRQ_DOMAIN
# Generic irq_domain hw <--> linux irq number translation
config IRQ_DOMAIN
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
index bd8e788d71e0..1ef0606797c9 100644
--- a/kernel/irq/devres.c
+++ b/kernel/irq/devres.c
@@ -73,6 +73,51 @@ int devm_request_threaded_irq(struct device *dev, unsigned int irq,
EXPORT_SYMBOL(devm_request_threaded_irq);
/**
+ * devm_request_any_context_irq - allocate an interrupt line for a managed device
+ * @dev: device to request interrupt for
+ * @irq: Interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs
+ * @thread_fn: function to be called in a threaded interrupt context. NULL
+ * for devices which handle everything in @handler
+ * @irqflags: Interrupt type flags
+ * @devname: An ascii name for the claiming device
+ * @dev_id: A cookie passed back to the handler function
+ *
+ * Except for the extra @dev argument, this function takes the
+ * same arguments and performs the same function as
+ * request_any_context_irq(). IRQs requested with this function will be
+ * automatically freed on driver detach.
+ *
+ * If an IRQ allocated with this function needs to be freed
+ * separately, devm_free_irq() must be used.
+ */
+int devm_request_any_context_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, unsigned long irqflags,
+ const char *devname, void *dev_id)
+{
+ struct irq_devres *dr;
+ int rc;
+
+ dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+ GFP_KERNEL);
+ if (!dr)
+ return -ENOMEM;
+
+ rc = request_any_context_irq(irq, handler, irqflags, devname, dev_id);
+ if (rc) {
+ devres_free(dr);
+ return rc;
+ }
+
+ dr->irq = irq;
+ dr->dev_id = dev_id;
+ devres_add(dev, dr);
+
+ return 0;
+}
+EXPORT_SYMBOL(devm_request_any_context_irq);
+
+/**
* devm_free_irq - free an interrupt
* @dev: device to free interrupt for
* @irq: Interrupt line to free
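One plausible way a driver probe function might use the new devm_request_any_context_irq() helper is sketched below. This is not self-contained code: it assumes the usual platform-driver headers and boilerplate, and struct foo, foo_irq_handler() and foo_probe() are hypothetical names. The point is that the handler works whether the parent irqchip delivers a hard or a nested (threaded) interrupt, and the IRQ is released automatically on driver detach.

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	struct foo *foo = dev_id;

	/* ... acknowledge and handle the interrupt ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo *foo;
	int irq, ret;

	foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return -ENOMEM;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	/* freed automatically on detach; no explicit free_irq() needed */
	ret = devm_request_any_context_irq(&pdev->dev, irq, foo_irq_handler,
					   0, dev_name(&pdev->dev), foo);
	if (ret)
		return ret;

	return 0;
}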
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 192a302d6cfd..8ab8e9390297 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -274,6 +274,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
+EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index cf68bb36fe58..f14033700c25 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -10,6 +10,7 @@
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_irq.h>
#include <linux/topology.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 481a13c43b17..d3bf660cb57f 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
static void wake_threads_waitq(struct irq_desc *desc)
{
- if (atomic_dec_and_test(&desc->threads_active) &&
- waitqueue_active(&desc->wait_for_threads))
+ if (atomic_dec_and_test(&desc->threads_active))
wake_up(&desc->wait_for_threads);
}
diff --git a/kernel/kmod.c b/kernel/kmod.c
index b086006c59e7..6b375af4958d 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -239,7 +239,7 @@ static int ____call_usermodehelper(void *data)
commit_creds(new);
- retval = do_execve(sub_info->path,
+ retval = do_execve(getname_kernel(sub_info->path),
(const char __user *const __user *)sub_info->argv,
(const char __user *const __user *)sub_info->envp);
if (!retval)
diff --git a/kernel/power/console.c b/kernel/power/console.c
index eacb8bd8cab4..aba9c545a0e3 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -9,6 +9,7 @@
#include <linux/kbd_kern.h>
#include <linux/vt.h>
#include <linux/module.h>
+#include <linux/slab.h>
#include "power.h"
#define SUSPEND_CONSOLE (MAX_NR_CONSOLES-1)
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index b1d255f04135..4dae9cbe9259 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1076,7 +1076,6 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
next_seq = log_next_seq;
len = 0;
- prev = 0;
while (len >= 0 && seq < next_seq) {
struct printk_log *msg = log_from_idx(idx);
int textlen;
@@ -2788,7 +2787,6 @@ bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
next_idx = idx;
l = 0;
- prev = 0;
while (seq < dumper->next_seq) {
struct printk_log *msg = log_from_idx(idx);
diff --git a/kernel/profile.c b/kernel/profile.c
index 6631e1ef55ab..ebdd9c1a86b4 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -549,14 +549,14 @@ static int create_hash_tables(void)
struct page *page;
page = alloc_pages_exact_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
0);
if (!page)
goto out_cleanup;
per_cpu(cpu_profile_hits, cpu)[1]
= (struct profile_hit *)page_address(page);
page = alloc_pages_exact_node(node,
- GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+ GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
0);
if (!page)
goto out_cleanup;
diff --git a/kernel/sched/clock.c b/kernel/sched/clock.c
index 43c2bcc35761..b30a2924ef14 100644
--- a/kernel/sched/clock.c
+++ b/kernel/sched/clock.c
@@ -301,14 +301,14 @@ u64 sched_clock_cpu(int cpu)
if (unlikely(!sched_clock_running))
return 0ull;
- preempt_disable();
+ preempt_disable_notrace();
scd = cpu_sdc(cpu);
if (cpu != smp_processor_id())
clock = sched_clock_remote(scd);
else
clock = sched_clock_local(scd);
- preempt_enable();
+ preempt_enable_notrace();
return clock;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b46131ef6aab..f5c6635b806c 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1952,7 +1952,7 @@ static int dl_overflow(struct task_struct *p, int policy,
{
struct dl_bw *dl_b = dl_bw_of(task_cpu(p));
- u64 period = attr->sched_period;
+ u64 period = attr->sched_period ?: attr->sched_deadline;
u64 runtime = attr->sched_runtime;
u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
int cpus, err = -1;
@@ -3338,6 +3338,15 @@ recheck:
return -EPERM;
}
+ /*
+ * Can't set/change SCHED_DEADLINE policy at all for now
+ * (safest behavior); in the future we would like to allow
+ * unprivileged DL tasks to increase their relative deadline
+ * or reduce their runtime (both ways reducing utilization)
+ */
+ if (dl_policy(policy))
+ return -EPERM;
+
/*
* Treat SCHED_IDLE as nice 20. Only allow a switch to
* SCHED_NORMAL if the RLIMIT_NICE would normally permit it.
@@ -3661,13 +3670,14 @@ SYSCALL_DEFINE2(sched_setparam, pid_t, pid, struct sched_param __user *, param)
* @pid: the pid in question.
* @uattr: structure containing the extended parameters.
*/
-SYSCALL_DEFINE2(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr)
+SYSCALL_DEFINE3(sched_setattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, flags)
{
struct sched_attr attr;
struct task_struct *p;
int retval;
- if (!uattr || pid < 0)
+ if (!uattr || pid < 0 || flags)
return -EINVAL;
if (sched_copy_attr(uattr, &attr))
@@ -3786,7 +3796,7 @@ static int sched_read_attr(struct sched_attr __user *uattr,
attr->size = usize;
}
- ret = copy_to_user(uattr, attr, usize);
+ ret = copy_to_user(uattr, attr, attr->size);
if (ret)
return -EFAULT;
@@ -3804,8 +3814,8 @@ err_size:
* @uattr: structure containing the extended parameters.
* @size: sizeof(attr) for fwd/bwd comp.
*/
-SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
- unsigned int, size)
+SYSCALL_DEFINE4(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
+ unsigned int, size, unsigned int, flags)
{
struct sched_attr attr = {
.size = sizeof(struct sched_attr),
@@ -3814,7 +3824,7 @@ SYSCALL_DEFINE3(sched_getattr, pid_t, pid, struct sched_attr __user *, uattr,
int retval;
if (!uattr || pid < 0 || size > PAGE_SIZE ||
- size < SCHED_ATTR_SIZE_VER0)
+ size < SCHED_ATTR_SIZE_VER0 || flags)
return -EINVAL;
rcu_read_lock();
@@ -7422,6 +7432,7 @@ static int sched_dl_global_constraints(void)
u64 period = global_rt_period();
u64 new_bw = to_ratio(period, runtime);
int cpu, ret = 0;
+ unsigned long flags;
/*
* Here we want to check the bandwidth not being set to some
@@ -7435,10 +7446,10 @@ static int sched_dl_global_constraints(void)
for_each_possible_cpu(cpu) {
struct dl_bw *dl_b = dl_bw_of(cpu);
- raw_spin_lock(&dl_b->lock);
+ raw_spin_lock_irqsave(&dl_b->lock, flags);
if (new_bw < dl_b->total_bw)
ret = -EBUSY;
- raw_spin_unlock(&dl_b->lock);
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
if (ret)
break;
@@ -7451,6 +7462,7 @@ static void sched_dl_do_global(void)
{
u64 new_bw = -1;
int cpu;
+ unsigned long flags;
def_dl_bandwidth.dl_period = global_rt_period();
def_dl_bandwidth.dl_runtime = global_rt_runtime();
@@ -7464,9 +7476,9 @@ static void sched_dl_do_global(void)
for_each_possible_cpu(cpu) {
struct dl_bw *dl_b = dl_bw_of(cpu);
- raw_spin_lock(&dl_b->lock);
+ raw_spin_lock_irqsave(&dl_b->lock, flags);
dl_b->bw = new_bw;
- raw_spin_unlock(&dl_b->lock);
+ raw_spin_unlock_irqrestore(&dl_b->lock, flags);
}
}
@@ -7475,7 +7487,8 @@ static int sched_rt_global_validate(void)
if (sysctl_sched_rt_period <= 0)
return -EINVAL;
- if (sysctl_sched_rt_runtime > sysctl_sched_rt_period)
+ if ((sysctl_sched_rt_runtime != RUNTIME_INF) &&
+ (sysctl_sched_rt_runtime > sysctl_sched_rt_period))
return -EINVAL;
return 0;
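The dl_overflow() hunk above uses GNU C's two-operand conditional, a ?: b, which yields a unless a evaluates to zero, so an unset sched_period falls back to sched_deadline. A trivial illustration with made-up values:

/* GNU C extension: a ?: b evaluates a once and uses b only if a is 0. */
#include <stdio.h>

int main(void)
{
	unsigned long long sched_period = 0, sched_deadline = 1000000;
	unsigned long long period = sched_period ?: sched_deadline;

	printf("%llu\n", period);	/* prints 1000000 */
	return 0;
}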
diff --git a/kernel/sched/cpudeadline.c b/kernel/sched/cpudeadline.c
index 045fc74e3f09..5b9bb42b2d47 100644
--- a/kernel/sched/cpudeadline.c
+++ b/kernel/sched/cpudeadline.c
@@ -70,7 +70,7 @@ static void cpudl_heapify(struct cpudl *cp, int idx)
static void cpudl_change_key(struct cpudl *cp, int idx, u64 new_dl)
{
- WARN_ON(idx > num_present_cpus() || idx == IDX_INVALID);
+ WARN_ON(idx == IDX_INVALID || !cpu_present(idx));
if (dl_time_before(new_dl, cp->elements[idx].dl)) {
cp->elements[idx].dl = new_dl;
@@ -117,7 +117,7 @@ int cpudl_find(struct cpudl *cp, struct task_struct *p,
}
out:
- WARN_ON(best_cpu > num_present_cpus() && best_cpu != -1);
+ WARN_ON(best_cpu != -1 && !cpu_present(best_cpu));
return best_cpu;
}
@@ -137,7 +137,7 @@ void cpudl_set(struct cpudl *cp, int cpu, u64 dl, int is_valid)
int old_idx, new_cpu;
unsigned long flags;
- WARN_ON(cpu > num_present_cpus());
+ WARN_ON(!cpu_present(cpu));
raw_spin_lock_irqsave(&cp->lock, flags);
old_idx = cp->cpu_to_idx[cpu];
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 0dd5e0971a07..6e79b3faa4cd 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -121,7 +121,7 @@ static inline void dl_clear_overload(struct rq *rq)
static void update_dl_migration(struct dl_rq *dl_rq)
{
- if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_total > 1) {
+ if (dl_rq->dl_nr_migratory && dl_rq->dl_nr_running > 1) {
if (!dl_rq->overloaded) {
dl_set_overload(rq_of_dl_rq(dl_rq));
dl_rq->overloaded = 1;
@@ -135,9 +135,7 @@ static void update_dl_migration(struct dl_rq *dl_rq)
static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
- dl_rq = &rq_of_dl_rq(dl_rq)->dl;
- dl_rq->dl_nr_total++;
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory++;
@@ -147,9 +145,7 @@ static void inc_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
static void dec_dl_migration(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
{
struct task_struct *p = dl_task_of(dl_se);
- dl_rq = &rq_of_dl_rq(dl_rq)->dl;
- dl_rq->dl_nr_total--;
if (p->nr_cpus_allowed > 1)
dl_rq->dl_nr_migratory--;
@@ -566,6 +562,8 @@ int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
return 1;
}
+extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
+
/*
* Update the current task's runtime statistics (provided it is still
* a -deadline task and has not been removed from the dl_rq).
@@ -629,11 +627,13 @@ static void update_curr_dl(struct rq *rq)
struct rt_rq *rt_rq = &rq->rt;
raw_spin_lock(&rt_rq->rt_runtime_lock);
- rt_rq->rt_time += delta_exec;
/*
* We'll let actual RT tasks worry about the overflow here, we
- * have our own CBS to keep us inline -- see above.
+ * have our own CBS to keep us inline; only account when RT
+ * bandwidth is relevant.
*/
+ if (sched_rt_bandwidth_account(rt_rq))
+ rt_rq->rt_time += delta_exec;
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
}
@@ -717,6 +717,7 @@ void inc_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_prio(prio));
dl_rq->dl_nr_running++;
+ inc_nr_running(rq_of_dl_rq(dl_rq));
inc_dl_deadline(dl_rq, deadline);
inc_dl_migration(dl_se, dl_rq);
@@ -730,6 +731,7 @@ void dec_dl_tasks(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
WARN_ON(!dl_prio(prio));
WARN_ON(!dl_rq->dl_nr_running);
dl_rq->dl_nr_running--;
+ dec_nr_running(rq_of_dl_rq(dl_rq));
dec_dl_deadline(dl_rq, dl_se->deadline);
dec_dl_migration(dl_se, dl_rq);
@@ -836,8 +838,6 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
enqueue_pushable_dl_task(rq, p);
-
- inc_nr_running(rq);
}
static void __dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
@@ -850,8 +850,6 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
{
update_curr_dl(rq);
__dequeue_task_dl(rq, p, flags);
-
- dec_nr_running(rq);
}
/*
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 966cc2bfcb77..9b4c4f320130 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1757,6 +1757,8 @@ void task_numa_work(struct callback_head *work)
start = end;
if (pages <= 0)
goto out;
+
+ cond_resched();
} while (end != vma->vm_end);
}
@@ -6999,15 +7001,15 @@ static void switched_from_fair(struct rq *rq, struct task_struct *p)
struct cfs_rq *cfs_rq = cfs_rq_of(se);
/*
- * Ensure the task's vruntime is normalized, so that when its
+ * Ensure the task's vruntime is normalized, so that when it's
* switched back to the fair class the enqueue_entity(.flags=0) will
* do the right thing.
*
- * If it was on_rq, then the dequeue_entity(.flags=0) will already
- * have normalized the vruntime, if it was !on_rq, then only when
+ * If it's on_rq, then the dequeue_entity(.flags=0) will already
+ * have normalized the vruntime, if it's !on_rq, then only when
* the task is sleeping will it still have non-normalized vruntime.
*/
- if (!se->on_rq && p->state != TASK_RUNNING) {
+ if (!p->on_rq && p->state != TASK_RUNNING) {
/*
* Fix up our vruntime so that the current sleep doesn't
* cause 'unlimited' sleep bonus.
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index a2740b775b45..1999021042c7 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -538,6 +538,14 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
#endif /* CONFIG_RT_GROUP_SCHED */
+bool sched_rt_bandwidth_account(struct rt_rq *rt_rq)
+{
+ struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
+
+ return (hrtimer_active(&rt_b->rt_period_timer) ||
+ rt_rq->rt_time < rt_b->rt_runtime);
+}
+
#ifdef CONFIG_SMP
/*
* We ran out of runtime, see if we can borrow some from our neighbours.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c2119fd20f8b..f964add50f38 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -462,7 +462,6 @@ struct dl_rq {
} earliest_dl;
unsigned long dl_nr_migratory;
- unsigned long dl_nr_total;
int overloaded;
/*
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 84571e09c907..01fbae5b97b7 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -293,7 +293,7 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
*/
smp_call_function_single(min(cpu1, cpu2),
&irq_cpu_stop_queue_work,
- &call_args, 0);
+ &call_args, 1);
lg_local_unlock(&stop_cpus_lock);
preempt_enable();
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 7a925ba456fb..a6a5bf53e86d 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -51,7 +51,13 @@
* HZ shrinks, so values greater than 8 overflow 32bits when
* HZ=100.
*/
+#if HZ < 34
+#define JIFFIES_SHIFT 6
+#elif HZ < 67
+#define JIFFIES_SHIFT 7
+#else
#define JIFFIES_SHIFT 8
+#endif
static cycle_t jiffies_read(struct clocksource *cs)
{
diff --git a/kernel/time/sched_clock.c b/kernel/time/sched_clock.c
index 0abb36464281..4d23dc4d8139 100644
--- a/kernel/time/sched_clock.c
+++ b/kernel/time/sched_clock.c
@@ -116,20 +116,42 @@ static enum hrtimer_restart sched_clock_poll(struct hrtimer *hrt)
void __init sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
+ u64 res, wrap, new_mask, new_epoch, cyc, ns;
+ u32 new_mult, new_shift;
+ ktime_t new_wrap_kt;
unsigned long r;
- u64 res, wrap;
char r_unit;
if (cd.rate > rate)
return;
WARN_ON(!irqs_disabled());
- read_sched_clock = read;
- sched_clock_mask = CLOCKSOURCE_MASK(bits);
- cd.rate = rate;
/* calculate the mult/shift to convert counter ticks to ns. */
- clocks_calc_mult_shift(&cd.mult, &cd.shift, rate, NSEC_PER_SEC, 3600);
+ clocks_calc_mult_shift(&new_mult, &new_shift, rate, NSEC_PER_SEC, 3600);
+
+ new_mask = CLOCKSOURCE_MASK(bits);
+
+ /* calculate how many ns until we wrap */
+ wrap = clocks_calc_max_nsecs(new_mult, new_shift, 0, new_mask);
+ new_wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
+
+ /* update epoch for new counter and update epoch_ns from old counter*/
+ new_epoch = read();
+ cyc = read_sched_clock();
+ ns = cd.epoch_ns + cyc_to_ns((cyc - cd.epoch_cyc) & sched_clock_mask,
+ cd.mult, cd.shift);
+
+ raw_write_seqcount_begin(&cd.seq);
+ read_sched_clock = read;
+ sched_clock_mask = new_mask;
+ cd.rate = rate;
+ cd.wrap_kt = new_wrap_kt;
+ cd.mult = new_mult;
+ cd.shift = new_shift;
+ cd.epoch_cyc = new_epoch;
+ cd.epoch_ns = ns;
+ raw_write_seqcount_end(&cd.seq);
r = rate;
if (r >= 4000000) {
@@ -141,22 +163,12 @@ void __init sched_clock_register(u64 (*read)(void), int bits,
} else
r_unit = ' ';
- /* calculate how many ns until we wrap */
- wrap = clocks_calc_max_nsecs(cd.mult, cd.shift, 0, sched_clock_mask);
- cd.wrap_kt = ns_to_ktime(wrap - (wrap >> 3));
-
/* calculate the ns resolution of this counter */
- res = cyc_to_ns(1ULL, cd.mult, cd.shift);
+ res = cyc_to_ns(1ULL, new_mult, new_shift);
+
pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lluns\n",
bits, r, r_unit, res, wrap);
- update_sched_clock();
-
- /*
- * Ensure that sched_clock() starts off at 0ns
- */
- cd.epoch_ns = 0;
-
/* Enable IRQ time accounting if we have a fast enough sched_clock */
if (irqtime > 0 || (irqtime == -1 && rate >= 1000000))
enable_sched_clock_irqtime();
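For reference, sched_clock_register() converts counter ticks to nanoseconds with cyc_to_ns(cyc, mult, shift), i.e. (cyc * mult) >> shift, and the rework above computes the new mult, shift, mask and wrap time up front before publishing them. A small worked example with hand-picked values; the kernel derives mult and shift via clocks_calc_mult_shift(), so these numbers are illustrative only:

/* cyc_to_ns()-style fixed-point conversion: ns = (cyc * mult) >> shift. */
#include <stdint.h>
#include <stdio.h>

static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* 1 MHz counter: one tick is 1000 ns.  With shift = 10,
	 * mult = 1000 << 10 makes the conversion exact. */
	uint32_t shift = 10, mult = 1000u << shift;

	printf("%llu ns\n", (unsigned long long)cyc_to_ns(5, mult, shift));
	/* prints: 5000 ns */
	return 0;
}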
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 43780ab5e279..98977a57ac72 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -756,6 +756,7 @@ out:
static void tick_broadcast_clear_oneshot(int cpu)
{
cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
+ cpumask_clear_cpu(cpu, tick_broadcast_pending_mask);
}
static void tick_broadcast_init_next_event(struct cpumask *mask,
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 294b8a271a04..fc4da2d97f9b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2397,6 +2397,13 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
write &= RB_WRITE_MASK;
tail = write - length;
+ /*
+ * If this is the first commit on the page, then it has the same
+ * timestamp as the page itself.
+ */
+ if (!tail)
+ delta = 0;
+
/* See if we shot pass the end of this buffer page */
if (unlikely(write > BUF_PAGE_SIZE))
return rb_move_tail(cpu_buffer, length, tail,
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index e71ffd4eccb5..7b16d40bd64d 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -27,12 +27,6 @@
DEFINE_MUTEX(event_mutex);
-DEFINE_MUTEX(event_storage_mutex);
-EXPORT_SYMBOL_GPL(event_storage_mutex);
-
-char event_storage[EVENT_STORAGE_SIZE];
-EXPORT_SYMBOL_GPL(event_storage);
-
LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_common_fields);
@@ -1777,6 +1771,16 @@ static void trace_module_add_events(struct module *mod)
{
struct ftrace_event_call **call, **start, **end;
+ if (!mod->num_trace_events)
+ return;
+
+ /* Don't add infrastructure for mods without tracepoints */
+ if (trace_module_has_bad_taint(mod)) {
+ pr_err("%s: module has bad taint, not creating trace events\n",
+ mod->name);
+ return;
+ }
+
start = mod->trace_events;
end = mod->trace_events + mod->num_trace_events;
diff --git a/kernel/trace/trace_export.c b/kernel/trace/trace_export.c
index 7c3e3e72e2b6..ee0a5098ac43 100644
--- a/kernel/trace/trace_export.c
+++ b/kernel/trace/trace_export.c
@@ -95,15 +95,12 @@ static void __always_unused ____ftrace_check_##name(void) \
#undef __array
#define __array(type, item, len) \
do { \
+ char *type_str = #type"["__stringify(len)"]"; \
BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- mutex_lock(&event_storage_mutex); \
- snprintf(event_storage, sizeof(event_storage), \
- "%s[%d]", #type, len); \
- ret = trace_define_field(event_call, event_storage, #item, \
+ ret = trace_define_field(event_call, type_str, #item, \
offsetof(typeof(field), item), \
sizeof(field.item), \
is_signed_type(type), filter_type); \
- mutex_unlock(&event_storage_mutex); \
if (ret) \
return ret; \
} while (0);
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
index 29f26540e9c9..031cc5655a51 100644
--- a/kernel/tracepoint.c
+++ b/kernel/tracepoint.c
@@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter)
EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
#ifdef CONFIG_MODULES
+bool trace_module_has_bad_taint(struct module *mod)
+{
+ return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP));
+}
+
static int tracepoint_module_coming(struct module *mod)
{
struct tp_module *tp_mod, *iter;
@@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod)
* module headers (for forced load), to make sure we don't cause a crash.
* Staging and out-of-tree GPL modules are fine.
*/
- if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)))
+ if (trace_module_has_bad_taint(mod))
return 0;
mutex_lock(&tracepoints_mutex);
tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL);
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 240fb62cf394..dd06439b9c84 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -225,7 +225,7 @@ static u32 map_id_up(struct uid_gid_map *map, u32 id)
*
* When there is no mapping defined for the user-namespace uid
* pair INVALID_UID is returned. Callers are expected to test
- * for and handle handle INVALID_UID being returned. INVALID_UID
+ * for and handle INVALID_UID being returned. INVALID_UID
* may be tested for using uid_valid().
*/
kuid_t make_kuid(struct user_namespace *ns, uid_t uid)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 82ef9f3b7473..193e977a10ea 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1851,6 +1851,12 @@ static void destroy_worker(struct worker *worker)
if (worker->flags & WORKER_IDLE)
pool->nr_idle--;
+ /*
+ * Once WORKER_DIE is set, the kworker may destroy itself at any
+ * point. Pin to ensure the task stays until we're done with it.
+ */
+ get_task_struct(worker->task);
+
list_del_init(&worker->entry);
worker->flags |= WORKER_DIE;
@@ -1859,6 +1865,7 @@ static void destroy_worker(struct worker *worker)
spin_unlock_irq(&pool->lock);
kthread_stop(worker->task);
+ put_task_struct(worker->task);
kfree(worker);
spin_lock_irq(&pool->lock);
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index dbf94a7d25a8..a48abeac753f 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -119,7 +119,7 @@ menu "Compile-time checks and compiler options"
config DEBUG_INFO
bool "Compile the kernel with debug info"
- depends on DEBUG_KERNEL
+ depends on DEBUG_KERNEL && !COMPILE_TEST
help
If you say Y here the resulting kernel image will include
debugging info resulting in a larger kernel image.
diff --git a/lib/Makefile b/lib/Makefile
index 126b34f2eb16..48140e3ba73f 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
obj-$(CONFIG_CHECK_SIGNATURE) += check_signature.o
obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
+GCOV_PROFILE_hweight.o := n
CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS))
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 2defd1308b04..98f2d7e91a91 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -424,111 +424,134 @@ void debug_dma_dump_mappings(struct device *dev)
EXPORT_SYMBOL(debug_dma_dump_mappings);
/*
- * For each page mapped (initial page in the case of
- * dma_alloc_coherent/dma_map_{single|page}, or each page in a
- * scatterlist) insert into this tree using the pfn as the key. At
+ * For each mapping (initial cacheline in the case of
+ * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
+ * scatterlist, or the cacheline specified in dma_map_single) insert
+ * into this tree using the cacheline as the key. At
* dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
- * the pfn already exists at insertion time add a tag as a reference
+ * the entry already exists at insertion time add a tag as a reference
* count for the overlapping mappings. For now, the overlap tracking
- * just ensures that 'unmaps' balance 'maps' before marking the pfn
- * idle, but we should also be flagging overlaps as an API violation.
+ * just ensures that 'unmaps' balance 'maps' before marking the
+ * cacheline idle, but we should also be flagging overlaps as an API
+ * violation.
*
* Memory usage is mostly constrained by the maximum number of available
* dma-debug entries in that we need a free dma_debug_entry before
- * inserting into the tree. In the case of dma_map_{single|page} and
- * dma_alloc_coherent there is only one dma_debug_entry and one pfn to
- * track per event. dma_map_sg(), on the other hand,
- * consumes a single dma_debug_entry, but inserts 'nents' entries into
- * the tree.
+ * inserting into the tree. In the case of dma_map_page and
+ * dma_alloc_coherent there is only one dma_debug_entry and one
+ * dma_active_cacheline entry to track per event. dma_map_sg(), on the
+ * other hand, consumes a single dma_debug_entry, but inserts 'nents'
+ * entries into the tree.
*
* At any time debug_dma_assert_idle() can be called to trigger a
- * warning if the given page is in the active set.
+ * warning if any cachelines in the given page are in the active set.
*/
-static RADIX_TREE(dma_active_pfn, GFP_NOWAIT);
+static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
static DEFINE_SPINLOCK(radix_lock);
-#define ACTIVE_PFN_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
+#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
+#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
-static int active_pfn_read_overlap(unsigned long pfn)
+static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
+{
+ return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
+ (entry->offset >> L1_CACHE_SHIFT);
+}
+
+static int active_cacheline_read_overlap(phys_addr_t cln)
{
int overlap = 0, i;
for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
- if (radix_tree_tag_get(&dma_active_pfn, pfn, i))
+ if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
overlap |= 1 << i;
return overlap;
}
-static int active_pfn_set_overlap(unsigned long pfn, int overlap)
+static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
{
int i;
- if (overlap > ACTIVE_PFN_MAX_OVERLAP || overlap < 0)
+ if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
return overlap;
for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
if (overlap & 1 << i)
- radix_tree_tag_set(&dma_active_pfn, pfn, i);
+ radix_tree_tag_set(&dma_active_cacheline, cln, i);
else
- radix_tree_tag_clear(&dma_active_pfn, pfn, i);
+ radix_tree_tag_clear(&dma_active_cacheline, cln, i);
return overlap;
}
-static void active_pfn_inc_overlap(unsigned long pfn)
+static void active_cacheline_inc_overlap(phys_addr_t cln)
{
- int overlap = active_pfn_read_overlap(pfn);
+ int overlap = active_cacheline_read_overlap(cln);
- overlap = active_pfn_set_overlap(pfn, ++overlap);
+ overlap = active_cacheline_set_overlap(cln, ++overlap);
/* If we overflowed the overlap counter then we're potentially
* leaking dma-mappings. Otherwise, if maps and unmaps are
* balanced then this overflow may cause false negatives in
- * debug_dma_assert_idle() as the pfn may be marked idle
+ * debug_dma_assert_idle() as the cacheline may be marked idle
* prematurely.
*/
- WARN_ONCE(overlap > ACTIVE_PFN_MAX_OVERLAP,
- "DMA-API: exceeded %d overlapping mappings of pfn %lx\n",
- ACTIVE_PFN_MAX_OVERLAP, pfn);
+ WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
+ "DMA-API: exceeded %d overlapping mappings of cacheline %pa\n",
+ ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
}
-static int active_pfn_dec_overlap(unsigned long pfn)
+static int active_cacheline_dec_overlap(phys_addr_t cln)
{
- int overlap = active_pfn_read_overlap(pfn);
+ int overlap = active_cacheline_read_overlap(cln);
- return active_pfn_set_overlap(pfn, --overlap);
+ return active_cacheline_set_overlap(cln, --overlap);
}
-static int active_pfn_insert(struct dma_debug_entry *entry)
+static int active_cacheline_insert(struct dma_debug_entry *entry)
{
+ phys_addr_t cln = to_cacheline_number(entry);
unsigned long flags;
int rc;
+ /* If the device is not writing memory then we don't have any
+ * concerns about the cpu consuming stale data. This mitigates
+ * legitimate usages of overlapping mappings.
+ */
+ if (entry->direction == DMA_TO_DEVICE)
+ return 0;
+
spin_lock_irqsave(&radix_lock, flags);
- rc = radix_tree_insert(&dma_active_pfn, entry->pfn, entry);
+ rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
if (rc == -EEXIST)
- active_pfn_inc_overlap(entry->pfn);
+ active_cacheline_inc_overlap(cln);
spin_unlock_irqrestore(&radix_lock, flags);
return rc;
}
-static void active_pfn_remove(struct dma_debug_entry *entry)
+static void active_cacheline_remove(struct dma_debug_entry *entry)
{
+ phys_addr_t cln = to_cacheline_number(entry);
unsigned long flags;
+ /* ...mirror the insert case */
+ if (entry->direction == DMA_TO_DEVICE)
+ return;
+
spin_lock_irqsave(&radix_lock, flags);
/* since we are counting overlaps the final put of the
- * entry->pfn will occur when the overlap count is 0.
- * active_pfn_dec_overlap() returns -1 in that case
+ * cacheline will occur when the overlap count is 0.
+ * active_cacheline_dec_overlap() returns -1 in that case
*/
- if (active_pfn_dec_overlap(entry->pfn) < 0)
- radix_tree_delete(&dma_active_pfn, entry->pfn);
+ if (active_cacheline_dec_overlap(cln) < 0)
+ radix_tree_delete(&dma_active_cacheline, cln);
spin_unlock_irqrestore(&radix_lock, flags);
}
/**
* debug_dma_assert_idle() - assert that a page is not undergoing dma
- * @page: page to lookup in the dma_active_pfn tree
+ * @page: page to lookup in the dma_active_cacheline tree
*
* Place a call to this routine in cases where the cpu touching the page
* before the dma completes (page is dma_unmapped) will lead to data
@@ -536,22 +559,38 @@ static void active_pfn_remove(struct dma_debug_entry *entry)
*/
void debug_dma_assert_idle(struct page *page)
{
+ static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
+ struct dma_debug_entry *entry = NULL;
+ void **results = (void **) &ents;
+ unsigned int nents, i;
unsigned long flags;
- struct dma_debug_entry *entry;
+ phys_addr_t cln;
if (!page)
return;
+ cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
spin_lock_irqsave(&radix_lock, flags);
- entry = radix_tree_lookup(&dma_active_pfn, page_to_pfn(page));
+ nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
+ CACHELINES_PER_PAGE);
+ for (i = 0; i < nents; i++) {
+ phys_addr_t ent_cln = to_cacheline_number(ents[i]);
+
+ if (ent_cln == cln) {
+ entry = ents[i];
+ break;
+ } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
+ break;
+ }
spin_unlock_irqrestore(&radix_lock, flags);
if (!entry)
return;
+ cln = to_cacheline_number(entry);
err_printk(entry->dev, entry,
- "DMA-API: cpu touching an active dma mapped page "
- "[pfn=0x%lx]\n", entry->pfn);
+ "DMA-API: cpu touching an active dma mapped cacheline [cln=%pa]\n",
+ &cln);
}
/*
@@ -568,9 +607,9 @@ static void add_dma_entry(struct dma_debug_entry *entry)
hash_bucket_add(bucket, entry);
put_hash_bucket(bucket, &flags);
- rc = active_pfn_insert(entry);
+ rc = active_cacheline_insert(entry);
if (rc == -ENOMEM) {
- pr_err("DMA-API: pfn tracking ENOMEM, dma-debug disabled\n");
+ pr_err("DMA-API: cacheline tracking ENOMEM, dma-debug disabled\n");
global_disable = true;
}
@@ -631,7 +670,7 @@ static void dma_entry_free(struct dma_debug_entry *entry)
{
unsigned long flags;
- active_pfn_remove(entry);
+ active_cacheline_remove(entry);
/*
* add to beginning of the list - this way the entries are
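The dma-debug rework above keys the radix tree by cacheline number instead of pfn; to_cacheline_number() packs (pfn << (PAGE_SHIFT - L1_CACHE_SHIFT)) + (offset >> L1_CACHE_SHIFT). A worked example assuming PAGE_SHIFT = 12 and L1_CACHE_SHIFT = 6 (64 cachelines per 4 KB page; the real values are architecture-dependent):

/* to_cacheline_number() arithmetic with assumed shift values. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT			12
#define L1_CACHE_SHIFT			6
#define CACHELINE_PER_PAGE_SHIFT	(PAGE_SHIFT - L1_CACHE_SHIFT)

static uint64_t to_cacheline_number(uint64_t pfn, uint64_t offset)
{
	return (pfn << CACHELINE_PER_PAGE_SHIFT) + (offset >> L1_CACHE_SHIFT);
}

int main(void)
{
	/* pfn 3, byte offset 0x80 into the page -> cacheline 3*64 + 2 */
	printf("cln = %llu\n",
	       (unsigned long long)to_cacheline_number(3, 0x80));
	/* prints: cln = 194 */
	return 0;
}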
diff --git a/lib/fonts/Kconfig b/lib/fonts/Kconfig
index 4dc1b990aa23..34fd931b54b5 100644
--- a/lib/fonts/Kconfig
+++ b/lib/fonts/Kconfig
@@ -9,7 +9,7 @@ if FONT_SUPPORT
config FONTS
bool "Select compiled-in fonts"
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
help
Say Y here if you would like to use fonts other than the default
your frame buffer console usually use.
@@ -22,7 +22,7 @@ config FONTS
config FONT_8x8
bool "VGA 8x8 font" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
default y if !SPARC && !FONTS
help
This is the "high resolution" font for the VGA frame buffer (the one
@@ -45,7 +45,7 @@ config FONT_8x16
config FONT_6x11
bool "Mac console 6x11 font (not supported by all drivers)" if FONTS
- depends on FRAMEBUFFER_CONSOLE
+ depends on FRAMEBUFFER_CONSOLE || STI_CONSOLE
default y if !SPARC && !FONTS && MAC
help
Small console font with Macintosh-style high-half glyphs. Some Mac
diff --git a/lib/percpu_ida.c b/lib/percpu_ida.c
index 7be235f1a70b..93d145e5539c 100644
--- a/lib/percpu_ida.c
+++ b/lib/percpu_ida.c
@@ -54,9 +54,7 @@ static inline void move_tags(unsigned *dst, unsigned *dst_nr,
/*
* Try to steal tags from a remote cpu's percpu freelist.
*
- * We first check how many percpu freelists have tags - we don't steal tags
- * unless enough percpu freelists have tags on them that it's possible more than
- * half the total tags could be stuck on remote percpu freelists.
+ * We first check how many percpu freelists have tags
*
* Then we iterate through the cpus until we find some tags - we don't attempt
* to find the "best" cpu to steal from, to keep cacheline bouncing to a
@@ -69,8 +67,7 @@ static inline void steal_tags(struct percpu_ida *pool,
struct percpu_ida_cpu *remote;
for (cpus_have_tags = cpumask_weight(&pool->cpus_have_tags);
- cpus_have_tags * pool->percpu_max_size > pool->nr_tags / 2;
- cpus_have_tags--) {
+ cpus_have_tags; cpus_have_tags--) {
cpu = cpumask_next(cpu, &pool->cpus_have_tags);
if (cpu >= nr_cpu_ids) {
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 7811ed3b4e70..bd4a8dfdf0b8 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -1253,8 +1253,10 @@ unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item)
node = indirect_to_ptr(node);
max_index = radix_tree_maxindex(node->height);
- if (cur_index > max_index)
+ if (cur_index > max_index) {
+ rcu_read_unlock();
break;
+ }
cur_index = __locate(node, item, cur_index, &found_index);
rcu_read_unlock();
diff --git a/mm/Kconfig b/mm/Kconfig
index 2d9f1504d75e..2888024e0b0a 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -575,5 +575,5 @@ config PGTABLE_MAPPING
then you should select this. This causes zsmalloc to use page table
mapping rather than copying for object mapping.
- You can check speed with zsmalloc benchmark[1].
- [1] https://github.com/spartacus06/zsmalloc
+ You can check speed with zsmalloc benchmark:
+ https://github.com/spartacus06/zsmapbench
diff --git a/mm/compaction.c b/mm/compaction.c
index b48c5259ea33..918577595ea8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
{
int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL;
- unsigned long nr_strict_required = end_pfn - blockpfn;
unsigned long flags;
bool locked = false;
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
nr_scanned++;
if (!pfn_valid_within(blockpfn))
- continue;
+ goto isolate_fail;
+
if (!valid_page)
valid_page = page;
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/*
* The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
/* Recheck this is a buddy page under lock */
if (!PageBuddy(page))
- continue;
+ goto isolate_fail;
/* Found a free page, break it into order-0 pages */
isolated = split_free_page(page);
- if (!isolated && strict)
- break;
total_isolated += isolated;
for (i = 0; i < isolated; i++) {
list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (isolated) {
blockpfn += isolated - 1;
cursor += isolated - 1;
+ continue;
}
+
+isolate_fail:
+ if (strict)
+ break;
+ else
+ continue;
+
}
trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
* pages requested were isolated. If there were any failures, 0 is
* returned and CMA will fail.
*/
- if (strict && nr_strict_required > total_isolated)
+ if (strict && blockpfn < end_pfn)
total_isolated = 0;
if (locked)
diff --git a/mm/filemap.c b/mm/filemap.c
index d56d3c145b9f..7a13f6ac5421 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -2553,8 +2553,8 @@ ssize_t generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
if (ret > 0) {
ssize_t err;
- err = generic_write_sync(file, pos, ret);
- if (err < 0 && ret > 0)
+ err = generic_write_sync(file, iocb->ki_pos - ret, ret);
+ if (err < 0)
ret = err;
}
return ret;
diff --git a/mm/fremap.c b/mm/fremap.c
index bbc4d660221a..34feba60a17e 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -23,28 +23,44 @@
#include "internal.h"
+static int mm_counter(struct page *page)
+{
+ return PageAnon(page) ? MM_ANONPAGES : MM_FILEPAGES;
+}
+
static void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep)
{
pte_t pte = *ptep;
+ struct page *page;
+ swp_entry_t entry;
if (pte_present(pte)) {
- struct page *page;
-
flush_cache_page(vma, addr, pte_pfn(pte));
pte = ptep_clear_flush(vma, addr, ptep);
page = vm_normal_page(vma, addr, pte);
if (page) {
if (pte_dirty(pte))
set_page_dirty(page);
+ update_hiwater_rss(mm);
+ dec_mm_counter(mm, mm_counter(page));
page_remove_rmap(page);
page_cache_release(page);
+ }
+ } else { /* zap_pte() is not called when pte_none() */
+ if (!pte_file(pte)) {
update_hiwater_rss(mm);
- dec_mm_counter(mm, MM_FILEPAGES);
+ entry = pte_to_swp_entry(pte);
+ if (non_swap_entry(entry)) {
+ if (is_migration_entry(entry)) {
+ page = migration_entry_to_page(entry);
+ dec_mm_counter(mm, mm_counter(page));
+ }
+ } else {
+ free_swap_and_cache(entry);
+ dec_mm_counter(mm, MM_SWAPENTS);
+ }
}
- } else {
- if (!pte_file(pte))
- free_swap_and_cache(pte_to_swp_entry(pte));
pte_clear_not_present_full(mm, addr, ptep, 0);
}
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 82166bf974e1..1546655a2d78 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1166,8 +1166,10 @@ alloc:
} else {
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
- if (ret & VM_FAULT_OOM)
+ if (ret & VM_FAULT_OOM) {
split_huge_page(page);
+ ret |= VM_FAULT_FALLBACK;
+ }
put_page(page);
}
count_vm_event(THP_FAULT_FALLBACK);
@@ -1179,9 +1181,10 @@ alloc:
if (page) {
split_huge_page(page);
put_page(page);
- }
+ } else
+ split_huge_page_pmd(vma, address, pmd);
+ ret |= VM_FAULT_FALLBACK;
count_vm_event(THP_FAULT_FALLBACK);
- ret |= VM_FAULT_OOM;
goto out;
}
@@ -1545,6 +1548,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR;
+ set_pmd_at(mm, addr, pmd, entry);
BUG_ON(pmd_write(entry));
} else {
struct page *page = pmd_page(*pmd);
@@ -1557,16 +1561,10 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) {
- entry = *pmd;
- entry = pmd_mknuma(entry);
+ pmdp_set_numa(mm, addr, pmd);
ret = HPAGE_PMD_NR;
}
}
-
- /* Set PMD if cleared earlier */
- if (ret == HPAGE_PMD_NR)
- set_pmd_at(mm, addr, pmd, entry);
-
spin_unlock(ptl);
}
@@ -1963,7 +1961,7 @@ out:
return ret;
}
-#define VM_NO_THP (VM_SPECIAL|VM_MIXEDMAP|VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
+#define VM_NO_THP (VM_SPECIAL | VM_HUGETLB | VM_SHARED | VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
diff --git a/mm/ksm.c b/mm/ksm.c
index aa4c7c7250c1..68710e80994a 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -444,7 +444,7 @@ static void break_cow(struct rmap_item *rmap_item)
static struct page *page_trans_compound_anon(struct page *page)
{
if (PageTransCompound(page)) {
- struct page *head = compound_trans_head(page);
+ struct page *head = compound_head(page);
/*
* head may actually be splitted and freed from under
* us but it's ok here.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 53385cd4e6f0..5b6b0039f725 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1127,8 +1127,8 @@ skip_node:
* skipping css reference should be safe.
*/
if (next_css) {
- if ((next_css->flags & CSS_ONLINE) &&
- (next_css == &root->css || css_tryget(next_css)))
+ if ((next_css == &root->css) ||
+ ((next_css->flags & CSS_ONLINE) && css_tryget(next_css)))
return mem_cgroup_from_css(next_css);
prev_css = next_css;
@@ -1687,7 +1687,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
* protects memcg_name and makes sure that parallel ooms do not
* interleave
*/
- static DEFINE_SPINLOCK(oom_info_lock);
+ static DEFINE_MUTEX(oom_info_lock);
struct cgroup *task_cgrp;
struct cgroup *mem_cgrp;
static char memcg_name[PATH_MAX];
@@ -1698,7 +1698,7 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
if (!p)
return;
- spin_lock(&oom_info_lock);
+ mutex_lock(&oom_info_lock);
rcu_read_lock();
mem_cgrp = memcg->css.cgroup;
@@ -1767,7 +1767,7 @@ done:
pr_cont("\n");
}
- spin_unlock(&oom_info_lock);
+ mutex_unlock(&oom_info_lock);
}
/*
@@ -6595,6 +6595,7 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
struct mem_cgroup_event *event, *tmp;
+ struct cgroup_subsys_state *iter;
/*
* Unregister events and notify userspace.
@@ -6611,7 +6612,14 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
kmem_cgroup_css_offline(memcg);
mem_cgroup_invalidate_reclaim_iterators(memcg);
- mem_cgroup_reparent_charges(memcg);
+
+ /*
+ * This requires that offlining is serialized. Right now that is
+ * guaranteed because css_killed_work_fn() holds the cgroup_mutex.
+ */
+ css_for_each_descendant_post(iter, css)
+ mem_cgroup_reparent_charges(mem_cgroup_from_css(iter));
+
mem_cgroup_destroy_all_caches(memcg);
vmpressure_cleanup(&memcg->vmpressure);
}
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 4f08a2d61487..90002ea43638 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -945,8 +945,10 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
* to it. Similarly, page lock is shifted.
*/
if (hpage != p) {
- put_page(hpage);
- get_page(p);
+ if (!(flags & MF_COUNT_INCREASED)) {
+ put_page(hpage);
+ get_page(p);
+ }
lock_page(p);
unlock_page(hpage);
*hpagep = p;
@@ -1649,7 +1651,7 @@ int soft_offline_page(struct page *page, int flags)
{
int ret;
unsigned long pfn = page_to_pfn(page);
- struct page *hpage = compound_trans_head(page);
+ struct page *hpage = compound_head(page);
if (PageHWPoison(page)) {
pr_info("soft offline: %#lx page already poisoned\n", pfn);
diff --git a/mm/memory.c b/mm/memory.c
index be6a0c0d4ae0..22dfa617bddb 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3348,6 +3348,7 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (ret & VM_FAULT_LOCKED)
unlock_page(vmf.page);
ret = VM_FAULT_HWPOISON;
+ page_cache_release(vmf.page);
goto uncharge_out;
}
@@ -3703,7 +3704,6 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(is_vm_hugetlb_page(vma)))
return hugetlb_fault(mm, vma, address, flags);
-retry:
pgd = pgd_offset(mm, address);
pud = pud_alloc(mm, pgd, address);
if (!pud)
@@ -3741,20 +3741,13 @@ retry:
if (dirty && !pmd_write(orig_pmd)) {
ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
orig_pmd);
- /*
- * If COW results in an oom, the huge pmd will
- * have been split, so retry the fault on the
- * pte for a smaller charge.
- */
- if (unlikely(ret & VM_FAULT_OOM))
- goto retry;
- return ret;
+ if (!(ret & VM_FAULT_FALLBACK))
+ return ret;
} else {
huge_pmd_set_accessed(mm, vma, address, pmd,
orig_pmd, dirty);
+ return 0;
}
-
- return 0;
}
}
diff --git a/mm/migrate.c b/mm/migrate.c
index 482a33d89134..bed48809e5d0 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -178,6 +178,37 @@ out:
}
/*
+ * Congratulations to trinity for discovering this bug.
+ * mm/fremap.c's remap_file_pages() accepts any range within a single vma to
+ * convert that vma to VM_NONLINEAR; and generic_file_remap_pages() will then
+ * replace the specified range by file ptes throughout (maybe populated after).
+ * If page migration finds a page within that range, while it's still located
+ * by vma_interval_tree rather than lost to i_mmap_nonlinear list, no problem:
+ * zap_pte() clears the temporary migration entry before mmap_sem is dropped.
+ * But if the migrating page is in a part of the vma outside the range to be
+ * remapped, then it will not be cleared, and remove_migration_ptes() needs to
+ * deal with it. Fortunately, this part of the vma is of course still linear,
+ * so we just need to use linear location on the nonlinear list.
+ */
+static int remove_linear_migration_ptes_from_nonlinear(struct page *page,
+ struct address_space *mapping, void *arg)
+{
+ struct vm_area_struct *vma;
+ /* hugetlbfs does not support remap_pages, so no huge pgoff worries */
+ pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+ unsigned long addr;
+
+ list_for_each_entry(vma,
+ &mapping->i_mmap_nonlinear, shared.nonlinear) {
+
+ addr = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
+ if (addr >= vma->vm_start && addr < vma->vm_end)
+ remove_migration_pte(page, vma, addr, arg);
+ }
+ return SWAP_AGAIN;
+}
+
+/*
* Get rid of all migration entries and replace them by
* references to the indicated page.
*/
@@ -186,6 +217,7 @@ static void remove_migration_ptes(struct page *old, struct page *new)
struct rmap_walk_control rwc = {
.rmap_one = remove_migration_pte,
.arg = old,
+ .file_nonlinear = remove_linear_migration_ptes_from_nonlinear,
};
rmap_walk(new, &rwc);
@@ -1158,7 +1190,7 @@ static struct page *new_page_node(struct page *p, unsigned long private,
pm->node);
else
return alloc_pages_exact_node(pm->node,
- GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0);
+ GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}
/*
@@ -1544,9 +1576,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page,
struct page *newpage;
newpage = alloc_pages_exact_node(nid,
- (GFP_HIGHUSER_MOVABLE | GFP_THISNODE |
- __GFP_NOMEMALLOC | __GFP_NORETRY |
- __GFP_NOWARN) &
+ (GFP_HIGHUSER_MOVABLE |
+ __GFP_THISNODE | __GFP_NOMEMALLOC |
+ __GFP_NORETRY | __GFP_NOWARN) &
~GFP_IOFS, 0);
return newpage;
@@ -1747,7 +1779,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
goto out_dropref;
new_page = alloc_pages_node(node,
- (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER);
+ (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT,
+ HPAGE_PMD_ORDER);
if (!new_page)
goto out_fail;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7332c1785744..769a67a15803 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -58,36 +58,27 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (pte_numa(ptent))
ptent = pte_mknonnuma(ptent);
ptent = pte_modify(ptent, newprot);
+ /*
+ * Avoid taking write faults for pages we
+ * know to be dirty.
+ */
+ if (dirty_accountable && pte_dirty(ptent))
+ ptent = pte_mkwrite(ptent);
+ ptep_modify_prot_commit(mm, addr, pte, ptent);
updated = true;
} else {
struct page *page;
- ptent = *pte;
page = vm_normal_page(vma, addr, oldpte);
if (page && !PageKsm(page)) {
if (!pte_numa(oldpte)) {
- ptent = pte_mknuma(ptent);
- set_pte_at(mm, addr, pte, ptent);
+ ptep_set_numa(mm, addr, pte);
updated = true;
}
}
}
-
- /*
- * Avoid taking write faults for pages we know to be
- * dirty.
- */
- if (dirty_accountable && pte_dirty(ptent)) {
- ptent = pte_mkwrite(ptent);
- updated = true;
- }
-
if (updated)
pages++;
-
- /* Only !prot_numa always clears the pte */
- if (!prot_numa)
- ptep_modify_prot_commit(mm, addr, pte, ptent);
} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
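
The NUMA-hinting path above now calls ptep_set_numa() instead of open-coding pte_mknuma() plus set_pte_at(). A plausible generic definition, stated here as an assumption about the helper introduced elsewhere in this series:

/* assumed generic form: set the NUMA hinting state on a present pte */
static inline void ptep_set_numa(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep)
{
        pte_t ptent = *ptep;

        ptent = pte_mknuma(ptent);
        set_pte_at(mm, addr, ptep, ptent);
}

Routing the update through one helper presumably lets architectures override it and flip the bit in place, avoiding a transient non-present pte and the extra work set_pte_at() can imply.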
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2d30e2cfe804..7106cb1aca8e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2173,11 +2173,12 @@ int __set_page_dirty_nobuffers(struct page *page)
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
struct address_space *mapping2;
+ unsigned long flags;
if (!mapping)
return 1;
- spin_lock_irq(&mapping->tree_lock);
+ spin_lock_irqsave(&mapping->tree_lock, flags);
mapping2 = page_mapping(page);
if (mapping2) { /* Race with truncate? */
BUG_ON(mapping2 != mapping);
@@ -2186,7 +2187,7 @@ int __set_page_dirty_nobuffers(struct page *page)
radix_tree_tag_set(&mapping->page_tree,
page_index(page), PAGECACHE_TAG_DIRTY);
}
- spin_unlock_irq(&mapping->tree_lock);
+ spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (mapping->host) {
/* !PageAnon && !swapper_space */
__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3758a09a009..3bac76ae4b30 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -369,9 +369,11 @@ void prep_compound_page(struct page *page, unsigned long order)
__SetPageHead(page);
for (i = 1; i < nr_pages; i++) {
struct page *p = page + i;
- __SetPageTail(p);
set_page_count(p, 0);
p->first_page = page;
+ /* Make sure p->first_page is always valid for PageTail() */
+ smp_wmb();
+ __SetPageTail(p);
}
}
@@ -1236,6 +1238,15 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
}
local_irq_restore(flags);
}
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+ return (gfp_mask & GFP_THISNODE) == GFP_THISNODE;
+}
+#else
+static bool gfp_thisnode_allocation(gfp_t gfp_mask)
+{
+ return false;
+}
#endif
/*
@@ -1572,7 +1583,13 @@ again:
get_pageblock_migratetype(page));
}
- __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+ /*
+ * NOTE: GFP_THISNODE allocations do not partake in the kswapd
+ * aging protocol, so they can't be fair.
+ */
+ if (!gfp_thisnode_allocation(gfp_flags))
+ __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order));
+
__count_zone_vm_events(PGALLOC, zone, 1 << order);
zone_statistics(preferred_zone, zone, gfp_flags);
local_irq_restore(flags);
@@ -1944,8 +1961,12 @@ zonelist_scan:
* ultimately fall back to remote zones that do not
* partake in the fairness round-robin cycle of this
* zonelist.
+ *
+ * NOTE: GFP_THISNODE allocations do not partake in
+ * the kswapd aging protocol, so they can't be fair.
*/
- if (alloc_flags & ALLOC_WMARK_LOW) {
+ if ((alloc_flags & ALLOC_WMARK_LOW) &&
+ !gfp_thisnode_allocation(gfp_mask)) {
if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
continue;
if (!zone_local(preferred_zone, zone))
@@ -2501,8 +2522,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
* allowed per node queues are empty and that nodes are
* over allocated.
*/
- if (IS_ENABLED(CONFIG_NUMA) &&
- (gfp_mask & GFP_THISNODE) == GFP_THISNODE)
+ if (gfp_thisnode_allocation(gfp_mask))
goto nopage;
restart:
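
The gfp_thisnode_allocation() helper above only makes sense against that era's definition of GFP_THISNODE, quoted here as an assumption about include/linux/gfp.h:

/* assumed definitions the helper is written against (not part of this diff) */
#ifdef CONFIG_NUMA
#define GFP_THISNODE    (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)
#else
#define GFP_THISNODE    ((__force gfp_t)0)
#endif

With !CONFIG_NUMA the composite mask is zero, so a full-mask comparison would match every allocation; that is why the helper is compiled to return false in that case, and why the NUMA variant compares all three bits rather than just __GFP_THISNODE.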
diff --git a/mm/rmap.c b/mm/rmap.c
index d9d42316a99a..8fc049f9a5a6 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1360,8 +1360,9 @@ static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
}
static int try_to_unmap_nonlinear(struct page *page,
- struct address_space *mapping, struct vm_area_struct *vma)
+ struct address_space *mapping, void *arg)
{
+ struct vm_area_struct *vma;
int ret = SWAP_AGAIN;
unsigned long cursor;
unsigned long max_nl_cursor = 0;
@@ -1663,7 +1664,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
if (list_empty(&mapping->i_mmap_nonlinear))
goto done;
- ret = rwc->file_nonlinear(page, mapping, vma);
+ ret = rwc->file_nonlinear(page, mapping, rwc->arg);
done:
mutex_unlock(&mapping->i_mmap_mutex);
diff --git a/mm/slub.c b/mm/slub.c
index 7e3e0458bce4..25f14ad8f817 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1004,21 +1004,19 @@ static inline void slab_free_hook(struct kmem_cache *s, void *x)
static void add_full(struct kmem_cache *s,
struct kmem_cache_node *n, struct page *page)
{
- lockdep_assert_held(&n->list_lock);
-
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_add(&page->lru, &n->full);
}
static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
- lockdep_assert_held(&n->list_lock);
-
if (!(s->flags & SLAB_STORE_USER))
return;
+ lockdep_assert_held(&n->list_lock);
list_del(&page->lru);
}
@@ -1520,11 +1518,9 @@ static void discard_slab(struct kmem_cache *s, struct page *page)
/*
* Management of partially allocated slabs.
*/
-static inline void add_partial(struct kmem_cache_node *n,
- struct page *page, int tail)
+static inline void
+__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
- lockdep_assert_held(&n->list_lock);
-
n->nr_partial++;
if (tail == DEACTIVATE_TO_TAIL)
list_add_tail(&page->lru, &n->partial);
@@ -1532,15 +1528,27 @@ static inline void add_partial(struct kmem_cache_node *n,
list_add(&page->lru, &n->partial);
}
-static inline void remove_partial(struct kmem_cache_node *n,
- struct page *page)
+static inline void add_partial(struct kmem_cache_node *n,
+ struct page *page, int tail)
{
lockdep_assert_held(&n->list_lock);
+ __add_partial(n, page, tail);
+}
+static inline void
+__remove_partial(struct kmem_cache_node *n, struct page *page)
+{
list_del(&page->lru);
n->nr_partial--;
}
+static inline void remove_partial(struct kmem_cache_node *n,
+ struct page *page)
+{
+ lockdep_assert_held(&n->list_lock);
+ __remove_partial(n, page);
+}
+
/*
* Remove slab from the partial list, freeze it and
* return the pointer to the freelist.
@@ -2906,12 +2914,10 @@ static void early_kmem_cache_node_alloc(int node)
inc_slabs_node(kmem_cache_node, node, page->objects);
/*
- * the lock is for lockdep's sake, not for any actual
- * race protection
+ * No locks need to be taken here as it has just been
+ * initialized and there is no concurrent access.
*/
- spin_lock(&n->list_lock);
- add_partial(n, page, DEACTIVATE_TO_HEAD);
- spin_unlock(&n->list_lock);
+ __add_partial(n, page, DEACTIVATE_TO_HEAD);
}
static void free_kmem_cache_nodes(struct kmem_cache *s)
@@ -3197,7 +3203,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- remove_partial(n, page);
+ __remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
diff --git a/mm/swap.c b/mm/swap.c
index b31ba67d440a..0092097b3f4c 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -98,7 +98,7 @@ static void put_compound_page(struct page *page)
}
/* __split_huge_page_refcount can run under us */
- page_head = compound_trans_head(page);
+ page_head = compound_head(page);
/*
* THP can not break up slab pages so avoid taking
@@ -253,7 +253,7 @@ bool __get_page_tail(struct page *page)
*/
unsigned long flags;
bool got;
- struct page *page_head = compound_trans_head(page);
+ struct page *page_head = compound_head(page);
/* Ref to put_compound_page() comment. */
if (!__compound_tail_refcounted(page_head)) {
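
Switching from compound_trans_head() to compound_head() here pairs with the smp_wmb() added in prep_compound_page() above: once first_page is published before PageTail becomes visible, a plain read-side recheck is enough. A sketch of that reader, as an assumption about the compound_head() introduced elsewhere in this series:

/* assumed reader side: recheck PageTail after reading first_page */
static inline struct page *compound_head(struct page *page)
{
        if (unlikely(PageTail(page))) {
                struct page *head = page->first_page;

                smp_rmb();      /* pairs with smp_wmb() in prep_compound_page() */
                if (likely(PageTail(page)))
                        return head;
        }
        return page;
}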
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 98e85e9c2b2d..e76ace30d436 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -63,6 +63,8 @@ unsigned long total_swapcache_pages(void)
return ret;
}
+static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
+
void show_swap_cache_info(void)
{
printk("%lu pages in swap cache\n", total_swapcache_pages());
@@ -286,8 +288,11 @@ struct page * lookup_swap_cache(swp_entry_t entry)
page = find_get_page(swap_address_space(entry), entry.val);
- if (page)
+ if (page) {
INC_CACHE_INFO(find_success);
+ if (TestClearPageReadahead(page))
+ atomic_inc(&swapin_readahead_hits);
+ }
INC_CACHE_INFO(find_total);
return page;
@@ -389,6 +394,50 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
return found_page;
}
+static unsigned long swapin_nr_pages(unsigned long offset)
+{
+ static unsigned long prev_offset;
+ unsigned int pages, max_pages, last_ra;
+ static atomic_t last_readahead_pages;
+
+ max_pages = 1 << ACCESS_ONCE(page_cluster);
+ if (max_pages <= 1)
+ return 1;
+
+ /*
+ * This heuristic has been found to work well on both sequential and
+ * random loads, swapping to hard disk or to SSD: please don't ask
+ * what the "+ 2" means, it just happens to work well, that's all.
+ */
+ pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
+ if (pages == 2) {
+ /*
+ * We can have no readahead hits to judge by: but must not get
+ * stuck here forever, so check for an adjacent offset instead
+ * (and don't even bother to check whether swap type is same).
+ */
+ if (offset != prev_offset + 1 && offset != prev_offset - 1)
+ pages = 1;
+ prev_offset = offset;
+ } else {
+ unsigned int roundup = 4;
+ while (roundup < pages)
+ roundup <<= 1;
+ pages = roundup;
+ }
+
+ if (pages > max_pages)
+ pages = max_pages;
+
+ /* Don't shrink readahead too fast */
+ last_ra = atomic_read(&last_readahead_pages) / 2;
+ if (pages < last_ra)
+ pages = last_ra;
+ atomic_set(&last_readahead_pages, pages);
+
+ return pages;
+}
+
/**
* swapin_readahead - swap in pages in hope we need them soon
* @entry: swap entry of this memory
@@ -412,11 +461,16 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
struct vm_area_struct *vma, unsigned long addr)
{
struct page *page;
- unsigned long offset = swp_offset(entry);
+ unsigned long entry_offset = swp_offset(entry);
+ unsigned long offset = entry_offset;
unsigned long start_offset, end_offset;
- unsigned long mask = (1UL << page_cluster) - 1;
+ unsigned long mask;
struct blk_plug plug;
+ mask = swapin_nr_pages(offset) - 1;
+ if (!mask)
+ goto skip;
+
/* Read a page_cluster sized and aligned cluster around offset. */
start_offset = offset & ~mask;
end_offset = offset | mask;
@@ -430,10 +484,13 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
gfp_mask, vma, addr);
if (!page)
continue;
+ if (offset != entry_offset)
+ SetPageReadahead(page);
page_cache_release(page);
}
blk_finish_plug(&plug);
lru_add_drain(); /* Push any new pages onto the LRU now */
+skip:
return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
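
The only non-obvious arithmetic in swapin_nr_pages() is the power-of-two round-up with a floor of four; isolated for clarity (illustrative only, not part of the patch):

/* e.g. hits=1 -> pages=3 -> 4;  hits=5 -> pages=7 -> 8;  hits=9 -> 11 -> 16 */
static unsigned int swapin_roundup(unsigned int pages)
{
        unsigned int roundup = 4;

        while (roundup < pages)
                roundup <<= 1;
        return roundup;
}

The result is then clamped to 1 << page_cluster and is not allowed to drop below half of the previous readahead size, so the window shrinks gradually when readahead hits dry up.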
diff --git a/mm/swapfile.c b/mm/swapfile.c
index c6c13b050a58..4a7f7e6992b6 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1923,7 +1923,6 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
p->swap_map = NULL;
cluster_info = p->cluster_info;
p->cluster_info = NULL;
- p->flags = 0;
frontswap_map = frontswap_map_get(p);
spin_unlock(&p->lock);
spin_unlock(&swap_lock);
@@ -1949,6 +1948,16 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
mutex_unlock(&inode->i_mutex);
}
filp_close(swap_file, NULL);
+
+ /*
+ * Clear the SWP_USED flag after all resources are freed so that swapon
+ * can reuse this swap_info in alloc_swap_info() safely. It is ok to
+ * not hold p->lock after we cleared its SWP_WRITEOK.
+ */
+ spin_lock(&swap_lock);
+ p->flags = 0;
+ spin_unlock(&swap_lock);
+
err = 0;
atomic_inc(&proc_poll_event);
wake_up_interruptible(&proc_poll_wait);
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index 196970a4541f..d4042e75f7c7 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/vmstat.h>
#include <linux/eventfd.h>
+#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/printk.h>
#include <linux/vmpressure.h>
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 72496140ac08..def5dd2fbe61 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -851,12 +851,14 @@ const char * const vmstat_text[] = {
"thp_zero_page_alloc",
"thp_zero_page_alloc_failed",
#endif
+#ifdef CONFIG_DEBUG_TLBFLUSH
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
-#endif
+#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
+#endif /* CONFIG_DEBUG_TLBFLUSH */
#endif /* CONFIG_VM_EVENTS_COUNTERS */
};
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index de51c48c4393..4b65aa492fb6 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev
struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
struct net_device *real_dev = vlan->real_dev;
+ if (saddr == NULL)
+ saddr = dev->dev_addr;
+
return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
}
diff --git a/net/9p/client.c b/net/9p/client.c
index a5e4d2dcb03e..9186550d77a6 100644
--- a/net/9p/client.c
+++ b/net/9p/client.c
@@ -204,7 +204,7 @@ free_and_return:
return ret;
}
-struct p9_fcall *p9_fcall_alloc(int alloc_msize)
+static struct p9_fcall *p9_fcall_alloc(int alloc_msize)
{
struct p9_fcall *fc;
fc = kmalloc(sizeof(struct p9_fcall) + alloc_msize, GFP_NOFS);
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index cd1e1ede73a4..ac2666c1d011 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -340,7 +340,10 @@ static int p9_get_mapped_pages(struct virtio_chan *chan,
int count = nr_pages;
while (nr_pages) {
s = rest_of_page(data);
- pages[index++] = kmap_to_page(data);
+ if (is_vmalloc_addr(data))
+ pages[index++] = vmalloc_to_page(data);
+ else
+ pages[index++] = kmap_to_page(data);
data += s;
nr_pages--;
}
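
p9_get_mapped_pages() can now be handed buffers from either the vmalloc area or lowmem/kmapped memory; the branch above is the general pattern for translating such a kernel virtual address to its struct page. A self-contained sketch of the same idea (illustrative only):

/* translate a kernel virtual address to its struct page (illustrative) */
static struct page *kvaddr_to_page(void *data)
{
        if (is_vmalloc_addr(data))              /* vmalloc/vmap mappings */
                return vmalloc_to_page(data);
        return kmap_to_page(data);              /* lowmem or kmapped highmem */
}

kmap_to_page() presumably returned a bogus page for vmalloc addresses, which is the case this hunk fixes.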
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index 512159bf607f..8323bced8e5b 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -241,19 +241,19 @@ batadv_iv_ogm_orig_get(struct batadv_priv *bat_priv, const uint8_t *addr)
size = bat_priv->num_ifaces * sizeof(uint8_t);
orig_node->bat_iv.bcast_own_sum = kzalloc(size, GFP_ATOMIC);
if (!orig_node->bat_iv.bcast_own_sum)
- goto free_bcast_own;
+ goto free_orig_node;
hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
batadv_choose_orig, orig_node,
&orig_node->hash_entry);
if (hash_added != 0)
- goto free_bcast_own;
+ goto free_orig_node;
return orig_node;
-free_bcast_own:
- kfree(orig_node->bat_iv.bcast_own);
free_orig_node:
+ /* free twice, as batadv_orig_node_new sets refcount to 2 */
+ batadv_orig_node_free_ref(orig_node);
batadv_orig_node_free_ref(orig_node);
return NULL;
@@ -266,7 +266,7 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
struct batadv_orig_node *orig_neigh)
{
struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
- struct batadv_neigh_node *neigh_node;
+ struct batadv_neigh_node *neigh_node, *tmp_neigh_node;
neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
if (!neigh_node)
@@ -281,14 +281,24 @@ batadv_iv_ogm_neigh_new(struct batadv_hard_iface *hard_iface,
neigh_node->orig_node = orig_neigh;
neigh_node->if_incoming = hard_iface;
- batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
- "Creating new neighbor %pM for orig_node %pM on interface %s\n",
- neigh_addr, orig_node->orig, hard_iface->net_dev->name);
-
spin_lock_bh(&orig_node->neigh_list_lock);
- hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ tmp_neigh_node = batadv_neigh_node_get(orig_node, hard_iface,
+ neigh_addr);
+ if (!tmp_neigh_node) {
+ hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
+ } else {
+ kfree(neigh_node);
+ batadv_hardif_free_ref(hard_iface);
+ neigh_node = tmp_neigh_node;
+ }
spin_unlock_bh(&orig_node->neigh_list_lock);
+ if (!tmp_neigh_node)
+ batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
+ "Creating new neighbor %pM for orig_node %pM on interface %s\n",
+ neigh_addr, orig_node->orig,
+ hard_iface->net_dev->name);
+
out:
return neigh_node;
}
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 3d417d3641c6..b851cc580853 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -241,7 +241,7 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
{
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
const struct batadv_hard_iface *hard_iface;
- int min_mtu = ETH_DATA_LEN;
+ int min_mtu = INT_MAX;
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -256,8 +256,6 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
}
rcu_read_unlock();
- atomic_set(&bat_priv->packet_size_max, min_mtu);
-
if (atomic_read(&bat_priv->fragmentation) == 0)
goto out;
@@ -268,13 +266,21 @@ int batadv_hardif_min_mtu(struct net_device *soft_iface)
min_mtu = min_t(int, min_mtu, BATADV_FRAG_MAX_FRAG_SIZE);
min_mtu -= sizeof(struct batadv_frag_packet);
min_mtu *= BATADV_FRAG_MAX_FRAGMENTS;
- atomic_set(&bat_priv->packet_size_max, min_mtu);
-
- /* with fragmentation enabled we can fragment external packets easily */
- min_mtu = min_t(int, min_mtu, ETH_DATA_LEN);
out:
- return min_mtu - batadv_max_header_len();
+ /* report to the other components the maximum number of bytes that
+ * batman-adv can send over the wire (without considering the payload
+ * overhead). For example, this value is used by TT to compute the
+ * maximum local table size
+ */
+ atomic_set(&bat_priv->packet_size_max, min_mtu);
+
+ /* the real soft-interface MTU is computed by removing the payload
+ * overhead from the maximum number of bytes that was just computed.
+ *
+ * However, batman-adv does not support MTUs bigger than ETH_DATA_LEN
+ */
+ return min_t(int, min_mtu - batadv_max_header_len(), ETH_DATA_LEN);
}
/* adjusts the MTU if a new interface with a smaller MTU appeared. */
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6df12a2e3605..853941629dc1 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -458,6 +458,42 @@ out:
}
/**
+ * batadv_neigh_node_get - retrieve a neighbour from the list
+ * @orig_node: originator which the neighbour belongs to
+ * @hard_iface: the interface where this neighbour is connected to
+ * @addr: the address of the neighbour
+ *
+ * Looks for and possibly returns a neighbour belonging to this originator list
+ * which is connected through the provided hard interface.
+ * Returns NULL if the neighbour is not found.
+ */
+struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *hard_iface,
+ const uint8_t *addr)
+{
+ struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
+
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
+ if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
+ continue;
+
+ if (tmp_neigh_node->if_incoming != hard_iface)
+ continue;
+
+ if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
+ continue;
+
+ res = tmp_neigh_node;
+ break;
+ }
+ rcu_read_unlock();
+
+ return res;
+}
+
+/**
* batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
* @rcu: rcu pointer of the orig_ifinfo object
*/
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index 37be290f63f6..db3a9ed734cb 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -29,6 +29,10 @@ void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node);
struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
const uint8_t *addr);
struct batadv_neigh_node *
+batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
+ const struct batadv_hard_iface *hard_iface,
+ const uint8_t *addr);
+struct batadv_neigh_node *
batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
const uint8_t *neigh_addr,
struct batadv_orig_node *orig_node);
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 1ed9f7c9ecea..a953d5b196a3 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -688,7 +688,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,
int is_old_ttvn;
/* check if there is enough data before accessing it */
- if (pskb_may_pull(skb, hdr_len + ETH_HLEN) < 0)
+ if (!pskb_may_pull(skb, hdr_len + ETH_HLEN))
return 0;
/* create a copy of the skb (in case of re-routing) to modify it. */
@@ -918,6 +918,8 @@ int batadv_recv_unicast_tvlv(struct sk_buff *skb,
if (ret != NET_RX_SUCCESS)
ret = batadv_route_unicast_packet(skb, recv_if);
+ else
+ consume_skb(skb);
return ret;
}
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 579f5f00a385..843febd1e519 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -254,9 +254,9 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
struct batadv_orig_node *orig_node,
unsigned short vid)
{
- struct ethhdr *ethhdr = (struct ethhdr *)skb->data;
+ struct ethhdr *ethhdr;
struct batadv_unicast_packet *unicast_packet;
- int ret = NET_XMIT_DROP;
+ int ret = NET_XMIT_DROP, hdr_size;
if (!orig_node)
goto out;
@@ -265,12 +265,16 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
case BATADV_UNICAST:
if (!batadv_send_skb_prepare_unicast(skb, orig_node))
goto out;
+
+ hdr_size = sizeof(*unicast_packet);
break;
case BATADV_UNICAST_4ADDR:
if (!batadv_send_skb_prepare_unicast_4addr(bat_priv, skb,
orig_node,
packet_subtype))
goto out;
+
+ hdr_size = sizeof(struct batadv_unicast_4addr_packet);
break;
default:
/* this function supports UNICAST and UNICAST_4ADDR only. It
@@ -279,6 +283,7 @@ static int batadv_send_skb_unicast(struct batadv_priv *bat_priv,
goto out;
}
+ ethhdr = (struct ethhdr *)(skb->data + hdr_size);
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* inform the destination node that we are still missing a correct route
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b6071f675a3e..959dde721c46 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1975,6 +1975,7 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
struct hlist_head *head;
uint32_t i, crc_tmp, crc = 0;
uint8_t flags;
+ __be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -2011,8 +2012,11 @@ static uint32_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
orig_node))
continue;
- crc_tmp = crc32c(0, &tt_common->vid,
- sizeof(tt_common->vid));
+ /* use network order to read the VID: this ensures that
+ * every node reads the bytes in the same order.
+ */
+ tmp_vid = htons(tt_common->vid);
+ crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
/* compute the CRC on flags that have to be kept in sync
* among nodes
@@ -2046,6 +2050,7 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
struct hlist_head *head;
uint32_t i, crc_tmp, crc = 0;
uint8_t flags;
+ __be16 tmp_vid;
for (i = 0; i < hash->size; i++) {
head = &hash->table[i];
@@ -2064,8 +2069,11 @@ static uint32_t batadv_tt_local_crc(struct batadv_priv *bat_priv,
if (tt_common->flags & BATADV_TT_CLIENT_NEW)
continue;
- crc_tmp = crc32c(0, &tt_common->vid,
- sizeof(tt_common->vid));
+ /* use network order to read the VID: this ensures that
+ * every node reads the bytes in the same order.
+ */
+ tmp_vid = htons(tt_common->vid);
+ crc_tmp = crc32c(0, &tmp_vid, sizeof(tmp_vid));
/* compute the CRC on flags that have to be kept in sync
* among nodes
@@ -2262,6 +2270,7 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
{
struct batadv_tvlv_tt_vlan_data *tt_vlan_tmp;
struct batadv_orig_node_vlan *vlan;
+ uint32_t crc;
int i;
/* check if each received CRC matches the locally stored one */
@@ -2281,7 +2290,10 @@ static bool batadv_tt_global_check_crc(struct batadv_orig_node *orig_node,
if (!vlan)
return false;
- if (vlan->tt.crc != ntohl(tt_vlan_tmp->crc))
+ crc = vlan->tt.crc;
+ batadv_orig_node_vlan_free_ref(vlan);
+
+ if (crc != ntohl(tt_vlan_tmp->crc))
return false;
}
@@ -3218,7 +3230,6 @@ static void batadv_tt_update_orig(struct batadv_priv *bat_priv,
spin_lock_bh(&orig_node->tt_lock);
- tt_change = (struct batadv_tvlv_tt_change *)tt_buff;
batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
ttvn, tt_change);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index 292e619db896..d9fb93451442 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -430,6 +430,16 @@ static void hidp_del_timer(struct hidp_session *session)
del_timer(&session->timer);
}
+static void hidp_process_report(struct hidp_session *session,
+ int type, const u8 *data, int len, int intr)
+{
+ if (len > HID_MAX_BUFFER_SIZE)
+ len = HID_MAX_BUFFER_SIZE;
+
+ memcpy(session->input_buf, data, len);
+ hid_input_report(session->hid, type, session->input_buf, len, intr);
+}
+
static void hidp_process_handshake(struct hidp_session *session,
unsigned char param)
{
@@ -502,7 +512,8 @@ static int hidp_process_data(struct hidp_session *session, struct sk_buff *skb,
hidp_input_report(session, skb);
if (session->hid)
- hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 0);
+ hidp_process_report(session, HID_INPUT_REPORT,
+ skb->data, skb->len, 0);
break;
case HIDP_DATA_RTYPE_OTHER:
@@ -584,7 +595,8 @@ static void hidp_recv_intr_frame(struct hidp_session *session,
hidp_input_report(session, skb);
if (session->hid) {
- hid_input_report(session->hid, HID_INPUT_REPORT, skb->data, skb->len, 1);
+ hidp_process_report(session, HID_INPUT_REPORT,
+ skb->data, skb->len, 1);
BT_DBG("report len %d", skb->len);
}
} else {
diff --git a/net/bluetooth/hidp/hidp.h b/net/bluetooth/hidp/hidp.h
index ab5241400cf7..8798492a6e99 100644
--- a/net/bluetooth/hidp/hidp.h
+++ b/net/bluetooth/hidp/hidp.h
@@ -24,6 +24,7 @@
#define __HIDP_H
#include <linux/types.h>
+#include <linux/hid.h>
#include <linux/kref.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/l2cap.h>
@@ -179,6 +180,9 @@ struct hidp_session {
/* Used in hidp_output_raw_report() */
int output_report_success; /* boolean */
+
+ /* temporary input buffer */
+ u8 input_buf[HID_MAX_BUFFER_SIZE];
};
/* HIDP init defines */
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index e4401a531afb..63f0455c0bc3 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -187,8 +187,7 @@ static int br_set_mac_address(struct net_device *dev, void *p)
spin_lock_bh(&br->lock);
if (!ether_addr_equal(dev->dev_addr, addr->sa_data)) {
- memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
- br_fdb_change_mac_address(br, addr->sa_data);
+ /* Mac address will be changed in br_stp_change_bridge_id(). */
br_stp_change_bridge_id(br, addr->sa_data);
}
spin_unlock_bh(&br->lock);
@@ -226,6 +225,33 @@ static void br_netpoll_cleanup(struct net_device *dev)
br_netpoll_disable(p);
}
+static int __br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+ struct netpoll *np;
+ int err;
+
+ np = kzalloc(sizeof(*p->np), gfp);
+ if (!np)
+ return -ENOMEM;
+
+ err = __netpoll_setup(np, p->dev, gfp);
+ if (err) {
+ kfree(np);
+ return err;
+ }
+
+ p->np = np;
+ return err;
+}
+
+int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
+{
+ if (!p->br->dev->npinfo)
+ return 0;
+
+ return __br_netpoll_enable(p, gfp);
+}
+
static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
gfp_t gfp)
{
@@ -236,7 +262,7 @@ static int br_netpoll_setup(struct net_device *dev, struct netpoll_info *ni,
list_for_each_entry(p, &br->port_list, list) {
if (!p->dev)
continue;
- err = br_netpoll_enable(p, gfp);
+ err = __br_netpoll_enable(p, gfp);
if (err)
goto fail;
}
@@ -249,28 +275,6 @@ fail:
goto out;
}
-int br_netpoll_enable(struct net_bridge_port *p, gfp_t gfp)
-{
- struct netpoll *np;
- int err;
-
- if (!p->br->dev->npinfo)
- return 0;
-
- np = kzalloc(sizeof(*p->np), gfp);
- if (!np)
- return -ENOMEM;
-
- err = __netpoll_setup(np, p->dev, gfp);
- if (err) {
- kfree(np);
- return err;
- }
-
- p->np = np;
- return err;
-}
-
void br_netpoll_disable(struct net_bridge_port *p)
{
struct netpoll *np = p->np;
diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c
index c5f5a4a933f4..9203d5a1943f 100644
--- a/net/bridge/br_fdb.c
+++ b/net/bridge/br_fdb.c
@@ -27,6 +27,9 @@
#include "br_private.h"
static struct kmem_cache *br_fdb_cache __read_mostly;
+static struct net_bridge_fdb_entry *fdb_find(struct hlist_head *head,
+ const unsigned char *addr,
+ __u16 vid);
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
static void fdb_notify(struct net_bridge *br,
@@ -89,11 +92,57 @@ static void fdb_delete(struct net_bridge *br, struct net_bridge_fdb_entry *f)
call_rcu(&f->rcu, fdb_rcu_free);
}
+/* Delete a local entry if no other port had the same address. */
+static void fdb_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ struct net_bridge_fdb_entry *f)
+{
+ const unsigned char *addr = f->addr.addr;
+ u16 vid = f->vlan_id;
+ struct net_bridge_port *op;
+
+ /* Maybe another port has same hw addr? */
+ list_for_each_entry(op, &br->port_list, list) {
+ if (op != p && ether_addr_equal(op->dev->dev_addr, addr) &&
+ (!vid || nbp_vlan_find(op, vid))) {
+ f->dst = op;
+ f->added_by_user = 0;
+ return;
+ }
+ }
+
+ /* Maybe bridge device has same hw addr? */
+ if (p && ether_addr_equal(br->dev->dev_addr, addr) &&
+ (!vid || br_vlan_find(br, vid))) {
+ f->dst = NULL;
+ f->added_by_user = 0;
+ return;
+ }
+
+ fdb_delete(br, f);
+}
+
+void br_fdb_find_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid)
+{
+ struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
+ struct net_bridge_fdb_entry *f;
+
+ spin_lock_bh(&br->hash_lock);
+ f = fdb_find(head, addr, vid);
+ if (f && f->is_local && !f->added_by_user && f->dst == p)
+ fdb_delete_local(br, p, f);
+ spin_unlock_bh(&br->hash_lock);
+}
+
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
{
struct net_bridge *br = p->br;
- bool no_vlan = (nbp_get_vlan_info(p) == NULL) ? true : false;
+ struct net_port_vlans *pv = nbp_get_vlan_info(p);
+ bool no_vlan = !pv;
int i;
+ u16 vid;
spin_lock_bh(&br->hash_lock);
@@ -104,38 +153,34 @@ void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr)
struct net_bridge_fdb_entry *f;
f = hlist_entry(h, struct net_bridge_fdb_entry, hlist);
- if (f->dst == p && f->is_local) {
- /* maybe another port has same hw addr? */
- struct net_bridge_port *op;
- u16 vid = f->vlan_id;
- list_for_each_entry(op, &br->port_list, list) {
- if (op != p &&
- ether_addr_equal(op->dev->dev_addr,
- f->addr.addr) &&
- nbp_vlan_find(op, vid)) {
- f->dst = op;
- goto insert;
- }
- }
-
+ if (f->dst == p && f->is_local && !f->added_by_user) {
/* delete old one */
- fdb_delete(br, f);
-insert:
- /* insert new address, may fail if invalid
- * address or dup.
- */
- fdb_insert(br, p, newaddr, vid);
+ fdb_delete_local(br, p, f);
/* if this port has no vlan information
* configured, we can safely be done at
* this point.
*/
if (no_vlan)
- goto done;
+ goto insert;
}
}
}
+insert:
+ /* insert new address, may fail if invalid address or dup. */
+ fdb_insert(br, p, newaddr, 0);
+
+ if (no_vlan)
+ goto done;
+
+ /* Now add entries for every VLAN configured on the port.
+ * This function runs under RTNL so the bitmap will not change
+ * from under us.
+ */
+ for_each_set_bit(vid, pv->vlan_bitmap, VLAN_N_VID)
+ fdb_insert(br, p, newaddr, vid);
+
done:
spin_unlock_bh(&br->hash_lock);
}
@@ -146,10 +191,12 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
struct net_port_vlans *pv;
u16 vid = 0;
+ spin_lock_bh(&br->hash_lock);
+
/* If old entry was unassociated with any port, then delete it. */
f = __br_fdb_get(br, br->dev->dev_addr, 0);
if (f && f->is_local && !f->dst)
- fdb_delete(br, f);
+ fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, 0);
@@ -159,14 +206,16 @@ void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr)
*/
pv = br_get_vlan_info(br);
if (!pv)
- return;
+ goto out;
for_each_set_bit_from(vid, pv->vlan_bitmap, VLAN_N_VID) {
f = __br_fdb_get(br, br->dev->dev_addr, vid);
if (f && f->is_local && !f->dst)
- fdb_delete(br, f);
+ fdb_delete_local(br, NULL, f);
fdb_insert(br, NULL, newaddr, vid);
}
+out:
+ spin_unlock_bh(&br->hash_lock);
}
void br_fdb_cleanup(unsigned long _data)
@@ -235,25 +284,11 @@ void br_fdb_delete_by_port(struct net_bridge *br,
if (f->is_static && !do_all)
continue;
- /*
- * if multiple ports all have the same device address
- * then when one port is deleted, assign
- * the local entry to other port
- */
- if (f->is_local) {
- struct net_bridge_port *op;
- list_for_each_entry(op, &br->port_list, list) {
- if (op != p &&
- ether_addr_equal(op->dev->dev_addr,
- f->addr.addr)) {
- f->dst = op;
- goto skip_delete;
- }
- }
- }
- fdb_delete(br, f);
- skip_delete: ;
+ if (f->is_local)
+ fdb_delete_local(br, p, f);
+ else
+ fdb_delete(br, f);
}
}
spin_unlock_bh(&br->hash_lock);
@@ -397,6 +432,7 @@ static struct net_bridge_fdb_entry *fdb_create(struct hlist_head *head,
fdb->vlan_id = vid;
fdb->is_local = 0;
fdb->is_static = 0;
+ fdb->added_by_user = 0;
fdb->updated = fdb->used = jiffies;
hlist_add_head_rcu(&fdb->hlist, head);
}
@@ -447,7 +483,7 @@ int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
}
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid)
+ const unsigned char *addr, u16 vid, bool added_by_user)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vid)];
struct net_bridge_fdb_entry *fdb;
@@ -473,13 +509,18 @@ void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
/* fastpath: update of existing entry */
fdb->dst = source;
fdb->updated = jiffies;
+ if (unlikely(added_by_user))
+ fdb->added_by_user = 1;
}
} else {
spin_lock(&br->hash_lock);
if (likely(!fdb_find(head, addr, vid))) {
fdb = fdb_create(head, source, addr, vid);
- if (fdb)
+ if (fdb) {
+ if (unlikely(added_by_user))
+ fdb->added_by_user = 1;
fdb_notify(br, fdb, RTM_NEWNEIGH);
+ }
}
/* else we lose race and someone else inserts
* it first, don't bother updating
@@ -647,6 +688,7 @@ static int fdb_add_entry(struct net_bridge_port *source, const __u8 *addr,
modified = true;
}
+ fdb->added_by_user = 1;
fdb->used = jiffies;
if (modified) {
@@ -664,7 +706,7 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
if (ndm->ndm_flags & NTF_USE) {
rcu_read_lock();
- br_fdb_update(p->br, p, addr, vid);
+ br_fdb_update(p->br, p, addr, vid, true);
rcu_read_unlock();
} else {
spin_lock_bh(&p->br->hash_lock);
@@ -749,8 +791,7 @@ out:
return err;
}
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr,
- u16 vlan)
+static int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vlan)
{
struct hlist_head *head = &br->hash[br_mac_hash(addr, vlan)];
struct net_bridge_fdb_entry *fdb;
diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c
index cffe1d666ba1..54d207d3a31c 100644
--- a/net/bridge/br_if.c
+++ b/net/bridge/br_if.c
@@ -389,6 +389,9 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
if (br->dev->needed_headroom < dev->needed_headroom)
br->dev->needed_headroom = dev->needed_headroom;
+ if (br_fdb_insert(br, p, dev->dev_addr, 0))
+ netdev_err(dev, "failed insert local address bridge forwarding table\n");
+
spin_lock_bh(&br->lock);
changed_addr = br_stp_recalculate_bridge_id(br);
@@ -404,9 +407,6 @@ int br_add_if(struct net_bridge *br, struct net_device *dev)
dev_set_mtu(br->dev, br_min_mtu(br));
- if (br_fdb_insert(br, p, dev->dev_addr, 0))
- netdev_err(dev, "failed insert local address bridge forwarding table\n");
-
kobject_uevent(&p->kobj, KOBJ_ADD);
return 0;
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index bf8dc7d308d6..28d544627422 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -77,7 +77,7 @@ int br_handle_frame_finish(struct sk_buff *skb)
/* insert into forwarding database after filtering to avoid spoofing */
br = p->br;
if (p->flags & BR_LEARNING)
- br_fdb_update(br, p, eth_hdr(skb)->h_source, vid);
+ br_fdb_update(br, p, eth_hdr(skb)->h_source, vid, false);
if (!is_broadcast_ether_addr(dest) && is_multicast_ether_addr(dest) &&
br_multicast_rcv(br, p, skb, vid))
@@ -148,7 +148,7 @@ static int br_handle_local_finish(struct sk_buff *skb)
br_vlan_get_tag(skb, &vid);
if (p->flags & BR_LEARNING)
- br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid);
+ br_fdb_update(p->br, p, eth_hdr(skb)->h_source, vid, false);
return 0; /* process further */
}
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index ef66365b7354..93067ecdb9a2 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1127,9 +1127,10 @@ static void br_multicast_query_received(struct net_bridge *br,
struct net_bridge_port *port,
struct bridge_mcast_querier *querier,
int saddr,
+ bool is_general_query,
unsigned long max_delay)
{
- if (saddr)
+ if (saddr && is_general_query)
br_multicast_update_querier_timer(br, querier, max_delay);
else if (timer_pending(&querier->timer))
return;
@@ -1181,8 +1182,16 @@ static int br_ip4_multicast_query(struct net_bridge *br,
IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1;
}
+ /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer
+ * all-systems destination address (224.0.0.1) for general queries
+ */
+ if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) {
+ err = -EINVAL;
+ goto out;
+ }
+
br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr,
- max_delay);
+ !group, max_delay);
if (!group)
goto out;
@@ -1228,6 +1237,7 @@ static int br_ip6_multicast_query(struct net_bridge *br,
unsigned long max_delay;
unsigned long now = jiffies;
const struct in6_addr *group = NULL;
+ bool is_general_query;
int err = 0;
spin_lock(&br->multicast_lock);
@@ -1235,6 +1245,12 @@ static int br_ip6_multicast_query(struct net_bridge *br,
(port && port->state == BR_STATE_DISABLED))
goto out;
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */
+ if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) {
+ err = -EINVAL;
+ goto out;
+ }
+
if (skb->len == sizeof(*mld)) {
if (!pskb_may_pull(skb, sizeof(*mld))) {
err = -EINVAL;
@@ -1256,8 +1272,19 @@ static int br_ip6_multicast_query(struct net_bridge *br,
max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL);
}
+ is_general_query = group && ipv6_addr_any(group);
+
+ /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer
+ * all-nodes destination address (ff02::1) for general queries
+ */
+ if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) {
+ err = -EINVAL;
+ goto out;
+ }
+
br_multicast_query_received(br, port, &br->ip6_querier,
- !ipv6_addr_any(&ip6h->saddr), max_delay);
+ !ipv6_addr_any(&ip6h->saddr),
+ is_general_query, max_delay);
if (!group)
goto out;
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index fcd12333c59b..3ba11bc99b65 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -104,6 +104,7 @@ struct net_bridge_fdb_entry
mac_addr addr;
unsigned char is_local;
unsigned char is_static;
+ unsigned char added_by_user;
__u16 vlan_id;
};
@@ -370,6 +371,9 @@ static inline void br_netpoll_disable(struct net_bridge_port *p)
int br_fdb_init(void);
void br_fdb_fini(void);
void br_fdb_flush(struct net_bridge *br);
+void br_fdb_find_delete_local(struct net_bridge *br,
+ const struct net_bridge_port *p,
+ const unsigned char *addr, u16 vid);
void br_fdb_changeaddr(struct net_bridge_port *p, const unsigned char *newaddr);
void br_fdb_change_mac_address(struct net_bridge *br, const u8 *newaddr);
void br_fdb_cleanup(unsigned long arg);
@@ -383,8 +387,7 @@ int br_fdb_fillbuf(struct net_bridge *br, void *buf, unsigned long count,
int br_fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr, u16 vid);
void br_fdb_update(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid);
-int fdb_delete_by_addr(struct net_bridge *br, const u8 *addr, u16 vid);
+ const unsigned char *addr, u16 vid, bool added_by_user);
int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
struct net_device *dev, const unsigned char *addr);
@@ -584,6 +587,7 @@ struct sk_buff *br_handle_vlan(struct net_bridge *br,
int br_vlan_add(struct net_bridge *br, u16 vid, u16 flags);
int br_vlan_delete(struct net_bridge *br, u16 vid);
void br_vlan_flush(struct net_bridge *br);
+bool br_vlan_find(struct net_bridge *br, u16 vid);
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val);
int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags);
int nbp_vlan_delete(struct net_bridge_port *port, u16 vid);
@@ -665,6 +669,11 @@ static inline void br_vlan_flush(struct net_bridge *br)
{
}
+static inline bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+ return false;
+}
+
static inline int nbp_vlan_add(struct net_bridge_port *port, u16 vid, u16 flags)
{
return -EOPNOTSUPP;
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index 656a6f3e40de..189ba1e7d851 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -194,6 +194,8 @@ void br_stp_change_bridge_id(struct net_bridge *br, const unsigned char *addr)
wasroot = br_is_root_bridge(br);
+ br_fdb_change_mac_address(br, addr);
+
memcpy(oldaddr, br->bridge_id.addr, ETH_ALEN);
memcpy(br->bridge_id.addr, addr, ETH_ALEN);
memcpy(br->dev->dev_addr, addr, ETH_ALEN);
diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c
index 4ca4d0a0151c..8249ca764c79 100644
--- a/net/bridge/br_vlan.c
+++ b/net/bridge/br_vlan.c
@@ -275,9 +275,7 @@ int br_vlan_delete(struct net_bridge *br, u16 vid)
if (!pv)
return -EINVAL;
- spin_lock_bh(&br->hash_lock);
- fdb_delete_by_addr(br, br->dev->dev_addr, vid);
- spin_unlock_bh(&br->hash_lock);
+ br_fdb_find_delete_local(br, NULL, br->dev->dev_addr, vid);
__vlan_del(pv, vid);
return 0;
@@ -295,6 +293,25 @@ void br_vlan_flush(struct net_bridge *br)
__vlan_flush(pv);
}
+bool br_vlan_find(struct net_bridge *br, u16 vid)
+{
+ struct net_port_vlans *pv;
+ bool found = false;
+
+ rcu_read_lock();
+ pv = rcu_dereference(br->vlan_info);
+
+ if (!pv)
+ goto out;
+
+ if (test_bit(vid, pv->vlan_bitmap))
+ found = true;
+
+out:
+ rcu_read_unlock();
+ return found;
+}
+
int br_vlan_filter_toggle(struct net_bridge *br, unsigned long val)
{
if (!rtnl_trylock())
@@ -359,9 +376,7 @@ int nbp_vlan_delete(struct net_bridge_port *port, u16 vid)
if (!pv)
return -EINVAL;
- spin_lock_bh(&port->br->hash_lock);
- fdb_delete_by_addr(port->br, port->dev->dev_addr, vid);
- spin_unlock_bh(&port->br->hash_lock);
+ br_fdb_find_delete_local(port->br, port, port->dev->dev_addr, vid);
return __vlan_del(pv, vid);
}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 4dca159435cf..edbca468fa73 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -22,6 +22,7 @@
#include <net/pkt_sched.h>
#include <net/caif/caif_device.h>
#include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
#include <net/caif/cfpkt.h>
#include <net/caif/cfcnfg.h>
#include <net/caif/cfserl.h>
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
index 353f793d1b3b..a6e115463052 100644
--- a/net/caif/cfsrvl.c
+++ b/net/caif/cfsrvl.c
@@ -15,6 +15,7 @@
#include <net/caif/caif_layer.h>
#include <net/caif/cfsrvl.h>
#include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
#define SRVL_CTRL_PKT_SIZE 1
#define SRVL_FLOW_OFF 0x81
diff --git a/net/can/af_can.c b/net/can/af_can.c
index d249874a366d..a27f8aad9e99 100644
--- a/net/can/af_can.c
+++ b/net/can/af_can.c
@@ -57,6 +57,7 @@
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
+#include <linux/can/skb.h>
#include <linux/ratelimit.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -290,7 +291,7 @@ int can_send(struct sk_buff *skb, int loop)
return -ENOMEM;
}
- newskb->sk = skb->sk;
+ can_skb_set_owner(newskb, skb->sk);
newskb->ip_summed = CHECKSUM_UNNECESSARY;
newskb->pkt_type = PACKET_BROADCAST;
}
diff --git a/net/can/bcm.c b/net/can/bcm.c
index 3fc737b214c7..dcb75c0e66c1 100644
--- a/net/can/bcm.c
+++ b/net/can/bcm.c
@@ -268,7 +268,7 @@ static void bcm_can_tx(struct bcm_op *op)
/* send with loopback */
skb->dev = dev;
- skb->sk = op->sk;
+ can_skb_set_owner(skb, op->sk);
can_send(skb, 1);
/* update statistics */
@@ -1223,7 +1223,7 @@ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk)
can_skb_prv(skb)->ifindex = dev->ifindex;
skb->dev = dev;
- skb->sk = sk;
+ can_skb_set_owner(skb, sk);
err = can_send(skb, 1); /* send with loopback */
dev_put(dev);
diff --git a/net/can/raw.c b/net/can/raw.c
index 07d72d852324..081e81fd017f 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -121,13 +121,9 @@ static void raw_rcv(struct sk_buff *oskb, void *data)
if (!ro->recv_own_msgs && oskb->sk == sk)
return;
- /* do not pass frames with DLC > 8 to a legacy socket */
- if (!ro->fd_frames) {
- struct canfd_frame *cfd = (struct canfd_frame *)oskb->data;
-
- if (unlikely(cfd->len > CAN_MAX_DLEN))
- return;
- }
+ /* do not pass non-CAN2.0 frames to a legacy socket */
+ if (!ro->fd_frames && oskb->len != CAN_MTU)
+ return;
/* clone the given skb to be able to enqueue it into the rcv queue */
skb = skb_clone(oskb, GFP_ATOMIC);
@@ -715,6 +711,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct socket *sock,
skb->dev = dev;
skb->sk = sk;
+ skb->priority = sk->sk_priority;
err = can_send(skb, ro->loopback);
@@ -737,9 +734,7 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t size, int flags)
{
struct sock *sk = sock->sk;
- struct raw_sock *ro = raw_sk(sk);
struct sk_buff *skb;
- int rxmtu;
int err = 0;
int noblock;
@@ -750,20 +745,10 @@ static int raw_recvmsg(struct kiocb *iocb, struct socket *sock,
if (!skb)
return err;
- /*
- * when serving a legacy socket the DLC <= 8 is already checked inside
- * raw_rcv(). Now check if we need to pass a canfd_frame to a legacy
- * socket and cut the possible CANFD_MTU/CAN_MTU length to CAN_MTU
- */
- if (!ro->fd_frames)
- rxmtu = CAN_MTU;
- else
- rxmtu = skb->len;
-
- if (size < rxmtu)
+ if (size < skb->len)
msg->msg_flags |= MSG_TRUNC;
else
- size = rxmtu;
+ size = skb->len;
err = memcpy_toiovec(msg->msg_iov, skb->data, size);
if (err < 0) {
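
The simplification above leans on the fact that a classic CAN frame skb has a fixed length; the receive-side filter in raw_rcv() reduces to a one-line predicate (a sketch, assuming the usual CAN_MTU == sizeof(struct can_frame) from linux/can.h):

/* illustrative only: does this socket accept the given CAN skb? */
static bool raw_sock_accepts(const struct raw_sock *ro, const struct sk_buff *oskb)
{
        /* legacy sockets only understand classic CAN_MTU-sized frames */
        return ro->fd_frames || oskb->len == CAN_MTU;
}

The same reasoning removes the CAN_MTU/CANFD_MTU juggling from raw_recvmsg(): skb->len already tells the whole story.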
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 0e478a0f4204..30efc5c18622 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -840,9 +840,13 @@ static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
if (!cursor->bvec_iter.bi_size) {
bio = bio->bi_next;
- cursor->bvec_iter = bio->bi_iter;
+ cursor->bio = bio;
+ if (bio)
+ cursor->bvec_iter = bio->bi_iter;
+ else
+ memset(&cursor->bvec_iter, 0,
+ sizeof(cursor->bvec_iter));
}
- cursor->bio = bio;
if (!cursor->last_piece) {
BUG_ON(!cursor->resid);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 010ff3bd58ad..0676f2b199d6 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1427,6 +1427,40 @@ static void __send_queued(struct ceph_osd_client *osdc)
}
/*
+ * Caller should hold map_sem for read and request_mutex.
+ */
+static int __ceph_osdc_start_request(struct ceph_osd_client *osdc,
+ struct ceph_osd_request *req,
+ bool nofail)
+{
+ int rc;
+
+ __register_request(osdc, req);
+ req->r_sent = 0;
+ req->r_got_reply = 0;
+ rc = __map_request(osdc, req, 0);
+ if (rc < 0) {
+ if (nofail) {
+ dout("osdc_start_request failed map, "
+ " will retry %lld\n", req->r_tid);
+ rc = 0;
+ } else {
+ __unregister_request(osdc, req);
+ }
+ return rc;
+ }
+
+ if (req->r_osd == NULL) {
+ dout("send_request %p no up osds in pg\n", req);
+ ceph_monc_request_next_osdmap(&osdc->client->monc);
+ } else {
+ __send_queued(osdc);
+ }
+
+ return 0;
+}
+
+/*
* Timeout callback, called every N seconds when 1 or more osd
* requests has been active for more than N seconds. When this
* happens, we ping all OSDs with requests who have timed out to
@@ -1653,6 +1687,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
osdmap_epoch = ceph_decode_32(&p);
/* lookup */
+ down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (req == NULL) {
@@ -1709,7 +1744,6 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
dout("redirect pool %lld\n", redir.oloc.pool);
__unregister_request(osdc, req);
- mutex_unlock(&osdc->request_mutex);
req->r_target_oloc = redir.oloc; /* struct */
@@ -1721,10 +1755,10 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
* successfully. In the future we might want to follow
* original request's nofail setting here.
*/
- err = ceph_osdc_start_request(osdc, req, true);
+ err = __ceph_osdc_start_request(osdc, req, true);
BUG_ON(err);
- goto done;
+ goto out_unlock;
}
already_completed = req->r_got_reply;
@@ -1742,8 +1776,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
req->r_got_reply = 1;
} else if ((flags & CEPH_OSD_FLAG_ONDISK) == 0) {
dout("handle_reply tid %llu dup ack\n", tid);
- mutex_unlock(&osdc->request_mutex);
- goto done;
+ goto out_unlock;
}
dout("handle_reply tid %llu flags %d\n", tid, flags);
@@ -1758,6 +1791,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
__unregister_request(osdc, req);
mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
if (!already_completed) {
if (req->r_unsafe_callback &&
@@ -1775,10 +1809,14 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
complete_request(req);
}
-done:
+out:
dout("req=%p req->r_linger=%d\n", req, req->r_linger);
ceph_osdc_put_request(req);
return;
+out_unlock:
+ mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
+ goto out;
bad_put:
req->r_result = -EIO;
@@ -1791,6 +1829,7 @@ bad_put:
ceph_osdc_put_request(req);
bad_mutex:
mutex_unlock(&osdc->request_mutex);
+ up_read(&osdc->map_sem);
bad:
pr_err("corrupt osd_op_reply got %d %d\n",
(int)msg->front.iov_len, le32_to_cpu(msg->hdr.front_len));
@@ -2351,34 +2390,16 @@ int ceph_osdc_start_request(struct ceph_osd_client *osdc,
struct ceph_osd_request *req,
bool nofail)
{
- int rc = 0;
+ int rc;
down_read(&osdc->map_sem);
mutex_lock(&osdc->request_mutex);
- __register_request(osdc, req);
- req->r_sent = 0;
- req->r_got_reply = 0;
- rc = __map_request(osdc, req, 0);
- if (rc < 0) {
- if (nofail) {
- dout("osdc_start_request failed map, "
- " will retry %lld\n", req->r_tid);
- rc = 0;
- } else {
- __unregister_request(osdc, req);
- }
- goto out_unlock;
- }
- if (req->r_osd == NULL) {
- dout("send_request %p no up osds in pg\n", req);
- ceph_monc_request_next_osdmap(&osdc->client->monc);
- } else {
- __send_queued(osdc);
- }
- rc = 0;
-out_unlock:
+
+ rc = __ceph_osdc_start_request(osdc, req, nofail);
+
mutex_unlock(&osdc->request_mutex);
up_read(&osdc->map_sem);
+
return rc;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
@@ -2504,9 +2525,12 @@ int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
err = -ENOMEM;
osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
if (!osdc->notify_wq)
- goto out_msgpool;
+ goto out_msgpool_reply;
+
return 0;
+out_msgpool_reply:
+ ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
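
Both callers of __ceph_osdc_start_request() in the hunks above already hold osdc->map_sem for read and osdc->request_mutex, which is the contract stated in its comment; a sketch of the wrapper shape the public entry point keeps (mirroring the hunk, illustrative only):

/* locking shape of ceph_osdc_start_request() around the new helper */
static int osdc_start_request_shape(struct ceph_osd_client *osdc,
                                    struct ceph_osd_request *req, bool nofail)
{
        int rc;

        down_read(&osdc->map_sem);              /* map_sem held for read ...   */
        mutex_lock(&osdc->request_mutex);       /* ... plus the request mutex  */
        rc = __ceph_osdc_start_request(osdc, req, nofail);
        mutex_unlock(&osdc->request_mutex);
        up_read(&osdc->map_sem);
        return rc;
}

handle_reply() grows its own down_read()/up_read() pair for the same reason: the redirect path restarts the request while still inside the reply handler.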
diff --git a/net/core/dev.c b/net/core/dev.c
index 3721db716350..b1b0c8d4d7df 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2420,7 +2420,7 @@ EXPORT_SYMBOL(netdev_rx_csum_fault);
* 2. No high memory really exists on this machine.
*/
-static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
+static int illegal_highdma(const struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_HIGHMEM
int i;
@@ -2495,34 +2495,36 @@ static int dev_gso_segment(struct sk_buff *skb, netdev_features_t features)
}
static netdev_features_t harmonize_features(struct sk_buff *skb,
- netdev_features_t features)
+ const struct net_device *dev,
+ netdev_features_t features)
{
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, skb_network_protocol(skb))) {
features &= ~NETIF_F_ALL_CSUM;
- } else if (illegal_highdma(skb->dev, skb)) {
+ } else if (illegal_highdma(dev, skb)) {
features &= ~NETIF_F_SG;
}
return features;
}
-netdev_features_t netif_skb_features(struct sk_buff *skb)
+netdev_features_t netif_skb_dev_features(struct sk_buff *skb,
+ const struct net_device *dev)
{
__be16 protocol = skb->protocol;
- netdev_features_t features = skb->dev->features;
+ netdev_features_t features = dev->features;
- if (skb_shinfo(skb)->gso_segs > skb->dev->gso_max_segs)
+ if (skb_shinfo(skb)->gso_segs > dev->gso_max_segs)
features &= ~NETIF_F_GSO_MASK;
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
protocol = veh->h_vlan_encapsulated_proto;
} else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, features);
+ return harmonize_features(skb, dev, features);
}
- features &= (skb->dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
+ features &= (dev->vlan_features | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD))
@@ -2530,9 +2532,9 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
NETIF_F_GEN_CSUM | NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX;
- return harmonize_features(skb, features);
+ return harmonize_features(skb, dev, features);
}
-EXPORT_SYMBOL(netif_skb_features);
+EXPORT_SYMBOL(netif_skb_dev_features);
int dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq)
@@ -2803,7 +2805,7 @@ EXPORT_SYMBOL(dev_loopback_xmit);
* the BH enable code must have IRQs enabled so that it will not deadlock.
* --BLG
*/
-int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
+static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
{
struct net_device *dev = skb->dev;
struct netdev_queue *txq;
@@ -4637,7 +4639,7 @@ struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
}
EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
-int netdev_adjacent_sysfs_add(struct net_device *dev,
+static int netdev_adjacent_sysfs_add(struct net_device *dev,
struct net_device *adj_dev,
struct list_head *dev_list)
{
@@ -4647,7 +4649,7 @@ int netdev_adjacent_sysfs_add(struct net_device *dev,
return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
linkname);
}
-void netdev_adjacent_sysfs_del(struct net_device *dev,
+static void netdev_adjacent_sysfs_del(struct net_device *dev,
char *name,
struct list_head *dev_list)
{
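
Making netif_skb_dev_features() take an explicit device implies the old entry point survives as a thin wrapper; presumably something along these lines lives in netdevice.h (an assumption, not shown in this diff):

/* assumed compatibility wrapper for existing callers */
static inline netdev_features_t netif_skb_features(struct sk_buff *skb)
{
        return netif_skb_dev_features(skb, skb->dev);
}

The explicit-device form lets forwarding paths ask what a different egress device can handle before the skb has actually been retargeted to it.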
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index f409e0bd35c0..185c341fafbd 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -745,6 +745,13 @@ static int fib_rules_event(struct notifier_block *this, unsigned long event,
attach_rules(&ops->rules_list, dev);
break;
+ case NETDEV_CHANGENAME:
+ list_for_each_entry(ops, &net->rules_ops, list) {
+ detach_rules(&ops->rules_list, dev);
+ attach_rules(&ops->rules_list, dev);
+ }
+ break;
+
case NETDEV_UNREGISTER:
list_for_each_entry(ops, &net->rules_ops, list)
detach_rules(&ops->rules_list, dev);
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index 87577d447554..e29e810663d7 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -323,17 +323,6 @@ u32 __skb_get_poff(const struct sk_buff *skb)
return poff;
}
-static inline u16 dev_cap_txqueue(struct net_device *dev, u16 queue_index)
-{
- if (unlikely(queue_index >= dev->real_num_tx_queues)) {
- net_warn_ratelimited("%s selects TX queue %d, but real number of TX queues is %d\n",
- dev->name, queue_index,
- dev->real_num_tx_queues);
- return 0;
- }
- return queue_index;
-}
-
static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
{
#ifdef CONFIG_XPS
@@ -372,7 +361,7 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
#endif
}
-u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
+static u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
{
struct sock *sk = skb->sk;
int queue_index = sk_tx_queue_get(sk);
@@ -392,7 +381,6 @@ u16 __netdev_pick_tx(struct net_device *dev, struct sk_buff *skb)
return queue_index;
}
-EXPORT_SYMBOL(__netdev_pick_tx);
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
@@ -403,13 +391,13 @@ struct netdev_queue *netdev_pick_tx(struct net_device *dev,
if (dev->real_num_tx_queues != 1) {
const struct net_device_ops *ops = dev->netdev_ops;
if (ops->ndo_select_queue)
- queue_index = ops->ndo_select_queue(dev, skb,
- accel_priv);
+ queue_index = ops->ndo_select_queue(dev, skb, accel_priv,
+ __netdev_pick_tx);
else
queue_index = __netdev_pick_tx(dev, skb);
if (!accel_priv)
- queue_index = dev_cap_txqueue(dev, queue_index);
+ queue_index = netdev_cap_txqueue(dev, queue_index);
}
skb_set_queue_mapping(skb, queue_index);
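
The ndo_select_queue() call above now receives __netdev_pick_tx as an explicit fallback, so the symbol no longer needs to be exported to drivers; the plumbing presumably looks like this in netdevice.h (an assumption, for orientation only):

/* assumed: the fallback type passed as the 4th ndo_select_queue() argument */
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
                                       struct sk_buff *skb);

netdev_cap_txqueue() likewise appears to be the old local dev_cap_txqueue() promoted to a shared helper, since driver-chosen queue indices still have to be clamped to real_num_tx_queues.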
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index b9e9e0d38672..e16129019c66 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -766,9 +766,6 @@ static void neigh_periodic_work(struct work_struct *work)
nht = rcu_dereference_protected(tbl->nht,
lockdep_is_held(&tbl->lock));
- if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
- goto out;
-
/*
* periodically recompute ReachableTime from random function
*/
@@ -781,6 +778,9 @@ static void neigh_periodic_work(struct work_struct *work)
neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
}
+ if (atomic_read(&tbl->entries) < tbl->gc_thresh1)
+ goto out;
+
for (i = 0 ; i < (1 << nht->hash_shift); i++) {
np = &nht->hash_buckets[i];
@@ -3046,7 +3046,7 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
if (!t)
goto err;
- for (i = 0; i < ARRAY_SIZE(t->neigh_vars); i++) {
+ for (i = 0; i < NEIGH_VAR_GC_INTERVAL; i++) {
t->neigh_vars[i].data += (long) p;
t->neigh_vars[i].extra1 = dev;
t->neigh_vars[i].extra2 = p;
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index c03f3dec4763..df9e6b1a9759 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -742,7 +742,7 @@ static bool pkt_is_ns(struct sk_buff *skb)
struct nd_msg *msg;
struct ipv6hdr *hdr;
- if (skb->protocol != htons(ETH_P_ARP))
+ if (skb->protocol != htons(ETH_P_IPV6))
return false;
if (!pskb_may_pull(skb, sizeof(struct ipv6hdr) + sizeof(struct nd_msg)))
return false;
@@ -948,6 +948,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
{
char *cur=opt, *delim;
int ipv6;
+ bool ipversion_set = false;
if (*cur != '@') {
if ((delim = strchr(cur, '@')) == NULL)
@@ -960,6 +961,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
cur++;
if (*cur != '/') {
+ ipversion_set = true;
if ((delim = strchr(cur, '/')) == NULL)
goto parse_failed;
*delim = 0;
@@ -1002,7 +1004,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
ipv6 = netpoll_parse_ip_addr(cur, &np->remote_ip);
if (ipv6 < 0)
goto parse_failed;
- else if (np->ipv6 != (bool)ipv6)
+ else if (ipversion_set && np->ipv6 != (bool)ipv6)
goto parse_failed;
else
np->ipv6 = (bool)ipv6;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 393b1bc9a618..120eecc0f5a4 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -374,7 +374,7 @@ static size_t rtnl_link_get_slave_info_data_size(const struct net_device *dev)
if (!master_dev)
return 0;
ops = master_dev->rtnl_link_ops;
- if (!ops->get_slave_size)
+ if (!ops || !ops->get_slave_size)
return 0;
/* IFLA_INFO_SLAVE_DATA + nested data */
return nla_total_size(sizeof(struct nlattr)) +
@@ -1963,16 +1963,21 @@ replay:
dev->ifindex = ifm->ifi_index;
- if (ops->newlink)
+ if (ops->newlink) {
err = ops->newlink(net, dev, tb, data);
- else
+ /* Drivers should call free_netdev() in ->destructor
+ * and unregister it on failure so that device could be
+ * finally freed in rtnl_unlock.
+ */
+ if (err < 0)
+ goto out;
+ } else {
err = register_netdevice(dev);
-
- if (err < 0) {
- free_netdev(dev);
- goto out;
+ if (err < 0) {
+ free_netdev(dev);
+ goto out;
+ }
}
-
err = rtnl_configure_link(dev, ifm);
if (err < 0)
unregister_netdevice(dev);
@@ -2116,12 +2121,13 @@ EXPORT_SYMBOL(rtmsg_ifinfo);
static int nlmsg_populate_fdb_fill(struct sk_buff *skb,
struct net_device *dev,
u8 *addr, u32 pid, u32 seq,
- int type, unsigned int flags)
+ int type, unsigned int flags,
+ int nlflags)
{
struct nlmsghdr *nlh;
struct ndmsg *ndm;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), nlflags);
if (!nlh)
return -EMSGSIZE;
@@ -2159,7 +2165,7 @@ static void rtnl_fdb_notify(struct net_device *dev, u8 *addr, int type)
if (!skb)
goto errout;
- err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF);
+ err = nlmsg_populate_fdb_fill(skb, dev, addr, 0, 0, type, NTF_SELF, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -2384,7 +2390,8 @@ static int nlmsg_populate_fdb(struct sk_buff *skb,
err = nlmsg_populate_fdb_fill(skb, dev, ha->addr,
portid, seq,
- RTM_NEWNEIGH, NTF_SELF);
+ RTM_NEWNEIGH, NTF_SELF,
+ NLM_F_MULTI);
if (err < 0)
return err;
skip:
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 5976ef0846bd..869c7afe3b07 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -707,9 +707,6 @@ static void __copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->mark = old->mark;
new->skb_iif = old->skb_iif;
__nf_copy(new, old);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- new->nf_trace = old->nf_trace;
-#endif
#ifdef CONFIG_NET_SCHED
new->tc_index = old->tc_index;
#ifdef CONFIG_NET_CLS_ACT
@@ -2841,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum);
/**
* skb_segment - Perform protocol segmentation on skb.
- * @skb: buffer to segment
+ * @head_skb: buffer to segment
* @features: features for the output path (see dev->features)
*
* This function performs segmentation on the given skb. It returns
* a pointer to the first in a list of new skbs for the segments.
* In case of error it returns ERR_PTR(err).
*/
-struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
+struct sk_buff *skb_segment(struct sk_buff *head_skb,
+ netdev_features_t features)
{
struct sk_buff *segs = NULL;
struct sk_buff *tail = NULL;
- struct sk_buff *fskb = skb_shinfo(skb)->frag_list;
- skb_frag_t *skb_frag = skb_shinfo(skb)->frags;
- unsigned int mss = skb_shinfo(skb)->gso_size;
- unsigned int doffset = skb->data - skb_mac_header(skb);
+ struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list;
+ skb_frag_t *frag = skb_shinfo(head_skb)->frags;
+ unsigned int mss = skb_shinfo(head_skb)->gso_size;
+ unsigned int doffset = head_skb->data - skb_mac_header(head_skb);
+ struct sk_buff *frag_skb = head_skb;
unsigned int offset = doffset;
- unsigned int tnl_hlen = skb_tnl_header_len(skb);
+ unsigned int tnl_hlen = skb_tnl_header_len(head_skb);
unsigned int headroom;
unsigned int len;
__be16 proto;
bool csum;
int sg = !!(features & NETIF_F_SG);
- int nfrags = skb_shinfo(skb)->nr_frags;
+ int nfrags = skb_shinfo(head_skb)->nr_frags;
int err = -ENOMEM;
int i = 0;
int pos;
- proto = skb_network_protocol(skb);
+ proto = skb_network_protocol(head_skb);
if (unlikely(!proto))
return ERR_PTR(-EINVAL);
csum = !!can_checksum_protocol(features, proto);
- __skb_push(skb, doffset);
- headroom = skb_headroom(skb);
- pos = skb_headlen(skb);
+ __skb_push(head_skb, doffset);
+ headroom = skb_headroom(head_skb);
+ pos = skb_headlen(head_skb);
do {
struct sk_buff *nskb;
- skb_frag_t *frag;
+ skb_frag_t *nskb_frag;
int hsize;
int size;
- len = skb->len - offset;
+ len = head_skb->len - offset;
if (len > mss)
len = mss;
- hsize = skb_headlen(skb) - offset;
+ hsize = skb_headlen(head_skb) - offset;
if (hsize < 0)
hsize = 0;
if (hsize > len || !sg)
hsize = len;
- if (!hsize && i >= nfrags && skb_headlen(fskb) &&
- (skb_headlen(fskb) == len || sg)) {
- BUG_ON(skb_headlen(fskb) > len);
+ if (!hsize && i >= nfrags && skb_headlen(list_skb) &&
+ (skb_headlen(list_skb) == len || sg)) {
+ BUG_ON(skb_headlen(list_skb) > len);
i = 0;
- nfrags = skb_shinfo(fskb)->nr_frags;
- skb_frag = skb_shinfo(fskb)->frags;
- pos += skb_headlen(fskb);
+ nfrags = skb_shinfo(list_skb)->nr_frags;
+ frag = skb_shinfo(list_skb)->frags;
+ frag_skb = list_skb;
+ pos += skb_headlen(list_skb);
while (pos < offset + len) {
BUG_ON(i >= nfrags);
- size = skb_frag_size(skb_frag);
+ size = skb_frag_size(frag);
if (pos + size > offset + len)
break;
i++;
pos += size;
- skb_frag++;
+ frag++;
}
- nskb = skb_clone(fskb, GFP_ATOMIC);
- fskb = fskb->next;
+ nskb = skb_clone(list_skb, GFP_ATOMIC);
+ list_skb = list_skb->next;
if (unlikely(!nskb))
goto err;
@@ -2936,7 +2936,7 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
__skb_push(nskb, doffset);
} else {
nskb = __alloc_skb(hsize + doffset + headroom,
- GFP_ATOMIC, skb_alloc_rx_flag(skb),
+ GFP_ATOMIC, skb_alloc_rx_flag(head_skb),
NUMA_NO_NODE);
if (unlikely(!nskb))
@@ -2952,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
segs = nskb;
tail = nskb;
- __copy_skb_header(nskb, skb);
- nskb->mac_len = skb->mac_len;
+ __copy_skb_header(nskb, head_skb);
+ nskb->mac_len = head_skb->mac_len;
skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom);
- skb_copy_from_linear_data_offset(skb, -tnl_hlen,
+ skb_copy_from_linear_data_offset(head_skb, -tnl_hlen,
nskb->data - tnl_hlen,
doffset + tnl_hlen);
@@ -2966,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
if (!sg) {
nskb->ip_summed = CHECKSUM_NONE;
- nskb->csum = skb_copy_and_csum_bits(skb, offset,
+ nskb->csum = skb_copy_and_csum_bits(head_skb, offset,
skb_put(nskb, len),
len, 0);
continue;
}
- frag = skb_shinfo(nskb)->frags;
+ nskb_frag = skb_shinfo(nskb)->frags;
- skb_copy_from_linear_data_offset(skb, offset,
+ skb_copy_from_linear_data_offset(head_skb, offset,
skb_put(nskb, hsize), hsize);
- skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG;
+ skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags &
+ SKBTX_SHARED_FRAG;
while (pos < offset + len) {
if (i >= nfrags) {
- BUG_ON(skb_headlen(fskb));
+ BUG_ON(skb_headlen(list_skb));
i = 0;
- nfrags = skb_shinfo(fskb)->nr_frags;
- skb_frag = skb_shinfo(fskb)->frags;
+ nfrags = skb_shinfo(list_skb)->nr_frags;
+ frag = skb_shinfo(list_skb)->frags;
+ frag_skb = list_skb;
BUG_ON(!nfrags);
- fskb = fskb->next;
+ list_skb = list_skb->next;
}
if (unlikely(skb_shinfo(nskb)->nr_frags >=
@@ -3000,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features)
goto err;
}
- *frag = *skb_frag;
- __skb_frag_ref(frag);
- size = skb_frag_size(frag);
+ if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC)))
+ goto err;
+
+ *nskb_frag = *frag;
+ __skb_frag_ref(nskb_frag);
+ size = skb_frag_size(nskb_frag);
if (pos < offset) {
- frag->page_offset += offset - pos;
- skb_frag_size_sub(frag, offset - pos);
+ nskb_frag->page_offset += offset - pos;
+ skb_frag_size_sub(nskb_frag, offset - pos);
}
skb_shinfo(nskb)->nr_frags++;
if (pos + size <= offset + len) {
i++;
- skb_frag++;
+ frag++;
pos += size;
} else {
- skb_frag_size_sub(frag, pos + size - (offset + len));
+ skb_frag_size_sub(nskb_frag, pos + size - (offset + len));
goto skip_fraglist;
}
- frag++;
+ nskb_frag++;
}
skip_fraglist:
@@ -3034,7 +3039,7 @@ perform_csum_check:
nskb->len - doffset, 0);
nskb->ip_summed = CHECKSUM_NONE;
}
- } while ((offset += len) < skb->len);
+ } while ((offset += len) < head_skb->len);
return segs;
diff --git a/net/core/sock.c b/net/core/sock.c
index 0c127dcdf6a8..c0fc6bdad1e3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1775,7 +1775,9 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
while (order) {
if (npages >= 1 << order) {
page = alloc_pages(sk->sk_allocation |
- __GFP_COMP | __GFP_NOWARN,
+ __GFP_COMP |
+ __GFP_NOWARN |
+ __GFP_NORETRY,
order);
if (page)
goto fill_page;
@@ -1845,7 +1847,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t prio)
gfp_t gfp = prio;
if (order)
- gfp |= __GFP_COMP | __GFP_NOWARN;
+ gfp |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
pfrag->page = alloc_pages(gfp, order);
if (likely(pfrag->page)) {
pfrag->offset = 0;
@@ -2355,10 +2357,13 @@ void release_sock(struct sock *sk)
if (sk->sk_backlog.tail)
__release_sock(sk);
+ /* Warning : release_cb() might need to release sk ownership,
+ * ie call sock_release_ownership(sk) before us.
+ */
if (sk->sk_prot->release_cb)
sk->sk_prot->release_cb(sk);
- sk->sk_lock.owned = 0;
+ sock_release_ownership(sk);
if (waitqueue_active(&sk->sk_lock.wq))
wake_up(&sk->sk_lock.wq);
spin_unlock_bh(&sk->sk_lock.slock);
diff --git a/net/dccp/ccids/lib/tfrc.c b/net/dccp/ccids/lib/tfrc.c
index c073b81a1f3e..62b5828acde0 100644
--- a/net/dccp/ccids/lib/tfrc.c
+++ b/net/dccp/ccids/lib/tfrc.c
@@ -8,7 +8,7 @@
#include "tfrc.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
-static bool tfrc_debug;
+bool tfrc_debug;
module_param(tfrc_debug, bool, 0644);
MODULE_PARM_DESC(tfrc_debug, "Enable TFRC debug messages");
#endif
diff --git a/net/dccp/ccids/lib/tfrc.h b/net/dccp/ccids/lib/tfrc.h
index a3d8f7c76ae0..40ee7d62b652 100644
--- a/net/dccp/ccids/lib/tfrc.h
+++ b/net/dccp/ccids/lib/tfrc.h
@@ -21,6 +21,7 @@
#include "packet_history.h"
#ifdef CONFIG_IP_DCCP_TFRC_DEBUG
+extern bool tfrc_debug;
#define tfrc_pr_debug(format, a...) DCCP_PR_DEBUG(tfrc_debug, format, ##a)
#else
#define tfrc_pr_debug(format, a...)
diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
index 2954dcbca832..4c04848953bd 100644
--- a/net/decnet/af_decnet.c
+++ b/net/decnet/af_decnet.c
@@ -2104,8 +2104,6 @@ static struct notifier_block dn_dev_notifier = {
.notifier_call = dn_device_event,
};
-extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *);
-
static struct packet_type dn_dix_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_DNA_RT),
.func = dn_route_rcv,
@@ -2353,9 +2351,6 @@ static const struct proto_ops dn_proto_ops = {
.sendpage = sock_no_sendpage,
};
-void dn_register_sysctl(void);
-void dn_unregister_sysctl(void);
-
MODULE_DESCRIPTION("The Linux DECnet Network Protocol");
MODULE_AUTHOR("Linux DECnet Project Team");
MODULE_LICENSE("GPL");
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 327060c6c874..7ae0d7f6dbd0 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -297,7 +297,7 @@ static bool seq_nr_after(u16 a, u16 b)
void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
{
- if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
+ if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) {
WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
return;
}
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 48b25c0af4d0..8edfea5da572 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -106,7 +106,6 @@ static int lowpan_header_create(struct sk_buff *skb,
unsigned short type, const void *_daddr,
const void *_saddr, unsigned int len)
{
- struct ipv6hdr *hdr;
const u8 *saddr = _saddr;
const u8 *daddr = _daddr;
struct ieee802154_addr sa, da;
@@ -117,8 +116,6 @@ static int lowpan_header_create(struct sk_buff *skb,
if (type != ETH_P_IPV6)
return 0;
- hdr = ipv6_hdr(skb);
-
if (!saddr)
saddr = dev->dev_addr;
@@ -533,7 +530,27 @@ static struct header_ops lowpan_header_ops = {
.create = lowpan_header_create,
};
+static struct lock_class_key lowpan_tx_busylock;
+static struct lock_class_key lowpan_netdev_xmit_lock_key;
+
+static void lowpan_set_lockdep_class_one(struct net_device *dev,
+ struct netdev_queue *txq,
+ void *_unused)
+{
+ lockdep_set_class(&txq->_xmit_lock,
+ &lowpan_netdev_xmit_lock_key);
+}
+
+
+static int lowpan_dev_init(struct net_device *dev)
+{
+ netdev_for_each_tx_queue(dev, lowpan_set_lockdep_class_one, NULL);
+ dev->qdisc_tx_busylock = &lowpan_tx_busylock;
+ return 0;
+}
+
static const struct net_device_ops lowpan_netdev_ops = {
+ .ndo_init = lowpan_dev_init,
.ndo_start_xmit = lowpan_xmit,
.ndo_set_mac_address = lowpan_set_address,
};
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index ecd2c3f245ce..19ab78aca547 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1296,8 +1296,11 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
segs = ERR_PTR(-EPROTONOSUPPORT);
- /* Note : following gso_segment() might change skb->encapsulation */
- udpfrag = !skb->encapsulation && proto == IPPROTO_UDP;
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
+ udpfrag = proto == IPPROTO_UDP && encap;
+ else
+ udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
ops = rcu_dereference(inet_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment))
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index ac2dff3c2c1c..bdbf68bb2e2d 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1443,7 +1443,8 @@ static size_t inet_nlmsg_size(void)
+ nla_total_size(4) /* IFA_LOCAL */
+ nla_total_size(4) /* IFA_BROADCAST */
+ nla_total_size(IFNAMSIZ) /* IFA_LABEL */
- + nla_total_size(4); /* IFA_FLAGS */
+ + nla_total_size(4) /* IFA_FLAGS */
+ + nla_total_size(sizeof(struct ifa_cacheinfo)); /* IFA_CACHEINFO */
}
static inline u32 cstamp_delta(unsigned long cstamp)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index bb075fc9a14f..3b01959bf4bb 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
}
work = frag_mem_limit(nf) - nf->low_thresh;
- while (work > 0) {
+ while (work > 0 || force) {
spin_lock(&nf->lru_lock);
if (list_empty(&nf->lru_list)) {
@@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
atomic_inc(&qp->refcnt);
hlist_add_head(&qp->list, &hb->chain);
+ inet_frag_lru_add(nf, qp);
spin_unlock(&hb->chain_lock);
read_unlock(&f->lock);
- inet_frag_lru_add(nf, qp);
+
return qp;
}
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e9f1217a8afd..f3869c186d97 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -39,6 +39,71 @@
#include <net/route.h>
#include <net/xfrm.h>
+static bool ip_may_fragment(const struct sk_buff *skb)
+{
+ return unlikely((ip_hdr(skb)->frag_off & htons(IP_DF)) == 0) ||
+ !skb->local_df;
+}
+
+static bool ip_exceeds_mtu(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu || skb->local_df)
+ return false;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
+static bool ip_gso_exceeds_dst_mtu(const struct sk_buff *skb)
+{
+ unsigned int mtu;
+
+ if (skb->local_df || !skb_is_gso(skb))
+ return false;
+
+ mtu = ip_dst_mtu_maybe_forward(skb_dst(skb), true);
+
+ /* if seglen > mtu, do software segmentation for IP fragmentation on
+ * output. DF bit cannot be set since ip_forward would have sent
+ * icmp error.
+ */
+ return skb_gso_network_seglen(skb) > mtu;
+}
+
+/* called if GSO skb needs to be fragmented on forward */
+static int ip_forward_finish_gso(struct sk_buff *skb)
+{
+ struct dst_entry *dst = skb_dst(skb);
+ netdev_features_t features;
+ struct sk_buff *segs;
+ int ret = 0;
+
+ features = netif_skb_dev_features(skb, dst->dev);
+ segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
+ if (IS_ERR(segs)) {
+ kfree_skb(skb);
+ return -ENOMEM;
+ }
+
+ consume_skb(skb);
+
+ do {
+ struct sk_buff *nskb = segs->next;
+ int err;
+
+ segs->next = NULL;
+ err = dst_output(segs);
+
+ if (err && ret == 0)
+ ret = err;
+ segs = nskb;
+ } while (segs);
+
+ return ret;
+}
+
static int ip_forward_finish(struct sk_buff *skb)
{
struct ip_options *opt = &(IPCB(skb)->opt);
@@ -49,6 +114,9 @@ static int ip_forward_finish(struct sk_buff *skb)
if (unlikely(opt->optlen))
ip_forward_options(skb);
+ if (ip_gso_exceeds_dst_mtu(skb))
+ return ip_forward_finish_gso(skb);
+
return dst_output(skb);
}
@@ -91,8 +159,7 @@ int ip_forward(struct sk_buff *skb)
IPCB(skb)->flags |= IPSKB_FORWARDED;
mtu = ip_dst_mtu_maybe_forward(&rt->dst, true);
- if (unlikely(skb->len > mtu && !skb_is_gso(skb) &&
- (ip_hdr(skb)->frag_off & htons(IP_DF))) && !skb->local_df) {
+ if (!ip_may_fragment(skb) && ip_exceeds_mtu(skb, mtu)) {
IP_INC_STATS(dev_net(rt->dst.dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(mtu));
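
The new ip_forward.c helpers above let an oversized GSO packet pass the MTU check as long as each segment would fit, and ip_forward_finish_gso() then software-segments it and transmits the pieces one by one, remembering the first error. A self-contained toy model of that decision flow (all names, sizes and the segmentation loop are made up for illustration, not the kernel's structures):

```c
#include <stdio.h>
#include <stdbool.h>

/* Toy packet: either it fits the egress MTU, may be fragmented (no DF),
 * or - if it is a GSO packet whose per-segment size still fits - it is
 * software-segmented and each piece is sent individually. */
struct toy_pkt {
	unsigned int len;       /* total length */
	unsigned int gso_size;  /* per-segment payload, 0 if not GSO */
	bool df;                /* "don't fragment" set */
};

static bool exceeds_mtu(const struct toy_pkt *p, unsigned int mtu)
{
	if (p->len <= mtu)
		return false;
	/* a GSO packet is acceptable as long as each segment fits */
	if (p->gso_size && p->gso_size <= mtu)
		return false;
	return true;
}

static int output_one(unsigned int len)
{
	printf("  output segment of %u bytes\n", len);
	return 0; /* pretend transmission succeeded */
}

static int forward(const struct toy_pkt *p, unsigned int mtu)
{
	if (p->df && exceeds_mtu(p, mtu)) {
		printf("  would send ICMP frag-needed (mtu %u)\n", mtu);
		return -1;
	}
	if (p->gso_size && p->len > mtu) {
		/* software segmentation: emit mss-sized pieces, remember
		 * the first error but keep sending the rest */
		unsigned int left = p->len;
		int ret = 0;

		while (left) {
			unsigned int seg = left > p->gso_size ? p->gso_size : left;
			int err = output_one(seg);

			if (err && ret == 0)
				ret = err;
			left -= seg;
		}
		return ret;
	}
	return output_one(p->len);
}

int main(void)
{
	struct toy_pkt gso = { .len = 4500, .gso_size = 1448, .df = true };
	struct toy_pkt big = { .len = 2000, .gso_size = 0,    .df = true };

	printf("GSO packet, 1500-byte MTU:\n");
	forward(&gso, 1500);
	printf("plain oversized DF packet, 1500-byte MTU:\n");
	forward(&big, 1500);
	return 0;
}
```
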
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 8971780aec7c..73c6b63bba74 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -422,9 +422,6 @@ static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- to->nf_trace = from->nf_trace;
-#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index bd28f386bd02..78a89e61925d 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -93,83 +93,32 @@ static void tunnel_dst_reset(struct ip_tunnel *t)
tunnel_dst_set(t, NULL);
}
-static void tunnel_dst_reset_all(struct ip_tunnel *t)
+void ip_tunnel_dst_reset_all(struct ip_tunnel *t)
{
int i;
for_each_possible_cpu(i)
__tunnel_dst_set(per_cpu_ptr(t->dst_cache, i), NULL);
}
+EXPORT_SYMBOL(ip_tunnel_dst_reset_all);
-static struct dst_entry *tunnel_dst_get(struct ip_tunnel *t)
+static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
{
struct dst_entry *dst;
rcu_read_lock();
dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
- if (dst)
+ if (dst) {
+ if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
+ rcu_read_unlock();
+ tunnel_dst_reset(t);
+ return NULL;
+ }
dst_hold(dst);
- rcu_read_unlock();
- return dst;
-}
-
-static struct dst_entry *tunnel_dst_check(struct ip_tunnel *t, u32 cookie)
-{
- struct dst_entry *dst = tunnel_dst_get(t);
-
- if (dst && dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
- tunnel_dst_reset(t);
- return NULL;
- }
-
- return dst;
-}
-
-/* Often modified stats are per cpu, other are shared (netdev->stats) */
-struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
- struct rtnl_link_stats64 *tot)
-{
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_sw_netstats *tstats =
- per_cpu_ptr(dev->tstats, i);
- u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
- unsigned int start;
-
- do {
- start = u64_stats_fetch_begin_bh(&tstats->syncp);
- rx_packets = tstats->rx_packets;
- tx_packets = tstats->tx_packets;
- rx_bytes = tstats->rx_bytes;
- tx_bytes = tstats->tx_bytes;
- } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
-
- tot->rx_packets += rx_packets;
- tot->tx_packets += tx_packets;
- tot->rx_bytes += rx_bytes;
- tot->tx_bytes += tx_bytes;
}
-
- tot->multicast = dev->stats.multicast;
-
- tot->rx_crc_errors = dev->stats.rx_crc_errors;
- tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
- tot->rx_length_errors = dev->stats.rx_length_errors;
- tot->rx_frame_errors = dev->stats.rx_frame_errors;
- tot->rx_errors = dev->stats.rx_errors;
-
- tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
- tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
- tot->tx_dropped = dev->stats.tx_dropped;
- tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
- tot->tx_errors = dev->stats.tx_errors;
-
- tot->collisions = dev->stats.collisions;
-
- return tot;
+ rcu_read_unlock();
+ return (struct rtable *)dst;
}
-EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
static bool ip_tunnel_key_match(const struct ip_tunnel_parm *p,
__be16 flags, __be32 key)
@@ -584,7 +533,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
struct flowi4 fl4;
u8 tos, ttl;
__be16 df;
- struct rtable *rt = NULL; /* Route to the other host */
+ struct rtable *rt; /* Route to the other host */
unsigned int max_headroom; /* The extra header space needed */
__be32 dst;
int err;
@@ -657,8 +606,7 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev,
init_tunnel_flow(&fl4, protocol, dst, tnl_params->saddr,
tunnel->parms.o_key, RT_TOS(tos), tunnel->parms.link);
- if (connected)
- rt = (struct rtable *)tunnel_dst_check(tunnel, 0);
+ rt = connected ? tunnel_rtable_get(tunnel, 0) : NULL;
if (!rt) {
rt = ip_route_output_key(tunnel->net, &fl4);
@@ -766,7 +714,7 @@ static void ip_tunnel_update(struct ip_tunnel_net *itn,
if (set_mtu)
dev->mtu = mtu;
}
- tunnel_dst_reset_all(t);
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(dev);
}
@@ -1095,7 +1043,7 @@ void ip_tunnel_uninit(struct net_device *dev)
if (itn->fb_tunnel_dev != dev)
ip_tunnel_del(netdev_priv(dev));
- tunnel_dst_reset_all(tunnel);
+ ip_tunnel_dst_reset_all(tunnel);
}
EXPORT_SYMBOL_GPL(ip_tunnel_uninit);
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 6156f4ef5e91..6f847dd56dbc 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -108,7 +108,6 @@ int iptunnel_pull_header(struct sk_buff *skb, int hdr_len, __be16 inner_proto)
nf_reset(skb);
secpath_reset(skb);
skb_clear_hash_if_not_l4(skb);
- skb_dst_drop(skb);
skb->vlan_tci = 0;
skb_set_queue_mapping(skb, 0);
skb->pkt_type = PACKET_HOST;
@@ -148,3 +147,49 @@ error:
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(iptunnel_handle_offloads);
+
+/* Often modified stats are per cpu, other are shared (netdev->stats) */
+struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ const struct pcpu_sw_netstats *tstats =
+ per_cpu_ptr(dev->tstats, i);
+ u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
+ unsigned int start;
+
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ rx_packets = tstats->rx_packets;
+ tx_packets = tstats->tx_packets;
+ rx_bytes = tstats->rx_bytes;
+ tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ tot->rx_packets += rx_packets;
+ tot->tx_packets += tx_packets;
+ tot->rx_bytes += rx_bytes;
+ tot->tx_bytes += tx_bytes;
+ }
+
+ tot->multicast = dev->stats.multicast;
+
+ tot->rx_crc_errors = dev->stats.rx_crc_errors;
+ tot->rx_fifo_errors = dev->stats.rx_fifo_errors;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_frame_errors = dev->stats.rx_frame_errors;
+ tot->rx_errors = dev->stats.rx_errors;
+
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->tx_carrier_errors = dev->stats.tx_carrier_errors;
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_aborted_errors = dev->stats.tx_aborted_errors;
+ tot->tx_errors = dev->stats.tx_errors;
+
+ tot->collisions = dev->stats.collisions;
+
+ return tot;
+}
+EXPORT_SYMBOL_GPL(ip_tunnel_get_stats64);
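
The relocated ip_tunnel_get_stats64() above sums per-CPU counters inside a u64_stats_fetch_begin_bh()/retry loop so a concurrent writer is never observed half-updated. A single-threaded user-space sketch of that retry pattern; the sequence counter and "per-CPU" layout here are simplified stand-ins, not the kernel API:

```c
#include <stdint.h>
#include <stdio.h>

/* Per-"CPU" counters guarded by a sequence count: even means stable,
 * odd means a writer is mid-update. */
struct pcpu_stats {
	unsigned int seq;
	uint64_t rx_packets;
	uint64_t rx_bytes;
};

/* Reader side: snapshot one CPU's counters, retrying if a writer
 * raced with us (sequence odd or changed while we read). */
static void fetch_stats(const struct pcpu_stats *s,
			uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		start = s->seq;
		*packets = s->rx_packets;
		*bytes = s->rx_bytes;
	} while ((start & 1) || start != s->seq);
}

int main(void)
{
	struct pcpu_stats cpus[2] = {
		{ .seq = 2, .rx_packets = 10, .rx_bytes = 1500 },
		{ .seq = 4, .rx_packets = 3,  .rx_bytes = 200  },
	};
	uint64_t tot_packets = 0, tot_bytes = 0;

	for (int i = 0; i < 2; i++) {
		uint64_t p, b;

		fetch_stats(&cpus[i], &p, &b);
		tot_packets += p;
		tot_bytes += b;
	}
	printf("rx: %llu packets, %llu bytes\n",
	       (unsigned long long)tot_packets,
	       (unsigned long long)tot_bytes);
	return 0;
}
```
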
diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
index efa1138fa523..b3e86ea7b71b 100644
--- a/net/ipv4/ipconfig.c
+++ b/net/ipv4/ipconfig.c
@@ -273,7 +273,7 @@ static int __init ic_open_devs(void)
msleep(1);
- if time_before(jiffies, next_msg)
+ if (time_before(jiffies, next_msg))
continue;
elapsed = jiffies_to_msecs(jiffies - start);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index b9b3472975ba..28863570dd60 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -2255,13 +2255,14 @@ int ipmr_get_route(struct net *net, struct sk_buff *skb,
}
static int ipmr_fill_mroute(struct mr_table *mrt, struct sk_buff *skb,
- u32 portid, u32 seq, struct mfc_cache *c, int cmd)
+ u32 portid, u32 seq, struct mfc_cache *c, int cmd,
+ int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2329,7 +2330,7 @@ static void mroute_netlink_event(struct mr_table *mrt, struct mfc_cache *mfc,
if (skb == NULL)
goto errout;
- err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+ err = ipmr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
@@ -2368,7 +2369,8 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
if (ipmr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0)
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
@@ -2382,7 +2384,8 @@ next_entry:
if (ipmr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0) {
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 81c6910cfa92..a26ce035e3fa 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -61,6 +61,11 @@ config NFT_CHAIN_NAT_IPV4
packet transformations such as the source, destination address and
source and destination ports.
+config NFT_REJECT_IPV4
+ depends on NF_TABLES_IPV4
+ default NFT_REJECT
+ tristate
+
config NF_TABLES_ARP
depends on NF_TABLES
tristate "ARP nf_tables support"
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c16be9d58420..90b82405331e 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -30,6 +30,7 @@ obj-$(CONFIG_NF_NAT_PROTO_GRE) += nf_nat_proto_gre.o
obj-$(CONFIG_NF_TABLES_IPV4) += nf_tables_ipv4.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV4) += nft_chain_route_ipv4.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV4) += nft_chain_nat_ipv4.o
+obj-$(CONFIG_NFT_REJECT_IPV4) += nft_reject_ipv4.o
obj-$(CONFIG_NF_TABLES_ARP) += nf_tables_arp.o
# generic IP tables
diff --git a/net/ipv4/netfilter/nf_nat_h323.c b/net/ipv4/netfilter/nf_nat_h323.c
index 9eea059dd621..574f7ebba0b6 100644
--- a/net/ipv4/netfilter/nf_nat_h323.c
+++ b/net/ipv4/netfilter/nf_nat_h323.c
@@ -229,7 +229,10 @@ static int nat_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
ret = nf_ct_expect_related(rtcp_exp);
if (ret == 0)
break;
- else if (ret != -EBUSY) {
+ else if (ret == -EBUSY) {
+ nf_ct_unexpect_related(rtp_exp);
+ continue;
+ } else if (ret < 0) {
nf_ct_unexpect_related(rtp_exp);
nated_port = 0;
break;
diff --git a/net/ipv4/netfilter/nf_nat_snmp_basic.c b/net/ipv4/netfilter/nf_nat_snmp_basic.c
index d551e31b416e..7c676671329d 100644
--- a/net/ipv4/netfilter/nf_nat_snmp_basic.c
+++ b/net/ipv4/netfilter/nf_nat_snmp_basic.c
@@ -1198,8 +1198,8 @@ static int snmp_translate(struct nf_conn *ct,
map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
} else {
/* DNAT replies */
- map.from = NOCT1(&ct->tuplehash[dir].tuple.src.u3.ip);
- map.to = NOCT1(&ct->tuplehash[!dir].tuple.dst.u3.ip);
+ map.from = NOCT1(&ct->tuplehash[!dir].tuple.src.u3.ip);
+ map.to = NOCT1(&ct->tuplehash[dir].tuple.dst.u3.ip);
}
if (map.from == map.to)
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
new file mode 100644
index 000000000000..e79718a382f2
--- /dev/null
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/icmp.h>
+#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
+
+void nft_reject_ipv4_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach(pkt->skb, priv->icmp_code);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset(pkt->skb, pkt->ops->hooknum);
+ break;
+ }
+
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv4_eval);
+
+static struct nft_expr_type nft_reject_ipv4_type;
+static const struct nft_expr_ops nft_reject_ipv4_ops = {
+ .type = &nft_reject_ipv4_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_ipv4_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv4_type __read_mostly = {
+ .family = NFPROTO_IPV4,
+ .name = "reject",
+ .ops = &nft_reject_ipv4_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_ipv4_module_init(void)
+{
+ return nft_register_expr(&nft_reject_ipv4_type);
+}
+
+static void __exit nft_reject_ipv4_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_ipv4_type);
+}
+
+module_init(nft_reject_ipv4_module_init);
+module_exit(nft_reject_ipv4_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET, "reject");
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 25071b48921c..4c011ec69ed4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1597,6 +1597,7 @@ static int __mkroute_input(struct sk_buff *skb,
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ RT_CACHE_STAT_INC(in_slow_tot);
rth->dst.input = ip_forward;
rth->dst.output = ip_output;
@@ -1695,10 +1696,11 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
fl4.daddr = daddr;
fl4.saddr = saddr;
err = fib_lookup(net, &fl4, &res);
- if (err != 0)
+ if (err != 0) {
+ if (!IN_DEV_FORWARD(in_dev))
+ err = -EHOSTUNREACH;
goto no_route;
-
- RT_CACHE_STAT_INC(in_slow_tot);
+ }
if (res.type == RTN_BROADCAST)
goto brd_input;
@@ -1712,8 +1714,10 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
goto local_input;
}
- if (!IN_DEV_FORWARD(in_dev))
+ if (!IN_DEV_FORWARD(in_dev)) {
+ err = -EHOSTUNREACH;
goto no_route;
+ }
if (res.type != RTN_UNICAST)
goto martian_destination;
@@ -1768,6 +1772,7 @@ local_input:
rth->rt_gateway = 0;
rth->rt_uses_gateway = 0;
INIT_LIST_HEAD(&rth->rt_uncached);
+ RT_CACHE_STAT_INC(in_slow_tot);
if (res.type == RTN_UNREACHABLE) {
rth->dst.input= ip_error;
rth->dst.error= -err;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 4475b3bb494d..97c8f5620c43 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1044,7 +1044,8 @@ void tcp_free_fastopen_req(struct tcp_sock *tp)
}
}
-static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
+static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
+ int *copied, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
int err, flags;
@@ -1059,11 +1060,12 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg, int *size)
if (unlikely(tp->fastopen_req == NULL))
return -ENOBUFS;
tp->fastopen_req->data = msg;
+ tp->fastopen_req->size = size;
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
msg->msg_namelen, flags);
- *size = tp->fastopen_req->copied;
+ *copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
return err;
}
@@ -1083,7 +1085,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
flags = msg->msg_flags;
if (flags & MSG_FASTOPEN) {
- err = tcp_sendmsg_fastopen(sk, msg, &copied_syn);
+ err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
else if (err)
@@ -2229,7 +2231,7 @@ adjudge_to_death:
/* This is a (useful) BSD violating of the RFC. There is a
* problem with TCP as specified in that the other end could
* keep a socket open forever with no application left this end.
- * We use a 3 minute timeout (about the same as BSD) then kill
+ * We use a 1 minute timeout (about the same as BSD) then kill
* our end. If they send after that then tough - BUT: long enough
* that we won't make the old 4*rto = almost no time - whoops
* reset mistake.
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index ad37bf18ae4b..2388275adb9b 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -290,8 +290,7 @@ bool tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
left = tp->snd_cwnd - in_flight;
if (sk_can_gso(sk) &&
left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd &&
- left * tp->mss_cache < sk->sk_gso_max_size &&
- left < sk->sk_gso_max_segs)
+ left < tp->xmit_size_goal_segs)
return true;
return left <= tcp_max_tso_deferred_mss(tp);
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 65cf90e063d5..eeaac399420d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -671,6 +671,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
struct tcp_sock *tp = tcp_sk(sk);
long m = mrtt; /* RTT */
+ u32 srtt = tp->srtt;
/* The following amusing code comes from Jacobson's
* article in SIGCOMM '88. Note that rtt and mdev
@@ -688,11 +689,9 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
* does not matter how to _calculate_ it. Seems, it was trap
* that VJ failed to avoid. 8)
*/
- if (m == 0)
- m = 1;
- if (tp->srtt != 0) {
- m -= (tp->srtt >> 3); /* m is now error in rtt est */
- tp->srtt += m; /* rtt = 7/8 rtt + 1/8 new */
+ if (srtt != 0) {
+ m -= (srtt >> 3); /* m is now error in rtt est */
+ srtt += m; /* rtt = 7/8 rtt + 1/8 new */
if (m < 0) {
m = -m; /* m is now abs(error) */
m -= (tp->mdev >> 2); /* similar update on mdev */
@@ -723,11 +722,12 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
}
} else {
/* no previous measure. */
- tp->srtt = m << 3; /* take the measured time to be rtt */
+ srtt = m << 3; /* take the measured time to be rtt */
tp->mdev = m << 1; /* make sure rto = 3*rtt */
tp->mdev_max = tp->rttvar = max(tp->mdev, tcp_rto_min(sk));
tp->rtt_seq = tp->snd_nxt;
}
+ tp->srtt = max(1U, srtt);
}
/* Set the sk_pacing_rate to allow proper sizing of TSO packets.
@@ -746,8 +746,10 @@ static void tcp_update_pacing_rate(struct sock *sk)
rate *= max(tp->snd_cwnd, tp->packets_out);
- /* Correction for small srtt : minimum srtt being 8 (1 jiffy << 3),
- * be conservative and assume srtt = 1 (125 us instead of 1.25 ms)
+ /* Correction for small srtt and scheduling constraints.
+ * For small rtt, consider noise is too high, and use
+ * the minimal value (srtt = 1 -> 125 us for HZ=1000)
+ *
* We probably need usec resolution in the future.
* Note: This also takes care of possible srtt=0 case,
* when tcp_rtt_estimator() was not yet called.
@@ -1943,8 +1945,9 @@ void tcp_enter_loss(struct sock *sk, int how)
if (skb == tcp_send_head(sk))
break;
- if (TCP_SKB_CB(skb)->sacked & TCPCB_RETRANS)
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS)
tp->undo_marker = 0;
+
TCP_SKB_CB(skb)->sacked &= (~TCPCB_TAGBITS)|TCPCB_SACKED_ACKED;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_ACKED) || how) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
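
The tcp_rtt_estimator() hunk above keeps the smoothed RTT in fixed point (srtt is stored shifted left by 3), updates it as 7/8 of the old value plus 1/8 of the new sample, and clamps the result to at least 1 so that 0 can still mean "no measurement yet". A rough user-space sketch of just that arithmetic, with illustrative names and without the mdev/rttvar bookkeeping:

```c
#include <stdio.h>
#include <stdint.h>

/* srtt stored << 3, so "srtt += m - (srtt >> 3)" is
 * srtt = 7/8 * srtt + 1/8 * sample in fixed point. */
static uint32_t srtt_update(uint32_t srtt, uint32_t sample)
{
	int32_t m = (int32_t)sample;

	if (srtt != 0) {
		m -= srtt >> 3;          /* error against current estimate */
		srtt += m;               /* new = 7/8 old + 1/8 sample */
	} else {
		srtt = sample << 3;      /* first sample seeds the estimate */
	}
	return srtt ? srtt : 1;          /* max(1, srtt): keep 0 = "unset" */
}

int main(void)
{
	uint32_t srtt = 0;
	uint32_t samples[] = { 100, 100, 180, 60 };  /* RTT samples in jiffies */

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		srtt = srtt_update(srtt, samples[i]);
		printf("sample %3u -> srtt (<<3) = %4u, i.e. ~%u\n",
		       samples[i], srtt, srtt >> 3);
	}
	return 0;
}
```
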
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 03d26b85eab8..17a11e65e57f 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -698,7 +698,8 @@ static void tcp_tsq_handler(struct sock *sk)
if ((1 << sk->sk_state) &
(TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_CLOSING |
TCPF_CLOSE_WAIT | TCPF_LAST_ACK))
- tcp_write_xmit(sk, tcp_current_mss(sk), 0, 0, GFP_ATOMIC);
+ tcp_write_xmit(sk, tcp_current_mss(sk), tcp_sk(sk)->nonagle,
+ 0, GFP_ATOMIC);
}
/*
* One tasklet per cpu tries to send more skbs.
@@ -766,6 +767,17 @@ void tcp_release_cb(struct sock *sk)
if (flags & (1UL << TCP_TSQ_DEFERRED))
tcp_tsq_handler(sk);
+ /* Here begins the tricky part :
+ * We are called from release_sock() with :
+ * 1) BH disabled
+ * 2) sk_lock.slock spinlock held
+ * 3) socket owned by us (sk->sk_lock.owned == 1)
+ *
+ * But following code is meant to be called from BH handlers,
+ * so we should keep BH disabled, but early release socket ownership
+ */
+ sock_release_ownership(sk);
+
if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) {
tcp_write_timer_handler(sk);
__sock_put(sk);
@@ -863,8 +875,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
if (unlikely(skb->fclone == SKB_FCLONE_ORIG &&
fclone->fclone == SKB_FCLONE_CLONE))
- NET_INC_STATS_BH(sock_net(sk),
- LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
+ NET_INC_STATS(sock_net(sk),
+ LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
if (unlikely(skb_cloned(skb)))
skb = pskb_copy(skb, gfp_mask);
@@ -1904,7 +1916,15 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (atomic_read(&sk->sk_wmem_alloc) > limit) {
set_bit(TSQ_THROTTLED, &tp->tsq_flags);
- break;
+ /* It is possible TX completion already happened
+ * before we set TSQ_THROTTLED, so we must
+ * test again the condition.
+ * We abuse smp_mb__after_clear_bit() because
+ * there is no smp_mb__after_set_bit() yet
+ */
+ smp_mb__after_clear_bit();
+ if (atomic_read(&sk->sk_wmem_alloc) > limit)
+ break;
}
limit = mss_now;
@@ -1977,7 +1997,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
/* Schedule a loss probe in 2*RTT for SACK capable connections
* in Open state, that are either limited by cwnd or application.
*/
- if (sysctl_tcp_early_retrans < 3 || !rtt || !tp->packets_out ||
+ if (sysctl_tcp_early_retrans < 3 || !tp->srtt || !tp->packets_out ||
!tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
return false;
@@ -2328,6 +2348,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
unsigned int cur_mss;
+ int err;
/* Inconclusive MTU probe */
if (icsk->icsk_mtup.probe_size) {
@@ -2391,11 +2412,15 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
skb_headroom(skb) >= 0xFFFF)) {
struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,
GFP_ATOMIC);
- return nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
- -ENOBUFS;
+ err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
+ -ENOBUFS;
} else {
- return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
+ err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
}
+
+ if (likely(!err))
+ TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
+ return err;
}
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
@@ -2899,7 +2924,12 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
space = __tcp_mtu_to_mss(sk, inet_csk(sk)->icsk_pmtu_cookie) -
MAX_TCP_OPTION_SPACE;
- syn_data = skb_copy_expand(syn, skb_headroom(syn), space,
+ space = min_t(size_t, space, fo->size);
+
+ /* limit to order-0 allocations */
+ space = min_t(size_t, space, SKB_MAX_HEAD(MAX_TCP_HEADER));
+
+ syn_data = skb_copy_expand(syn, MAX_TCP_HEADER, space,
sk->sk_allocation);
if (syn_data == NULL)
goto fallback;
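
In the tcp_write_xmit() hunk above, the sender re-tests sk_wmem_alloc after setting TSQ_THROTTLED (with a barrier in between), because a TX completion may already have drained the queue before it could see the flag. A small user-space sketch of that check / publish flag / re-check pattern using C11 atomics; the variable names and limit are illustrative only:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint wmem_alloc;     /* bytes currently queued for TX */
static atomic_bool throttled;      /* "wake me when memory is freed" */

static bool should_stop(unsigned int limit)
{
	if (atomic_load(&wmem_alloc) <= limit)
		return false;

	/* Publish the flag first, then look again: if the completion
	 * path freed memory before it could see the flag, we must not
	 * stop, or nobody would ever wake us up. */
	atomic_store(&throttled, true);
	atomic_thread_fence(memory_order_seq_cst);
	return atomic_load(&wmem_alloc) > limit;
}

int main(void)
{
	atomic_store(&wmem_alloc, 200000);
	printf("stop? %s\n", should_stop(128 * 1024) ? "yes" : "no");

	atomic_store(&wmem_alloc, 1000);   /* completion drained the queue */
	printf("stop? %s\n", should_stop(128 * 1024) ? "yes" : "no");
	return 0;
}
```
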
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 25f5cee3a08a..88b4023ecfcf 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -17,6 +17,8 @@
static DEFINE_SPINLOCK(udp_offload_lock);
static struct udp_offload_priv __rcu *udp_offload_base __read_mostly;
+#define udp_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&udp_offload_lock))
+
struct udp_offload_priv {
struct udp_offload *offload;
struct rcu_head rcu;
@@ -100,8 +102,7 @@ out:
int udp_add_offload(struct udp_offload *uo)
{
- struct udp_offload_priv __rcu **head = &udp_offload_base;
- struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_KERNEL);
+ struct udp_offload_priv *new_offload = kzalloc(sizeof(*new_offload), GFP_ATOMIC);
if (!new_offload)
return -ENOMEM;
@@ -109,8 +110,8 @@ int udp_add_offload(struct udp_offload *uo)
new_offload->offload = uo;
spin_lock(&udp_offload_lock);
- rcu_assign_pointer(new_offload->next, rcu_dereference(*head));
- rcu_assign_pointer(*head, new_offload);
+ new_offload->next = udp_offload_base;
+ rcu_assign_pointer(udp_offload_base, new_offload);
spin_unlock(&udp_offload_lock);
return 0;
@@ -130,12 +131,12 @@ void udp_del_offload(struct udp_offload *uo)
spin_lock(&udp_offload_lock);
- uo_priv = rcu_dereference(*head);
+ uo_priv = udp_deref_protected(*head);
for (; uo_priv != NULL;
- uo_priv = rcu_dereference(*head)) {
-
+ uo_priv = udp_deref_protected(*head)) {
if (uo_priv->offload == uo) {
- rcu_assign_pointer(*head, rcu_dereference(uo_priv->next));
+ rcu_assign_pointer(*head,
+ udp_deref_protected(uo_priv->next));
goto unlock;
}
head = &uo_priv->next;
diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig
index d92e5586783e..438a73aa777c 100644
--- a/net/ipv6/Kconfig
+++ b/net/ipv6/Kconfig
@@ -138,6 +138,7 @@ config INET6_XFRM_MODE_ROUTEOPTIMIZATION
config IPV6_VTI
tristate "Virtual (secure) IPv6: tunneling"
select IPV6_TUNNEL
+ select NET_IP_TUNNEL
depends on INET6_XFRM_MODE_TUNNEL
---help---
Tunneling means encapsulating data of one protocol type within
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ad235690684c..344e972426df 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1103,8 +1103,11 @@ retry:
* Lifetime is greater than REGEN_ADVANCE time units. In particular,
* an implementation must not create a temporary address with a zero
* Preferred Lifetime.
+ * Use age calculation as in addrconf_verify to avoid unnecessary
+ * temporary addresses being generated.
*/
- if (tmp_prefered_lft <= regen_advance) {
+ age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
+ if (tmp_prefered_lft <= regen_advance + age) {
in6_ifa_put(ifp);
in6_dev_put(idev);
ret = -1;
@@ -2783,6 +2786,8 @@ static void addrconf_gre_config(struct net_device *dev)
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
addrconf_add_linklocal(idev, &addr);
+ else
+ addrconf_prefix_route(&addr, 64, dev, 0, 0);
}
#endif
diff --git a/net/ipv6/exthdrs_core.c b/net/ipv6/exthdrs_core.c
index 140748debc4a..8af3eb57f438 100644
--- a/net/ipv6/exthdrs_core.c
+++ b/net/ipv6/exthdrs_core.c
@@ -212,7 +212,7 @@ int ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset,
found = (nexthdr == target);
if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) {
- if (target < 0)
+ if (target < 0 || found)
break;
return -ENOENT;
}
diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c
index cf77f3abfd06..447a7fbd1bb6 100644
--- a/net/ipv6/exthdrs_offload.c
+++ b/net/ipv6/exthdrs_offload.c
@@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void)
int ret;
ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING);
- if (!ret)
+ if (ret)
goto out;
ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS);
- if (!ret)
+ if (ret)
goto out_rt;
out:
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index f81f59686f21..f2610e157660 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -414,7 +414,7 @@ static void icmp6_send(struct sk_buff *skb, u8 type, u8 code, __u32 info)
addr_type = ipv6_addr_type(&hdr->daddr);
if (ipv6_chk_addr(net, &hdr->daddr, skb->dev, 0) ||
- ipv6_anycast_destination(skb))
+ ipv6_chk_acast_addr_src(net, skb->dev, &hdr->daddr))
saddr = &hdr->daddr;
/*
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 1e8683b135bb..59f95affceb0 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -89,7 +89,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
unsigned int unfrag_ip6hlen;
u8 *prevhdr;
int offset = 0;
- bool tunnel;
+ bool encap, udpfrag;
int nhoff;
if (unlikely(skb_shinfo(skb)->gso_type &
@@ -110,8 +110,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
goto out;
- tunnel = SKB_GSO_CB(skb)->encap_level > 0;
- if (tunnel)
+ encap = SKB_GSO_CB(skb)->encap_level > 0;
+ if (encap)
features = skb->dev->hw_enc_features & netif_skb_features(skb);
SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
@@ -121,6 +121,12 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
proto = ipv6_gso_pull_exthdrs(skb, ipv6h->nexthdr);
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & (SKB_GSO_SIT|SKB_GSO_IPIP))
+ udpfrag = proto == IPPROTO_UDP && encap;
+ else
+ udpfrag = proto == IPPROTO_UDP && !skb->encapsulation;
+
ops = rcu_dereference(inet6_offloads[proto]);
if (likely(ops && ops->callbacks.gso_segment)) {
skb_reset_transport_header(skb);
@@ -133,13 +139,9 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
for (skb = segs; skb; skb = skb->next) {
ipv6h = (struct ipv6hdr *)(skb_mac_header(skb) + nhoff);
ipv6h->payload_len = htons(skb->len - nhoff - sizeof(*ipv6h));
- if (tunnel) {
- skb_reset_inner_headers(skb);
- skb->encapsulation = 1;
- }
skb->network_header = (u8 *)ipv6h - skb->head;
- if (!tunnel && proto == IPPROTO_UDP) {
+ if (udpfrag) {
unfrag_ip6hlen = ip6_find_1stfragopt(skb, &prevhdr);
fptr = (struct frag_hdr *)((u8 *)ipv6h + unfrag_ip6hlen);
fptr->frag_off = htons(offset);
@@ -148,6 +150,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
offset += (ntohs(ipv6h->payload_len) -
sizeof(struct frag_hdr));
}
+ if (encap)
+ skb_reset_inner_headers(skb);
}
out:
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index ef02b26ccf81..64d6073731d3 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -342,6 +342,20 @@ static unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
return mtu;
}
+static bool ip6_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
+{
+ if (skb->len <= mtu || skb->local_df)
+ return false;
+
+ if (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)
+ return true;
+
+ if (skb_is_gso(skb) && skb_gso_network_seglen(skb) <= mtu)
+ return false;
+
+ return true;
+}
+
int ip6_forward(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
@@ -466,8 +480,7 @@ int ip6_forward(struct sk_buff *skb)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- if ((!skb->local_df && skb->len > mtu && !skb_is_gso(skb)) ||
- (IP6CB(skb)->frag_max_size && IP6CB(skb)->frag_max_size > mtu)) {
+ if (ip6_pkt_too_big(skb, mtu)) {
/* Again, force OUTPUT device used as source address */
skb->dev = dst->dev;
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
@@ -517,9 +530,6 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
-#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
- to->nf_trace = from->nf_trace;
-#endif
skb_copy_secmark(to, from);
}
@@ -1091,21 +1101,19 @@ static void ip6_append_data_mtu(unsigned int *mtu,
unsigned int fragheaderlen,
struct sk_buff *skb,
struct rt6_info *rt,
- bool pmtuprobe)
+ unsigned int orig_mtu)
{
if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
if (skb == NULL) {
/* first fragment, reserve header_len */
- *mtu = *mtu - rt->dst.header_len;
+ *mtu = orig_mtu - rt->dst.header_len;
} else {
/*
* this fragment is not first, the headers
* space is regarded as data space.
*/
- *mtu = min(*mtu, pmtuprobe ?
- rt->dst.dev->mtu :
- dst_mtu(rt->dst.path));
+ *mtu = orig_mtu;
}
*maxfraglen = ((*mtu - fragheaderlen) & ~7)
+ fragheaderlen - sizeof(struct frag_hdr);
@@ -1122,7 +1130,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
struct ipv6_pinfo *np = inet6_sk(sk);
struct inet_cork *cork;
struct sk_buff *skb, *skb_prev = NULL;
- unsigned int maxfraglen, fragheaderlen, mtu;
+ unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
int exthdrlen;
int dst_exthdrlen;
int hh_len;
@@ -1204,6 +1212,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
dst_exthdrlen = 0;
mtu = cork->fragsize;
}
+ orig_mtu = mtu;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
@@ -1301,8 +1310,7 @@ alloc_new_skb:
if (skb == NULL || skb_prev == NULL)
ip6_append_data_mtu(&mtu, &maxfraglen,
fragheaderlen, skb, rt,
- np->pmtudisc >=
- IPV6_PMTUDISC_PROBE);
+ orig_mtu);
skb_prev = skb;
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0eb4038a4d63..8737400af0a0 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2349,13 +2349,14 @@ int ip6mr_get_route(struct net *net,
}
static int ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
- u32 portid, u32 seq, struct mfc6_cache *c, int cmd)
+ u32 portid, u32 seq, struct mfc6_cache *c, int cmd,
+ int flags)
{
struct nlmsghdr *nlh;
struct rtmsg *rtm;
int err;
- nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, portid, seq, cmd, sizeof(*rtm), flags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2423,7 +2424,7 @@ static void mr6_netlink_event(struct mr6_table *mrt, struct mfc6_cache *mfc,
if (skb == NULL)
goto errout;
- err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd);
+ err = ip6mr_fill_mroute(mrt, skb, 0, 0, mfc, cmd, 0);
if (err < 0)
goto errout;
@@ -2462,7 +2463,8 @@ static int ip6mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0)
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
goto done;
next_entry:
e++;
@@ -2476,7 +2478,8 @@ next_entry:
if (ip6mr_fill_mroute(mrt, skb,
NETLINK_CB(cb->skb).portid,
cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE) < 0) {
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0) {
spin_unlock_bh(&mfc_unres_lock);
goto done;
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index 35750df744dc..4bff1f297e39 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -50,6 +50,11 @@ config NFT_CHAIN_NAT_IPV6
packet transformations such as the source, destination address and
source and destination ports.
+config NFT_REJECT_IPV6
+ depends on NF_TABLES_IPV6
+ default NFT_REJECT
+ tristate
+
config IP6_NF_IPTABLES
tristate "IP6 tables support (required for filtering)"
depends on INET && IPV6
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index d1b4928f34f7..70d3dd66f2cd 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
obj-$(CONFIG_NFT_CHAIN_NAT_IPV6) += nft_chain_nat_ipv6.o
+obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o
# matches
obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o
diff --git a/net/ipv6/netfilter/nft_reject_ipv6.c b/net/ipv6/netfilter/nft_reject_ipv6.c
new file mode 100644
index 000000000000..0bc19fa87821
--- /dev/null
+++ b/net/ipv6/netfilter/nft_reject_ipv6.c
@@ -0,0 +1,76 @@
+/*
+ * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2013 Eric Leblond <eric@regit.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Development of this code funded by Astaro AG (http://www.astaro.com/)
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+#include <net/netfilter/ipv6/nf_reject.h>
+
+void nft_reject_ipv6_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ struct nft_reject *priv = nft_expr_priv(expr);
+ struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
+
+ switch (priv->type) {
+ case NFT_REJECT_ICMP_UNREACH:
+ nf_send_unreach6(net, pkt->skb, priv->icmp_code,
+ pkt->ops->hooknum);
+ break;
+ case NFT_REJECT_TCP_RST:
+ nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
+ break;
+ }
+
+ data[NFT_REG_VERDICT].verdict = NF_DROP;
+}
+EXPORT_SYMBOL_GPL(nft_reject_ipv6_eval);
+
+static struct nft_expr_type nft_reject_ipv6_type;
+static const struct nft_expr_ops nft_reject_ipv6_ops = {
+ .type = &nft_reject_ipv6_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_ipv6_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_ipv6_type __read_mostly = {
+ .family = NFPROTO_IPV6,
+ .name = "reject",
+ .ops = &nft_reject_ipv6_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_ipv6_module_init(void)
+{
+ return nft_register_expr(&nft_reject_ipv6_type);
+}
+
+static void __exit nft_reject_ipv6_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_ipv6_type);
+}
+
+module_init(nft_reject_ipv6_module_init);
+module_exit(nft_reject_ipv6_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_INET6, "reject");
diff --git a/net/ipv6/ping.c b/net/ipv6/ping.c
index fb9beb78f00b..587bbdcb22b4 100644
--- a/net/ipv6/ping.c
+++ b/net/ipv6/ping.c
@@ -135,6 +135,7 @@ int ping_v6_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
fl6.flowi6_proto = IPPROTO_ICMPV6;
fl6.saddr = np->saddr;
fl6.daddr = *daddr;
+ fl6.flowi6_mark = sk->sk_mark;
fl6.fl6_icmp_type = user_icmph.icmp6_type;
fl6.fl6_icmp_code = user_icmph.icmp6_code;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 11dac21e6586..fba54a407bb2 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1513,7 +1513,7 @@ int ip6_route_add(struct fib6_config *cfg)
if (!table)
goto out;
- rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table);
+ rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT, table);
if (!rt) {
err = -ENOMEM;
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 3dfbcf1dcb1c..b4d74c86586c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -475,6 +475,7 @@ static void ipip6_tunnel_uninit(struct net_device *dev)
ipip6_tunnel_unlink(sitn, tunnel);
ipip6_tunnel_del_prl(tunnel, NULL);
}
+ ip_tunnel_dst_reset_all(tunnel);
dev_put(dev);
}
@@ -1082,6 +1083,7 @@ static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p)
t->parms.link = p->link;
ipip6_tunnel_bind_dev(t->dev);
}
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(t->dev);
}
@@ -1112,6 +1114,7 @@ static int ipip6_tunnel_update_6rd(struct ip_tunnel *t,
t->ip6rd.relay_prefix = relay_prefix;
t->ip6rd.prefixlen = ip6rd->prefixlen;
t->ip6rd.relay_prefixlen = ip6rd->relay_prefixlen;
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(t->dev);
return 0;
}
@@ -1271,6 +1274,7 @@ ipip6_tunnel_ioctl (struct net_device *dev, struct ifreq *ifr, int cmd)
err = ipip6_tunnel_add_prl(t, &prl, cmd == SIOCCHGPRL);
break;
}
+ ip_tunnel_dst_reset_all(t);
netdev_state_change(dev);
break;
@@ -1326,6 +1330,9 @@ static const struct net_device_ops ipip6_netdev_ops = {
static void ipip6_dev_free(struct net_device *dev)
{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+
+ free_percpu(tunnel->dst_cache);
free_percpu(dev->tstats);
free_netdev(dev);
}
@@ -1375,6 +1382,12 @@ static int ipip6_tunnel_init(struct net_device *dev)
u64_stats_init(&ipip6_tunnel_stats->syncp);
}
+ tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+ if (!tunnel->dst_cache) {
+ free_percpu(dev->tstats);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -1405,6 +1418,12 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
u64_stats_init(&ipip6_fb_stats->syncp);
}
+ tunnel->dst_cache = alloc_percpu(struct ip_tunnel_dst);
+ if (!tunnel->dst_cache) {
+ free_percpu(dev->tstats);
+ return -ENOMEM;
+ }
+
dev_hold(dev);
rcu_assign_pointer(sitn->tunnels_wc[0], tunnel);
return 0;
diff --git a/net/ipv6/udp_offload.c b/net/ipv6/udp_offload.c
index e7359f9eaa8d..b261ee8b83fc 100644
--- a/net/ipv6/udp_offload.c
+++ b/net/ipv6/udp_offload.c
@@ -113,7 +113,7 @@ static struct sk_buff *udp6_ufo_fragment(struct sk_buff *skb,
fptr = (struct frag_hdr *)(skb_network_header(skb) + unfrag_ip6hlen);
fptr->nexthdr = nexthdr;
fptr->reserved = 0;
- ipv6_select_ident(fptr, (struct rt6_info *)skb_dst(skb));
+ fptr->identification = skb_shinfo(skb)->ip6_frag_id;
/* Fragment the skb. ipv6 header and the remaining fields of the
* fragment header are updated in ipv6_gso_segment()
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 994e28bfb32e..00b2a6d1c009 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -52,18 +52,12 @@
#include <net/p8022.h>
#include <net/psnap.h>
#include <net/sock.h>
+#include <net/datalink.h>
#include <net/tcp_states.h>
+#include <net/net_namespace.h>
#include <asm/uaccess.h>
-#ifdef CONFIG_SYSCTL
-extern void ipx_register_sysctl(void);
-extern void ipx_unregister_sysctl(void);
-#else
-#define ipx_register_sysctl()
-#define ipx_unregister_sysctl()
-#endif
-
/* Configuration Variables */
static unsigned char ipxcfg_max_hops = 16;
static char ipxcfg_auto_select_primary;
@@ -84,15 +78,6 @@ DEFINE_SPINLOCK(ipx_interfaces_lock);
struct ipx_interface *ipx_primary_net;
struct ipx_interface *ipx_internal_net;
-extern int ipxrtr_add_route(__be32 network, struct ipx_interface *intrfc,
- unsigned char *node);
-extern void ipxrtr_del_routes(struct ipx_interface *intrfc);
-extern int ipxrtr_route_packet(struct sock *sk, struct sockaddr_ipx *usipx,
- struct iovec *iov, size_t len, int noblock);
-extern int ipxrtr_route_skb(struct sk_buff *skb);
-extern struct ipx_route *ipxrtr_lookup(__be32 net);
-extern int ipxrtr_ioctl(unsigned int cmd, void __user *arg);
-
struct ipx_interface *ipx_interfaces_head(void)
{
struct ipx_interface *rc = NULL;
@@ -1986,9 +1971,6 @@ static struct notifier_block ipx_dev_notifier = {
.notifier_call = ipxitf_device_event,
};
-extern struct datalink_proto *make_EII_client(void);
-extern void destroy_EII_client(struct datalink_proto *);
-
static const unsigned char ipx_8022_type = 0xE0;
static const unsigned char ipx_snap_id[5] = { 0x0, 0x0, 0x0, 0x81, 0x37 };
static const char ipx_EII_err_msg[] __initconst =
diff --git a/net/ipx/ipx_route.c b/net/ipx/ipx_route.c
index 30f4519b092f..c1f03185c5e1 100644
--- a/net/ipx/ipx_route.c
+++ b/net/ipx/ipx_route.c
@@ -20,15 +20,11 @@ DEFINE_RWLOCK(ipx_routes_lock);
extern struct ipx_interface *ipx_internal_net;
-extern __be16 ipx_cksum(struct ipxhdr *packet, int length);
extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
struct sk_buff *skb, int copy);
extern int ipxitf_demux_socket(struct ipx_interface *intrfc,
struct sk_buff *skb, int copy);
-extern int ipxitf_send(struct ipx_interface *intrfc, struct sk_buff *skb,
- char *node);
-extern struct ipx_interface *ipxitf_find_using_net(__be32 net);
struct ipx_route *ipxrtr_lookup(__be32 net)
{
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 1a04c1329362..79326978517a 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -433,12 +433,13 @@ static inline int verify_sec_ctx_len(const void *p)
return 0;
}
-static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx)
+static inline struct xfrm_user_sec_ctx *pfkey_sadb2xfrm_user_sec_ctx(const struct sadb_x_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
struct xfrm_user_sec_ctx *uctx = NULL;
int ctx_size = sec_ctx->sadb_x_ctx_len;
- uctx = kmalloc((sizeof(*uctx)+ctx_size), GFP_KERNEL);
+ uctx = kmalloc((sizeof(*uctx)+ctx_size), gfp);
if (!uctx)
return NULL;
@@ -1124,7 +1125,7 @@ static struct xfrm_state * pfkey_msg2xfrm_state(struct net *net,
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
- struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+ struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx)
goto out;
@@ -2231,14 +2232,14 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
- struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+ struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx) {
err = -ENOBUFS;
goto out;
}
- err = security_xfrm_policy_alloc(&xp->security, uctx);
+ err = security_xfrm_policy_alloc(&xp->security, uctx, GFP_KERNEL);
kfree(uctx);
if (err)
@@ -2335,12 +2336,12 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa
sec_ctx = ext_hdrs[SADB_X_EXT_SEC_CTX - 1];
if (sec_ctx != NULL) {
- struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
+ struct xfrm_user_sec_ctx *uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_KERNEL);
if (!uctx)
return -ENOMEM;
- err = security_xfrm_policy_alloc(&pol_ctx, uctx);
+ err = security_xfrm_policy_alloc(&pol_ctx, uctx, GFP_KERNEL);
kfree(uctx);
if (err)
return err;
@@ -3239,8 +3240,8 @@ static struct xfrm_policy *pfkey_compile_policy(struct sock *sk, int opt,
}
if ((*dir = verify_sec_ctx_len(p)))
goto out;
- uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx);
- *dir = security_xfrm_policy_alloc(&xp->security, uctx);
+ uctx = pfkey_sadb2xfrm_user_sec_ctx(sec_ctx, GFP_ATOMIC);
+ *dir = security_xfrm_policy_alloc(&xp->security, uctx, GFP_ATOMIC);
kfree(uctx);
if (*dir)
diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c
index 735d0f60c83a..85d9d94c0a3c 100644
--- a/net/l2tp/l2tp_core.c
+++ b/net/l2tp/l2tp_core.c
@@ -112,7 +112,6 @@ struct l2tp_net {
spinlock_t l2tp_session_hlist_lock;
};
-static void l2tp_session_set_header_len(struct l2tp_session *session, int version);
static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel);
static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk)
@@ -1863,7 +1862,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete);
/* We come here whenever a session's send_seq, cookie_len or
* l2specific_len parameters are set.
*/
-static void l2tp_session_set_header_len(struct l2tp_session *session, int version)
+void l2tp_session_set_header_len(struct l2tp_session *session, int version)
{
if (version == L2TP_HDR_VER_2) {
session->hdr_len = 6;
@@ -1876,6 +1875,7 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int versio
}
}
+EXPORT_SYMBOL_GPL(l2tp_session_set_header_len);
struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg)
{
diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h
index 1f01ba3435bc..3f93ccd6ba97 100644
--- a/net/l2tp/l2tp_core.h
+++ b/net/l2tp/l2tp_core.h
@@ -263,6 +263,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,
int length, int (*payload_hook)(struct sk_buff *skb));
int l2tp_session_queue_purge(struct l2tp_session *session);
int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);
+void l2tp_session_set_header_len(struct l2tp_session *session, int version);
int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb,
int hdr_len);
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 4cfd722e9153..bd7387adea9e 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -578,8 +578,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf
if (info->attrs[L2TP_ATTR_RECV_SEQ])
session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]);
- if (info->attrs[L2TP_ATTR_SEND_SEQ])
+ if (info->attrs[L2TP_ATTR_SEND_SEQ]) {
session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]);
+ l2tp_session_set_header_len(session, session->tunnel->version);
+ }
if (info->attrs[L2TP_ATTR_LNS_MODE])
session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]);
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index be5fadf34739..5990919356a5 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -254,12 +254,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int
po = pppox_sk(sk);
ppp_input(&po->chan, skb);
} else {
- l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n",
- session->name);
+ l2tp_dbg(session, PPPOL2TP_MSG_DATA,
+ "%s: recv %d byte data frame, passing to L2TP socket\n",
+ session->name, data_len);
- /* Not bound. Nothing we can do, so discard. */
- atomic_long_inc(&session->stats.rx_errors);
- kfree_skb(skb);
+ if (sock_queue_rcv_skb(sk, skb) < 0) {
+ atomic_long_inc(&session->stats.rx_errors);
+ kfree_skb(skb);
+ }
}
return;
@@ -1312,6 +1314,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk,
po->chan.hdrlen = val ? PPPOL2TP_L2TP_HDR_SIZE_SEQ :
PPPOL2TP_L2TP_HDR_SIZE_NOSEQ;
}
+ l2tp_session_set_header_len(session, session->tunnel->version);
l2tp_info(session, PPPOL2TP_MSG_CONTROL,
"%s: set send_seq=%d\n",
session->name, session->send_seq);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index f9ae9b85d4c1..453e974287d1 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1021,8 +1021,10 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
IEEE80211_P2P_OPPPS_ENABLE_BIT;
err = ieee80211_assign_beacon(sdata, &params->beacon);
- if (err < 0)
+ if (err < 0) {
+ ieee80211_vif_release_channel(sdata);
return err;
+ }
changed |= err;
err = drv_start_ap(sdata->local, sdata);
@@ -1032,6 +1034,7 @@ static int ieee80211_start_ap(struct wiphy *wiphy, struct net_device *dev,
if (old)
kfree_rcu(old, rcu_head);
RCU_INIT_POINTER(sdata->u.ap.beacon, NULL);
+ ieee80211_vif_release_channel(sdata);
return err;
}
@@ -1090,8 +1093,6 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree(sdata->u.ap.next_beacon);
sdata->u.ap.next_beacon = NULL;
- cancel_work_sync(&sdata->u.ap.request_smps_work);
-
/* turn off carrier for this interface and dependent VLANs */
list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
netif_carrier_off(vlan->dev);
@@ -1103,6 +1104,7 @@ static int ieee80211_stop_ap(struct wiphy *wiphy, struct net_device *dev)
kfree_rcu(old_beacon, rcu_head);
if (old_probe_resp)
kfree_rcu(old_probe_resp, rcu_head);
+ sdata->u.ap.driver_smps_mode = IEEE80211_SMPS_OFF;
__sta_info_flush(sdata, true);
ieee80211_free_keys(sdata, true);
@@ -2638,6 +2640,24 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
INIT_DELAYED_WORK(&roc->work, ieee80211_sw_roc_work);
INIT_LIST_HEAD(&roc->dependents);
+ /*
+ * cookie is either the roc cookie (for normal roc)
+ * or the SKB (for mgmt TX)
+ */
+ if (!txskb) {
+ /* local->mtx protects this */
+ local->roc_cookie_counter++;
+ roc->cookie = local->roc_cookie_counter;
+ /* wow, you wrapped 64 bits ... more likely a bug */
+ if (WARN_ON(roc->cookie == 0)) {
+ roc->cookie = 1;
+ local->roc_cookie_counter++;
+ }
+ *cookie = roc->cookie;
+ } else {
+ *cookie = (unsigned long)txskb;
+ }
+
/* if there's one pending or we're scanning, queue this one */
if (!list_empty(&local->roc_list) ||
local->scanning || local->radar_detect_enabled)
@@ -2772,24 +2792,6 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
if (!queued)
list_add_tail(&roc->list, &local->roc_list);
- /*
- * cookie is either the roc cookie (for normal roc)
- * or the SKB (for mgmt TX)
- */
- if (!txskb) {
- /* local->mtx protects this */
- local->roc_cookie_counter++;
- roc->cookie = local->roc_cookie_counter;
- /* wow, you wrapped 64 bits ... more likely a bug */
- if (WARN_ON(roc->cookie == 0)) {
- roc->cookie = 1;
- local->roc_cookie_counter++;
- }
- *cookie = roc->cookie;
- } else {
- *cookie = (unsigned long)txskb;
- }
-
return 0;
}
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index f43613a97dd6..0c1ecfdf9a12 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -100,6 +100,12 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local,
}
max_bw = max(max_bw, width);
}
+
+ /* use the configured bandwidth in case of monitor interface */
+ sdata = rcu_dereference(local->monitor_sdata);
+ if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf)
+ max_bw = max(max_bw, conf->def.width);
+
rcu_read_unlock();
return max_bw;
diff --git a/net/mac80211/ht.c b/net/mac80211/ht.c
index fab7b91923e0..70dd013de836 100644
--- a/net/mac80211/ht.c
+++ b/net/mac80211/ht.c
@@ -466,7 +466,9 @@ void ieee80211_request_smps_ap_work(struct work_struct *work)
u.ap.request_smps_work);
sdata_lock(sdata);
- __ieee80211_request_smps_ap(sdata, sdata->u.ap.driver_smps_mode);
+ if (sdata_dereference(sdata->u.ap.beacon, sdata))
+ __ieee80211_request_smps_ap(sdata,
+ sdata->u.ap.driver_smps_mode);
sdata_unlock(sdata);
}
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 771080ec7212..2796a198728f 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -695,12 +695,9 @@ static void ieee80211_ibss_disconnect(struct ieee80211_sub_if_data *sdata)
struct cfg80211_bss *cbss;
struct beacon_data *presp;
struct sta_info *sta;
- int active_ibss;
u16 capability;
- active_ibss = ieee80211_sta_active_ibss(sdata);
-
- if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) {
+ if (!is_zero_ether_addr(ifibss->bssid)) {
capability = WLAN_CAPABILITY_IBSS;
if (ifibss->privacy)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 3701930c6649..5e44e3179e02 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1692,14 +1692,8 @@ void ieee80211_stop_queue_by_reason(struct ieee80211_hw *hw, int queue,
void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue);
void ieee80211_add_pending_skb(struct ieee80211_local *local,
struct sk_buff *skb);
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data);
-static inline void ieee80211_add_pending_skbs(struct ieee80211_local *local,
- struct sk_buff_head *skbs)
-{
- ieee80211_add_pending_skbs_fn(local, skbs, NULL, NULL);
-}
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs);
void ieee80211_flush_queues(struct ieee80211_local *local,
struct ieee80211_sub_if_data *sdata);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 3dfd20a453ab..ce1c44370610 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -418,20 +418,24 @@ int ieee80211_add_virtual_monitor(struct ieee80211_local *local)
return ret;
}
+ mutex_lock(&local->iflist_mtx);
+ rcu_assign_pointer(local->monitor_sdata, sdata);
+ mutex_unlock(&local->iflist_mtx);
+
mutex_lock(&local->mtx);
ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef,
IEEE80211_CHANCTX_EXCLUSIVE);
mutex_unlock(&local->mtx);
if (ret) {
+ mutex_lock(&local->iflist_mtx);
+ rcu_assign_pointer(local->monitor_sdata, NULL);
+ mutex_unlock(&local->iflist_mtx);
+ synchronize_net();
drv_remove_interface(local, sdata);
kfree(sdata);
return ret;
}
- mutex_lock(&local->iflist_mtx);
- rcu_assign_pointer(local->monitor_sdata, sdata);
- mutex_unlock(&local->iflist_mtx);
-
return 0;
}
@@ -770,12 +774,19 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
ieee80211_roc_purge(local, sdata);
- if (sdata->vif.type == NL80211_IFTYPE_STATION)
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_STATION:
ieee80211_mgd_stop(sdata);
-
- if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
+ break;
+ case NL80211_IFTYPE_ADHOC:
ieee80211_ibss_stop(sdata);
-
+ break;
+ case NL80211_IFTYPE_AP:
+ cancel_work_sync(&sdata->u.ap.request_smps_work);
+ break;
+ default:
+ break;
+ }
/*
* Remove all stations associated with this interface.
@@ -1046,7 +1057,8 @@ static void ieee80211_uninit(struct net_device *dev)
static u16 ieee80211_netdev_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv,
+ select_queue_fallback_t fallback)
{
return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb);
}
@@ -1064,7 +1076,8 @@ static const struct net_device_ops ieee80211_dataif_ops = {
static u16 ieee80211_monitor_select_queue(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv)
+ void *accel_priv,
+ select_queue_fallback_t fallback)
{
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = sdata->local;
diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c
index 2802f9d9279d..ad8b377b4b9f 100644
--- a/net/mac80211/mesh_ps.c
+++ b/net/mac80211/mesh_ps.c
@@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta)
sdata->vif.addr);
nullfunc->frame_control = fc;
nullfunc->duration_id = 0;
+ nullfunc->seq_ctrl = 0;
/* no address resolution for this frame -> set addr 1 immediately */
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
memset(skb_put(skb, 2), 0, 2); /* append QoS control field */
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index fc1d82465b3c..245dce969b31 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -222,6 +222,7 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
switch (vht_oper->chan_width) {
case IEEE80211_VHT_CHANWIDTH_USE_HT:
vht_chandef.width = chandef->width;
+ vht_chandef.center_freq1 = chandef->center_freq1;
break;
case IEEE80211_VHT_CHANWIDTH_80MHZ:
vht_chandef.width = NL80211_CHAN_WIDTH_80;
@@ -271,6 +272,28 @@ ieee80211_determine_chantype(struct ieee80211_sub_if_data *sdata,
ret = 0;
out:
+ /*
+ * When tracking the current AP, don't do any further checks if the
+ * new chandef is identical to the one we're currently using for the
+ * connection. This keeps us from playing ping-pong with regulatory;
+ * without it, the following can happen (for example):
+ * - connect to an AP with 80 MHz, world regdom allows 80 MHz
+ * - AP advertises regdom US
+ * - CRDA loads regdom US with 80 MHz prohibited (old database)
+ * - the code below detects an unsupported channel, downgrades, and
+ * we disconnect from the AP in the caller
+ * - disconnect causes CRDA to reload world regdomain and the game
+ * starts anew.
+ * (see https://bugzilla.kernel.org/show_bug.cgi?id=70881)
+ *
+ * It seems possible that there are still scenarios with CSA or real
+ * bandwidth changes where a this could happen, but those cases are
+ * less common and wouldn't completely prevent using the AP.
+ */
+ if (tracking &&
+ cfg80211_chandef_identical(chandef, &sdata->vif.bss_conf.chandef))
+ return ret;
+
/* don't print the message below for VHT mismatch if VHT is disabled */
if (ret & IEEE80211_STA_DISABLE_VHT)
vht_chandef = *chandef;
@@ -3753,6 +3776,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
if (WARN_ON(!chanctx_conf)) {
rcu_read_unlock();
+ sta_info_free(local, new_sta);
return -EINVAL;
}
rate_flags = ieee80211_chandef_rate_flags(&chanctx_conf->def);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index c24ca0d0f469..3e57f96c9666 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1128,6 +1128,13 @@ static void sta_ps_end(struct sta_info *sta)
sta->sta.addr, sta->sta.aid);
if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+ /*
+ * Clear the flag only if the other one is still set
+ * so that the TX path won't start TX'ing new frames
+ * directly ... In the case that the driver flag isn't
+ * set, ieee80211_sta_ps_deliver_wakeup() will clear it.
+ */
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
sta->sta.addr, sta->sta.aid);
return;
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index decd30c1e290..137a192e64bc 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -91,7 +91,7 @@ static int sta_info_hash_del(struct ieee80211_local *local,
return -ENOENT;
}
-static void cleanup_single_sta(struct sta_info *sta)
+static void __cleanup_single_sta(struct sta_info *sta)
{
int ac, i;
struct tid_ampdu_tx *tid_tx;
@@ -99,7 +99,8 @@ static void cleanup_single_sta(struct sta_info *sta)
struct ieee80211_local *local = sdata->local;
struct ps_data *ps;
- if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
+ if (test_sta_flag(sta, WLAN_STA_PS_STA) ||
+ test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
ps = &sdata->bss->ps;
@@ -109,6 +110,7 @@ static void cleanup_single_sta(struct sta_info *sta)
return;
clear_sta_flag(sta, WLAN_STA_PS_STA);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
atomic_dec(&ps->num_sta_ps);
sta_info_recalc_tim(sta);
@@ -139,7 +141,14 @@ static void cleanup_single_sta(struct sta_info *sta)
ieee80211_purge_tx_queue(&local->hw, &tid_tx->pending);
kfree(tid_tx);
}
+}
+static void cleanup_single_sta(struct sta_info *sta)
+{
+ struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+
+ __cleanup_single_sta(sta);
sta_info_free(local, sta);
}
@@ -330,6 +339,7 @@ struct sta_info *sta_info_alloc(struct ieee80211_sub_if_data *sdata,
rcu_read_unlock();
spin_lock_init(&sta->lock);
+ spin_lock_init(&sta->ps_lock);
INIT_WORK(&sta->drv_unblock_wk, sta_unblock);
INIT_WORK(&sta->ampdu_mlme.work, ieee80211_ba_session_work);
mutex_init(&sta->ampdu_mlme.mtx);
@@ -487,21 +497,26 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
goto out_err;
}
- /* notify driver */
- err = sta_info_insert_drv_state(local, sdata, sta);
- if (err)
- goto out_err;
-
local->num_sta++;
local->sta_generation++;
smp_mb();
+ /* simplify things and don't accept BA sessions yet */
+ set_sta_flag(sta, WLAN_STA_BLOCK_BA);
+
/* make the station visible */
sta_info_hash_add(local, sta);
list_add_rcu(&sta->list, &local->sta_list);
+ /* notify driver */
+ err = sta_info_insert_drv_state(local, sdata, sta);
+ if (err)
+ goto out_remove;
+
set_sta_flag(sta, WLAN_STA_INSERTED);
+ /* accept BA sessions now */
+ clear_sta_flag(sta, WLAN_STA_BLOCK_BA);
ieee80211_recalc_min_chandef(sdata);
ieee80211_sta_debugfs_add(sta);
@@ -522,6 +537,12 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
mesh_accept_plinks_update(sdata);
return 0;
+ out_remove:
+ sta_info_hash_del(local, sta);
+ list_del_rcu(&sta->list);
+ local->num_sta--;
+ synchronize_net();
+ __cleanup_single_sta(sta);
out_err:
mutex_unlock(&local->sta_mtx);
rcu_read_lock();
@@ -1071,10 +1092,14 @@ struct ieee80211_sta *ieee80211_find_sta(struct ieee80211_vif *vif,
}
EXPORT_SYMBOL(ieee80211_find_sta);
-static void clear_sta_ps_flags(void *_sta)
+/* powersave support code */
+void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
{
- struct sta_info *sta = _sta;
struct ieee80211_sub_if_data *sdata = sta->sdata;
+ struct ieee80211_local *local = sdata->local;
+ struct sk_buff_head pending;
+ int filtered = 0, buffered = 0, ac;
+ unsigned long flags;
struct ps_data *ps;
if (sdata->vif.type == NL80211_IFTYPE_AP ||
@@ -1085,20 +1110,6 @@ static void clear_sta_ps_flags(void *_sta)
else
return;
- clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
- if (test_and_clear_sta_flag(sta, WLAN_STA_PS_STA))
- atomic_dec(&ps->num_sta_ps);
-}
-
-/* powersave support code */
-void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
-{
- struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct ieee80211_local *local = sdata->local;
- struct sk_buff_head pending;
- int filtered = 0, buffered = 0, ac;
- unsigned long flags;
-
clear_sta_flag(sta, WLAN_STA_SP);
BUILD_BUG_ON(BITS_TO_LONGS(IEEE80211_NUM_TIDS) > 1);
@@ -1109,6 +1120,8 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
skb_queue_head_init(&pending);
+ /* sync with ieee80211_tx_h_unicast_ps_buf */
+ spin_lock(&sta->ps_lock);
/* Send all buffered frames to the station */
for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
int count = skb_queue_len(&pending), tmp;
@@ -1127,7 +1140,12 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta)
buffered += tmp - count;
}
- ieee80211_add_pending_skbs_fn(local, &pending, clear_sta_ps_flags, sta);
+ ieee80211_add_pending_skbs(local, &pending);
+ clear_sta_flag(sta, WLAN_STA_PS_DRIVER);
+ clear_sta_flag(sta, WLAN_STA_PS_STA);
+ spin_unlock(&sta->ps_lock);
+
+ atomic_dec(&ps->num_sta_ps);
/* This station just woke up and isn't aware of our SMPS state */
if (!ieee80211_smps_is_restrictive(sta->known_smps_mode,
@@ -1188,6 +1206,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata,
memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN);
memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN);
memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN);
+ nullfunc->seq_ctrl = 0;
skb->priority = tid;
skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]);
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index d77ff7090630..d3a6d8208f2f 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -267,6 +267,7 @@ struct ieee80211_tx_latency_stat {
* @drv_unblock_wk: used for driver PS unblocking
* @listen_interval: listen interval of this station, when we're acting as AP
* @_flags: STA flags, see &enum ieee80211_sta_info_flags, do not use directly
+ * @ps_lock: used for powersave (when mac80211 is the AP) related locking
* @ps_tx_buf: buffers (per AC) of frames to transmit to this station
* when it leaves power saving state or polls
* @tx_filtered: buffers (per AC) of frames we already tried to
@@ -356,10 +357,8 @@ struct sta_info {
/* use the accessors defined below */
unsigned long _flags;
- /*
- * STA powersave frame queues, no more than the internal
- * locking required.
- */
+ /* STA powersave lock and frame queues */
+ spinlock_t ps_lock;
struct sk_buff_head ps_tx_buf[IEEE80211_NUM_ACS];
struct sk_buff_head tx_filtered[IEEE80211_NUM_ACS];
unsigned long driver_buffered_tids;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 27c990bf2320..4080c615636f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -478,6 +478,20 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
sta->sta.addr, sta->sta.aid, ac);
if (tx->local->total_ps_buffered >= TOTAL_MAX_TX_BUFFER)
purge_old_ps_buffers(tx->local);
+
+ /* sync with ieee80211_sta_ps_deliver_wakeup */
+ spin_lock(&sta->ps_lock);
+ /*
+ * STA woke up in the meantime and all the frames on ps_tx_buf have
+ * been queued to the pending queue. No reordering can happen, go
+ * ahead and Tx the packet.
+ */
+ if (!test_sta_flag(sta, WLAN_STA_PS_STA) &&
+ !test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
+ spin_unlock(&sta->ps_lock);
+ return TX_CONTINUE;
+ }
+
if (skb_queue_len(&sta->ps_tx_buf[ac]) >= STA_MAX_TX_BUFFER) {
struct sk_buff *old = skb_dequeue(&sta->ps_tx_buf[ac]);
ps_dbg(tx->sdata,
@@ -492,6 +506,7 @@ ieee80211_tx_h_unicast_ps_buf(struct ieee80211_tx_data *tx)
info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS;
skb_queue_tail(&sta->ps_tx_buf[ac], tx->skb);
+ spin_unlock(&sta->ps_lock);
if (!timer_pending(&local->sta_cleanup))
mod_timer(&local->sta_cleanup,
@@ -878,7 +893,7 @@ static int ieee80211_fragment(struct ieee80211_tx_data *tx,
}
/* adjust first fragment's length */
- skb->len = hdrlen + per_fragm;
+ skb_trim(skb, hdrlen + per_fragm);
return 0;
}
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 676dc0967f37..b8700d417a9c 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -435,9 +435,8 @@ void ieee80211_add_pending_skb(struct ieee80211_local *local,
spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
}
-void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
- struct sk_buff_head *skbs,
- void (*fn)(void *data), void *data)
+void ieee80211_add_pending_skbs(struct ieee80211_local *local,
+ struct sk_buff_head *skbs)
{
struct ieee80211_hw *hw = &local->hw;
struct sk_buff *skb;
@@ -461,9 +460,6 @@ void ieee80211_add_pending_skbs_fn(struct ieee80211_local *local,
__skb_queue_tail(&local->pending[queue], skb);
}
- if (fn)
- fn(data);
-
for (i = 0; i < hw->queues; i++)
__ieee80211_wake_queue(hw, i,
IEEE80211_QUEUE_STOP_REASON_SKB_ADD);
@@ -1741,6 +1737,26 @@ int ieee80211_reconfig(struct ieee80211_local *local)
IEEE80211_QUEUE_STOP_REASON_SUSPEND);
/*
+ * Reconfigure sched scan if it was interrupted by FW restart or
+ * suspend.
+ */
+ mutex_lock(&local->mtx);
+ sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
+ lockdep_is_held(&local->mtx));
+ if (sched_scan_sdata && local->sched_scan_req)
+ /*
+ * Sched scan stopped, but we don't want to report it. Instead,
+ * we're trying to reschedule.
+ */
+ if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
+ local->sched_scan_req))
+ sched_scan_stopped = true;
+ mutex_unlock(&local->mtx);
+
+ if (sched_scan_stopped)
+ cfg80211_sched_scan_stopped(local->hw.wiphy);
+
+ /*
* If this is for hw restart things are still running.
* We may want to change that later, however.
*/
@@ -1768,26 +1784,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
WARN_ON(1);
#endif
- /*
- * Reconfigure sched scan if it was interrupted by FW restart or
- * suspend.
- */
- mutex_lock(&local->mtx);
- sched_scan_sdata = rcu_dereference_protected(local->sched_scan_sdata,
- lockdep_is_held(&local->mtx));
- if (sched_scan_sdata && local->sched_scan_req)
- /*
- * Sched scan stopped, but we don't want to report it. Instead,
- * we're trying to reschedule.
- */
- if (__ieee80211_request_sched_scan_start(sched_scan_sdata,
- local->sched_scan_req))
- sched_scan_stopped = true;
- mutex_unlock(&local->mtx);
-
- if (sched_scan_stopped)
- cfg80211_sched_scan_stopped(local->hw.wiphy);
-
return 0;
}
diff --git a/net/mac80211/wme.c b/net/mac80211/wme.c
index 21211c60ca98..d51422c778de 100644
--- a/net/mac80211/wme.c
+++ b/net/mac80211/wme.c
@@ -154,6 +154,11 @@ u16 ieee80211_select_queue(struct ieee80211_sub_if_data *sdata,
return IEEE80211_AC_BE;
}
+ if (skb->protocol == sdata->control_port_protocol) {
+ skb->priority = 7;
+ return ieee80211_downgrade_queue(sdata, skb);
+ }
+
/* use the data classifier to determine what 802.1d tag the
* data frame has */
rcu_read_lock();
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index c37467562fd0..e9410d17619d 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -513,7 +513,6 @@ config NFT_QUEUE
config NFT_REJECT
depends on NF_TABLES
- depends on NF_TABLES_IPV6 || !NF_TABLES_IPV6
default m if NETFILTER_ADVANCED=n
tristate "Netfilter nf_tables reject support"
help
@@ -521,6 +520,11 @@ config NFT_REJECT
explicitly deny and notify via TCP reset/ICMP informational errors
unallowed traffic.
+config NFT_REJECT_INET
+ depends on NF_TABLES_INET
+ default NFT_REJECT
+ tristate
+
config NFT_COMPAT
depends on NF_TABLES
depends on NETFILTER_XTABLES
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index ee9c4de5f8ed..bffdad774da7 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -79,6 +79,7 @@ obj-$(CONFIG_NFT_LIMIT) += nft_limit.o
obj-$(CONFIG_NFT_NAT) += nft_nat.o
obj-$(CONFIG_NFT_QUEUE) += nft_queue.o
obj-$(CONFIG_NFT_REJECT) += nft_reject.o
+obj-$(CONFIG_NFT_REJECT_INET) += nft_reject_inet.o
obj-$(CONFIG_NFT_RBTREE) += nft_rbtree.o
obj-$(CONFIG_NFT_HASH) += nft_hash.o
obj-$(CONFIG_NFT_COUNTER) += nft_counter.o
diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
index 59a1a85bcb3e..a8eb0a89326a 100644
--- a/net/netfilter/ipvs/ip_vs_conn.c
+++ b/net/netfilter/ipvs/ip_vs_conn.c
@@ -871,11 +871,11 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
cp->protocol = p->protocol;
ip_vs_addr_set(p->af, &cp->caddr, p->caddr);
cp->cport = p->cport;
- ip_vs_addr_set(p->af, &cp->vaddr, p->vaddr);
- cp->vport = p->vport;
- /* proto should only be IPPROTO_IP if d_addr is a fwmark */
+ /* proto should only be IPPROTO_IP if p->vaddr is a fwmark */
ip_vs_addr_set(p->protocol == IPPROTO_IP ? AF_UNSPEC : p->af,
- &cp->daddr, daddr);
+ &cp->vaddr, p->vaddr);
+ cp->vport = p->vport;
+ ip_vs_addr_set(p->af, &cp->daddr, daddr);
cp->dport = dport;
cp->flags = flags;
cp->fwmark = fwmark;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 8824ed0ccc9c..356bef519fe5 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -312,6 +312,21 @@ static void death_by_timeout(unsigned long ul_conntrack)
nf_ct_delete((struct nf_conn *)ul_conntrack, 0, 0);
}
+static inline bool
+nf_ct_key_equal(struct nf_conntrack_tuple_hash *h,
+ const struct nf_conntrack_tuple *tuple,
+ u16 zone)
+{
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* A conntrack can be recreated with an equal tuple,
+ * so we need to check that the conntrack is confirmed
+ */
+ return nf_ct_tuple_equal(tuple, &h->tuple) &&
+ nf_ct_zone(ct) == zone &&
+ nf_ct_is_confirmed(ct);
+}
+
/*
* Warning :
* - Caller must take a reference on returned object
@@ -333,8 +348,7 @@ ____nf_conntrack_find(struct net *net, u16 zone,
local_bh_disable();
begin:
hlist_nulls_for_each_entry_rcu(h, n, &net->ct.hash[bucket], hnnode) {
- if (nf_ct_tuple_equal(tuple, &h->tuple) &&
- nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)) == zone) {
+ if (nf_ct_key_equal(h, tuple, zone)) {
NF_CT_STAT_INC(net, found);
local_bh_enable();
return h;
@@ -372,8 +386,7 @@ begin:
!atomic_inc_not_zero(&ct->ct_general.use)))
h = NULL;
else {
- if (unlikely(!nf_ct_tuple_equal(tuple, &h->tuple) ||
- nf_ct_zone(ct) != zone)) {
+ if (unlikely(!nf_ct_key_equal(h, tuple, zone))) {
nf_ct_put(ct);
goto begin;
}
@@ -435,7 +448,9 @@ nf_conntrack_hash_check_insert(struct nf_conn *ct)
goto out;
add_timer(&ct->timeout);
- nf_conntrack_get(&ct->ct_general);
+ smp_wmb();
+ /* The caller holds a reference to this object */
+ atomic_set(&ct->ct_general.use, 2);
__nf_conntrack_hash_insert(ct, hash, repl_hash);
NF_CT_STAT_INC(net, insert);
spin_unlock_bh(&nf_conntrack_lock);
@@ -449,6 +464,21 @@ out:
}
EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
+/* deletion from this larval template list happens via nf_ct_put() */
+void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
+{
+ __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
+ __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
+ nf_conntrack_get(&tmpl->ct_general);
+
+ spin_lock_bh(&nf_conntrack_lock);
+ /* Overload tuple linked list to put us in template list. */
+ hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
+ &net->ct.tmpl);
+ spin_unlock_bh(&nf_conntrack_lock);
+}
+EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
+
/* Confirm a connection given skb; places it in hash table */
int
__nf_conntrack_confirm(struct sk_buff *skb)
@@ -720,11 +750,10 @@ __nf_conntrack_alloc(struct net *net, u16 zone,
nf_ct_zone->id = zone;
}
#endif
- /*
- * changes to lookup keys must be done before setting refcnt to 1
+ /* Because we use RCU lookups, we set ct_general.use to zero before
+ * this is inserted in any list.
*/
- smp_wmb();
- atomic_set(&ct->ct_general.use, 1);
+ atomic_set(&ct->ct_general.use, 0);
return ct;
#ifdef CONFIG_NF_CONNTRACK_ZONES
@@ -748,6 +777,11 @@ void nf_conntrack_free(struct nf_conn *ct)
{
struct net *net = nf_ct_net(ct);
+ /* A freed object has refcnt == 0, that's
+ * the golden rule for SLAB_DESTROY_BY_RCU
+ */
+ NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 0);
+
nf_ct_ext_destroy(ct);
nf_ct_ext_free(ct);
kmem_cache_free(net->ct.nf_conntrack_cachep, ct);
@@ -843,6 +877,9 @@ init_conntrack(struct net *net, struct nf_conn *tmpl,
NF_CT_STAT_INC(net, new);
}
+ /* Now it is inserted into the unconfirmed list, bump refcount */
+ nf_conntrack_get(&ct->ct_general);
+
/* Overload tuple linked list to put us in unconfirmed list. */
hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
&net->ct.unconfirmed);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index bb322d0beb48..b9f0e0374322 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -1310,27 +1310,22 @@ ctnetlink_change_status(struct nf_conn *ct, const struct nlattr * const cda[])
}
static int
-ctnetlink_change_nat(struct nf_conn *ct, const struct nlattr * const cda[])
+ctnetlink_setup_nat(struct nf_conn *ct, const struct nlattr * const cda[])
{
#ifdef CONFIG_NF_NAT_NEEDED
int ret;
- if (cda[CTA_NAT_DST]) {
- ret = ctnetlink_parse_nat_setup(ct,
- NF_NAT_MANIP_DST,
- cda[CTA_NAT_DST]);
- if (ret < 0)
- return ret;
- }
- if (cda[CTA_NAT_SRC]) {
- ret = ctnetlink_parse_nat_setup(ct,
- NF_NAT_MANIP_SRC,
- cda[CTA_NAT_SRC]);
- if (ret < 0)
- return ret;
- }
- return 0;
+ ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_DST,
+ cda[CTA_NAT_DST]);
+ if (ret < 0)
+ return ret;
+
+ ret = ctnetlink_parse_nat_setup(ct, NF_NAT_MANIP_SRC,
+ cda[CTA_NAT_SRC]);
+ return ret;
#else
+ if (!cda[CTA_NAT_DST] && !cda[CTA_NAT_SRC])
+ return 0;
return -EOPNOTSUPP;
#endif
}
@@ -1659,11 +1654,9 @@ ctnetlink_create_conntrack(struct net *net, u16 zone,
goto err2;
}
- if (cda[CTA_NAT_SRC] || cda[CTA_NAT_DST]) {
- err = ctnetlink_change_nat(ct, cda);
- if (err < 0)
- goto err2;
- }
+ err = ctnetlink_setup_nat(ct, cda);
+ if (err < 0)
+ goto err2;
nf_ct_acct_ext_add(ct, GFP_ATOMIC);
nf_ct_tstamp_ext_add(ct, GFP_ATOMIC);
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index d3f5cd6dd962..52ca952b802c 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -432,15 +432,15 @@ nf_nat_setup_info(struct nf_conn *ct,
}
EXPORT_SYMBOL(nf_nat_setup_info);
-unsigned int
-nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+static unsigned int
+__nf_nat_alloc_null_binding(struct nf_conn *ct, enum nf_nat_manip_type manip)
{
/* Force range to this IP; let proto decide mapping for
* per-proto parts (hence not IP_NAT_RANGE_PROTO_SPECIFIED).
* Use reply in case it's already been mangled (eg local packet).
*/
union nf_inet_addr ip =
- (HOOK2MANIP(hooknum) == NF_NAT_MANIP_SRC ?
+ (manip == NF_NAT_MANIP_SRC ?
ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3 :
ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3);
struct nf_nat_range range = {
@@ -448,7 +448,13 @@ nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
.min_addr = ip,
.max_addr = ip,
};
- return nf_nat_setup_info(ct, &range, HOOK2MANIP(hooknum));
+ return nf_nat_setup_info(ct, &range, manip);
+}
+
+unsigned int
+nf_nat_alloc_null_binding(struct nf_conn *ct, unsigned int hooknum)
+{
+ return __nf_nat_alloc_null_binding(ct, HOOK2MANIP(hooknum));
}
EXPORT_SYMBOL_GPL(nf_nat_alloc_null_binding);
@@ -702,9 +708,9 @@ static const struct nla_policy nat_nla_policy[CTA_NAT_MAX+1] = {
static int
nfnetlink_parse_nat(const struct nlattr *nat,
- const struct nf_conn *ct, struct nf_nat_range *range)
+ const struct nf_conn *ct, struct nf_nat_range *range,
+ const struct nf_nat_l3proto *l3proto)
{
- const struct nf_nat_l3proto *l3proto;
struct nlattr *tb[CTA_NAT_MAX+1];
int err;
@@ -714,38 +720,46 @@ nfnetlink_parse_nat(const struct nlattr *nat,
if (err < 0)
return err;
- rcu_read_lock();
- l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
- if (l3proto == NULL) {
- err = -EAGAIN;
- goto out;
- }
err = l3proto->nlattr_to_range(tb, range);
if (err < 0)
- goto out;
+ return err;
if (!tb[CTA_NAT_PROTO])
- goto out;
+ return 0;
- err = nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
-out:
- rcu_read_unlock();
- return err;
+ return nfnetlink_parse_nat_proto(tb[CTA_NAT_PROTO], ct, range);
}
+/* This function is called under rcu_read_lock() */
static int
nfnetlink_parse_nat_setup(struct nf_conn *ct,
enum nf_nat_manip_type manip,
const struct nlattr *attr)
{
struct nf_nat_range range;
+ const struct nf_nat_l3proto *l3proto;
int err;
- err = nfnetlink_parse_nat(attr, ct, &range);
+ /* Should not happen, restricted to creating new conntracks
+ * via ctnetlink.
+ */
+ if (WARN_ON_ONCE(nf_nat_initialized(ct, manip)))
+ return -EEXIST;
+
+ /* Make sure that L3 NAT is there by the time we call nf_nat_setup_info
+ * to attach the null binding; otherwise this may oops.
+ */
+ l3proto = __nf_nat_l3proto_find(nf_ct_l3num(ct));
+ if (l3proto == NULL)
+ return -EAGAIN;
+
+ /* No NAT information has been passed, allocate the null-binding */
+ if (attr == NULL)
+ return __nf_nat_alloc_null_binding(ct, manip);
+
+ err = nfnetlink_parse_nat(attr, ct, &range, l3proto);
if (err < 0)
return err;
- if (nf_nat_initialized(ct, manip))
- return -EEXIST;
return nf_nat_setup_info(ct, &range, manip);
}
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 9858e3e51a3a..52e20c9a46a5 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -363,9 +363,8 @@ static int __net_init synproxy_net_init(struct net *net)
goto err2;
if (!nfct_synproxy_ext_add(ct))
goto err2;
- __set_bit(IPS_TEMPLATE_BIT, &ct->status);
- __set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ nf_conntrack_tmpl_insert(net, ct);
snet->tmpl = ct;
snet->stats = alloc_percpu(struct synproxy_stats);
@@ -390,7 +389,7 @@ static void __net_exit synproxy_net_exit(struct net *net)
{
struct synproxy_net *snet = synproxy_pernet(net);
- nf_conntrack_free(snet->tmpl);
+ nf_ct_put(snet->tmpl);
synproxy_proc_exit(net);
free_percpu(snet->stats);
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 117bbaaddde6..adce01e8bb57 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1008,10 +1008,8 @@ notify:
return 0;
}
-static void nf_tables_rcu_chain_destroy(struct rcu_head *head)
+static void nf_tables_chain_destroy(struct nft_chain *chain)
{
- struct nft_chain *chain = container_of(head, struct nft_chain, rcu_head);
-
BUG_ON(chain->use > 0);
if (chain->flags & NFT_BASE_CHAIN) {
@@ -1045,7 +1043,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(chain))
return PTR_ERR(chain);
- if (!list_empty(&chain->rules))
+ if (!list_empty(&chain->rules) || chain->use > 0)
return -EBUSY;
list_del(&chain->list);
@@ -1059,7 +1057,9 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
family);
/* Make sure all rule references are gone before this is released */
- call_rcu(&chain->rcu_head, nf_tables_rcu_chain_destroy);
+ synchronize_rcu();
+
+ nf_tables_chain_destroy(chain);
return 0;
}
@@ -1114,35 +1114,45 @@ void nft_unregister_expr(struct nft_expr_type *type)
}
EXPORT_SYMBOL_GPL(nft_unregister_expr);
-static const struct nft_expr_type *__nft_expr_type_get(struct nlattr *nla)
+static const struct nft_expr_type *__nft_expr_type_get(u8 family,
+ struct nlattr *nla)
{
const struct nft_expr_type *type;
list_for_each_entry(type, &nf_tables_expressions, list) {
- if (!nla_strcmp(nla, type->name))
+ if (!nla_strcmp(nla, type->name) &&
+ (!type->family || type->family == family))
return type;
}
return NULL;
}
-static const struct nft_expr_type *nft_expr_type_get(struct nlattr *nla)
+static const struct nft_expr_type *nft_expr_type_get(u8 family,
+ struct nlattr *nla)
{
const struct nft_expr_type *type;
if (nla == NULL)
return ERR_PTR(-EINVAL);
- type = __nft_expr_type_get(nla);
+ type = __nft_expr_type_get(family, nla);
if (type != NULL && try_module_get(type->owner))
return type;
#ifdef CONFIG_MODULES
if (type == NULL) {
nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ request_module("nft-expr-%u-%.*s", family,
+ nla_len(nla), (char *)nla_data(nla));
+ nfnl_lock(NFNL_SUBSYS_NFTABLES);
+ if (__nft_expr_type_get(family, nla))
+ return ERR_PTR(-EAGAIN);
+
+ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
request_module("nft-expr-%.*s",
nla_len(nla), (char *)nla_data(nla));
nfnl_lock(NFNL_SUBSYS_NFTABLES);
- if (__nft_expr_type_get(nla))
+ if (__nft_expr_type_get(family, nla))
return ERR_PTR(-EAGAIN);
}
#endif
@@ -1193,7 +1203,7 @@ static int nf_tables_expr_parse(const struct nft_ctx *ctx,
if (err < 0)
return err;
- type = nft_expr_type_get(tb[NFTA_EXPR_NAME]);
+ type = nft_expr_type_get(ctx->afi->family, tb[NFTA_EXPR_NAME]);
if (IS_ERR(type))
return PTR_ERR(type);
@@ -1521,9 +1531,8 @@ err:
return err;
}
-static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
+static void nf_tables_rule_destroy(struct nft_rule *rule)
{
- struct nft_rule *rule = container_of(head, struct nft_rule, rcu_head);
struct nft_expr *expr;
/*
@@ -1538,11 +1547,6 @@ static void nf_tables_rcu_rule_destroy(struct rcu_head *head)
kfree(rule);
}
-static void nf_tables_rule_destroy(struct nft_rule *rule)
-{
- call_rcu(&rule->rcu_head, nf_tables_rcu_rule_destroy);
-}
-
#define NFT_RULE_MAXEXPRS 128
static struct nft_expr_info *info;
@@ -1809,9 +1813,6 @@ static int nf_tables_commit(struct sk_buff *skb)
synchronize_rcu();
list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
- /* Delete this rule from the dirty list */
- list_del(&rupd->list);
-
/* This rule was inactive in the past and just became active.
* Clear the next bit of the genmask since its meaning has
* changed, now it is the future.
@@ -1822,6 +1823,7 @@ static int nf_tables_commit(struct sk_buff *skb)
rupd->chain, rupd->rule,
NFT_MSG_NEWRULE, 0,
rupd->family);
+ list_del(&rupd->list);
kfree(rupd);
continue;
}
@@ -1831,7 +1833,15 @@ static int nf_tables_commit(struct sk_buff *skb)
nf_tables_rule_notify(skb, rupd->nlh, rupd->table, rupd->chain,
rupd->rule, NFT_MSG_DELRULE, 0,
rupd->family);
+ }
+
+ /* Make sure we don't see any packet traversing old rules */
+ synchronize_rcu();
+
+ /* Now we can safely release unused old rules */
+ list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
nf_tables_rule_destroy(rupd->rule);
+ list_del(&rupd->list);
kfree(rupd);
}
@@ -1844,20 +1854,26 @@ static int nf_tables_abort(struct sk_buff *skb)
struct nft_rule_trans *rupd, *tmp;
list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
- /* Delete all rules from the dirty list */
- list_del(&rupd->list);
-
if (!nft_rule_is_active_next(net, rupd->rule)) {
nft_rule_clear(net, rupd->rule);
+ list_del(&rupd->list);
kfree(rupd);
continue;
}
/* This rule is inactive, get rid of it */
list_del_rcu(&rupd->rule->list);
+ }
+
+ /* Make sure we don't see any packet accessing aborted rules */
+ synchronize_rcu();
+
+ list_for_each_entry_safe(rupd, tmp, &net->nft.commit_list, list) {
nf_tables_rule_destroy(rupd->rule);
+ list_del(&rupd->list);
kfree(rupd);
}
+
return 0;
}
@@ -1943,6 +1959,9 @@ static int nft_ctx_init_from_setattr(struct nft_ctx *ctx,
}
if (nla[NFTA_SET_TABLE] != NULL) {
+ if (afi == NULL)
+ return -EAFNOSUPPORT;
+
table = nf_tables_table_lookup(afi, nla[NFTA_SET_TABLE]);
if (IS_ERR(table))
return PTR_ERR(table);
@@ -1989,13 +2008,13 @@ static int nf_tables_set_alloc_name(struct nft_ctx *ctx, struct nft_set *set,
if (!sscanf(i->name, name, &tmp))
continue;
- if (tmp < 0 || tmp > BITS_PER_LONG * PAGE_SIZE)
+ if (tmp < 0 || tmp >= BITS_PER_BYTE * PAGE_SIZE)
continue;
set_bit(tmp, inuse);
}
- n = find_first_zero_bit(inuse, BITS_PER_LONG * PAGE_SIZE);
+ n = find_first_zero_bit(inuse, BITS_PER_BYTE * PAGE_SIZE);
free_page((unsigned long)inuse);
}
@@ -2428,6 +2447,8 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
struct nft_ctx ctx;
int err;
+ if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
+ return -EAFNOSUPPORT;
if (nla[NFTA_SET_TABLE] == NULL)
return -EINVAL;
@@ -2435,9 +2456,6 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
if (err < 0)
return err;
- if (nfmsg->nfgen_family == NFPROTO_UNSPEC)
- return -EAFNOSUPPORT;
-
set = nf_tables_set_lookup(ctx.table, nla[NFTA_SET_NAME]);
if (IS_ERR(set))
return PTR_ERR(set);
@@ -2723,6 +2741,9 @@ static int nft_add_set_elem(const struct nft_ctx *ctx, struct nft_set *set,
if (nla[NFTA_SET_ELEM_DATA] == NULL &&
!(elem.flags & NFT_SET_ELEM_INTERVAL_END))
return -EINVAL;
+ if (nla[NFTA_SET_ELEM_DATA] != NULL &&
+ elem.flags & NFT_SET_ELEM_INTERVAL_END)
+ return -EINVAL;
} else {
if (nla[NFTA_SET_ELEM_DATA] != NULL)
return -EINVAL;
@@ -2977,6 +2998,9 @@ static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
const struct nft_set_iter *iter,
const struct nft_set_elem *elem)
{
+ if (elem->flags & NFT_SET_ELEM_INTERVAL_END)
+ return 0;
+
switch (elem->data.verdict) {
case NFT_JUMP:
case NFT_GOTO:
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 0d879fcb8763..90998a6ff8b9 100644
--- a/net/netfilter/nf_tables_core.c
+++ b/net/netfilter/nf_tables_core.c
@@ -103,9 +103,9 @@ static struct nf_loginfo trace_loginfo = {
},
};
-static inline void nft_trace_packet(const struct nft_pktinfo *pkt,
- const struct nft_chain *chain,
- int rulenum, enum nft_trace type)
+static void nft_trace_packet(const struct nft_pktinfo *pkt,
+ const struct nft_chain *chain,
+ int rulenum, enum nft_trace type)
{
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c
index 917052e20602..46e275403838 100644
--- a/net/netfilter/nft_ct.c
+++ b/net/netfilter/nft_ct.c
@@ -226,6 +226,7 @@ static int nft_ct_init_validate_get(const struct nft_expr *expr,
if (tb[NFTA_CT_DIRECTION] != NULL)
return -EINVAL;
break;
+ case NFT_CT_L3PROTOCOL:
case NFT_CT_PROTOCOL:
case NFT_CT_SRC:
case NFT_CT_DST:
@@ -311,8 +312,19 @@ static int nft_ct_get_dump(struct sk_buff *skb, const struct nft_expr *expr)
goto nla_put_failure;
if (nla_put_be32(skb, NFTA_CT_KEY, htonl(priv->key)))
goto nla_put_failure;
- if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
- goto nla_put_failure;
+
+ switch (priv->key) {
+ case NFT_CT_PROTOCOL:
+ case NFT_CT_SRC:
+ case NFT_CT_DST:
+ case NFT_CT_PROTO_SRC:
+ case NFT_CT_PROTO_DST:
+ if (nla_put_u8(skb, NFTA_CT_DIRECTION, priv->dir))
+ goto nla_put_failure;
+ default:
+ break;
+ }
+
return 0;
nla_put_failure:
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 5af790123ad8..26c5154e05f3 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -23,7 +23,6 @@ static const char *nft_log_null_prefix = "";
struct nft_log {
struct nf_loginfo loginfo;
char *prefix;
- int family;
};
static void nft_log_eval(const struct nft_expr *expr,
@@ -33,7 +32,7 @@ static void nft_log_eval(const struct nft_expr *expr,
const struct nft_log *priv = nft_expr_priv(expr);
struct net *net = dev_net(pkt->in ? pkt->in : pkt->out);
- nf_log_packet(net, priv->family, pkt->ops->hooknum, pkt->skb, pkt->in,
+ nf_log_packet(net, pkt->ops->pf, pkt->ops->hooknum, pkt->skb, pkt->in,
pkt->out, &priv->loginfo, "%s", priv->prefix);
}
@@ -52,8 +51,6 @@ static int nft_log_init(const struct nft_ctx *ctx,
struct nf_loginfo *li = &priv->loginfo;
const struct nlattr *nla;
- priv->family = ctx->afi->family;
-
nla = tb[NFTA_LOG_PREFIX];
if (nla != NULL) {
priv->prefix = kmalloc(nla_len(nla) + 1, GFP_KERNEL);
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index 8a6116b75b5a..bb4ef4cccb6e 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -16,6 +16,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_core.h>
struct nft_lookup {
struct nft_set *set;
diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c
index e8254ad2e5a9..425cf39af890 100644
--- a/net/netfilter/nft_meta.c
+++ b/net/netfilter/nft_meta.c
@@ -116,7 +116,7 @@ static void nft_meta_get_eval(const struct nft_expr *expr,
skb->sk->sk_socket->file->f_cred->fsgid);
read_unlock_bh(&skb->sk->sk_callback_lock);
break;
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID: {
const struct dst_entry *dst = skb_dst(skb);
@@ -199,7 +199,7 @@ static int nft_meta_init_validate_get(uint32_t key)
case NFT_META_OIFTYPE:
case NFT_META_SKUID:
case NFT_META_SKGID:
-#ifdef CONFIG_NET_CLS_ROUTE
+#ifdef CONFIG_IP_ROUTE_CLASSID
case NFT_META_RTCLASSID:
#endif
#ifdef CONFIG_NETWORK_SECMARK
diff --git a/net/netfilter/nft_payload.c b/net/netfilter/nft_payload.c
index a2aeb318678f..85daa84bfdfe 100644
--- a/net/netfilter/nft_payload.c
+++ b/net/netfilter/nft_payload.c
@@ -135,7 +135,8 @@ nft_payload_select_ops(const struct nft_ctx *ctx,
if (len == 0 || len > FIELD_SIZEOF(struct nft_data, data))
return ERR_PTR(-EINVAL);
- if (len <= 4 && IS_ALIGNED(offset, len) && base != NFT_PAYLOAD_LL_HEADER)
+ if (len <= 4 && is_power_of_2(len) && IS_ALIGNED(offset, len) &&
+ base != NFT_PAYLOAD_LL_HEADER)
return &nft_payload_fast_ops;
else
return &nft_payload_ops;
diff --git a/net/netfilter/nft_queue.c b/net/netfilter/nft_queue.c
index cbea473d69e9..e8ae2f6bf232 100644
--- a/net/netfilter/nft_queue.c
+++ b/net/netfilter/nft_queue.c
@@ -25,7 +25,6 @@ struct nft_queue {
u16 queuenum;
u16 queues_total;
u16 flags;
- u8 family;
};
static void nft_queue_eval(const struct nft_expr *expr,
@@ -43,7 +42,7 @@ static void nft_queue_eval(const struct nft_expr *expr,
queue = priv->queuenum + cpu % priv->queues_total;
} else {
queue = nfqueue_hash(pkt->skb, queue,
- priv->queues_total, priv->family,
+ priv->queues_total, pkt->ops->pf,
jhash_initval);
}
}
@@ -71,7 +70,6 @@ static int nft_queue_init(const struct nft_ctx *ctx,
return -EINVAL;
init_hashrandom(&jhash_initval);
- priv->family = ctx->afi->family;
priv->queuenum = ntohs(nla_get_be16(tb[NFTA_QUEUE_NUM]));
if (tb[NFTA_QUEUE_TOTAL] != NULL)
diff --git a/net/netfilter/nft_rbtree.c b/net/netfilter/nft_rbtree.c
index ca0c1b231bfe..e21d69d13506 100644
--- a/net/netfilter/nft_rbtree.c
+++ b/net/netfilter/nft_rbtree.c
@@ -69,8 +69,10 @@ static void nft_rbtree_elem_destroy(const struct nft_set *set,
struct nft_rbtree_elem *rbe)
{
nft_data_uninit(&rbe->key, NFT_DATA_VALUE);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_uninit(rbe->data, set->dtype);
+
kfree(rbe);
}
@@ -108,7 +110,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
int err;
size = sizeof(*rbe);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(elem->flags & NFT_SET_ELEM_INTERVAL_END))
size += sizeof(rbe->data[0]);
rbe = kzalloc(size, GFP_KERNEL);
@@ -117,7 +120,8 @@ static int nft_rbtree_insert(const struct nft_set *set,
rbe->flags = elem->flags;
nft_data_copy(&rbe->key, &elem->key);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_copy(rbe->data, &elem->data);
err = __nft_rbtree_insert(set, rbe);
@@ -153,7 +157,8 @@ static int nft_rbtree_get(const struct nft_set *set, struct nft_set_elem *elem)
parent = parent->rb_right;
else {
elem->cookie = rbe;
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_copy(&elem->data, rbe->data);
elem->flags = rbe->flags;
return 0;
@@ -177,7 +182,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
rbe = rb_entry(node, struct nft_rbtree_elem, node);
nft_data_copy(&elem.key, &rbe->key);
- if (set->flags & NFT_SET_MAP)
+ if (set->flags & NFT_SET_MAP &&
+ !(rbe->flags & NFT_SET_ELEM_INTERVAL_END))
nft_data_copy(&elem.data, rbe->data);
elem.flags = rbe->flags;
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 5e204711d704..f3448c296446 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -16,65 +16,23 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
-#include <net/icmp.h>
-#include <net/netfilter/ipv4/nf_reject.h>
+#include <net/netfilter/nft_reject.h>
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-#include <net/netfilter/ipv6/nf_reject.h>
-#endif
-
-struct nft_reject {
- enum nft_reject_types type:8;
- u8 icmp_code;
- u8 family;
-};
-
-static void nft_reject_eval(const struct nft_expr *expr,
- struct nft_data data[NFT_REG_MAX + 1],
- const struct nft_pktinfo *pkt)
-{
- struct nft_reject *priv = nft_expr_priv(expr);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
-#endif
- switch (priv->type) {
- case NFT_REJECT_ICMP_UNREACH:
- if (priv->family == NFPROTO_IPV4)
- nf_send_unreach(pkt->skb, priv->icmp_code);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- else if (priv->family == NFPROTO_IPV6)
- nf_send_unreach6(net, pkt->skb, priv->icmp_code,
- pkt->ops->hooknum);
-#endif
- break;
- case NFT_REJECT_TCP_RST:
- if (priv->family == NFPROTO_IPV4)
- nf_send_reset(pkt->skb, pkt->ops->hooknum);
-#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
- else if (priv->family == NFPROTO_IPV6)
- nf_send_reset6(net, pkt->skb, pkt->ops->hooknum);
-#endif
- break;
- }
-
- data[NFT_REG_VERDICT].verdict = NF_DROP;
-}
-
-static const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
+const struct nla_policy nft_reject_policy[NFTA_REJECT_MAX + 1] = {
[NFTA_REJECT_TYPE] = { .type = NLA_U32 },
[NFTA_REJECT_ICMP_CODE] = { .type = NLA_U8 },
};
+EXPORT_SYMBOL_GPL(nft_reject_policy);
-static int nft_reject_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+int nft_reject_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
{
struct nft_reject *priv = nft_expr_priv(expr);
if (tb[NFTA_REJECT_TYPE] == NULL)
return -EINVAL;
- priv->family = ctx->afi->family;
priv->type = ntohl(nla_get_be32(tb[NFTA_REJECT_TYPE]));
switch (priv->type) {
case NFT_REJECT_ICMP_UNREACH:
@@ -89,8 +47,9 @@ static int nft_reject_init(const struct nft_ctx *ctx,
return 0;
}
+EXPORT_SYMBOL_GPL(nft_reject_init);
-static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
+int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
@@ -109,37 +68,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
nla_put_failure:
return -1;
}
-
-static struct nft_expr_type nft_reject_type;
-static const struct nft_expr_ops nft_reject_ops = {
- .type = &nft_reject_type,
- .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
- .eval = nft_reject_eval,
- .init = nft_reject_init,
- .dump = nft_reject_dump,
-};
-
-static struct nft_expr_type nft_reject_type __read_mostly = {
- .name = "reject",
- .ops = &nft_reject_ops,
- .policy = nft_reject_policy,
- .maxattr = NFTA_REJECT_MAX,
- .owner = THIS_MODULE,
-};
-
-static int __init nft_reject_module_init(void)
-{
- return nft_register_expr(&nft_reject_type);
-}
-
-static void __exit nft_reject_module_exit(void)
-{
- nft_unregister_expr(&nft_reject_type);
-}
-
-module_init(nft_reject_module_init);
-module_exit(nft_reject_module_exit);
+EXPORT_SYMBOL_GPL(nft_reject_dump);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
-MODULE_ALIAS_NFT_EXPR("reject");
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
new file mode 100644
index 000000000000..b718a52a4654
--- /dev/null
+++ b/net/netfilter/nft_reject_inet.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (c) 2014 Patrick McHardy <kaber@trash.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+
+static void nft_reject_inet_eval(const struct nft_expr *expr,
+ struct nft_data data[NFT_REG_MAX + 1],
+ const struct nft_pktinfo *pkt)
+{
+ switch (pkt->ops->pf) {
+ case NFPROTO_IPV4:
+ return nft_reject_ipv4_eval(expr, data, pkt);
+ case NFPROTO_IPV6:
+ return nft_reject_ipv6_eval(expr, data, pkt);
+ }
+}
+
+static struct nft_expr_type nft_reject_inet_type;
+static const struct nft_expr_ops nft_reject_inet_ops = {
+ .type = &nft_reject_inet_type,
+ .size = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+ .eval = nft_reject_inet_eval,
+ .init = nft_reject_init,
+ .dump = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_inet_type __read_mostly = {
+ .family = NFPROTO_INET,
+ .name = "reject",
+ .ops = &nft_reject_inet_ops,
+ .policy = nft_reject_policy,
+ .maxattr = NFTA_REJECT_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init nft_reject_inet_module_init(void)
+{
+ return nft_register_expr(&nft_reject_inet_type);
+}
+
+static void __exit nft_reject_inet_module_exit(void)
+{
+ nft_unregister_expr(&nft_reject_inet_type);
+}
+
+module_init(nft_reject_inet_module_init);
+module_exit(nft_reject_inet_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
+MODULE_ALIAS_NFT_AF_EXPR(1, "reject");
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 5929be622c5c..75747aecdebe 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -228,12 +228,7 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
goto err3;
}
- __set_bit(IPS_TEMPLATE_BIT, &ct->status);
- __set_bit(IPS_CONFIRMED_BIT, &ct->status);
-
- /* Overload tuple linked list to put us in template list. */
- hlist_nulls_add_head_rcu(&ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
- &par->net->ct.tmpl);
+ nf_conntrack_tmpl_insert(par->net, ct);
out:
info->ct = ct;
return 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index fdf51353cf78..04748ab649c2 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1489,8 +1489,8 @@ static int netlink_connect(struct socket *sock, struct sockaddr *addr,
if (addr->sa_family != AF_NETLINK)
return -EINVAL;
- /* Only superuser is allowed to send multicasts */
- if (nladdr->nl_groups && !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
+ if ((nladdr->nl_groups || nladdr->nl_pid) &&
+ !netlink_capable(sock, NL_CFG_F_NONROOT_SEND))
return -EPERM;
if (!nlk->portid)
diff --git a/net/nfc/nci/core.c b/net/nfc/nci/core.c
index 46bda010bf11..56db888b1cd5 100644
--- a/net/nfc/nci/core.c
+++ b/net/nfc/nci/core.c
@@ -301,7 +301,7 @@ static int nci_open_device(struct nci_dev *ndev)
rc = __nci_request(ndev, nci_reset_req, 0,
msecs_to_jiffies(NCI_RESET_TIMEOUT));
- if (ndev->ops->setup(ndev))
+ if (ndev->ops->setup)
ndev->ops->setup(ndev);
if (!rc) {
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index df4692826ead..8601b320b443 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -55,6 +55,7 @@
#include "datapath.h"
#include "flow.h"
+#include "flow_table.h"
#include "flow_netlink.h"
#include "vport-internal_dev.h"
#include "vport-netdev.h"
@@ -160,7 +161,6 @@ static void destroy_dp_rcu(struct rcu_head *rcu)
{
struct datapath *dp = container_of(rcu, struct datapath, rcu);
- ovs_flow_tbl_destroy(&dp->table);
free_percpu(dp->stats_percpu);
release_net(ovs_dp_get_net(dp));
kfree(dp->ports);
@@ -466,6 +466,14 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
skb_zerocopy(user_skb, skb, skb->len, hlen);
+ /* Pad OVS_PACKET_ATTR_PACKET if linear copy was performed */
+ if (!(dp->user_features & OVS_DP_F_UNALIGNED)) {
+ size_t plen = NLA_ALIGN(user_skb->len) - user_skb->len;
+
+ if (plen > 0)
+ memset(skb_put(user_skb, plen), 0, plen);
+ }
+
((struct nlmsghdr *) user_skb->data)->nlmsg_len = user_skb->len;
err = genlmsg_unicast(ovs_dp_get_net(dp), user_skb, upcall_info->portid);
@@ -852,11 +860,8 @@ static int ovs_flow_cmd_new_or_set(struct sk_buff *skb, struct genl_info *info)
goto err_unlock_ovs;
/* The unmasked key has to be the same for flow updates. */
- error = -EINVAL;
- if (!ovs_flow_cmp_unmasked_key(flow, &match)) {
- OVS_NLERR("Flow modification message rejected, unmasked key does not match.\n");
+ if (!ovs_flow_cmp_unmasked_key(flow, &match))
goto err_unlock_ovs;
- }
/* Update actions. */
old_acts = ovsl_dereference(flow->sf_acts);
@@ -1079,6 +1084,7 @@ static size_t ovs_dp_cmd_msg_size(void)
msgsize += nla_total_size(IFNAMSIZ);
msgsize += nla_total_size(sizeof(struct ovs_dp_stats));
msgsize += nla_total_size(sizeof(struct ovs_dp_megaflow_stats));
+ msgsize += nla_total_size(sizeof(u32)); /* OVS_DP_ATTR_USER_FEATURES */
return msgsize;
}
@@ -1168,7 +1174,7 @@ static void ovs_dp_reset_user_features(struct sk_buff *skb, struct genl_info *in
struct datapath *dp;
dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
- if (!dp)
+ if (IS_ERR(dp))
return;
WARN(dp->user_features, "Dropping previously announced user features\n");
@@ -1279,7 +1285,7 @@ err_destroy_ports_array:
err_destroy_percpu:
free_percpu(dp->stats_percpu);
err_destroy_table:
- ovs_flow_tbl_destroy(&dp->table);
+ ovs_flow_tbl_destroy(&dp->table, false);
err_free_dp:
release_net(ovs_dp_get_net(dp));
kfree(dp);
@@ -1306,10 +1312,13 @@ static void __dp_destroy(struct datapath *dp)
list_del_rcu(&dp->list_node);
/* OVSP_LOCAL is datapath internal port. We need to make sure that
- * all port in datapath are destroyed first before freeing datapath.
+ * all ports in datapath are destroyed first before freeing datapath.
*/
ovs_dp_detach_port(ovs_vport_ovsl(dp, OVSP_LOCAL));
+ /* RCU destroy the flow table */
+ ovs_flow_tbl_destroy(&dp->table, true);
+
call_rcu(&dp->rcu, destroy_dp_rcu);
}
@@ -1753,11 +1762,12 @@ static int ovs_vport_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
int bucket = cb->args[0], skip = cb->args[1];
int i, j = 0;
+ rcu_read_lock();
dp = get_dp(sock_net(skb->sk), ovs_header->dp_ifindex);
- if (!dp)
+ if (!dp) {
+ rcu_read_unlock();
return -ENODEV;
-
- rcu_read_lock();
+ }
for (i = bucket; i < DP_VPORT_HASH_BUCKETS; i++) {
struct vport *vport;
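
Editor's note on the queue_userspace_packet() hunk above: when user space did not request OVS_DP_F_UNALIGNED, the upcall payload is padded out to the netlink alignment boundary. A minimal user-space sketch of the same NLA_ALIGN arithmetic, for illustration only (not part of the patch):

#include <stdio.h>

#define NLA_ALIGNTO     4
#define NLA_ALIGN(len)  (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))

int main(void)
{
        /* A 61-byte payload needs 3 bytes of zero padding to stay 4-byte aligned. */
        unsigned int len = 61;

        printf("payload %u -> padded %u (pad %u)\n",
               len, NLA_ALIGN(len), NLA_ALIGN(len) - len);
        return 0;
}
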
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 16f4b46161d4..dda451f4429c 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -73,6 +73,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
if ((flow->key.eth.type == htons(ETH_P_IP) ||
flow->key.eth.type == htons(ETH_P_IPV6)) &&
+ flow->key.ip.frag != OVS_FRAG_TYPE_LATER &&
flow->key.ip.proto == IPPROTO_TCP &&
likely(skb->len >= skb_transport_offset(skb) + sizeof(struct tcphdr))) {
tcp_flags = TCP_FLAGS_BE16(tcp_hdr(skb));
@@ -91,7 +92,7 @@ static void stats_read(struct flow_stats *stats,
unsigned long *used, __be16 *tcp_flags)
{
spin_lock(&stats->lock);
- if (time_after(stats->used, *used))
+ if (!*used || time_after(stats->used, *used))
*used = stats->used;
*tcp_flags |= stats->tcp_flags;
ovs_stats->n_packets += stats->packet_count;
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index c58a0fe3c889..3c268b3d71c3 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -153,29 +153,29 @@ static void rcu_free_flow_callback(struct rcu_head *rcu)
flow_free(flow);
}
-static void flow_mask_del_ref(struct sw_flow_mask *mask, bool deferred)
-{
- if (!mask)
- return;
-
- BUG_ON(!mask->ref_count);
- mask->ref_count--;
-
- if (!mask->ref_count) {
- list_del_rcu(&mask->list);
- if (deferred)
- kfree_rcu(mask, rcu);
- else
- kfree(mask);
- }
-}
-
void ovs_flow_free(struct sw_flow *flow, bool deferred)
{
if (!flow)
return;
- flow_mask_del_ref(flow->mask, deferred);
+ if (flow->mask) {
+ struct sw_flow_mask *mask = flow->mask;
+
+ /* ovs-lock is required to protect mask-refcount and
+ * mask list.
+ */
+ ASSERT_OVSL();
+ BUG_ON(!mask->ref_count);
+ mask->ref_count--;
+
+ if (!mask->ref_count) {
+ list_del_rcu(&mask->list);
+ if (deferred)
+ kfree_rcu(mask, rcu);
+ else
+ kfree(mask);
+ }
+ }
if (deferred)
call_rcu(&flow->rcu, rcu_free_flow_callback);
@@ -188,26 +188,9 @@ static void free_buckets(struct flex_array *buckets)
flex_array_free(buckets);
}
+
static void __table_instance_destroy(struct table_instance *ti)
{
- int i;
-
- if (ti->keep_flows)
- goto skip_flows;
-
- for (i = 0; i < ti->n_buckets; i++) {
- struct sw_flow *flow;
- struct hlist_head *head = flex_array_get(ti->buckets, i);
- struct hlist_node *n;
- int ver = ti->node_ver;
-
- hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
- hlist_del(&flow->hash_node[ver]);
- ovs_flow_free(flow, false);
- }
- }
-
-skip_flows:
free_buckets(ti->buckets);
kfree(ti);
}
@@ -258,20 +241,38 @@ static void flow_tbl_destroy_rcu_cb(struct rcu_head *rcu)
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
+ int i;
+
if (!ti)
return;
+ if (ti->keep_flows)
+ goto skip_flows;
+
+ for (i = 0; i < ti->n_buckets; i++) {
+ struct sw_flow *flow;
+ struct hlist_head *head = flex_array_get(ti->buckets, i);
+ struct hlist_node *n;
+ int ver = ti->node_ver;
+
+ hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
+ hlist_del_rcu(&flow->hash_node[ver]);
+ ovs_flow_free(flow, deferred);
+ }
+ }
+
+skip_flows:
if (deferred)
call_rcu(&ti->rcu, flow_tbl_destroy_rcu_cb);
else
__table_instance_destroy(ti);
}
-void ovs_flow_tbl_destroy(struct flow_table *table)
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred)
{
struct table_instance *ti = ovsl_dereference(table->ti);
- table_instance_destroy(ti, false);
+ table_instance_destroy(ti, deferred);
}
struct sw_flow *ovs_flow_tbl_dump_next(struct table_instance *ti,
@@ -504,16 +505,11 @@ static struct sw_flow_mask *mask_alloc(void)
mask = kmalloc(sizeof(*mask), GFP_KERNEL);
if (mask)
- mask->ref_count = 0;
+ mask->ref_count = 1;
return mask;
}
-static void mask_add_ref(struct sw_flow_mask *mask)
-{
- mask->ref_count++;
-}
-
static bool mask_equal(const struct sw_flow_mask *a,
const struct sw_flow_mask *b)
{
@@ -554,9 +550,11 @@ static int flow_mask_insert(struct flow_table *tbl, struct sw_flow *flow,
mask->key = new->key;
mask->range = new->range;
list_add_rcu(&mask->list, &tbl->mask_list);
+ } else {
+ BUG_ON(!mask->ref_count);
+ mask->ref_count++;
}
- mask_add_ref(mask);
flow->mask = mask;
return 0;
}
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index 1996e34c0fd8..baaeb101924d 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -60,7 +60,7 @@ void ovs_flow_free(struct sw_flow *, bool deferred);
int ovs_flow_tbl_init(struct flow_table *);
int ovs_flow_tbl_count(struct flow_table *table);
-void ovs_flow_tbl_destroy(struct flow_table *table);
+void ovs_flow_tbl_destroy(struct flow_table *table, bool deferred);
int ovs_flow_tbl_flush(struct flow_table *flow_table);
int ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6a2bb37506c5..48a6a93db296 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -308,11 +308,27 @@ static bool packet_use_direct_xmit(const struct packet_sock *po)
return po->xmit == packet_direct_xmit;
}
-static u16 packet_pick_tx_queue(struct net_device *dev)
+static u16 __packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
{
return (u16) raw_smp_processor_id() % dev->real_num_tx_queues;
}
+static void packet_pick_tx_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ const struct net_device_ops *ops = dev->netdev_ops;
+ u16 queue_index;
+
+ if (ops->ndo_select_queue) {
+ queue_index = ops->ndo_select_queue(dev, skb, NULL,
+ __packet_pick_tx_queue);
+ queue_index = netdev_cap_txqueue(dev, queue_index);
+ } else {
+ queue_index = __packet_pick_tx_queue(dev, skb);
+ }
+
+ skb_set_queue_mapping(skb, queue_index);
+}
+
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
@@ -2285,7 +2301,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
}
}
- skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+ packet_pick_tx_queue(dev, skb);
+
skb->destructor = tpacket_destruct_skb;
__packet_set_status(po, ph, TP_STATUS_SENDING);
packet_inc_pending(&po->tx_ring);
@@ -2499,7 +2516,8 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
skb->dev = dev;
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb_set_queue_mapping(skb, packet_pick_tx_queue(dev));
+
+ packet_pick_tx_queue(dev, skb);
if (po->has_vnet_hdr) {
if (vnet_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
@@ -3786,7 +3804,7 @@ static int packet_set_ring(struct sock *sk, union tpacket_req_u *req_u,
*/
if (!tx_ring)
init_prb_bdqc(po, rb, pg_vec, req_u, tx_ring);
- break;
+ break;
default:
break;
}
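
Editor's note on the af_packet.c hunks above: drivers with an ndo_select_queue() callback now get asked first, and the old behaviour survives as the fallback, spreading senders across the device's real TX queues by CPU id. A small user-space sketch of that fallback arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

static unsigned short pick_tx_queue(unsigned int cpu_id,
                                    unsigned int real_num_tx_queues)
{
        /* Same arithmetic as the kernel fallback: cpu modulo queue count. */
        return (unsigned short)(cpu_id % real_num_tx_queues);
}

int main(void)
{
        printf("cpu 5 on a 4-queue device -> queue %u\n", pick_tx_queue(5, 4));
        return 0;
}
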
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 1313145e3b86..a07d55e75698 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -273,11 +273,12 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle)
void qdisc_list_add(struct Qdisc *q)
{
- struct Qdisc *root = qdisc_dev(q)->qdisc;
+ if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) {
+ struct Qdisc *root = qdisc_dev(q)->qdisc;
- WARN_ON_ONCE(root == &noop_qdisc);
- if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS))
+ WARN_ON_ONCE(root == &noop_qdisc);
list_add_tail(&q->list, &root->list);
+ }
}
EXPORT_SYMBOL(qdisc_list_add);
diff --git a/net/sched/sch_fq.c b/net/sched/sch_fq.c
index 08ef7a42c0e4..21e251766eb1 100644
--- a/net/sched/sch_fq.c
+++ b/net/sched/sch_fq.c
@@ -601,6 +601,7 @@ static int fq_resize(struct Qdisc *sch, u32 log)
{
struct fq_sched_data *q = qdisc_priv(sch);
struct rb_root *array;
+ void *old_fq_root;
u32 idx;
if (q->fq_root && log == q->fq_trees_log)
@@ -615,13 +616,19 @@ static int fq_resize(struct Qdisc *sch, u32 log)
for (idx = 0; idx < (1U << log); idx++)
array[idx] = RB_ROOT;
- if (q->fq_root) {
- fq_rehash(q, q->fq_root, q->fq_trees_log, array, log);
- fq_free(q->fq_root);
- }
+ sch_tree_lock(sch);
+
+ old_fq_root = q->fq_root;
+ if (old_fq_root)
+ fq_rehash(q, old_fq_root, q->fq_trees_log, array, log);
+
q->fq_root = array;
q->fq_trees_log = log;
+ sch_tree_unlock(sch);
+
+ fq_free(old_fq_root);
+
return 0;
}
@@ -697,9 +704,11 @@ static int fq_change(struct Qdisc *sch, struct nlattr *opt)
q->flow_refill_delay = usecs_to_jiffies(usecs_delay);
}
- if (!err)
+ if (!err) {
+ sch_tree_unlock(sch);
err = fq_resize(sch, fq_log);
-
+ sch_tree_lock(sch);
+ }
while (sch->q.qlen > sch->limit) {
struct sk_buff *skb = fq_dequeue(sch);
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index a255d0200a59..fefeeb73f15f 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -15,6 +15,11 @@
*
* ECN support is added by Naeem Khademi <naeemk@ifi.uio.no>
* University of Oslo, Norway.
+ *
+ * References:
+ * IETF draft submission: http://tools.ietf.org/html/draft-pan-aqm-pie-00
+ * IEEE Conference on High Performance Switching and Routing 2013 :
+ * "PIE: A * Lightweight Control Scheme to Address the Bufferbloat Problem"
*/
#include <linux/module.h>
@@ -36,7 +41,7 @@ struct pie_params {
psched_time_t target; /* user specified target delay in pschedtime */
u32 tupdate; /* timer frequency (in jiffies) */
u32 limit; /* number of packets that can be enqueued */
- u32 alpha; /* alpha and beta are between -4 and 4 */
+ u32 alpha; /* alpha and beta are between 0 and 32 */
u32 beta; /* and are used for shift relative to 1 */
bool ecn; /* true if ecn is enabled */
bool bytemode; /* to scale drop early prob based on pkt size */
@@ -326,10 +331,16 @@ static void calculate_probability(struct Qdisc *sch)
if (qdelay == 0 && qlen != 0)
update_prob = false;
- /* Add ranges for alpha and beta, more aggressive for high dropping
- * mode and gentle steps for light dropping mode
- * In light dropping mode, take gentle steps; in medium dropping mode,
- * take medium steps; in high dropping mode, take big steps.
+ /* In the algorithm, alpha and beta are between 0 and 2 with typical
+ * value for alpha as 0.125. In this implementation, we use values 0-32
+ * passed from user space to represent this. Also, alpha and beta have
+ * units of HZ and need to be scaled before they can be used to update
+ * probability. alpha/beta are updated locally below by 1) scaling them
+ * appropriately 2) scaling down by 16 to come to 0-2 range.
+ * Please see paper for details.
+ *
+ * We scale alpha and beta differently depending on whether we are in
+ * light, medium or high dropping mode.
*/
if (q->vars.prob < MAX_PROB / 100) {
alpha =
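
Editor's note on the sch_pie.c comment above: the value stored in pie_params is the user-visible 0-32 number, and scaling down by 16 recovers the paper's 0-2 range. A rough illustration (not part of the patch; the default of 2 is an assumption taken from the qdisc's usual defaults):

#include <stdio.h>

int main(void)
{
        unsigned int alpha_user = 2;                    /* assumed default, 0-32 scale */
        double alpha_paper = alpha_user / 16.0;         /* back to the paper's 0-2 range */

        printf("alpha %u/16 = %.3f\n", alpha_user, alpha_paper);       /* 0.125 */
        return 0;
}
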
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index 1cb413fead89..4f505a006896 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -334,18 +334,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
tb[TCA_TBF_PTAB]));
- if (q->qdisc != &noop_qdisc) {
- err = fifo_set_limit(q->qdisc, qopt->limit);
- if (err)
- goto done;
- } else if (qopt->limit > 0) {
- child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
- if (IS_ERR(child)) {
- err = PTR_ERR(child);
- goto done;
- }
- }
-
buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
@@ -390,6 +378,18 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
goto done;
}
+ if (q->qdisc != &noop_qdisc) {
+ err = fifo_set_limit(q->qdisc, qopt->limit);
+ if (err)
+ goto done;
+ } else if (qopt->limit > 0) {
+ child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit);
+ if (IS_ERR(child)) {
+ err = PTR_ERR(child);
+ goto done;
+ }
+ }
+
sch_tree_lock(sch);
if (child) {
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 5ae609200674..ee13d28d39d1 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1239,78 +1239,107 @@ void sctp_assoc_update(struct sctp_association *asoc,
}
/* Update the retran path for sending a retransmitted packet.
- * Round-robin through the active transports, else round-robin
- * through the inactive transports as this is the next best thing
- * we can try.
+ * See also RFC4960, 6.4. Multi-Homed SCTP Endpoints:
+ *
+ * When there is outbound data to send and the primary path
+ * becomes inactive (e.g., due to failures), or where the
+ * SCTP user explicitly requests to send data to an
+ * inactive destination transport address, before reporting
+ * an error to its ULP, the SCTP endpoint should try to send
+ * the data to an alternate active destination transport
+ * address if one exists.
+ *
+ * When retransmitting data that timed out, if the endpoint
+ * is multihomed, it should consider each source-destination
+ * address pair in its retransmission selection policy.
+ * When retransmitting timed-out data, the endpoint should
+ * attempt to pick the most divergent source-destination
+ * pair from the original source-destination pair to which
+ * the packet was transmitted.
+ *
+ * Note: Rules for picking the most divergent source-destination
+ * pair are an implementation decision and are not specified
+ * within this document.
+ *
+ * Our basic strategy is to round-robin transports in priorities
+ * according to sctp_trans_state_to_prio_map[] e.g., if no such
+ * transport with state SCTP_ACTIVE exists, round-robin through
+ * SCTP_UNKNOWN, etc. You get the picture.
*/
-void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+static const u8 sctp_trans_state_to_prio_map[] = {
+ [SCTP_ACTIVE] = 3, /* best case */
+ [SCTP_UNKNOWN] = 2,
+ [SCTP_PF] = 1,
+ [SCTP_INACTIVE] = 0, /* worst case */
+};
+
+static u8 sctp_trans_score(const struct sctp_transport *trans)
{
- struct sctp_transport *t, *next;
- struct list_head *head = &asoc->peer.transport_addr_list;
- struct list_head *pos;
+ return sctp_trans_state_to_prio_map[trans->state];
+}
- if (asoc->peer.transport_count == 1)
- return;
+static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr,
+ struct sctp_transport *best)
+{
+ if (best == NULL)
+ return curr;
- /* Find the next transport in a round-robin fashion. */
- t = asoc->peer.retran_path;
- pos = &t->transports;
- next = NULL;
+ return sctp_trans_score(curr) > sctp_trans_score(best) ? curr : best;
+}
- while (1) {
- /* Skip the head. */
- if (pos->next == head)
- pos = head->next;
- else
- pos = pos->next;
+void sctp_assoc_update_retran_path(struct sctp_association *asoc)
+{
+ struct sctp_transport *trans = asoc->peer.retran_path;
+ struct sctp_transport *trans_next = NULL;
- t = list_entry(pos, struct sctp_transport, transports);
+ /* We're done as we only have the one and only path. */
+ if (asoc->peer.transport_count == 1)
+ return;
+ /* If active_path and retran_path are the same and active,
+ * then this is the only active path. Use it.
+ */
+ if (asoc->peer.active_path == asoc->peer.retran_path &&
+ asoc->peer.active_path->state == SCTP_ACTIVE)
+ return;
- /* We have exhausted the list, but didn't find any
- * other active transports. If so, use the next
- * transport.
- */
- if (t == asoc->peer.retran_path) {
- t = next;
+ /* Iterate from retran_path's successor back to retran_path. */
+ for (trans = list_next_entry(trans, transports); 1;
+ trans = list_next_entry(trans, transports)) {
+ /* Manually skip the head element. */
+ if (&trans->transports == &asoc->peer.transport_addr_list)
+ continue;
+ if (trans->state == SCTP_UNCONFIRMED)
+ continue;
+ trans_next = sctp_trans_elect_best(trans, trans_next);
+ /* Active is good enough for immediate return. */
+ if (trans_next->state == SCTP_ACTIVE)
break;
- }
-
- /* Try to find an active transport. */
-
- if ((t->state == SCTP_ACTIVE) ||
- (t->state == SCTP_UNKNOWN)) {
+ /* We've reached the end, time to update path. */
+ if (trans == asoc->peer.retran_path)
break;
- } else {
- /* Keep track of the next transport in case
- * we don't find any active transport.
- */
- if (t->state != SCTP_UNCONFIRMED && !next)
- next = t;
- }
}
- if (t)
- asoc->peer.retran_path = t;
- else
- t = asoc->peer.retran_path;
+ if (trans_next != NULL)
+ asoc->peer.retran_path = trans_next;
- pr_debug("%s: association:%p addr:%pISpc\n", __func__, asoc,
- &t->ipaddr.sa);
+ pr_debug("%s: association:%p updated new path to addr:%pISpc\n",
+ __func__, asoc, &asoc->peer.retran_path->ipaddr.sa);
}
-/* Choose the transport for sending retransmit packet. */
-struct sctp_transport *sctp_assoc_choose_alter_transport(
- struct sctp_association *asoc, struct sctp_transport *last_sent_to)
+struct sctp_transport *
+sctp_assoc_choose_alter_transport(struct sctp_association *asoc,
+ struct sctp_transport *last_sent_to)
{
/* If this is the first time packet is sent, use the active path,
* else use the retran path. If the last packet was sent over the
* retran path, update the retran path and use it.
*/
- if (!last_sent_to)
+ if (last_sent_to == NULL) {
return asoc->peer.active_path;
- else {
+ } else {
if (last_sent_to == asoc->peer.retran_path)
sctp_assoc_update_retran_path(asoc);
+
return asoc->peer.retran_path;
}
}
@@ -1367,44 +1396,35 @@ static inline bool sctp_peer_needs_update(struct sctp_association *asoc)
return false;
}
-/* Increase asoc's rwnd by len and send any window update SACK if needed. */
-void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
+/* Update asoc's rwnd for the approximated state in the buffer,
+ * and check whether SACK needs to be sent.
+ */
+void sctp_assoc_rwnd_update(struct sctp_association *asoc, bool update_peer)
{
+ int rx_count;
struct sctp_chunk *sack;
struct timer_list *timer;
- if (asoc->rwnd_over) {
- if (asoc->rwnd_over >= len) {
- asoc->rwnd_over -= len;
- } else {
- asoc->rwnd += (len - asoc->rwnd_over);
- asoc->rwnd_over = 0;
- }
- } else {
- asoc->rwnd += len;
- }
+ if (asoc->ep->rcvbuf_policy)
+ rx_count = atomic_read(&asoc->rmem_alloc);
+ else
+ rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
- /* If we had window pressure, start recovering it
- * once our rwnd had reached the accumulated pressure
- * threshold. The idea is to recover slowly, but up
- * to the initial advertised window.
- */
- if (asoc->rwnd_press && asoc->rwnd >= asoc->rwnd_press) {
- int change = min(asoc->pathmtu, asoc->rwnd_press);
- asoc->rwnd += change;
- asoc->rwnd_press -= change;
- }
+ if ((asoc->base.sk->sk_rcvbuf - rx_count) > 0)
+ asoc->rwnd = (asoc->base.sk->sk_rcvbuf - rx_count) >> 1;
+ else
+ asoc->rwnd = 0;
- pr_debug("%s: asoc:%p rwnd increased by %d to (%u, %u) - %u\n",
- __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
- asoc->a_rwnd);
+ pr_debug("%s: asoc:%p rwnd=%u, rx_count=%d, sk_rcvbuf=%d\n",
+ __func__, asoc, asoc->rwnd, rx_count,
+ asoc->base.sk->sk_rcvbuf);
/* Send a window update SACK if the rwnd has increased by at least the
* minimum of the association's PMTU and half of the receive buffer.
* The algorithm used is similar to the one described in
* Section 4.2.3.3 of RFC 1122.
*/
- if (sctp_peer_needs_update(asoc)) {
+ if (update_peer && sctp_peer_needs_update(asoc)) {
asoc->a_rwnd = asoc->rwnd;
pr_debug("%s: sending window update SACK- asoc:%p rwnd:%u "
@@ -1426,45 +1446,6 @@ void sctp_assoc_rwnd_increase(struct sctp_association *asoc, unsigned int len)
}
}
-/* Decrease asoc's rwnd by len. */
-void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned int len)
-{
- int rx_count;
- int over = 0;
-
- if (unlikely(!asoc->rwnd || asoc->rwnd_over))
- pr_debug("%s: association:%p has asoc->rwnd:%u, "
- "asoc->rwnd_over:%u!\n", __func__, asoc,
- asoc->rwnd, asoc->rwnd_over);
-
- if (asoc->ep->rcvbuf_policy)
- rx_count = atomic_read(&asoc->rmem_alloc);
- else
- rx_count = atomic_read(&asoc->base.sk->sk_rmem_alloc);
-
- /* If we've reached or overflowed our receive buffer, announce
- * a 0 rwnd if rwnd would still be positive. Store the
- * the potential pressure overflow so that the window can be restored
- * back to original value.
- */
- if (rx_count >= asoc->base.sk->sk_rcvbuf)
- over = 1;
-
- if (asoc->rwnd >= len) {
- asoc->rwnd -= len;
- if (over) {
- asoc->rwnd_press += asoc->rwnd;
- asoc->rwnd = 0;
- }
- } else {
- asoc->rwnd_over = len - asoc->rwnd;
- asoc->rwnd = 0;
- }
-
- pr_debug("%s: asoc:%p rwnd decreased by %d to (%u, %u, %u)\n",
- __func__, asoc, len, asoc->rwnd, asoc->rwnd_over,
- asoc->rwnd_press);
-}
/* Build the bind address list for the association based on info from the
* local endpoint and the remote peer.
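
Editor's note on the associola.c hunks above: the new sctp_assoc_rwnd_update() derives the advertised window from the current receive-buffer occupancy instead of tracking per-chunk increments and decrements. A user-space sketch of that arithmetic (illustrative only, not part of the patch):

#include <stdio.h>

static unsigned int rwnd_from_rcvbuf(int sk_rcvbuf, int rx_count)
{
        int room = sk_rcvbuf - rx_count;

        /* Advertise half of the free receive-buffer space, never below zero. */
        return room > 0 ? (unsigned int)room >> 1 : 0;
}

int main(void)
{
        printf("rwnd = %u\n", rwnd_from_rcvbuf(212992, 8192));
        printf("rwnd = %u\n", rwnd_from_rcvbuf(4096, 8192));    /* buffer overshot -> 0 */
        return 0;
}
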
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 0f6259a6a932..2b1738ef9394 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -662,6 +662,8 @@ static struct sock *sctp_v6_create_accept_sk(struct sock *sk,
*/
sctp_v6_to_sk_daddr(&asoc->peer.primary_addr, newsk);
+ newsk->sk_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+
sk_refcnt_debug_inc(newsk);
if (newsk->sk_prot->init(newsk)) {
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index 632090b961c3..3a1767ef3201 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -1421,8 +1421,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk)
BUG_ON(!list_empty(&chunk->list));
list_del_init(&chunk->transmitted_list);
- /* Free the chunk skb data and the SCTP_chunk stub itself. */
- dev_kfree_skb(chunk->skb);
+ consume_skb(chunk->skb);
+ consume_skb(chunk->auth_chunk);
SCTP_DBG_OBJCNT_DEC(chunk);
kmem_cache_free(sctp_chunk_cachep, chunk);
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index bd859154000e..5d6883ff00c3 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -495,11 +495,12 @@ static void sctp_do_8_2_transport_strike(sctp_cmd_seq_t *commands,
}
/* If the transport error count is greater than the pf_retrans
- * threshold, and less than pathmaxrtx, then mark this transport
- * as Partially Failed, ee SCTP Quick Failover Draft, secon 5.1,
- * point 1
+ * threshold, and less than pathmaxrtx, and if the current state
+ * is not SCTP_UNCONFIRMED, then mark this transport as Partially
+ * Failed, see SCTP Quick Failover Draft, section 5.1
*/
if ((transport->state != SCTP_PF) &&
+ (transport->state != SCTP_UNCONFIRMED) &&
(asoc->pf_retrans < transport->pathmaxrxt) &&
(transport->error_count > asoc->pf_retrans)) {
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 483dcd71b3c5..01e002430c85 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -758,6 +758,12 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
struct sctp_chunk auth;
sctp_ierror_t ret;
+ /* Make sure that we and the peer are AUTH capable */
+ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) {
+ sctp_association_free(new_asoc);
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+ }
+
/* set-up our fake chunk so that we can process it */
auth.skb = chunk->auth_chunk;
auth.asoc = chunk->asoc;
@@ -768,10 +774,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
auth.transport = chunk->transport;
ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth);
-
- /* We can now safely free the auth_chunk clone */
- kfree_skb(chunk->auth_chunk);
-
if (ret != SCTP_IERROR_NO_ERROR) {
sctp_association_free(new_asoc);
return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
@@ -6176,7 +6178,7 @@ static int sctp_eat_data(const struct sctp_association *asoc,
* PMTU. In cases, such as loopback, this might be a rather
* large spill over.
*/
- if ((!chunk->data_accepted) && (!asoc->rwnd || asoc->rwnd_over ||
+ if ((!chunk->data_accepted) && (!asoc->rwnd ||
(datalen > asoc->rwnd + asoc->frag_point))) {
/* If this is the next TSN, consider reneging to make
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 9e91d6e5df63..981aaf8b6ace 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -64,6 +64,7 @@
#include <linux/crypto.h>
#include <linux/slab.h>
#include <linux/file.h>
+#include <linux/compat.h>
#include <net/ip.h>
#include <net/icmp.h>
@@ -1368,11 +1369,19 @@ static int sctp_setsockopt_connectx(struct sock *sk,
/*
* New (hopefully final) interface for the API.
* We use the sctp_getaddrs_old structure so that user-space library
- * can avoid any unnecessary allocations. The only defferent part
+ * can avoid any unnecessary allocations. The only different part
* is that we store the actual length of the address buffer into the
- * addrs_num structure member. That way we can re-use the existing
+ * addrs_num structure member. That way we can re-use the existing
* code.
*/
+#ifdef CONFIG_COMPAT
+struct compat_sctp_getaddrs_old {
+ sctp_assoc_t assoc_id;
+ s32 addr_num;
+ compat_uptr_t addrs; /* struct sockaddr * */
+};
+#endif
+
static int sctp_getsockopt_connectx3(struct sock *sk, int len,
char __user *optval,
int __user *optlen)
@@ -1381,16 +1390,30 @@ static int sctp_getsockopt_connectx3(struct sock *sk, int len,
sctp_assoc_t assoc_id = 0;
int err = 0;
- if (len < sizeof(param))
- return -EINVAL;
+#ifdef CONFIG_COMPAT
+ if (is_compat_task()) {
+ struct compat_sctp_getaddrs_old param32;
- if (copy_from_user(&param, optval, sizeof(param)))
- return -EFAULT;
+ if (len < sizeof(param32))
+ return -EINVAL;
+ if (copy_from_user(&param32, optval, sizeof(param32)))
+ return -EFAULT;
- err = __sctp_setsockopt_connectx(sk,
- (struct sockaddr __user *)param.addrs,
- param.addr_num, &assoc_id);
+ param.assoc_id = param32.assoc_id;
+ param.addr_num = param32.addr_num;
+ param.addrs = compat_ptr(param32.addrs);
+ } else
+#endif
+ {
+ if (len < sizeof(param))
+ return -EINVAL;
+ if (copy_from_user(&param, optval, sizeof(param)))
+ return -EFAULT;
+ }
+ err = __sctp_setsockopt_connectx(sk, (struct sockaddr __user *)
+ param.addrs, param.addr_num,
+ &assoc_id);
if (err == 0 || err == -EINPROGRESS) {
if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
return -EFAULT;
@@ -2092,12 +2115,6 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
sctp_skb_pull(skb, copied);
skb_queue_head(&sk->sk_receive_queue, skb);
- /* When only partial message is copied to the user, increase
- * rwnd by that amount. If all the data in the skb is read,
- * rwnd is updated when the event is freed.
- */
- if (!sctp_ulpevent_is_notification(event))
- sctp_assoc_rwnd_increase(event->asoc, copied);
goto out;
} else if ((event->msg_flags & MSG_NOTIFICATION) ||
(event->msg_flags & MSG_EOR))
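
Editor's note on the connectx3 compat path added above: a 32-bit task hands the kernel a structure whose trailing pointer is only four bytes wide, so the native struct sctp_getaddrs_old cannot be copied from user space directly. A hypothetical user-space illustration of the layout difference (not part of the patch):

#include <stdint.h>
#include <stdio.h>

struct getaddrs_old_native {    /* layout seen from 64-bit user space */
        int32_t  assoc_id;
        int32_t  addr_num;
        void    *addrs;         /* 8-byte pointer */
};

struct getaddrs_old_compat {    /* layout seen from 32-bit user space */
        int32_t  assoc_id;
        int32_t  addr_num;
        uint32_t addrs;         /* 4 bytes, analogous to compat_uptr_t */
};

int main(void)
{
        printf("native: %zu bytes, compat: %zu bytes\n",
               sizeof(struct getaddrs_old_native),
               sizeof(struct getaddrs_old_compat));
        return 0;
}
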
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 7135e617ab0f..35c8923b5554 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -151,6 +151,7 @@ static struct ctl_table sctp_net_table[] = {
},
{
.procname = "cookie_hmac_alg",
+ .data = &init_net.sctp.sctp_hmac_alg,
.maxlen = 8,
.mode = 0644,
.proc_handler = proc_sctp_do_hmac_alg,
@@ -401,15 +402,18 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
int sctp_sysctl_net_register(struct net *net)
{
- struct ctl_table *table;
- int i;
+ struct ctl_table *table = sctp_net_table;
+
+ if (!net_eq(net, &init_net)) {
+ int i;
- table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ table = kmemdup(sctp_net_table, sizeof(sctp_net_table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
- for (i = 0; table[i].data; i++)
- table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+ for (i = 0; table[i].data; i++)
+ table[i].data += (char *)(&net->sctp) - (char *)&init_net.sctp;
+ }
net->sctp.sysctl_header = register_net_sysctl(net, "net/sctp", table);
return 0;
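
Editor's note on the sysctl.c hunk above: the template table is registered unchanged for init_net and only duplicated for additional namespaces, with every .data pointer rebased by the distance between that namespace's sctp state and init_net's. A user-space sketch of the rebasing idiom (illustrative only, not part of the patch; the cross-object pointer arithmetic mirrors the kernel idiom and is not strictly portable C):

#include <stdio.h>

struct sctp_cfg { int rto_min; int rto_max; };

int main(void)
{
        struct sctp_cfg init_ns = { 1, 60 };
        struct sctp_cfg other_ns = { 0, 0 };
        int *data = &init_ns.rto_max;   /* template entry points at init_ns */

        /* Shift the pointer by the offset between the two instances. */
        data = (int *)((char *)data + ((char *)&other_ns - (char *)&init_ns));
        *data = 300;

        printf("other_ns.rto_max = %d\n", other_ns.rto_max);    /* 300 */
        return 0;
}
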
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..8d198ae03606 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -989,7 +989,7 @@ static void sctp_ulpevent_receive_data(struct sctp_ulpevent *event,
skb = sctp_event2skb(event);
/* Set the owner and charge rwnd for bytes received. */
sctp_ulpevent_set_owner(event, asoc);
- sctp_assoc_rwnd_decrease(asoc, skb_headlen(skb));
+ sctp_assoc_rwnd_update(asoc, false);
if (!skb->data_len)
return;
@@ -1011,6 +1011,7 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
{
struct sk_buff *skb, *frag;
unsigned int len;
+ struct sctp_association *asoc;
/* Current stack structures assume that the rcv buffer is
* per socket. For UDP style sockets this is not true as
@@ -1035,8 +1036,11 @@ static void sctp_ulpevent_release_data(struct sctp_ulpevent *event)
}
done:
- sctp_assoc_rwnd_increase(event->asoc, len);
+ asoc = event->asoc;
+ sctp_association_hold(asoc);
sctp_ulpevent_release_owner(event);
+ sctp_assoc_rwnd_update(asoc, true);
+ sctp_association_put(asoc);
}
static void sctp_ulpevent_release_frag_data(struct sctp_ulpevent *event)
diff --git a/net/socket.c b/net/socket.c
index 879933aaed4c..a19ae1968d37 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -450,16 +450,17 @@ EXPORT_SYMBOL(sockfd_lookup);
static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed)
{
- struct file *file;
+ struct fd f = fdget(fd);
struct socket *sock;
*err = -EBADF;
- file = fget_light(fd, fput_needed);
- if (file) {
- sock = sock_from_file(file, err);
- if (sock)
+ if (f.file) {
+ sock = sock_from_file(f.file, err);
+ if (likely(sock)) {
+ *fput_needed = f.flags;
return sock;
- fput_light(file, *fput_needed);
+ }
+ fdput(f);
}
return NULL;
}
@@ -1985,6 +1986,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg,
{
if (copy_from_user(kmsg, umsg, sizeof(struct msghdr)))
return -EFAULT;
+
+ if (kmsg->msg_namelen < 0)
+ return -EINVAL;
+
if (kmsg->msg_namelen > sizeof(struct sockaddr_storage))
kmsg->msg_namelen = sizeof(struct sockaddr_storage);
return 0;
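
Editor's note on the copy_msghdr_from_user() hunk above: a negative msg_namelen is now rejected outright before the existing clamp, so no later consumer of the header ever sees a negative length. A minimal sketch of the check (illustrative only, not part of the patch):

#include <stdio.h>

#define SOCKADDR_STORAGE_LEN 128        /* sizeof(struct sockaddr_storage) on Linux */

static int validate_namelen(int namelen)
{
        if (namelen < 0)
                return -1;                      /* kernel returns -EINVAL */
        if (namelen > SOCKADDR_STORAGE_LEN)
                namelen = SOCKADDR_STORAGE_LEN; /* clamp, as before */
        return namelen;
}

int main(void)
{
        printf("%d %d %d\n", validate_namelen(-5),
               validate_namelen(16), validate_namelen(4096));
        return 0;
}
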
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 6c0513a7f992..36e431ee1c90 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -108,6 +108,7 @@ struct gss_auth {
static DEFINE_SPINLOCK(pipe_version_lock);
static struct rpc_wait_queue pipe_version_rpc_waitqueue;
static DECLARE_WAIT_QUEUE_HEAD(pipe_version_waitqueue);
+static void gss_put_auth(struct gss_auth *gss_auth);
static void gss_free_ctx(struct gss_cl_ctx *);
static const struct rpc_pipe_ops gss_upcall_ops_v0;
@@ -320,6 +321,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
+ gss_put_auth(gss_msg->auth);
kfree(gss_msg);
}
@@ -498,9 +500,12 @@ gss_alloc_msg(struct gss_auth *gss_auth,
default:
err = gss_encode_v1_msg(gss_msg, service_name, gss_auth->target_name);
if (err)
- goto err_free_msg;
+ goto err_put_pipe_version;
};
+ kref_get(&gss_auth->kref);
return gss_msg;
+err_put_pipe_version:
+ put_pipe_version(gss_auth->net);
err_free_msg:
kfree(gss_msg);
err:
@@ -991,6 +996,8 @@ gss_create_new(struct rpc_auth_create_args *args, struct rpc_clnt *clnt)
gss_auth->service = gss_pseudoflavor_to_service(gss_auth->mech, flavor);
if (gss_auth->service == 0)
goto err_put_mech;
+ if (!gssd_running(gss_auth->net))
+ goto err_put_mech;
auth = &gss_auth->rpc_auth;
auth->au_cslack = GSS_CRED_SLACK >> 2;
auth->au_rslack = GSS_VERF_SLACK >> 2;
@@ -1062,6 +1069,12 @@ gss_free_callback(struct kref *kref)
}
static void
+gss_put_auth(struct gss_auth *gss_auth)
+{
+ kref_put(&gss_auth->kref, gss_free_callback);
+}
+
+static void
gss_destroy(struct rpc_auth *auth)
{
struct gss_auth *gss_auth = container_of(auth,
@@ -1082,7 +1095,7 @@ gss_destroy(struct rpc_auth *auth)
gss_auth->gss_pipe[1] = NULL;
rpcauth_destroy_credcache(auth);
- kref_put(&gss_auth->kref, gss_free_callback);
+ gss_put_auth(gss_auth);
}
/*
@@ -1253,7 +1266,7 @@ gss_destroy_nullcred(struct rpc_cred *cred)
call_rcu(&cred->cr_rcu, gss_free_cred_callback);
if (ctx)
gss_put_ctx(ctx);
- kref_put(&gss_auth->kref, gss_free_callback);
+ gss_put_auth(gss_auth);
}
static void
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 890a29912d5a..e860d4f7ed2a 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -64,7 +64,6 @@ static void xprt_free_allocation(struct rpc_rqst *req)
free_page((unsigned long)xbufp->head[0].iov_base);
xbufp = &req->rq_snd_buf;
free_page((unsigned long)xbufp->head[0].iov_base);
- list_del(&req->rq_bc_pa_list);
kfree(req);
}
@@ -168,8 +167,10 @@ out_free:
/*
* Memory allocation failed, free the temporary list
*/
- list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list)
+ list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
+ list_del(&req->rq_bc_pa_list);
xprt_free_allocation(req);
+ }
dprintk("RPC: setup backchannel transport failed\n");
return -ENOMEM;
@@ -198,6 +199,7 @@ void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
xprt_dec_alloc_count(xprt, max_reqs);
list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
dprintk("RPC: req=%p\n", req);
+ list_del(&req->rq_bc_pa_list);
xprt_free_allocation(req);
if (--max_reqs == 0)
break;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 80a6640f329b..06c6ff0cb911 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -571,7 +571,7 @@ static void svc_check_conn_limits(struct svc_serv *serv)
}
}
-int svc_alloc_arg(struct svc_rqst *rqstp)
+static int svc_alloc_arg(struct svc_rqst *rqstp)
{
struct svc_serv *serv = rqstp->rq_server;
struct xdr_buf *arg;
@@ -612,7 +612,7 @@ int svc_alloc_arg(struct svc_rqst *rqstp)
return 0;
}
-struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
+static struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
{
struct svc_xprt *xprt;
struct svc_pool *pool = rqstp->rq_pool;
@@ -691,7 +691,7 @@ struct svc_xprt *svc_get_next_xprt(struct svc_rqst *rqstp, long timeout)
return xprt;
}
-void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
+static void svc_add_new_temp_xprt(struct svc_serv *serv, struct svc_xprt *newxpt)
{
spin_lock_bh(&serv->sv_lock);
set_bit(XPT_TEMP, &newxpt->xpt_flags);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 817a1e523969..0addefca8e77 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -510,6 +510,7 @@ static int xs_nospace(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt);
+ struct sock *sk = transport->inet;
int ret = -EAGAIN;
dprintk("RPC: %5u xmit incomplete (%u left of %u)\n",
@@ -527,7 +528,7 @@ static int xs_nospace(struct rpc_task *task)
* window size
*/
set_bit(SOCK_NOSPACE, &transport->sock->flags);
- transport->inet->sk_write_pending++;
+ sk->sk_write_pending++;
/* ...and wait for more buffer space */
xprt_wait_for_buffer_space(task, xs_nospace_callback);
}
@@ -537,6 +538,9 @@ static int xs_nospace(struct rpc_task *task)
}
spin_unlock_bh(&xprt->transport_lock);
+
+ /* Race breaker in case memory is freed before above code is called */
+ sk->sk_write_space(sk);
return ret;
}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index a38c89969c68..574b86193b15 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -610,8 +610,13 @@ static struct notifier_block notifier = {
int tipc_bearer_setup(void)
{
+ int err;
+
+ err = register_netdevice_notifier(&notifier);
+ if (err)
+ return err;
dev_add_pack(&tipc_packet_type);
- return register_netdevice_notifier(&notifier);
+ return 0;
}
void tipc_bearer_cleanup(void)
diff --git a/net/tipc/config.c b/net/tipc/config.c
index c301a9a592d8..e6d721692ae0 100644
--- a/net/tipc/config.c
+++ b/net/tipc/config.c
@@ -181,7 +181,7 @@ static struct sk_buff *cfg_set_own_addr(void)
if (tipc_own_addr)
return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
" (cannot change node address once assigned)");
- tipc_core_start_net(addr);
+ tipc_net_start(addr);
return tipc_cfg_reply_none();
}
@@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
struct tipc_cfg_msg_hdr *req_hdr;
struct tipc_cfg_msg_hdr *rep_hdr;
struct sk_buff *rep_buf;
- int ret;
/* Validate configuration message header (ignore invalid message) */
req_hdr = (struct tipc_cfg_msg_hdr *)buf;
@@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr,
memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
rep_hdr->tcm_len = htonl(rep_buf->len);
rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
-
- ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
- rep_buf->len);
- if (ret < 0)
- pr_err("Sending cfg reply message failed, no memory\n");
-
+ tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data,
+ rep_buf->len);
kfree_skb(rep_buf);
}
}
diff --git a/net/tipc/core.c b/net/tipc/core.c
index f9e88d8b04ca..80c20647b3d2 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -77,37 +77,13 @@ struct sk_buff *tipc_buf_acquire(u32 size)
}
/**
- * tipc_core_stop_net - shut down TIPC networking sub-systems
- */
-static void tipc_core_stop_net(void)
-{
- tipc_net_stop();
- tipc_bearer_cleanup();
-}
-
-/**
- * start_net - start TIPC networking sub-systems
- */
-int tipc_core_start_net(unsigned long addr)
-{
- int res;
-
- tipc_net_start(addr);
- res = tipc_bearer_setup();
- if (res < 0)
- goto err;
- return res;
-
-err:
- tipc_core_stop_net();
- return res;
-}
-
-/**
* tipc_core_stop - switch TIPC from SINGLE NODE to NOT RUNNING mode
*/
static void tipc_core_stop(void)
{
+ tipc_handler_stop();
+ tipc_net_stop();
+ tipc_bearer_cleanup();
tipc_netlink_stop();
tipc_cfg_stop();
tipc_subscr_stop();
@@ -122,30 +98,65 @@ static void tipc_core_stop(void)
*/
static int tipc_core_start(void)
{
- int res;
+ int err;
get_random_bytes(&tipc_random, sizeof(tipc_random));
- res = tipc_handler_start();
- if (!res)
- res = tipc_ref_table_init(tipc_max_ports, tipc_random);
- if (!res)
- res = tipc_nametbl_init();
- if (!res)
- res = tipc_netlink_start();
- if (!res)
- res = tipc_socket_init();
- if (!res)
- res = tipc_register_sysctl();
- if (!res)
- res = tipc_subscr_start();
- if (!res)
- res = tipc_cfg_init();
- if (res) {
- tipc_handler_stop();
- tipc_core_stop();
- }
- return res;
+ err = tipc_handler_start();
+ if (err)
+ goto out_handler;
+
+ err = tipc_ref_table_init(tipc_max_ports, tipc_random);
+ if (err)
+ goto out_reftbl;
+
+ err = tipc_nametbl_init();
+ if (err)
+ goto out_nametbl;
+
+ err = tipc_netlink_start();
+ if (err)
+ goto out_netlink;
+
+ err = tipc_socket_init();
+ if (err)
+ goto out_socket;
+
+ err = tipc_register_sysctl();
+ if (err)
+ goto out_sysctl;
+
+ err = tipc_subscr_start();
+ if (err)
+ goto out_subscr;
+
+ err = tipc_cfg_init();
+ if (err)
+ goto out_cfg;
+
+ err = tipc_bearer_setup();
+ if (err)
+ goto out_bearer;
+
+ return 0;
+out_bearer:
+ tipc_cfg_stop();
+out_cfg:
+ tipc_subscr_stop();
+out_subscr:
+ tipc_unregister_sysctl();
+out_sysctl:
+ tipc_socket_stop();
+out_socket:
+ tipc_netlink_stop();
+out_netlink:
+ tipc_nametbl_stop();
+out_nametbl:
+ tipc_ref_table_stop();
+out_reftbl:
+ tipc_handler_stop();
+out_handler:
+ return err;
}
static int __init tipc_init(void)
@@ -174,8 +185,6 @@ static int __init tipc_init(void)
static void __exit tipc_exit(void)
{
- tipc_handler_stop();
- tipc_core_stop_net();
tipc_core_stop();
pr_info("Deactivated\n");
}
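
Editor's note on the rewritten tipc_core_start() above: it follows the common kernel "goto unwind" shape, where each subsystem that initialised successfully gets a matching label that tears it down, in reverse order, when a later step fails. A compact stand-alone sketch of the pattern (illustrative only, not part of the patch):

#include <stdio.h>

static int init_step(const char *name, int should_fail)
{
        printf("init %s\n", name);
        return should_fail ? -1 : 0;
}

static int start(void)
{
        int err;

        err = init_step("A", 0);
        if (err)
                goto out;
        err = init_step("B", 0);
        if (err)
                goto out_a;
        err = init_step("C", 1);        /* pretend the last step fails */
        if (err)
                goto out_b;
        return 0;

out_b:
        printf("undo B\n");
out_a:
        printf("undo A\n");
out:
        return err;
}

int main(void)
{
        return start() ? 1 : 0;
}
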
diff --git a/net/tipc/core.h b/net/tipc/core.h
index 1ff477b0450d..4dfe137587bb 100644
--- a/net/tipc/core.h
+++ b/net/tipc/core.h
@@ -90,7 +90,6 @@ extern int tipc_random __read_mostly;
/*
* Routines available to privileged subsystems
*/
-int tipc_core_start_net(unsigned long);
int tipc_handler_start(void);
void tipc_handler_stop(void);
int tipc_netlink_start(void);
@@ -192,6 +191,7 @@ static inline void k_term_timer(struct timer_list *timer)
struct tipc_skb_cb {
void *handle;
+ bool deferred;
};
#define TIPC_SKB_CB(__skb) ((struct tipc_skb_cb *)&((__skb)->cb[0]))
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index e4bc8a296744..1fabf160501f 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
spin_lock_bh(&qitem_lock);
if (!handler_enabled) {
- pr_err("Signal request ignored by handler\n");
spin_unlock_bh(&qitem_lock);
return -ENOPROTOOPT;
}
diff --git a/net/tipc/link.c b/net/tipc/link.c
index d4b5de41b682..da6018beb6eb 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1391,6 +1391,12 @@ static int link_recv_buf_validate(struct sk_buff *buf)
u32 hdr_size;
u32 min_hdr_size;
+ /* If this packet comes from the defer queue, the skb has already
+ * been validated
+ */
+ if (unlikely(TIPC_SKB_CB(buf)->deferred))
+ return 1;
+
if (unlikely(buf->len < MIN_H_SIZE))
return 0;
@@ -1703,6 +1709,7 @@ static void link_handle_out_of_seq_msg(struct tipc_link *l_ptr,
&l_ptr->newest_deferred_in, buf)) {
l_ptr->deferred_inqueue_sz++;
l_ptr->stats.deferred_recv++;
+ TIPC_SKB_CB(buf)->deferred = true;
if ((l_ptr->deferred_inqueue_sz % 16) == 1)
tipc_link_send_proto_msg(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
} else
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 92a1533af4e0..042e8e3cabc0 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -941,20 +941,48 @@ int tipc_nametbl_init(void)
return 0;
}
-void tipc_nametbl_stop(void)
+/**
+ * tipc_purge_publications - remove all publications for a given type
+ *
+ * tipc_nametbl_lock must be held when calling this function
+ */
+static void tipc_purge_publications(struct name_seq *seq)
{
- u32 i;
+ struct publication *publ, *safe;
+ struct sub_seq *sseq;
+ struct name_info *info;
- if (!table.types)
+ if (!seq->sseqs) {
+ nameseq_delete_empty(seq);
return;
+ }
+ sseq = seq->sseqs;
+ info = sseq->info;
+ list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
+ tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node,
+ publ->ref, publ->key);
+ }
+}
+
+void tipc_nametbl_stop(void)
+{
+ u32 i;
+ struct name_seq *seq;
+ struct hlist_head *seq_head;
+ struct hlist_node *safe;
- /* Verify name table is empty, then release it */
+ /* Verify name table is empty and purge any lingering
+ * publications, then release the name table
+ */
write_lock_bh(&tipc_nametbl_lock);
for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
if (hlist_empty(&table.types[i]))
continue;
- pr_err("nametbl_stop(): orphaned hash chain detected\n");
- break;
+ seq_head = &table.types[i];
+ hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) {
+ tipc_purge_publications(seq);
+ }
+ continue;
}
kfree(table.types);
table.types = NULL;
diff --git a/net/tipc/netlink.c b/net/tipc/netlink.c
index 9f72a6376362..3aaf73de9e2d 100644
--- a/net/tipc/netlink.c
+++ b/net/tipc/netlink.c
@@ -83,8 +83,6 @@ static struct genl_ops tipc_genl_ops[] = {
},
};
-static int tipc_genl_family_registered;
-
int tipc_netlink_start(void)
{
int res;
@@ -94,16 +92,10 @@ int tipc_netlink_start(void)
pr_err("Failed to register netlink interface\n");
return res;
}
-
- tipc_genl_family_registered = 1;
return 0;
}
void tipc_netlink_stop(void)
{
- if (!tipc_genl_family_registered)
- return;
-
genl_unregister_family(&tipc_genl_family);
- tipc_genl_family_registered = 0;
}
diff --git a/net/tipc/ref.c b/net/tipc/ref.c
index 2a2a938dc22c..de3d593e2fee 100644
--- a/net/tipc/ref.c
+++ b/net/tipc/ref.c
@@ -126,9 +126,6 @@ int tipc_ref_table_init(u32 requested_size, u32 start)
*/
void tipc_ref_table_stop(void)
{
- if (!tipc_ref_table.entries)
- return;
-
vfree(tipc_ref_table.entries);
tipc_ref_table.entries = NULL;
}
diff --git a/net/tipc/server.c b/net/tipc/server.c
index b635ca347a87..646a930eefbf 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct tipc_server *s = con->server;
if (con->sock) {
tipc_sock_release_local(con->sock);
@@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref)
}
tipc_clean_outqueues(con);
-
- if (con->conid)
- s->tipc_conn_shutdown(con->conid, con->usr_data);
-
kfree(con);
}
@@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con)
struct tipc_server *s = con->server;
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+ if (con->conid)
+ s->tipc_conn_shutdown(con->conid, con->usr_data);
+
spin_lock_bh(&s->idr_lock);
idr_remove(&s->conn_idr, con->conid);
s->idr_in_use--;
@@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags))
+ if (test_bit(CF_CONNECTED, &con->flags)) {
if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
-
+ } else {
+ conn_put(con);
+ }
return 0;
}
@@ -573,7 +573,6 @@ int tipc_server_start(struct tipc_server *s)
kmem_cache_destroy(s->rcvbuf_cache);
return ret;
}
- s->enabled = 1;
return ret;
}
@@ -583,10 +582,6 @@ void tipc_server_stop(struct tipc_server *s)
int total = 0;
int id;
- if (!s->enabled)
- return;
-
- s->enabled = 0;
spin_lock_bh(&s->idr_lock);
for (id = 0; total < s->idr_in_use; id++) {
con = idr_find(&s->conn_idr, id);
diff --git a/net/tipc/server.h b/net/tipc/server.h
index 98b23f20bc0f..be817b0b547e 100644
--- a/net/tipc/server.h
+++ b/net/tipc/server.h
@@ -56,7 +56,6 @@
* @name: server name
* @imp: message importance
* @type: socket type
- * @enabled: identify whether server is launched or not
*/
struct tipc_server {
struct idr conn_idr;
@@ -74,7 +73,6 @@ struct tipc_server {
const char name[TIPC_SERVER_NAME_LEN];
int imp;
int type;
- int enabled;
};
int tipc_conn_sendmsg(struct tipc_server *s, int conid,
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index aab4948f0aff..0ed0eaa62f29 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -70,8 +70,6 @@ static const struct proto_ops msg_ops;
static struct proto tipc_proto;
static struct proto tipc_proto_kern;
-static int sockets_enabled;
-
/*
* Revised TIPC socket locking policy:
*
@@ -999,7 +997,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo)
for (;;) {
prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
- if (skb_queue_empty(&sk->sk_receive_queue)) {
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
if (sock->state == SS_DISCONNECTING) {
err = -ENOTCONN;
break;
@@ -1625,7 +1623,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo)
for (;;) {
prepare_to_wait_exclusive(sk_sleep(sk), &wait,
TASK_INTERRUPTIBLE);
- if (skb_queue_empty(&sk->sk_receive_queue)) {
+ if (timeo && skb_queue_empty(&sk->sk_receive_queue)) {
release_sock(sk);
timeo = schedule_timeout(timeo);
lock_sock(sk);
@@ -2027,8 +2025,6 @@ int tipc_socket_init(void)
proto_unregister(&tipc_proto);
goto out;
}
-
- sockets_enabled = 1;
out:
return res;
}
@@ -2038,10 +2034,6 @@ int tipc_socket_init(void)
*/
void tipc_socket_stop(void)
{
- if (!sockets_enabled)
- return;
-
- sockets_enabled = 0;
sock_unregister(tipc_family_ops.family);
proto_unregister(&tipc_proto);
}
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 7cb0bd5b1176..642437231ad5 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower,
{
struct tipc_subscriber *subscriber = sub->subscriber;
struct kvec msg_sect;
- int ret;
msg_sect.iov_base = (void *)&sub->evt;
msg_sect.iov_len = sizeof(struct tipc_event);
-
sub->evt.event = htohl(event, sub->swap);
sub->evt.found_lower = htohl(found_lower, sub->swap);
sub->evt.found_upper = htohl(found_upper, sub->swap);
sub->evt.port.ref = htohl(port_ref, sub->swap);
sub->evt.port.node = htohl(node, sub->swap);
- ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL,
- msg_sect.iov_base, msg_sect.iov_len);
- if (ret < 0)
- pr_err("Sending subscription event failed, no memory\n");
+ tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base,
+ msg_sect.iov_len);
}
/**
@@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub)
/* The spin lock per subscriber is used to protect its members */
spin_lock_bh(&subscriber->lock);
- /* Validate if the connection related to the subscriber is
- * closed (in case subscriber is terminating)
- */
- if (subscriber->conid == 0) {
- spin_unlock_bh(&subscriber->lock);
- return;
- }
-
/* Validate timeout (in case subscription is being cancelled) */
if (sub->timeout == TIPC_WAIT_FOREVER) {
spin_unlock_bh(&subscriber->lock);
@@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber)
spin_lock_bh(&subscriber->lock);
- /* Invalidate subscriber reference */
- subscriber->conid = 0;
-
/* Destroy any existing subscriptions for subscriber */
list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list,
subscription_list) {
@@ -278,9 +263,9 @@ static void subscr_cancel(struct tipc_subscr *s,
*
* Called with subscriber lock held.
*/
-static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
- struct tipc_subscriber *subscriber)
-{
+static int subscr_subscribe(struct tipc_subscr *s,
+ struct tipc_subscriber *subscriber,
+ struct tipc_subscription **sub_p) {
struct tipc_subscription *sub;
int swap;
@@ -291,23 +276,21 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
if (s->filter & htohl(TIPC_SUB_CANCEL, swap)) {
s->filter &= ~htohl(TIPC_SUB_CANCEL, swap);
subscr_cancel(s, subscriber);
- return NULL;
+ return 0;
}
/* Refuse subscription if global limit exceeded */
if (atomic_read(&subscription_count) >= TIPC_MAX_SUBSCRIPTIONS) {
pr_warn("Subscription rejected, limit reached (%u)\n",
TIPC_MAX_SUBSCRIPTIONS);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
/* Allocate subscription object */
sub = kmalloc(sizeof(*sub), GFP_ATOMIC);
if (!sub) {
pr_warn("Subscription rejected, no memory\n");
- subscr_terminate(subscriber);
- return NULL;
+ return -ENOMEM;
}
/* Initialize subscription object */
@@ -321,8 +304,7 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
(sub->seq.lower > sub->seq.upper)) {
pr_warn("Subscription rejected, illegal request\n");
kfree(sub);
- subscr_terminate(subscriber);
- return NULL;
+ return -EINVAL;
}
INIT_LIST_HEAD(&sub->nameseq_list);
list_add(&sub->subscription_list, &subscriber->subscription_list);
@@ -335,8 +317,8 @@ static struct tipc_subscription *subscr_subscribe(struct tipc_subscr *s,
(Handler)subscr_timeout, (unsigned long)sub);
k_start_timer(&sub->timer, sub->timeout);
}
-
- return sub;
+ *sub_p = sub;
+ return 0;
}
/* Handle one termination request for the subscriber */
@@ -350,10 +332,14 @@ static void subscr_conn_msg_event(int conid, struct sockaddr_tipc *addr,
void *usr_data, void *buf, size_t len)
{
struct tipc_subscriber *subscriber = usr_data;
- struct tipc_subscription *sub;
+ struct tipc_subscription *sub = NULL;
spin_lock_bh(&subscriber->lock);
- sub = subscr_subscribe((struct tipc_subscr *)buf, subscriber);
+ if (subscr_subscribe((struct tipc_subscr *)buf, subscriber, &sub) < 0) {
+ spin_unlock_bh(&subscriber->lock);
+ subscr_terminate(subscriber);
+ return;
+ }
if (sub)
tipc_nametbl_subscribe(sub);
spin_unlock_bh(&subscriber->lock);
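The subscr_subscribe() rework above swaps a NULL-on-failure return for an int status plus an out-parameter, so the caller can drop the subscriber lock before terminating the peer on error. A minimal userspace sketch of that calling convention, using made-up names rather than TIPC APIs:

/* Sketch (not TIPC code): return a status and hand the object back
 * through an out-parameter, so the caller decides what to do on
 * failure after releasing its own locks. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct item { int value; };

static int make_item(int value, struct item **out)
{
	struct item *it;

	if (value < 0)
		return -EINVAL;		/* reject without side effects */

	it = malloc(sizeof(*it));
	if (!it)
		return -ENOMEM;

	it->value = value;
	*out = it;
	return 0;
}

int main(void)
{
	struct item *it = NULL;

	if (make_item(42, &it) < 0) {
		/* caller handles the failure, e.g. terminates the peer */
		fprintf(stderr, "make_item failed\n");
		return 1;
	}
	printf("value=%d\n", it->value);
	free(it);
	return 0;
}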
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 29fc8bee9702..ce6ec6c2f4de 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -163,9 +163,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb)
static inline unsigned int unix_hash_fold(__wsum n)
{
- unsigned int hash = (__force unsigned int)n;
+ unsigned int hash = (__force unsigned int)csum_fold(n);
- hash ^= hash>>16;
hash ^= hash>>8;
return hash&(UNIX_HASH_SIZE-1);
}
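The unix_hash_fold() fix folds the 32-bit checksum down to 16 bits with csum_fold() before mixing, instead of XOR-ing the raw value's halves. A small standalone sketch of the same folding idea; the fold helper here is a userspace stand-in for csum_fold(), and UNIX_HASH_SIZE_DEMO is illustrative:

/* Userspace sketch of folding a 32-bit sum into a small hash bucket:
 * fold to 16 bits first, then XOR the remaining bytes together. */
#include <stdint.h>
#include <stdio.h>

#define UNIX_HASH_SIZE_DEMO 256

static uint16_t fold_to_16(uint32_t sum)
{
	sum = (sum & 0xffff) + (sum >> 16);	/* add carries back in */
	sum = (sum & 0xffff) + (sum >> 16);	/* once more for overflow */
	return (uint16_t)~sum;
}

static unsigned int hash_fold_demo(uint32_t csum)
{
	unsigned int hash = fold_to_16(csum);

	hash ^= hash >> 8;			/* mix the two bytes */
	return hash & (UNIX_HASH_SIZE_DEMO - 1);
}

int main(void)
{
	printf("bucket=%u\n", hash_fold_demo(0x12345678));
	return 0;
}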
diff --git a/net/wireless/core.c b/net/wireless/core.c
index d89dee2259b5..a3bf18d11609 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -203,8 +203,11 @@ void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev,
rdev->opencount--;
- WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev &&
- !rdev->scan_req->notified);
+ if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+ if (WARN_ON(!rdev->scan_req->notified))
+ rdev->scan_req->aborted = true;
+ ___cfg80211_scan_done(rdev, false);
+ }
}
static int cfg80211_rfkill_set_block(void *data, bool blocked)
@@ -440,9 +443,6 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
- /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
- wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
-
/*
* There are major locking problems in nl80211/mac80211 for CSA,
* disable for all drivers until this has been reworked.
@@ -788,8 +788,6 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev,
default:
break;
}
-
- wdev->beacon_interval = 0;
}
static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
@@ -859,8 +857,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
break;
case NETDEV_DOWN:
cfg80211_update_iface_num(rdev, wdev->iftype, -1);
- WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev &&
- !rdev->scan_req->notified);
+ if (rdev->scan_req && rdev->scan_req->wdev == wdev) {
+ if (WARN_ON(!rdev->scan_req->notified))
+ rdev->scan_req->aborted = true;
+ ___cfg80211_scan_done(rdev, false);
+ }
if (WARN_ON(rdev->sched_scan_req &&
rdev->sched_scan_req->dev == wdev->netdev)) {
diff --git a/net/wireless/core.h b/net/wireless/core.h
index 37ec16d7bb1a..f1d193b557b6 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -62,6 +62,7 @@ struct cfg80211_registered_device {
struct rb_root bss_tree;
u32 bss_generation;
struct cfg80211_scan_request *scan_req; /* protected by RTNL */
+ struct sk_buff *scan_msg;
struct cfg80211_sched_scan_request *sched_scan_req;
unsigned long suspend_at;
struct work_struct scan_done_wk;
@@ -361,7 +362,8 @@ int cfg80211_validate_key_settings(struct cfg80211_registered_device *rdev,
struct key_params *params, int key_idx,
bool pairwise, const u8 *mac_addr);
void __cfg80211_scan_done(struct work_struct *wk);
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev);
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+ bool send_message);
void __cfg80211_sched_scan_results(struct work_struct *wk);
int __cfg80211_stop_sched_scan(struct cfg80211_registered_device *rdev,
bool driver_initiated);
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7a742594916e..4fe2e6e2bc76 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1719,9 +1719,10 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
* We can then retry with the larger buffer.
*/
if ((ret == -ENOBUFS || ret == -EMSGSIZE) &&
- !skb->len &&
+ !skb->len && !state->split &&
cb->min_dump_alloc < 4096) {
cb->min_dump_alloc = 4096;
+ state->split_start = 0;
rtnl_unlock();
return 1;
}
@@ -5244,7 +5245,7 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->scan)
return -EOPNOTSUPP;
- if (rdev->scan_req) {
+ if (rdev->scan_req || rdev->scan_msg) {
err = -EBUSY;
goto unlock;
}
@@ -10011,40 +10012,31 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
-void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev)
+struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, bool aborted)
{
struct sk_buff *msg;
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
- return;
+ return NULL;
if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
- NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
+ aborted ? NL80211_CMD_SCAN_ABORTED :
+ NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
nlmsg_free(msg);
- return;
+ return NULL;
}
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
- NL80211_MCGRP_SCAN, GFP_KERNEL);
+ return msg;
}
-void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev)
+void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
{
- struct sk_buff *msg;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
- NL80211_CMD_SCAN_ABORTED) < 0) {
- nlmsg_free(msg);
- return;
- }
-
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index b1b231324e10..75799746d845 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -8,10 +8,10 @@ void nl80211_exit(void);
void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev);
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
-void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev);
-void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
- struct wireless_dev *wdev);
+struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
+ struct wireless_dev *wdev, bool aborted);
+void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg);
void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 cmd);
void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 9b897fca7487..f0541370e68e 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1700,7 +1700,7 @@ static void reg_process_hint(struct regulatory_request *reg_request)
return;
case NL80211_REGDOM_SET_BY_USER:
treatment = reg_process_hint_user(reg_request);
- if (treatment == REG_REQ_OK ||
+ if (treatment == REG_REQ_IGNORE ||
treatment == REG_REQ_ALREADY_SET)
return;
schedule_delayed_work(&reg_timeout, msecs_to_jiffies(3142));
@@ -2373,6 +2373,7 @@ static int reg_set_rd_country_ie(const struct ieee80211_regdomain *rd,
int set_regdom(const struct ieee80211_regdomain *rd)
{
struct regulatory_request *lr;
+ bool user_reset = false;
int r;
if (!reg_is_valid_request(rd->alpha2)) {
@@ -2389,6 +2390,7 @@ int set_regdom(const struct ieee80211_regdomain *rd)
break;
case NL80211_REGDOM_SET_BY_USER:
r = reg_set_rd_user(rd, lr);
+ user_reset = true;
break;
case NL80211_REGDOM_SET_BY_DRIVER:
r = reg_set_rd_driver(rd, lr);
@@ -2402,8 +2404,14 @@ int set_regdom(const struct ieee80211_regdomain *rd)
}
if (r) {
- if (r == -EALREADY)
+ switch (r) {
+ case -EALREADY:
reg_set_request_processed();
+ break;
+ default:
+ /* Back to world regulatory in case of errors */
+ restore_regulatory_settings(user_reset);
+ }
kfree(rd);
return r;
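set_regdom() now distinguishes -EALREADY from genuine failures and falls back to the world regulatory domain on the latter. A hedged sketch of that switch-with-default-restore shape, using illustrative helper names rather than the regulatory API:

#include <errno.h>
#include <stdio.h>

static void restore_defaults(int user_reset)
{
	printf("restoring defaults (user_reset=%d)\n", user_reset);
}

static int apply_request(int r, int user_reset)
{
	if (r) {
		switch (r) {
		case -EALREADY:
			/* already satisfied: nothing to undo */
			break;
		default:
			/* back to known-good settings on real errors */
			restore_defaults(user_reset);
		}
		return r;
	}
	return 0;
}

int main(void)
{
	apply_request(-EALREADY, 1);
	apply_request(-EINVAL, 1);
	return 0;
}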
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index b528e31da2cf..d1ed4aebbbb7 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -161,18 +161,25 @@ static void __cfg80211_bss_expire(struct cfg80211_registered_device *dev,
dev->bss_generation++;
}
-void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
+void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
+ bool send_message)
{
struct cfg80211_scan_request *request;
struct wireless_dev *wdev;
+ struct sk_buff *msg;
#ifdef CONFIG_CFG80211_WEXT
union iwreq_data wrqu;
#endif
ASSERT_RTNL();
- request = rdev->scan_req;
+ if (rdev->scan_msg) {
+ nl80211_send_scan_result(rdev, rdev->scan_msg);
+ rdev->scan_msg = NULL;
+ return;
+ }
+ request = rdev->scan_req;
if (!request)
return;
@@ -186,18 +193,16 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
if (wdev->netdev)
cfg80211_sme_scan_done(wdev->netdev);
- if (request->aborted) {
- nl80211_send_scan_aborted(rdev, wdev);
- } else {
- if (request->flags & NL80211_SCAN_FLAG_FLUSH) {
- /* flush entries from previous scans */
- spin_lock_bh(&rdev->bss_lock);
- __cfg80211_bss_expire(rdev, request->scan_start);
- spin_unlock_bh(&rdev->bss_lock);
- }
- nl80211_send_scan_done(rdev, wdev);
+ if (!request->aborted &&
+ request->flags & NL80211_SCAN_FLAG_FLUSH) {
+ /* flush entries from previous scans */
+ spin_lock_bh(&rdev->bss_lock);
+ __cfg80211_bss_expire(rdev, request->scan_start);
+ spin_unlock_bh(&rdev->bss_lock);
}
+ msg = nl80211_build_scan_msg(rdev, wdev, request->aborted);
+
#ifdef CONFIG_CFG80211_WEXT
if (wdev->netdev && !request->aborted) {
memset(&wrqu, 0, sizeof(wrqu));
@@ -211,6 +216,11 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev)
rdev->scan_req = NULL;
kfree(request);
+
+ if (!send_message)
+ rdev->scan_msg = msg;
+ else
+ nl80211_send_scan_result(rdev, msg);
}
void __cfg80211_scan_done(struct work_struct *wk)
@@ -221,7 +231,7 @@ void __cfg80211_scan_done(struct work_struct *wk)
scan_done_wk);
rtnl_lock();
- ___cfg80211_scan_done(rdev);
+ ___cfg80211_scan_done(rdev, true);
rtnl_unlock();
}
@@ -1079,7 +1089,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- if (rdev->scan_req) {
+ if (rdev->scan_req || rdev->scan_msg) {
err = -EBUSY;
goto out;
}
@@ -1481,7 +1491,7 @@ int cfg80211_wext_giwscan(struct net_device *dev,
if (IS_ERR(rdev))
return PTR_ERR(rdev);
- if (rdev->scan_req)
+ if (rdev->scan_req || rdev->scan_msg)
return -EAGAIN;
res = ieee80211_scan_results(rdev, info, extra, data->length);
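___cfg80211_scan_done() now builds the netlink notification while the request is still valid and either sends it immediately or parks it in rdev->scan_msg for a later caller to flush. A userspace sketch of that build-now/send-later pattern; the names here are illustrative, not cfg80211 APIs:

#include <stdio.h>
#include <stdlib.h>

struct msg { char text[64]; };

static struct msg *pending;	/* stand-in for rdev->scan_msg */

static struct msg *build_msg(const char *what)
{
	struct msg *m = malloc(sizeof(*m));

	if (m)
		snprintf(m->text, sizeof(m->text), "scan done: %s", what);
	return m;
}

static void scan_done(int send_now)
{
	struct msg *m = build_msg(send_now ? "immediate" : "deferred");

	if (!m)
		return;
	if (send_now) {
		puts(m->text);
		free(m);
	} else {
		pending = m;	/* deliver later, once it is safe to */
	}
}

static void flush_pending(void)
{
	if (pending) {
		puts(pending->text);
		free(pending);
		pending = NULL;
	}
}

int main(void)
{
	scan_done(0);
	flush_pending();
	scan_done(1);
	return 0;
}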
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index a63509118508..f04d4c32e96e 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -67,7 +67,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
ASSERT_RDEV_LOCK(rdev);
ASSERT_WDEV_LOCK(wdev);
- if (rdev->scan_req)
+ if (rdev->scan_req || rdev->scan_msg)
return -EBUSY;
if (wdev->conn->params.channel)
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4b98b25793c5..1d5c7bf29938 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1158,7 +1158,7 @@ static struct xfrm_policy *__xfrm_policy_unlink(struct xfrm_policy *pol,
if (hlist_unhashed(&pol->bydst))
return NULL;
- hlist_del(&pol->bydst);
+ hlist_del_init(&pol->bydst);
hlist_del(&pol->byidx);
list_del(&pol->walk.all);
net->xfrm.policy_count[dir]--;
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index a26b7aa79475..40f1b3e92e78 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -1159,6 +1159,11 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
}
x->props.aalgo = orig->props.aalgo;
+ if (orig->aead) {
+ x->aead = xfrm_algo_aead_clone(orig->aead);
+ if (!x->aead)
+ goto error;
+ }
if (orig->ealg) {
x->ealg = xfrm_algo_clone(orig->ealg);
if (!x->ealg)
@@ -1201,6 +1206,9 @@ static struct xfrm_state *xfrm_state_clone(struct xfrm_state *orig, int *errp)
x->props.flags = orig->props.flags;
x->props.extra_flags = orig->props.extra_flags;
+ x->tfcpad = orig->tfcpad;
+ x->replay_maxdiff = orig->replay_maxdiff;
+ x->replay_maxage = orig->replay_maxage;
x->curlft.add_time = orig->curlft.add_time;
x->km.state = orig->km.state;
x->km.seq = orig->km.seq;
@@ -1215,11 +1223,12 @@ out:
return NULL;
}
-/* net->xfrm.xfrm_state_lock is held */
struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *net)
{
unsigned int h;
- struct xfrm_state *x;
+ struct xfrm_state *x = NULL;
+
+ spin_lock_bh(&net->xfrm.xfrm_state_lock);
if (m->reqid) {
h = xfrm_dst_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1236,7 +1245,7 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
m->old_family))
continue;
xfrm_state_hold(x);
- return x;
+ break;
}
} else {
h = xfrm_src_hash(net, &m->old_daddr, &m->old_saddr,
@@ -1251,11 +1260,13 @@ struct xfrm_state *xfrm_migrate_state_find(struct xfrm_migrate *m, struct net *n
m->old_family))
continue;
xfrm_state_hold(x);
- return x;
+ break;
}
}
- return NULL;
+ spin_unlock_bh(&net->xfrm.xfrm_state_lock);
+
+ return x;
}
EXPORT_SYMBOL(xfrm_migrate_state_find);
@@ -1451,7 +1462,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
{
int err = 0;
struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
- struct net *net = xs_net(*dst);
+ struct net *net = xs_net(*src);
if (!afinfo)
return -EAFNOSUPPORT;
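xfrm_migrate_state_find() now takes xfrm_state_lock itself and records a match with break instead of returning mid-loop, so there is a single unlock on the way out. A small sketch of that shape, with a pthread mutex standing in for the kernel spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static int table[] = { 3, 7, 11, 42 };

static int find_entry(int key)
{
	int found = -1;
	unsigned int i;

	pthread_mutex_lock(&table_lock);
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (table[i] != key)
			continue;
		found = (int)i;
		break;			/* keep a single unlock below */
	}
	pthread_mutex_unlock(&table_lock);

	return found;
}

int main(void)
{
	printf("index=%d\n", find_entry(42));
	return 0;
}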
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index 1ae3ec7c18b0..2f7ddc3a59b4 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -32,11 +32,6 @@
#include <linux/in6.h>
#endif
-static inline int aead_len(struct xfrm_algo_aead *alg)
-{
- return sizeof(*alg) + ((alg->alg_key_len + 7) / 8);
-}
-
static int verify_one_alg(struct nlattr **attrs, enum xfrm_attr_type_t type)
{
struct nlattr *rt = attrs[type];
@@ -1226,7 +1221,7 @@ static int copy_from_user_sec_ctx(struct xfrm_policy *pol, struct nlattr **attrs
return 0;
uctx = nla_data(rt);
- return security_xfrm_policy_alloc(&pol->security, uctx);
+ return security_xfrm_policy_alloc(&pol->security, uctx, GFP_KERNEL);
}
static void copy_templates(struct xfrm_policy *xp, struct xfrm_user_tmpl *ut,
@@ -1631,7 +1626,7 @@ static int xfrm_get_policy(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
- err = security_xfrm_policy_alloc(&ctx, uctx);
+ err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
@@ -1933,7 +1928,7 @@ static int xfrm_add_pol_expire(struct sk_buff *skb, struct nlmsghdr *nlh,
if (rt) {
struct xfrm_user_sec_ctx *uctx = nla_data(rt);
- err = security_xfrm_policy_alloc(&ctx, uctx);
+ err = security_xfrm_policy_alloc(&ctx, uctx, GFP_KERNEL);
if (err)
return err;
}
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 49392ecbef17..79c059e70860 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -152,6 +152,7 @@ ld_flags = $(LDFLAGS) $(ldflags-y)
dtc_cpp_flags = -Wp,-MD,$(depfile).pre.tmp -nostdinc \
-I$(srctree)/arch/$(SRCARCH)/boot/dts \
-I$(srctree)/arch/$(SRCARCH)/boot/dts/include \
+ -I$(srctree)/drivers/of/testcase-data \
-undef -D__DTS__
# Finds the multi-part object the current object will be linked into
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 0ea2a1e24ade..464dcef79b35 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -471,7 +471,7 @@ sub seed_camelcase_includes {
$camelcase_seeded = 1;
- if (-d ".git") {
+ if (-e ".git") {
my $git_last_include_commit = `git log --no-merges --pretty=format:"%h%n" -1 -- include`;
chomp $git_last_include_commit;
$camelcase_cache = ".checkpatch-camelcase.git.$git_last_include_commit";
@@ -499,7 +499,7 @@ sub seed_camelcase_includes {
return;
}
- if (-d ".git") {
+ if (-e ".git") {
$files = `git ls-files "include/*.h"`;
@include_files = split('\n', $files);
}
diff --git a/scripts/gen_initramfs_list.sh b/scripts/gen_initramfs_list.sh
index ef474098d9f1..17fa901418ae 100644
--- a/scripts/gen_initramfs_list.sh
+++ b/scripts/gen_initramfs_list.sh
@@ -257,7 +257,7 @@ case "$arg" in
&& compr="lzop -9 -f"
echo "$output_file" | grep -q "\.lz4$" \
&& [ -x "`which lz4 2> /dev/null`" ] \
- && compr="lz4 -9 -f"
+ && compr="lz4 -l -9 -f"
echo "$output_file" | grep -q "\.cpio$" && compr="cat"
shift
;;
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 9c3986f4140c..41987885bd31 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -95,7 +95,7 @@ my %VCS_cmds;
my %VCS_cmds_git = (
"execute_cmd" => \&git_execute_cmd,
- "available" => '(which("git") ne "") && (-d ".git")',
+ "available" => '(which("git") ne "") && (-e ".git")',
"find_signers_cmd" =>
"git log --no-color --follow --since=\$email_git_since " .
'--numstat --no-merges ' .
diff --git a/scripts/mod/file2alias.c b/scripts/mod/file2alias.c
index 23708636b05c..25e5cb0aaef6 100644
--- a/scripts/mod/file2alias.c
+++ b/scripts/mod/file2alias.c
@@ -210,8 +210,8 @@ static void do_usb_entry(void *symval,
range_lo < 0x9 ? "[%X-9" : "[%X",
range_lo);
sprintf(alias + strlen(alias),
- range_hi > 0xA ? "a-%X]" : "%X]",
- range_lo);
+ range_hi > 0xA ? "A-%X]" : "%X]",
+ range_hi);
}
}
if (bcdDevice_initial_digits < (sizeof(bcdDevice_lo) * 2 - 1))
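The do_usb_entry() fix prints the upper bound of the bcdDevice digit range, in upper case, after the dash, so a span such as 3..C becomes "[3-9A-C]" rather than repeating the lower bound. A standalone reproduction of the corrected formatting:

#include <stdio.h>
#include <string.h>

static void append_range(char *alias, unsigned int lo, unsigned int hi)
{
	sprintf(alias + strlen(alias),
		lo < 0x9 ? "[%X-9" : "[%X", lo);
	sprintf(alias + strlen(alias),
		hi > 0xA ? "A-%X]" : "%X]", hi);	/* upper bound, upper case */
}

int main(void)
{
	char alias[32] = "";

	append_range(alias, 0x3, 0xC);
	printf("%s\n", alias);	/* prints [3-9A-C] */
	return 0;
}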
diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c
index 40610984a1b5..99a45fdc1bbf 100644
--- a/scripts/mod/modpost.c
+++ b/scripts/mod/modpost.c
@@ -1502,6 +1502,16 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
#define R_ARM_JUMP24 29
#endif
+#ifndef R_ARM_THM_CALL
+#define R_ARM_THM_CALL 10
+#endif
+#ifndef R_ARM_THM_JUMP24
+#define R_ARM_THM_JUMP24 30
+#endif
+#ifndef R_ARM_THM_JUMP19
+#define R_ARM_THM_JUMP19 51
+#endif
+
static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
{
unsigned int r_typ = ELF_R_TYPE(r->r_info);
@@ -1515,6 +1525,9 @@ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r)
case R_ARM_PC24:
case R_ARM_CALL:
case R_ARM_JUMP24:
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ case R_ARM_THM_JUMP19:
/* From ARM ABI: ((S + A) | T) - P */
r->r_addend = (int)(long)(elf->hdr +
sechdr->sh_offset +
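The modpost change defines the Thumb relocation constants itself when the toolchain headers lack them, then handles them alongside the existing ARM cases. A compilable sketch of that guard-and-switch pattern, reusing the constant values quoted in the patch:

#include <stdio.h>

#ifndef R_ARM_THM_CALL
#define R_ARM_THM_CALL   10
#endif
#ifndef R_ARM_THM_JUMP24
#define R_ARM_THM_JUMP24 30
#endif
#ifndef R_ARM_THM_JUMP19
#define R_ARM_THM_JUMP19 51
#endif

static const char *describe(unsigned int r_typ)
{
	switch (r_typ) {
	case R_ARM_THM_CALL:
	case R_ARM_THM_JUMP24:
	case R_ARM_THM_JUMP19:
		return "Thumb call/jump: addend follows ((S + A) | T) - P";
	default:
		return "unhandled relocation type";
	}
}

int main(void)
{
	printf("%s\n", describe(R_ARM_THM_CALL));
	return 0;
}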
diff --git a/security/Kconfig b/security/Kconfig
index e9c6ac724fef..beb86b500adf 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -103,7 +103,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
- default 32768 if ARM
+ default 32768 if ARM || (ARM64 && COMPAT)
default 65536
help
This is the portion of low virtual memory which should be protected
diff --git a/security/capability.c b/security/capability.c
index 8b4f24ae4338..21e2b9cae685 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -757,7 +757,8 @@ static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx)
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
return 0;
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d46cbc5e335e..2fb2576dc644 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator(const void *object,
kenter("{%d}", key->serial);
- BUG_ON(key != ctx->match_data);
+ /* We might get a keyring with matching index-key that is nonetheless a
+ * different keyring. */
+ if (key != ctx->match_data)
+ return 0;
+
ctx->result = ERR_PTR(-EDEADLK);
return 1;
}
diff --git a/security/security.c b/security/security.c
index 15b6928592ef..919cad93ac82 100644
--- a/security/security.c
+++ b/security/security.c
@@ -1317,9 +1317,11 @@ void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
#ifdef CONFIG_SECURITY_NETWORK_XFRM
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
- return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx);
+ return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4b34847208cc..b332e2cc0954 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -668,7 +668,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (flags[i] == SBLABEL_MNT)
continue;
rc = security_context_to_sid(mount_options[i],
- strlen(mount_options[i]), &sid);
+ strlen(mount_options[i]), &sid, GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2489,7 +2489,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
if (flags[i] == SBLABEL_MNT)
continue;
len = strlen(mount_options[i]);
- rc = security_context_to_sid(mount_options[i], len, &sid);
+ rc = security_context_to_sid(mount_options[i], len, &sid,
+ GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2893,7 +2894,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
if (rc)
return rc;
- rc = security_context_to_sid(value, size, &newsid);
+ rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
if (rc == -EINVAL) {
if (!capable(CAP_MAC_ADMIN)) {
struct audit_buffer *ab;
@@ -3050,7 +3051,7 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
if (!value || !size)
return -EACCES;
- rc = security_context_to_sid((void *)value, size, &newsid);
+ rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
if (rc)
return rc;
@@ -5529,7 +5530,7 @@ static int selinux_setprocattr(struct task_struct *p,
str[size-1] = 0;
size--;
}
- error = security_context_to_sid(value, size, &sid);
+ error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
if (!capable(CAP_MAC_ADMIN)) {
struct audit_buffer *ab;
@@ -5638,7 +5639,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return security_context_to_sid(secdata, seclen, secid);
+ return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
}
static void selinux_release_secctx(char *secdata, u32 seclen)
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 8ed8daf7f1ee..ce7852cf526b 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -134,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext,
int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
int security_context_to_sid(const char *scontext, u32 scontext_len,
- u32 *out_sid);
+ u32 *out_sid, gfp_t gfp);
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 48c3cc94c168..9f0584710c85 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -10,7 +10,8 @@
#include <net/flow.h>
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx);
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 332ac8a80cf5..2df7b900e259 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -17,6 +17,7 @@
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>
+#include <linux/sock_diag.h>
#include "flask.h"
#include "av_permissions.h"
@@ -78,6 +79,7 @@ static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 5122affe06a8..d60c0ee66387 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -576,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
if (length)
goto out;
- length = security_context_to_sid(buf, size, &sid);
+ length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -731,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -817,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
objname = namebuf;
}
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -878,11 +882,13 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -934,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s", con, user) != 2)
goto out;
- length = security_context_to_sid(con, strlen(con) + 1, &sid);
+ length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -994,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index c0f498842129..9c5cdc2caaef 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -3338,10 +3338,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
if (rc)
return rc;
- buf[0] = ft->stype;
- buf[1] = ft->ttype;
- buf[2] = ft->tclass;
- buf[3] = otype->otype;
+ buf[0] = cpu_to_le32(ft->stype);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(otype->otype);
rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
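filename_write_helper() must byte-swap each u32 to little-endian before it is written to the binary policy image, which is what the cpu_to_le32() wrappers add. A userspace analogue using htole32(); the file name and field values are illustrative:

#define _DEFAULT_SOURCE
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t buf[4];
	FILE *fp = fopen("demo.bin", "wb");

	if (!fp)
		return 1;

	buf[0] = htole32(1);	/* stype  */
	buf[1] = htole32(2);	/* ttype  */
	buf[2] = htole32(3);	/* tclass */
	buf[3] = htole32(4);	/* otype  */

	/* The same bytes land on disk regardless of host endianness. */
	fwrite(buf, sizeof(uint32_t), 4, fp);
	fclose(fp);
	return 0;
}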
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index c93c21127f0c..4bca49414a40 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -1232,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
struct context context;
int rc = 0;
+ /* An empty security context is never valid. */
+ if (!scontext_len)
+ return -EINVAL;
+
if (!ss_initialized) {
int i;
@@ -1285,16 +1289,18 @@ out:
* @scontext: security context
* @scontext_len: length in bytes
* @sid: security identifier, SID
+ * @gfp: context for the allocation
*
* Obtains a SID associated with the security context that
* has the string representation specified by @scontext.
* Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
* memory is available, or 0 on success.
*/
-int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+ gfp_t gfp)
{
return security_context_to_sid_core(scontext, scontext_len,
- sid, SECSID_NULL, GFP_KERNEL, 0);
+ sid, SECSID_NULL, gfp, 0);
}
/**
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 0462cb3ff0a7..98b042630a9e 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -78,7 +78,8 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
* xfrm_user_sec_ctx context.
*/
static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx)
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
int rc;
const struct task_security_struct *tsec = current_security();
@@ -94,7 +95,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
if (str_len >= PAGE_SIZE)
return -ENOMEM;
- ctx = kmalloc(sizeof(*ctx) + str_len + 1, GFP_KERNEL);
+ ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
if (!ctx)
return -ENOMEM;
@@ -103,7 +104,7 @@ static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
ctx->ctx_len = str_len;
memcpy(ctx->ctx_str, &uctx[1], str_len);
ctx->ctx_str[str_len] = '\0';
- rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid);
+ rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
if (rc)
goto err;
@@ -282,9 +283,10 @@ int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
* LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx)
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
- return selinux_xfrm_alloc_user(ctxp, uctx);
+ return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
}
/*
@@ -332,7 +334,7 @@ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
int selinux_xfrm_state_alloc(struct xfrm_state *x,
struct xfrm_user_sec_ctx *uctx)
{
- return selinux_xfrm_alloc_user(&x->security, uctx);
+ return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
}
/*
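The gfp_t additions above thread the caller's allocation context from security_xfrm_policy_alloc() down to the kmalloc() in selinux_xfrm_alloc_user(), instead of hard-coding GFP_KERNEL at the bottom of the call chain. A minimal sketch of the same parameter-threading idea, with invented names and malloc() standing in for kmalloc():

#include <stdio.h>
#include <stdlib.h>

enum alloc_ctx { CTX_MAY_SLEEP, CTX_ATOMIC };

static void *ctx_alloc(size_t n, enum alloc_ctx ctx)
{
	/* The kernel would pick GFP_KERNEL vs GFP_ATOMIC here. */
	(void)ctx;
	return malloc(n);
}

static int alloc_label(char **out, enum alloc_ctx ctx)
{
	*out = ctx_alloc(16, ctx);
	return *out ? 0 : -1;
}

static int policy_alloc(char **out, enum alloc_ctx ctx)
{
	return alloc_label(out, ctx);	/* pass the caller's context down */
}

int main(void)
{
	char *label;

	if (policy_alloc(&label, CTX_ATOMIC) == 0) {
		puts("allocated with caller-supplied context");
		free(label);
	}
	return 0;
}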
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index 7a20897d33db..7403f348ed14 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -133,7 +133,7 @@ static int snd_compr_open(struct inode *inode, struct file *f)
kfree(data);
}
snd_card_unref(compr->card);
- return 0;
+ return ret;
}
static int snd_compr_free(struct inode *inode, struct file *f)
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index ec4536c8d8d4..dafcf82139e2 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -932,7 +932,7 @@ int snd_hda_bus_new(struct snd_card *card,
}
EXPORT_SYMBOL_GPL(snd_hda_bus_new);
-#ifdef CONFIG_SND_HDA_GENERIC
+#if IS_ENABLED(CONFIG_SND_HDA_GENERIC)
#define is_generic_config(codec) \
(codec->modelname && !strcmp(codec->modelname, "generic"))
#else
@@ -1339,23 +1339,15 @@ get_hda_cvt_setup(struct hda_codec *codec, hda_nid_t nid)
/*
* Dynamic symbol binding for the codec parsers
*/
-#ifdef MODULE
-#define load_parser_sym(sym) ((int (*)(struct hda_codec *))symbol_request(sym))
-#define unload_parser_addr(addr) symbol_put_addr(addr)
-#else
-#define load_parser_sym(sym) (sym)
-#define unload_parser_addr(addr) do {} while (0)
-#endif
#define load_parser(codec, sym) \
- ((codec)->parser = load_parser_sym(sym))
+ ((codec)->parser = (int (*)(struct hda_codec *))symbol_request(sym))
static void unload_parser(struct hda_codec *codec)
{
- if (codec->parser) {
- unload_parser_addr(codec->parser);
- codec->parser = NULL;
- }
+ if (codec->parser)
+ symbol_put_addr(codec->parser);
+ codec->parser = NULL;
}
/*
@@ -1570,7 +1562,7 @@ int snd_hda_codec_update_widgets(struct hda_codec *codec)
EXPORT_SYMBOL_GPL(snd_hda_codec_update_widgets);
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
/* if all audio out widgets are digital, let's assume the codec as a HDMI/DP */
static bool is_likely_hdmi_codec(struct hda_codec *codec)
{
@@ -1620,12 +1612,20 @@ int snd_hda_codec_configure(struct hda_codec *codec)
patch = codec->preset->patch;
if (!patch) {
unload_parser(codec); /* to be sure */
- if (is_likely_hdmi_codec(codec))
+ if (is_likely_hdmi_codec(codec)) {
+#if IS_MODULE(CONFIG_SND_HDA_CODEC_HDMI)
patch = load_parser(codec, snd_hda_parse_hdmi_codec);
-#ifdef CONFIG_SND_HDA_GENERIC
- if (!patch)
+#elif IS_BUILTIN(CONFIG_SND_HDA_CODEC_HDMI)
+ patch = snd_hda_parse_hdmi_codec;
+#endif
+ }
+ if (!patch) {
+#if IS_MODULE(CONFIG_SND_HDA_GENERIC)
patch = load_parser(codec, snd_hda_parse_generic_codec);
+#elif IS_BUILTIN(CONFIG_SND_HDA_GENERIC)
+ patch = snd_hda_parse_generic_codec;
#endif
+ }
if (!patch) {
printk(KERN_ERR "hda-codec: No codec parser is available\n");
return -ENODEV;
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 8321a97d5c05..d9a09bdd09db 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -3269,7 +3269,7 @@ static int cap_put_caller(struct snd_kcontrol *kcontrol,
mutex_unlock(&codec->control_mutex);
snd_hda_codec_flush_cache(codec); /* flush the updates */
if (err >= 0 && spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return err;
}
@@ -3390,7 +3390,7 @@ static int cap_single_sw_put(struct snd_kcontrol *kcontrol,
return ret;
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, ucontrol);
+ spec->cap_sync_hook(codec, kcontrol, ucontrol);
return ret;
}
@@ -3795,7 +3795,7 @@ static int mux_select(struct hda_codec *codec, unsigned int adc_idx,
return 0;
snd_hda_activate_path(codec, path, true, false);
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
path_power_down_sync(codec, old_path);
return 1;
}
@@ -5270,7 +5270,7 @@ static void init_input_src(struct hda_codec *codec)
}
if (spec->cap_sync_hook)
- spec->cap_sync_hook(codec, NULL);
+ spec->cap_sync_hook(codec, NULL, NULL);
}
/* set right pin controls for digital I/O */
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 07f767231c9f..c908afbe4d94 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -274,6 +274,7 @@ struct hda_gen_spec {
void (*init_hook)(struct hda_codec *codec);
void (*automute_hook)(struct hda_codec *codec);
void (*cap_sync_hook)(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
/* PCM hooks */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index fa2879a21a50..e354ab1ec20f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -198,7 +198,7 @@ MODULE_DESCRIPTION("Intel HDA driver");
#endif
#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
-#ifdef CONFIG_SND_HDA_CODEC_HDMI
+#if IS_ENABLED(CONFIG_SND_HDA_CODEC_HDMI)
#define SUPPORT_VGA_SWITCHEROO
#endif
#endif
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 7a426ed491f2..8ed0bcc01386 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -244,6 +244,19 @@ static void ad_fixup_inv_jack_detect(struct hda_codec *codec,
}
}
+/* Toshiba Satellite L40 implements EAPD in a standard way unlike others */
+static void ad1986a_fixup_eapd(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct ad198x_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ codec->inv_eapd = 0;
+ spec->gen.keep_eapd_on = 1;
+ spec->eapd_nid = 0x1b;
+ }
+}
+
enum {
AD1986A_FIXUP_INV_JACK_DETECT,
AD1986A_FIXUP_ULTRA,
@@ -251,6 +264,7 @@ enum {
AD1986A_FIXUP_3STACK,
AD1986A_FIXUP_LAPTOP,
AD1986A_FIXUP_LAPTOP_IMIC,
+ AD1986A_FIXUP_EAPD,
};
static const struct hda_fixup ad1986a_fixups[] = {
@@ -311,6 +325,10 @@ static const struct hda_fixup ad1986a_fixups[] = {
.chained_before = 1,
.chain_id = AD1986A_FIXUP_LAPTOP,
},
+ [AD1986A_FIXUP_EAPD] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = ad1986a_fixup_eapd,
+ },
};
static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
@@ -318,6 +336,7 @@ static const struct snd_pci_quirk ad1986a_fixup_tbl[] = {
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8100, "ASUS P5", AD1986A_FIXUP_3STACK),
SND_PCI_QUIRK_MASK(0x1043, 0xff00, 0x8200, "ASUS M2", AD1986A_FIXUP_3STACK),
SND_PCI_QUIRK(0x10de, 0xcb84, "ASUS A8N-VM", AD1986A_FIXUP_3STACK),
+ SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba Satellite L40", AD1986A_FIXUP_EAPD),
SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_FIXUP_LAPTOP),
SND_PCI_QUIRK_MASK(0x144d, 0xff00, 0xc000, "Samsung", AD1986A_FIXUP_SAMSUNG),
SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_FIXUP_ULTRA),
@@ -472,6 +491,8 @@ static int ad1983_add_spdif_mux_ctl(struct hda_codec *codec)
static int patch_ad1983(struct hda_codec *codec)
{
struct ad198x_spec *spec;
+ static hda_nid_t conn_0c[] = { 0x08 };
+ static hda_nid_t conn_0d[] = { 0x09 };
int err;
err = alloc_ad_spec(codec);
@@ -479,8 +500,14 @@ static int patch_ad1983(struct hda_codec *codec)
return err;
spec = codec->spec;
+ spec->gen.mixer_nid = 0x0e;
spec->gen.beep_nid = 0x10;
set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
+
+ /* limit the loopback routes not to confuse the parser */
+ snd_hda_override_conn_list(codec, 0x0c, ARRAY_SIZE(conn_0c), conn_0c);
+ snd_hda_override_conn_list(codec, 0x0d, ARRAY_SIZE(conn_0d), conn_0d);
+
err = ad198x_parse_auto_config(codec, false);
if (err < 0)
goto error;
@@ -999,6 +1026,9 @@ static void ad1884_fixup_thinkpad(struct hda_codec *codec,
spec->gen.keep_eapd_on = 1;
spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
spec->eapd_nid = 0x12;
+ /* Analog PC Beeper - allow firmware/ACPI beeps */
+ spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT);
+ spec->gen.beep_nid = 0; /* no digital beep */
}
}
@@ -1065,6 +1095,7 @@ static int patch_ad1884(struct hda_codec *codec)
spec = codec->spec;
spec->gen.mixer_nid = 0x20;
+ spec->gen.mixer_merge_nid = 0x21;
spec->gen.beep_nid = 0x10;
set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 54d14793725a..46ecdbb9053f 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -2662,60 +2662,6 @@ static bool dspload_wait_loaded(struct hda_codec *codec)
}
/*
- * PCM stuffs
- */
-static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid,
- u32 stream_tag,
- int channel_id, int format)
-{
- unsigned int oldval, newval;
-
- if (!nid)
- return;
-
- snd_printdd(
- "ca0132_setup_stream: NID=0x%x, stream=0x%x, "
- "channel=%d, format=0x%x\n",
- nid, stream_tag, channel_id, format);
-
- /* update the format-id if changed */
- oldval = snd_hda_codec_read(codec, nid, 0,
- AC_VERB_GET_STREAM_FORMAT,
- 0);
- if (oldval != format) {
- msleep(20);
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_STREAM_FORMAT,
- format);
- }
-
- oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- newval = (stream_tag << 4) | channel_id;
- if (oldval != newval) {
- snd_hda_codec_write(codec, nid, 0,
- AC_VERB_SET_CHANNEL_STREAMID,
- newval);
- }
-}
-
-static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid)
-{
- unsigned int val;
-
- if (!nid)
- return;
-
- snd_printdd(KERN_INFO "ca0132_cleanup_stream: NID=0x%x\n", nid);
-
- val = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0);
- if (!val)
- return;
-
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0);
- snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0);
-}
-
-/*
* PCM callbacks
*/
static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
@@ -2726,7 +2672,7 @@ static int ca0132_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
{
struct ca0132_spec *spec = codec->spec;
- ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
+ snd_hda_codec_setup_stream(codec, spec->dacs[0], stream_tag, 0, format);
return 0;
}
@@ -2745,7 +2691,7 @@ static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo,
if (spec->effects_switch[PLAY_ENHANCEMENT - EFFECT_START_NID])
msleep(50);
- ca0132_cleanup_stream(codec, spec->dacs[0]);
+ snd_hda_codec_cleanup_stream(codec, spec->dacs[0]);
return 0;
}
@@ -2822,10 +2768,8 @@ static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo,
unsigned int format,
struct snd_pcm_substream *substream)
{
- struct ca0132_spec *spec = codec->spec;
-
- ca0132_setup_stream(codec, spec->adcs[substream->number],
- stream_tag, 0, format);
+ snd_hda_codec_setup_stream(codec, hinfo->nid,
+ stream_tag, 0, format);
return 0;
}
@@ -2839,7 +2783,7 @@ static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo,
if (spec->dsp_state == DSP_DOWNLOADING)
return 0;
- ca0132_cleanup_stream(codec, hinfo->nid);
+ snd_hda_codec_cleanup_stream(codec, hinfo->nid);
return 0;
}
@@ -4742,6 +4686,8 @@ static int patch_ca0132(struct hda_codec *codec)
return err;
codec->patch_ops = ca0132_patch_ops;
+ codec->pcm_format_first = 1;
+ codec->no_sticky_stream = 1;
return 0;
}
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 4e0ec146553d..bcf91bea3317 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -3291,7 +3291,8 @@ static void cxt_update_headset_mode(struct hda_codec *codec)
}
static void cxt_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
cxt_update_headset_mode(codec);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 56a8f1876603..8d0a84436674 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -708,7 +708,8 @@ static void alc_inv_dmic_sync(struct hda_codec *codec, bool force)
}
static void alc_inv_dmic_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_inv_dmic_sync(codec, false);
}
@@ -1821,6 +1822,7 @@ enum {
ALC889_FIXUP_IMAC91_VREF,
ALC889_FIXUP_MBA11_VREF,
ALC889_FIXUP_MBA21_VREF,
+ ALC889_FIXUP_MP11_VREF,
ALC882_FIXUP_INV_DMIC,
ALC882_FIXUP_NO_PRIMARY_HP,
ALC887_FIXUP_ASUS_BASS,
@@ -2190,6 +2192,12 @@ static const struct hda_fixup alc882_fixups[] = {
.chained = true,
.chain_id = ALC889_FIXUP_MBP_VREF,
},
+ [ALC889_FIXUP_MP11_VREF] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc889_fixup_mba11_vref,
+ .chained = true,
+ .chain_id = ALC885_FIXUP_MACPRO_GPIO,
+ },
[ALC882_FIXUP_INV_DMIC] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
@@ -2253,7 +2261,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x00a1, "Macbook", ALC889_FIXUP_MBP_VREF),
SND_PCI_QUIRK(0x106b, 0x00a4, "MacbookPro 4,1", ALC889_FIXUP_MBP_VREF),
- SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC885_FIXUP_MACPRO_GPIO),
+ SND_PCI_QUIRK(0x106b, 0x0c00, "Mac Pro", ALC889_FIXUP_MP11_VREF),
SND_PCI_QUIRK(0x106b, 0x1000, "iMac 24", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x2800, "AppleTV", ALC885_FIXUP_MACPRO_GPIO),
SND_PCI_QUIRK(0x106b, 0x2c00, "MacbookPro rev3", ALC889_FIXUP_MBP_VREF),
@@ -3211,7 +3219,8 @@ static void alc269_fixup_hp_gpio_mute_hook(void *private_data, int enabled)
/* turn on/off mic-mute LED per capture hook */
static void alc269_fixup_hp_gpio_mic_mute_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct alc_spec *spec = codec->spec;
unsigned int oldval = spec->gpio_led;
@@ -3521,7 +3530,8 @@ static void alc_update_headset_mode(struct hda_codec *codec)
}
static void alc_update_headset_mode_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
alc_update_headset_mode(codec);
}
@@ -3606,6 +3616,19 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
}
}
+static void alc_no_shutup(struct hda_codec *codec)
+{
+}
+
+static void alc_fixup_no_shutup(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ struct alc_spec *spec = codec->spec;
+ spec->shutup = alc_no_shutup;
+ }
+}
+
static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -3834,6 +3857,7 @@ enum {
ALC269_FIXUP_HP_GPIO_LED,
ALC269_FIXUP_INV_DMIC,
ALC269_FIXUP_LENOVO_DOCK,
+ ALC269_FIXUP_NO_SHUTUP,
ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4010,6 +4034,10 @@ static const struct hda_fixup alc269_fixups[] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_inv_dmic_0x12,
},
+ [ALC269_FIXUP_NO_SHUTUP] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_no_shutup,
+ },
[ALC269_FIXUP_LENOVO_DOCK] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -4243,6 +4271,7 @@ static const struct hda_fixup alc269_fixups[] = {
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
@@ -4298,7 +4327,9 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0651, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0652, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0653, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0657, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0658, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x065f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0662, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4307,6 +4338,54 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x103c, 0x1973, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x1983, "HP Pavilion", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x218b, "HP", ALC269_FIXUP_LIMIT_INT_MIC_BOOST_MUTE_LED),
+ /* ALC282 */
+ SND_PCI_QUIRK(0x103c, 0x220f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2213, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2266, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2267, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2268, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2269, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x226b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x229e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22a0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22b7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22bf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c1, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c2, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22cd, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22ce, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22d0, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ /* ALC290 */
+ SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2261, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2262, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2263, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2264, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2280, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2281, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x2289, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228a, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228b, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228c, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228d, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x228e, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c5, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c6, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c7, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c8, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c3, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x22c4, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK_VENDOR(0x103c, "HP", ALC269_FIXUP_HP_MUTE_LED),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4322,6 +4401,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x8398, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x83ce, "ASUS P1005", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x8516, "ASUS X101CH", ALC269_FIXUP_ASUS_X101),
+ SND_PCI_QUIRK(0x104d, 0x90b5, "Sony VAIO Pro 11", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x90b6, "Sony VAIO Pro 13", ALC286_FIXUP_SONY_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x104d, 0x9073, "Sony VAIO", ALC275_FIXUP_SONY_VAIO_GPIO2),
SND_PCI_QUIRK(0x104d, 0x907b, "Sony VAIO", ALC275_FIXUP_SONY_HWEQ),
@@ -4343,6 +4423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+ SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5096,12 +5177,13 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x060a, "Dell XPS 13", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
- SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 6998cf29b9bc..3bc29c9b2529 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -83,6 +83,7 @@ enum {
STAC_DELL_M6_BOTH,
STAC_DELL_EQ,
STAC_ALIENWARE_M17X,
+ STAC_92HD89XX_HP_FRONT_JACK,
STAC_92HD73XX_MODELS
};
@@ -97,6 +98,7 @@ enum {
STAC_92HD83XXX_HP_LED,
STAC_92HD83XXX_HP_INV_LED,
STAC_92HD83XXX_HP_MIC_LED,
+ STAC_HP_LED_GPIO10,
STAC_92HD83XXX_HEADSET_JACK,
STAC_92HD83XXX_HP,
STAC_HP_ENVY_BASS,
@@ -194,7 +196,7 @@ struct sigmatel_spec {
int default_polarity;
unsigned int mic_mute_led_gpio; /* capture mute LED GPIO */
- bool mic_mute_led_on; /* current mic mute state */
+ unsigned int mic_enabled; /* current mic mute state (bitmask) */
/* stream */
unsigned int stream_delay;
@@ -324,19 +326,26 @@ static void stac_gpio_set(struct hda_codec *codec, unsigned int mask,
/* hook for controlling mic-mute LED GPIO */
static void stac_capture_led_hook(struct hda_codec *codec,
- struct snd_ctl_elem_value *ucontrol)
+ struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct sigmatel_spec *spec = codec->spec;
- bool mute;
+ unsigned int mask;
+ bool cur_mute, prev_mute;
- if (!ucontrol)
+ if (!kcontrol || !ucontrol)
return;
- mute = !(ucontrol->value.integer.value[0] ||
- ucontrol->value.integer.value[1]);
- if (spec->mic_mute_led_on != mute) {
- spec->mic_mute_led_on = mute;
- if (mute)
+ mask = 1U << snd_ctl_get_ioffidx(kcontrol, &ucontrol->id);
+ prev_mute = !spec->mic_enabled;
+ if (ucontrol->value.integer.value[0] ||
+ ucontrol->value.integer.value[1])
+ spec->mic_enabled |= mask;
+ else
+ spec->mic_enabled &= ~mask;
+ cur_mute = !spec->mic_enabled;
+ if (cur_mute != prev_mute) {
+ if (cur_mute)
spec->gpio_data |= spec->mic_mute_led_gpio;
else
spec->gpio_data &= ~spec->mic_mute_led_gpio;
@@ -1788,6 +1797,12 @@ static const struct hda_pintbl intel_dg45id_pin_configs[] = {
{}
};
+static const struct hda_pintbl stac92hd89xx_hp_front_jack_pin_configs[] = {
+ { 0x0a, 0x02214030 },
+ { 0x0b, 0x02A19010 },
+ {}
+};
+
static void stac92hd73xx_fixup_ref(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -1906,6 +1921,10 @@ static const struct hda_fixup stac92hd73xx_fixups[] = {
[STAC_92HD73XX_NO_JD] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd73xx_fixup_no_jd,
+ },
+ [STAC_92HD89XX_HP_FRONT_JACK] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = stac92hd89xx_hp_front_jack_pin_configs,
}
};
@@ -1966,6 +1985,8 @@ static const struct snd_pci_quirk stac92hd73xx_fixup_tbl[] = {
"Alienware M17x", STAC_ALIENWARE_M17X),
SND_PCI_QUIRK(PCI_VENDOR_ID_DELL, 0x0490,
"Alienware M17x R3", STAC_DELL_EQ),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x2b17,
+ "unknown HP", STAC_92HD89XX_HP_FRONT_JACK),
{} /* terminator */
};
@@ -2110,6 +2131,17 @@ static void stac92hd83xxx_fixup_hp_mic_led(struct hda_codec *codec,
}
}
+static void stac92hd83xxx_fixup_hp_led_gpio10(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+{
+ struct sigmatel_spec *spec = codec->spec;
+
+ if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+ spec->gpio_led = 0x10; /* GPIO4 */
+ spec->default_polarity = 0;
+ }
+}
+
static void stac92hd83xxx_fixup_headset_jack(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
@@ -2604,6 +2636,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
.chained = true,
.chain_id = STAC_92HD83XXX_HP,
},
+ [STAC_HP_LED_GPIO10] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = stac92hd83xxx_fixup_hp_led_gpio10,
+ .chained = true,
+ .chain_id = STAC_92HD83XXX_HP,
+ },
[STAC_92HD83XXX_HEADSET_JACK] = {
.type = HDA_FIXUP_FUNC,
.v.func = stac92hd83xxx_fixup_headset_jack,
@@ -2682,6 +2720,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
"HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1888,
"HP Envy Spectre", STAC_HP_ENVY_BASS),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1899,
+ "HP Folio 13", STAC_HP_LED_GPIO10),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18df,
"HP Folio", STAC_HP_BNB13_EQ),
SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x18F8,
@@ -4462,7 +4502,7 @@ static void stac_setup_gpio(struct hda_codec *codec)
if (spec->mic_mute_led_gpio) {
spec->gpio_mask |= spec->mic_mute_led_gpio;
spec->gpio_dir |= spec->mic_mute_led_gpio;
- spec->mic_mute_led_on = true;
+ spec->mic_enabled = 0;
spec->gpio_data |= spec->mic_mute_led_gpio;
spec->gen.cap_sync_hook = stac_capture_led_hook;
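
For reference, a minimal user-space model of the mic-mute LED logic introduced in the patch_sigmatel.c hunk above (the two-control scenario and names are illustrative, not taken from the driver): each capture switch owns one bit in an enable mask, and the LED is asserted only while the whole mask is zero.

#include <stdbool.h>
#include <stdio.h>

static unsigned int mic_enabled;	/* bit N set => capture control N is unmuted */

/* Returns true when the mute LED should be lit, i.e. every control is muted. */
static bool set_control(unsigned int idx, bool unmuted)
{
	if (unmuted)
		mic_enabled |= 1U << idx;
	else
		mic_enabled &= ~(1U << idx);

	return mic_enabled == 0;
}

int main(void)
{
	printf("mute ctl0   -> LED %s\n", set_control(0, false) ? "on" : "off");
	printf("unmute ctl1 -> LED %s\n", set_control(1, true)  ? "on" : "off");
	printf("mute ctl1   -> LED %s\n", set_control(1, false) ? "on" : "off");
	return 0;
}
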
diff --git a/sound/pci/hda/thinkpad_helper.c b/sound/pci/hda/thinkpad_helper.c
index 5799fbc24c28..8fe3b8c18ed4 100644
--- a/sound/pci/hda/thinkpad_helper.c
+++ b/sound/pci/hda/thinkpad_helper.c
@@ -39,6 +39,7 @@ static void update_tpacpi_mute_led(void *private_data, int enabled)
}
static void update_tpacpi_micmute_led(struct hda_codec *codec,
+ struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
if (!ucontrol || !led_set_func)
diff --git a/sound/pci/oxygen/xonar_dg.c b/sound/pci/oxygen/xonar_dg.c
index ed6f199f8a38..4cf3200e988b 100644
--- a/sound/pci/oxygen/xonar_dg.c
+++ b/sound/pci/oxygen/xonar_dg.c
@@ -238,11 +238,21 @@ void set_cs4245_adc_params(struct oxygen *chip,
cs4245_write_spi(chip, CS4245_MCLK_FREQ);
}
+static inline unsigned int shift_bits(unsigned int value,
+ unsigned int shift_from,
+ unsigned int shift_to,
+ unsigned int mask)
+{
+ if (shift_from < shift_to)
+ return (value << (shift_to - shift_from)) & mask;
+ else
+ return (value >> (shift_from - shift_to)) & mask;
+}
+
unsigned int adjust_dg_dac_routing(struct oxygen *chip,
unsigned int play_routing)
{
struct dg *data = chip->model_data;
- unsigned int routing = 0;
switch (data->output_sel) {
case PLAYBACK_DST_HP:
@@ -252,15 +262,23 @@ unsigned int adjust_dg_dac_routing(struct oxygen *chip,
OXYGEN_PLAY_MUTE67, OXYGEN_PLAY_MUTE_MASK);
break;
case PLAYBACK_DST_MULTICH:
- routing = (0 << OXYGEN_PLAY_DAC0_SOURCE_SHIFT) |
- (2 << OXYGEN_PLAY_DAC1_SOURCE_SHIFT) |
- (1 << OXYGEN_PLAY_DAC2_SOURCE_SHIFT) |
- (0 << OXYGEN_PLAY_DAC3_SOURCE_SHIFT);
oxygen_write8_masked(chip, OXYGEN_PLAY_ROUTING,
OXYGEN_PLAY_MUTE01, OXYGEN_PLAY_MUTE_MASK);
break;
}
- return routing;
+ return (play_routing & OXYGEN_PLAY_DAC0_SOURCE_MASK) |
+ shift_bits(play_routing,
+ OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC1_SOURCE_MASK) |
+ shift_bits(play_routing,
+ OXYGEN_PLAY_DAC1_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC2_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC2_SOURCE_MASK) |
+ shift_bits(play_routing,
+ OXYGEN_PLAY_DAC0_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC3_SOURCE_SHIFT,
+ OXYGEN_PLAY_DAC3_SOURCE_MASK);
}
void dump_cs4245_registers(struct oxygen *chip,
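
A standalone sketch of the shift_bits() rework above (the 2-bit field layout and mask values are assumptions for illustration; the real OXYGEN_PLAY_DAC*_SOURCE_* constants are not reproduced here): instead of discarding the caller's routing, the DAC1 and DAC2 source fields are swapped and DAC0's source is mirrored into DAC3's slot.

#include <stdio.h>

static unsigned int shift_bits(unsigned int value,
			       unsigned int shift_from,
			       unsigned int shift_to,
			       unsigned int mask)
{
	if (shift_from < shift_to)
		return (value << (shift_to - shift_from)) & mask;
	else
		return (value >> (shift_from - shift_to)) & mask;
}

int main(void)
{
	/* Hypothetical layout: four 2-bit source fields at bits 0, 2, 4, 6. */
	unsigned int play = (0u << 0) | (2u << 2) | (1u << 4) | (3u << 6);
	unsigned int swapped =
		(play & 0x03) |			/* DAC0 field kept as-is      */
		shift_bits(play, 4, 2, 0x0c) |	/* DAC2 value -> DAC1 slot    */
		shift_bits(play, 2, 4, 0x30) |	/* DAC1 value -> DAC2 slot    */
		shift_bits(play, 0, 6, 0xc0);	/* DAC0 value -> DAC3 slot    */

	printf("play=0x%02x swapped=0x%02x\n", play, swapped);
	return 0;
}
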
diff --git a/sound/soc/blackfin/Kconfig b/sound/soc/blackfin/Kconfig
index 54f74f8cbb75..4544d8eb1452 100644
--- a/sound/soc/blackfin/Kconfig
+++ b/sound/soc/blackfin/Kconfig
@@ -11,7 +11,7 @@ config SND_BF5XX_I2S
config SND_BF5XX_SOC_SSM2602
tristate "SoC SSM2602 Audio Codec Add-On Card support"
- depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
+ depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
select SND_BF5XX_SOC_I2S if !BF60x
select SND_BF6XX_SOC_I2S if BF60x
select SND_SOC_SSM2602
@@ -21,10 +21,9 @@ config SND_BF5XX_SOC_SSM2602
config SND_SOC_BFIN_EVAL_ADAU1701
tristate "Support for the EVAL-ADAU1701MINIZ board on Blackfin eval boards"
- depends on SND_BF5XX_I2S
+ depends on SND_BF5XX_I2S && I2C
select SND_BF5XX_SOC_I2S
select SND_SOC_ADAU1701
- select I2C
help
Say Y if you want to add support for the Analog Devices EVAL-ADAU1701MINIZ
board connected to one of the Blackfin evaluation boards like the
@@ -45,7 +44,7 @@ config SND_SOC_BFIN_EVAL_ADAU1373
config SND_SOC_BFIN_EVAL_ADAV80X
tristate "Support for the EVAL-ADAV80X boards on Blackfin eval boards"
- depends on SND_BF5XX_I2S && (SPI_MASTER || I2C)
+ depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
select SND_BF5XX_SOC_I2S
select SND_SOC_ADAV80X
help
@@ -58,7 +57,7 @@ config SND_SOC_BFIN_EVAL_ADAV80X
config SND_BF5XX_SOC_AD1836
tristate "SoC AD1836 Audio support for BF5xx"
- depends on SND_BF5XX_I2S
+ depends on SND_BF5XX_I2S && SPI_MASTER
select SND_BF5XX_SOC_I2S
select SND_SOC_AD1836
help
@@ -66,7 +65,7 @@ config SND_BF5XX_SOC_AD1836
config SND_BF5XX_SOC_AD193X
tristate "SoC AD193X Audio support for Blackfin"
- depends on SND_BF5XX_I2S
+ depends on SND_BF5XX_I2S && SND_SOC_I2C_AND_SPI
select SND_BF5XX_SOC_I2S
select SND_SOC_AD193X
help
diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c
index 75d0ad5d2dcb..647a72cda005 100644
--- a/sound/soc/codecs/88pm860x-codec.c
+++ b/sound/soc/codecs/88pm860x-codec.c
@@ -1328,6 +1328,9 @@ static int pm860x_probe(struct snd_soc_codec *codec)
pm860x->codec = codec;
codec->control_data = pm860x->regmap;
+ ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
+ if (ret)
+ return ret;
for (i = 0; i < 4; i++) {
ret = request_threaded_irq(pm860x->irq[i], NULL,
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 7257a8885f42..34d965a4a040 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -57,8 +57,8 @@ static const u16 ad1980_reg[] = {
static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line",
"Stereo Mix", "Mono Mix", "Phone"};
-static const struct soc_enum ad1980_cap_src =
- SOC_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 7, ad1980_rec_sel);
+static SOC_ENUM_DOUBLE_DECL(ad1980_cap_src,
+ AC97_REC_SEL, 8, 0, ad1980_rec_sel);
static const struct snd_kcontrol_new ad1980_snd_ac97_controls[] = {
SOC_DOUBLE("Master Playback Volume", AC97_MASTER, 8, 0, 31, 1),
diff --git a/sound/soc/codecs/da732x.c b/sound/soc/codecs/da732x.c
index f295b6569910..f4d965ebc29e 100644
--- a/sound/soc/codecs/da732x.c
+++ b/sound/soc/codecs/da732x.c
@@ -1268,11 +1268,23 @@ static struct snd_soc_dai_driver da732x_dai[] = {
},
};
+static bool da732x_volatile(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case DA732X_REG_HPL_DAC_OFF_CNTL:
+ case DA732X_REG_HPR_DAC_OFF_CNTL:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config da732x_regmap = {
.reg_bits = 8,
.val_bits = 8,
.max_register = DA732X_MAX_REG,
+ .volatile_reg = da732x_volatile,
.reg_defaults = da732x_reg_cache,
.num_reg_defaults = ARRAY_SIZE(da732x_reg_cache),
.cache_type = REGCACHE_RBTREE,
diff --git a/sound/soc/codecs/da9055.c b/sound/soc/codecs/da9055.c
index 52b79a487ac7..422812613a28 100644
--- a/sound/soc/codecs/da9055.c
+++ b/sound/soc/codecs/da9055.c
@@ -1523,8 +1523,15 @@ static int da9055_remove(struct i2c_client *client)
return 0;
}
+/*
+ * DO NOT change the device Ids. The naming is intentionally specific as both
+ * the CODEC and PMIC parts of this chip are instantiated separately as I2C
+ * devices (both have configurable I2C addresses, and are to all intents and
+ * purposes separate). As a result there are specific DA9055 Ids for CODEC
+ * and PMIC, which must be different to operate together.
+ */
static const struct i2c_device_id da9055_i2c_id[] = {
- { "da9055", 0 },
+ { "da9055-codec", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
@@ -1532,7 +1539,7 @@ MODULE_DEVICE_TABLE(i2c, da9055_i2c_id);
/* I2C codec control layer */
static struct i2c_driver da9055_i2c_driver = {
.driver = {
- .name = "da9055",
+ .name = "da9055-codec",
.owner = THIS_MODULE,
},
.probe = da9055_i2c_probe,
diff --git a/sound/soc/codecs/isabelle.c b/sound/soc/codecs/isabelle.c
index 5839048ec467..cb736ddc446d 100644
--- a/sound/soc/codecs/isabelle.c
+++ b/sound/soc/codecs/isabelle.c
@@ -140,13 +140,17 @@ static const char *isabelle_rx1_texts[] = {"VRX1", "ARX1"};
static const char *isabelle_rx2_texts[] = {"VRX2", "ARX2"};
static const struct soc_enum isabelle_rx1_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3, 1, isabelle_rx1_texts),
- SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5, 1, isabelle_rx1_texts),
+ SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 3,
+ ARRAY_SIZE(isabelle_rx1_texts), isabelle_rx1_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 5,
+ ARRAY_SIZE(isabelle_rx1_texts), isabelle_rx1_texts),
};
static const struct soc_enum isabelle_rx2_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2, 1, isabelle_rx2_texts),
- SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4, 1, isabelle_rx2_texts),
+ SOC_ENUM_SINGLE(ISABELLE_VOICE_HPF_CFG_REG, 2,
+ ARRAY_SIZE(isabelle_rx2_texts), isabelle_rx2_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AUDIO_HPF_CFG_REG, 4,
+ ARRAY_SIZE(isabelle_rx2_texts), isabelle_rx2_texts),
};
/* Headset DAC playback switches */
@@ -161,13 +165,17 @@ static const char *isabelle_atx_texts[] = {"AMIC1", "DMIC"};
static const char *isabelle_vtx_texts[] = {"AMIC2", "DMIC"};
static const struct soc_enum isabelle_atx_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7, 1, isabelle_atx_texts),
- SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_atx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_atx_texts), isabelle_atx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0,
+ ARRAY_SIZE(isabelle_atx_texts), isabelle_atx_texts),
};
static const struct soc_enum isabelle_vtx_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6, 1, isabelle_vtx_texts),
- SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0, 1, isabelle_vtx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 6,
+ ARRAY_SIZE(isabelle_vtx_texts), isabelle_vtx_texts),
+ SOC_ENUM_SINGLE(ISABELLE_DMIC_CFG_REG, 0,
+ ARRAY_SIZE(isabelle_vtx_texts), isabelle_vtx_texts),
};
static const struct snd_kcontrol_new atx_mux_controls =
@@ -183,17 +191,13 @@ static const char *isabelle_amic1_texts[] = {
/* Left analog microphone selection */
static const char *isabelle_amic2_texts[] = {"Sub Mic", "Aux/FM Right"};
-static const struct soc_enum isabelle_amic1_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 5,
- ARRAY_SIZE(isabelle_amic1_texts),
- isabelle_amic1_texts),
-};
+static SOC_ENUM_SINGLE_DECL(isabelle_amic1_enum,
+ ISABELLE_AMIC_CFG_REG, 5,
+ isabelle_amic1_texts);
-static const struct soc_enum isabelle_amic2_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_AMIC_CFG_REG, 4,
- ARRAY_SIZE(isabelle_amic2_texts),
- isabelle_amic2_texts),
-};
+static SOC_ENUM_SINGLE_DECL(isabelle_amic2_enum,
+ ISABELLE_AMIC_CFG_REG, 4,
+ isabelle_amic2_texts);
static const struct snd_kcontrol_new amic1_control =
SOC_DAPM_ENUM("Route", isabelle_amic1_enum);
@@ -206,16 +210,20 @@ static const char *isabelle_st_audio_texts[] = {"ATX1", "ATX2"};
static const char *isabelle_st_voice_texts[] = {"VTX1", "VTX2"};
static const struct soc_enum isabelle_st_audio_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA1_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_audio_texts),
isabelle_st_audio_texts),
- SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_ATX_STPGA2_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_audio_texts),
isabelle_st_audio_texts),
};
static const struct soc_enum isabelle_st_voice_enum[] = {
- SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_VTX_STPGA1_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_voice_texts),
isabelle_st_voice_texts),
- SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7, 1,
+ SOC_ENUM_SINGLE(ISABELLE_VTX2_STPGA2_CFG_REG, 7,
+ ARRAY_SIZE(isabelle_st_voice_texts),
isabelle_st_voice_texts),
};
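
The SOC_ENUM_*_DECL conversions in this and the following codec files all hinge on one idea, shown in a generic sketch below (plain C, not the ASoC headers): deriving the item count from the text array with ARRAY_SIZE() instead of hard-coding it, which is exactly what the isabelle hunks needed, since they previously passed 1 for two-entry arrays.

#include <stdio.h>

#define ARRAY_SIZE(a)	(sizeof(a) / sizeof((a)[0]))

struct fake_enum {
	unsigned int items;
	const char * const *texts;
};

/* Count is always in sync with the array, so it cannot drift. */
#define ENUM_DECL(name, xtexts) \
	static const struct fake_enum name = { ARRAY_SIZE(xtexts), xtexts }

static const char * const rx1_texts[] = { "VRX1", "ARX1" };
ENUM_DECL(rx1_enum, rx1_texts);

int main(void)
{
	printf("%u selectable items\n", rx1_enum.items);	/* prints 2, not 1 */
	return 0;
}
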
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 51f9b3d16b41..9f714ea86613 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -336,6 +336,7 @@ static bool max98090_readable_register(struct device *dev, unsigned int reg)
case M98090_REG_RECORD_TDM_SLOT:
case M98090_REG_SAMPLE_RATE:
case M98090_REG_DMIC34_BIQUAD_BASE ... M98090_REG_DMIC34_BIQUAD_BASE + 0x0E:
+ case M98090_REG_REVISION_ID:
return true;
default:
return false;
@@ -1769,16 +1770,6 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
switch (level) {
case SND_SOC_BIAS_ON:
- if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
- ret = regcache_sync(max98090->regmap);
-
- if (ret != 0) {
- dev_err(codec->dev,
- "Failed to sync cache: %d\n", ret);
- return ret;
- }
- }
-
if (max98090->jack_state == M98090_JACK_STATE_HEADSET) {
/*
* Set to normal bias level.
@@ -1792,6 +1783,16 @@ static int max98090_set_bias_level(struct snd_soc_codec *codec,
break;
case SND_SOC_BIAS_STANDBY:
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
+ ret = regcache_sync(max98090->regmap);
+ if (ret != 0) {
+ dev_err(codec->dev,
+ "Failed to sync cache: %d\n", ret);
+ return ret;
+ }
+ }
+ break;
+
case SND_SOC_BIAS_OFF:
/* Set internal pull-up to lowest power mode */
snd_soc_update_bits(codec, M98090_REG_JACK_DETECT,
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index a3fb41179636..886924934aa5 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2093,6 +2093,7 @@ MODULE_DEVICE_TABLE(i2c, rt5640_i2c_id);
#ifdef CONFIG_ACPI
static struct acpi_device_id rt5640_acpi_match[] = {
{ "INT33CA", 0 },
+ { "10EC5640", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c
index 52e7cb08434b..fa2b8e07f420 100644
--- a/sound/soc/codecs/si476x.c
+++ b/sound/soc/codecs/si476x.c
@@ -210,7 +210,7 @@ out:
static int si476x_codec_probe(struct snd_soc_codec *codec)
{
codec->control_data = dev_get_regmap(codec->dev->parent, NULL);
- return 0;
+ return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP);
}
static struct snd_soc_dai_ops si476x_dai_ops = {
diff --git a/sound/soc/codecs/sta32x.c b/sound/soc/codecs/sta32x.c
index 06edb396e733..2735361a4c3c 100644
--- a/sound/soc/codecs/sta32x.c
+++ b/sound/soc/codecs/sta32x.c
@@ -187,42 +187,42 @@ static const unsigned int sta32x_limiter_drc_release_tlv[] = {
13, 16, TLV_DB_SCALE_ITEM(-1500, 300, 0),
};
-static const struct soc_enum sta32x_drc_ac_enum =
- SOC_ENUM_SINGLE(STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
- 2, sta32x_drc_ac);
-static const struct soc_enum sta32x_auto_eq_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
- 3, sta32x_auto_eq_mode);
-static const struct soc_enum sta32x_auto_gc_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
- 4, sta32x_auto_gc_mode);
-static const struct soc_enum sta32x_auto_xo_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
- 16, sta32x_auto_xo_mode);
-static const struct soc_enum sta32x_preset_eq_enum =
- SOC_ENUM_SINGLE(STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
- 32, sta32x_preset_eq_mode);
-static const struct soc_enum sta32x_limiter_ch1_enum =
- SOC_ENUM_SINGLE(STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter_ch2_enum =
- SOC_ENUM_SINGLE(STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter_ch3_enum =
- SOC_ENUM_SINGLE(STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
- 3, sta32x_limiter_select);
-static const struct soc_enum sta32x_limiter1_attack_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxA_SHIFT,
- 16, sta32x_limiter_attack_rate);
-static const struct soc_enum sta32x_limiter2_attack_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxA_SHIFT,
- 16, sta32x_limiter_attack_rate);
-static const struct soc_enum sta32x_limiter1_release_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L1AR, STA32X_LxR_SHIFT,
- 16, sta32x_limiter_release_rate);
-static const struct soc_enum sta32x_limiter2_release_rate_enum =
- SOC_ENUM_SINGLE(STA32X_L2AR, STA32X_LxR_SHIFT,
- 16, sta32x_limiter_release_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_drc_ac_enum,
+ STA32X_CONFD, STA32X_CONFD_DRC_SHIFT,
+ sta32x_drc_ac);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_eq_enum,
+ STA32X_AUTO1, STA32X_AUTO1_AMEQ_SHIFT,
+ sta32x_auto_eq_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_gc_enum,
+ STA32X_AUTO1, STA32X_AUTO1_AMGC_SHIFT,
+ sta32x_auto_gc_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_auto_xo_enum,
+ STA32X_AUTO2, STA32X_AUTO2_XO_SHIFT,
+ sta32x_auto_xo_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_preset_eq_enum,
+ STA32X_AUTO3, STA32X_AUTO3_PEQ_SHIFT,
+ sta32x_preset_eq_mode);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch1_enum,
+ STA32X_C1CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch2_enum,
+ STA32X_C2CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter_ch3_enum,
+ STA32X_C3CFG, STA32X_CxCFG_LS_SHIFT,
+ sta32x_limiter_select);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_attack_rate_enum,
+ STA32X_L1AR, STA32X_LxA_SHIFT,
+ sta32x_limiter_attack_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_attack_rate_enum,
+ STA32X_L2AR, STA32X_LxA_SHIFT,
+ sta32x_limiter_attack_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter1_release_rate_enum,
+ STA32X_L1AR, STA32X_LxR_SHIFT,
+ sta32x_limiter_release_rate);
+static SOC_ENUM_SINGLE_DECL(sta32x_limiter2_release_rate_enum,
+ STA32X_L2AR, STA32X_LxR_SHIFT,
+ sta32x_limiter_release_rate);
/* byte array controls for setting biquad, mixer, scaling coefficients;
* for biquads all five coefficients need to be set in one go,
@@ -331,7 +331,7 @@ static int sta32x_sync_coef_shadow(struct snd_soc_codec *codec)
static int sta32x_cache_sync(struct snd_soc_codec *codec)
{
- struct sta32x_priv *sta32x = codec->control_data;
+ struct sta32x_priv *sta32x = snd_soc_codec_get_drvdata(codec);
unsigned int mute;
int rc;
@@ -434,7 +434,7 @@ SOC_SINGLE_TLV("Treble Tone Control", STA32X_TONE, STA32X_TONE_TTC_SHIFT, 15, 0,
SOC_ENUM("Limiter1 Attack Rate (dB/ms)", sta32x_limiter1_attack_rate_enum),
SOC_ENUM("Limiter2 Attack Rate (dB/ms)", sta32x_limiter2_attack_rate_enum),
SOC_ENUM("Limiter1 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
-SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter1_release_rate_enum),
+SOC_ENUM("Limiter2 Release Rate (dB/ms)", sta32x_limiter2_release_rate_enum),
/* depending on mode, the attack/release thresholds have
* two different enum definitions; provide both
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c
index 48dc7d2fee36..6d684d934f4d 100644
--- a/sound/soc/codecs/wm8400.c
+++ b/sound/soc/codecs/wm8400.c
@@ -117,19 +117,23 @@ static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol,
static const char *wm8400_digital_sidetone[] =
{"None", "Left ADC", "Right ADC", "Reserved"};
-static const struct soc_enum wm8400_left_digital_sidetone_enum =
-SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE,
- WM8400_ADC_TO_DACL_SHIFT, 2, wm8400_digital_sidetone);
+static SOC_ENUM_SINGLE_DECL(wm8400_left_digital_sidetone_enum,
+ WM8400_DIGITAL_SIDE_TONE,
+ WM8400_ADC_TO_DACL_SHIFT,
+ wm8400_digital_sidetone);
-static const struct soc_enum wm8400_right_digital_sidetone_enum =
-SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE,
- WM8400_ADC_TO_DACR_SHIFT, 2, wm8400_digital_sidetone);
+static SOC_ENUM_SINGLE_DECL(wm8400_right_digital_sidetone_enum,
+ WM8400_DIGITAL_SIDE_TONE,
+ WM8400_ADC_TO_DACR_SHIFT,
+ wm8400_digital_sidetone);
static const char *wm8400_adcmode[] =
{"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"};
-static const struct soc_enum wm8400_right_adcmode_enum =
-SOC_ENUM_SINGLE(WM8400_ADC_CTRL, WM8400_ADC_HPF_CUT_SHIFT, 3, wm8400_adcmode);
+static SOC_ENUM_SINGLE_DECL(wm8400_right_adcmode_enum,
+ WM8400_ADC_CTRL,
+ WM8400_ADC_HPF_CUT_SHIFT,
+ wm8400_adcmode);
static const struct snd_kcontrol_new wm8400_snd_controls[] = {
/* INMIXL */
@@ -422,9 +426,10 @@ SOC_DAPM_SINGLE("RINPGA34 Switch", WM8400_INPUT_MIXER3, WM8400_L34MNB_SHIFT,
static const char *wm8400_ainlmux[] =
{"INMIXL Mix", "RXVOICE Mix", "DIFFINL Mix"};
-static const struct soc_enum wm8400_ainlmux_enum =
-SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINLMODE_SHIFT,
- ARRAY_SIZE(wm8400_ainlmux), wm8400_ainlmux);
+static SOC_ENUM_SINGLE_DECL(wm8400_ainlmux_enum,
+ WM8400_INPUT_MIXER1,
+ WM8400_AINLMODE_SHIFT,
+ wm8400_ainlmux);
static const struct snd_kcontrol_new wm8400_dapm_ainlmux_controls =
SOC_DAPM_ENUM("Route", wm8400_ainlmux_enum);
@@ -435,9 +440,10 @@ SOC_DAPM_ENUM("Route", wm8400_ainlmux_enum);
static const char *wm8400_ainrmux[] =
{"INMIXR Mix", "RXVOICE Mix", "DIFFINR Mix"};
-static const struct soc_enum wm8400_ainrmux_enum =
-SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINRMODE_SHIFT,
- ARRAY_SIZE(wm8400_ainrmux), wm8400_ainrmux);
+static SOC_ENUM_SINGLE_DECL(wm8400_ainrmux_enum,
+ WM8400_INPUT_MIXER1,
+ WM8400_AINRMODE_SHIFT,
+ wm8400_ainrmux);
static const struct snd_kcontrol_new wm8400_dapm_ainrmux_controls =
SOC_DAPM_ENUM("Route", wm8400_ainrmux_enum);
diff --git a/sound/soc/codecs/wm8770.c b/sound/soc/codecs/wm8770.c
index 89a18d82f303..5bce21013485 100644
--- a/sound/soc/codecs/wm8770.c
+++ b/sound/soc/codecs/wm8770.c
@@ -196,8 +196,8 @@ static const char *ain_text[] = {
"AIN5", "AIN6", "AIN7", "AIN8"
};
-static const struct soc_enum ain_enum =
- SOC_ENUM_DOUBLE(WM8770_ADCMUX, 0, 4, 8, ain_text);
+static SOC_ENUM_DOUBLE_DECL(ain_enum,
+ WM8770_ADCMUX, 0, 4, ain_text);
static const struct snd_kcontrol_new ain_mux =
SOC_DAPM_ENUM("Capture Mux", ain_enum);
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index e98bc7038a08..43c2201cb901 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -304,53 +304,53 @@ static const DECLARE_TLV_DB_SCALE(adc_tlv, -7200, 75, 1);
static const char *mic_bias_level_txt[] = { "0.9*AVDD", "0.65*AVDD" };
-static const struct soc_enum mic_bias_level =
-SOC_ENUM_SINGLE(WM8900_REG_INCTL, 8, 2, mic_bias_level_txt);
+static SOC_ENUM_SINGLE_DECL(mic_bias_level,
+ WM8900_REG_INCTL, 8, mic_bias_level_txt);
static const char *dac_mute_rate_txt[] = { "Fast", "Slow" };
-static const struct soc_enum dac_mute_rate =
-SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 7, 2, dac_mute_rate_txt);
+static SOC_ENUM_SINGLE_DECL(dac_mute_rate,
+ WM8900_REG_DACCTRL, 7, dac_mute_rate_txt);
static const char *dac_deemphasis_txt[] = {
"Disabled", "32kHz", "44.1kHz", "48kHz"
};
-static const struct soc_enum dac_deemphasis =
-SOC_ENUM_SINGLE(WM8900_REG_DACCTRL, 4, 4, dac_deemphasis_txt);
+static SOC_ENUM_SINGLE_DECL(dac_deemphasis,
+ WM8900_REG_DACCTRL, 4, dac_deemphasis_txt);
static const char *adc_hpf_cut_txt[] = {
"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"
};
-static const struct soc_enum adc_hpf_cut =
-SOC_ENUM_SINGLE(WM8900_REG_ADCCTRL, 5, 4, adc_hpf_cut_txt);
+static SOC_ENUM_SINGLE_DECL(adc_hpf_cut,
+ WM8900_REG_ADCCTRL, 5, adc_hpf_cut_txt);
static const char *lr_txt[] = {
"Left", "Right"
};
-static const struct soc_enum aifl_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 15, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(aifl_src,
+ WM8900_REG_AUDIO1, 15, lr_txt);
-static const struct soc_enum aifr_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO1, 14, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(aifr_src,
+ WM8900_REG_AUDIO1, 14, lr_txt);
-static const struct soc_enum dacl_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 15, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(dacl_src,
+ WM8900_REG_AUDIO2, 15, lr_txt);
-static const struct soc_enum dacr_src =
-SOC_ENUM_SINGLE(WM8900_REG_AUDIO2, 14, 2, lr_txt);
+static SOC_ENUM_SINGLE_DECL(dacr_src,
+ WM8900_REG_AUDIO2, 14, lr_txt);
static const char *sidetone_txt[] = {
"Disabled", "Left ADC", "Right ADC"
};
-static const struct soc_enum dacl_sidetone =
-SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 2, 3, sidetone_txt);
+static SOC_ENUM_SINGLE_DECL(dacl_sidetone,
+ WM8900_REG_SIDETONE, 2, sidetone_txt);
-static const struct soc_enum dacr_sidetone =
-SOC_ENUM_SINGLE(WM8900_REG_SIDETONE, 0, 3, sidetone_txt);
+static SOC_ENUM_SINGLE_DECL(dacr_sidetone,
+ WM8900_REG_SIDETONE, 0, sidetone_txt);
static const struct snd_kcontrol_new wm8900_snd_controls[] = {
SOC_ENUM("Mic Bias Level", mic_bias_level),
@@ -496,8 +496,8 @@ SOC_DAPM_SINGLE("RINPUT3 Switch", WM8900_REG_INCTL, 0, 1, 0),
static const char *wm8900_lp_mux[] = { "Disabled", "Enabled" };
-static const struct soc_enum wm8900_lineout2_lp_mux =
-SOC_ENUM_SINGLE(WM8900_REG_LOUTMIXCTL1, 1, 2, wm8900_lp_mux);
+static SOC_ENUM_SINGLE_DECL(wm8900_lineout2_lp_mux,
+ WM8900_REG_LOUTMIXCTL1, 1, wm8900_lp_mux);
static const struct snd_kcontrol_new wm8900_lineout2_lp =
SOC_DAPM_ENUM("Route", wm8900_lineout2_lp_mux);
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index b7488f190d2b..d4248e00160e 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -153,7 +153,7 @@ static int wm8958_dsp2_fw(struct snd_soc_codec *codec, const char *name,
data32 &= 0xffffff;
- wm8994_bulk_write(codec->control_data,
+ wm8994_bulk_write(wm8994->wm8994,
data32 & 0xffffff,
block_len / 2,
(void *)(data + 8));
diff --git a/sound/soc/codecs/wm8993.c b/sound/soc/codecs/wm8993.c
index 433d59a0f3ef..2ee23a39622c 100644
--- a/sound/soc/codecs/wm8993.c
+++ b/sound/soc/codecs/wm8993.c
@@ -1562,7 +1562,6 @@ static int wm8993_remove(struct snd_soc_codec *codec)
struct wm8993_priv *wm8993 = snd_soc_codec_get_drvdata(codec);
wm8993_set_bias_level(codec, SND_SOC_BIAS_OFF);
- regulator_bulk_free(ARRAY_SIZE(wm8993->supplies), wm8993->supplies);
return 0;
}
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index b9be9cbc4603..adb72063d44e 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -265,21 +265,21 @@ static const char *sidetone_hpf_text[] = {
"2.7kHz", "1.35kHz", "675Hz", "370Hz", "180Hz", "90Hz", "45Hz"
};
-static const struct soc_enum sidetone_hpf =
- SOC_ENUM_SINGLE(WM8994_SIDETONE, 7, 7, sidetone_hpf_text);
+static SOC_ENUM_SINGLE_DECL(sidetone_hpf,
+ WM8994_SIDETONE, 7, sidetone_hpf_text);
static const char *adc_hpf_text[] = {
"HiFi", "Voice 1", "Voice 2", "Voice 3"
};
-static const struct soc_enum aif1adc1_hpf =
- SOC_ENUM_SINGLE(WM8994_AIF1_ADC1_FILTERS, 13, 4, adc_hpf_text);
+static SOC_ENUM_SINGLE_DECL(aif1adc1_hpf,
+ WM8994_AIF1_ADC1_FILTERS, 13, adc_hpf_text);
-static const struct soc_enum aif1adc2_hpf =
- SOC_ENUM_SINGLE(WM8994_AIF1_ADC2_FILTERS, 13, 4, adc_hpf_text);
+static SOC_ENUM_SINGLE_DECL(aif1adc2_hpf,
+ WM8994_AIF1_ADC2_FILTERS, 13, adc_hpf_text);
-static const struct soc_enum aif2adc_hpf =
- SOC_ENUM_SINGLE(WM8994_AIF2_ADC_FILTERS, 13, 4, adc_hpf_text);
+static SOC_ENUM_SINGLE_DECL(aif2adc_hpf,
+ WM8994_AIF2_ADC_FILTERS, 13, adc_hpf_text);
static const DECLARE_TLV_DB_SCALE(aif_tlv, 0, 600, 0);
static const DECLARE_TLV_DB_SCALE(digital_tlv, -7200, 75, 1);
@@ -501,39 +501,39 @@ static const char *aif_chan_src_text[] = {
"Left", "Right"
};
-static const struct soc_enum aif1adcl_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1adcl_src,
+ WM8994_AIF1_CONTROL_1, 15, aif_chan_src_text);
-static const struct soc_enum aif1adcr_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_1, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1adcr_src,
+ WM8994_AIF1_CONTROL_1, 14, aif_chan_src_text);
-static const struct soc_enum aif2adcl_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2adcl_src,
+ WM8994_AIF2_CONTROL_1, 15, aif_chan_src_text);
-static const struct soc_enum aif2adcr_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_1, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2adcr_src,
+ WM8994_AIF2_CONTROL_1, 14, aif_chan_src_text);
-static const struct soc_enum aif1dacl_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1dacl_src,
+ WM8994_AIF1_CONTROL_2, 15, aif_chan_src_text);
-static const struct soc_enum aif1dacr_src =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif1dacr_src,
+ WM8994_AIF1_CONTROL_2, 14, aif_chan_src_text);
-static const struct soc_enum aif2dacl_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 15, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacl_src,
+ WM8994_AIF2_CONTROL_2, 15, aif_chan_src_text);
-static const struct soc_enum aif2dacr_src =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, 14, 2, aif_chan_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacr_src,
+ WM8994_AIF2_CONTROL_2, 14, aif_chan_src_text);
static const char *osr_text[] = {
"Low Power", "High Performance",
};
-static const struct soc_enum dac_osr =
- SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 0, 2, osr_text);
+static SOC_ENUM_SINGLE_DECL(dac_osr,
+ WM8994_OVERSAMPLING, 0, osr_text);
-static const struct soc_enum adc_osr =
- SOC_ENUM_SINGLE(WM8994_OVERSAMPLING, 1, 2, osr_text);
+static SOC_ENUM_SINGLE_DECL(adc_osr,
+ WM8994_OVERSAMPLING, 1, osr_text);
static const struct snd_kcontrol_new wm8994_snd_controls[] = {
SOC_DOUBLE_R_TLV("AIF1ADC1 Volume", WM8994_AIF1_ADC1_LEFT_VOLUME,
@@ -690,17 +690,20 @@ static const char *wm8958_ng_text[] = {
"30ms", "125ms", "250ms", "500ms",
};
-static const struct soc_enum wm8958_aif1dac1_ng_hold =
- SOC_ENUM_SINGLE(WM8958_AIF1_DAC1_NOISE_GATE,
- WM8958_AIF1DAC1_NG_THR_SHIFT, 4, wm8958_ng_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif1dac1_ng_hold,
+ WM8958_AIF1_DAC1_NOISE_GATE,
+ WM8958_AIF1DAC1_NG_THR_SHIFT,
+ wm8958_ng_text);
-static const struct soc_enum wm8958_aif1dac2_ng_hold =
- SOC_ENUM_SINGLE(WM8958_AIF1_DAC2_NOISE_GATE,
- WM8958_AIF1DAC2_NG_THR_SHIFT, 4, wm8958_ng_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif1dac2_ng_hold,
+ WM8958_AIF1_DAC2_NOISE_GATE,
+ WM8958_AIF1DAC2_NG_THR_SHIFT,
+ wm8958_ng_text);
-static const struct soc_enum wm8958_aif2dac_ng_hold =
- SOC_ENUM_SINGLE(WM8958_AIF2_DAC_NOISE_GATE,
- WM8958_AIF2DAC_NG_THR_SHIFT, 4, wm8958_ng_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif2dac_ng_hold,
+ WM8958_AIF2_DAC_NOISE_GATE,
+ WM8958_AIF2DAC_NG_THR_SHIFT,
+ wm8958_ng_text);
static const struct snd_kcontrol_new wm8958_snd_controls[] = {
SOC_SINGLE_TLV("AIF3 Boost Volume", WM8958_AIF3_CONTROL_2, 10, 3, 0, aif_tlv),
@@ -1341,8 +1344,8 @@ static const char *adc_mux_text[] = {
"DMIC",
};
-static const struct soc_enum adc_enum =
- SOC_ENUM_SINGLE(0, 0, 2, adc_mux_text);
+static SOC_ENUM_SINGLE_DECL(adc_enum,
+ 0, 0, adc_mux_text);
static const struct snd_kcontrol_new adcl_mux =
SOC_DAPM_ENUM_VIRT("ADCL Mux", adc_enum);
@@ -1478,14 +1481,14 @@ static const char *sidetone_text[] = {
"ADC/DMIC1", "DMIC2",
};
-static const struct soc_enum sidetone1_enum =
- SOC_ENUM_SINGLE(WM8994_SIDETONE, 0, 2, sidetone_text);
+static SOC_ENUM_SINGLE_DECL(sidetone1_enum,
+ WM8994_SIDETONE, 0, sidetone_text);
static const struct snd_kcontrol_new sidetone1_mux =
SOC_DAPM_ENUM("Left Sidetone Mux", sidetone1_enum);
-static const struct soc_enum sidetone2_enum =
- SOC_ENUM_SINGLE(WM8994_SIDETONE, 1, 2, sidetone_text);
+static SOC_ENUM_SINGLE_DECL(sidetone2_enum,
+ WM8994_SIDETONE, 1, sidetone_text);
static const struct snd_kcontrol_new sidetone2_mux =
SOC_DAPM_ENUM("Right Sidetone Mux", sidetone2_enum);
@@ -1498,22 +1501,24 @@ static const char *loopback_text[] = {
"None", "ADCDAT",
};
-static const struct soc_enum aif1_loopback_enum =
- SOC_ENUM_SINGLE(WM8994_AIF1_CONTROL_2, WM8994_AIF1_LOOPBACK_SHIFT, 2,
- loopback_text);
+static SOC_ENUM_SINGLE_DECL(aif1_loopback_enum,
+ WM8994_AIF1_CONTROL_2,
+ WM8994_AIF1_LOOPBACK_SHIFT,
+ loopback_text);
static const struct snd_kcontrol_new aif1_loopback =
SOC_DAPM_ENUM("AIF1 Loopback", aif1_loopback_enum);
-static const struct soc_enum aif2_loopback_enum =
- SOC_ENUM_SINGLE(WM8994_AIF2_CONTROL_2, WM8994_AIF2_LOOPBACK_SHIFT, 2,
- loopback_text);
+static SOC_ENUM_SINGLE_DECL(aif2_loopback_enum,
+ WM8994_AIF2_CONTROL_2,
+ WM8994_AIF2_LOOPBACK_SHIFT,
+ loopback_text);
static const struct snd_kcontrol_new aif2_loopback =
SOC_DAPM_ENUM("AIF2 Loopback", aif2_loopback_enum);
-static const struct soc_enum aif1dac_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 0, 2, aif1dac_text);
+static SOC_ENUM_SINGLE_DECL(aif1dac_enum,
+ WM8994_POWER_MANAGEMENT_6, 0, aif1dac_text);
static const struct snd_kcontrol_new aif1dac_mux =
SOC_DAPM_ENUM("AIF1DAC Mux", aif1dac_enum);
@@ -1522,8 +1527,8 @@ static const char *aif2dac_text[] = {
"AIF2DACDAT", "AIF3DACDAT",
};
-static const struct soc_enum aif2dac_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 1, 2, aif2dac_text);
+static SOC_ENUM_SINGLE_DECL(aif2dac_enum,
+ WM8994_POWER_MANAGEMENT_6, 1, aif2dac_text);
static const struct snd_kcontrol_new aif2dac_mux =
SOC_DAPM_ENUM("AIF2DAC Mux", aif2dac_enum);
@@ -1532,8 +1537,8 @@ static const char *aif2adc_text[] = {
"AIF2ADCDAT", "AIF3DACDAT",
};
-static const struct soc_enum aif2adc_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 2, 2, aif2adc_text);
+static SOC_ENUM_SINGLE_DECL(aif2adc_enum,
+ WM8994_POWER_MANAGEMENT_6, 2, aif2adc_text);
static const struct snd_kcontrol_new aif2adc_mux =
SOC_DAPM_ENUM("AIF2ADC Mux", aif2adc_enum);
@@ -1542,14 +1547,14 @@ static const char *aif3adc_text[] = {
"AIF1ADCDAT", "AIF2ADCDAT", "AIF2DACDAT", "Mono PCM",
};
-static const struct soc_enum wm8994_aif3adc_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 3, aif3adc_text);
+static SOC_ENUM_SINGLE_DECL(wm8994_aif3adc_enum,
+ WM8994_POWER_MANAGEMENT_6, 3, aif3adc_text);
static const struct snd_kcontrol_new wm8994_aif3adc_mux =
SOC_DAPM_ENUM("AIF3ADC Mux", wm8994_aif3adc_enum);
-static const struct soc_enum wm8958_aif3adc_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 3, 4, aif3adc_text);
+static SOC_ENUM_SINGLE_DECL(wm8958_aif3adc_enum,
+ WM8994_POWER_MANAGEMENT_6, 3, aif3adc_text);
static const struct snd_kcontrol_new wm8958_aif3adc_mux =
SOC_DAPM_ENUM("AIF3ADC Mux", wm8958_aif3adc_enum);
@@ -1558,8 +1563,8 @@ static const char *mono_pcm_out_text[] = {
"None", "AIF2ADCL", "AIF2ADCR",
};
-static const struct soc_enum mono_pcm_out_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 9, 3, mono_pcm_out_text);
+static SOC_ENUM_SINGLE_DECL(mono_pcm_out_enum,
+ WM8994_POWER_MANAGEMENT_6, 9, mono_pcm_out_text);
static const struct snd_kcontrol_new mono_pcm_out_mux =
SOC_DAPM_ENUM("Mono PCM Out Mux", mono_pcm_out_enum);
@@ -1569,14 +1574,14 @@ static const char *aif2dac_src_text[] = {
};
/* Note that these two control shouldn't be simultaneously switched to AIF3 */
-static const struct soc_enum aif2dacl_src_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 7, 2, aif2dac_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacl_src_enum,
+ WM8994_POWER_MANAGEMENT_6, 7, aif2dac_src_text);
static const struct snd_kcontrol_new aif2dacl_src_mux =
SOC_DAPM_ENUM("AIF2DACL Mux", aif2dacl_src_enum);
-static const struct soc_enum aif2dacr_src_enum =
- SOC_ENUM_SINGLE(WM8994_POWER_MANAGEMENT_6, 8, 2, aif2dac_src_text);
+static SOC_ENUM_SINGLE_DECL(aif2dacr_src_enum,
+ WM8994_POWER_MANAGEMENT_6, 8, aif2dac_src_text);
static const struct snd_kcontrol_new aif2dacr_src_mux =
SOC_DAPM_ENUM("AIF2DACR Mux", aif2dacr_src_enum);
diff --git a/sound/soc/davinci/davinci-evm.c b/sound/soc/davinci/davinci-evm.c
index 70ff3772079f..5e3bc3c6801a 100644
--- a/sound/soc/davinci/davinci-evm.c
+++ b/sound/soc/davinci/davinci-evm.c
@@ -399,6 +399,7 @@ static struct platform_driver davinci_evm_driver = {
.driver = {
.name = "davinci_evm",
.owner = THIS_MODULE,
+ .pm = &snd_soc_pm_ops,
.of_match_table = of_match_ptr(davinci_evm_dt_ids),
},
};
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index b7858bfa0295..670afa29e30d 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -263,7 +263,9 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
unsigned int fmt)
{
struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
+ int ret = 0;
+ pm_runtime_get_sync(mcasp->dev);
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
case SND_SOC_DAIFMT_AC97:
@@ -317,7 +319,8 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
@@ -354,10 +357,12 @@ static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
-
- return 0;
+out:
+ pm_runtime_put_sync(mcasp->dev);
+ return ret;
}
static int davinci_mcasp_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
@@ -448,7 +453,7 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
return 0;
}
-static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream,
+static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
int channels)
{
int i;
@@ -524,12 +529,18 @@ static int davinci_hw_common_param(struct davinci_mcasp *mcasp, int stream,
return 0;
}
-static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream)
+static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
{
int i, active_slots;
u32 mask = 0;
u32 busel = 0;
+ if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) {
+ dev_err(mcasp->dev, "tdm slot %d not supported\n",
+ mcasp->tdm_slots);
+ return -EINVAL;
+ }
+
active_slots = (mcasp->tdm_slots > 31) ? 32 : mcasp->tdm_slots;
for (i = 0; i < active_slots; i++)
mask |= (1 << i);
@@ -539,35 +550,21 @@ static void davinci_hw_param(struct davinci_mcasp *mcasp, int stream)
if (!mcasp->dat_port)
busel = TXSEL;
- if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
- /* bit stream is MSB first with no delay */
- /* DSP_B mode */
- mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
- mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
-
- if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32))
- mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
- FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
- else
- printk(KERN_ERR "playback tdm slot %d not supported\n",
- mcasp->tdm_slots);
- } else {
- /* bit stream is MSB first with no delay */
- /* DSP_B mode */
- mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
- mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
-
- if ((mcasp->tdm_slots >= 2) && (mcasp->tdm_slots <= 32))
- mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
- FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
- else
- printk(KERN_ERR "capture tdm slot %d not supported\n",
- mcasp->tdm_slots);
- }
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
+ mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
+ FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF));
+
+ mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
+ mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
+ mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
+ FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF));
+
+ return 0;
}
/* S/PDIF */
-static void davinci_hw_dit_param(struct davinci_mcasp *mcasp)
+static int mcasp_dit_hw_param(struct davinci_mcasp *mcasp)
{
/* Set the TX format : 24 bit right rotation, 32 bit slot, Pad 0
and LSB first */
@@ -589,6 +586,8 @@ static void davinci_hw_dit_param(struct davinci_mcasp *mcasp)
/* Enable the DIT */
mcasp_set_bits(mcasp, DAVINCI_MCASP_TXDITCTL_REG, DITEN);
+
+ return 0;
}
static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
@@ -605,13 +604,14 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
u8 slots = mcasp->tdm_slots;
u8 active_serializers;
int channels;
+ int ret;
struct snd_interval *pcm_channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
channels = pcm_channels->min;
active_serializers = (channels + slots - 1) / slots;
- if (davinci_hw_common_param(mcasp, substream->stream, channels) == -EINVAL)
+ if (mcasp_common_hw_param(mcasp, substream->stream, channels) == -EINVAL)
return -EINVAL;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
fifo_level = mcasp->txnumevt * active_serializers;
@@ -619,9 +619,12 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
fifo_level = mcasp->rxnumevt * active_serializers;
if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
- davinci_hw_dit_param(mcasp);
+ ret = mcasp_dit_hw_param(mcasp);
else
- davinci_hw_param(mcasp, substream->stream);
+ ret = mcasp_i2s_hw_param(mcasp, substream->stream);
+
+ if (ret)
+ return ret;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_U8:
@@ -678,19 +681,9 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
- ret = pm_runtime_get_sync(mcasp->dev);
- if (IS_ERR_VALUE(ret))
- dev_err(mcasp->dev, "pm_runtime_get_sync() failed\n");
davinci_mcasp_start(mcasp, substream->stream);
break;
-
case SNDRV_PCM_TRIGGER_SUSPEND:
- davinci_mcasp_stop(mcasp, substream->stream);
- ret = pm_runtime_put_sync(mcasp->dev);
- if (IS_ERR_VALUE(ret))
- dev_err(mcasp->dev, "pm_runtime_put_sync() failed\n");
- break;
-
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
davinci_mcasp_stop(mcasp, substream->stream);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index d0c72ed261e7..c84026c99134 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -326,7 +326,7 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMA,
ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(tx_mask));
regmap_update_bits(esai_priv->regmap, REG_ESAI_TSMB,
- ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(tx_mask));
+ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(tx_mask));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RCCR,
ESAI_xCCR_xDC_MASK, ESAI_xCCR_xDC(slots));
@@ -334,7 +334,7 @@ static int fsl_esai_set_dai_tdm_slot(struct snd_soc_dai *dai, u32 tx_mask,
regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMA,
ESAI_xSMA_xS_MASK, ESAI_xSMA_xS(rx_mask));
regmap_update_bits(esai_priv->regmap, REG_ESAI_RSMB,
- ESAI_xSMA_xS_MASK, ESAI_xSMB_xS(rx_mask));
+ ESAI_xSMB_xS_MASK, ESAI_xSMB_xS(rx_mask));
esai_priv->slot_width = slot_width;
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 9c9f957fcae1..75e14033e8d8 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -322,7 +322,7 @@
#define ESAI_xSMB_xS_SHIFT 0
#define ESAI_xSMB_xS_WIDTH 16
#define ESAI_xSMB_xS_MASK (((1 << ESAI_xSMB_xS_WIDTH) - 1) << ESAI_xSMB_xS_SHIFT)
-#define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMA_xS_MASK)
+#define ESAI_xSMB_xS(v) (((v) >> ESAI_xSMA_xS_WIDTH) & ESAI_xSMB_xS_MASK)
/* Port C Direction Register -- REG_ESAI_PRRC 0xF8 */
#define ESAI_PRRC_PDC_SHIFT 0
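
An illustrative sketch of the mask handling the fsl_esai fix touches (the constants below are assumptions modeled on the header, not copied from it): the 32-slot TDM mask is split across two 16-bit registers, with SMA taking the low half and SMB taking the value shifted down by the SMA width and clipped with the SMB mask, as the corrected ESAI_xSMB_xS() macro does.

#include <stdio.h>

#define SMA_WIDTH	16
#define SMA_MASK	((1u << SMA_WIDTH) - 1)
#define SMB_MASK	((1u << 16) - 1)
#define SMA_SLOTS(v)	((v) & SMA_MASK)
#define SMB_SLOTS(v)	(((v) >> SMA_WIDTH) & SMB_MASK)

int main(void)
{
	unsigned int tx_mask = 0x000f000f;	/* slots 0-3 and 16-19 active */

	printf("TSMA=0x%04x TSMB=0x%04x\n", SMA_SLOTS(tx_mask), SMB_SLOTS(tx_mask));
	return 0;
}
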
diff --git a/sound/soc/fsl/imx-mc13783.c b/sound/soc/fsl/imx-mc13783.c
index 79cee782dbbf..a2fd7321b5a9 100644
--- a/sound/soc/fsl/imx-mc13783.c
+++ b/sound/soc/fsl/imx-mc13783.c
@@ -160,7 +160,6 @@ static struct platform_driver imx_mc13783_audio_driver = {
.driver = {
.name = "imx_mc13783",
.owner = THIS_MODULE,
- .pm = &snd_soc_pm_ops,
},
.probe = imx_mc13783_probe,
.remove = imx_mc13783_remove
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index f2beae78969f..1cb22dd034eb 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -33,8 +33,7 @@ struct imx_sgtl5000_data {
static int imx_sgtl5000_dai_init(struct snd_soc_pcm_runtime *rtd)
{
- struct imx_sgtl5000_data *data = container_of(rtd->card,
- struct imx_sgtl5000_data, card);
+ struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(rtd->card);
struct device *dev = rtd->card->dev;
int ret;
@@ -159,13 +158,15 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
data->card.dapm_widgets = imx_sgtl5000_dapm_widgets;
data->card.num_dapm_widgets = ARRAY_SIZE(imx_sgtl5000_dapm_widgets);
+ platform_set_drvdata(pdev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+
ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
goto fail;
}
- platform_set_drvdata(pdev, data);
of_node_put(ssi_np);
of_node_put(codec_np);
@@ -184,7 +185,8 @@ fail:
static int imx_sgtl5000_remove(struct platform_device *pdev)
{
- struct imx_sgtl5000_data *data = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct imx_sgtl5000_data *data = snd_soc_card_get_drvdata(card);
clk_put(data->codec_clk);
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 3fd76bc391de..3a3d17ce6ba4 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -71,7 +71,7 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
{
struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
struct imx_priv *priv = &card_priv;
- struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev);
+ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
struct device *dev = &priv->pdev->dev;
unsigned int pll_out;
int ret;
@@ -137,7 +137,7 @@ static int imx_wm8962_late_probe(struct snd_soc_card *card)
{
struct snd_soc_dai *codec_dai = card->rtd[0].codec_dai;
struct imx_priv *priv = &card_priv;
- struct imx_wm8962_data *data = platform_get_drvdata(priv->pdev);
+ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
struct device *dev = &priv->pdev->dev;
int ret;
@@ -264,13 +264,15 @@ static int imx_wm8962_probe(struct platform_device *pdev)
data->card.late_probe = imx_wm8962_late_probe;
data->card.set_bias_level = imx_wm8962_set_bias_level;
+ platform_set_drvdata(pdev, &data->card);
+ snd_soc_card_set_drvdata(&data->card, data);
+
ret = devm_snd_soc_register_card(&pdev->dev, &data->card);
if (ret) {
dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
goto clk_fail;
}
- platform_set_drvdata(pdev, data);
of_node_put(ssi_np);
of_node_put(codec_np);
@@ -289,7 +291,8 @@ fail:
static int imx_wm8962_remove(struct platform_device *pdev)
{
- struct imx_wm8962_data *data = platform_get_drvdata(pdev);
+ struct snd_soc_card *card = platform_get_drvdata(pdev);
+ struct imx_wm8962_data *data = snd_soc_card_get_drvdata(card);
if (!IS_ERR(data->codec_clk))
clk_disable_unprepare(data->codec_clk);
diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c
index 3fde9e402710..d163e18d85d4 100644
--- a/sound/soc/omap/n810.c
+++ b/sound/soc/omap/n810.c
@@ -305,7 +305,9 @@ static int __init n810_soc_init(void)
int err;
struct device *dev;
- if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax()))
+ if (!of_have_populated_dt() ||
+ (!of_machine_is_compatible("nokia,n810") &&
+ !of_machine_is_compatible("nokia,n810-wimax")))
return -ENODEV;
n810_snd_device = platform_device_alloc("soc-audio", -1);
diff --git a/sound/soc/samsung/Kconfig b/sound/soc/samsung/Kconfig
index 454f41cfc828..350757400391 100644
--- a/sound/soc/samsung/Kconfig
+++ b/sound/soc/samsung/Kconfig
@@ -59,7 +59,7 @@ config SND_SOC_SAMSUNG_JIVE_WM8750
select SND_SOC_WM8750
select SND_S3C2412_SOC_I2S
help
- Sat Y if you want to add support for SoC audio on the Jive.
+ Say Y if you want to add support for SoC audio on the Jive.
config SND_SOC_SAMSUNG_SMDK_WM8580
tristate "SoC I2S Audio support for WM8580 on SMDK"
@@ -145,11 +145,11 @@ config SND_SOC_SAMSUNG_RX1950_UDA1380
config SND_SOC_SAMSUNG_SMDK_WM9713
tristate "SoC AC97 Audio support for SMDK with WM9713"
- depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110 || MACH_SMDKV310 || MACH_SMDKC210)
+ depends on SND_SOC_SAMSUNG && (MACH_SMDK6410 || MACH_SMDKC100 || MACH_SMDKV210 || MACH_SMDKC110)
select SND_SOC_WM9713
select SND_SAMSUNG_AC97
help
- Sat Y if you want to add support for SoC audio on the SMDK.
+ Say Y if you want to add support for SoC audio on the SMDK.
config SND_SOC_SMARTQ
tristate "SoC I2S Audio support for SmartQ board"
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index dc8ff13187f7..b9dc6acbba8c 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -1218,7 +1218,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
ret = regulator_allow_bypass(w->regulator, false);
if (ret != 0)
dev_warn(w->dapm->dev,
- "ASoC: Failed to bypass %s: %d\n",
+ "ASoC: Failed to unbypass %s: %d\n",
w->name, ret);
}
@@ -1228,7 +1228,7 @@ int dapm_regulator_event(struct snd_soc_dapm_widget *w,
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
- "ASoC: Failed to unbypass %s: %d\n",
+ "ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
@@ -3210,15 +3210,11 @@ int snd_soc_dapm_put_pin_switch(struct snd_kcontrol *kcontrol,
struct snd_soc_card *card = snd_kcontrol_chip(kcontrol);
const char *pin = (const char *)kcontrol->private_value;
- mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
-
if (ucontrol->value.integer.value[0])
snd_soc_dapm_enable_pin(&card->dapm, pin);
else
snd_soc_dapm_disable_pin(&card->dapm, pin);
- mutex_unlock(&card->dapm_mutex);
-
snd_soc_dapm_sync(&card->dapm);
return 0;
}
@@ -3248,7 +3244,7 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
ret = regulator_allow_bypass(w->regulator, true);
if (ret != 0)
dev_warn(w->dapm->dev,
- "ASoC: Failed to unbypass %s: %d\n",
+ "ASoC: Failed to bypass %s: %d\n",
w->name, ret);
}
break;
@@ -3767,23 +3763,52 @@ void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
}
/**
+ * snd_soc_dapm_enable_pin_unlocked - enable pin.
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Enables input/output pin and its parents or children widgets iff there is
+ * a valid audio route and active audio stream.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ return snd_soc_dapm_set_pin(dapm, pin, 1);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin_unlocked);
+
+/**
* snd_soc_dapm_enable_pin - enable pin.
* @dapm: DAPM context
* @pin: pin name
*
* Enables input/output pin and its parents or children widgets iff there is
* a valid audio route and active audio stream.
+ *
* NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
* do any widget power switching.
*/
int snd_soc_dapm_enable_pin(struct snd_soc_dapm_context *dapm, const char *pin)
{
- return snd_soc_dapm_set_pin(dapm, pin, 1);
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_set_pin(dapm, pin, 1);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
/**
- * snd_soc_dapm_force_enable_pin - force a pin to be enabled
+ * snd_soc_dapm_force_enable_pin_unlocked - force a pin to be enabled
* @dapm: DAPM context
* @pin: pin name
*
@@ -3791,11 +3816,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_enable_pin);
* intended for use with microphone bias supplies used in microphone
* jack detection.
*
+ * Requires external locking.
+ *
* NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
* do any widget power switching.
*/
-int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
- const char *pin)
+int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
{
struct snd_soc_dapm_widget *w = dapm_find_widget(dapm, pin, true);
@@ -3811,25 +3838,103 @@ int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
return 0;
}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin_unlocked);
+
+/**
+ * snd_soc_dapm_force_enable_pin - force a pin to be enabled
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Enables input/output pin regardless of any other state. This is
+ * intended for use with microphone bias supplies used in microphone
+ * jack detection.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_force_enable_pin(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_force_enable_pin_unlocked(dapm, pin);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
+}
EXPORT_SYMBOL_GPL(snd_soc_dapm_force_enable_pin);
/**
+ * snd_soc_dapm_disable_pin_unlocked - disable pin.
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Disables input/output pin and its parents or children widgets.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_disable_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ return snd_soc_dapm_set_pin(dapm, pin, 0);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin_unlocked);
+
+/**
* snd_soc_dapm_disable_pin - disable pin.
* @dapm: DAPM context
* @pin: pin name
*
* Disables input/output pin and its parents or children widgets.
+ *
* NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
* do any widget power switching.
*/
int snd_soc_dapm_disable_pin(struct snd_soc_dapm_context *dapm,
const char *pin)
{
- return snd_soc_dapm_set_pin(dapm, pin, 0);
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_set_pin(dapm, pin, 0);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);
/**
+ * snd_soc_dapm_nc_pin_unlocked - permanently disable pin.
+ * @dapm: DAPM context
+ * @pin: pin name
+ *
+ * Marks the specified pin as being not connected, disabling it along
+ * any parent or child widgets. At present this is identical to
+ * snd_soc_dapm_disable_pin() but in future it will be extended to do
+ * additional things such as disabling controls which only affect
+ * paths through the pin.
+ *
+ * Requires external locking.
+ *
+ * NOTE: snd_soc_dapm_sync() needs to be called after this for DAPM to
+ * do any widget power switching.
+ */
+int snd_soc_dapm_nc_pin_unlocked(struct snd_soc_dapm_context *dapm,
+ const char *pin)
+{
+ return snd_soc_dapm_set_pin(dapm, pin, 0);
+}
+EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin_unlocked);
+
+/**
* snd_soc_dapm_nc_pin - permanently disable pin.
* @dapm: DAPM context
* @pin: pin name
@@ -3845,7 +3950,15 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_disable_pin);
*/
int snd_soc_dapm_nc_pin(struct snd_soc_dapm_context *dapm, const char *pin)
{
- return snd_soc_dapm_set_pin(dapm, pin, 0);
+ int ret;
+
+ mutex_lock_nested(&dapm->card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+
+ ret = snd_soc_dapm_set_pin(dapm, pin, 0);
+
+ mutex_unlock(&dapm->card->dapm_mutex);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_nc_pin);
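
As a usage sketch for the new *_unlocked variants added above (kernel-context code, not compilable on its own; the jack-event function and pin names are hypothetical), a machine driver can batch several pin changes under one dapm_mutex acquisition and sync once afterwards, instead of taking the mutex per call:

#include <sound/soc.h>
#include <sound/soc-dapm.h>

static void example_headset_event(struct snd_soc_dapm_context *dapm, bool inserted)
{
	struct snd_soc_card *card = dapm->card;

	mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);

	if (inserted) {
		snd_soc_dapm_force_enable_pin_unlocked(dapm, "MICBIAS");
		snd_soc_dapm_enable_pin_unlocked(dapm, "Headset Mic");
	} else {
		snd_soc_dapm_disable_pin_unlocked(dapm, "MICBIAS");
		snd_soc_dapm_disable_pin_unlocked(dapm, "Headset Mic");
	}

	mutex_unlock(&card->dapm_mutex);

	/* Widget power switching still only happens after a sync. */
	snd_soc_dapm_sync(dapm);
}
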
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 47e1ce771e65..28522bd03b8e 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1989,6 +1989,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
if (paths < 0) {
+ dpcm_path_put(&list);
dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
fe->dai_link->name, "playback");
mutex_unlock(&card->mutex);
@@ -2018,6 +2019,7 @@ capture:
paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
if (paths < 0) {
+ dpcm_path_put(&list);
dev_warn(fe->dev, "ASoC: %s no valid %s path\n",
fe->dai_link->name, "capture");
mutex_unlock(&card->mutex);
@@ -2082,6 +2084,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream)
fe->dpcm[stream].runtime = fe_substream->runtime;
if (dpcm_path_get(fe, stream, &list) <= 0) {
+ dpcm_path_put(&list);
dev_dbg(fe->dev, "ASoC: %s no valid %s route\n",
fe->dai_link->name, stream ? "capture" : "playback");
}
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index e0305a148568..9edd68db9f48 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -183,14 +183,16 @@ static int txx9aclc_ac97_dev_probe(struct platform_device *pdev)
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
+
+ drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
+ if (!drvdata)
+ return -ENOMEM;
+
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
drvdata->base = devm_ioremap_resource(&pdev->dev, r);
if (IS_ERR(drvdata->base))
return PTR_ERR(drvdata->base);
- drvdata = devm_kzalloc(&pdev->dev, sizeof(*drvdata), GFP_KERNEL);
- if (!drvdata)
- return -ENOMEM;
platform_set_drvdata(pdev, drvdata);
drvdata->physbase = r->start;
if (sizeof(drvdata->physbase) > sizeof(r->start) &&
diff --git a/sound/usb/Kconfig b/sound/usb/Kconfig
index de9408b83f75..e05a86b7c0da 100644
--- a/sound/usb/Kconfig
+++ b/sound/usb/Kconfig
@@ -14,6 +14,7 @@ config SND_USB_AUDIO
select SND_HWDEP
select SND_RAWMIDI
select SND_PCM
+ select BITREVERSE
help
Say Y here to include support for USB audio and USB MIDI
devices.
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 44b0ba4feab3..1bed780e21d9 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
}
break;
+ case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 32af6b741ef5..d1d72ff50347 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -328,6 +328,11 @@ static struct usbmix_name_map gamecom780_map[] = {
{}
};
+static const struct usbmix_name_map kef_x300a_map[] = {
+ { 10, NULL }, /* firmware locks up (?) when we try to access this FU */
+ { 0 }
+};
+
/*
* Control map entries
*/
@@ -419,6 +424,10 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
.id = USB_ID(0x200c, 0x1018),
.map = ebox44_map,
},
+ {
+ .id = USB_ID(0x27ac, 0x1000),
+ .map = kef_x300a_map,
+ },
{ 0 } /* terminator */
};
diff --git a/tools/lib/lockdep/Makefile b/tools/lib/lockdep/Makefile
index da8b7aa3d351..07b0b7542511 100644
--- a/tools/lib/lockdep/Makefile
+++ b/tools/lib/lockdep/Makefile
@@ -87,8 +87,8 @@ endif # BUILD_SRC
# We process the rest of the Makefile if this is the final invocation of make
ifeq ($(skip-makefile),)
-srctree := $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR))
-objtree := $(CURDIR)
+srctree := $(realpath $(if $(BUILD_SRC),$(BUILD_SRC),$(CURDIR)))
+objtree := $(realpath $(CURDIR))
src := $(srctree)
obj := $(objtree)
@@ -112,7 +112,7 @@ export Q VERBOSE
LIBLOCKDEP_VERSION = $(LL_VERSION).$(LL_PATCHLEVEL).$(LL_EXTRAVERSION)
-INCLUDES = -I. -I/usr/local/include -I./uinclude $(CONFIG_INCLUDES)
+INCLUDES = -I. -I/usr/local/include -I./uinclude -I./include $(CONFIG_INCLUDES)
# Set compile option CFLAGS if not set elsewhere
CFLAGS ?= -g -DCONFIG_LOCKDEP -DCONFIG_STACKTRACE -DCONFIG_PROVE_LOCKING -DBITS_PER_LONG=__WORDSIZE -DLIBLOCKDEP_VERSION='"$(LIBLOCKDEP_VERSION)"' -rdynamic -O0 -g
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index f8465a811aa5..23bd69cb5ade 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -418,7 +418,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
__attribute__((constructor)) static void init_preload(void)
{
- if (__init_state != done)
+ if (__init_state == done)
return;
#ifndef __GLIBC__
diff --git a/tools/lib/lockdep/run_tests.sh b/tools/lib/lockdep/run_tests.sh
index 5334ad9d39b7..5334ad9d39b7 100644..100755
--- a/tools/lib/lockdep/run_tests.sh
+++ b/tools/lib/lockdep/run_tests.sh
diff --git a/tools/lib/lockdep/uinclude/asm/hash.h b/tools/lib/lockdep/uinclude/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/lib/lockdep/uinclude/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/lib/lockdep/uinclude/linux/rcu.h b/tools/lib/lockdep/uinclude/linux/rcu.h
index 4c99fcb5da27..042ee8e463c9 100644
--- a/tools/lib/lockdep/uinclude/linux/rcu.h
+++ b/tools/lib/lockdep/uinclude/linux/rcu.h
@@ -13,4 +13,9 @@ static inline int rcu_is_cpu_idle(void)
return 1;
}
+static inline bool rcu_is_watching(void)
+{
+ return false;
+}
+
#endif
diff --git a/tools/net/Makefile b/tools/net/Makefile
index 004cd74734b6..ee577ea03ba5 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -12,7 +12,7 @@ YACC = bison
all : bpf_jit_disasm bpf_dbg bpf_asm
-bpf_jit_disasm : CFLAGS = -Wall -O2
+bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
bpf_jit_disasm : bpf_jit_disasm.o
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index d4c83c60b9b2..97d86d828190 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -1593,6 +1593,7 @@ static void init_params(struct params *p, const char *name, int argc, const char
p->data_rand_walk = true;
p->nr_loops = -1;
p->init_random = true;
+ p->run_all = argc == 1;
}
static int run_bench_numa(const char *name, const char **argv)
diff --git a/tools/perf/builtin-bench.c b/tools/perf/builtin-bench.c
index e47f90cc7b98..8a987d252780 100644
--- a/tools/perf/builtin-bench.c
+++ b/tools/perf/builtin-bench.c
@@ -76,7 +76,7 @@ static struct collection collections[] = {
/* Iterate over all benchmarks within a collection: */
#define for_each_bench(coll, bench) \
- for (bench = coll->benchmarks; bench->name; bench++)
+ for (bench = coll->benchmarks; bench && bench->name; bench++)
static void dump_benchmarks(struct collection *coll)
{
diff --git a/tools/perf/builtin-buildid-cache.c b/tools/perf/builtin-buildid-cache.c
index cfede86161d8..b22dbb16f877 100644
--- a/tools/perf/builtin-buildid-cache.c
+++ b/tools/perf/builtin-buildid-cache.c
@@ -63,11 +63,35 @@ static int build_id_cache__kcore_dir(char *dir, size_t sz)
return 0;
}
+static bool same_kallsyms_reloc(const char *from_dir, char *to_dir)
+{
+ char from[PATH_MAX];
+ char to[PATH_MAX];
+ const char *name;
+ u64 addr1 = 0, addr2 = 0;
+ int i;
+
+ scnprintf(from, sizeof(from), "%s/kallsyms", from_dir);
+ scnprintf(to, sizeof(to), "%s/kallsyms", to_dir);
+
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr1 = kallsyms__get_function_start(from, name);
+ if (addr1)
+ break;
+ }
+
+ if (name)
+ addr2 = kallsyms__get_function_start(to, name);
+
+ return addr1 == addr2;
+}
+
static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
size_t to_dir_sz)
{
char from[PATH_MAX];
char to[PATH_MAX];
+ char to_subdir[PATH_MAX];
struct dirent *dent;
int ret = -1;
DIR *d;
@@ -86,10 +110,11 @@ static int build_id_cache__kcore_existing(const char *from_dir, char *to_dir,
continue;
scnprintf(to, sizeof(to), "%s/%s/modules", to_dir,
dent->d_name);
- if (!compare_proc_modules(from, to)) {
- scnprintf(to, sizeof(to), "%s/%s", to_dir,
- dent->d_name);
- strlcpy(to_dir, to, to_dir_sz);
+ scnprintf(to_subdir, sizeof(to_subdir), "%s/%s",
+ to_dir, dent->d_name);
+ if (!compare_proc_modules(from, to) &&
+ same_kallsyms_reloc(from_dir, to_subdir)) {
+ strlcpy(to_dir, to_subdir, to_dir_sz);
ret = 0;
break;
}
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 3c394bf16fa8..af47531b82ec 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -287,10 +287,7 @@ static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
* have no _text sometimes.
*/
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_text");
- if (err < 0)
- err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_stext");
+ machine);
if (err < 0)
pr_err("Couldn't record guest kernel [%d]'s reference"
" relocation symbol.\n", machine->pid);
@@ -457,10 +454,7 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
}
err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_text");
- if (err < 0)
- err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
- machine, "_stext");
+ machine);
if (err < 0)
pr_err("Couldn't record kernel reference relocation symbol\n"
"Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 3c53ec268fbc..02f985f3a396 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -113,14 +113,16 @@ static int report__add_mem_hist_entry(struct perf_tool *tool, struct addr_locati
if (!he)
return -ENOMEM;
- err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
- if (err)
- goto out;
+ if (ui__has_annotation()) {
+ err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+ if (err)
+ goto out;
- mx = he->mem_info;
- err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
- if (err)
- goto out;
+ mx = he->mem_info;
+ err = addr_map_symbol__inc_samples(&mx->daddr, evsel->idx);
+ if (err)
+ goto out;
+ }
evsel->hists.stats.total_period += cost;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -164,14 +166,18 @@ static int report__add_branch_hist_entry(struct perf_tool *tool, struct addr_loc
he = __hists__add_entry(&evsel->hists, al, parent, &bi[i], NULL,
1, 1, 0);
if (he) {
- bx = he->branch_info;
- err = addr_map_symbol__inc_samples(&bx->from, evsel->idx);
- if (err)
- goto out;
-
- err = addr_map_symbol__inc_samples(&bx->to, evsel->idx);
- if (err)
- goto out;
+ if (ui__has_annotation()) {
+ bx = he->branch_info;
+ err = addr_map_symbol__inc_samples(&bx->from,
+ evsel->idx);
+ if (err)
+ goto out;
+
+ err = addr_map_symbol__inc_samples(&bx->to,
+ evsel->idx);
+ if (err)
+ goto out;
+ }
evsel->hists.stats.total_period += 1;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
@@ -205,7 +211,9 @@ static int report__add_hist_entry(struct perf_tool *tool, struct perf_evsel *evs
if (err)
goto out;
- err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+ if (ui__has_annotation())
+ err = hist_entry__inc_addr_samples(he, evsel->idx, al->addr);
+
evsel->hists.stats.total_period += sample->period;
hists__inc_nr_events(&evsel->hists, PERF_RECORD_SAMPLE);
out:
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 76cd510d34d0..5f989a7d8bc2 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -176,7 +176,7 @@ static void perf_top__record_precise_ip(struct perf_top *top,
{
struct annotation *notes;
struct symbol *sym;
- int err;
+ int err = 0;
if (he == NULL || he->ms.sym == NULL ||
((top->sym_filter_entry == NULL ||
@@ -190,7 +190,9 @@ static void perf_top__record_precise_ip(struct perf_top *top,
return;
ip = he->ms.map->map_ip(he->ms.map, ip);
- err = hist_entry__inc_addr_samples(he, counter, ip);
+
+ if (ui__has_annotation())
+ err = hist_entry__inc_addr_samples(he, counter, ip);
pthread_mutex_unlock(&notes->lock);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 896f27047ed6..f954c26de231 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -37,6 +37,10 @@
# define MADV_UNMERGEABLE 13
#endif
+#ifndef EFD_SEMAPHORE
+# define EFD_SEMAPHORE 1
+#endif
+
struct tp_field {
int offset;
union {
@@ -279,6 +283,11 @@ static size_t syscall_arg__scnprintf_strarray(char *bf, size_t size,
#define SCA_STRARRAY syscall_arg__scnprintf_strarray
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches as soon as the ioctl beautifier
+ * gets rewritten to support all arches.
+ */
static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
struct syscall_arg *arg)
{
@@ -286,6 +295,7 @@ static size_t syscall_arg__scnprintf_strhexarray(char *bf, size_t size,
}
#define SCA_STRHEXARRAY syscall_arg__scnprintf_strhexarray
+#endif /* defined(__i386__) || defined(__x86_64__) */
static size_t syscall_arg__scnprintf_fd(char *bf, size_t size,
struct syscall_arg *arg);
@@ -815,7 +825,6 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal
P_SIGNUM(PIPE);
P_SIGNUM(ALRM);
P_SIGNUM(TERM);
- P_SIGNUM(STKFLT);
P_SIGNUM(CHLD);
P_SIGNUM(CONT);
P_SIGNUM(STOP);
@@ -831,6 +840,15 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal
P_SIGNUM(IO);
P_SIGNUM(PWR);
P_SIGNUM(SYS);
+#ifdef SIGEMT
+ P_SIGNUM(EMT);
+#endif
+#ifdef SIGSTKFLT
+ P_SIGNUM(STKFLT);
+#endif
+#ifdef SIGSWI
+ P_SIGNUM(SWI);
+#endif
default: break;
}
@@ -839,6 +857,10 @@ static size_t syscall_arg__scnprintf_signum(char *bf, size_t size, struct syscal
#define SCA_SIGNUM syscall_arg__scnprintf_signum
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches.
+ */
#define TCGETS 0x5401
static const char *tioctls[] = {
@@ -860,6 +882,7 @@ static const char *tioctls[] = {
};
static DEFINE_STRARRAY_OFFSET(tioctls, 0x5401);
+#endif /* defined(__i386__) || defined(__x86_64__) */
#define STRARRAY(arg, name, array) \
.arg_scnprintf = { [arg] = SCA_STRARRAY, }, \
@@ -941,9 +964,16 @@ static struct syscall_fmt {
{ .name = "getrlimit", .errmsg = true, STRARRAY(0, resource, rlimit_resources), },
{ .name = "ioctl", .errmsg = true,
.arg_scnprintf = { [0] = SCA_FD, /* fd */
+#if defined(__i386__) || defined(__x86_64__)
+/*
+ * FIXME: Make this available to all arches.
+ */
[1] = SCA_STRHEXARRAY, /* cmd */
[2] = SCA_HEX, /* arg */ },
.arg_parm = { [1] = &strarray__tioctls, /* cmd */ }, },
+#else
+ [2] = SCA_HEX, /* arg */ }, },
+#endif
{ .name = "kill", .errmsg = true,
.arg_scnprintf = { [1] = SCA_SIGNUM, /* sig */ }, },
{ .name = "linkat", .errmsg = true,
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index c48d44958172..0331ea2701a3 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -478,7 +478,7 @@ else
endif
ifeq ($(feature-libbfd), 1)
- EXTLIBS += -lbfd
+ EXTLIBS += -lbfd -lz -liberty
endif
ifdef NO_DEMANGLE
diff --git a/tools/perf/config/feature-checks/Makefile b/tools/perf/config/feature-checks/Makefile
index 12e551346fa6..523b7bc10553 100644
--- a/tools/perf/config/feature-checks/Makefile
+++ b/tools/perf/config/feature-checks/Makefile
@@ -121,7 +121,7 @@ test-libpython-version.bin:
$(BUILD) $(FLAGS_PYTHON_EMBED)
test-libbfd.bin:
- $(BUILD) -DPACKAGE='"perf"' -lbfd -ldl
+ $(BUILD) -DPACKAGE='"perf"' -lbfd -lz -liberty -ldl
test-liberty.bin:
$(CC) -o $(OUTPUT)$@ test-libbfd.c -DPACKAGE='"perf"' -lbfd -ldl -liberty
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index 67e5d0cace85..63a0e6f04a01 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -454,7 +454,6 @@ So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
will need at least this:
- asm/perf_event.h - a basic stub will suffice at first
- support for atomic64 types (and associated helper functions)
- - set_perf_event_pending() implemented
If your architecture does have hardware capabilities, you can override the
weak stub hw_perf_event_init() to register hardware counters.
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 7daa806d9050..e84fa26bc1be 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -100,8 +100,8 @@
#ifdef __aarch64__
#define mb() asm volatile("dmb ish" ::: "memory")
-#define wmb() asm volatile("dmb ishld" ::: "memory")
-#define rmb() asm volatile("dmb ishst" ::: "memory")
+#define wmb() asm volatile("dmb ishst" ::: "memory")
+#define rmb() asm volatile("dmb ishld" ::: "memory")
#define cpu_relax() asm volatile("yield" ::: "memory")
#endif
diff --git a/tools/perf/tests/vmlinux-kallsyms.c b/tools/perf/tests/vmlinux-kallsyms.c
index 2bd13edcbc17..3d9088003a5b 100644
--- a/tools/perf/tests/vmlinux-kallsyms.c
+++ b/tools/perf/tests/vmlinux-kallsyms.c
@@ -26,7 +26,6 @@ int test__vmlinux_matches_kallsyms(void)
struct map *kallsyms_map, *vmlinux_map;
struct machine kallsyms, vmlinux;
enum map_type type = MAP__FUNCTION;
- struct ref_reloc_sym ref_reloc_sym = { .name = "_stext", };
u64 mem_start, mem_end;
/*
@@ -70,14 +69,6 @@ int test__vmlinux_matches_kallsyms(void)
*/
kallsyms_map = machine__kernel_map(&kallsyms, type);
- sym = map__find_symbol_by_name(kallsyms_map, ref_reloc_sym.name, NULL);
- if (sym == NULL) {
- pr_debug("dso__find_symbol_by_name ");
- goto out;
- }
-
- ref_reloc_sym.addr = UM(sym->start);
-
/*
* Step 5:
*
@@ -89,7 +80,6 @@ int test__vmlinux_matches_kallsyms(void)
}
vmlinux_map = machine__kernel_map(&vmlinux, type);
- map__kmap(vmlinux_map)->ref_reloc_sym = &ref_reloc_sym;
/*
* Step 6:
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 469eb679fb9d..3aa555ff9d89 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -8,6 +8,8 @@
*/
#include "util.h"
+#include "ui/ui.h"
+#include "sort.h"
#include "build-id.h"
#include "color.h"
#include "cache.h"
@@ -489,7 +491,7 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
{
struct annotation *notes;
- if (sym == NULL || use_browser != 1 || !sort__has_sym)
+ if (sym == NULL)
return 0;
notes = symbol__annotation(sym);
@@ -1399,3 +1401,8 @@ int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}
+
+bool ui__has_annotation(void)
+{
+ return use_browser == 1 && sort__has_sym;
+}
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index b2aef59d6bb2..56ad4f5287de 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -151,6 +151,8 @@ void symbol__annotate_zero_histogram(struct symbol *sym, int evidx);
void symbol__annotate_decay_histogram(struct symbol *sym, int evidx);
void disasm__purge(struct list_head *head);
+bool ui__has_annotation(void);
+
int symbol__tty_annotate(struct symbol *sym, struct map *map,
struct perf_evsel *evsel, bool print_lines,
bool full_paths, int min_pcnt, int max_lines);
diff --git a/tools/perf/util/event.c b/tools/perf/util/event.c
index 1fc1c2f04772..b0f3ca850e9e 100644
--- a/tools/perf/util/event.c
+++ b/tools/perf/util/event.c
@@ -470,23 +470,32 @@ static int find_symbol_cb(void *arg, const char *name, char type,
return 1;
}
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name)
+{
+ struct process_symbol_args args = { .name = symbol_name, };
+
+ if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
+ return 0;
+
+ return args.start;
+}
+
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
- struct machine *machine,
- const char *symbol_name)
+ struct machine *machine)
{
size_t size;
- const char *filename, *mmap_name;
- char path[PATH_MAX];
+ const char *mmap_name;
char name_buff[PATH_MAX];
struct map *map;
+ struct kmap *kmap;
int err;
/*
* We should get this from /sys/kernel/sections/.text, but till that is
* available use this, and after it is use this as a fallback for older
* kernels.
*/
- struct process_symbol_args args = { .name = symbol_name, };
union perf_event *event = zalloc((sizeof(event->mmap) +
machine->id_hdr_size));
if (event == NULL) {
@@ -502,30 +511,19 @@ int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
* see kernel/perf_event.c __perf_event_mmap
*/
event->header.misc = PERF_RECORD_MISC_KERNEL;
- filename = "/proc/kallsyms";
} else {
event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
- if (machine__is_default_guest(machine))
- filename = (char *) symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
- }
-
- if (kallsyms__parse(filename, &args, find_symbol_cb) <= 0) {
- free(event);
- return -ENOENT;
}
map = machine->vmlinux_maps[MAP__FUNCTION];
+ kmap = map__kmap(map);
size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
- "%s%s", mmap_name, symbol_name) + 1;
+ "%s%s", mmap_name, kmap->ref_reloc_sym->name) + 1;
size = PERF_ALIGN(size, sizeof(u64));
event->mmap.header.type = PERF_RECORD_MMAP;
event->mmap.header.size = (sizeof(event->mmap) -
(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
- event->mmap.pgoff = args.start;
+ event->mmap.pgoff = kmap->ref_reloc_sym->addr;
event->mmap.start = map->start;
event->mmap.len = map->end - event->mmap.start;
event->mmap.pid = machine->pid;
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index faf6e219be21..851fa06f4a42 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -214,8 +214,7 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
struct machine *machine, bool mmap_data);
int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
perf_event__handler_t process,
- struct machine *machine,
- const char *symbol_name);
+ struct machine *machine);
int perf_event__synthesize_modules(struct perf_tool *tool,
perf_event__handler_t process,
@@ -279,4 +278,7 @@ size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
size_t perf_event__fprintf(union perf_event *event, FILE *fp);
+u64 kallsyms__get_function_start(const char *kallsyms_filename,
+ const char *symbol_name);
+
#endif /* __PERF_RECORD_H */
diff --git a/tools/perf/util/include/asm/hash.h b/tools/perf/util/include/asm/hash.h
new file mode 100644
index 000000000000..d82b170bb216
--- /dev/null
+++ b/tools/perf/util/include/asm/hash.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_GENERIC_HASH_H
+#define __ASM_GENERIC_HASH_H
+
+/* Stub */
+
+#endif /* __ASM_GENERIC_HASH_H */
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index 45cf10a562bd..dadfa7e54287 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -87,13 +87,15 @@ static __always_inline unsigned long __ffs(unsigned long word)
return num;
}
+typedef const unsigned long __attribute__((__may_alias__)) long_alias_t;
+
/*
* Find the first set bit in a memory region.
*/
static inline unsigned long
find_first_bit(const unsigned long *addr, unsigned long size)
{
- const unsigned long *p = addr;
+ long_alias_t *p = (long_alias_t *) addr;
unsigned long result = 0;
unsigned long tmp;
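The __may_alias__ typedef lets find_first_bit() read the bitmap words without tripping strict-aliasing rules when the caller also touches the same memory through another type. A tiny stand-alone illustration of the attribute (not from the patch; userspace, GCC/Clang):

    #include <stdio.h>

    /* Accesses through a may_alias-qualified type may alias anything, like char. */
    typedef unsigned long __attribute__((__may_alias__)) ul_alias_t;

    static unsigned long first_word(const void *buf)
    {
            const ul_alias_t *p = buf;
            return p[0];
    }

    int main(void)
    {
            unsigned char raw[sizeof(unsigned long)] = { 0x10 };

            /* Prints 0x10 on a little-endian machine. */
            printf("%#lx\n", first_word(raw));
            return 0;
    }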
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index ded74590b92f..620a1983b76b 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,19 +496,22 @@ static int symbol__in_kernel(void *arg, const char *name,
return 1;
}
+static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
+ size_t bufsz)
+{
+ if (machine__is_default_guest(machine))
+ scnprintf(buf, bufsz, "%s", symbol_conf.default_guest_kallsyms);
+ else
+ scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
+}
+
/* Figure out the start address of kernel map from /proc/kallsyms */
static u64 machine__get_kernel_start_addr(struct machine *machine)
{
- const char *filename;
- char path[PATH_MAX];
+ char filename[PATH_MAX];
struct process_args args;
- if (machine__is_default_guest(machine))
- filename = (char *)symbol_conf.default_guest_kallsyms;
- else {
- sprintf(path, "%s/proc/kallsyms", machine->root_dir);
- filename = path;
- }
+ machine__get_kallsyms_filename(machine, filename, PATH_MAX);
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return 0;
@@ -829,9 +832,25 @@ static int machine__create_modules(struct machine *machine)
return 0;
}
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
int machine__create_kernel_maps(struct machine *machine)
{
struct dso *kernel = machine__get_kernel(machine);
+ char filename[PATH_MAX];
+ const char *name;
+ u64 addr = 0;
+ int i;
+
+ machine__get_kallsyms_filename(machine, filename, PATH_MAX);
+
+ for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+ addr = kallsyms__get_function_start(filename, name);
+ if (addr)
+ break;
+ }
+ if (!addr)
+ return -1;
if (kernel == NULL ||
__machine__create_kernel_maps(machine, kernel) < 0)
@@ -850,6 +869,13 @@ int machine__create_kernel_maps(struct machine *machine)
* Now that we have all the maps created, just set the ->end of them:
*/
map_groups__fixup_end(&machine->kmaps);
+
+ if (maps__set_kallsyms_ref_reloc_sym(machine->vmlinux_maps, name,
+ addr)) {
+ machine__destroy_kernel_maps(machine);
+ return -1;
+ }
+
return 0;
}
@@ -1187,7 +1213,7 @@ static void ip__resolve_ams(struct machine *machine, struct thread *thread,
*/
thread__find_addr_location(thread, machine, m, MAP__FUNCTION,
ip, &al);
- if (al.sym)
+ if (al.map)
goto found;
}
found:
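ref_reloc_sym_names[] plus kallsyms__get_function_start() give one shared pattern, used here and in builtin-buildid-cache.c, for choosing the kernel's reference relocation symbol: prefer "_text" and fall back to "_stext". A condensed sketch of that walk, relying only on the helpers added by this patch:

    /* Sketch: resolve the reference symbol's address from a kallsyms file. */
    static u64 example_ref_reloc_addr(const char *kallsyms_filename,
                                      const char **namep)
    {
            const char *name;
            u64 addr = 0;
            int i;

            for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
                    addr = kallsyms__get_function_start(kallsyms_filename, name);
                    if (addr)
                            break;          /* "_text" found; no need for "_stext" */
            }

            *namep = addr ? name : NULL;
            return addr;
    }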
diff --git a/tools/perf/util/machine.h b/tools/perf/util/machine.h
index 477133015440..f77e91e483dc 100644
--- a/tools/perf/util/machine.h
+++ b/tools/perf/util/machine.h
@@ -18,6 +18,8 @@ union perf_event;
#define HOST_KERNEL_ID (-1)
#define DEFAULT_GUEST_KERNEL_ID (0)
+extern const char *ref_reloc_sym_names[];
+
struct machine {
struct rb_node rb_node;
pid_t pid;
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index 3b97513f0e77..39cd2d0faff6 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -39,6 +39,7 @@ void map__init(struct map *map, enum map_type type,
map->start = start;
map->end = end;
map->pgoff = pgoff;
+ map->reloc = 0;
map->dso = dso;
map->map_ip = map__map_ip;
map->unmap_ip = map__unmap_ip;
@@ -288,7 +289,7 @@ u64 map__rip_2objdump(struct map *map, u64 rip)
if (map->dso->rel)
return rip - map->pgoff;
- return map->unmap_ip(map, rip);
+ return map->unmap_ip(map, rip) - map->reloc;
}
/**
@@ -311,7 +312,7 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
if (map->dso->rel)
return map->unmap_ip(map, ip + map->pgoff);
- return ip;
+ return ip + map->reloc;
}
void map_groups__init(struct map_groups *mg)
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 18068c6b71c1..257e513205ce 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -36,6 +36,7 @@ struct map {
bool erange_warned;
u32 priv;
u64 pgoff;
+ u64 reloc;
u32 maj, min; /* only valid for MMAP2 record */
u64 ino; /* only valid for MMAP2 record */
u64 ino_generation;/* only valid for MMAP2 record */
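map->reloc records how far the running kernel is displaced from the addresses in vmlinux (ref_reloc_sym->addr minus its unrelocated_addr), so the two objdump conversions become exact inverses of each other. A simplified, stand-alone model of the !dso->rel case with pgoff == 0 and made-up addresses:

    #include <stdio.h>
    #include <stdint.h>

    /* Simplified model of a perf "struct map" for the !dso->rel case. */
    struct fake_map {
            uint64_t start;   /* runtime address the text is mapped at */
            uint64_t reloc;   /* runtime address - vmlinux (objdump) address */
    };

    static uint64_t unmap_ip(const struct fake_map *m, uint64_t rip)
    {
            return rip + m->start;                /* map-relative -> runtime */
    }

    static uint64_t rip_2objdump(const struct fake_map *m, uint64_t rip)
    {
            return unmap_ip(m, rip) - m->reloc;   /* runtime -> vmlinux */
    }

    int main(void)
    {
            /* Kernel relocated up by 2 MiB at boot (e.g. kexec). */
            struct fake_map m = { .start = 0xffffffff81200000ULL,
                                  .reloc = 0x200000ULL };

            /* A sample 0x1000 into the mapped text lands on the vmlinux
             * address objdump/annotation expects: 0xffffffff81001000. */
            printf("%#llx\n", (unsigned long long)rip_2objdump(&m, 0x1000));
            return 0;
    }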
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d248fca6d7ed..1e15df10a88c 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1091,12 +1091,12 @@ int is_valid_tracepoint(const char *event_string)
static bool is_event_supported(u8 type, unsigned config)
{
bool ret = true;
+ int open_return;
struct perf_evsel *evsel;
struct perf_event_attr attr = {
.type = type,
.config = config,
.disabled = 1,
- .exclude_kernel = 1,
};
struct {
struct thread_map map;
@@ -1108,7 +1108,20 @@ static bool is_event_supported(u8 type, unsigned config)
evsel = perf_evsel__new(&attr);
if (evsel) {
- ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ open_return = perf_evsel__open(evsel, NULL, &tmap.map);
+ ret = open_return >= 0;
+
+ if (open_return == -EACCES) {
+ /*
+ * This happens if the paranoid value in
+ * /proc/sys/kernel/perf_event_paranoid is set to 2.
+ * Re-run with exclude_kernel set; we don't do that
+ * by default as some ARM machines do not support it.
+ *
+ */
+ evsel->attr.exclude_kernel = 1;
+ ret = perf_evsel__open(evsel, NULL, &tmap.map) >= 0;
+ }
perf_evsel__delete(evsel);
}
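The retry mirrors what any raw perf_event_open() caller must do when /proc/sys/kernel/perf_event_paranoid is 2: the first attempt fails with EACCES, and excluding kernel samples lets an unprivileged user proceed. A minimal stand-alone Linux sketch:

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/types.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                               int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr = {
                    .type = PERF_TYPE_HARDWARE,
                    .size = sizeof(attr),
                    .config = PERF_COUNT_HW_CPU_CYCLES,
                    .disabled = 1,
            };
            int fd = perf_event_open(&attr, 0, -1, -1, 0);

            if (fd < 0 && errno == EACCES) {
                    /* perf_event_paranoid >= 2: retry counting user space only. */
                    attr.exclude_kernel = 1;
                    fd = perf_event_open(&attr, 0, -1, -1, 0);
            }

            if (fd < 0) {
                    fprintf(stderr, "perf_event_open: %s\n", strerror(errno));
                    return 1;
            }

            close(fd);
            return 0;
    }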
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index a8a9b6cd93a8..d8b048c20cde 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -336,8 +336,8 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
return ret;
for (i = 0; i < ntevs && ret >= 0; i++) {
+ /* point.address is the address of point.symbol + point.offset */
offset = tevs[i].point.address - stext;
- offset += tevs[i].point.offset;
tevs[i].point.offset = 0;
zfree(&tevs[i].point.symbol);
ret = e_snprintf(buf, 32, "0x%lx", offset);
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0b39a48e5110..5da6ce74c676 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -1008,6 +1008,12 @@ static int perf_session__process_user_event(struct perf_session *session, union
if (err == 0)
perf_session__set_id_hdr_size(session);
return err;
+ case PERF_RECORD_HEADER_EVENT_TYPE:
+ /*
+ * Deprecated, but we need to handle it for the sake
+ * of old data files created in pipe mode.
+ */
+ return 0;
case PERF_RECORD_HEADER_TRACING_DATA:
/* setup for reading amidst mmap */
lseek(fd, file_offset, SEEK_SET);
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 759456728703..516d19fb999b 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -151,15 +151,15 @@ Elf_Scn *elf_section_by_name(Elf *elf, GElf_Ehdr *ep,
gelf_getshdr(sec, shp);
str = elf_strptr(elf, ep->e_shstrndx, shp->sh_name);
- if (!strcmp(name, str)) {
+ if (str && !strcmp(name, str)) {
if (idx)
*idx = cnt;
- break;
+ return sec;
}
++cnt;
}
- return sec;
+ return NULL;
}
#define elf_section__for_each_rel(reldata, pos, pos_mem, idx, nr_entries) \
@@ -751,6 +751,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
if (strcmp(elf_name, kmap->ref_reloc_sym->name))
continue;
kmap->ref_reloc_sym->unrelocated_addr = sym.st_value;
+ map->reloc = kmap->ref_reloc_sym->addr -
+ kmap->ref_reloc_sym->unrelocated_addr;
break;
}
}
@@ -922,6 +924,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
(u64)shdr.sh_offset);
sym.st_value -= shdr.sh_addr - shdr.sh_offset;
}
+new_symbol:
/*
* We need to figure out if the object was created from C++ sources
* DWARF DW_compile_unit has this, but we don't always have access
@@ -933,7 +936,6 @@ int dso__load_sym(struct dso *dso, struct map *map,
if (demangled != NULL)
elf_name = demangled;
}
-new_symbol:
f = symbol__new(sym.st_value, sym.st_size,
GELF_ST_BIND(sym.st_info), elf_name);
free(demangled);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 39ce9adbaaf0..e89afc097d8a 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -627,7 +627,7 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
* kernel range is broken in several maps, named [kernel].N, as we don't have
* the original ELF section names vmlinux have.
*/
-static int dso__split_kallsyms(struct dso *dso, struct map *map,
+static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
symbol_filter_t filter)
{
struct map_groups *kmaps = map__kmap(map)->kmaps;
@@ -692,6 +692,12 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
char dso_name[PATH_MAX];
struct dso *ndso;
+ if (delta) {
+ /* Kernel was relocated at boot time */
+ pos->start -= delta;
+ pos->end -= delta;
+ }
+
if (count == 0) {
curr_map = map;
goto filter_symbol;
@@ -721,6 +727,10 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map,
curr_map->map_ip = curr_map->unmap_ip = identity__map_ip;
map_groups__insert(kmaps, curr_map);
++kernel_range;
+ } else if (delta) {
+ /* Kernel was relocated at boot time */
+ pos->start -= delta;
+ pos->end -= delta;
}
filter_symbol:
if (filter && filter(curr_map, pos)) {
@@ -976,6 +986,23 @@ static int validate_kcore_modules(const char *kallsyms_filename,
return 0;
}
+static int validate_kcore_addresses(const char *kallsyms_filename,
+ struct map *map)
+{
+ struct kmap *kmap = map__kmap(map);
+
+ if (kmap->ref_reloc_sym && kmap->ref_reloc_sym->name) {
+ u64 start;
+
+ start = kallsyms__get_function_start(kallsyms_filename,
+ kmap->ref_reloc_sym->name);
+ if (start != kmap->ref_reloc_sym->addr)
+ return -EINVAL;
+ }
+
+ return validate_kcore_modules(kallsyms_filename, map);
+}
+
struct kcore_mapfn_data {
struct dso *dso;
enum map_type type;
@@ -1019,8 +1046,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
kallsyms_filename))
return -EINVAL;
- /* All modules must be present at their original addresses */
- if (validate_kcore_modules(kallsyms_filename, map))
+ /* Modules and kernel must be present at their original addresses */
+ if (validate_kcore_addresses(kallsyms_filename, map))
return -EINVAL;
md.dso = dso;
@@ -1113,15 +1140,41 @@ out_err:
return -EINVAL;
}
+/*
+ * If the kernel is relocated at boot time, kallsyms won't match. Compute the
+ * delta based on the relocation reference symbol.
+ */
+static int kallsyms__delta(struct map *map, const char *filename, u64 *delta)
+{
+ struct kmap *kmap = map__kmap(map);
+ u64 addr;
+
+ if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->name)
+ return 0;
+
+ addr = kallsyms__get_function_start(filename,
+ kmap->ref_reloc_sym->name);
+ if (!addr)
+ return -1;
+
+ *delta = addr - kmap->ref_reloc_sym->addr;
+ return 0;
+}
+
int dso__load_kallsyms(struct dso *dso, const char *filename,
struct map *map, symbol_filter_t filter)
{
+ u64 delta = 0;
+
if (symbol__restricted_filename(filename, "/proc/kallsyms"))
return -1;
if (dso__load_all_kallsyms(dso, filename, map) < 0)
return -1;
+ if (kallsyms__delta(map, filename, &delta))
+ return -1;
+
symbols__fixup_duplicate(&dso->symbols[map->type]);
symbols__fixup_end(&dso->symbols[map->type]);
@@ -1133,7 +1186,7 @@ int dso__load_kallsyms(struct dso *dso, const char *filename,
if (!dso__load_kcore(dso, map, filename))
return dso__split_kallsyms_for_kcore(dso, map, filter);
else
- return dso__split_kallsyms(dso, map, filter);
+ return dso__split_kallsyms(dso, map, delta, filter);
}
static int dso__load_perf_map(struct dso *dso, struct map *map,
@@ -1283,6 +1336,8 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
if (syms_ss && runtime_ss)
break;
+ } else {
+ symsrc__destroy(ss);
}
}
@@ -1424,7 +1479,7 @@ static int find_matching_kcore(struct map *map, char *dir, size_t dir_sz)
continue;
scnprintf(kallsyms_filename, sizeof(kallsyms_filename),
"%s/%s/kallsyms", dir, dent->d_name);
- if (!validate_kcore_modules(kallsyms_filename, map)) {
+ if (!validate_kcore_addresses(kallsyms_filename, map)) {
strlcpy(dir, kallsyms_filename, dir_sz);
ret = 0;
break;
@@ -1479,7 +1534,7 @@ static char *dso__find_kallsyms(struct dso *dso, struct map *map)
if (fd != -1) {
close(fd);
/* If module maps match go with /proc/kallsyms */
- if (!validate_kcore_modules("/proc/kallsyms", map))
+ if (!validate_kcore_addresses("/proc/kallsyms", map))
goto proc_kallsyms;
}
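kallsyms__delta() boils the whole boot-time-relocation fixup down to one subtraction: the reference symbol's address in the kallsyms file being loaded minus the address stored in ref_reloc_sym, and dso__split_kallsyms() then shifts every symbol by that delta. A small numeric illustration with made-up addresses:

    #include <stdio.h>
    #include <stdint.h>

    struct sym { const char *name; uint64_t start, end; };

    int main(void)
    {
            /* "_text" as recorded when perf.data was captured ... */
            uint64_t ref_reloc_addr = 0xffffffff81000000ULL;
            /* ... and as reported by the kallsyms file being loaded now
             * (kernel relocated up by 16 MiB at boot). */
            uint64_t kallsyms_addr  = 0xffffffff82000000ULL;
            uint64_t delta = kallsyms_addr - ref_reloc_addr;

            struct sym s = { "schedule", 0xffffffff820a1000ULL,
                                         0xffffffff820a1200ULL };

            /* dso__split_kallsyms() applies the same shift to every symbol. */
            s.start -= delta;
            s.end   -= delta;

            printf("%s: %#llx-%#llx\n", s.name,
                   (unsigned long long)s.start, (unsigned long long)s.end);
            return 0;
    }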
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index d66418237d21..aa290c0de6f5 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -201,6 +201,7 @@ int main(int argc, char **argv)
msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666);
if (msgque.msq_id == -1) {
+ err = -errno;
printf("Can't create queue\n");
goto err_out;
}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index be456ce264d0..8ca405cd7c1a 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -24,6 +24,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
+#include <linux/uaccess.h>
#include <linux/irqchip/arm-gic.h>
diff --git a/virt/kvm/coalesced_mmio.c b/virt/kvm/coalesced_mmio.c
index 88b2fe3ddf42..00d86427af0f 100644
--- a/virt/kvm/coalesced_mmio.c
+++ b/virt/kvm/coalesced_mmio.c
@@ -154,17 +154,13 @@ int kvm_vm_ioctl_register_coalesced_mmio(struct kvm *kvm,
list_add_tail(&dev->list, &kvm->coalesced_zones);
mutex_unlock(&kvm->slots_lock);
- return ret;
+ return 0;
out_free_dev:
mutex_unlock(&kvm->slots_lock);
-
kfree(dev);
- if (dev == NULL)
- return -ENXIO;
-
- return 0;
+ return ret;
}
int kvm_vm_ioctl_unregister_coalesced_mmio(struct kvm *kvm,