-rw-r--r--.mailmap1
-rw-r--r--Documentation/ABI/testing/sysfs-class-mei15
-rw-r--r--Documentation/ABI/testing/sysfs-platform-dell-laptop60
-rw-r--r--Documentation/devicetree/bindings/arm/arm-boards2
-rw-r--r--Documentation/devicetree/bindings/arm/fw-cfg.txt72
-rw-r--r--Documentation/devicetree/bindings/graph.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-st.txt2
-rw-r--r--Documentation/devicetree/bindings/i2c/trivial-devices.txt1
-rw-r--r--Documentation/devicetree/bindings/input/gpio-keys.txt10
-rw-r--r--Documentation/devicetree/bindings/input/stmpe-keypad.txt2
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt3
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt4
-rw-r--r--Documentation/kernel-parameters.txt1
-rw-r--r--Documentation/networking/ip-sysctl.txt2
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py49
-rw-r--r--Documentation/thermal/cpu-cooling-api.txt15
-rw-r--r--MAINTAINERS151
-rw-r--r--Makefile3
-rw-r--r--arch/alpha/kernel/pci.c8
-rw-r--r--arch/alpha/mm/fault.c2
-rw-r--r--arch/arc/mm/fault.c2
-rw-r--r--arch/arm/boot/dts/armada-370-db.dts24
-rw-r--r--arch/arm/boot/dts/at91sam9263.dtsi2
-rw-r--r--arch/arm/boot/dts/berlin2q-marvell-dmp.dts2
-rw-r--r--arch/arm/boot/dts/berlin2q.dtsi63
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts10
-rw-r--r--arch/arm/boot/dts/dra7.dtsi6
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420-arndale-octa.dts4
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi6
-rw-r--r--arch/arm/boot/dts/imx25.dtsi10
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts22
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dts15
-rw-r--r--arch/arm/boot/dts/ls1021a.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/rk3288-evb.dtsi30
-rw-r--r--arch/arm/boot/dts/sama5d3xmb.dtsi2
-rw-r--r--arch/arm/boot/dts/sama5d4.dtsi2
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts8
-rw-r--r--arch/arm/boot/dts/sun4i-a10.dtsi20
-rw-r--r--arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts6
-rw-r--r--arch/arm/boot/dts/sun5i-a10s.dtsi8
-rw-r--r--arch/arm/boot/dts/sun5i-a13-hsg-h702.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13-olinuxino.dts4
-rw-r--r--arch/arm/boot/dts/sun5i-a13.dtsi9
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-bananapi.dts6
-rw-r--r--arch/arm/boot/dts/sun7i-a20-hummingbird.dts8
-rw-r--r--arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts3
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi8
-rw-r--r--arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts4
-rw-r--r--arch/arm/boot/dts/sun8i-a23.dtsi9
-rw-r--r--arch/arm/boot/dts/sun9i-a80-optimus.dts5
-rw-r--r--arch/arm/boot/dts/sun9i-a80.dtsi10
-rw-r--r--arch/arm/boot/dts/tegra20-seaboard.dts2
-rw-r--r--arch/arm/boot/dts/vf610-twr.dts15
-rw-r--r--arch/arm/configs/exynos_defconfig18
-rw-r--r--arch/arm/configs/multi_v7_defconfig1
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/asm/kvm_emulate.h10
-rw-r--r--arch/arm/include/asm/kvm_host.h3
-rw-r--r--arch/arm/include/asm/kvm_mmu.h77
-rw-r--r--arch/arm/include/uapi/asm/unistd.h1
-rw-r--r--arch/arm/kernel/calls.S1
-rw-r--r--arch/arm/kernel/entry-header.S13
-rw-r--r--arch/arm/kernel/perf_event.c10
-rw-r--r--arch/arm/kernel/perf_regs.c8
-rw-r--r--arch/arm/kernel/setup.c16
-rw-r--r--arch/arm/kernel/smp.c12
-rw-r--r--arch/arm/kvm/arm.c10
-rw-r--r--arch/arm/kvm/coproc.c70
-rw-r--r--arch/arm/kvm/coproc.h6
-rw-r--r--arch/arm/kvm/coproc_a15.c2
-rw-r--r--arch/arm/kvm/coproc_a7.c2
-rw-r--r--arch/arm/kvm/mmu.c164
-rw-r--r--arch/arm/kvm/trace.h39
-rw-r--r--arch/arm/mach-at91/board-dt-sama5.c18
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c2
-rw-r--r--arch/arm/mach-imx/clk-imx6sx.c3
-rw-r--r--arch/arm/mach-mvebu/coherency.c14
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/common.h2
-rw-r--r--arch/arm/mach-omap2/control.h4
-rw-r--r--arch/arm/mach-omap2/omap-headsmp.S21
-rw-r--r--arch/arm/mach-omap2/omap-smp.c13
-rw-r--r--arch/arm/mach-omap2/omap4-common.c32
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c10
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c5
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c1
-rw-r--r--arch/arm/mach-omap2/prcm-common.h1
-rw-r--r--arch/arm/mach-omap2/prm44xx.c5
-rw-r--r--arch/arm/mach-omap2/prm_common.c14
-rw-r--r--arch/arm/mach-omap2/timer.c44
-rw-r--r--arch/arm/mach-omap2/twl-common.c7
-rw-r--r--arch/arm/mach-rockchip/rockchip.c27
-rw-r--r--arch/arm/mach-shmobile/board-ape6evm.c20
-rw-r--r--arch/arm/mach-shmobile/board-lager.c13
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7740.c7
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7778.c9
-rw-r--r--arch/arm/mach-shmobile/setup-r8a7779.c9
-rw-r--r--arch/arm/mach-shmobile/setup-rcar-gen2.c2
-rw-r--r--arch/arm/mach-shmobile/setup-sh73a0.c3
-rw-r--r--arch/arm/mach-shmobile/timer.c12
-rw-r--r--arch/arm/mm/dma-mapping.c53
-rw-r--r--arch/arm/mm/dump.c9
-rw-r--r--arch/arm/mm/init.c4
-rw-r--r--arch/arm/mm/mmu.c4
-rw-r--r--arch/arm64/Makefile1
-rw-r--r--arch/arm64/boot/dts/Makefile2
-rw-r--r--arch/arm64/boot/dts/arm/juno.dts2
-rw-r--r--arch/arm64/configs/defconfig9
-rw-r--r--arch/arm64/include/asm/arch_timer.h1
-rw-r--r--arch/arm64/include/asm/cpu.h5
-rw-r--r--arch/arm64/include/asm/dma-mapping.h11
-rw-r--r--arch/arm64/include/asm/kvm_emulate.h12
-rw-r--r--arch/arm64/include/asm/kvm_host.h3
-rw-r--r--arch/arm64/include/asm/kvm_mmu.h34
-rw-r--r--arch/arm64/include/asm/pgtable.h5
-rw-r--r--arch/arm64/include/asm/processor.h4
-rw-r--r--arch/arm64/include/asm/unistd.h2
-rw-r--r--arch/arm64/include/asm/unistd32.h2
-rw-r--r--arch/arm64/kernel/cpuinfo.c10
-rw-r--r--arch/arm64/kernel/efi.c2
-rw-r--r--arch/arm64/kernel/module.c1
-rw-r--r--arch/arm64/kernel/perf_regs.c8
-rw-r--r--arch/arm64/kernel/setup.c1
-rw-r--r--arch/arm64/kernel/smp_spin_table.c1
-rw-r--r--arch/arm64/kernel/suspend.c14
-rw-r--r--arch/arm64/kvm/hyp.S1
-rw-r--r--arch/arm64/kvm/reset.c1
-rw-r--r--arch/arm64/kvm/sys_regs.c75
-rw-r--r--arch/arm64/mm/dump.c1
-rw-r--r--arch/arm64/mm/init.c8
-rw-r--r--arch/avr32/kernel/module.c13
-rw-r--r--arch/avr32/mm/fault.c2
-rw-r--r--arch/blackfin/mach-bf533/boards/stamp.c1
-rw-r--r--arch/cris/arch-v32/drivers/sync_serial.c2
-rw-r--r--arch/cris/kernel/module.c2
-rw-r--r--arch/cris/mm/fault.c2
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.c2
-rw-r--r--arch/frv/mm/fault.c2
-rw-r--r--arch/ia64/include/asm/unistd.h2
-rw-r--r--arch/ia64/include/uapi/asm/unistd.h1
-rw-r--r--arch/ia64/kernel/acpi.c9
-rw-r--r--arch/ia64/kernel/entry.S1
-rw-r--r--arch/ia64/kernel/module.c6
-rw-r--r--arch/ia64/mm/fault.c2
-rw-r--r--arch/ia64/pci/pci.c48
-rw-r--r--arch/m32r/mm/fault.c2
-rw-r--r--arch/m68k/include/asm/unistd.h2
-rw-r--r--arch/m68k/include/uapi/asm/unistd.h1
-rw-r--r--arch/m68k/kernel/syscalltable.S1
-rw-r--r--arch/m68k/mm/fault.c2
-rw-r--r--arch/metag/mm/fault.c2
-rw-r--r--arch/microblaze/mm/fault.c2
-rw-r--r--arch/microblaze/pci/pci-common.c13
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mips/net/bpf_jit.c2
-rw-r--r--arch/mn10300/mm/fault.c2
-rw-r--r--arch/mn10300/unit-asb2305/pci-asb2305.c2
-rw-r--r--arch/mn10300/unit-asb2305/pci.c47
-rw-r--r--arch/nios2/kernel/cpuinfo.c1
-rw-r--r--arch/nios2/kernel/entry.S20
-rw-r--r--arch/nios2/kernel/module.c2
-rw-r--r--arch/nios2/kernel/signal.c2
-rw-r--r--arch/nios2/mm/fault.c2
-rw-r--r--arch/openrisc/mm/fault.c2
-rw-r--r--arch/parisc/include/asm/ldcw.h13
-rw-r--r--arch/parisc/kernel/module.c6
-rw-r--r--arch/parisc/mm/fault.c2
-rw-r--r--arch/powerpc/crypto/sha1.c1
-rw-r--r--arch/powerpc/include/asm/kexec.h10
-rw-r--r--arch/powerpc/include/asm/systbl.h1
-rw-r--r--arch/powerpc/include/asm/thread_info.h13
-rw-r--r--arch/powerpc/include/asm/unistd.h2
-rw-r--r--arch/powerpc/include/uapi/asm/unistd.h1
-rw-r--r--arch/powerpc/kernel/machine_kexec_64.c2
-rw-r--r--arch/powerpc/kernel/pci-common.c12
-rw-r--r--arch/powerpc/kernel/smp.c9
-rw-r--r--arch/powerpc/mm/copro_fault.c2
-rw-r--r--arch/powerpc/mm/fault.c2
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c2
-rw-r--r--arch/powerpc/platforms/powernv/opal-wrappers.S1
-rw-r--r--arch/powerpc/platforms/powernv/setup.c2
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c8
-rw-r--r--arch/powerpc/xmon/xmon.c1
-rw-r--r--arch/s390/hypfs/hypfs_vm.c2
-rw-r--r--arch/s390/include/asm/irqflags.h2
-rw-r--r--arch/s390/include/asm/timex.h10
-rw-r--r--arch/s390/include/uapi/asm/unistd.h3
-rw-r--r--arch/s390/kernel/module.c10
-rw-r--r--arch/s390/kernel/syscalls.S1
-rw-r--r--arch/s390/kernel/uprobes.c69
-rw-r--r--arch/s390/kernel/vtime.c2
-rw-r--r--arch/s390/mm/fault.c6
-rw-r--r--arch/s390/mm/pgtable.c5
-rw-r--r--arch/s390/net/bpf_jit.S28
-rw-r--r--arch/s390/net/bpf_jit_comp.c17
-rw-r--r--arch/score/mm/fault.c2
-rw-r--r--arch/sh/mm/fault.c2
-rw-r--r--arch/sparc/kernel/pci.c5
-rw-r--r--arch/sparc/mm/fault_32.c2
-rw-r--r--arch/sparc/mm/fault_64.c2
-rw-r--r--arch/sparc/net/bpf_jit_comp.c4
-rw-r--r--arch/tile/kernel/module.c4
-rw-r--r--arch/tile/mm/fault.c2
-rw-r--r--arch/um/Kconfig.common1
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/x86/Kconfig6
-rw-r--r--arch/x86/boot/Makefile1
-rw-r--r--arch/x86/boot/compressed/Makefile2
-rw-r--r--arch/x86/boot/compressed/misc.c9
-rw-r--r--arch/x86/crypto/Makefile2
-rw-r--r--arch/x86/crypto/aes_ctrby8_avx-x86_64.S46
-rw-r--r--arch/x86/crypto/sha-mb/sha1_mb.c2
-rw-r--r--arch/x86/include/asm/acpi.h1
-rw-r--r--arch/x86/include/asm/desc.h20
-rw-r--r--arch/x86/include/asm/mmu_context.h20
-rw-r--r--arch/x86/include/asm/vgtod.h6
-rw-r--r--arch/x86/kernel/acpi/boot.c35
-rw-r--r--arch/x86/kernel/cpu/Makefile1
-rw-r--r--arch/x86/kernel/cpu/mkcapflags.sh2
-rw-r--r--arch/x86/kernel/cpu/mshyperv.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_rapl.c46
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.c9
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore.h20
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c17
-rw-r--r--arch/x86/kernel/ftrace.c2
-rw-r--r--arch/x86/kernel/irq.c2
-rw-r--r--arch/x86/kernel/kprobes/core.c20
-rw-r--r--arch/x86/kernel/perf_regs.c90
-rw-r--r--arch/x86/kernel/tls.c25
-rw-r--r--arch/x86/kernel/tsc.c2
-rw-r--r--arch/x86/kvm/emulate.c31
-rw-r--r--arch/x86/kvm/lapic.c3
-rw-r--r--arch/x86/kvm/mmu.c2
-rw-r--r--arch/x86/kvm/vmx.c88
-rw-r--r--arch/x86/lib/insn.c2
-rw-r--r--arch/x86/mm/fault.c2
-rw-r--r--arch/x86/mm/init.c41
-rw-r--r--arch/x86/mm/mpx.c6
-rw-r--r--arch/x86/mm/pat.c7
-rw-r--r--arch/x86/pci/i386.c2
-rw-r--r--arch/x86/pci/xen.c49
-rw-r--r--arch/x86/tools/calc_run_size.pl39
-rw-r--r--arch/x86/tools/calc_run_size.sh42
-rw-r--r--arch/x86/um/sys_call_table_32.c2
-rw-r--r--arch/x86/um/sys_call_table_64.c2
-rw-r--r--arch/x86/vdso/vma.c45
-rw-r--r--arch/x86/xen/enlighten.c22
-rw-r--r--arch/x86/xen/p2m.c20
-rw-r--r--arch/x86/xen/setup.c42
-rw-r--r--arch/x86/xen/time.c18
-rw-r--r--arch/xtensa/mm/fault.c2
-rw-r--r--block/blk-core.c21
-rw-r--r--block/blk-mq-sysfs.c25
-rw-r--r--block/blk-mq-tag.c14
-rw-r--r--block/blk-mq-tag.h1
-rw-r--r--block/blk-mq.c81
-rw-r--r--block/blk-mq.h1
-rw-r--r--block/blk-timeout.c3
-rw-r--r--crypto/aes_generic.c1
-rw-r--r--crypto/af_alg.c3
-rw-r--r--crypto/ansi_cprng.c1
-rw-r--r--crypto/blowfish_generic.c1
-rw-r--r--crypto/camellia_generic.c1
-rw-r--r--crypto/cast5_generic.c1
-rw-r--r--crypto/cast6_generic.c1
-rw-r--r--crypto/crc32c_generic.c1
-rw-r--r--crypto/crct10dif_generic.c1
-rw-r--r--crypto/des_generic.c7
-rw-r--r--crypto/ghash-generic.c1
-rw-r--r--crypto/krng.c1
-rw-r--r--crypto/salsa20_generic.c1
-rw-r--r--crypto/serpent_generic.c1
-rw-r--r--crypto/sha1_generic.c1
-rw-r--r--crypto/sha256_generic.c2
-rw-r--r--crypto/sha512_generic.c2
-rw-r--r--crypto/tea.c1
-rw-r--r--crypto/tgr192.c1
-rw-r--r--crypto/twofish_generic.c1
-rw-r--r--crypto/wp512.c1
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/Makefile6
-rw-r--r--drivers/acpi/acpi_processor.c25
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/acpi/int340x_thermal.c11
-rw-r--r--drivers/acpi/pci_irq.c1
-rw-r--r--drivers/acpi/processor_core.c56
-rw-r--r--drivers/acpi/processor_idle.c2
-rw-r--r--drivers/acpi/scan.c13
-rw-r--r--drivers/acpi/video.c27
-rw-r--r--drivers/ata/Kconfig1
-rw-r--r--drivers/ata/ahci.c1
-rw-r--r--drivers/ata/ahci_xgene.c14
-rw-r--r--drivers/ata/libahci.c2
-rw-r--r--drivers/ata/libata-core.c36
-rw-r--r--drivers/ata/libata-eh.c1
-rw-r--r--drivers/ata/libata-scsi.c10
-rw-r--r--drivers/ata/libata-sff.c12
-rw-r--r--drivers/ata/sata_dwc_460ex.c26
-rw-r--r--drivers/ata/sata_sil24.c2
-rw-r--r--drivers/base/power/domain.c3
-rw-r--r--drivers/base/power/opp.c39
-rw-r--r--drivers/block/null_blk.c2
-rw-r--r--drivers/block/nvme-core.c177
-rw-r--r--drivers/block/rbd.c25
-rw-r--r--drivers/block/virtio_blk.c2
-rw-r--r--drivers/bus/arm-cci.c3
-rw-r--r--drivers/bus/mvebu-mbus.c13
-rw-r--r--drivers/char/agp/ali-agp.c2
-rw-r--r--drivers/char/agp/amd64-agp.c2
-rw-r--r--drivers/char/agp/ati-agp.c2
-rw-r--r--drivers/char/agp/backend.c2
-rw-r--r--drivers/char/agp/intel-agp.c2
-rw-r--r--drivers/char/agp/intel-gtt.c2
-rw-r--r--drivers/char/agp/nvidia-agp.c2
-rw-r--r--drivers/char/agp/via-agp.c2
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c46
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c4
-rw-r--r--drivers/clk/at91/clk-slow.c27
-rw-r--r--drivers/clk/berlin/bg2q.c1
-rw-r--r--drivers/clk/clk-ppc-corenet.c2
-rw-r--r--drivers/clk/clk.c2
-rw-r--r--drivers/clk/rockchip/clk-cpu.c10
-rw-r--r--drivers/clk/rockchip/clk-rk3188.c27
-rw-r--r--drivers/clk/rockchip/clk-rk3288.c28
-rw-r--r--drivers/clocksource/arm_arch_timer.c2
-rw-r--r--drivers/clocksource/bcm_kona_timer.c9
-rw-r--r--drivers/clocksource/exynos_mct.c4
-rw-r--r--drivers/clocksource/sh_tmu.c2
-rw-r--r--drivers/cpufreq/cpufreq-dt.c11
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpuidle/governors/ladder.c7
-rw-r--r--drivers/cpuidle/governors/menu.c25
-rw-r--r--drivers/dma/dw/core.c2
-rw-r--r--drivers/dma/dw/platform.c5
-rw-r--r--drivers/gpio/gpio-crystalcove.c2
-rw-r--r--drivers/gpio/gpio-dln2.c156
-rw-r--r--drivers/gpio/gpio-grgpio.c3
-rw-r--r--drivers/gpio/gpiolib-of.c10
-rw-r--r--drivers/gpio/gpiolib-sysfs.c92
-rw-r--r--drivers/gpio/gpiolib.c58
-rw-r--r--drivers/gpio/gpiolib.h1
-rw-r--r--drivers/gpu/drm/Makefile2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/Makefile3
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_chardev.c320
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device.c26
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c106
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h1
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c6
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c176
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_module.c27
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c2
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_pasid.c4
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_priv.h50
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process.c9
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c12
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c10
-rw-r--r--drivers/gpu/drm/amd/include/kgd_kfd_interface.h17
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c38
-rw-r--r--drivers/gpu/drm/drm_irq.c60
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c12
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c11
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c52
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h10
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c64
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c48
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c44
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_display.c10
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c2
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c42
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c3
-rw-r--r--drivers/gpu/drm/i915/intel_runtime_pm.c27
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c53
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.c9
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h2
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c69
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c3
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/core/event.c4
-rw-r--r--drivers/gpu/drm/nouveau/core/core/notify.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nve0.c33
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c65
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c4
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c9
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_gem.c38
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c4
-rw-r--r--drivers/gpu/drm/radeon/cik.c11
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c11
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/dce3_1_afmt.c2
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c10
-rw-r--r--drivers/gpu/drm/radeon/ni.c10
-rw-r--r--drivers/gpu/drm/radeon/ni_dma.c7
-rw-r--r--drivers/gpu/drm/radeon/nid.h24
-rw-r--r--drivers/gpu/drm/radeon/r100.c10
-rw-r--r--drivers/gpu/drm/radeon/r300.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c42
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c54
-rw-r--r--drivers/gpu/drm/radeon/radeon_gem.c28
-rw-r--r--drivers/gpu/drm/radeon/radeon_kfd.c78
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c33
-rw-r--r--drivers/gpu/drm/radeon/radeon_state.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c6
-rw-r--r--drivers/gpu/drm/radeon/rs400.c14
-rw-r--r--drivers/gpu/drm/radeon/rs600.c14
-rw-r--r--drivers/gpu/drm/radeon/si.c10
-rw-r--r--drivers/gpu/drm/radeon/si_dma.c9
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c39
-rw-r--r--drivers/gpu/drm/radeon/sid.h18
-rw-r--r--drivers/gpu/drm/tegra/dc.c48
-rw-r--r--drivers/gpu/drm/tegra/drm.c16
-rw-r--r--drivers/gpu/drm/tegra/gem.c52
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c28
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fence.c18
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c36
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c8
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_irq.c25
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c2
-rw-r--r--drivers/hid/Kconfig3
-rw-r--r--drivers/hid/hid-core.c1
-rw-r--r--drivers/hid/hid-ids.h1
-rw-r--r--drivers/hid/hid-input.c3
-rw-r--r--drivers/hid/hid-kye.c4
-rw-r--r--drivers/hid/hid-logitech-dj.c16
-rw-r--r--drivers/hid/hid-logitech-hidpp.c41
-rw-r--r--drivers/hid/hid-roccat-pyra.c8
-rw-r--r--drivers/hid/i2c-hid/i2c-hid.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hwmon/Kconfig10
-rw-r--r--drivers/hwmon/Makefile1
-rw-r--r--drivers/hwmon/i5500_temp.c149
-rw-r--r--drivers/i2c/busses/Kconfig1
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c23
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c12
-rw-r--r--drivers/i2c/i2c-core.c2
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c4
-rw-r--r--drivers/iio/adc/ad799x.c15
-rw-r--r--drivers/iio/inkern.c3
-rw-r--r--drivers/infiniband/hw/mlx4/main.c3
-rw-r--r--drivers/input/evdev.c60
-rw-r--r--drivers/input/input.c22
-rw-r--r--drivers/input/keyboard/Kconfig1
-rw-r--r--drivers/input/keyboard/gpio_keys.c114
-rw-r--r--drivers/input/keyboard/hil_kbd.c6
-rw-r--r--drivers/input/keyboard/stmpe-keypad.c141
-rw-r--r--drivers/input/mouse/alps.c84
-rw-r--r--drivers/input/mouse/elantech.c18
-rw-r--r--drivers/input/mouse/synaptics.c7
-rw-r--r--drivers/input/mouse/trackpoint.c4
-rw-r--r--drivers/input/mouse/trackpoint.h5
-rw-r--r--drivers/input/serio/i8042-x86ia64io.h47
-rw-r--r--drivers/input/serio/i8042.c14
-rw-r--r--drivers/input/touchscreen/atmel_mxt_ts.c99
-rw-r--r--drivers/input/touchscreen/edt-ft5x06.c4
-rw-r--r--drivers/iommu/intel-iommu.c12
-rw-r--r--drivers/iommu/ipmmu-vmsa.c6
-rw-r--r--drivers/iommu/rockchip-iommu.c1
-rw-r--r--drivers/iommu/tegra-gart.c3
-rw-r--r--drivers/irqchip/irq-atmel-aic-common.c4
-rw-r--r--drivers/irqchip/irq-gic-v3-its.c2
-rw-r--r--drivers/irqchip/irq-hip04.c2
-rw-r--r--drivers/irqchip/irq-mtk-sysirq.c4
-rw-r--r--drivers/irqchip/irq-omap-intc.c26
-rw-r--r--drivers/isdn/hardware/eicon/message.c2
-rw-r--r--drivers/leds/leds-netxbig.c12
-rw-r--r--drivers/mcb/mcb-internal.h1
-rw-r--r--drivers/mcb/mcb-pci.c27
-rw-r--r--drivers/md/dm-cache-metadata.c104
-rw-r--r--drivers/md/dm-cache-target.c89
-rw-r--r--drivers/md/dm-thin.c35
-rw-r--r--drivers/md/dm.c11
-rw-r--r--drivers/media/pci/cx23885/cx23885-cards.c23
-rw-r--r--drivers/media/pci/cx23885/cx23885-core.c4
-rw-r--r--drivers/media/pci/cx23885/cx23885-dvb.c11
-rw-r--r--drivers/media/pci/cx23885/cx23885.h1
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c7
-rw-r--r--drivers/media/platform/soc_camera/atmel-isi.c5
-rw-r--r--drivers/media/platform/soc_camera/mx2_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/mx3_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/pxa_camera.c3
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c4
-rw-r--r--drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c4
-rw-r--r--drivers/media/usb/dvb-usb/cxusb.c2
-rw-r--r--drivers/media/usb/pvrusb2/pvrusb2-v4l2.c24
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c19
-rw-r--r--drivers/mfd/da9052-core.c3
-rw-r--r--drivers/mfd/rtsx_usb.c12
-rw-r--r--drivers/mfd/stmpe.c4
-rw-r--r--drivers/mfd/stmpe.h3
-rw-r--r--drivers/mfd/tps65218.c12
-rw-r--r--drivers/misc/cxl/context.c82
-rw-r--r--drivers/misc/cxl/file.c14
-rw-r--r--drivers/misc/mei/hw-me.c12
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/host/sdhci-acpi.c2
-rw-r--r--drivers/mmc/host/sdhci-pci.c25
-rw-r--r--drivers/mmc/host/sdhci-pci.h3
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c15
-rw-r--r--drivers/mmc/host/sdhci.c80
-rw-r--r--drivers/net/bonding/bond_main.c2
-rw-r--r--drivers/net/caif/caif_virtio.c2
-rw-r--r--drivers/net/can/c_can/c_can.c3
-rw-r--r--drivers/net/can/c_can/c_can_platform.c29
-rw-r--r--drivers/net/can/dev.c8
-rw-r--r--drivers/net/can/m_can/m_can.c5
-rw-r--r--drivers/net/can/usb/kvaser_usb.c57
-rw-r--r--drivers/net/ethernet/8390/ne2k-pci.c6
-rw-r--r--drivers/net/ethernet/Kconfig12
-rw-r--r--drivers/net/ethernet/Makefile1
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c4
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c15
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-common.h9
-rw-r--r--drivers/net/ethernet/amd/xgbe/xgbe-dev.c4
-rw-r--r--drivers/net/ethernet/atheros/alx/main.c24
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c57
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad_debugfs.c2
-rw-r--r--drivers/net/ethernet/cadence/at91_ether.c10
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h4
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c144
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c56
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c20
-rw-r--r--drivers/net/ethernet/dnet.c18
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c47
-rw-r--r--drivers/net/ethernet/freescale/fec.h2
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c10
-rw-r--r--drivers/net/ethernet/intel/Kconfig11
-rw-r--r--drivers/net/ethernet/intel/e100.c2
-rw-r--r--drivers/net/ethernet/intel/i40e/Makefile2
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_debugfs.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_osdep.h4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.c104
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_txrx.h1
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c2
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c59
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c13
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mr.c9
-rw-r--r--drivers/net/ethernet/micrel/ksz884x.c6
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ethernet/neterion/s2io.c11
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c5
-rw-r--r--drivers/net/ethernet/qlogic/qla3xxx.c8
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c9
-rw-r--r--drivers/net/ethernet/realtek/8139too.c4
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c201
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.h6
-rw-r--r--drivers/net/ethernet/s6gmac.c1058
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c21
-rw-r--r--drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c8
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c19
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c1
-rw-r--r--drivers/net/ethernet/sun/sunvnet.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c79
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.c10
-rw-r--r--drivers/net/ethernet/ti/cpsw_ale.h2
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c96
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet.h2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c6
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c1
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c15
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/phy/micrel.c18
-rw-r--r--drivers/net/team/team.c16
-rw-r--r--drivers/net/usb/kaweth.c2
-rw-r--r--drivers/net/usb/qmi_wwan.c10
-rw-r--r--drivers/net/usb/r8152.c47
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vxlan.c34
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c7
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-8000.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw-file.h6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h9
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c35
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c72
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/utils.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c17
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c32
-rw-r--r--drivers/net/xen-netback/xenbus.c1
-rw-r--r--drivers/net/xen-netfront.c71
-rw-r--r--drivers/of/overlay.c11
-rw-r--r--drivers/of/platform.c11
-rw-r--r--drivers/of/unittest-data/tests-overlay.dtsi55
-rw-r--r--drivers/of/unittest.c39
-rw-r--r--drivers/parisc/lba_pci.c5
-rw-r--r--drivers/pci/bus.c43
-rw-r--r--drivers/pci/pci.c40
-rw-r--r--drivers/pci/pci.h1
-rw-r--r--drivers/pci/quirks.c14
-rw-r--r--drivers/pci/setup-bus.c56
-rw-r--r--drivers/phy/phy-miphy28lp.c3
-rw-r--r--drivers/phy/phy-omap-control.c7
-rw-r--r--drivers/phy/phy-sun4i-usb.c3
-rw-r--r--drivers/phy/phy-ti-pipe3.c10
-rw-r--r--drivers/pinctrl/core.c5
-rw-r--r--drivers/pinctrl/pinctrl-at91.c108
-rw-r--r--drivers/pinctrl/pinctrl-rockchip.c102
-rw-r--r--drivers/pinctrl/pinctrl-st.c5
-rw-r--r--drivers/pinctrl/pinctrl-xway.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c1055
-rw-r--r--drivers/powercap/intel_rapl.c1
-rw-r--r--drivers/regulator/core.c4
-rw-r--r--drivers/regulator/s2mps11.c61
-rw-r--r--drivers/reset/reset-sunxi.c4
-rw-r--r--drivers/rtc/rtc-s5m.c1
-rw-r--r--drivers/s390/crypto/ap_bus.c10
-rw-r--r--drivers/s390/net/qeth_core_main.c117
-rw-r--r--drivers/s390/net/qeth_l2_main.c220
-rw-r--r--drivers/s390/net/qeth_l3_main.c50
-rw-r--r--drivers/scsi/fnic/fnic.h2
-rw-r--r--drivers/scsi/fnic/fnic_scsi.c15
-rw-r--r--drivers/scsi/ipr.c92
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c4
-rw-r--r--drivers/scsi/scsi.c13
-rw-r--r--drivers/scsi/scsi_debug.c4
-rw-r--r--drivers/scsi/scsi_error.c4
-rw-r--r--drivers/scsi/scsi_lib.c15
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/spi/spi-dw-mid.c1
-rw-r--r--drivers/spi/spi-dw.c6
-rw-r--r--drivers/spi/spi-img-spfi.c8
-rw-r--r--drivers/spi/spi-pxa2xx.c2
-rw-r--r--drivers/spi/spi-sh-msiof.c7
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c2
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c2
-rw-r--r--drivers/staging/media/tlg2300/Kconfig1
-rw-r--r--drivers/staging/nvec/nvec.c9
-rw-r--r--drivers/staging/vt6655/baseband.c2
-rw-r--r--drivers/staging/vt6655/channel.c8
-rw-r--r--drivers/staging/vt6655/device_main.c13
-rw-r--r--drivers/staging/vt6655/rxtx.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c12
-rw-r--r--drivers/target/iscsi/iscsi_target_core.h4
-rw-r--r--drivers/target/target_core_device.c54
-rw-r--r--drivers/target/target_core_file.c12
-rw-r--r--drivers/target/target_core_iblock.c3
-rw-r--r--drivers/target/target_core_pr.c12
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_sbc.c15
-rw-r--r--drivers/target/target_core_spc.c5
-rw-r--r--drivers/target/target_core_user.c1
-rw-r--r--drivers/thermal/cpu_cooling.c360
-rw-r--r--drivers/thermal/db8500_cpufreq_cooling.c20
-rw-r--r--drivers/thermal/imx_thermal.c17
-rw-r--r--drivers/thermal/int340x_thermal/Makefile1
-rw-r--r--drivers/thermal/int340x_thermal/acpi_thermal_rel.c24
-rw-r--r--drivers/thermal/int340x_thermal/int3400_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/int3402_thermal.c1
-rw-r--r--drivers/thermal/int340x_thermal/int3403_thermal.c4
-rw-r--r--drivers/thermal/int340x_thermal/processor_thermal_device.c311
-rw-r--r--drivers/thermal/intel_powerclamp.c1
-rw-r--r--drivers/thermal/of-thermal.c2
-rw-r--r--drivers/thermal/rcar_thermal.c17
-rw-r--r--drivers/thermal/rockchip_thermal.c1
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_thermal_common.c12
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c6
-rw-r--r--drivers/thermal/thermal_core.h4
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-thermal-common.c17
-rw-r--r--drivers/tty/n_tty.c9
-rw-r--r--drivers/tty/serial/8250/8250_pci.c29
-rw-r--r--drivers/tty/serial/samsung.c56
-rw-r--r--drivers/tty/serial/serial_core.c4
-rw-r--r--drivers/tty/tty_io.c7
-rw-r--r--drivers/usb/chipidea/core.c2
-rw-r--r--drivers/usb/chipidea/host.c1
-rw-r--r--drivers/usb/core/otg_whitelist.h5
-rw-r--r--drivers/usb/core/quirks.c4
-rw-r--r--drivers/usb/dwc2/core_intr.c6
-rw-r--r--drivers/usb/dwc2/gadget.c10
-rw-r--r--drivers/usb/dwc3/dwc3-pci.c4
-rw-r--r--drivers/usb/dwc3/gadget.c6
-rw-r--r--drivers/usb/gadget/function/f_hid.c5
-rw-r--r--drivers/usb/gadget/function/f_midi.c2
-rw-r--r--drivers/usb/gadget/function/f_uac1.c2
-rw-r--r--drivers/usb/gadget/legacy/inode.c2
-rw-r--r--drivers/usb/gadget/udc/atmel_usba_udc.c19
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c3
-rw-r--r--drivers/usb/host/ehci-sched.c14
-rw-r--r--drivers/usb/host/ehci-tegra.c2
-rw-r--r--drivers/usb/host/pci-quirks.c18
-rw-r--r--drivers/usb/host/xhci-pci.c2
-rw-r--r--drivers/usb/host/xhci.c9
-rw-r--r--drivers/usb/musb/Kconfig4
-rw-r--r--drivers/usb/musb/blackfin.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c4
-rw-r--r--drivers/usb/musb/musb_debugfs.c34
-rw-r--r--drivers/usb/musb/musb_host.c1
-rw-r--r--drivers/usb/phy/phy-mv-usb.c5
-rw-r--r--drivers/usb/phy/phy.c14
-rw-r--r--drivers/usb/serial/console.c16
-rw-r--r--drivers/usb/serial/cp210x.c4
-rw-r--r--drivers/usb/serial/generic.c4
-rw-r--r--drivers/usb/serial/keyspan.c20
-rw-r--r--drivers/usb/serial/option.c11
-rw-r--r--drivers/usb/serial/qcserial.c1
-rw-r--r--drivers/usb/storage/uas-detect.h33
-rw-r--r--drivers/usb/storage/unusual_devs.h9
-rw-r--r--drivers/usb/storage/unusual_uas.h53
-rw-r--r--drivers/vfio/pci/vfio_pci.c4
-rw-r--r--drivers/vhost/net.c2
-rw-r--r--drivers/vhost/scsi.c24
-rw-r--r--drivers/vhost/vhost.c10
-rw-r--r--drivers/video/fbdev/broadsheetfb.c8
-rw-r--r--drivers/video/fbdev/core/fb_defio.c5
-rw-r--r--drivers/video/fbdev/omap2/dss/hdmi_pll.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/pll.c3
-rw-r--r--drivers/video/fbdev/omap2/dss/sdi.c2
-rw-r--r--drivers/video/fbdev/simplefb.c2
-rw-r--r--drivers/video/logo/logo.c17
-rw-r--r--drivers/virtio/virtio_pci_common.c10
-rw-r--r--drivers/virtio/virtio_pci_common.h1
-rw-r--r--drivers/virtio/virtio_pci_legacy.c12
-rw-r--r--drivers/watchdog/cadence_wdt.c1
-rw-r--r--drivers/watchdog/imx2_wdt.c40
-rw-r--r--drivers/watchdog/meson_wdt.c1
-rw-r--r--fs/btrfs/backref.c13
-rw-r--r--fs/btrfs/ctree.h1
-rw-r--r--fs/btrfs/delayed-inode.c8
-rw-r--r--fs/btrfs/extent-tree.c14
-rw-r--r--fs/btrfs/extent_io.c2
-rw-r--r--fs/btrfs/inode.c4
-rw-r--r--fs/btrfs/scrub.c6
-rw-r--r--fs/btrfs/super.c14
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/ceph/addr.c2
-rw-r--r--fs/cifs/cifsglob.h6
-rw-r--r--fs/cifs/ioctl.c21
-rw-r--r--fs/cifs/netmisc.c12
-rw-r--r--fs/cifs/readdir.c10
-rw-r--r--fs/cifs/smb2misc.c12
-rw-r--r--fs/cifs/smb2ops.c3
-rw-r--r--fs/cifs/smb2pdu.h2
-rw-r--r--fs/cifs/smb2transport.c2
-rw-r--r--fs/ext4/extents.c4
-rw-r--r--fs/ext4/file.c220
-rw-r--r--fs/ext4/resize.c24
-rw-r--r--fs/ext4/super.c2
-rw-r--r--fs/fcntl.c5
-rw-r--r--fs/fuse/dev.c51
-rw-r--r--fs/fuse/dir.c31
-rw-r--r--fs/fuse/fuse_i.h2
-rw-r--r--fs/fuse/inode.c5
-rw-r--r--fs/gfs2/quota.c49
-rw-r--r--fs/isofs/rock.c3
-rw-r--r--fs/kernfs/dir.c12
-rw-r--r--fs/lockd/svc.c8
-rw-r--r--fs/locks.c2
-rw-r--r--fs/nfs/direct.c6
-rw-r--r--fs/nfs/inode.c5
-rw-r--r--fs/nfs/internal.h2
-rw-r--r--fs/nfs/nfs4client.c44
-rw-r--r--fs/nfs/nfs4proc.c21
-rw-r--r--fs/nfsd/nfs4state.c2
-rw-r--r--fs/notify/fanotify/fanotify_user.c10
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c5
-rw-r--r--fs/ocfs2/namei.c43
-rw-r--r--fs/quota/dquot.c83
-rw-r--r--fs/quota/quota.c162
-rw-r--r--fs/udf/dir.c31
-rw-r--r--fs/udf/file.c2
-rw-r--r--fs/udf/inode.c14
-rw-r--r--fs/udf/namei.c17
-rw-r--r--fs/udf/symlink.c57
-rw-r--r--fs/udf/udfdecl.h3
-rw-r--r--fs/udf/unicode.c28
-rw-r--r--fs/xfs/xfs_qm.h4
-rw-r--r--fs/xfs/xfs_qm_syscalls.c156
-rw-r--r--fs/xfs/xfs_quotaops.c8
-rw-r--r--include/acpi/processor.h8
-rw-r--r--include/asm-generic/tlb.h8
-rw-r--r--include/drm/drmP.h4
-rw-r--r--include/drm/drm_gem.h7
-rw-r--r--include/dt-bindings/interrupt-controller/arm-gic.h4
-rw-r--r--include/dt-bindings/thermal/thermal.h2
-rw-r--r--include/linux/acpi.h4
-rw-r--r--include/linux/audit.h4
-rw-r--r--include/linux/blk-mq.h8
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/ceph/osd_client.h4
-rw-r--r--include/linux/compiler.h12
-rw-r--r--include/linux/cpu_cooling.h6
-rw-r--r--include/linux/cpuidle.h3
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/genetlink.h4
-rw-r--r--include/linux/i2c.h6
-rw-r--r--include/linux/kdb.h62
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/mfd/samsung/s2mps13.h2
-rw-r--r--include/linux/mfd/stmpe.h22
-rw-r--r--include/linux/mm.h8
-rw-r--r--include/linux/mmc/sdhci.h1
-rw-r--r--include/linux/module.h2
-rw-r--r--include/linux/moduleloader.h4
-rw-r--r--include/linux/netdevice.h26
-rw-r--r--include/linux/netlink.h4
-rw-r--r--include/linux/nfs_fs_sb.h3
-rw-r--r--include/linux/oom.h5
-rw-r--r--include/linux/pagemap.h13
-rw-r--r--include/linux/pci.h3
-rw-r--r--include/linux/perf_event.h18
-rw-r--r--include/linux/perf_regs.h16
-rw-r--r--include/linux/phy/omap_control_phy.h6
-rw-r--r--include/linux/pm_domain.h8
-rw-r--r--include/linux/printk.h15
-rw-r--r--include/linux/quota.h47
-rw-r--r--include/linux/quotaops.h4
-rw-r--r--include/linux/rmap.h10
-rw-r--r--include/linux/thermal.h2
-rw-r--r--include/linux/time.h13
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/net/genetlink.h14
-rw-r--r--include/net/ip.h11
-rw-r--r--include/net/mac80211.h7
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/vxlan.h28
-rw-r--r--include/sound/ak4113.h2
-rw-r--r--include/sound/ak4114.h2
-rw-r--r--include/sound/pcm.h10
-rw-r--r--include/target/target_core_backend.h1
-rw-r--r--include/target/target_core_backend_configfs.h2
-rw-r--r--include/target/target_core_base.h3
-rw-r--r--include/trace/events/kvm.h16
-rw-r--r--include/uapi/asm-generic/fcntl.h2
-rw-r--r--include/uapi/linux/can/netlink.h1
-rw-r--r--include/uapi/linux/in6.h3
-rw-r--r--include/uapi/linux/kfd_ioctl.h37
-rw-r--r--include/uapi/linux/libc-compat.h3
-rw-r--r--include/uapi/linux/openvswitch.h4
-rw-r--r--include/uapi/linux/uinput.h4
-rw-r--r--include/uapi/linux/virtio_ring.h7
-rw-r--r--include/xen/interface/nmi.h51
-rw-r--r--kernel/audit.c10
-rw-r--r--kernel/auditfilter.c23
-rw-r--r--kernel/auditsc.c49
-rw-r--r--kernel/bpf/core.c2
-rw-r--r--kernel/bpf/syscall.c25
-rw-r--r--kernel/cgroup.c2
-rw-r--r--kernel/debug/debug_core.c52
-rw-r--r--kernel/debug/kdb/kdb_bp.c37
-rw-r--r--kernel/debug/kdb/kdb_debugger.c4
-rw-r--r--kernel/debug/kdb/kdb_main.c269
-rw-r--r--kernel/debug/kdb/kdb_private.h3
-rw-r--r--kernel/events/core.c34
-rw-r--r--kernel/exit.c12
-rw-r--r--kernel/kprobes.c2
-rw-r--r--kernel/locking/mutex-debug.c2
-rw-r--r--kernel/module.c91
-rw-r--r--kernel/params.c3
-rw-r--r--kernel/range.c10
-rw-r--r--kernel/sched/core.c18
-rw-r--r--kernel/sched/deadline.c25
-rw-r--r--kernel/sched/fair.c6
-rw-r--r--kernel/sys.c4
-rw-r--r--kernel/time/ntp.c7
-rw-r--r--kernel/time/time.c4
-rw-r--r--kernel/trace/ftrace.c53
-rw-r--r--kernel/trace/trace.c1
-rw-r--r--kernel/trace/trace_events.c69
-rw-r--r--kernel/trace/trace_kdb.c4
-rw-r--r--kernel/workqueue.c25
-rw-r--r--lib/Kconfig.kgdb25
-rw-r--r--lib/assoc_array.c1
-rw-r--r--mm/Kconfig.debug9
-rw-r--r--mm/filemap.c29
-rw-r--r--mm/gup.c4
-rw-r--r--mm/ksm.c2
-rw-r--r--mm/memcontrol.c21
-rw-r--r--mm/memory.c45
-rw-r--r--mm/mmap.c13
-rw-r--r--mm/page-writeback.c43
-rw-r--r--mm/page_alloc.c82
-rw-r--r--mm/rmap.c42
-rw-r--r--mm/vmscan.c26
-rw-r--r--net/batman-adv/fragmentation.c4
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/multicast.c11
-rw-r--r--net/batman-adv/network-coding.c2
-rw-r--r--net/batman-adv/originator.c7
-rw-r--r--net/batman-adv/routing.c6
-rw-r--r--net/bluetooth/6lowpan.c1
-rw-r--r--net/bluetooth/bnep/core.c3
-rw-r--r--net/bluetooth/cmtp/core.c3
-rw-r--r--net/bluetooth/hci_event.c16
-rw-r--r--net/bluetooth/hidp/core.c3
-rw-r--r--net/bridge/br_input.c3
-rw-r--r--net/ceph/auth_x.c2
-rw-r--r--net/ceph/mon_client.c2
-rw-r--r--net/core/dev.c195
-rw-r--r--net/core/neighbour.c44
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/dsa/slave.c1
-rw-r--r--net/ipv4/geneve.c6
-rw-r--r--net/ipv4/ip_forward.c3
-rw-r--r--net/ipv4/ip_sockglue.c8
-rw-r--r--net/ipv4/netfilter/nft_redir_ipv4.c8
-rw-r--r--net/ipv4/ping.c5
-rw-r--r--net/ipv4/route.c9
-rw-r--r--net/ipv4/tcp_output.c4
-rw-r--r--net/ipv4/udp_diag.c4
-rw-r--r--net/ipv6/datagram.c10
-rw-r--r--net/ipv6/ip6_fib.c45
-rw-r--r--net/ipv6/netfilter/nft_redir_ipv6.c8
-rw-r--r--net/ipv6/route.c13
-rw-r--r--net/ipv6/tcp_ipv6.c45
-rw-r--r--net/ipv6/xfrm6_policy.c10
-rw-r--r--net/llc/sysctl_net_llc.c8
-rw-r--r--net/mac80211/key.c12
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/pm.c29
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mpls/mpls_gso.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_ftp.c10
-rw-r--r--net/netfilter/nf_conntrack_core.c20
-rw-r--r--net/netfilter/nf_tables_api.c14
-rw-r--r--net/netfilter/nfnetlink.c7
-rw-r--r--net/netfilter/nft_nat.c8
-rw-r--r--net/netlink/af_netlink.c48
-rw-r--r--net/netlink/af_netlink.h9
-rw-r--r--net/netlink/genetlink.c64
-rw-r--r--net/openvswitch/actions.c3
-rw-r--r--net/openvswitch/datapath.c6
-rw-r--r--net/openvswitch/flow.c5
-rw-r--r--net/openvswitch/flow_netlink.c13
-rw-r--r--net/openvswitch/vport-geneve.c3
-rw-r--r--net/openvswitch/vport-gre.c18
-rw-r--r--net/openvswitch/vport-vxlan.c2
-rw-r--r--net/openvswitch/vport.c7
-rw-r--r--net/packet/af_packet.c13
-rw-r--r--net/sched/cls_bpf.c15
-rw-r--r--net/sctp/associola.c1
-rw-r--r--net/sctp/socket.c8
-rw-r--r--net/socket.c3
-rw-r--r--net/sunrpc/xdr.c6
-rw-r--r--net/tipc/bcast.c5
-rw-r--r--net/wireless/Kconfig2
-rw-r--r--net/wireless/nl80211.c9
-rw-r--r--net/wireless/reg.c56
-rw-r--r--net/wireless/util.c6
-rw-r--r--samples/bpf/test_maps.c4
-rw-r--r--scripts/Makefile.clean16
-rwxr-xr-xscripts/recordmcount.pl1
-rw-r--r--security/keys/gc.c4
-rw-r--r--sound/core/seq/seq_dummy.c31
-rw-r--r--sound/firewire/amdtp.c71
-rw-r--r--sound/firewire/amdtp.h5
-rw-r--r--sound/firewire/bebob/bebob_stream.c7
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c5
-rw-r--r--sound/firewire/fireworks/fireworks_transaction.c2
-rw-r--r--sound/i2c/other/ak4113.c17
-rw-r--r--sound/i2c/other/ak4114.c18
-rw-r--r--sound/pci/hda/hda_controller.c24
-rw-r--r--sound/pci/hda/hda_intel.c5
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_sigmatel.c4
-rw-r--r--sound/soc/adi/axi-i2s.c2
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c24
-rw-r--r--sound/soc/codecs/pcm512x.c2
-rw-r--r--sound/soc/codecs/rt286.c6
-rw-r--r--sound/soc/codecs/rt5640.c1
-rw-r--r--sound/soc/codecs/rt5677.c27
-rw-r--r--sound/soc/codecs/sgtl5000.c13
-rw-r--r--sound/soc/codecs/tlv320aic3x.c2
-rw-r--r--sound/soc/codecs/ts3a227e.c6
-rw-r--r--sound/soc/codecs/wm8731.c2
-rw-r--r--sound/soc/codecs/wm8904.c23
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/dwc/designware_i2s.c49
-rw-r--r--sound/soc/fsl/fsl_esai.h2
-rw-r--r--sound/soc/fsl/fsl_ssi.c4
-rw-r--r--sound/soc/fsl/imx-wm8962.c1
-rw-r--r--sound/soc/generic/simple-card.c7
-rw-r--r--sound/soc/intel/Kconfig4
-rw-r--r--sound/soc/intel/bytcr_dpcm_rt5640.c2
-rw-r--r--sound/soc/intel/sst-firmware.c15
-rw-r--r--sound/soc/intel/sst-haswell-ipc.c34
-rw-r--r--sound/soc/intel/sst/sst_acpi.c4
-rw-r--r--sound/soc/omap/omap-mcbsp.c2
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c5
-rw-r--r--sound/soc/rockchip/rockchip_i2s.h2
-rw-r--r--sound/soc/soc-compress.c9
-rw-r--r--sound/soc/soc-core.c14
-rw-r--r--sound/usb/caiaq/audio.c2
-rw-r--r--sound/usb/mixer.c1
-rw-r--r--tools/include/asm-generic/bitops.h2
-rw-r--r--tools/include/asm-generic/bitops/arch_hweight.h1
-rw-r--r--tools/include/asm-generic/bitops/const_hweight.h1
-rw-r--r--tools/include/asm-generic/bitops/hweight.h7
-rw-r--r--tools/include/linux/bitops.h7
-rw-r--r--tools/lib/api/fs/debugfs.c2
-rw-r--r--tools/lib/api/fs/fs.c2
-rw-r--r--tools/lib/lockdep/preload.c4
-rw-r--r--tools/perf/MANIFEST6
-rw-r--r--tools/perf/Makefile.perf11
-rw-r--r--tools/perf/arch/powerpc/util/skip-callchain-idx.c19
-rw-r--r--tools/perf/bench/sched-pipe.c2
-rw-r--r--tools/perf/builtin-annotate.c2
-rw-r--r--tools/perf/builtin-diff.c46
-rw-r--r--tools/perf/builtin-list.c13
-rw-r--r--tools/perf/builtin-report.c24
-rw-r--r--tools/perf/builtin-top.c5
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/config/Makefile.arch26
-rw-r--r--tools/perf/perf-sys.h1
-rw-r--r--tools/perf/scripts/perl/Perf-Trace-Util/Context.c5
-rw-r--r--tools/perf/tests/dwarf-unwind.c36
-rw-r--r--tools/perf/tests/hists_cumulate.c66
-rw-r--r--tools/perf/tests/hists_filter.c2
-rw-r--r--tools/perf/tests/hists_output.c10
-rw-r--r--tools/perf/ui/browsers/hists.c2
-rw-r--r--tools/perf/ui/hist.c3
-rw-r--r--tools/perf/ui/tui/setup.c26
-rw-r--r--tools/perf/util/annotate.c18
-rw-r--r--tools/perf/util/annotate.h8
-rw-r--r--tools/perf/util/cache.h2
-rw-r--r--tools/perf/util/callchain.c30
-rw-r--r--tools/perf/util/callchain.h2
-rw-r--r--tools/perf/util/evlist.c2
-rw-r--r--tools/perf/util/hist.c18
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/hweight.c31
-rw-r--r--tools/perf/util/include/asm/hweight.h8
-rw-r--r--tools/perf/util/machine.c4
-rw-r--r--tools/perf/util/map.h16
-rw-r--r--tools/perf/util/probe-event.c44
-rw-r--r--tools/perf/util/probe-finder.c18
-rw-r--r--tools/perf/util/python-ext-sources2
-rw-r--r--tools/perf/util/symbol.c31
-rw-r--r--tools/perf/util/symbol.h1
-rw-r--r--tools/perf/util/unwind-libunwind.c28
-rw-r--r--tools/power/cpupower/utils/cpupower.c2
-rw-r--r--tools/power/cpupower/utils/helpers/sysfs.c2
-rw-r--r--tools/testing/selftests/exec/execveat.c23
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c3
-rw-r--r--tools/testing/selftests/vm/Makefile2
-rw-r--r--virt/kvm/kvm_main.c26
1084 files changed, 11772 insertions, 8307 deletions
diff --git a/.mailmap b/.mailmap
index ada8ad696b2e..d357e1bd2a43 100644
--- a/.mailmap
+++ b/.mailmap
@@ -51,6 +51,7 @@ Greg Kroah-Hartman <gregkh@suse.de>
Greg Kroah-Hartman <greg@kroah.com>
Henk Vergonet <Henk.Vergonet@gmail.com>
Henrik Kretzschmar <henne@nachtwindheim.de>
+Henrik Rydberg <rydberg@bitmath.org>
Herbert Xu <herbert@gondor.apana.org.au>
Jacob Shin <Jacob.Shin@amd.com>
James Bottomley <jejb@mulgrave.(none)>
diff --git a/Documentation/ABI/testing/sysfs-class-mei b/Documentation/ABI/testing/sysfs-class-mei
index 0ec8b8178c41..80d9888a8ece 100644
--- a/Documentation/ABI/testing/sysfs-class-mei
+++ b/Documentation/ABI/testing/sysfs-class-mei
@@ -14,3 +14,18 @@ Description:
The /sys/class/mei/meiN directory is created for
each probed mei device
+What: /sys/class/mei/meiN/fw_status
+Date: Nov 2014
+KernelVersion: 3.19
+Contact: Tomas Winkler <tomas.winkler@intel.com>
+Description: Display fw status registers content
+
+ The ME FW writes its status information into fw status
+ registers for BIOS and OS to monitor fw health.
+
+ The register contains running state, power management
+ state, error codes, and others. The way the registers
+ are decoded depends on PCH or SoC generation.
+ Also number of registers varies between 1 and 6
+ depending on generation.
+
diff --git a/Documentation/ABI/testing/sysfs-platform-dell-laptop b/Documentation/ABI/testing/sysfs-platform-dell-laptop
deleted file mode 100644
index 7969443ef0ef..000000000000
--- a/Documentation/ABI/testing/sysfs-platform-dell-laptop
+++ /dev/null
@@ -1,60 +0,0 @@
-What: /sys/class/leds/dell::kbd_backlight/als_setting
-Date: December 2014
-KernelVersion: 3.19
-Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
- Pali Rohár <pali.rohar@gmail.com>
-Description:
- This file allows to control the automatic keyboard
- illumination mode on some systems that have an ambient
- light sensor. Write 1 to this file to enable the auto
- mode, 0 to disable it.
-
-What: /sys/class/leds/dell::kbd_backlight/start_triggers
-Date: December 2014
-KernelVersion: 3.19
-Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
- Pali Rohár <pali.rohar@gmail.com>
-Description:
- This file allows to control the input triggers that
- turn on the keyboard backlight illumination that is
- disabled because of inactivity.
- Read the file to see the triggers available. The ones
- enabled are preceded by '+', those disabled by '-'.
-
- To enable a trigger, write its name preceded by '+' to
- this file. To disable a trigger, write its name preceded
- by '-' instead.
-
- For example, to enable the keyboard as trigger run:
- echo +keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
- To disable it:
- echo -keyboard > /sys/class/leds/dell::kbd_backlight/start_triggers
-
- Note that not all the available triggers can be configured.
-
-What: /sys/class/leds/dell::kbd_backlight/stop_timeout
-Date: December 2014
-KernelVersion: 3.19
-Contact: Gabriele Mazzotta <gabriele.mzt@gmail.com>,
- Pali Rohár <pali.rohar@gmail.com>
-Description:
- This file allows to specify the interval after which the
- keyboard illumination is disabled because of inactivity.
- The timeouts are expressed in seconds, minutes, hours and
- days, for which the symbols are 's', 'm', 'h' and 'd'
- respectively.
-
- To configure the timeout, write to this file a value along
- with any the above units. If no unit is specified, the value
- is assumed to be expressed in seconds.
-
- For example, to set the timeout to 10 minutes run:
- echo 10m > /sys/class/leds/dell::kbd_backlight/stop_timeout
-
- Note that when this file is read, the returned value might be
- expressed in a different unit than the one used when the timeout
- was set.
-
- Also note that only some timeouts are supported and that
- some systems might fall back to a specific timeout in case
- an invalid timeout is written to this file.
diff --git a/Documentation/devicetree/bindings/arm/arm-boards b/Documentation/devicetree/bindings/arm/arm-boards
index 556c8665fdbf..b78564b2b201 100644
--- a/Documentation/devicetree/bindings/arm/arm-boards
+++ b/Documentation/devicetree/bindings/arm/arm-boards
@@ -23,7 +23,7 @@ Required nodes:
range of 0x200 bytes.
- syscon: the root node of the Integrator platforms must have a
- system controller node pointong to the control registers,
+ system controller node pointing to the control registers,
with the compatible string
"arm,integrator-ap-syscon"
"arm,integrator-cp-syscon"
diff --git a/Documentation/devicetree/bindings/arm/fw-cfg.txt b/Documentation/devicetree/bindings/arm/fw-cfg.txt
new file mode 100644
index 000000000000..953fb640d9c4
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/fw-cfg.txt
@@ -0,0 +1,72 @@
+* QEMU Firmware Configuration bindings for ARM
+
+QEMU's arm-softmmu and aarch64-softmmu emulation / virtualization targets
+provide the following Firmware Configuration interface on the "virt" machine
+type:
+
+- A write-only, 16-bit wide selector (or control) register,
+- a read-write, 64-bit wide data register.
+
+QEMU exposes the control and data register to ARM guests as memory mapped
+registers; their location is communicated to the guest's UEFI firmware in the
+DTB that QEMU places at the bottom of the guest's DRAM.
+
+The guest writes a selector value (a key) to the selector register, and then
+can read the corresponding data (produced by QEMU) via the data register. If
+the selected entry is writable, the guest can rewrite it through the data
+register.
+
+The selector register takes keys in big endian byte order.
+
+The data register allows accesses with 8, 16, 32 and 64-bit width (only at
+offset 0 of the register). Accesses larger than a byte are interpreted as
+arrays, bundled together only for better performance. The bytes constituting
+such a word, in increasing address order, correspond to the bytes that would
+have been transferred by byte-wide accesses in chronological order.
+
+The interface allows guest firmware to download various parameters and blobs
+that affect how the firmware works and what tables it installs for the guest
+OS. For example, boot order of devices, ACPI tables, SMBIOS tables, kernel and
+initrd images for direct kernel booting, virtual machine UUID, SMP information,
+virtual NUMA topology, and so on.
+
+The authoritative registry of the valid selector values and their meanings is
+the QEMU source code; the structure of the data blobs corresponding to the
+individual key values is also defined in the QEMU source code.
+
+The presence of the registers can be verified by selecting the "signature" blob
+with key 0x0000, and reading four bytes from the data register. The returned
+signature is "QEMU".
+
+The outermost protocol (involving the write / read sequences of the control and
+data registers) is expected to be versioned, and/or described by feature bits.
+The interface revision / feature bitmap can be retrieved with key 0x0001. The
+blob to be read from the data register has size 4, and it is to be interpreted
+as a uint32_t value in little endian byte order. The current value
+(corresponding to the above outer protocol) is zero.
+
+The guest kernel is not expected to use these registers (although it is
+certainly allowed to); the device tree bindings are documented here because
+this is where device tree bindings reside in general.
+
+Required properties:
+
+- compatible: "qemu,fw-cfg-mmio".
+
+- reg: the MMIO region used by the device.
+ * Bytes 0x0 to 0x7 cover the data register.
+ * Bytes 0x8 to 0x9 cover the selector register.
+ * Further registers may be appended to the region in case of future interface
+ revisions / feature bits.
+
+Example:
+
+/ {
+ #size-cells = <0x2>;
+ #address-cells = <0x2>;
+
+ fw-cfg@9020000 {
+ compatible = "qemu,fw-cfg-mmio";
+ reg = <0x0 0x9020000 0x0 0xa>;
+ };
+};
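Illustrative sketch (editorial, not part of the patch): the probe sequence the binding text above describes (select key 0x0000, read four bytes from the data register, compare against "QEMU") written as bare-metal C. It assumes the example MMIO base 0x9020000 from the DT node above and a little-endian CPU; the helper names are invented for the example.

#include <stdint.h>
#include <string.h>

#define FW_CFG_BASE      0x9020000UL          /* from the example "reg" property above */
#define FW_CFG_DATA      (FW_CFG_BASE + 0x0)  /* 64-bit data register, bytes 0x0 to 0x7 */
#define FW_CFG_SELECTOR  (FW_CFG_BASE + 0x8)  /* 16-bit selector register, bytes 0x8 to 0x9 */

#define FW_CFG_SIGNATURE 0x0000               /* "signature" blob key */

static uint16_t to_be16(uint16_t v)
{
    return (uint16_t)((v << 8) | (v >> 8));   /* assumes a little-endian CPU */
}

static void fw_cfg_select(uint16_t key)
{
    /* The selector register takes keys in big-endian byte order. */
    *(volatile uint16_t *)FW_CFG_SELECTOR = to_be16(key);
}

static int fw_cfg_present(void)
{
    char sig[4];
    int i;

    fw_cfg_select(FW_CFG_SIGNATURE);
    /* Byte-wide reads at offset 0 return the blob bytes in order: "QEMU". */
    for (i = 0; i < 4; i++)
        sig[i] = *(volatile uint8_t *)FW_CFG_DATA;

    return memcmp(sig, "QEMU", 4) == 0;
}

The interface revision (key 0x0001) can be read the same way, as a single 32-bit access at offset 0 of the data register, interpreted as a little-endian uint32_t.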
diff --git a/Documentation/devicetree/bindings/graph.txt b/Documentation/devicetree/bindings/graph.txt
index 1a69c078adf2..fcb1c6a4787b 100644
--- a/Documentation/devicetree/bindings/graph.txt
+++ b/Documentation/devicetree/bindings/graph.txt
@@ -19,7 +19,7 @@ type of the connections, they just map their existence. Specific properties
may be described by specialized bindings depending on the type of connection.
To see how this binding applies to video pipelines, for example, see
-Documentation/device-tree/bindings/media/video-interfaces.txt.
+Documentation/devicetree/bindings/media/video-interfaces.txt.
Here the ports describe data interfaces, and the links between them are
the connecting data buses. A single port with multiple connections can
correspond to multiple devices being connected to the same physical bus.
diff --git a/Documentation/devicetree/bindings/i2c/i2c-st.txt b/Documentation/devicetree/bindings/i2c/i2c-st.txt
index 437e0db3823c..4c26fda3844a 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-st.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-st.txt
@@ -31,7 +31,7 @@ i2c0: i2c@fed40000 {
compatible = "st,comms-ssc4-i2c";
reg = <0xfed40000 0x110>;
interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&CLK_S_ICN_REG_0>;
+ clocks = <&clk_s_a0_ls CLK_ICN_REG>;
clock-names = "ssc";
clock-frequency = <400000>;
pinctrl-names = "default";
diff --git a/Documentation/devicetree/bindings/i2c/trivial-devices.txt b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
index 9f4e3824e71e..9f41d05be3be 100644
--- a/Documentation/devicetree/bindings/i2c/trivial-devices.txt
+++ b/Documentation/devicetree/bindings/i2c/trivial-devices.txt
@@ -47,6 +47,7 @@ dallas,ds3232 Extremely Accurate I²C RTC with Integrated Crystal and SRAM
dallas,ds4510 CPU Supervisor with Nonvolatile Memory and Programmable I/O
dallas,ds75 Digital Thermometer and Thermostat
dlg,da9053 DA9053: flexible system level PMIC with multicore support
+dlg,da9063 DA9063: system PMIC for quad-core application processors
epson,rx8025 High-Stability. I2C-Bus INTERFACE REAL TIME CLOCK MODULE
epson,rx8581 I2C-BUS INTERFACE REAL TIME CLOCK MODULE
fsl,mag3110 MAG3110: Xtrinsic High Accuracy, 3D Magnetometer
diff --git a/Documentation/devicetree/bindings/input/gpio-keys.txt b/Documentation/devicetree/bindings/input/gpio-keys.txt
index a4a38fcf2ed6..44b705767aca 100644
--- a/Documentation/devicetree/bindings/input/gpio-keys.txt
+++ b/Documentation/devicetree/bindings/input/gpio-keys.txt
@@ -10,12 +10,13 @@ Optional properties:
Each button (key) is represented as a sub-node of "gpio-keys":
Subnode properties:
+ - gpios: OF device-tree gpio specification.
+ - interrupts: the interrupt line for that input.
- label: Descriptive name of the key.
- linux,code: Keycode to emit.
-Required mutual exclusive subnode-properties:
- - gpios: OF device-tree gpio specification.
- - interrupts: the interrupt line for that input
+Note that either "interrupts" or "gpios" properties can be omitted, but not
+both at the same time. Specifying both properties is allowed.
Optional subnode-properties:
- linux,input-type: Specify event type this button/key generates.
@@ -23,6 +24,9 @@ Optional subnode-properties:
- debounce-interval: Debouncing interval time in milliseconds.
If not specified defaults to 5.
- gpio-key,wakeup: Boolean, button can wake-up the system.
+ - linux,can-disable: Boolean, indicates that button is connected
+ to dedicated (not shared) interrupt which can be disabled to
+ suppress events from the button.
Example nodes:
diff --git a/Documentation/devicetree/bindings/input/stmpe-keypad.txt b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
index 1b97222e8a0b..12bb771d66d4 100644
--- a/Documentation/devicetree/bindings/input/stmpe-keypad.txt
+++ b/Documentation/devicetree/bindings/input/stmpe-keypad.txt
@@ -8,6 +8,8 @@ Optional properties:
- debounce-interval : Debouncing interval time in milliseconds
- st,scan-count : Scanning cycles elapsed before key data is updated
- st,no-autorepeat : If specified device will not autorepeat
+ - keypad,num-rows : See ./matrix-keymap.txt
+ - keypad,num-columns : See ./matrix-keymap.txt
Example:
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 032808843f90..24c5cdaba8d2 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,8 @@ This file provides information, what the device node
for the davinci_emac interface contains.
Required properties:
-- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
+- compatible: "ti,davinci-dm6467-emac", "ti,am3517-emac" or
+ "ti,dm816-emac"
- reg: Offset and length of the register set for the device
- ti,davinci-ctrl-reg-offset: offset to control register
- ti,davinci-ctrl-mod-reg-offset: offset to control module register
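A minimal sketch of an EMAC node using the newly documented "ti,dm816-emac" compatible; the unit address, register offsets and interrupt numbers below are illustrative placeholders rather than values for a real board:

	eth0: emac@4a100000 {
		compatible = "ti,dm816-emac";
		reg = <0x4a100000 0x4000>;
		interrupts = <40 41 42 43>;
		ti,davinci-ctrl-reg-offset = <0x3000>;
		ti,davinci-ctrl-mod-reg-offset = <0x2000>;
		ti,davinci-ctrl-ram-offset = <0>;
		ti,davinci-ctrl-ram-size = <0x2000>;
	};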
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index b1df0ad1306c..d443279c95dc 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -9,7 +9,6 @@ ad Avionic Design GmbH
adapteva Adapteva, Inc.
adi Analog Devices, Inc.
aeroflexgaisler Aeroflex Gaisler AB
-ak Asahi Kasei Corp.
allwinner Allwinner Technology Co., Ltd.
altr Altera Corp.
amcc Applied Micro Circuits Corporation (APM, formally AMCC)
@@ -20,6 +19,7 @@ amstaos AMS-Taos Inc.
apm Applied Micro Circuits Corporation (APM)
arm ARM Ltd.
armadeus ARMadeus Systems SARL
+asahi-kasei Asahi Kasei Corp.
atmel Atmel Corporation
auo AU Optronics Corporation
avago Avago Technologies
@@ -127,6 +127,7 @@ pixcir PIXCIR MICROELECTRONICS Co., Ltd
powervr PowerVR (deprecated, use img)
qca Qualcomm Atheros, Inc.
qcom Qualcomm Technologies, Inc
+qemu QEMU, a generic and open source machine emulator and virtualizer
qnap QNAP Systems, Inc.
radxa Radxa
raidsonic RaidSonic Technology GmbH
@@ -168,6 +169,7 @@ usi Universal Scientific Industrial Co., Ltd.
v3 V3 Semiconductor
variscite Variscite Ltd.
via VIA Technologies, Inc.
+virtio Virtual I/O Device Specification, developed by the OASIS consortium
voipac Voipac Technologies s.r.o.
winbond Winbond Electronics corp.
wlf Wolfson Microelectronics
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 4df73da11adc..176d4fe4f076 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1277,6 +1277,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
i8042.notimeout [HW] Ignore timeout condition signalled by controller
i8042.reset [HW] Reset the controller during init and cleanup
i8042.unlock [HW] Unlock (ignore) the keylock
+ i8042.kbdreset [HW] Reset device connected to KBD port
i810= [HW,DRM]
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 9bffdfc648dc..85b022179104 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -66,6 +66,8 @@ fwmark_reflect - BOOLEAN
route/max_size - INTEGER
Maximum number of routes allowed in the kernel. Increase
this when using large numbers of interfaces and/or routes.
+	From Linux kernel 3.6 onwards, this is deprecated for IPv4
+	as the route cache is no longer used.
neigh/default/gc_thresh1 - INTEGER
Minimum number of entries to keep. Garbage collector will not
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 230ce71f4d75..2b47704f75cb 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -389,9 +389,6 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .release_cmd = " + fabric_mod_name + "_release_cmd,\n"
buf += " .shutdown_session = " + fabric_mod_name + "_shutdown_session,\n"
buf += " .close_session = " + fabric_mod_name + "_close_session,\n"
- buf += " .stop_session = " + fabric_mod_name + "_stop_session,\n"
- buf += " .fall_back_to_erl0 = " + fabric_mod_name + "_reset_nexus,\n"
- buf += " .sess_logged_in = " + fabric_mod_name + "_sess_logged_in,\n"
buf += " .sess_get_index = " + fabric_mod_name + "_sess_get_index,\n"
buf += " .sess_get_initiator_sid = NULL,\n"
buf += " .write_pending = " + fabric_mod_name + "_write_pending,\n"
@@ -402,7 +399,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " .queue_data_in = " + fabric_mod_name + "_queue_data_in,\n"
buf += " .queue_status = " + fabric_mod_name + "_queue_status,\n"
buf += " .queue_tm_rsp = " + fabric_mod_name + "_queue_tm_rsp,\n"
- buf += " .is_state_remove = " + fabric_mod_name + "_is_state_remove,\n"
+ buf += " .aborted_task = " + fabric_mod_name + "_aborted_task,\n"
buf += " /*\n"
buf += " * Setup function pointers for generic logic in target_core_fabric_configfs.c\n"
buf += " */\n"
@@ -428,7 +425,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += " /*\n"
buf += " * Register the top level struct config_item_type with TCM core\n"
buf += " */\n"
- buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name[4:] + "\");\n"
+ buf += " fabric = target_fabric_configfs_init(THIS_MODULE, \"" + fabric_mod_name + "\");\n"
buf += " if (IS_ERR(fabric)) {\n"
buf += " printk(KERN_ERR \"target_fabric_configfs_init() failed\\n\");\n"
buf += " return PTR_ERR(fabric);\n"
@@ -595,7 +592,7 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
if re.search('get_fabric_name', fo):
buf += "char *" + fabric_mod_name + "_get_fabric_name(void)\n"
buf += "{\n"
- buf += " return \"" + fabric_mod_name[4:] + "\";\n"
+ buf += " return \"" + fabric_mod_name + "\";\n"
buf += "}\n\n"
bufi += "char *" + fabric_mod_name + "_get_fabric_name(void);\n"
continue
@@ -820,27 +817,6 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
buf += "}\n\n"
bufi += "void " + fabric_mod_name + "_close_session(struct se_session *);\n"
- if re.search('stop_session\)\(', fo):
- buf += "void " + fabric_mod_name + "_stop_session(struct se_session *se_sess, int sess_sleep , int conn_sleep)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_stop_session(struct se_session *, int, int);\n"
-
- if re.search('fall_back_to_erl0\)\(', fo):
- buf += "void " + fabric_mod_name + "_reset_nexus(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return;\n"
- buf += "}\n\n"
- bufi += "void " + fabric_mod_name + "_reset_nexus(struct se_session *);\n"
-
- if re.search('sess_logged_in\)\(', fo):
- buf += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *se_sess)\n"
- buf += "{\n"
- buf += " return 0;\n"
- buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_sess_logged_in(struct se_session *);\n"
-
if re.search('sess_get_index\)\(', fo):
buf += "u32 " + fabric_mod_name + "_sess_get_index(struct se_session *se_sess)\n"
buf += "{\n"
@@ -898,19 +874,18 @@ def tcm_mod_dump_fabric_ops(proto_ident, fabric_mod_dir_var, fabric_mod_name):
bufi += "int " + fabric_mod_name + "_queue_status(struct se_cmd *);\n"
if re.search('queue_tm_rsp\)\(', fo):
- buf += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
+ buf += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *se_cmd)\n"
buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
+ bufi += "void " + fabric_mod_name + "_queue_tm_rsp(struct se_cmd *);\n"
- if re.search('is_state_remove\)\(', fo):
- buf += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *se_cmd)\n"
+ if re.search('aborted_task\)\(', fo):
+ buf += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *se_cmd)\n"
buf += "{\n"
- buf += " return 0;\n"
+ buf += " return;\n"
buf += "}\n\n"
- bufi += "int " + fabric_mod_name + "_is_state_remove(struct se_cmd *);\n"
-
+ bufi += "void " + fabric_mod_name + "_aborted_task(struct se_cmd *);\n"
ret = p.write(buf)
if ret:
@@ -1018,11 +993,11 @@ def main(modname, proto_ident):
tcm_mod_build_kbuild(fabric_mod_dir, fabric_mod_name)
tcm_mod_build_kconfig(fabric_mod_dir, fabric_mod_name)
- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Makefile..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Makefile..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kbuild(tcm_dir, fabric_mod_name)
- input = raw_input("Would you like to add " + fabric_mod_name + "to drivers/target/Kconfig..? [yes,no]: ")
+ input = raw_input("Would you like to add " + fabric_mod_name + " to drivers/target/Kconfig..? [yes,no]: ")
if input == "yes" or input == "y":
tcm_mod_add_kconfig(tcm_dir, fabric_mod_name)
diff --git a/Documentation/thermal/cpu-cooling-api.txt b/Documentation/thermal/cpu-cooling-api.txt
index fca24c931ec8..753e47cc2e20 100644
--- a/Documentation/thermal/cpu-cooling-api.txt
+++ b/Documentation/thermal/cpu-cooling-api.txt
@@ -3,7 +3,7 @@ CPU cooling APIs How To
Written by Amit Daniel Kachhap <amit.kachhap@linaro.org>
-Updated: 12 May 2012
+Updated: 6 Jan 2015
Copyright (c) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
@@ -25,7 +25,18 @@ the user. The registration APIs returns the cooling device pointer.
clip_cpus: cpumask of cpus where the frequency constraints will happen.
-1.1.2 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
+1.1.2 struct thermal_cooling_device *of_cpufreq_cooling_register(
+ struct device_node *np, const struct cpumask *clip_cpus)
+
+ This interface function registers the cpufreq cooling device with
+	the name "thermal-cpufreq-%x", linking it to a device tree node so
+	that it can be bound via the thermal DT code. This API can support
+	multiple instances of cpufreq cooling devices.
+
+ np: pointer to the cooling device device tree node
+ clip_cpus: cpumask of cpus where the frequency constraints will happen.
+
+1.1.3 void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
This interface function unregisters the "thermal-cpufreq-%x" cooling device.
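On the device tree side, the node passed to of_cpufreq_cooling_register() is typically a CPU node exposing "#cooling-cells", which a thermal zone can then reference from a cooling map. A rough, illustrative sketch under those assumptions (the trip label and omitted zone properties are placeholders):

	cpu0: cpu@0 {
		/* usual CPU properties omitted */
		#cooling-cells = <2>;	/* min followed by max cooling state */
	};

	thermal-zones {
		cpu-thermal {
			/* trips and polling delays omitted */
			cooling-maps {
				map0 {
					trip = <&cpu_alert0>;
					cooling-device = <&cpu0 THERMAL_NO_LIMIT THERMAL_NO_LIMIT>;
				};
			};
		};
	};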
diff --git a/MAINTAINERS b/MAINTAINERS
index ddb9ac8d32b3..4871639b939f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -696,7 +696,7 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
W: http://blackfin.uclinux.org/
S: Supported
F: sound/soc/blackfin/*
-
+
ANALOG DEVICES INC IIO DRIVERS
M: Lars-Peter Clausen <lars@metafoo.de>
M: Michael Hennerich <Michael.Hennerich@analog.com>
@@ -708,6 +708,16 @@ X: drivers/iio/*/adjd*
F: drivers/staging/iio/*/ad*
F: staging/iio/trigger/iio-trig-bfin-timer.c
+ANDROID DRIVERS
+M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+M: Arve Hjønnevåg <arve@android.com>
+M: Riley Andrews <riandrews@android.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/gregkh/staging.git
+L: devel@driverdev.osuosl.org
+S: Supported
+F: drivers/android/
+F: drivers/staging/android/
+
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg <johannes@sipsolutions.net>
L: linuxppc-dev@lists.ozlabs.org
@@ -724,15 +734,15 @@ F: include/uapi/linux/apm_bios.h
F: drivers/char/apm-emulation.c
APPLE BCM5974 MULTITOUCH DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
-S: Maintained
+S: Odd fixes
F: drivers/input/mouse/bcm5974.c
APPLE SMC DRIVER
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: lm-sensors@lm-sensors.org
-S: Maintained
+S: Odd fixes
F: drivers/hwmon/applesmc.c
APPLETALK NETWORK LAYER
@@ -754,13 +764,6 @@ L: linux-media@vger.kernel.org
S: Maintained
F: drivers/media/i2c/aptina-pll.*
-ARASAN COMPACT FLASH PATA CONTROLLER
-M: Viresh Kumar <viresh.linux@gmail.com>
-L: linux-ide@vger.kernel.org
-S: Maintained
-F: include/linux/pata_arasan_cf_data.h
-F: drivers/ata/pata_arasan_cf.c
-
ARC FRAMEBUFFER DRIVER
M: Jaya Kumar <jayalk@intworks.biz>
S: Maintained
@@ -2259,6 +2262,7 @@ F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
M: Chris Mason <clm@fb.com>
M: Josef Bacik <jbacik@fb.com>
+M: David Sterba <dsterba@suse.cz>
L: linux-btrfs@vger.kernel.org
W: http://btrfs.wiki.kernel.org/
Q: http://patchwork.kernel.org/project/linux-btrfs/list/
@@ -2345,7 +2349,8 @@ CAN NETWORK LAYER
M: Oliver Hartkopp <socketcan@hartkopp.net>
L: linux-can@vger.kernel.org
W: http://gitorious.org/linux-can
-T: git git://gitorious.org/linux-can/linux-can-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
F: Documentation/networking/can.txt
F: net/can/
@@ -2360,7 +2365,8 @@ M: Wolfgang Grandegger <wg@grandegger.com>
M: Marc Kleine-Budde <mkl@pengutronix.de>
L: linux-can@vger.kernel.org
W: http://gitorious.org/linux-can
-T: git git://gitorious.org/linux-can/linux-can-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can-next.git
S: Maintained
F: drivers/net/can/
F: include/linux/can/dev.h
@@ -3182,7 +3188,7 @@ L: dmaengine@vger.kernel.org
Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
S: Maintained
F: drivers/dma/
-F: include/linux/dma*
+F: include/linux/dmaengine.h
F: Documentation/dmaengine/
T: git git://git.infradead.org/users/vkoul/slave-dma.git
@@ -4748,20 +4754,20 @@ S: Supported
F: drivers/scsi/ipr.*
IBM Power Virtual Ethernet Device Driver
-M: Santiago Leon <santil@linux.vnet.ibm.com>
+M: Thomas Falcon <tlfalcon@linux.vnet.ibm.com>
L: netdev@vger.kernel.org
S: Supported
F: drivers/net/ethernet/ibm/ibmveth.*
IBM Power Virtual SCSI Device Drivers
-M: Nathan Fontenot <nfont@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvscsi*
F: drivers/scsi/ibmvscsi/viosrp.h
IBM Power Virtual FC Device Drivers
-M: Brian King <brking@linux.vnet.ibm.com>
+M: Tyrel Datwyler <tyreld@linux.vnet.ibm.com>
L: linux-scsi@vger.kernel.org
S: Supported
F: drivers/scsi/ibmvscsi/ibmvfc*
@@ -4929,7 +4935,6 @@ F: include/uapi/linux/inotify.h
INPUT (KEYBOARD, MOUSE, JOYSTICK, TOUCHSCREEN) DRIVERS
M: Dmitry Torokhov <dmitry.torokhov@gmail.com>
-M: Dmitry Torokhov <dtor@mail.ru>
L: linux-input@vger.kernel.org
Q: http://patchwork.kernel.org/project/linux-input/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input.git
@@ -4940,18 +4945,27 @@ F: include/uapi/linux/input.h
F: include/linux/input/
INPUT MULTITOUCH (MT) PROTOCOL
-M: Henrik Rydberg <rydberg@euromail.se>
+M: Henrik Rydberg <rydberg@bitmath.org>
L: linux-input@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/rydberg/input-mt.git
-S: Maintained
+S: Odd fixes
F: Documentation/input/multi-touch-protocol.txt
F: drivers/input/input-mt.c
K: \b(ABS|SYN)_MT_
+INTEL ASoC BDW/HSW DRIVERS
+M: Jie Yang <yang.jie@linux.intel.com>
+L: alsa-devel@alsa-project.org
+S: Supported
+F: sound/soc/intel/sst-haswell*
+F: sound/soc/intel/sst-dsp*
+F: sound/soc/intel/sst-firmware.c
+F: sound/soc/intel/broadwell.c
+F: sound/soc/intel/haswell.c
+
INTEL C600 SERIES SAS CONTROLLER DRIVER
M: Intel SCU Linux support <intel-linux-scu@intel.com>
M: Artur Paszkiewicz <artur.paszkiewicz@intel.com>
-M: Dave Jiang <dave.jiang@intel.com>
L: linux-scsi@vger.kernel.org
T: git git://git.code.sf.net/p/intel-sas/isci
S: Supported
@@ -5279,6 +5293,15 @@ W: www.open-iscsi.org
Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/ulp/iser/
+ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
+M: Sagi Grimberg <sagig@mellanox.com>
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
+L: linux-rdma@vger.kernel.org
+L: target-devel@vger.kernel.org
+S: Supported
+W: http://www.linux-iscsi.org
+F: drivers/infiniband/ulp/isert
+
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
@@ -5693,6 +5716,49 @@ F: drivers/lguest/
F: include/linux/lguest*.h
F: tools/lguest/
+LIBATA SUBSYSTEM (Serial and Parallel ATA drivers)
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/
+F: include/linux/ata.h
+F: include/linux/libata.h
+
+LIBATA PATA ARASAN COMPACT FLASH CONTROLLER
+M: Viresh Kumar <viresh.linux@gmail.com>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: include/linux/pata_arasan_cf_data.h
+F: drivers/ata/pata_arasan_cf.c
+
+LIBATA PATA DRIVERS
+M: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/pata_*.c
+F: drivers/ata/ata_generic.c
+
+LIBATA SATA AHCI PLATFORM devices support
+M: Hans de Goede <hdegoede@redhat.com>
+M: Tejun Heo <tj@kernel.org>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/ahci_platform.c
+F: drivers/ata/libahci_platform.c
+F: include/linux/ahci_platform.h
+
+LIBATA SATA PROMISE TX2/TX4 CONTROLLER DRIVER
+M: Mikael Pettersson <mikpelinux@gmail.com>
+L: linux-ide@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S: Maintained
+F: drivers/ata/sata_promise.*
+
LIBLOCKDEP
M: Sasha Levin <sasha.levin@oracle.com>
S: Maintained
@@ -6977,14 +7043,12 @@ OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@linaro.org>
M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
-W: http://fdt.secretlab.ca
-T: git git://git.secretlab.ca/git/linux-2.6.git
+W: http://www.devicetree.org/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/glikely/linux.git
S: Maintained
F: drivers/of/
F: include/linux/of*.h
F: scripts/dtc/
-K: of_get_property
-K: of_match_table
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
M: Rob Herring <robh+dt@kernel.org>
@@ -7229,7 +7293,7 @@ S: Maintained
F: drivers/pci/host/*layerscape*
PCI DRIVER FOR IMX6
-M: Richard Zhu <r65037@freescale.com>
+M: Richard Zhu <Richard.Zhu@freescale.com>
M: Lucas Stach <l.stach@pengutronix.de>
L: linux-pci@vger.kernel.org
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -7399,6 +7463,7 @@ F: drivers/crypto/picoxcell*
PIN CONTROL SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
L: linux-gpio@vger.kernel.org
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-pinctrl.git
S: Maintained
F: drivers/pinctrl/
F: include/linux/pinctrl/
@@ -7566,12 +7631,6 @@ W: http://wireless.kernel.org/en/users/Drivers/p54
S: Obsolete
F: drivers/net/wireless/prism54/
-PROMISE SATA TX2/TX4 CONTROLLER LIBATA DRIVER
-M: Mikael Pettersson <mikpelinux@gmail.com>
-L: linux-ide@vger.kernel.org
-S: Maintained
-F: drivers/ata/sata_promise.*
-
PS3 NETWORK SUPPORT
M: Geoff Levand <geoff@infradead.org>
L: netdev@vger.kernel.org
@@ -7737,8 +7796,7 @@ F: Documentation/scsi/LICENSE.qla2xxx
F: drivers/scsi/qla2xxx/
QLOGIC QLA4XXX iSCSI DRIVER
-M: Vikas Chaudhary <vikas.chaudhary@qlogic.com>
-M: iscsi-driver@qlogic.com
+M: QLogic-Storage-Upstream@qlogic.com
L: linux-scsi@vger.kernel.org
S: Supported
F: Documentation/scsi/LICENSE.qla4xxx
@@ -8546,25 +8604,6 @@ S: Maintained
F: drivers/misc/phantom.c
F: include/uapi/linux/phantom.h
-SERIAL ATA (SATA) SUBSYSTEM
-M: Tejun Heo <tj@kernel.org>
-L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S: Supported
-F: drivers/ata/
-F: include/linux/ata.h
-F: include/linux/libata.h
-
-SERIAL ATA AHCI PLATFORM devices support
-M: Hans de Goede <hdegoede@redhat.com>
-M: Tejun Heo <tj@kernel.org>
-L: linux-ide@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
-S: Supported
-F: drivers/ata/ahci_platform.c
-F: drivers/ata/libahci_platform.c
-F: include/linux/ahci_platform.h
-
SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
M: Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
L: linux-scsi@vger.kernel.org
@@ -9533,7 +9572,8 @@ F: drivers/platform/x86/thinkpad_acpi.c
TI BANDGAP AND THERMAL DRIVER
M: Eduardo Valentin <edubezval@gmail.com>
L: linux-pm@vger.kernel.org
-S: Supported
+L: linux-omap@vger.kernel.org
+S: Maintained
F: drivers/thermal/ti-soc-thermal/
TI CLOCK DRIVER
@@ -10146,6 +10186,7 @@ USERSPACE I/O (UIO)
M: "Hans J. Koch" <hjk@hansjkoch.de>
M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc.git
F: Documentation/DocBook/uio-howto.tmpl
F: drivers/uio/
F: include/linux/uio*.h
diff --git a/Makefile b/Makefile
index b1c3254441f3..c8e17c05f916 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 19
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
NAME = Diseased Newt
# *DOCUMENTATION*
@@ -391,6 +391,7 @@ USERINCLUDE := \
# Needed to be compatible with the O= option
LINUXINCLUDE := \
-I$(srctree)/arch/$(hdr-arch)/include \
+ -Iarch/$(hdr-arch)/include/generated/uapi \
-Iarch/$(hdr-arch)/include/generated \
$(if $(KBUILD_SRC), -I$(srctree)/include) \
-Iinclude \
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
index 076c35cd6cde..98a1525fa164 100644
--- a/arch/alpha/kernel/pci.c
+++ b/arch/alpha/kernel/pci.c
@@ -285,8 +285,12 @@ pcibios_claim_one_bus(struct pci_bus *b)
if (r->parent || !r->start || !r->flags)
continue;
if (pci_has_flag(PCI_PROBE_ONLY) ||
- (r->flags & IORESOURCE_PCI_FIXED))
- pci_claim_resource(dev, i);
+ (r->flags & IORESOURCE_PCI_FIXED)) {
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
+ }
}
}
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 98838a05ba6d..9d0ac091a52a 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -156,6 +156,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/arc/mm/fault.c b/arch/arc/mm/fault.c
index 6f7e3a68803a..563cb27e37f5 100644
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -161,6 +161,8 @@ good_area:
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
diff --git a/arch/arm/boot/dts/armada-370-db.dts b/arch/arm/boot/dts/armada-370-db.dts
index 1466580be295..70b1943a86b1 100644
--- a/arch/arm/boot/dts/armada-370-db.dts
+++ b/arch/arm/boot/dts/armada-370-db.dts
@@ -203,27 +203,3 @@
compatible = "linux,spdif-dir";
};
};
-
-&pinctrl {
- /*
- * These pins might be muxed as I2S by
- * the bootloader, but it conflicts
- * with the real I2S pins that are
- * muxed using i2s_pins. We must mux
- * those pins to a function other than
- * I2S.
- */
- pinctrl-0 = <&hog_pins1 &hog_pins2>;
- pinctrl-names = "default";
-
- hog_pins1: hog-pins1 {
- marvell,pins = "mpp6", "mpp8", "mpp10",
- "mpp12", "mpp13";
- marvell,function = "gpio";
- };
-
- hog_pins2: hog-pins2 {
- marvell,pins = "mpp5", "mpp7", "mpp9";
- marvell,function = "gpo";
- };
-};
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index 1467750e3377..e8c6c600a5b6 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -953,6 +953,8 @@
interrupts = <26 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fb>;
+ clocks = <&lcd_clk>, <&lcd_clk>;
+ clock-names = "lcdc_clk", "hclk";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
index 28e7e2060c33..a98ac1bd8f65 100644
--- a/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
+++ b/arch/arm/boot/dts/berlin2q-marvell-dmp.dts
@@ -65,6 +65,8 @@
};
&sdhci2 {
+ broken-cd;
+ bus-width = <8>;
non-removable;
status = "okay";
};
diff --git a/arch/arm/boot/dts/berlin2q.dtsi b/arch/arm/boot/dts/berlin2q.dtsi
index 35253c947a7c..e2f61f27944e 100644
--- a/arch/arm/boot/dts/berlin2q.dtsi
+++ b/arch/arm/boot/dts/berlin2q.dtsi
@@ -83,7 +83,8 @@
compatible = "mrvl,pxav3-mmc";
reg = <0xab1000 0x200>;
interrupts = <GIC_SPI 28 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&chip CLKID_SDIO1XIN>;
+ clocks = <&chip CLKID_NFC_ECC>, <&chip CLKID_NFC>;
+ clock-names = "io", "core";
status = "disabled";
};
@@ -348,36 +349,6 @@
interrupt-parent = <&gic>;
interrupts = <GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>;
};
-
- gpio4: gpio@5000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0x5000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- porte: gpio-port@4 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
-
- gpio5: gpio@c000 {
- compatible = "snps,dw-apb-gpio";
- reg = <0xc000 0x400>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- portf: gpio-port@5 {
- compatible = "snps,dw-apb-gpio-port";
- gpio-controller;
- #gpio-cells = <2>;
- snps,nr-gpios = <32>;
- reg = <0>;
- };
- };
};
chip: chip-control@ea0000 {
@@ -466,6 +437,21 @@
ranges = <0 0xfc0000 0x10000>;
interrupt-parent = <&sic>;
+ sm_gpio1: gpio@5000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0x5000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ portf: gpio-port@5 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
i2c2: i2c@7000 {
compatible = "snps,designware-i2c";
#address-cells = <1>;
@@ -516,6 +502,21 @@
status = "disabled";
};
+ sm_gpio0: gpio@c000 {
+ compatible = "snps,dw-apb-gpio";
+ reg = <0xc000 0x400>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ porte: gpio-port@4 {
+ compatible = "snps,dw-apb-gpio-port";
+ gpio-controller;
+ #gpio-cells = <2>;
+ snps,nr-gpios = <32>;
+ reg = <0>;
+ };
+ };
+
sysctrl: pin-controller@d000 {
compatible = "marvell,berlin2q-system-ctrl";
reg = <0xd000 0x100>;
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 10b725c7bfc0..ad4118f7e1a6 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -499,23 +499,23 @@
};
partition@5 {
label = "QSPI.u-boot-spl-os";
- reg = <0x00140000 0x00010000>;
+ reg = <0x00140000 0x00080000>;
};
partition@6 {
label = "QSPI.u-boot-env";
- reg = <0x00150000 0x00010000>;
+ reg = <0x001c0000 0x00010000>;
};
partition@7 {
label = "QSPI.u-boot-env.backup1";
- reg = <0x00160000 0x0010000>;
+ reg = <0x001d0000 0x0010000>;
};
partition@8 {
label = "QSPI.kernel";
- reg = <0x00170000 0x0800000>;
+ reg = <0x001e0000 0x0800000>;
};
partition@9 {
label = "QSPI.file-system";
- reg = <0x00970000 0x01690000>;
+ reg = <0x009e0000 0x01620000>;
};
};
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 22771bc1643a..63f8b007bdc5 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1257,6 +1257,8 @@
tx-fifo-resize;
maximum-speed = "super-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -1278,6 +1280,8 @@
tx-fifo-resize;
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
@@ -1299,6 +1303,8 @@
tx-fifo-resize;
maximum-speed = "high-speed";
dr_mode = "otg";
+ snps,dis_u3_susphy_quirk;
+ snps,dis_u2_susphy_quirk;
};
};
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index b8168f1f8139..24ff27049ce0 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -368,7 +368,7 @@
};
i2s1: i2s@13960000 {
- compatible = "samsung,s5pv210-i2s";
+ compatible = "samsung,s3c6410-i2s";
reg = <0x13960000 0x100>;
clocks = <&clock CLK_I2S1>;
clock-names = "iis";
@@ -378,7 +378,7 @@
};
i2s2: i2s@13970000 {
- compatible = "samsung,s5pv210-i2s";
+ compatible = "samsung,s3c6410-i2s";
reg = <0x13970000 0x100>;
clocks = <&clock CLK_I2S2>;
clock-names = "iis";
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 0a229fcd7acf..d75c89d7666a 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -736,7 +736,7 @@
dp_phy: video-phy@10040720 {
compatible = "samsung,exynos5250-dp-video-phy";
- reg = <0x10040720 4>;
+ samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/exynos5420-arndale-octa.dts b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
index aa7a7d727a7e..db2c1c4cd900 100644
--- a/arch/arm/boot/dts/exynos5420-arndale-octa.dts
+++ b/arch/arm/boot/dts/exynos5420-arndale-octa.dts
@@ -372,3 +372,7 @@
&usbdrd_dwc3_1 {
dr_mode = "host";
};
+
+&cci {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index 517e50f6760b..6d38f8bfd0e6 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -120,7 +120,7 @@
};
};
- cci@10d20000 {
+ cci: cci@10d20000 {
compatible = "arm,cci-400";
#address-cells = <1>;
#size-cells = <1>;
@@ -503,8 +503,8 @@
};
dp_phy: video-phy@10040728 {
- compatible = "samsung,exynos5250-dp-video-phy";
- reg = <0x10040728 4>;
+ compatible = "samsung,exynos5420-dp-video-phy";
+ samsung,pmu-syscon = <&pmu_system_controller>;
#phy-cells = <0>;
};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index 58d3c3cf2923..e4d3aecc4ed2 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -162,7 +162,7 @@
#size-cells = <0>;
compatible = "fsl,imx25-cspi", "fsl,imx35-cspi";
reg = <0x43fa4000 0x4000>;
- clocks = <&clks 62>, <&clks 62>;
+ clocks = <&clks 78>, <&clks 78>;
clock-names = "ipg", "per";
interrupts = <14>;
status = "disabled";
@@ -369,7 +369,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fa0000 0x4000>;
- clocks = <&clks 106>, <&clks 36>;
+ clocks = <&clks 106>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <36>;
};
@@ -388,7 +388,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fa8000 0x4000>;
- clocks = <&clks 107>, <&clks 36>;
+ clocks = <&clks 107>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <41>;
};
@@ -429,7 +429,7 @@
pwm4: pwm@53fc8000 {
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
reg = <0x53fc8000 0x4000>;
- clocks = <&clks 108>, <&clks 36>;
+ clocks = <&clks 108>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <42>;
};
@@ -476,7 +476,7 @@
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
#pwm-cells = <2>;
reg = <0x53fe0000 0x4000>;
- clocks = <&clks 105>, <&clks 36>;
+ clocks = <&clks 105>, <&clks 52>;
clock-names = "ipg", "per";
interrupts = <26>;
};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 56569cecaa78..649befeb2cf9 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -127,24 +127,12 @@
#address-cells = <1>;
#size-cells = <0>;
- reg_usbh1_vbus: regulator@0 {
- compatible = "regulator-fixed";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_usbh1reg>;
- reg = <0>;
- regulator-name = "usbh1_vbus";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- gpio = <&gpio2 5 GPIO_ACTIVE_HIGH>;
- enable-active-high;
- };
-
- reg_usbotg_vbus: regulator@1 {
+ reg_hub_reset: regulator@0 {
compatible = "regulator-fixed";
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbotgreg>;
- reg = <1>;
- regulator-name = "usbotg_vbus";
+ reg = <0>;
+ regulator-name = "hub_reset";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 7 GPIO_ACTIVE_HIGH>;
@@ -176,6 +164,7 @@
reg = <0>;
clocks = <&clks IMX5_CLK_DUMMY>;
clock-names = "main_clk";
+ reset-gpios = <&gpio2 5 GPIO_ACTIVE_LOW>;
};
};
};
@@ -419,7 +408,7 @@
&usbh1 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_usbh1>;
- vbus-supply = <&reg_usbh1_vbus>;
+ vbus-supply = <&reg_hub_reset>;
fsl,usbphy = <&usbh1phy>;
phy_type = "ulpi";
status = "okay";
@@ -429,7 +418,6 @@
dr_mode = "otg";
disable-over-current;
phy_type = "utmi_wide";
- vbus-supply = <&reg_usbotg_vbus>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4fc03b7f1cee..2109d0763c1b 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -335,8 +335,8 @@
vpu: vpu@02040000 {
compatible = "cnm,coda960";
reg = <0x02040000 0x3c000>;
- interrupts = <0 3 IRQ_TYPE_LEVEL_HIGH>,
- <0 12 IRQ_TYPE_LEVEL_HIGH>;
+ interrupts = <0 12 IRQ_TYPE_LEVEL_HIGH>,
+ <0 3 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "bit", "jpeg";
clocks = <&clks IMX6QDL_CLK_VPU_AXI>,
<&clks IMX6QDL_CLK_MMDC_CH0_AXI>,
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dts b/arch/arm/boot/dts/imx6sx-sdb.dts
index 1e6e5cc1c14c..c108bb451337 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dts
+++ b/arch/arm/boot/dts/imx6sx-sdb.dts
@@ -159,13 +159,28 @@
pinctrl-0 = <&pinctrl_enet1>;
phy-supply = <&reg_enet_3v3>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy1>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+
+ ethphy2: ethernet-phy@2 {
+ reg = <2>;
+ };
+ };
};
&fec2 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_enet2>;
phy-mode = "rgmii";
+ phy-handle = <&ethphy2>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 657da14cb4b5..c70bb27ac65a 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -142,6 +142,7 @@
scfg: scfg@1570000 {
compatible = "fsl,ls1021a-scfg", "syscon";
reg = <0x0 0x1570000 0x0 0x10000>;
+ big-endian;
};
clockgen: clocking@1ee1000 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 53f3ca064140..b550c41b46f1 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -700,11 +700,9 @@
};
};
+ /* Ethernet is on some early development boards and qemu */
ethernet@gpmc {
compatible = "smsc,lan91c94";
-
- status = "disabled";
-
interrupt-parent = <&gpio2>;
interrupts = <22 IRQ_TYPE_LEVEL_HIGH>; /* gpio54 */
reg = <1 0x300 0xf>; /* 16 byte IO range at offset 0x300 */
diff --git a/arch/arm/boot/dts/rk3288-evb.dtsi b/arch/arm/boot/dts/rk3288-evb.dtsi
index 3e067dd65d0c..6194d673e80b 100644
--- a/arch/arm/boot/dts/rk3288-evb.dtsi
+++ b/arch/arm/boot/dts/rk3288-evb.dtsi
@@ -155,6 +155,15 @@
};
&pinctrl {
+ pcfg_pull_none_drv_8ma: pcfg-pull-none-drv-8ma {
+ drive-strength = <8>;
+ };
+
+ pcfg_pull_up_drv_8ma: pcfg-pull-up-drv-8ma {
+ bias-pull-up;
+ drive-strength = <8>;
+ };
+
backlight {
bl_en: bl-en {
rockchip,pins = <7 2 RK_FUNC_GPIO &pcfg_pull_none>;
@@ -173,6 +182,27 @@
};
};
+ sdmmc {
+ /*
+ * Default drive strength isn't enough to achieve even
+ * high-speed mode on EVB board so bump up to 8ma.
+ */
+ sdmmc_bus4: sdmmc-bus4 {
+ rockchip,pins = <6 16 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 17 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 18 RK_FUNC_1 &pcfg_pull_up_drv_8ma>,
+ <6 19 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+
+ sdmmc_clk: sdmmc-clk {
+ rockchip,pins = <6 20 RK_FUNC_1 &pcfg_pull_none_drv_8ma>;
+ };
+
+ sdmmc_cmd: sdmmc-cmd {
+ rockchip,pins = <6 21 RK_FUNC_1 &pcfg_pull_up_drv_8ma>;
+ };
+ };
+
usb {
host_vbus_drv: host-vbus-drv {
rockchip,pins = <0 14 RK_FUNC_GPIO &pcfg_pull_none>;
diff --git a/arch/arm/boot/dts/sama5d3xmb.dtsi b/arch/arm/boot/dts/sama5d3xmb.dtsi
index 49c10d33df30..77e03655aca3 100644
--- a/arch/arm/boot/dts/sama5d3xmb.dtsi
+++ b/arch/arm/boot/dts/sama5d3xmb.dtsi
@@ -176,7 +176,7 @@
"Headphone Jack", "HPOUTR",
"IN2L", "Line In Jack",
"IN2R", "Line In Jack",
- "MICBIAS", "IN1L",
+ "Mic", "MICBIAS",
"IN1L", "Mic";
atmel,ssc-controller = <&ssc0>;
diff --git a/arch/arm/boot/dts/sama5d4.dtsi b/arch/arm/boot/dts/sama5d4.dtsi
index 1b0f30c2c4a5..b94995d1889f 100644
--- a/arch/arm/boot/dts/sama5d4.dtsi
+++ b/arch/arm/boot/dts/sama5d4.dtsi
@@ -1008,7 +1008,7 @@
pit: timer@fc068630 {
compatible = "atmel,at91sam9260-pit";
- reg = <0xfc068630 0xf>;
+ reg = <0xfc068630 0x10>;
interrupts = <3 IRQ_TYPE_LEVEL_HIGH 5>;
clocks = <&h32ck>;
};
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index a8c00ee7522a..3d0b8755caee 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -25,11 +25,11 @@
stmpe2401_1 {
stmpe2401_1_nhk_mode: stmpe2401_1_nhk {
nhk_cfg1 {
- ste,pins = "GPIO76_B20"; // IRQ line
+ pins = "GPIO76_B20"; // IRQ line
ste,input = <0>;
};
nhk_cfg2 {
- ste,pins = "GPIO77_B8"; // reset line
+ pins = "GPIO77_B8"; // reset line
ste,output = <1>;
};
};
@@ -37,11 +37,11 @@
stmpe2401_2 {
stmpe2401_2_nhk_mode: stmpe2401_2_nhk {
nhk_cfg1 {
- ste,pins = "GPIO78_A8"; // IRQ line
+ pins = "GPIO78_A8"; // IRQ line
ste,input = <0>;
};
nhk_cfg2 {
- ste,pins = "GPIO79_C9"; // reset line
+ pins = "GPIO79_C9"; // reset line
ste,output = <1>;
};
};
diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi
index 7b4099fcf817..d5c4669224b1 100644
--- a/arch/arm/boot/dts/sun4i-a10.dtsi
+++ b/arch/arm/boot/dts/sun4i-a10.dtsi
@@ -17,14 +17,6 @@
aliases {
ethernet0 = &emac;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial7 = &uart7;
};
chosen {
@@ -39,6 +31,14 @@
<&ahb_gates 44>;
status = "disabled";
};
+
+ framebuffer@1 {
+ compatible = "allwinner,simple-framebuffer", "simple-framebuffer";
+ allwinner,pipeline = "de_fe0-de_be0-lcd0-hdmi";
+ clocks = <&pll5 1>, <&ahb_gates 36>, <&ahb_gates 43>,
+ <&ahb_gates 44>, <&ahb_gates 46>;
+ status = "disabled";
+ };
};
cpus {
@@ -438,8 +438,8 @@
reg-names = "phy_ctrl", "pmu1", "pmu2";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
- resets = <&usb_clk 1>, <&usb_clk 2>;
- reset-names = "usb1_reset", "usb2_reset";
+ resets = <&usb_clk 0>, <&usb_clk 1>, <&usb_clk 2>;
+ reset-names = "usb0_reset", "usb1_reset", "usb2_reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
index fe3c559ca6a8..bfa742817690 100644
--- a/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a10s-olinuxino-micro.dts
@@ -55,6 +55,12 @@
model = "Olimex A10s-Olinuxino Micro";
compatible = "olimex,a10s-olinuxino-micro", "allwinner,sun5i-a10s";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ };
+
soc@01c00000 {
emac: ethernet@01c0b000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi
index 1b76667f3182..2e7d8263799d 100644
--- a/arch/arm/boot/dts/sun5i-a10s.dtsi
+++ b/arch/arm/boot/dts/sun5i-a10s.dtsi
@@ -18,10 +18,6 @@
aliases {
ethernet0 = &emac;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
};
chosen {
@@ -390,8 +386,8 @@
reg-names = "phy_ctrl", "pmu1";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
- resets = <&usb_clk 1>;
- reset-names = "usb1_reset";
+ resets = <&usb_clk 0>, <&usb_clk 1>;
+ reset-names = "usb0_reset", "usb1_reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
index eeed1f236ee8..c7be3abd9fcc 100644
--- a/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
+++ b/arch/arm/boot/dts/sun5i-a13-hsg-h702.dts
@@ -53,6 +53,10 @@
model = "HSG H702";
compatible = "hsg,h702", "allwinner,sun5i-a13";
+ aliases {
+ serial0 = &uart1;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
index 916ee8bb826f..3decefb3c37a 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino-micro.dts
@@ -54,6 +54,10 @@
model = "Olimex A13-Olinuxino Micro";
compatible = "olimex,a13-olinuxino-micro", "allwinner,sun5i-a13";
+ aliases {
+ serial0 = &uart1;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
index e31d291d14cb..b421f7fa197b 100644
--- a/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
+++ b/arch/arm/boot/dts/sun5i-a13-olinuxino.dts
@@ -55,6 +55,10 @@
model = "Olimex A13-Olinuxino";
compatible = "olimex,a13-olinuxino", "allwinner,sun5i-a13";
+ aliases {
+ serial0 = &uart1;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi
index c35217ea1f64..c556688f8b8b 100644
--- a/arch/arm/boot/dts/sun5i-a13.dtsi
+++ b/arch/arm/boot/dts/sun5i-a13.dtsi
@@ -16,11 +16,6 @@
/ {
interrupt-parent = <&intc>;
- aliases {
- serial0 = &uart1;
- serial1 = &uart3;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
@@ -349,8 +344,8 @@
reg-names = "phy_ctrl", "pmu1";
clocks = <&usb_clk 8>;
clock-names = "usb_phy";
- resets = <&usb_clk 1>;
- reset-names = "usb1_reset";
+ resets = <&usb_clk 0>, <&usb_clk 1>;
+ reset-names = "usb0_reset", "usb1_reset";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index f47156b6572b..1e7e7bcf8307 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -53,12 +53,6 @@
interrupt-parent = <&gic>;
aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
ethernet0 = &gmac;
};
diff --git a/arch/arm/boot/dts/sun7i-a20-bananapi.dts b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
index 1cf1214cc068..bd7b15add697 100644
--- a/arch/arm/boot/dts/sun7i-a20-bananapi.dts
+++ b/arch/arm/boot/dts/sun7i-a20-bananapi.dts
@@ -55,6 +55,12 @@
model = "LeMaker Banana Pi";
compatible = "lemaker,bananapi", "allwinner,sun7i-a20";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart3;
+ serial2 = &uart7;
+ };
+
soc@01c00000 {
spi0: spi@01c05000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
index 0e4bfa3b2b85..0bcefcbbb756 100644
--- a/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
+++ b/arch/arm/boot/dts/sun7i-a20-hummingbird.dts
@@ -19,6 +19,14 @@
model = "Merrii A20 Hummingbird";
compatible = "merrii,a20-hummingbird", "allwinner,sun7i-a20";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart2;
+ serial2 = &uart3;
+ serial3 = &uart4;
+ serial4 = &uart5;
+ };
+
soc@01c00000 {
mmc0: mmc@01c0f000 {
pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
index 9d669cdf031d..66cc77707198 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-micro.dts
@@ -20,6 +20,9 @@
compatible = "olimex,a20-olinuxino-micro", "allwinner,sun7i-a20";
aliases {
+ serial0 = &uart0;
+ serial1 = &uart6;
+ serial2 = &uart7;
spi0 = &spi1;
spi1 = &spi2;
};
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e21ce5992d56..89749ce34a84 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -54,14 +54,6 @@
aliases {
ethernet0 = &gmac;
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &uart6;
- serial7 = &uart7;
};
chosen {
diff --git a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
index 7f2117ce6985..32ad80804dbb 100644
--- a/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
+++ b/arch/arm/boot/dts/sun8i-a23-ippo-q8h-v5.dts
@@ -55,6 +55,10 @@
model = "Ippo Q8H Dual Core Tablet (v5)";
compatible = "ippo,q8h-v5", "allwinner,sun8i-a23";
+ aliases {
+ serial0 = &r_uart;
+ };
+
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
};
diff --git a/arch/arm/boot/dts/sun8i-a23.dtsi b/arch/arm/boot/dts/sun8i-a23.dtsi
index 0746cd1024d7..86584fcf5e32 100644
--- a/arch/arm/boot/dts/sun8i-a23.dtsi
+++ b/arch/arm/boot/dts/sun8i-a23.dtsi
@@ -52,15 +52,6 @@
/ {
interrupt-parent = <&gic>;
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &r_uart;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/sun9i-a80-optimus.dts b/arch/arm/boot/dts/sun9i-a80-optimus.dts
index 506948f582ee..11ec71072e81 100644
--- a/arch/arm/boot/dts/sun9i-a80-optimus.dts
+++ b/arch/arm/boot/dts/sun9i-a80-optimus.dts
@@ -54,6 +54,11 @@
model = "Merrii A80 Optimus Board";
compatible = "merrii,a80-optimus", "allwinner,sun9i-a80";
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart4;
+ };
+
chosen {
bootargs = "earlyprintk console=ttyS0,115200";
};
diff --git a/arch/arm/boot/dts/sun9i-a80.dtsi b/arch/arm/boot/dts/sun9i-a80.dtsi
index 494714f67b57..9ef4438206a9 100644
--- a/arch/arm/boot/dts/sun9i-a80.dtsi
+++ b/arch/arm/boot/dts/sun9i-a80.dtsi
@@ -52,16 +52,6 @@
/ {
interrupt-parent = <&gic>;
- aliases {
- serial0 = &uart0;
- serial1 = &uart1;
- serial2 = &uart2;
- serial3 = &uart3;
- serial4 = &uart4;
- serial5 = &uart5;
- serial6 = &r_uart;
- };
-
cpus {
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index ea282c7c0ca5..e2fed2712249 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -406,7 +406,7 @@
clock-frequency = <400000>;
magnetometer@c {
- compatible = "ak,ak8975";
+ compatible = "asahi-kasei,ak8975";
reg = <0xc>;
interrupt-parent = <&gpio>;
interrupts = <TEGRA_GPIO(N, 5) IRQ_TYPE_LEVEL_HIGH>;
diff --git a/arch/arm/boot/dts/vf610-twr.dts b/arch/arm/boot/dts/vf610-twr.dts
index a0f762159cb2..f2b64b1b00fa 100644
--- a/arch/arm/boot/dts/vf610-twr.dts
+++ b/arch/arm/boot/dts/vf610-twr.dts
@@ -129,13 +129,28 @@
&fec0 {
phy-mode = "rmii";
+ phy-handle = <&ethphy0>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec0>;
status = "okay";
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ethphy0: ethernet-phy@0 {
+ reg = <0>;
+ };
+
+ ethphy1: ethernet-phy@1 {
+ reg = <1>;
+ };
+ };
};
&fec1 {
phy-mode = "rmii";
+ phy-handle = <&ethphy1>;
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_fec1>;
status = "okay";
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 5ef14de00a29..3d0c5d65c741 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -84,7 +84,8 @@ CONFIG_DEBUG_GPIO=y
CONFIG_POWER_SUPPLY=y
CONFIG_BATTERY_SBS=y
CONFIG_CHARGER_TPS65090=y
-# CONFIG_HWMON is not set
+CONFIG_HWMON=y
+CONFIG_SENSORS_LM90=y
CONFIG_THERMAL=y
CONFIG_EXYNOS_THERMAL=y
CONFIG_EXYNOS_THERMAL_CORE=y
@@ -109,11 +110,26 @@ CONFIG_REGULATOR_S2MPA01=y
CONFIG_REGULATOR_S2MPS11=y
CONFIG_REGULATOR_S5M8767=y
CONFIG_REGULATOR_TPS65090=y
+CONFIG_DRM=y
+CONFIG_DRM_BRIDGE=y
+CONFIG_DRM_PTN3460=y
+CONFIG_DRM_PS8622=y
+CONFIG_DRM_EXYNOS=y
+CONFIG_DRM_EXYNOS_FIMD=y
+CONFIG_DRM_EXYNOS_DP=y
+CONFIG_DRM_PANEL=y
+CONFIG_DRM_PANEL_SIMPLE=y
CONFIG_FB=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_SIMPLE=y
CONFIG_EXYNOS_VIDEO=y
CONFIG_EXYNOS_MIPI_DSI=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
+CONFIG_LCD_CLASS_DEVICE=y
+CONFIG_LCD_PLATFORM=y
+CONFIG_BACKLIGHT_CLASS_DEVICE=y
+CONFIG_BACKLIGHT_GENERIC=y
+CONFIG_BACKLIGHT_PWM=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FONTS=y
CONFIG_FONT_7x14=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 2328fe752e9c..bc393b7e5ece 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -338,6 +338,7 @@ CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_MVEBU=y
CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_EXYNOS=y
CONFIG_USB_EHCI_TEGRA=y
CONFIG_USB_EHCI_HCD_STI=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c2c3a852af9f..667d9d52aa01 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -68,7 +68,7 @@ CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_GENERIC_CPUFREQ_CPU0=y
+CONFIG_CPUFREQ_DT=y
# CONFIG_ARM_OMAP2PLUS_CPUFREQ is not set
CONFIG_CPU_IDLE=y
CONFIG_BINFMT_MISC=y
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 66ce17655bb9..7b0152321b20 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -38,6 +38,16 @@ static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
vcpu->arch.hcr = HCR_GUEST_MASK;
}
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr = hcr;
+}
+
static inline bool vcpu_mode_is_32bit(struct kvm_vcpu *vcpu)
{
return 1;
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 254e0650e48b..04b4ea0b550a 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -125,9 +125,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest on this vcpu */
bool pause;
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index 63e0ecc04901..1bca8f8af442 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -44,6 +44,7 @@
#ifndef __ASSEMBLY__
+#include <linux/highmem.h>
#include <asm/cacheflush.h>
#include <asm/pgalloc.h>
@@ -161,13 +162,10 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
- if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
-
/*
* If we are going to insert an instruction page and the icache is
* either VIPT or PIPT, there is a potential problem where the host
@@ -179,18 +177,77 @@ static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
*
* VIVT caches are tagged using both the ASID and the VMID and doesn't
* need any kind of flushing (DDI 0406C.b - Page B3-1392).
+ *
+ * We need to do this through a kernel mapping (using the
+ * user-space mapping has proved to be the wrong
+ * solution). For that, we need to kmap one page at a time,
+ * and iterate over the range.
*/
- if (icache_is_pipt()) {
- __cpuc_coherent_user_range(hva, hva + size);
- } else if (!icache_is_vivt_asid_tagged()) {
+
+ bool need_flush = !vcpu_has_cache_enabled(vcpu) || ipa_uncached;
+
+ VM_BUG_ON(size & PAGE_MASK);
+
+ if (!need_flush && !icache_is_pipt())
+ goto vipt_cache;
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ if (need_flush)
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ if (icache_is_pipt())
+ __cpuc_coherent_user_range((unsigned long)va,
+ (unsigned long)va + PAGE_SIZE);
+
+ size -= PAGE_SIZE;
+ pfn++;
+
+ kunmap_atomic(va);
+ }
+
+vipt_cache:
+ if (!icache_is_pipt() && !icache_is_vivt_asid_tagged()) {
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ void *va = kmap_atomic(pte_page(pte));
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ kunmap_atomic(va);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ unsigned long size = PMD_SIZE;
+ pfn_t pfn = pmd_pfn(pmd);
+
+ while (size) {
+ void *va = kmap_atomic_pfn(pfn);
+
+ kvm_flush_dcache_to_poc(va, PAGE_SIZE);
+
+ pfn++;
+ size -= PAGE_SIZE;
+
+ kunmap_atomic(va);
+ }
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+}
+
#define kvm_virt_to_phys(x) virt_to_idmap((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* !__ASSEMBLY__ */
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 705bb7620673..0c3f5a0dafd3 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -413,6 +413,7 @@
#define __NR_getrandom (__NR_SYSCALL_BASE+384)
#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
#define __NR_bpf (__NR_SYSCALL_BASE+386)
+#define __NR_execveat (__NR_SYSCALL_BASE+387)
/*
* The following SWIs are ARM private.
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index e51833f8cc38..05745eb838c5 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -396,6 +396,7 @@
CALL(sys_getrandom)
/* 385 */ CALL(sys_memfd_create)
CALL(sys_bpf)
+ CALL(sys_execveat)
#ifndef syscalls_counted
.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
#define syscalls_counted
diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
index 4176df721bf0..1a0045abead7 100644
--- a/arch/arm/kernel/entry-header.S
+++ b/arch/arm/kernel/entry-header.S
@@ -253,21 +253,22 @@
.endm
.macro restore_user_regs, fast = 0, offset = 0
- ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr
- ldr lr, [sp, #\offset + S_PC]! @ get pc
+ mov r2, sp
+ ldr r1, [r2, #\offset + S_PSR] @ get calling cpsr
+ ldr lr, [r2, #\offset + S_PC]! @ get pc
msr spsr_cxsf, r1 @ save in spsr_svc
#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K)
@ We must avoid clrex due to Cortex-A15 erratum #830321
- strex r1, r2, [sp] @ clear the exclusive monitor
+ strex r1, r2, [r2] @ clear the exclusive monitor
#endif
.if \fast
- ldmdb sp, {r1 - lr}^ @ get calling r1 - lr
+ ldmdb r2, {r1 - lr}^ @ get calling r1 - lr
.else
- ldmdb sp, {r0 - lr}^ @ get calling r0 - lr
+ ldmdb r2, {r0 - lr}^ @ get calling r0 - lr
.endif
mov r0, r0 @ ARMv5T and earlier require a nop
@ after ldm {}^
- add sp, sp, #S_FRAME_SIZE - S_PC
+ add sp, sp, #\offset + S_FRAME_SIZE
movs pc, lr @ return & move spsr_svc into cpsr
.endm
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index f7c65adaa428..557e128e4df0 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -116,8 +116,14 @@ int armpmu_event_set_period(struct perf_event *event)
ret = 1;
}
- if (left > (s64)armpmu->max_period)
- left = armpmu->max_period;
+ /*
+ * Limit the maximum period to prevent the counter value
+ * from overtaking the one we are about to program. In
+ * effect we are reducing max_period to account for
+ * interrupt latency (and we are being very conservative).
+ */
+ if (left > (armpmu->max_period >> 1))
+ left = armpmu->max_period >> 1;
local64_set(&hwc->prev_count, (u64)-left);
diff --git a/arch/arm/kernel/perf_regs.c b/arch/arm/kernel/perf_regs.c
index 6e4379c67cbc..592dda3f21ff 100644
--- a/arch/arm/kernel/perf_regs.c
+++ b/arch/arm/kernel/perf_regs.c
@@ -28,3 +28,11 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index f9c863911038..e55408e96559 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -657,10 +657,13 @@ int __init arm_add_memory(u64 start, u64 size)
/*
* Ensure that start/size are aligned to a page boundary.
- * Size is appropriately rounded down, start is rounded up.
+ * Size is rounded down, start is rounded up.
*/
- size -= start & ~PAGE_MASK;
aligned_start = PAGE_ALIGN(start);
+ if (aligned_start > start + size)
+ size = 0;
+ else
+ size -= aligned_start - start;
#ifndef CONFIG_ARCH_PHYS_ADDR_T_64BIT
if (aligned_start > ULONG_MAX) {
@@ -1046,6 +1049,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "model name\t: %s rev %d (%s)\n",
cpu_name, cpuid & 15, elf_platform);
+#if defined(CONFIG_SMP)
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ per_cpu(cpu_data, i).loops_per_jiffy / (500000UL/HZ),
+ (per_cpu(cpu_data, i).loops_per_jiffy / (5000UL/HZ)) % 100);
+#else
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000/HZ),
+ (loops_per_jiffy / (5000/HZ)) % 100);
+#endif
/* dump out the processor features */
seq_puts(m, "Features\t: ");
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 5e6052e18850..86ef244c5a24 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -387,6 +387,18 @@ asmlinkage void secondary_start_kernel(void)
void __init smp_cpus_done(unsigned int max_cpus)
{
+ int cpu;
+ unsigned long bogosum = 0;
+
+ for_each_online_cpu(cpu)
+ bogosum += per_cpu(cpu_data, cpu).loops_per_jiffy;
+
+ printk(KERN_INFO "SMP: Total of %d processors activated "
+ "(%lu.%02lu BogoMIPS).\n",
+ num_online_cpus(),
+ bogosum / (500000/HZ),
+ (bogosum / (5000/HZ)) % 100);
+
hyp_mode_check();
}
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 2d6d91001062..0b0d58a905c4 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -281,15 +281,6 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
vcpu->cpu = cpu;
vcpu->arch.host_cpu_context = this_cpu_ptr(kvm_host_cpu_state);
- /*
- * Check whether this vcpu requires the cache to be flushed on
- * this physical CPU. This is a consequence of doing dcache
- * operations by set/way on this vcpu. We do it here to be in
- * a non-preemptible section.
- */
- if (cpumask_test_and_clear_cpu(cpu, &vcpu->arch.require_dcache_flush))
- flush_cache_all(); /* We'd really want v7_flush_dcache_all() */
-
kvm_arm_set_running_vcpu(vcpu);
}
@@ -541,7 +532,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);
vcpu->mode = OUTSIDE_GUEST_MODE;
- vcpu->arch.last_pcpu = smp_processor_id();
kvm_guest_exit();
trace_kvm_exit(*vcpu_pc(vcpu));
/*
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index 7928dbdf2102..f3d88dc388bc 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -189,82 +189,40 @@ static bool access_l2ectlr(struct kvm_vcpu *vcpu,
return true;
}
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
const struct coproc_reg *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt1);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val));
- break;
-
- case 10: /* DCCSW */
- asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val));
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
+ *
+ * Used by the cpu-specific code.
*/
-static bool access_vm_reg(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r)
{
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
+
BUG_ON(!p->is_write);
vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
if (p->is_64bit)
vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
- return true;
-}
-
-/*
- * SCTLR accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- *
- * Used by the cpu-specific code.
- */
-bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r)
-{
- access_vm_reg(vcpu, p, r);
-
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
-
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 1a44bbe39643..88d24a3a9778 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -153,8 +153,8 @@ static inline int cmp_reg(const struct coproc_reg *i1,
#define is64 .is_64 = true
#define is32 .is_64 = false
-bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct coproc_params *p,
- const struct coproc_reg *r);
+bool access_vm_reg(struct kvm_vcpu *vcpu,
+ const struct coproc_params *p,
+ const struct coproc_reg *r);
#endif /* __ARM_KVM_COPROC_LOCAL_H__ */
diff --git a/arch/arm/kvm/coproc_a15.c b/arch/arm/kvm/coproc_a15.c
index e6f4ae48bda9..a7136757d373 100644
--- a/arch/arm/kvm/coproc_a15.c
+++ b/arch/arm/kvm/coproc_a15.c
@@ -34,7 +34,7 @@
static const struct coproc_reg a15_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_sctlr, reset_val, c1_SCTLR, 0x00C50078 },
+ access_vm_reg, reset_val, c1_SCTLR, 0x00C50078 },
};
static struct kvm_coproc_target_table a15_target_table = {
diff --git a/arch/arm/kvm/coproc_a7.c b/arch/arm/kvm/coproc_a7.c
index 17fc7cd479d3..b19e46d1b2c0 100644
--- a/arch/arm/kvm/coproc_a7.c
+++ b/arch/arm/kvm/coproc_a7.c
@@ -37,7 +37,7 @@
static const struct coproc_reg a7_regs[] = {
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
- access_sctlr, reset_val, c1_SCTLR, 0x00C50878 },
+ access_vm_reg, reset_val, c1_SCTLR, 0x00C50878 },
};
static struct kvm_coproc_target_table a7_target_table = {
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 1dc9778a00af..136662547ca6 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -58,6 +58,26 @@ static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}
+/*
+ * D-Cache management functions. They take the page table entries by
+ * value, as they are flushing the cache using the kernel mapping (or
+ * kmap on 32bit).
+ */
+static void kvm_flush_dcache_pte(pte_t pte)
+{
+ __kvm_flush_dcache_pte(pte);
+}
+
+static void kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ __kvm_flush_dcache_pmd(pmd);
+}
+
+static void kvm_flush_dcache_pud(pud_t pud)
+{
+ __kvm_flush_dcache_pud(pud);
+}
+
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
int min, int max)
{
@@ -119,6 +139,26 @@ static void clear_pmd_entry(struct kvm *kvm, pmd_t *pmd, phys_addr_t addr)
put_page(virt_to_page(pmd));
}
+/*
+ * Unmapping vs dcache management:
+ *
+ * If a guest maps certain memory pages as uncached, all writes will
+ * bypass the data cache and go directly to RAM. However, the CPUs
+ * can still speculate reads (not writes) and fill cache lines with
+ * data.
+ *
+ * Those cache lines will be *clean* cache lines though, so a
+ * clean+invalidate operation is equivalent to an invalidate
+ * operation, because no cache lines are marked dirty.
+ *
+ * Those clean cache lines could be filled prior to an uncached write
+ * by the guest, and the cache coherent IO subsystem would therefore
+ * end up writing old data to disk.
+ *
+ * This is why right after unmapping a page/section and invalidating
+ * the corresponding TLBs, we call kvm_flush_dcache_p*() to make sure
+ * the IO subsystem will never hit in the cache.
+ */
static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
phys_addr_t addr, phys_addr_t end)
{
@@ -128,9 +168,16 @@ static void unmap_ptes(struct kvm *kvm, pmd_t *pmd,
start_pte = pte = pte_offset_kernel(pmd, addr);
do {
if (!pte_none(*pte)) {
+ pte_t old_pte = *pte;
+
kvm_set_pte(pte, __pte(0));
- put_page(virt_to_page(pte));
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ /* No need to invalidate the cache for device mappings */
+ if ((pte_val(old_pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ kvm_flush_dcache_pte(old_pte);
+
+ put_page(virt_to_page(pte));
}
} while (pte++, addr += PAGE_SIZE, addr != end);
@@ -149,8 +196,13 @@ static void unmap_pmds(struct kvm *kvm, pud_t *pud,
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
if (kvm_pmd_huge(*pmd)) {
+ pmd_t old_pmd = *pmd;
+
pmd_clear(pmd);
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ kvm_flush_dcache_pmd(old_pmd);
+
put_page(virt_to_page(pmd));
} else {
unmap_ptes(kvm, pmd, addr, next);
@@ -173,8 +225,13 @@ static void unmap_puds(struct kvm *kvm, pgd_t *pgd,
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
if (pud_huge(*pud)) {
+ pud_t old_pud = *pud;
+
pud_clear(pud);
kvm_tlb_flush_vmid_ipa(kvm, addr);
+
+ kvm_flush_dcache_pud(old_pud);
+
put_page(virt_to_page(pud));
} else {
unmap_pmds(kvm, pud, addr, next);
@@ -209,10 +266,9 @@ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
pte = pte_offset_kernel(pmd, addr);
do {
- if (!pte_none(*pte)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PAGE_SIZE);
- }
+ if (!pte_none(*pte) &&
+ (pte_val(*pte) & PAGE_S2_DEVICE) != PAGE_S2_DEVICE)
+ kvm_flush_dcache_pte(*pte);
} while (pte++, addr += PAGE_SIZE, addr != end);
}
@@ -226,12 +282,10 @@ static void stage2_flush_pmds(struct kvm *kvm, pud_t *pud,
do {
next = kvm_pmd_addr_end(addr, end);
if (!pmd_none(*pmd)) {
- if (kvm_pmd_huge(*pmd)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PMD_SIZE);
- } else {
+ if (kvm_pmd_huge(*pmd))
+ kvm_flush_dcache_pmd(*pmd);
+ else
stage2_flush_ptes(kvm, pmd, addr, next);
- }
}
} while (pmd++, addr = next, addr != end);
}
@@ -246,12 +300,10 @@ static void stage2_flush_puds(struct kvm *kvm, pgd_t *pgd,
do {
next = kvm_pud_addr_end(addr, end);
if (!pud_none(*pud)) {
- if (pud_huge(*pud)) {
- hva_t hva = gfn_to_hva(kvm, addr >> PAGE_SHIFT);
- kvm_flush_dcache_to_poc((void*)hva, PUD_SIZE);
- } else {
+ if (pud_huge(*pud))
+ kvm_flush_dcache_pud(*pud);
+ else
stage2_flush_pmds(kvm, pud, addr, next);
- }
}
} while (pud++, addr = next, addr != end);
}
@@ -278,7 +330,7 @@ static void stage2_flush_memslot(struct kvm *kvm,
* Go through the stage 2 page tables and invalidate any cache lines
* backing memory already mapped to the VM.
*/
-void stage2_flush_vm(struct kvm *kvm)
+static void stage2_flush_vm(struct kvm *kvm)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *memslot;
@@ -905,6 +957,12 @@ static bool kvm_is_device_pfn(unsigned long pfn)
return !pfn_valid(pfn);
}
+static void coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size, bool uncached)
+{
+ __coherent_cache_guest_page(vcpu, pfn, size, uncached);
+}
+
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
struct kvm_memory_slot *memslot, unsigned long hva,
unsigned long fault_status)
@@ -994,8 +1052,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pmd_writable(&new_pmd);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva & PMD_MASK, PMD_SIZE,
- fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PMD_SIZE, fault_ipa_uncached);
ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
} else {
pte_t new_pte = pfn_pte(pfn, mem_type);
@@ -1003,8 +1060,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
kvm_set_s2pte_writable(&new_pte);
kvm_set_pfn_dirty(pfn);
}
- coherent_cache_guest_page(vcpu, hva, PAGE_SIZE,
- fault_ipa_uncached);
+ coherent_cache_guest_page(vcpu, pfn, PAGE_SIZE, fault_ipa_uncached);
ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte,
pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));
}
@@ -1411,3 +1467,71 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
unmap_stage2_range(kvm, gpa, size);
spin_unlock(&kvm->mmu_lock);
}
+
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ *
+ * Main problems:
+ * - S/W ops are local to a CPU (not broadcast)
+ * - We have line migration behind our back (speculation)
+ * - System caches don't support S/W at all (damn!)
+ *
+ * In the face of the above, the best we can do is to try and convert
+ * S/W ops to VA ops. Because the guest is not allowed to infer the
+ * S/W to PA mapping, it can only use S/W to nuke the whole cache,
+ * which is a rather good thing for us.
+ *
+ * Also, it is only used when turning caches on/off ("The expected
+ * usage of the cache maintenance instructions that operate by set/way
+ * is associated with the cache maintenance instructions associated
+ * with the powerdown and powerup of caches, if this is required by
+ * the implementation.").
+ *
+ * We use the following policy:
+ *
+ * - If we trap a S/W operation, we enable VM trapping to detect
+ * caches being turned on/off, and do a full clean.
+ *
+ * - We flush the caches on both caches being turned on and off.
+ *
+ * - Once the caches are enabled, we stop trapping VM ops.
+ */
+void kvm_set_way_flush(struct kvm_vcpu *vcpu)
+{
+ unsigned long hcr = vcpu_get_hcr(vcpu);
+
+ /*
+ * If this is the first time we do a S/W operation
+ * (i.e. HCR_TVM not set) flush the whole memory, and set the
+ * VM trapping.
+ *
+ * Otherwise, rely on the VM trapping to wait for the MMU +
+ * Caches to be turned off. At that point, we'll be able to
+ * clean the caches again.
+ */
+ if (!(hcr & HCR_TVM)) {
+ trace_kvm_set_way_flush(*vcpu_pc(vcpu),
+ vcpu_has_cache_enabled(vcpu));
+ stage2_flush_vm(vcpu->kvm);
+ vcpu_set_hcr(vcpu, hcr | HCR_TVM);
+ }
+}
+
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
+{
+ bool now_enabled = vcpu_has_cache_enabled(vcpu);
+
+ /*
+ * If switching the MMU+caches on, need to invalidate the caches.
+ * If switching it off, need to clean the caches.
+ * Clean + invalidate does the trick always.
+ */
+ if (now_enabled != was_enabled)
+ stage2_flush_vm(vcpu->kvm);
+
+ /* Caches are now on, stop trapping VM ops (until a S/W op) */
+ if (now_enabled)
+ vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) & ~HCR_TVM);
+
+ trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
+}
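kvm_set_way_flush() and kvm_toggle_cache() together form a small state machine: the first set/way trap does a full clean and turns on HCR_TVM, and the next cache enable/disable transition flushes again and drops the trap. A toy model of that policy, with illustrative names and a plain boolean standing in for HCR_TVM (not the kernel interfaces):

#include <stdbool.h>
#include <stdio.h>

static bool trap_vm_regs;	/* models HCR_TVM */

static void set_way_op(void)
{
	if (!trap_vm_regs) {
		printf("set/way trapped: full clean, start trapping VM regs\n");
		trap_vm_regs = true;
	}
}

static void write_sctlr(bool was_on, bool now_on)
{
	if (now_on != was_on)
		printf("cache state flipped: clean+invalidate stage-2\n");
	if (now_on)
		trap_vm_regs = false;	/* stop trapping until the next set/way op */
}

int main(void)
{
	set_way_op();			/* guest cleans caches by set/way */
	write_sctlr(true, false);	/* guest turns MMU+caches off */
	write_sctlr(false, true);	/* ... and back on; trapping stops */
	return 0;
}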
diff --git a/arch/arm/kvm/trace.h b/arch/arm/kvm/trace.h
index b1d640f78623..b6a6e7102201 100644
--- a/arch/arm/kvm/trace.h
+++ b/arch/arm/kvm/trace.h
@@ -223,6 +223,45 @@ TRACE_EVENT(kvm_hvc,
__entry->vcpu_pc, __entry->r0, __entry->imm)
);
+TRACE_EVENT(kvm_set_way_flush,
+ TP_PROTO(unsigned long vcpu_pc, bool cache),
+ TP_ARGS(vcpu_pc, cache),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( bool, cache )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->cache = cache;
+ ),
+
+ TP_printk("S/W flush at 0x%016lx (cache %s)",
+ __entry->vcpu_pc, __entry->cache ? "on" : "off")
+);
+
+TRACE_EVENT(kvm_toggle_cache,
+ TP_PROTO(unsigned long vcpu_pc, bool was, bool now),
+ TP_ARGS(vcpu_pc, was, now),
+
+ TP_STRUCT__entry(
+ __field( unsigned long, vcpu_pc )
+ __field( bool, was )
+ __field( bool, now )
+ ),
+
+ TP_fast_assign(
+ __entry->vcpu_pc = vcpu_pc;
+ __entry->was = was;
+ __entry->now = now;
+ ),
+
+ TP_printk("VM op at 0x%016lx (cache was %s, now %s)",
+ __entry->vcpu_pc, __entry->was ? "on" : "off",
+ __entry->now ? "on" : "off")
+);
+
#endif /* _TRACE_KVM_H */
#undef TRACE_INCLUDE_PATH
diff --git a/arch/arm/mach-at91/board-dt-sama5.c b/arch/arm/mach-at91/board-dt-sama5.c
index 8fb9ef5333f1..97f7367d32b8 100644
--- a/arch/arm/mach-at91/board-dt-sama5.c
+++ b/arch/arm/mach-at91/board-dt-sama5.c
@@ -17,6 +17,7 @@
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/clk-provider.h>
+#include <linux/phy.h>
#include <asm/setup.h>
#include <asm/irq.h>
@@ -26,8 +27,25 @@
#include "generic.h"
+static int ksz8081_phy_fixup(struct phy_device *phy)
+{
+ int value;
+
+ value = phy_read(phy, 0x16);
+ value &= ~0x20;
+ phy_write(phy, 0x16, value);
+
+ return 0;
+}
+
static void __init sama5_dt_device_init(void)
{
+ if (of_machine_is_compatible("atmel,sama5d4ek") &&
+ IS_ENABLED(CONFIG_PHYLIB)) {
+ phy_register_fixup_for_id("fc028000.etherne:00",
+ ksz8081_phy_fixup);
+ }
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 5951660d1bd2..2daef619d053 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -144,7 +144,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
post_div_table[1].div = 1;
post_div_table[2].div = 1;
video_div_table[1].div = 1;
- video_div_table[2].div = 1;
+ video_div_table[3].div = 1;
}
clk[IMX6QDL_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", base + 0x00, 14, 2, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
diff --git a/arch/arm/mach-imx/clk-imx6sx.c b/arch/arm/mach-imx/clk-imx6sx.c
index 17354a11356f..5a3e5a159e70 100644
--- a/arch/arm/mach-imx/clk-imx6sx.c
+++ b/arch/arm/mach-imx/clk-imx6sx.c
@@ -558,6 +558,9 @@ static void __init imx6sx_clocks_init(struct device_node *ccm_node)
clk_set_parent(clks[IMX6SX_CLK_GPU_CORE_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
clk_set_parent(clks[IMX6SX_CLK_GPU_AXI_SEL], clks[IMX6SX_CLK_PLL3_PFD0]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI1_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+ clk_set_parent(clks[IMX6SX_CLK_QSPI2_SEL], clks[IMX6SX_CLK_PLL2_BUS]);
+
/* Set initial power mode */
imx6q_set_lpm(WAIT_CLOCKED);
}
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 3585cb394e9b..ccef8806bb58 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -190,6 +190,13 @@ static void __init armada_375_380_coherency_init(struct device_node *np)
arch_ioremap_caller = armada_pcie_wa_ioremap_caller;
/*
+ * We should switch the PL310 to I/O coherency mode only if
+ * I/O coherency is actually enabled.
+ */
+ if (!coherency_available())
+ return;
+
+ /*
* Add the PL310 property "arm,io-coherent". This makes sure the
* outer sync operation is not used, which allows to
* workaround the system erratum that causes deadlocks when
@@ -246,9 +253,14 @@ static int coherency_type(void)
return type;
}
+/*
+ * As a precaution, we currently completely disable hardware I/O
+ * coherency, until enough testing is done with automatic I/O
+ * synchronization barriers to validate that it is a proper solution.
+ */
int coherency_available(void)
{
- return coherency_type() != COHERENCY_FABRIC_TYPE_NONE;
+ return false;
}
int __init coherency_init(void)
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 608079a1aba6..b61c049f92d6 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -77,6 +77,24 @@ MACHINE_END
#endif
#ifdef CONFIG_ARCH_OMAP3
+/* Some boards need board name for legacy userspace in /proc/cpuinfo */
+static const char *const n900_boards_compat[] __initconst = {
+ "nokia,omap3-n900",
+ NULL,
+};
+
+DT_MACHINE_START(OMAP3_N900_DT, "Nokia RX-51 board")
+ .reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = omap3430_init_early,
+ .init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
+ .init_time = omap3_sync32k_timer_init,
+ .dt_compat = n900_boards_compat,
+ .restart = omap3xxx_restart,
+MACHINE_END
+
+/* Generic omap3 boards, most boards can use these */
static const char *const omap3_boards_compat[] __initconst = {
"ti,omap3430",
"ti,omap3",
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index 377eea849e7b..64e44d6d07c0 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -211,6 +211,7 @@ extern struct device *omap2_get_iva_device(void);
extern struct device *omap2_get_l3_device(void);
extern struct device *omap4_get_dsp_device(void);
+unsigned int omap4_xlate_irq(unsigned int hwirq);
void omap_gic_of_init(void);
#ifdef CONFIG_CACHE_L2X0
@@ -249,6 +250,7 @@ extern void omap4_cpu_die(unsigned int cpu);
extern struct smp_operations omap4_smp_ops;
extern void omap5_secondary_startup(void);
+extern void omap5_secondary_hyp_startup(void);
#endif
#if defined(CONFIG_SMP) && defined(CONFIG_PM)
diff --git a/arch/arm/mach-omap2/control.h b/arch/arm/mach-omap2/control.h
index a3c013345c45..a80ac2d70bb1 100644
--- a/arch/arm/mach-omap2/control.h
+++ b/arch/arm/mach-omap2/control.h
@@ -286,6 +286,10 @@
#define OMAP5XXX_CONTROL_STATUS 0x134
#define OMAP5_DEVICETYPE_MASK (0x7 << 6)
+/* DRA7XX CONTROL CORE BOOTSTRAP */
+#define DRA7_CTRL_CORE_BOOTSTRAP 0x6c4
+#define DRA7_SPEEDSELECT_MASK (0x3 << 8)
+
/*
* REVISIT: This list of registers is not comprehensive - there are more
* that should be added.
diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S
index 4993d4bfe9b2..6d1dffca6c7b 100644
--- a/arch/arm/mach-omap2/omap-headsmp.S
+++ b/arch/arm/mach-omap2/omap-headsmp.S
@@ -22,6 +22,7 @@
/* Physical address needed since MMU not enabled yet on secondary core */
#define AUX_CORE_BOOT0_PA 0x48281800
+#define API_HYP_ENTRY 0x102
/*
* OMAP5 specific entry point for secondary CPU to jump from ROM
@@ -41,6 +42,26 @@ wait: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
b secondary_startup
ENDPROC(omap5_secondary_startup)
/*
+ * Same as omap5_secondary_startup except we call into the ROM to
+ * enable HYP mode first. This is called instead of
+ * omap5_secondary_startup if the primary CPU was put into HYP mode by
+ * the boot loader.
+ */
+ENTRY(omap5_secondary_hyp_startup)
+wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0
+ ldr r0, [r2]
+ mov r0, r0, lsr #5
+ mrc p15, 0, r4, c0, c0, 5
+ and r4, r4, #0x0f
+ cmp r0, r4
+ bne wait_2
+ ldr r12, =API_HYP_ENTRY
+ adr r0, hyp_boot
+ smc #0
+hyp_boot:
+ b secondary_startup
+ENDPROC(omap5_secondary_hyp_startup)
+/*
* OMAP4 specific entry point for secondary CPU to jump from ROM
* code. This routine also provides a holding flag into which
* secondary core is held until we're ready for it to initialise.
diff --git a/arch/arm/mach-omap2/omap-smp.c b/arch/arm/mach-omap2/omap-smp.c
index 256e84ef0f67..5305ec7341ec 100644
--- a/arch/arm/mach-omap2/omap-smp.c
+++ b/arch/arm/mach-omap2/omap-smp.c
@@ -22,6 +22,7 @@
#include <linux/irqchip/arm-gic.h>
#include <asm/smp_scu.h>
+#include <asm/virt.h>
#include "omap-secure.h"
#include "omap-wakeupgen.h"
@@ -227,8 +228,16 @@ static void __init omap4_smp_prepare_cpus(unsigned int max_cpus)
if (omap_secure_apis_support())
omap_auxcoreboot_addr(virt_to_phys(startup_addr));
else
- writel_relaxed(virt_to_phys(omap5_secondary_startup),
- base + OMAP_AUX_CORE_BOOT_1);
+ /*
+ * If the boot CPU is in HYP mode then start secondary
+ * CPU in HYP mode as well.
+ */
+ if ((__boot_cpu_mode & MODE_MASK) == HYP_MODE)
+ writel_relaxed(virt_to_phys(omap5_secondary_hyp_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
+ else
+ writel_relaxed(virt_to_phys(omap5_secondary_startup),
+ base + OMAP_AUX_CORE_BOOT_1);
}
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index b7cb44abe49b..cc30e49a4cc2 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -256,6 +256,38 @@ static int __init omap4_sar_ram_init(void)
}
omap_early_initcall(omap4_sar_ram_init);
+static struct of_device_id gic_match[] = {
+ { .compatible = "arm,cortex-a9-gic", },
+ { .compatible = "arm,cortex-a15-gic", },
+ { },
+};
+
+static struct device_node *gic_node;
+
+unsigned int omap4_xlate_irq(unsigned int hwirq)
+{
+ struct of_phandle_args irq_data;
+ unsigned int irq;
+
+ if (!gic_node)
+ gic_node = of_find_matching_node(NULL, gic_match);
+
+ if (WARN_ON(!gic_node))
+ return hwirq;
+
+ irq_data.np = gic_node;
+ irq_data.args_count = 3;
+ irq_data.args[0] = 0;
+ irq_data.args[1] = hwirq - OMAP44XX_IRQ_GIC_START;
+ irq_data.args[2] = IRQ_TYPE_LEVEL_HIGH;
+
+ irq = irq_create_of_mapping(&irq_data);
+ if (WARN_ON(!irq))
+ irq = hwirq;
+
+ return irq;
+}
+
void __init omap_gic_of_init(void)
{
struct device_node *np;
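omap4_xlate_irq() maps a legacy GIC hardware number onto the three interrupt cells of the standard GIC binding before asking the irqdomain for a Linux virq. The cell values it builds can be reproduced with simple arithmetic; the sketch below assumes the usual GIC_SPI = 0, IRQ_TYPE_LEVEL_HIGH = 4 and the 32-interrupt SPI offset, using the PMIC interrupt that twl-common.c translates further down:

#include <stdio.h>

#define IRQ_GIC_START		32	/* OMAP44XX_IRQ_GIC_START */
#define GIC_SPI			0
#define IRQ_TYPE_LEVEL_HIGH	4

int main(void)
{
	unsigned int hwirq = 7 + IRQ_GIC_START;	/* the PMIC line as an example */

	printf("interrupts = <%u %u %u>;\n",
	       GIC_SPI, hwirq - IRQ_GIC_START, IRQ_TYPE_LEVEL_HIGH);
	return 0;
}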
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index cbb908dc5cf0..9025ffffd2dc 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -3534,9 +3534,15 @@ int omap_hwmod_fill_resources(struct omap_hwmod *oh, struct resource *res)
mpu_irqs_cnt = _count_mpu_irqs(oh);
for (i = 0; i < mpu_irqs_cnt; i++) {
+ unsigned int irq;
+
+ if (oh->xlate_irq)
+ irq = oh->xlate_irq((oh->mpu_irqs + i)->irq);
+ else
+ irq = (oh->mpu_irqs + i)->irq;
(res + r)->name = (oh->mpu_irqs + i)->name;
- (res + r)->start = (oh->mpu_irqs + i)->irq;
- (res + r)->end = (oh->mpu_irqs + i)->irq;
+ (res + r)->start = irq;
+ (res + r)->end = irq;
(res + r)->flags = IORESOURCE_IRQ;
r++;
}
diff --git a/arch/arm/mach-omap2/omap_hwmod.h b/arch/arm/mach-omap2/omap_hwmod.h
index 35ca6efbec31..5b42fafcaf55 100644
--- a/arch/arm/mach-omap2/omap_hwmod.h
+++ b/arch/arm/mach-omap2/omap_hwmod.h
@@ -676,6 +676,7 @@ struct omap_hwmod {
spinlock_t _lock;
struct list_head node;
struct omap_hwmod_ocp_if *_mpu_port;
+ unsigned int (*xlate_irq)(unsigned int);
u16 flags;
u8 mpu_rt_idx;
u8 response_lat;
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index c314b3c31117..f5e68a782025 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -479,6 +479,7 @@ static struct omap_hwmod omap44xx_dma_system_hwmod = {
.class = &omap44xx_dma_hwmod_class,
.clkdm_name = "l3_dma_clkdm",
.mpu_irqs = omap44xx_dma_system_irqs,
+ .xlate_irq = omap4_xlate_irq,
.main_clk = "l3_div_ck",
.prcm = {
.omap4 = {
@@ -640,6 +641,7 @@ static struct omap_hwmod omap44xx_dss_dispc_hwmod = {
.class = &omap44xx_dispc_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dispc_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dispc_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -693,6 +695,7 @@ static struct omap_hwmod omap44xx_dss_dsi1_hwmod = {
.class = &omap44xx_dsi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dsi1_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dsi1_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -726,6 +729,7 @@ static struct omap_hwmod omap44xx_dss_dsi2_hwmod = {
.class = &omap44xx_dsi_hwmod_class,
.clkdm_name = "l3_dss_clkdm",
.mpu_irqs = omap44xx_dss_dsi2_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_dsi2_sdma_reqs,
.main_clk = "dss_dss_clk",
.prcm = {
@@ -784,6 +788,7 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
*/
.flags = HWMOD_SWSUP_SIDLE,
.mpu_irqs = omap44xx_dss_hdmi_irqs,
+ .xlate_irq = omap4_xlate_irq,
.sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
.main_clk = "dss_48mhz_clk",
.prcm = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 3e9523084b2a..7c3fac035e93 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -288,6 +288,7 @@ static struct omap_hwmod omap54xx_dma_system_hwmod = {
.class = &omap54xx_dma_hwmod_class,
.clkdm_name = "dma_clkdm",
.mpu_irqs = omap54xx_dma_system_irqs,
+ .xlate_irq = omap4_xlate_irq,
.main_clk = "l3_iclk_div",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/prcm-common.h b/arch/arm/mach-omap2/prcm-common.h
index a8e4b582c527..6163d66102a3 100644
--- a/arch/arm/mach-omap2/prcm-common.h
+++ b/arch/arm/mach-omap2/prcm-common.h
@@ -498,6 +498,7 @@ struct omap_prcm_irq_setup {
u8 nr_irqs;
const struct omap_prcm_irq *irqs;
int irq;
+ unsigned int (*xlate_irq)(unsigned int);
void (*read_pending_irqs)(unsigned long *events);
void (*ocp_barrier)(void);
void (*save_and_clear_irqen)(u32 *saved_mask);
diff --git a/arch/arm/mach-omap2/prm44xx.c b/arch/arm/mach-omap2/prm44xx.c
index cc170fb81ff7..408c64efb807 100644
--- a/arch/arm/mach-omap2/prm44xx.c
+++ b/arch/arm/mach-omap2/prm44xx.c
@@ -49,6 +49,7 @@ static struct omap_prcm_irq_setup omap4_prcm_irq_setup = {
.irqs = omap4_prcm_irqs,
.nr_irqs = ARRAY_SIZE(omap4_prcm_irqs),
.irq = 11 + OMAP44XX_IRQ_GIC_START,
+ .xlate_irq = omap4_xlate_irq,
.read_pending_irqs = &omap44xx_prm_read_pending_irqs,
.ocp_barrier = &omap44xx_prm_ocp_barrier,
.save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen,
@@ -751,8 +752,10 @@ static int omap44xx_prm_late_init(void)
}
/* Once OMAP4 DT is filled as well */
- if (irq_num >= 0)
+ if (irq_num >= 0) {
omap4_prcm_irq_setup.irq = irq_num;
+ omap4_prcm_irq_setup.xlate_irq = NULL;
+ }
}
omap44xx_prm_enable_io_wakeup();
diff --git a/arch/arm/mach-omap2/prm_common.c b/arch/arm/mach-omap2/prm_common.c
index 779940cb6e56..dea2833ca627 100644
--- a/arch/arm/mach-omap2/prm_common.c
+++ b/arch/arm/mach-omap2/prm_common.c
@@ -187,6 +187,7 @@ int omap_prcm_event_to_irq(const char *name)
*/
void omap_prcm_irq_cleanup(void)
{
+ unsigned int irq;
int i;
if (!prcm_irq_setup) {
@@ -211,7 +212,11 @@ void omap_prcm_irq_cleanup(void)
kfree(prcm_irq_setup->priority_mask);
prcm_irq_setup->priority_mask = NULL;
- irq_set_chained_handler(prcm_irq_setup->irq, NULL);
+ if (prcm_irq_setup->xlate_irq)
+ irq = prcm_irq_setup->xlate_irq(prcm_irq_setup->irq);
+ else
+ irq = prcm_irq_setup->irq;
+ irq_set_chained_handler(irq, NULL);
if (prcm_irq_setup->base_irq > 0)
irq_free_descs(prcm_irq_setup->base_irq,
@@ -259,6 +264,7 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
int offset, i;
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
+ unsigned int irq;
if (!irq_setup)
return -EINVAL;
@@ -298,7 +304,11 @@ int omap_prcm_register_chain_handler(struct omap_prcm_irq_setup *irq_setup)
1 << (offset & 0x1f);
}
- irq_set_chained_handler(irq_setup->irq, omap_prcm_irq_handler);
+ if (irq_setup->xlate_irq)
+ irq = irq_setup->xlate_irq(irq_setup->irq);
+ else
+ irq = irq_setup->irq;
+ irq_set_chained_handler(irq, omap_prcm_irq_handler);
irq_setup->base_irq = irq_alloc_descs(-1, 0, irq_setup->nr_regs * 32,
0);
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 4f61148ec168..7d45c84c69ba 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -54,6 +54,7 @@
#include "soc.h"
#include "common.h"
+#include "control.h"
#include "powerdomain.h"
#include "omap-secure.h"
@@ -496,7 +497,8 @@ static void __init realtime_counter_init(void)
void __iomem *base;
static struct clk *sys_clk;
unsigned long rate;
- unsigned int reg, num, den;
+ unsigned int reg;
+ unsigned long long num, den;
base = ioremap(REALTIME_COUNTER_BASE, SZ_32);
if (!base) {
@@ -511,13 +513,42 @@ static void __init realtime_counter_init(void)
}
rate = clk_get_rate(sys_clk);
+
+ if (soc_is_dra7xx()) {
+ /*
+ * Errata i856 says the 32.768KHz crystal does not start at
+ * power on, so the CPU falls back to an emulated 32KHz clock
+ * based on sysclk / 610 instead. This causes the master counter
+ * frequency to not be 6.144MHz but at sysclk / 610 * 375 / 2
+ * (OR sysclk * 75 / 244)
+ *
+ * This affects at least the DRA7/AM572x 1.0, 1.1 revisions.
+ * Of course any board built without a populated 32.768KHz
+ * crystal would also need this fix even if the CPU is fixed
+ * later.
+ *
+ * Either case can be detected by using the two speedselect bits
+ * If they are not 0, then the 32.768KHz clock driving the
+ * coarse counter that corrects the fine counter every time it
+ * ticks is actually rate/610 rather than 32.768KHz and we
+ * should compensate to avoid the 570ppm (at 20MHz, much worse
+ * at other rates) too fast system time.
+ */
+ reg = omap_ctrl_readl(DRA7_CTRL_CORE_BOOTSTRAP);
+ if (reg & DRA7_SPEEDSELECT_MASK) {
+ num = 75;
+ den = 244;
+ goto sysclk1_based;
+ }
+ }
+
/* Numerator/denumerator values refer TRM Realtime Counter section */
switch (rate) {
- case 1200000:
+ case 12000000:
num = 64;
den = 125;
break;
- case 1300000:
+ case 13000000:
num = 768;
den = 1625;
break;
@@ -529,11 +560,11 @@ static void __init realtime_counter_init(void)
num = 192;
den = 625;
break;
- case 2600000:
+ case 26000000:
num = 384;
den = 1625;
break;
- case 2700000:
+ case 27000000:
num = 256;
den = 1125;
break;
@@ -545,6 +576,7 @@ static void __init realtime_counter_init(void)
break;
}
+sysclk1_based:
/* Program numerator and denumerator registers */
reg = readl_relaxed(base + INCREMENTER_NUMERATOR_OFFSET) &
NUMERATOR_DENUMERATOR_MASK;
@@ -556,7 +588,7 @@ static void __init realtime_counter_init(void)
reg |= den;
writel_relaxed(reg, base + INCREMENTER_DENUMERATOR_RELOAD_OFFSET);
- arch_timer_freq = (rate / den) * num;
+ arch_timer_freq = DIV_ROUND_UP_ULL(rate * num, den);
set_cntfreq();
iounmap(base);
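The DRA7 branch above substitutes a 75/244 ratio when the 32.768 kHz crystal path is unusable, and the final frequency is now computed with 64-bit math rounded up. A standalone sketch of that arithmetic for an assumed 20 MHz sysclk:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t rate = 20000000;	/* example sysclk, Hz */
	uint64_t num = 75, den = 244;	/* errata i856 ratio from above */

	/* DIV_ROUND_UP_ULL(rate * num, den) */
	uint64_t freq = (rate * num + den - 1) / den;
	printf("arch timer frequency: %llu Hz\n", (unsigned long long)freq);
	return 0;
}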
diff --git a/arch/arm/mach-omap2/twl-common.c b/arch/arm/mach-omap2/twl-common.c
index 4457e731f7a4..292eca0e78ed 100644
--- a/arch/arm/mach-omap2/twl-common.c
+++ b/arch/arm/mach-omap2/twl-common.c
@@ -66,19 +66,24 @@ void __init omap_pmic_init(int bus, u32 clkrate,
omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
}
+#ifdef CONFIG_ARCH_OMAP4
void __init omap4_pmic_init(const char *pmic_type,
struct twl4030_platform_data *pmic_data,
struct i2c_board_info *devices, int nr_devices)
{
/* PMIC part*/
+ unsigned int irq;
+
omap_mux_init_signal("sys_nirq1", OMAP_PIN_INPUT_PULLUP | OMAP_PIN_OFF_WAKEUPENABLE);
omap_mux_init_signal("fref_clk0_out.sys_drm_msecure", OMAP_PIN_OUTPUT);
- omap_pmic_init(1, 400, pmic_type, 7 + OMAP44XX_IRQ_GIC_START, pmic_data);
+ irq = omap4_xlate_irq(7 + OMAP44XX_IRQ_GIC_START);
+ omap_pmic_init(1, 400, pmic_type, irq, pmic_data);
/* Register additional devices on i2c1 bus if needed */
if (devices)
i2c_register_board_info(1, devices, nr_devices);
}
+#endif
void __init omap_pmic_late_init(void)
{
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d226b71d21d5..a611f4852582 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -19,11 +19,37 @@
#include <linux/init.h>
#include <linux/of_platform.h>
#include <linux/irqchip.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/hardware/cache-l2x0.h>
#include "core.h"
+#define RK3288_GRF_SOC_CON0 0x244
+
+static void __init rockchip_timer_init(void)
+{
+ if (of_machine_is_compatible("rockchip,rk3288")) {
+ struct regmap *grf;
+
+ /*
+ * Disable auto jtag/sdmmc switching that causes issues
+ * with the mmc controllers making them unreliable
+ */
+ grf = syscon_regmap_lookup_by_compatible("rockchip,rk3288-grf");
+ if (!IS_ERR(grf))
+ regmap_write(grf, RK3288_GRF_SOC_CON0, 0x10000000);
+ else
+ pr_err("rockchip: could not get grf syscon\n");
+ }
+
+ of_clk_init(NULL);
+ clocksource_of_init();
+}
+
static void __init rockchip_dt_init(void)
{
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
@@ -42,6 +68,7 @@ static const char * const rockchip_board_dt_compat[] = {
DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
+ .init_time = rockchip_timer_init,
.dt_compat = rockchip_board_dt_compat,
.init_machine = rockchip_dt_init,
MACHINE_END
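Rockchip GRF registers take a write-enable mask in their upper 16 bits, so the 0x10000000 written above asserts the mask for bit 12 while writing that bit to zero; reading bit 12 of GRF_SOC_CON0 as the JTAG/SDMMC auto-switch control is an assumption here and should be checked against the RK3288 TRM. A small helper showing how such a masked write is composed:

#include <stdio.h>
#include <stdint.h>

/* value written = write-enable mask (high half) | new bit value (low half) */
static uint32_t grf_field(unsigned int bit, unsigned int val)
{
	return (1u << (bit + 16)) | ((val & 1u) << bit);
}

int main(void)
{
	printf("0x%08x\n", grf_field(12, 0));	/* prints 0x10000000 */
	return 0;
}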
diff --git a/arch/arm/mach-shmobile/board-ape6evm.c b/arch/arm/mach-shmobile/board-ape6evm.c
index 66f67816a844..444f22d370f0 100644
--- a/arch/arm/mach-shmobile/board-ape6evm.c
+++ b/arch/arm/mach-shmobile/board-ape6evm.c
@@ -18,6 +18,8 @@
#include <linux/gpio_keys.h>
#include <linux/input.h>
#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/mfd/tmio.h>
#include <linux/mmc/host.h>
@@ -273,6 +275,22 @@ static void __init ape6evm_add_standard_devices(void)
sizeof(ape6evm_leds_pdata));
}
+static void __init ape6evm_legacy_init_time(void)
+{
+ /* Do not invoke DT-based timers via clocksource_of_init() */
+}
+
+static void __init ape6evm_legacy_init_irq(void)
+{
+ void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+ /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
+
static const char *ape6evm_boards_compat_dt[] __initdata = {
"renesas,ape6evm",
NULL,
@@ -280,7 +298,9 @@ static const char *ape6evm_boards_compat_dt[] __initdata = {
DT_MACHINE_START(APE6EVM_DT, "ape6evm")
.init_early = shmobile_init_delay,
+ .init_irq = ape6evm_legacy_init_irq,
.init_machine = ape6evm_add_standard_devices,
.init_late = shmobile_init_late,
.dt_compat = ape6evm_boards_compat_dt,
+ .init_time = ape6evm_legacy_init_time,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index f8197eb6e566..65b128dd4072 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -21,6 +21,8 @@
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/irqchip/arm-gic.h>
#include <linux/kernel.h>
#include <linux/leds.h>
#include <linux/mfd/tmio.h>
@@ -811,6 +813,16 @@ static void __init lager_init(void)
lager_ksz8041_fixup);
}
+static void __init lager_legacy_init_irq(void)
+{
+ void __iomem *gic_dist_base = ioremap_nocache(0xf1001000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xf1002000, 0x1000);
+
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+
+ /* Do not invoke DT-based interrupt code via irqchip_init() */
+}
+
static const char * const lager_boards_compat_dt[] __initconst = {
"renesas,lager",
NULL,
@@ -819,6 +831,7 @@ static const char * const lager_boards_compat_dt[] __initconst = {
DT_MACHINE_START(LAGER_DT, "lager")
.smp = smp_ops(r8a7790_smp_ops),
.init_early = shmobile_init_delay,
+ .init_irq = lager_legacy_init_irq,
.init_time = rcar_gen2_timer_init,
.init_machine = lager_init,
.init_late = shmobile_init_late,
diff --git a/arch/arm/mach-shmobile/setup-r8a7740.c b/arch/arm/mach-shmobile/setup-r8a7740.c
index 79ad93dfdae4..d191cf419731 100644
--- a/arch/arm/mach-shmobile/setup-r8a7740.c
+++ b/arch/arm/mach-shmobile/setup-r8a7740.c
@@ -800,7 +800,14 @@ void __init r8a7740_init_irq_of(void)
void __iomem *intc_msk_base = ioremap_nocache(0xe6900040, 0x10);
void __iomem *pfc_inta_ctrl = ioremap_nocache(0xe605807c, 0x4);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xc2800000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xc2000000, 0x1000);
+
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
+#endif
/* route signals to GIC */
iowrite32(0x0, pfc_inta_ctrl);
diff --git a/arch/arm/mach-shmobile/setup-r8a7778.c b/arch/arm/mach-shmobile/setup-r8a7778.c
index 170bd146ba17..cef8895a9b82 100644
--- a/arch/arm/mach-shmobile/setup-r8a7778.c
+++ b/arch/arm/mach-shmobile/setup-r8a7778.c
@@ -576,11 +576,18 @@ void __init r8a7778_init_irq_extpin(int irlm)
void __init r8a7778_init_irq_dt(void)
{
void __iomem *base = ioremap_nocache(0xfe700000, 0x00100000);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xfe438000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xfe430000, 0x1000);
+#endif
BUG_ON(!base);
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
-
+#endif
/* route all interrupts to ARM */
__raw_writel(0x73ffffff, base + INT2NTSR0);
__raw_writel(0xffffffff, base + INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-r8a7779.c b/arch/arm/mach-shmobile/setup-r8a7779.c
index 6156d172cf31..27dceaf9e688 100644
--- a/arch/arm/mach-shmobile/setup-r8a7779.c
+++ b/arch/arm/mach-shmobile/setup-r8a7779.c
@@ -720,10 +720,17 @@ static int r8a7779_set_wake(struct irq_data *data, unsigned int on)
void __init r8a7779_init_irq_dt(void)
{
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ void __iomem *gic_dist_base = ioremap_nocache(0xf0001000, 0x1000);
+ void __iomem *gic_cpu_base = ioremap_nocache(0xf0000100, 0x1000);
+#endif
gic_arch_extn.irq_set_wake = r8a7779_set_wake;
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ gic_init(0, 29, gic_dist_base, gic_cpu_base);
+#else
irqchip_init();
-
+#endif
/* route all interrupts to ARM */
__raw_writel(0xffffffff, INT2NTSR0);
__raw_writel(0x3fffffff, INT2NTSR1);
diff --git a/arch/arm/mach-shmobile/setup-rcar-gen2.c b/arch/arm/mach-shmobile/setup-rcar-gen2.c
index 3dd6edd9bd1d..cc9470dfb1ce 100644
--- a/arch/arm/mach-shmobile/setup-rcar-gen2.c
+++ b/arch/arm/mach-shmobile/setup-rcar-gen2.c
@@ -133,7 +133,9 @@ void __init rcar_gen2_timer_init(void)
#ifdef CONFIG_COMMON_CLK
rcar_gen2_clocks_init(mode);
#endif
+#ifdef CONFIG_ARCH_SHMOBILE_MULTI
clocksource_of_init();
+#endif
}
struct memory_reserve_config {
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index 93ebe3430bfe..fb5e1bb34be8 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -595,6 +595,7 @@ static struct platform_device ipmmu_device = {
static struct renesas_intc_irqpin_config irqpin0_platform_data = {
.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
+ .control_parent = true,
};
static struct resource irqpin0_resources[] = {
@@ -656,6 +657,7 @@ static struct platform_device irqpin1_device = {
static struct renesas_intc_irqpin_config irqpin2_platform_data = {
.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
+ .control_parent = true,
};
static struct resource irqpin2_resources[] = {
@@ -686,6 +688,7 @@ static struct platform_device irqpin2_device = {
static struct renesas_intc_irqpin_config irqpin3_platform_data = {
.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
+ .control_parent = true,
};
static struct resource irqpin3_resources[] = {
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index f1d027aa7a81..0edf2a6d2bbe 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -70,6 +70,18 @@ void __init shmobile_init_delay(void)
if (!max_freq)
return;
+#ifdef CONFIG_ARCH_SHMOBILE_LEGACY
+ /* Non-multiplatform r8a73a4 SoC cannot use arch timer due
+ * to GIC being initialized from C and arch timer via DT */
+ if (of_machine_is_compatible("renesas,r8a73a4"))
+ has_arch_timer = false;
+
+ /* Non-multiplatform r8a7790 SoC cannot use arch timer due
+ * to GIC being initialized from C and arch timer via DT */
+ if (of_machine_is_compatible("renesas,r8a7790"))
+ has_arch_timer = false;
+#endif
+
if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
if (is_a7_a8_a9)
shmobile_setup_delay_hz(max_freq, 1, 3);
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 7864797609b3..a673c7f7e208 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1940,13 +1940,32 @@ void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);
+static int __arm_iommu_attach_device(struct device *dev,
+ struct dma_iommu_mapping *mapping)
+{
+ int err;
+
+ err = iommu_attach_device(mapping->domain, dev);
+ if (err)
+ return err;
+
+ kref_get(&mapping->kref);
+ dev->archdata.mapping = mapping;
+
+ pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ return 0;
+}
+
/**
* arm_iommu_attach_device
* @dev: valid struct device pointer
* @mapping: io address space mapping structure (returned from
* arm_iommu_create_mapping)
*
- * Attaches specified io address space mapping to the provided device,
+ * Attaches specified io address space mapping to the provided device.
+ * This replaces the dma operations (dma_map_ops pointer) with the
+ * IOMMU aware version.
+ *
* More than one client might be attached to the same io address space
* mapping.
*/
@@ -1955,25 +1974,16 @@ int arm_iommu_attach_device(struct device *dev,
{
int err;
- err = iommu_attach_device(mapping->domain, dev);
+ err = __arm_iommu_attach_device(dev, mapping);
if (err)
return err;
- kref_get(&mapping->kref);
- dev->archdata.mapping = mapping;
-
- pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev));
+ set_dma_ops(dev, &iommu_ops);
return 0;
}
EXPORT_SYMBOL_GPL(arm_iommu_attach_device);
-/**
- * arm_iommu_detach_device
- * @dev: valid struct device pointer
- *
- * Detaches the provided device from a previously attached map.
- */
-void arm_iommu_detach_device(struct device *dev)
+static void __arm_iommu_detach_device(struct device *dev)
{
struct dma_iommu_mapping *mapping;
@@ -1989,6 +1999,19 @@ void arm_iommu_detach_device(struct device *dev)
pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev));
}
+
+/**
+ * arm_iommu_detach_device
+ * @dev: valid struct device pointer
+ *
+ * Detaches the provided device from a previously attached map.
+ * This voids the dma operations (dma_map_ops pointer)
+ */
+void arm_iommu_detach_device(struct device *dev)
+{
+ __arm_iommu_detach_device(dev);
+ set_dma_ops(dev, NULL);
+}
EXPORT_SYMBOL_GPL(arm_iommu_detach_device);
static struct dma_map_ops *arm_get_iommu_dma_map_ops(bool coherent)
@@ -2011,7 +2034,7 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
return false;
}
- if (arm_iommu_attach_device(dev, mapping)) {
+ if (__arm_iommu_attach_device(dev, mapping)) {
pr_warn("Failed to attached device %s to IOMMU_mapping\n",
dev_name(dev));
arm_iommu_release_mapping(mapping);
@@ -2025,7 +2048,7 @@ static void arm_teardown_iommu_dma_ops(struct device *dev)
{
struct dma_iommu_mapping *mapping = dev->archdata.mapping;
- arm_iommu_detach_device(dev);
+ __arm_iommu_detach_device(dev);
arm_iommu_release_mapping(mapping);
}
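The refactoring above splits attach/detach into an internal step (wire up or tear down the mapping) and a public step that additionally swaps the device's dma_map_ops, so the automatic arm_setup_iommu_dma_ops() path can reuse the internal half without touching the ops twice. A toy model of that layering, with purely illustrative names:

#include <stdio.h>

struct toy_dev {
	const char *dma_ops;
	int attached;
};

static int internal_attach(struct toy_dev *dev)
{
	dev->attached = 1;		/* mapping wired up, ops untouched */
	return 0;
}

static int public_attach(struct toy_dev *dev)
{
	int err = internal_attach(dev);

	if (err)
		return err;
	dev->dma_ops = "iommu_ops";	/* only the public API swaps the ops */
	return 0;
}

int main(void)
{
	struct toy_dev dev = { "default_ops", 0 };

	public_attach(&dev);
	printf("attached=%d ops=%s\n", dev.attached, dev.dma_ops);
	return 0;
}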
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 59424937e52b..9fe8e241335c 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -220,9 +220,6 @@ static void note_page(struct pg_state *st, unsigned long addr, unsigned level, u
static const char units[] = "KMGTPE";
u64 prot = val & pg_level[level].mask;
- if (addr < USER_PGTABLES_CEILING)
- return;
-
if (!st->level) {
st->level = level;
st->current_prot = prot;
@@ -308,15 +305,13 @@ static void walk_pgd(struct seq_file *m)
pgd_t *pgd = swapper_pg_dir;
struct pg_state st;
unsigned long addr;
- unsigned i, pgdoff = USER_PGTABLES_CEILING / PGDIR_SIZE;
+ unsigned i;
memset(&st, 0, sizeof(st));
st.seq = m;
st.marker = address_markers;
- pgd += pgdoff;
-
- for (i = pgdoff; i < PTRS_PER_PGD; i++, pgd++) {
+ for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
addr = i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
walk_pud(&st, pgd, addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 98ad9c79ea0e..2495c8cb47ba 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -658,8 +658,8 @@ static struct section_perm ro_perms[] = {
.start = (unsigned long)_stext,
.end = (unsigned long)__init_begin,
#ifdef CONFIG_ARM_LPAE
- .mask = ~PMD_SECT_RDONLY,
- .prot = PMD_SECT_RDONLY,
+ .mask = ~L_PMD_SECT_RDONLY,
+ .prot = L_PMD_SECT_RDONLY,
#else
.mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
.prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index cda7c40999b6..4e6ef896c619 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1329,8 +1329,8 @@ static void __init kmap_init(void)
static void __init map_lowmem(void)
{
struct memblock_region *reg;
- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ phys_addr_t kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
+ phys_addr_t kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
/* Map all the lowmem memory banks. */
for_each_memblock(memory, reg) {
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 1c43cec971b5..066688863920 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -85,6 +85,7 @@ vdso_install:
# We use MRPROPER_FILES and CLEAN_FILES now
archclean:
$(Q)$(MAKE) $(clean)=$(boot)
+ $(Q)$(MAKE) $(clean)=$(boot)/dts
define archhelp
echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)'
diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile
index 3b8d427c3985..c62b0f4d9ef6 100644
--- a/arch/arm64/boot/dts/Makefile
+++ b/arch/arm64/boot/dts/Makefile
@@ -3,6 +3,4 @@ dts-dirs += apm
dts-dirs += arm
dts-dirs += cavium
-always := $(dtb-y)
subdir-y := $(dts-dirs)
-clean-files := *.dtb
diff --git a/arch/arm64/boot/dts/arm/juno.dts b/arch/arm64/boot/dts/arm/juno.dts
index cb3073e4e7a8..d429129ecb3d 100644
--- a/arch/arm64/boot/dts/arm/juno.dts
+++ b/arch/arm64/boot/dts/arm/juno.dts
@@ -22,7 +22,7 @@
};
chosen {
- stdout-path = &soc_uart0;
+ stdout-path = "serial0:115200n8";
};
psci {
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index dd301be89ecc..5376d908eabe 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -1,6 +1,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
CONFIG_NO_HZ_IDLE=y
CONFIG_HIGH_RES_TIMERS=y
@@ -13,14 +14,12 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
CONFIG_CGROUP_HUGETLB=y
# CONFIG_UTS_NS is not set
# CONFIG_IPC_NS is not set
-# CONFIG_PID_NS is not set
# CONFIG_NET_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
@@ -92,7 +91,6 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_VIRTIO_CONSOLE=y
# CONFIG_HW_RANDOM is not set
-# CONFIG_HMC_DRV is not set
CONFIG_SPI=y
CONFIG_SPI_PL022=y
CONFIG_GPIO_PL061=y
@@ -133,6 +131,8 @@ CONFIG_EXT3_FS=y
CONFIG_EXT4_FS=y
CONFIG_FANOTIFY=y
CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
+CONFIG_QUOTA=y
+CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
CONFIG_CUSE=y
CONFIG_VFAT_FS=y
@@ -152,14 +152,15 @@ CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_LOCKUP_DETECTOR=y
# CONFIG_SCHED_DEBUG is not set
+# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
+CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
CONFIG_CRYPTO_GHASH_ARM64_CE=y
-CONFIG_CRYPTO_AES_ARM64_CE=y
CONFIG_CRYPTO_AES_ARM64_CE_CCM=y
CONFIG_CRYPTO_AES_ARM64_CE_BLK=y
CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y
diff --git a/arch/arm64/include/asm/arch_timer.h b/arch/arm64/include/asm/arch_timer.h
index b1fa4e614718..fbe0ca31a99c 100644
--- a/arch/arm64/include/asm/arch_timer.h
+++ b/arch/arm64/include/asm/arch_timer.h
@@ -21,6 +21,7 @@
#include <asm/barrier.h>
+#include <linux/bug.h>
#include <linux/init.h>
#include <linux/types.h>
diff --git a/arch/arm64/include/asm/cpu.h b/arch/arm64/include/asm/cpu.h
index ace70682499b..8e797b2fcc01 100644
--- a/arch/arm64/include/asm/cpu.h
+++ b/arch/arm64/include/asm/cpu.h
@@ -39,6 +39,7 @@ struct cpuinfo_arm64 {
u64 reg_id_aa64pfr0;
u64 reg_id_aa64pfr1;
+ u32 reg_id_dfr0;
u32 reg_id_isar0;
u32 reg_id_isar1;
u32 reg_id_isar2;
@@ -51,6 +52,10 @@ struct cpuinfo_arm64 {
u32 reg_id_mmfr3;
u32 reg_id_pfr0;
u32 reg_id_pfr1;
+
+ u32 reg_mvfr0;
+ u32 reg_mvfr1;
+ u32 reg_mvfr2;
};
DECLARE_PER_CPU(struct cpuinfo_arm64, cpu_data);
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index d34189bceff7..9ce3e680ae1c 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -52,13 +52,14 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
dev->archdata.dma_ops = ops;
}
-static inline int set_arch_dma_coherent_ops(struct device *dev)
+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
+ struct iommu_ops *iommu, bool coherent)
{
- dev->archdata.dma_coherent = true;
- set_dma_ops(dev, &coherent_swiotlb_dma_ops);
- return 0;
+ dev->archdata.dma_coherent = coherent;
+ if (coherent)
+ set_dma_ops(dev, &coherent_swiotlb_dma_ops);
}
-#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
+#define arch_setup_dma_ops arch_setup_dma_ops
/* do not use this function in a driver */
static inline bool is_device_dma_coherent(struct device *dev)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 8127e45e2637..3cb4c856b10d 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,18 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
+ vcpu->arch.hcr_el2 &= ~HCR_RW;
+}
+
+static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.hcr_el2;
+}
+
+static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
+{
+ vcpu->arch.hcr_el2 = hcr;
}
static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 0b7dfdb931df..acd101a9014d 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -116,9 +116,6 @@ struct kvm_vcpu_arch {
* Anything that is not used directly from assembly code goes
* here.
*/
- /* dcache set/way operation pending */
- int last_pcpu;
- cpumask_t require_dcache_flush;
/* Don't run the guest */
bool pause;
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 14a74f136272..adcf49547301 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -243,24 +243,46 @@ static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
return (vcpu_sys_reg(vcpu, SCTLR_EL1) & 0b101) == 0b101;
}
-static inline void coherent_cache_guest_page(struct kvm_vcpu *vcpu, hva_t hva,
- unsigned long size,
- bool ipa_uncached)
+static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu, pfn_t pfn,
+ unsigned long size,
+ bool ipa_uncached)
{
+ void *va = page_address(pfn_to_page(pfn));
+
if (!vcpu_has_cache_enabled(vcpu) || ipa_uncached)
- kvm_flush_dcache_to_poc((void *)hva, size);
+ kvm_flush_dcache_to_poc(va, size);
if (!icache_is_aliasing()) { /* PIPT */
- flush_icache_range(hva, hva + size);
+ flush_icache_range((unsigned long)va,
+ (unsigned long)va + size);
} else if (!icache_is_aivivt()) { /* non ASID-tagged VIVT */
/* any kind of VIPT cache */
__flush_icache_all();
}
}
+static inline void __kvm_flush_dcache_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+ kvm_flush_dcache_to_poc(page_address(page), PAGE_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pmd(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ kvm_flush_dcache_to_poc(page_address(page), PMD_SIZE);
+}
+
+static inline void __kvm_flush_dcache_pud(pud_t pud)
+{
+ struct page *page = pud_page(pud);
+ kvm_flush_dcache_to_poc(page_address(page), PUD_SIZE);
+}
+
#define kvm_virt_to_phys(x) __virt_to_phys((unsigned long)(x))
-void stage2_flush_vm(struct kvm *kvm);
+void kvm_set_way_flush(struct kvm_vcpu *vcpu);
+void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index df22314f57cf..210d632aa5ad 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -298,7 +298,6 @@ void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
#define pfn_pmd(pfn,prot) (__pmd(((phys_addr_t)(pfn) << PAGE_SHIFT) | pgprot_val(prot)))
#define mk_pmd(page,prot) pfn_pmd(page_to_pfn(page),prot)
-#define pmd_page(pmd) pfn_to_page(__phys_to_pfn(pmd_val(pmd) & PHYS_MASK))
#define pud_write(pud) pte_write(pud_pte(pud))
#define pud_pfn(pud) (((pud_val(pud) & PUD_MASK) & PHYS_MASK) >> PAGE_SHIFT)
@@ -401,7 +400,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long addr)
return (pmd_t *)pud_page_vaddr(*pud) + pmd_index(addr);
}
-#define pud_page(pud) pmd_page(pud_pmd(pud))
+#define pud_page(pud) pfn_to_page(__phys_to_pfn(pud_val(pud) & PHYS_MASK))
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 2 */
@@ -437,6 +436,8 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long addr)
return (pud_t *)pgd_page_vaddr(*pgd) + pud_index(addr);
}
+#define pgd_page(pgd) pfn_to_page(__phys_to_pfn(pgd_val(pgd) & PHYS_MASK))
+
#endif /* CONFIG_ARM64_PGTABLE_LEVELS > 3 */
#define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd_val(pgd))
diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h
index 286b1bec547c..f9be30ea1cbd 100644
--- a/arch/arm64/include/asm/processor.h
+++ b/arch/arm64/include/asm/processor.h
@@ -31,6 +31,7 @@
#include <asm/fpsimd.h>
#include <asm/hw_breakpoint.h>
+#include <asm/pgtable-hwdef.h>
#include <asm/ptrace.h>
#include <asm/types.h>
@@ -123,9 +124,6 @@ struct task_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
-/* Prepare to copy thread state - unlazy all lazy status */
-#define prepare_to_copy(tsk) do { } while (0)
-
unsigned long get_wchan(struct task_struct *p);
#define cpu_relax() barrier()
diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h
index 49c9aefd24a5..23e9432ac112 100644
--- a/arch/arm64/include/asm/unistd.h
+++ b/arch/arm64/include/asm/unistd.h
@@ -44,7 +44,7 @@
#define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2)
#define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5)
-#define __NR_compat_syscalls 386
+#define __NR_compat_syscalls 388
#endif
#define __ARCH_WANT_SYS_CLONE
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index 8893cebcea5b..27224426e0bf 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -795,3 +795,5 @@ __SYSCALL(__NR_getrandom, sys_getrandom)
__SYSCALL(__NR_memfd_create, sys_memfd_create)
#define __NR_bpf 386
__SYSCALL(__NR_bpf, sys_bpf)
+#define __NR_execveat 387
+__SYSCALL(__NR_execveat, compat_sys_execveat)
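The new entry wires execveat() into the compat table for 32-bit tasks; number 387 is only the compat slot, a native caller goes through its own table. A hedged userspace illustration of the call, guarded because older C libraries do not define SYS_execveat (0x1000 is AT_EMPTY_PATH):

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/syscall.h>

int main(void)
{
#ifdef SYS_execveat
	char *argv[] = { "true", NULL };
	char *envp[] = { NULL };
	int fd = open("/bin/true", O_RDONLY | O_CLOEXEC);

	/* on success this replaces the process, so nothing below runs */
	if (fd >= 0)
		syscall(SYS_execveat, fd, "", argv, envp, 0x1000);
#endif
	printf("execveat not available or the call failed\n");
	return 0;
}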
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 57b641747534..07d435cf2eea 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -147,6 +147,7 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
* If we have AArch32, we care about 32-bit features for compat. These
* registers should be RES0 otherwise.
*/
+ diff |= CHECK(id_dfr0, boot, cur, cpu);
diff |= CHECK(id_isar0, boot, cur, cpu);
diff |= CHECK(id_isar1, boot, cur, cpu);
diff |= CHECK(id_isar2, boot, cur, cpu);
@@ -165,6 +166,10 @@ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
diff |= CHECK(id_pfr0, boot, cur, cpu);
diff |= CHECK(id_pfr1, boot, cur, cpu);
+ diff |= CHECK(mvfr0, boot, cur, cpu);
+ diff |= CHECK(mvfr1, boot, cur, cpu);
+ diff |= CHECK(mvfr2, boot, cur, cpu);
+
/*
* Mismatched CPU features are a recipe for disaster. Don't even
* pretend to support them.
@@ -189,6 +194,7 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);
+ info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
@@ -202,6 +208,10 @@ static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);
+ info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
+ info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
+ info->reg_mvfr2 = read_cpuid(MVFR2_EL1);
+
cpuinfo_detect_icache_policy(info);
check_local_cpu_errata();
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 6fac253bc783..2bb4347d0edf 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -326,6 +326,7 @@ void __init efi_idmap_init(void)
/* boot time idmap_pg_dir is incomplete, so fill in missing parts */
efi_setup_idmap();
+ early_memunmap(memmap.map, memmap.map_end - memmap.map);
}
static int __init remap_region(efi_memory_desc_t *md, void **new)
@@ -380,7 +381,6 @@ static int __init arm64_enter_virtual_mode(void)
}
mapsize = memmap.map_end - memmap.map;
- early_memunmap(memmap.map, mapsize);
if (efi_runtime_disabled()) {
pr_info("EFI runtime services will be disabled.\n");
diff --git a/arch/arm64/kernel/module.c b/arch/arm64/kernel/module.c
index fd027b101de5..9b6f71db2709 100644
--- a/arch/arm64/kernel/module.c
+++ b/arch/arm64/kernel/module.c
@@ -25,6 +25,7 @@
#include <linux/mm.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
+#include <asm/alternative.h>
#include <asm/insn.h>
#include <asm/sections.h>
diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c
index 6762ad705587..3f62b35fb6f1 100644
--- a/arch/arm64/kernel/perf_regs.c
+++ b/arch/arm64/kernel/perf_regs.c
@@ -50,3 +50,11 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index b80991166754..20fe2932ad0c 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -402,6 +402,7 @@ void __init setup_arch(char **cmdline_p)
request_standard_resources();
efi_idmap_init();
+ early_ioremap_reset();
unflatten_device_tree();
diff --git a/arch/arm64/kernel/smp_spin_table.c b/arch/arm64/kernel/smp_spin_table.c
index 4f93c67e63de..14944e5b28da 100644
--- a/arch/arm64/kernel/smp_spin_table.c
+++ b/arch/arm64/kernel/smp_spin_table.c
@@ -25,6 +25,7 @@
#include <asm/cacheflush.h>
#include <asm/cpu_ops.h>
#include <asm/cputype.h>
+#include <asm/io.h>
#include <asm/smp_plat.h>
extern void secondary_holding_pen(void);
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index 3771b72b6569..2d6b6065fe7f 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -5,6 +5,7 @@
#include <asm/debug-monitors.h>
#include <asm/pgtable.h>
#include <asm/memory.h>
+#include <asm/mmu_context.h>
#include <asm/smp_plat.h>
#include <asm/suspend.h>
#include <asm/tlbflush.h>
@@ -98,7 +99,18 @@ int __cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
*/
ret = __cpu_suspend_enter(arg, fn);
if (ret == 0) {
- cpu_switch_mm(mm->pgd, mm);
+ /*
+ * We are resuming from reset with TTBR0_EL1 set to the
+ * idmap to enable the MMU; restore the active_mm mappings in
+ * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+ * the thread entered __cpu_suspend with TTBR0_EL1 set to
+ * reserved TTBR0 page tables and should be restored as such.
+ */
+ if (mm == &init_mm)
+ cpu_set_reserved_ttbr0();
+ else
+ cpu_switch_mm(mm->pgd, mm);
+
flush_tlb_all();
/*
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index fbe909fb0a1a..c3ca89c27c6b 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -1014,6 +1014,7 @@ ENTRY(__kvm_tlb_flush_vmid_ipa)
* Instead, we invalidate Stage-2 for this IPA, and the
* whole of Stage-1. Weep...
*/
+ lsr x1, x1, #12
tlbi ipas2e1is, x1
/*
* We have to ensure completion of the invalidation at Stage-2,
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index 70a7816535cd..0b4326578985 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -90,7 +90,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
if (!cpu_has_32bit_el1())
return -EINVAL;
cpu_reset = &default_regs_reset32;
- vcpu->arch.hcr_el2 &= ~HCR_RW;
} else {
cpu_reset = &default_regs_reset;
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 3d7c2df89946..f31e8bb2bc5b 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -69,68 +69,31 @@ static u32 get_ccsidr(u32 csselr)
return ccsidr;
}
-static void do_dc_cisw(u32 val)
-{
- asm volatile("dc cisw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-static void do_dc_csw(u32 val)
-{
- asm volatile("dc csw, %x0" : : "r" (val));
- dsb(ish);
-}
-
-/* See note at ARM ARM B1.14.4 */
+/*
+ * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
+ */
static bool access_dcsw(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
- unsigned long val;
- int cpu;
-
if (!p->is_write)
return read_from_write_only(vcpu, p);
- cpu = get_cpu();
-
- cpumask_setall(&vcpu->arch.require_dcache_flush);
- cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);
-
- /* If we were already preempted, take the long way around */
- if (cpu != vcpu->arch.last_pcpu) {
- flush_cache_all();
- goto done;
- }
-
- val = *vcpu_reg(vcpu, p->Rt);
-
- switch (p->CRm) {
- case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */
- case 14: /* DCCISW */
- do_dc_cisw(val);
- break;
-
- case 10: /* DCCSW */
- do_dc_csw(val);
- break;
- }
-
-done:
- put_cpu();
-
+ kvm_set_way_flush(vcpu);
return true;
}
/*
* Generic accessor for VM registers. Only called as long as HCR_TVM
- * is set.
+ * is set. If the guest enables the MMU, we stop trapping the VM
+ * sys_regs and leave it in complete control of the caches.
*/
static bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct sys_reg_params *p,
const struct sys_reg_desc *r)
{
unsigned long val;
+ bool was_enabled = vcpu_has_cache_enabled(vcpu);
BUG_ON(!p->is_write);
@@ -143,25 +106,7 @@ static bool access_vm_reg(struct kvm_vcpu *vcpu,
vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;
}
- return true;
-}
-
-/*
- * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set. If the
- * guest enables the MMU, we stop trapping the VM sys_regs and leave
- * it in complete control of the caches.
- */
-static bool access_sctlr(struct kvm_vcpu *vcpu,
- const struct sys_reg_params *p,
- const struct sys_reg_desc *r)
-{
- access_vm_reg(vcpu, p, r);
-
- if (vcpu_has_cache_enabled(vcpu)) { /* MMU+Caches enabled? */
- vcpu->arch.hcr_el2 &= ~HCR_TVM;
- stage2_flush_vm(vcpu->kvm);
- }
-
+ kvm_toggle_cache(vcpu, was_enabled);
return true;
}
@@ -377,7 +322,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
NULL, reset_mpidr, MPIDR_EL1 },
/* SCTLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
- access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
+ access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
/* CPACR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
NULL, reset_val, CPACR_EL1, 0 },
@@ -657,7 +602,7 @@ static const struct sys_reg_desc cp14_64_regs[] = {
* register).
*/
static const struct sys_reg_desc cp15_regs[] = {
- { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
+ { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
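
A sketch of what the new cache-toggle helper has to cover, reconstructed only from the access_sctlr() body removed above (the helper itself lives in the arch MMU code and is not shown in this diff, so the function below is illustrative, not the real implementation):

/* Sketch only: behaviour implied by the removed access_sctlr() lines. */
static void kvm_toggle_cache_sketch(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * Guest just switched MMU+caches on: clean up whatever it wrote
	 * while the caches were off, then stop trapping the VM registers.
	 */
	if (now_enabled && !was_enabled) {
		stage2_flush_vm(vcpu->kvm);
		vcpu->arch.hcr_el2 &= ~HCR_TVM;
	}
}
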
diff --git a/arch/arm64/mm/dump.c b/arch/arm64/mm/dump.c
index cf33f33333cc..d54dc9ac4b70 100644
--- a/arch/arm64/mm/dump.c
+++ b/arch/arm64/mm/dump.c
@@ -15,6 +15,7 @@
*/
#include <linux/debugfs.h>
#include <linux/fs.h>
+#include <linux/io.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index bac492c12fcc..c95464a33f36 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -335,14 +335,8 @@ static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
- if (!keep_initrd) {
- if (start == initrd_start)
- start = round_down(start, PAGE_SIZE);
- if (end == initrd_end)
- end = round_up(end, PAGE_SIZE);
-
+ if (!keep_initrd)
free_reserved_area((void *)start, (void *)end, 0, "initrd");
- }
}
static int __init keepinitrd_setup(char *__unused)
diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c
index 2c9412908024..164efa009e5b 100644
--- a/arch/avr32/kernel/module.c
+++ b/arch/avr32/kernel/module.c
@@ -19,12 +19,10 @@
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
vfree(mod->arch.syminfo);
mod->arch.syminfo = NULL;
-
- vfree(module_region);
}
static inline int check_rela(Elf32_Rela *rela, struct module *module,
@@ -291,12 +289,3 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return ret;
}
-
-int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
- struct module *module)
-{
- vfree(module->arch.syminfo);
- module->arch.syminfo = NULL;
-
- return 0;
-}
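
A compact model of the module-free refactor that this and the later arch module.c hunks implement: the memory region itself is now released by a generic module_memfree(), and an architecture that keeps per-module metadata (like the avr32 syminfo above) overrides only module_arch_freeing_init(). The weak defaults below are a sketch of the generic side, assumed rather than copied from this series:

#include <linux/moduleloader.h>
#include <linux/vmalloc.h>

/* Sketch of the generic, per-arch overridable defaults: */
void __weak module_memfree(void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_freeing_init(struct module *mod)
{
	/* nothing to do unless the arch keeps init-time metadata */
}
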
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 0eca93327195..d223a8b57c1e 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -142,6 +142,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
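
The same three-way dispatch recurs in every architecture fault handler touched below; here is a minimal userspace sketch of that pattern, with illustrative VM_FAULT_* values and return strings standing in for the kernel's goto labels:

#include <stdio.h>

#define VM_FAULT_OOM     0x0001	/* values are illustrative, not the kernel's */
#define VM_FAULT_SIGBUS  0x0002
#define VM_FAULT_SIGSEGV 0x0040
#define VM_FAULT_ERROR   (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)

static const char *dispatch(unsigned int fault)
{
	if (!(fault & VM_FAULT_ERROR))
		return "completed or retried";
	if (fault & VM_FAULT_OOM)
		return "out_of_memory";
	if (fault & VM_FAULT_SIGSEGV)		/* the case these hunks add */
		return "bad_area (SIGSEGV)";
	if (fault & VM_FAULT_SIGBUS)
		return "do_sigbus";
	return "BUG";
}

int main(void)
{
	printf("%s\n", dispatch(VM_FAULT_SIGSEGV));
	return 0;
}
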
diff --git a/arch/blackfin/mach-bf533/boards/stamp.c b/arch/blackfin/mach-bf533/boards/stamp.c
index 6f4bac969bf7..23eada79439c 100644
--- a/arch/blackfin/mach-bf533/boards/stamp.c
+++ b/arch/blackfin/mach-bf533/boards/stamp.c
@@ -7,6 +7,7 @@
*/
#include <linux/device.h>
+#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
diff --git a/arch/cris/arch-v32/drivers/sync_serial.c b/arch/cris/arch-v32/drivers/sync_serial.c
index 08a313fc2241..f772068d9e79 100644
--- a/arch/cris/arch-v32/drivers/sync_serial.c
+++ b/arch/cris/arch-v32/drivers/sync_serial.c
@@ -604,7 +604,7 @@ static ssize_t __sync_serial_read(struct file *file,
struct timespec *ts)
{
unsigned long flags;
- int dev = MINOR(file->f_dentry->d_inode->i_rdev);
+ int dev = MINOR(file_inode(file)->i_rdev);
int avail;
struct sync_port *port;
unsigned char *start;
diff --git a/arch/cris/kernel/module.c b/arch/cris/kernel/module.c
index 51123f985eb5..af04cb6b6dc9 100644
--- a/arch/cris/kernel/module.c
+++ b/arch/cris/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
kfree(module_region);
}
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index 1790f22e71a2..2686a7aa8ec8 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -176,6 +176,8 @@ retry:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 67b1d1685759..0635bd6c2af3 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -94,7 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->start)
continue;
- pci_claim_resource(dev, idx);
+ pci_claim_bridge_resource(dev, idx);
}
}
pcibios_allocate_bus_resources(&bus->children);
diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c
index 9a66372fc7c7..ec4917ddf678 100644
--- a/arch/frv/mm/fault.c
+++ b/arch/frv/mm/fault.c
@@ -168,6 +168,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index f3b51b57740a..95c39b95e97e 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -11,7 +11,7 @@
-#define NR_syscalls 318 /* length of syscall table */
+#define NR_syscalls 319 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h
index 4c2240c1b0cb..461079560c78 100644
--- a/arch/ia64/include/uapi/asm/unistd.h
+++ b/arch/ia64/include/uapi/asm/unistd.h
@@ -331,5 +331,6 @@
#define __NR_getrandom 1339
#define __NR_memfd_create 1340
#define __NR_bpf 1341
+#define __NR_execveat 1342
#endif /* _UAPI_ASM_IA64_UNISTD_H */
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index 615ef81def49..e795cb848154 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -893,13 +893,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
{
ia64_cpu_to_sapicid[cpu] = -1;
set_cpu_present(cpu, false);
@@ -910,8 +910,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
#ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index f5e96dffc63c..fcf8b8cbca0b 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1779,6 +1779,7 @@ sys_call_table:
data8 sys_getrandom
data8 sys_memfd_create // 1340
data8 sys_bpf
+ data8 sys_execveat
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 24603be24c14..29754aae5177 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -305,14 +305,12 @@ plt_target (struct plt_entry *plt)
#endif /* !USE_BRL */
void
-module_free (struct module *mod, void *module_region)
+module_arch_freeing_init (struct module *mod)
{
- if (mod && mod->arch.init_unw_table &&
- module_region == mod->module_init) {
+ if (mod->arch.init_unw_table) {
unw_remove_unwind_table(mod->arch.init_unw_table);
mod->arch.init_unw_table = NULL;
}
- vfree(module_region);
}
/* Have we already seen one of these relocations? */
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 7225dad87094..ba5ba7accd0d 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -172,6 +172,8 @@ retry:
*/
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto bad_area;
} else if (fault & VM_FAULT_SIGBUS) {
signal = SIGBUS;
goto bad_area;
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 291a582777cf..900cc93e5409 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -487,45 +487,39 @@ int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
-static int is_valid_resource(struct pci_dev *dev, int idx)
+void pcibios_fixup_device_resources(struct pci_dev *dev)
{
- unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- struct resource *devr = &dev->resource[idx], *busr;
+ int idx;
if (!dev->bus)
- return 0;
-
- pci_bus_for_each_resource(dev->bus, busr, i) {
- if (!busr || ((busr->flags ^ devr->flags) & type_mask))
- continue;
- if ((devr->start) && (devr->start >= busr->start) &&
- (devr->end <= busr->end))
- return 1;
- }
- return 0;
-}
+ return;
-static void pcibios_fixup_resources(struct pci_dev *dev, int start, int limit)
-{
- int i;
+ for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
- for (i = start; i < limit; i++) {
- if (!dev->resource[i].flags)
+ if (!r->flags || r->parent || !r->start)
continue;
- if ((is_valid_resource(dev, i)))
- pci_claim_resource(dev, i);
- }
-}
-void pcibios_fixup_device_resources(struct pci_dev *dev)
-{
- pcibios_fixup_resources(dev, 0, PCI_BRIDGE_RESOURCES);
+ pci_claim_resource(dev, idx);
+ }
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
- pcibios_fixup_resources(dev, PCI_BRIDGE_RESOURCES, PCI_NUM_RESOURCES);
+ int idx;
+
+ if (!dev->bus)
+ return;
+
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ if (!r->flags || r->parent || !r->start)
+ continue;
+
+ pci_claim_bridge_resource(dev, idx);
+ }
}
/*
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index e9c6a8014bd6..e3d4d4890104 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -200,6 +200,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 75e75d7b1702..244e0dbe45db 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 355
+#define NR_syscalls 356
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 2c1bec9a14b6..61fb6cb9d2ae 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -360,5 +360,6 @@
#define __NR_getrandom 352
#define __NR_memfd_create 353
#define __NR_bpf 354
+#define __NR_execveat 355
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 2ca219e184cd..a0ec4303f2c8 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -375,4 +375,5 @@ ENTRY(sys_call_table)
.long sys_getrandom
.long sys_memfd_create
.long sys_bpf
+ .long sys_execveat /* 355 */
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index 2bd7487440c4..b2f04aee46ec 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -145,6 +145,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto map_err;
else if (fault & VM_FAULT_SIGBUS)
goto bus_err;
BUG();
diff --git a/arch/metag/mm/fault.c b/arch/metag/mm/fault.c
index 332680e5ebf2..2de5dc695a87 100644
--- a/arch/metag/mm/fault.c
+++ b/arch/metag/mm/fault.c
@@ -141,6 +141,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index fa4cf52aa7a6..d46a5ebb7570 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -224,6 +224,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index b30e41c0c033..48528fb81eff 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -1026,6 +1026,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ struct pci_dev *dev = bus->self;
+
if (request_resource(pr, res) == 0)
continue;
/*
@@ -1035,6 +1037,12 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
*/
if (reparent_resources(pr, res) == 0)
continue;
+
+ if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+ pci_claim_bridge_resource(dev,
+ i + PCI_BRIDGE_RESOURCES) == 0)
+ continue;
+
}
pr_warn("PCI: Cannot allocate resource region ");
pr_cont("%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1227,7 +1235,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index becc42bb1849..70ab5d664332 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -158,6 +158,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 9fd6834a2172..5d6139390bf8 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -1388,7 +1388,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
index 3516cbdf1ee9..0c2cc5d39c8e 100644
--- a/arch/mn10300/mm/fault.c
+++ b/arch/mn10300/mm/fault.c
@@ -262,6 +262,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/mn10300/unit-asb2305/pci-asb2305.c b/arch/mn10300/unit-asb2305/pci-asb2305.c
index febb9cd83177..b5b036f64275 100644
--- a/arch/mn10300/unit-asb2305/pci-asb2305.c
+++ b/arch/mn10300/unit-asb2305/pci-asb2305.c
@@ -106,7 +106,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
if (!r->flags)
continue;
if (!r->start ||
- pci_claim_resource(dev, idx) < 0) {
+ pci_claim_bridge_resource(dev, idx) < 0) {
printk(KERN_ERR "PCI:"
" Cannot allocate resource"
" region %d of bridge %s\n",
diff --git a/arch/mn10300/unit-asb2305/pci.c b/arch/mn10300/unit-asb2305/pci.c
index 6b4339f8c9c2..471ff398090c 100644
--- a/arch/mn10300/unit-asb2305/pci.c
+++ b/arch/mn10300/unit-asb2305/pci.c
@@ -281,42 +281,37 @@ static int __init pci_check_direct(void)
return -ENODEV;
}
-static int is_valid_resource(struct pci_dev *dev, int idx)
+static void pcibios_fixup_device_resources(struct pci_dev *dev)
{
- unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
- struct resource *devr = &dev->resource[idx], *busr;
-
- if (dev->bus) {
- pci_bus_for_each_resource(dev->bus, busr, i) {
- if (!busr || (busr->flags ^ devr->flags) & type_mask)
- continue;
-
- if (devr->start &&
- devr->start >= busr->start &&
- devr->end <= busr->end)
- return 1;
- }
- }
+ int idx;
- return 0;
+ if (!dev->bus)
+ return;
+
+ for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
+
+ if (!r->flags || r->parent || !r->start)
+ continue;
+
+ pci_claim_resource(dev, idx);
+ }
}
-static void pcibios_fixup_device_resources(struct pci_dev *dev)
+static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
- int limit, i;
+ int idx;
- if (dev->bus->number != 0)
+ if (!dev->bus)
return;
- limit = (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) ?
- PCI_BRIDGE_RESOURCES : PCI_NUM_RESOURCES;
+ for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
+ struct resource *r = &dev->resource[idx];
- for (i = 0; i < limit; i++) {
- if (!dev->resource[i].flags)
+ if (!r->flags || r->parent || !r->start)
continue;
- if (is_valid_resource(dev, i))
- pci_claim_resource(dev, i);
+ pci_claim_bridge_resource(dev, idx);
}
}
@@ -330,7 +325,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
if (bus->self) {
pci_read_bridge_bases(bus);
- pcibios_fixup_device_resources(bus->self);
+ pcibios_fixup_bridge_resources(bus->self);
}
list_for_each_entry(dev, &bus->devices, bus_list)
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
index 51d5bb90d3e5..a223691dff4f 100644
--- a/arch/nios2/kernel/cpuinfo.c
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -72,6 +72,7 @@ void __init setup_cpuinfo(void)
cpuinfo.has_div = fcpu_has(cpu, "altr,has-div");
cpuinfo.has_mul = fcpu_has(cpu, "altr,has-mul");
cpuinfo.has_mulx = fcpu_has(cpu, "altr,has-mulx");
+ cpuinfo.mmu = fcpu_has(cpu, "altr,has-mmu");
if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
err_cpu("DIV");
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
index 83bca17d1008..0bdfd13ff98b 100644
--- a/arch/nios2/kernel/entry.S
+++ b/arch/nios2/kernel/entry.S
@@ -365,30 +365,14 @@ ENTRY(ret_from_interrupt)
GET_THREAD_INFO r1
ldw r4, TI_PREEMPT_COUNT(r1)
bne r4, r0, restore_all
-
-need_resched:
ldw r4, TI_FLAGS(r1) /* ? Need resched set */
BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
andi r10, r4, ESTATUS_EPIE
beq r10, r0, restore_all
- movia r4, PREEMPT_ACTIVE
- stw r4, TI_PREEMPT_COUNT(r1)
- rdctl r10, status /* enable intrs again */
- ori r10, r10 ,STATUS_PIE
- wrctl status, r10
- PUSH r1
- call schedule
- POP r1
- mov r4, r0
- stw r4, TI_PREEMPT_COUNT(r1)
- rdctl r10, status /* disable intrs */
- andi r10, r10, %lo(~STATUS_PIE)
- wrctl status, r10
- br need_resched
-#else
- br restore_all
+ call preempt_schedule_irq
#endif
+ br restore_all
/***********************************************************************
* A few syscall wrappers
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
index cc924a38f22a..e2e3f13f98d5 100644
--- a/arch/nios2/kernel/module.c
+++ b/arch/nios2/kernel/module.c
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
}
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
kfree(module_region);
}
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
index f9d27883a714..2d0ea25be171 100644
--- a/arch/nios2/kernel/signal.c
+++ b/arch/nios2/kernel/signal.c
@@ -200,7 +200,7 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
/* Set up to return from userspace; jump to fixed address sigreturn
trampoline on kuser page. */
- regs->ra = (unsigned long) (0x1040);
+ regs->ra = (unsigned long) (0x1044);
/* Set up registers for signal handler */
regs->sp = (unsigned long) frame;
diff --git a/arch/nios2/mm/fault.c b/arch/nios2/mm/fault.c
index 15a0bb5fc06d..34429d5a0ccd 100644
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -135,6 +135,8 @@ survive:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/openrisc/mm/fault.c b/arch/openrisc/mm/fault.c
index 0703acf7d327..230ac20ae794 100644
--- a/arch/openrisc/mm/fault.c
+++ b/arch/openrisc/mm/fault.c
@@ -171,6 +171,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/parisc/include/asm/ldcw.h b/arch/parisc/include/asm/ldcw.h
index d2d11b7055ba..8121aa6db2ff 100644
--- a/arch/parisc/include/asm/ldcw.h
+++ b/arch/parisc/include/asm/ldcw.h
@@ -33,11 +33,18 @@
#endif /*!CONFIG_PA20*/
-/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
+/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*.
+ We don't explicitly expose that "*a" may be written as reload
+ fails to find a register in class R1_REGS when "a" needs to be
+ reloaded when generating 64-bit PIC code. Instead, we clobber
+ memory to indicate to the compiler that the assembly code reads
+ or writes to items other than those listed in the input and output
+ operands. This may pessimize the code somewhat but __ldcw is
+ usually used within code blocks surrounded by memory barriors. */
#define __ldcw(a) ({ \
unsigned __ret; \
- __asm__ __volatile__(__LDCW " 0(%2),%0" \
- : "=r" (__ret), "+m" (*(a)) : "r" (a)); \
+ __asm__ __volatile__(__LDCW " 0(%1),%0" \
+ : "=r" (__ret) : "r" (a) : "memory"); \
__ret; \
})
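
The "memory" clobber introduced above is the same device the generic compiler barrier relies on; a one-line illustration of that standard GCC idiom (not taken from this patch):

/* Tells GCC the asm may read or write memory it cannot see, so cached
 * values must be written back before the statement and reloaded after. */
#define compiler_barrier()	__asm__ __volatile__("" : : : "memory")
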
diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
index 50dfafc3f2c1..5822e8e200e6 100644
--- a/arch/parisc/kernel/module.c
+++ b/arch/parisc/kernel/module.c
@@ -298,14 +298,10 @@ static inline unsigned long count_stubs(const Elf_Rela *rela, unsigned long n)
}
#endif
-
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
kfree(mod->arch.section);
mod->arch.section = NULL;
-
- vfree(module_region);
}
/* Additional bytes needed in front of individual sections */
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 3ca9c1131cfe..e5120e653240 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -256,6 +256,8 @@ good_area:
*/
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto bad_area;
BUG();
diff --git a/arch/powerpc/crypto/sha1.c b/arch/powerpc/crypto/sha1.c
index d3feba5a275f..c154cebc1041 100644
--- a/arch/powerpc/crypto/sha1.c
+++ b/arch/powerpc/crypto/sha1.c
@@ -154,4 +154,5 @@ module_exit(sha1_powerpc_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
+MODULE_ALIAS_CRYPTO("sha1");
MODULE_ALIAS_CRYPTO("sha1-powerpc");
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h
index 19c36cba37c4..a46f5f45570c 100644
--- a/arch/powerpc/include/asm/kexec.h
+++ b/arch/powerpc/include/asm/kexec.h
@@ -86,6 +86,11 @@ extern int overlaps_crashkernel(unsigned long start, unsigned long size);
extern void reserve_crashkernel(void);
extern void machine_kexec_mask_interrupts(void);
+static inline bool kdump_in_progress(void)
+{
+ return crashing_cpu >= 0;
+}
+
#else /* !CONFIG_KEXEC */
static inline void crash_kexec_secondary(struct pt_regs *regs) { }
@@ -106,6 +111,11 @@ static inline int crash_shutdown_unregister(crash_shutdown_t handler)
return 0;
}
+static inline bool kdump_in_progress(void)
+{
+ return false;
+}
+
#endif /* CONFIG_KEXEC */
#endif /* ! __ASSEMBLY__ */
#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index ce9577d693be..91062eef582f 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -366,3 +366,4 @@ SYSCALL_SPU(seccomp)
SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
+COMPAT_SYS(execveat)
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index ebc4f165690a..0be6c681cab1 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -23,9 +23,9 @@
#define THREAD_SIZE (1 << THREAD_SHIFT)
#ifdef CONFIG_PPC64
-#define CURRENT_THREAD_INFO(dest, sp) clrrdi dest, sp, THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(clrrdi dest, sp, THREAD_SHIFT)
#else
-#define CURRENT_THREAD_INFO(dest, sp) rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT
+#define CURRENT_THREAD_INFO(dest, sp) stringify_in_c(rlwinm dest, sp, 0, 0, 31-THREAD_SHIFT)
#endif
#ifndef __ASSEMBLY__
@@ -71,12 +71,13 @@ struct thread_info {
#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
/* how to get the thread information struct from C */
-register unsigned long __current_r1 asm("r1");
static inline struct thread_info *current_thread_info(void)
{
- /* gcc4, at least, is smart enough to turn this into a single
- * rlwinm for ppc32 and clrrdi for ppc64 */
- return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
+ unsigned long val;
+
+ asm (CURRENT_THREAD_INFO(%0,1) : "=r" (val));
+
+ return (struct thread_info *)val;
}
#endif /* __ASSEMBLY__ */
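
What the clrrdi/rlwinm in CURRENT_THREAD_INFO compute is plain stack-pointer masking, since thread_info sits at the base of the THREAD_SIZE-aligned stack; a userspace model with an assumed THREAD_SIZE value:

#include <stdio.h>

#define THREAD_SIZE (1ULL << 14)	/* illustrative; the real value is per-config */

int main(void)
{
	unsigned long long sp = 0xc000000012345e78ULL;	/* pretend stack pointer */
	unsigned long long ti = sp & ~(THREAD_SIZE - 1);	/* what clrrdi/rlwinm do */

	printf("thread_info at %#llx\n", ti);
	return 0;
}
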
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index e0da021caa00..36b79c31eedd 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 362
+#define __NR_syscalls 363
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index f55351f2e66e..ef5b5b1f3123 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -384,5 +384,6 @@
#define __NR_getrandom 359
#define __NR_memfd_create 360
#define __NR_bpf 361
+#define __NR_execveat 362
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c
index 879b3aacac32..f96d1ec24189 100644
--- a/arch/powerpc/kernel/machine_kexec_64.c
+++ b/arch/powerpc/kernel/machine_kexec_64.c
@@ -330,7 +330,7 @@ void default_machine_kexec(struct kimage *image)
* using debugger IPI.
*/
- if (crashing_cpu == -1)
+ if (!kdump_in_progress())
kexec_prepare_cpus();
pr_debug("kexec: Starting switchover sequence.\n");
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 37d512d35943..2a525c938158 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1184,6 +1184,8 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
pr, (pr && pr->name) ? pr->name : "nil");
if (pr && !(pr->flags & IORESOURCE_UNSET)) {
+ struct pci_dev *dev = bus->self;
+
if (request_resource(pr, res) == 0)
continue;
/*
@@ -1193,6 +1195,11 @@ static void pcibios_allocate_bus_resources(struct pci_bus *bus)
*/
if (reparent_resources(pr, res) == 0)
continue;
+
+ if (dev && i < PCI_BRIDGE_RESOURCE_NUM &&
+ pci_claim_bridge_resource(dev,
+ i + PCI_BRIDGE_RESOURCES) == 0)
+ continue;
}
pr_warning("PCI: Cannot allocate resource region "
"%d of PCI bridge %d, will remap\n", i, bus->number);
@@ -1401,7 +1408,10 @@ void pcibios_claim_one_bus(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 8ec017cb4446..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -700,6 +700,7 @@ void start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
preempt_disable();
+ cpu_callin_map[cpu] = 1;
if (smp_ops->setup_cpu)
smp_ops->setup_cpu(cpu);
@@ -738,14 +739,6 @@ void start_secondary(void *unused)
notify_cpu_starting(cpu);
set_cpu_online(cpu, true);
- /*
- * CPU must be marked active and online before we signal back to the
- * master, because the scheduler needs to see the cpu_online and
- * cpu_active bits set.
- */
- smp_wmb();
- cpu_callin_map[cpu] = 1;
-
local_irq_enable();
cpu_startup_entry(CPUHP_ONLINE);
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 5a236f082c78..1b5305d4bdab 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -76,7 +76,7 @@ int copro_handle_mm_fault(struct mm_struct *mm, unsigned long ea,
if (*flt & VM_FAULT_OOM) {
ret = -ENOMEM;
goto out_unlock;
- } else if (*flt & VM_FAULT_SIGBUS) {
+ } else if (*flt & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
ret = -EFAULT;
goto out_unlock;
}
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index eb79907f34fa..6154b0a2b063 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -437,6 +437,8 @@ good_area:
*/
fault = handle_mm_fault(mm, vma, address, flags);
if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
+ if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
rc = mm_fault_error(regs, address, fault);
if (rc >= MM_FAULT_RETURN)
goto bail;
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 1ca125b9c226..d1916b577f2c 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -699,7 +699,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index 54eca8b3b288..0509bca5e830 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -40,7 +40,6 @@ BEGIN_FTR_SECTION; \
b 1f; \
END_FTR_SECTION(0, 1); \
ld r12,opal_tracepoint_refcount@toc(r2); \
- std r12,32(r1); \
cmpdi r12,0; \
bne- LABEL; \
1:
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index b700a329c31d..d2de7d5d7574 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -304,7 +304,7 @@ int pnv_save_sprs_for_winkle(void)
* all cpus at boot. Get these reg values of current cpu and use the
* same accross all cpus.
*/
- uint64_t lpcr_val = mfspr(SPRN_LPCR);
+ uint64_t lpcr_val = mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1;
uint64_t hid0_val = mfspr(SPRN_HID0);
uint64_t hid1_val = mfspr(SPRN_HID1);
uint64_t hid4_val = mfspr(SPRN_HID4);
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 469751d92004..b5682fd6c984 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
#include <asm/trace.h>
#include <asm/firmware.h>
#include <asm/plpar_wrappers.h>
+#include <asm/kexec.h>
#include <asm/fadump.h>
#include "pseries.h"
@@ -267,8 +268,13 @@ static void pSeries_lpar_hptab_clear(void)
* out to the user, but at least this will stop us from
* continuing on further and creating an even more
* difficult to debug situation.
+ *
+ * There is a known problem when kdump'ing, if cpus are offline
+ * the above call will fail. Rather than panicking again, keep
+ * going and hope the kdump kernel is also little endian, which
+ * it usually is.
*/
- if (rc)
+ if (rc && !kdump_in_progress())
panic("Could not enable big endian exceptions");
}
#endif
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 5b150f0c5df9..13c6e200b24e 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -337,6 +337,7 @@ static inline void disable_surveillance(void)
args.token = rtas_token("set-indicator");
if (args.token == RTAS_UNKNOWN_SERVICE)
return;
+ args.token = cpu_to_be32(args.token);
args.nargs = cpu_to_be32(3);
args.nret = cpu_to_be32(1);
args.rets = &args.args[3];
diff --git a/arch/s390/hypfs/hypfs_vm.c b/arch/s390/hypfs/hypfs_vm.c
index 32040ace00ea..afbe07907c10 100644
--- a/arch/s390/hypfs/hypfs_vm.c
+++ b/arch/s390/hypfs/hypfs_vm.c
@@ -231,7 +231,7 @@ failed:
struct dbfs_d2fc_hdr {
u64 len; /* Length of d2fc buffer without header */
u16 version; /* Version of header */
- char tod_ext[16]; /* TOD clock for d2fc */
+ char tod_ext[STORE_CLOCK_EXT_SIZE]; /* TOD clock for d2fc */
u64 count; /* Number of VM guests in d2fc buffer */
char reserved[30];
} __attribute__ ((packed));
diff --git a/arch/s390/include/asm/irqflags.h b/arch/s390/include/asm/irqflags.h
index 37b9091ab8c0..16aa0c779e07 100644
--- a/arch/s390/include/asm/irqflags.h
+++ b/arch/s390/include/asm/irqflags.h
@@ -36,7 +36,7 @@ static inline notrace void __arch_local_irq_ssm(unsigned long flags)
static inline notrace unsigned long arch_local_save_flags(void)
{
- return __arch_local_irq_stosm(0x00);
+ return __arch_local_irq_stnsm(0xff);
}
static inline notrace unsigned long arch_local_irq_save(void)
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 8beee1cceba4..98eb2a579223 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -67,20 +67,22 @@ static inline void local_tick_enable(unsigned long long comp)
set_clock_comparator(S390_lowcore.clock_comparator);
}
-#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
+#define STORE_CLOCK_EXT_SIZE 16 /* stcke writes 16 bytes */
typedef unsigned long long cycles_t;
-static inline void get_tod_clock_ext(char clk[16])
+static inline void get_tod_clock_ext(char *clk)
{
- typedef struct { char _[sizeof(clk)]; } addrtype;
+ typedef struct { char _[STORE_CLOCK_EXT_SIZE]; } addrtype;
asm volatile("stcke %0" : "=Q" (*(addrtype *) clk) : : "cc");
}
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[16];
+ unsigned char clk[STORE_CLOCK_EXT_SIZE];
+
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
}
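
The prototype change above matters because an array parameter decays to a pointer, so the old sizeof(clk) inside get_tod_clock_ext() measured a pointer rather than the 16-byte buffer; a small userspace demonstration (most compilers warn about exactly this):

#include <stdio.h>

static void callee(char clk[16])
{
	/* 'clk' is really 'char *': prints 8 on a 64-bit build, not 16 */
	printf("sizeof in callee: %zu\n", sizeof(clk));
}

int main(void)
{
	char clk[16];

	printf("sizeof at call site: %zu\n", sizeof(clk));	/* 16 */
	callee(clk);
	return 0;
}
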
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 2b446cf0cc65..67878af257a0 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -289,7 +289,8 @@
#define __NR_bpf 351
#define __NR_s390_pci_mmio_write 352
#define __NR_s390_pci_mmio_read 353
-#define NR_syscalls 354
+#define __NR_execveat 354
+#define NR_syscalls 355
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
index b89b59158b95..409d152585be 100644
--- a/arch/s390/kernel/module.c
+++ b/arch/s390/kernel/module.c
@@ -55,14 +55,10 @@ void *module_alloc(unsigned long size)
}
#endif
-/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_arch_freeing_init(struct module *mod)
{
- if (mod) {
- vfree(mod->arch.syminfo);
- mod->arch.syminfo = NULL;
- }
- vfree(module_region);
+ vfree(mod->arch.syminfo);
+ mod->arch.syminfo = NULL;
}
static void check_rela(Elf_Rela *rela, struct module *me)
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index a2987243bc76..939ec474b1dd 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -362,3 +362,4 @@ SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
+SYSCALL(sys_execveat,sys_execveat,compat_sys_execveat)
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index f6b3cd056ec2..cc7328080b60 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -48,6 +48,30 @@ bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
return false;
}
+static int check_per_event(unsigned short cause, unsigned long control,
+ struct pt_regs *regs)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return 0;
+ /* user space single step */
+ if (control == 0)
+ return 1;
+ /* over indication for storage alteration */
+ if ((control & 0x20200000) && (cause & 0x2000))
+ return 1;
+ if (cause & 0x8000) {
+ /* all branches */
+ if ((control & 0x80800000) == 0x80000000)
+ return 1;
+ /* branch into selected range */
+ if (((control & 0x80800000) == 0x80800000) &&
+ regs->psw.addr >= current->thread.per_user.start &&
+ regs->psw.addr <= current->thread.per_user.end)
+ return 1;
+ }
+ return 0;
+}
+
int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{
int fixup = probe_get_fixup_type(auprobe->insn);
@@ -71,9 +95,13 @@ int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
if (regs->psw.addr - utask->xol_vaddr == ilen)
regs->psw.addr = utask->vaddr + ilen;
}
- /* If per tracing was active generate trap */
- if (regs->psw.mask & PSW_MASK_PER)
- do_per_trap(regs);
+ if (check_per_event(current->thread.per_event.cause,
+ current->thread.per_user.control, regs)) {
+ /* fix per address */
+ current->thread.per_event.address = utask->vaddr;
+ /* trigger per event */
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+ }
return 0;
}
@@ -106,6 +134,7 @@ void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
clear_thread_flag(TIF_UPROBE_SINGLESTEP);
regs->int_code = auprobe->saved_int_code;
regs->psw.addr = current->utask->vaddr;
+ current->thread.per_event.address = current->utask->vaddr;
}
unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline,
@@ -146,17 +175,20 @@ static void adjust_psw_addr(psw_t *psw, unsigned long len)
__rc; \
})
-#define emu_store_ril(ptr, input) \
+#define emu_store_ril(regs, ptr, input) \
({ \
unsigned int mask = sizeof(*(ptr)) - 1; \
+ __typeof__(ptr) __ptr = (ptr); \
int __rc = 0; \
\
if (!test_facility(34)) \
__rc = EMU_ILLEGAL_OP; \
- else if ((u64 __force)ptr & mask) \
+ else if ((u64 __force)__ptr & mask) \
__rc = EMU_SPECIFICATION; \
- else if (put_user(*(input), ptr)) \
+ else if (put_user(*(input), __ptr)) \
__rc = EMU_ADDRESSING; \
+ if (__rc == 0) \
+ sim_stor_event(regs, __ptr, mask + 1); \
__rc; \
})
@@ -198,6 +230,25 @@ union split_register {
};
/*
+ * If user per registers are setup to trace storage alterations and an
+ * emulated store took place on a fitting address a user trap is generated.
+ */
+static void sim_stor_event(struct pt_regs *regs, void *addr, int len)
+{
+ if (!(regs->psw.mask & PSW_MASK_PER))
+ return;
+ if (!(current->thread.per_user.control & PER_EVENT_STORE))
+ return;
+ if ((void *)current->thread.per_user.start > (addr + len))
+ return;
+ if ((void *)current->thread.per_user.end < addr)
+ return;
+ current->thread.per_event.address = regs->psw.addr;
+ current->thread.per_event.cause = PER_EVENT_STORE >> 16;
+ set_pt_regs_flag(regs, PIF_PER_TRAP);
+}
+
+/*
* pc relative instructions are emulated, since parameters may not be
* accessible from the xol area due to range limitations.
*/
@@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs)
rc = emu_load_ril((u32 __user *)uptr, &rx->u64);
break;
case 0x07: /* sthrl */
- rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]);
+ rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]);
break;
case 0x0b: /* stgrl */
- rc = emu_store_ril((u64 __user *)uptr, &rx->u64);
+ rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64);
break;
case 0x0f: /* strl */
- rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]);
+ rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]);
break;
}
break;
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 7f0089d9a4aa..e34122e539a1 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk)
struct thread_info *ti = task_thread_info(tsk);
u64 timer, system;
- WARN_ON_ONCE(!irqs_disabled());
-
timer = S390_lowcore.last_update_timer;
S390_lowcore.last_update_timer = get_vtimer();
S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 811937bb90be..9065d5aa3932 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -374,6 +374,12 @@ static noinline void do_fault_error(struct pt_regs *regs, int fault)
do_no_context(regs);
else
pagefault_out_of_memory();
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs))
+ do_no_context(regs);
+ else
+ do_sigsegv(regs, SEGV_MAPERR);
} else if (fault & VM_FAULT_SIGBUS) {
/* Kernel mode? Handle exceptions or die */
if (!user_mode(regs))
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index be99357d238c..3cf8cc03fff6 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
struct page *page;
- unsigned long offset;
+ unsigned long offset, mask;
offset = (unsigned long) entry / sizeof(unsigned long);
offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
- page = pmd_to_page((pmd_t *) entry);
+ mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
+ page = virt_to_page((void *)((unsigned long) entry & mask));
return page->index + offset;
}
diff --git a/arch/s390/net/bpf_jit.S b/arch/s390/net/bpf_jit.S
index 7e45d13816c1..ba44c9f55346 100644
--- a/arch/s390/net/bpf_jit.S
+++ b/arch/s390/net/bpf_jit.S
@@ -22,8 +22,8 @@
* skb_copy_bits takes 4 parameters:
* %r2 = skb pointer
* %r3 = offset into skb data
- * %r4 = length to copy
- * %r5 = pointer to temp buffer
+ * %r4 = pointer to temp buffer
+ * %r5 = length to copy
*/
#define SKBDATA %r8
@@ -44,8 +44,9 @@ ENTRY(sk_load_word)
sk_load_word_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,4 # 4 bytes
- la %r5,160(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,160(%r15) # pointer to temp buffer
+ lghi %r5,4 # 4 bytes
brasl %r14,skb_copy_bits # get data from skb
l %r5,160(%r15) # load result from temp buffer
ltgr %r2,%r2 # set cc to (%r2 != 0)
@@ -69,8 +70,9 @@ ENTRY(sk_load_half)
sk_load_half_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,2 # 2 bytes
- la %r5,162(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,162(%r15) # pointer to temp buffer
+ lghi %r5,2 # 2 bytes
brasl %r14,skb_copy_bits # get data from skb
xc 160(2,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
@@ -95,8 +97,9 @@ ENTRY(sk_load_byte)
sk_load_byte_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,1 # 1 bytes
- la %r5,163(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,163(%r15) # pointer to temp buffer
+ lghi %r5,1 # 1 byte
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r5,160(%r15) # load result from temp buffer
@@ -104,11 +107,11 @@ sk_load_byte_slow:
lgr %r2,%r9 # restore %r2
br %r8
- /* A = (*(u8 *)(skb->data+K) & 0xf) << 2 */
+ /* X = (*(u8 *)(skb->data+K) & 0xf) << 2 */
ENTRY(sk_load_byte_msh)
llgfr %r1,%r3 # extend offset
clr %r11,%r3 # hlen < offset ?
- jle sk_load_byte_slow
+ jle sk_load_byte_msh_slow
lhi %r12,0
ic %r12,0(%r1,%r10) # get byte from skb
nill %r12,0x0f
@@ -118,8 +121,9 @@ ENTRY(sk_load_byte_msh)
sk_load_byte_msh_slow:
lgr %r9,%r2 # save %r2
- lhi %r4,2 # 2 bytes
- la %r5,162(%r15) # pointer to temp buffer
+ lgr %r3,%r1 # offset
+ la %r4,163(%r15) # pointer to temp buffer
+ lghi %r5,1 # 1 byte
brasl %r14,skb_copy_bits # get data from skb
xc 160(3,%r15),160(%r15)
l %r12,160(%r15) # load result from temp buffer
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index c52ac77408ca..bbd1981cc150 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
EMIT4_DISP(0x88500000, K);
break;
case BPF_ALU | BPF_NEG: /* A = -A */
- /* lnr %r5,%r5 */
- EMIT2(0x1155);
+ /* lcr %r5,%r5 */
+ EMIT2(0x1355);
break;
case BPF_JMP | BPF_JA: /* ip += K */
offset = addrs[i + K] + jit->start - jit->prg;
@@ -448,15 +448,12 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter,
mask = 0x800000; /* je */
kbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
- if (K <= 16383)
- /* chi %r5,<K> */
- EMIT4_IMM(0xa75e0000, K);
- else if (test_facility(21))
+ if (test_facility(21))
/* clfi %r5,<K> */
EMIT6_IMM(0xc25f0000, K);
else
- /* c %r5,<d(K)>(%r13) */
- EMIT4_DISP(0x5950d000, EMIT_CONST(K));
+ /* cl %r5,<d(K)>(%r13) */
+ EMIT4_DISP(0x5550d000, EMIT_CONST(K));
}
branch: if (filter->jt == filter->jf) {
if (filter->jt == 0)
@@ -502,8 +499,8 @@ branch: if (filter->jt == filter->jf) {
xbranch: /* Emit compare if the branch targets are different */
if (filter->jt != filter->jf) {
jit->seen |= SEEN_XREG;
- /* cr %r5,%r12 */
- EMIT2(0x195c);
+ /* clr %r5,%r12 */
+ EMIT2(0x155c);
}
goto branch;
case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */
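
Two of the s390 JIT fixes above hinge on instruction semantics: lnr is "load negative" (it forces the sign, yielding -|A|), while lcr is the two's-complement negate that BPF_NEG needs; likewise chi/c/cr are signed compares where classic BPF comparisons are unsigned, hence the switch to clfi/cl/clr. A userspace model of the negate difference:

#include <stdio.h>

static int lnr(int x) { return x < 0 ? x : -x; }	/* load negative: -|x| */
static int lcr(int x) { return -x; }			/* load complement: -x */

int main(void)
{
	printf("A = -5: lnr -> %d, lcr -> %d\n", lnr(-5), lcr(-5));	/* -5 vs 5 */
	printf("A =  5: lnr -> %d, lcr -> %d\n", lnr(5), lcr(5));	/* -5 vs -5 */
	return 0;
}
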
diff --git a/arch/score/mm/fault.c b/arch/score/mm/fault.c
index 52238983527d..6860beb2a280 100644
--- a/arch/score/mm/fault.c
+++ b/arch/score/mm/fault.c
@@ -114,6 +114,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 541dc6101508..a58fec9b55e0 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -353,6 +353,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
} else {
if (fault & VM_FAULT_SIGBUS)
do_sigbus(regs, error_code, address);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area(regs, error_code, address);
else
BUG();
}
diff --git a/arch/sparc/kernel/pci.c b/arch/sparc/kernel/pci.c
index b36365f49478..9ce5afe167ff 100644
--- a/arch/sparc/kernel/pci.c
+++ b/arch/sparc/kernel/pci.c
@@ -639,7 +639,10 @@ static void pci_claim_bus_resources(struct pci_bus *bus)
(unsigned long long)r->end,
(unsigned int)r->flags);
- pci_claim_resource(dev, i);
+ if (pci_claim_resource(dev, i) == 0)
+ continue;
+
+ pci_claim_bridge_resource(dev, i);
}
}
diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
index 908e8c17c902..70d817154fe8 100644
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -249,6 +249,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
index 18fcd7167095..479823249429 100644
--- a/arch/sparc/mm/fault_64.c
+++ b/arch/sparc/mm/fault_64.c
@@ -446,6 +446,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/sparc/net/bpf_jit_comp.c b/arch/sparc/net/bpf_jit_comp.c
index f33e7c7a3bf7..7931eeeb649a 100644
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -776,7 +776,7 @@ cond_branch: f_offset = addrs[i + filter[i].jf];
if (unlikely(proglen + ilen > oldproglen)) {
pr_err("bpb_jit_compile fatal error\n");
kfree(addrs);
- module_free(NULL, image);
+ module_memfree(image);
return;
}
memcpy(image + proglen, temp, ilen);
@@ -822,7 +822,7 @@ out:
void bpf_jit_free(struct bpf_prog *fp)
{
if (fp->jited)
- module_free(NULL, fp->bpf_func);
+ module_memfree(fp->bpf_func);
bpf_prog_unlock_free(fp);
}
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 96447c9160a0..2305084c9b93 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -74,7 +74,7 @@ error:
/* Free memory returned from module_alloc */
-void module_free(struct module *mod, void *module_region)
+void module_memfree(void *module_region)
{
vfree(module_region);
@@ -83,7 +83,7 @@ void module_free(struct module *mod, void *module_region)
0, 0, 0, NULL, NULL, 0);
/*
- * FIXME: If module_region == mod->module_init, trim exception
+ * FIXME: Add module_arch_freeing_init to trim exception
* table entries.
*/
}
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 565e25a98334..0f61a73534e6 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -442,6 +442,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common
index 87bc86821bc9..d195a87ca542 100644
--- a/arch/um/Kconfig.common
+++ b/arch/um/Kconfig.common
@@ -3,6 +3,7 @@ config UML
default y
select HAVE_ARCH_AUDITSYSCALL
select HAVE_UID16
+ select HAVE_FUTEX_CMPXCHG if FUTEX
select GENERIC_IRQ_SHOW
select GENERIC_CPU_DEVICES
select GENERIC_IO
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 5678c3571e7c..209617302df8 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -80,6 +80,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM) {
goto out_of_memory;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ goto out;
} else if (fault & VM_FAULT_SIGBUS) {
err = -EACCES;
goto out;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ba397bde7948..0dc9d0144a27 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -857,7 +857,7 @@ source "kernel/Kconfig.preempt"
config X86_UP_APIC
bool "Local APIC support on uniprocessors"
- depends on X86_32 && !SMP && !X86_32_NON_STANDARD && !PCI_MSI
+ depends on X86_32 && !SMP && !X86_32_NON_STANDARD
---help---
A local APIC (Advanced Programmable Interrupt Controller) is an
integrated interrupt controller in the CPU. If you have a single-CPU
@@ -868,6 +868,10 @@ config X86_UP_APIC
performance counters), and the NMI watchdog which detects hard
lockups.
+config X86_UP_APIC_MSI
+ def_bool y
+ select X86_UP_APIC if X86_32 && !SMP && !X86_32_NON_STANDARD && PCI_MSI
+
config X86_UP_IOAPIC
bool "IO-APIC support on uniprocessors"
depends on X86_UP_APIC
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index 5b016e2498f3..3db07f30636f 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -51,6 +51,7 @@ targets += cpustr.h
$(obj)/cpustr.h: $(obj)/mkcpustr FORCE
$(call if_changed,cpustr)
endif
+clean-files += cpustr.h
# ---------------------------------------------------------------------------
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index d999398928bc..ad754b4411f7 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -90,7 +90,7 @@ suffix-$(CONFIG_KERNEL_LZO) := lzo
suffix-$(CONFIG_KERNEL_LZ4) := lz4
RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
- perl $(srctree)/arch/x86/tools/calc_run_size.pl)
+ $(CONFIG_SHELL) $(srctree)/arch/x86/tools/calc_run_size.sh)
quiet_cmd_mkpiggy = MKPIGGY $@
cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index dcc1c536cc21..a950864a64da 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -373,6 +373,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
unsigned long output_len,
unsigned long run_size)
{
+ unsigned char *output_orig = output;
+
real_mode = rmode;
sanitize_boot_params(real_mode);
@@ -421,7 +423,12 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
debug_putstr("\nDecompressing Linux... ");
decompress(input_data, input_len, NULL, NULL, output, NULL, error);
parse_elf(output);
- handle_relocations(output, output_len);
+ /*
+ * 32-bit always performs relocations. 64-bit relocations are only
+ * needed if kASLR has chosen a different load address.
+ */
+ if (!IS_ENABLED(CONFIG_X86_64) || output != output_orig)
+ handle_relocations(output, output_len);
debug_putstr("done.\nBooting the kernel.\n");
return output;
}
diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile
index fd0f848938cc..5a4a089e8b1f 100644
--- a/arch/x86/crypto/Makefile
+++ b/arch/x86/crypto/Makefile
@@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o
obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o
obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o
-obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o
obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o
obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o
@@ -46,6 +45,7 @@ endif
ifeq ($(avx2_supported),yes)
obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o
obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o
+ obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/
endif
aes-i586-y := aes-i586-asm_32.o aes_glue.o
diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
index 2df2a0298f5a..a916c4a61165 100644
--- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
+++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S
@@ -208,7 +208,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 3*16(p_keys), xkeyA
+ vmovdqa 3*16(p_keys), xkey4
.endif
.else
vmovdqa 3*16(p_keys), xkeyA
@@ -224,7 +224,7 @@ ddq_add_8:
add $(16*by), p_in
.if (klen == KEY_128)
- vmovdqa 4*16(p_keys), xkey4
+ vmovdqa 4*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 4*16(p_keys), xkey4
@@ -234,7 +234,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyA, var_xdata, var_xdata /* key 3 */
+ /* key 3 */
+ .if (klen == KEY_128)
+ vaesenc xkey4, var_xdata, var_xdata
+ .else
+ vaesenc xkeyA, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -243,13 +248,18 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkey4, var_xdata, var_xdata /* key 4 */
+ /* key 4 */
+ .if (klen == KEY_128)
+ vaesenc xkeyB, var_xdata, var_xdata
+ .else
+ vaesenc xkey4, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 6*16(p_keys), xkeyB
+ vmovdqa 6*16(p_keys), xkey8
.endif
.else
vmovdqa 6*16(p_keys), xkeyB
@@ -267,12 +277,17 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyB, var_xdata, var_xdata /* key 6 */
+ /* key 6 */
+ .if (klen == KEY_128)
+ vaesenc xkey8, var_xdata, var_xdata
+ .else
+ vaesenc xkeyB, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
.if (klen == KEY_128)
- vmovdqa 8*16(p_keys), xkey8
+ vmovdqa 8*16(p_keys), xkeyB
.else
.if (load_keys)
vmovdqa 8*16(p_keys), xkey8
@@ -288,7 +303,7 @@ ddq_add_8:
.if (klen == KEY_128)
.if (load_keys)
- vmovdqa 9*16(p_keys), xkeyA
+ vmovdqa 9*16(p_keys), xkey12
.endif
.else
vmovdqa 9*16(p_keys), xkeyA
@@ -297,7 +312,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkey8, var_xdata, var_xdata /* key 8 */
+ /* key 8 */
+ .if (klen == KEY_128)
+ vaesenc xkeyB, var_xdata, var_xdata
+ .else
+ vaesenc xkey8, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -306,7 +326,12 @@ ddq_add_8:
.set i, 0
.rept by
club XDATA, i
- vaesenc xkeyA, var_xdata, var_xdata /* key 9 */
+ /* key 9 */
+ .if (klen == KEY_128)
+ vaesenc xkey12, var_xdata, var_xdata
+ .else
+ vaesenc xkeyA, var_xdata, var_xdata
+ .endif
.set i, (i +1)
.endr
@@ -412,7 +437,6 @@ ddq_add_8:
/* main body of aes ctr load */
.macro do_aes_ctrmain key_len
-
cmp $16, num_bytes
jb .Ldo_return2\key_len
diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c
index a225a5ca1037..fd9f6b035b16 100644
--- a/arch/x86/crypto/sha-mb/sha1_mb.c
+++ b/arch/x86/crypto/sha-mb/sha1_mb.c
@@ -931,4 +931,4 @@ module_exit(sha1_mb_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated");
-MODULE_ALIAS("sha1");
+MODULE_ALIAS_CRYPTO("sha1");
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 0ab4f9fd2687..3a45668f6dc3 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -50,6 +50,7 @@ void acpi_pic_sci_set_trigger(unsigned int, u16);
extern int (*__acpi_register_gsi)(struct device *dev, u32 gsi,
int trigger, int polarity);
+extern void (*__acpi_unregister_gsi)(u32 gsi);
static inline void disable_acpi(void)
{
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index 50d033a8947d..a94b82e8f156 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -251,7 +251,8 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}
-#define _LDT_empty(info) \
+/* This intentionally ignores lm, since 32-bit apps don't have that field. */
+#define LDT_empty(info) \
((info)->base_addr == 0 && \
(info)->limit == 0 && \
(info)->contents == 0 && \
@@ -261,11 +262,18 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
(info)->seg_not_present == 1 && \
(info)->useable == 0)
-#ifdef CONFIG_X86_64
-#define LDT_empty(info) (_LDT_empty(info) && ((info)->lm == 0))
-#else
-#define LDT_empty(info) (_LDT_empty(info))
-#endif
+/* Lots of programs expect an all-zero user_desc to mean "no segment at all". */
+static inline bool LDT_zero(const struct user_desc *info)
+{
+ return (info->base_addr == 0 &&
+ info->limit == 0 &&
+ info->contents == 0 &&
+ info->read_exec_only == 0 &&
+ info->seg_32bit == 0 &&
+ info->limit_in_pages == 0 &&
+ info->seg_not_present == 0 &&
+ info->useable == 0);
+}
static inline void clear_LDT(void)
{
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 40269a2bf6f9..4b75d591eb5e 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -130,7 +130,25 @@ static inline void arch_bprm_mm_init(struct mm_struct *mm,
static inline void arch_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
- mpx_notify_unmap(mm, vma, start, end);
+ /*
+ * mpx_notify_unmap() goes and reads a rarely-hot
+ * cacheline in the mm_struct. That can be expensive
+ * enough to be seen in profiles.
+ *
+ * The mpx_notify_unmap() call and its contents have been
+ * observed to affect munmap() performance on hardware
+ * where MPX is not present.
+ *
+ * The unlikely() optimizes for the fast case: no MPX
+ * in the CPU, or no MPX use in the process. Even if
+ * we get this wrong (in the unlikely event that MPX
+ * is widely enabled on some system) the overhead of
+ * MPX itself (reading bounds tables) is expected to
+ * overwhelm the overhead of getting this unlikely()
+ * consistently wrong.
+ */
+ if (unlikely(cpu_feature_enabled(X86_FEATURE_MPX)))
+ mpx_notify_unmap(mm, vma, start, end);
}
#endif /* _ASM_X86_MMU_CONTEXT_H */
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index e7e9682a33e9..f556c4843aa1 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
/*
* Load per CPU data from GDT. LSL is faster than RDTSCP and
- * works on all CPUs.
+ * works on all CPUs. This is volatile so that it orders
+ * correctly wrt barrier() and to keep gcc from cleverly
+ * hoisting it out of the calling function.
*/
- asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+ asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
return p;
}
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index 4433a4be8171..b9e30daa0881 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -611,20 +611,20 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
- int irq;
-
- if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
- *irqp = gsi;
- } else {
- mutex_lock(&acpi_ioapic_lock);
- irq = mp_map_gsi_to_irq(gsi,
- IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
- mutex_unlock(&acpi_ioapic_lock);
- if (irq < 0)
- return -1;
- *irqp = irq;
+ int rc, irq, trigger, polarity;
+
+ rc = acpi_get_override_irq(gsi, &trigger, &polarity);
+ if (rc == 0) {
+ trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
+ polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
+ irq = acpi_register_gsi(NULL, gsi, trigger, polarity);
+ if (irq >= 0) {
+ *irqp = irq;
+ return 0;
+ }
}
- return 0;
+
+ return -1;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
@@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
}
/* wrapper to silence section mismatch warning */
-int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu)
+int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu)
{
return _acpi_map_lsapic(handle, physid, pcpu);
}
-EXPORT_SYMBOL(acpi_map_lsapic);
+EXPORT_SYMBOL(acpi_map_cpu);
-int acpi_unmap_lsapic(int cpu)
+int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE);
@@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu)
return (0);
}
-
-EXPORT_SYMBOL(acpi_unmap_lsapic);
+EXPORT_SYMBOL(acpi_unmap_cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index e27b49d7c922..80091ae54c2b 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -66,3 +66,4 @@ targets += capflags.c
$(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
$(call if_changed,mkcapflags)
endif
+clean-files += capflags.c
diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh
index e2b22df964cd..36d99a337b49 100644
--- a/arch/x86/kernel/cpu/mkcapflags.sh
+++ b/arch/x86/kernel/cpu/mkcapflags.sh
@@ -28,7 +28,7 @@ function dump_array()
# If the /* comment */ starts with a quote string, grab that.
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
[ -z "$VALUE" ] && VALUE="\"$NAME\""
- [ "$VALUE" == '""' ] && continue
+ [ "$VALUE" = '""' ] && continue
# Name is uppercase, VALUE is all lowercase
VALUE="$(echo "$VALUE" | tr A-Z a-z)"
diff --git a/arch/x86/kernel/cpu/mshyperv.c b/arch/x86/kernel/cpu/mshyperv.c
index a450373e8e91..939155ffdece 100644
--- a/arch/x86/kernel/cpu/mshyperv.c
+++ b/arch/x86/kernel/cpu/mshyperv.c
@@ -107,6 +107,7 @@ static struct clocksource hyperv_cs = {
.rating = 400, /* use this when running on Hyperv*/
.read = read_hv_clock,
.mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
static void __init ms_hyperv_init_platform(void)
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 944bf019b74f..498b6d967138 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2431,6 +2431,7 @@ __init int intel_pmu_init(void)
break;
case 55: /* 22nm Atom "Silvermont" */
+ case 76: /* 14nm Atom "Airmont" */
case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 3c895d480cd7..073983398364 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = {
};
struct event_constraint intel_slm_pebs_event_constraints[] = {
- /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */
- INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf),
+ /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */
+ INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1),
/* Allow all events as PEBS with no flags */
INTEL_ALL_EVENT_CONSTRAINT(0, 0x1),
EVENT_CONSTRAINT_END
diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
index 673f930c700f..c4bb8b8e5017 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c
@@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var = \
#define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */
+#define RAPL_EVENT_ATTR_STR(_name, v, str) \
+static struct perf_pmu_events_attr event_attr_##v = { \
+ .attr = __ATTR(_name, 0444, rapl_sysfs_show, NULL), \
+ .id = 0, \
+ .event_str = str, \
+};
+
struct rapl_pmu {
spinlock_t lock;
int hw_unit; /* 1/2^hw_unit Joule */
@@ -135,7 +142,7 @@ static inline u64 rapl_scale(u64 v)
* or use ldexp(count, -32).
* Watts = Joules/Time delta
*/
- return v << (32 - __this_cpu_read(rapl_pmu->hw_unit));
+ return v << (32 - __this_cpu_read(rapl_pmu)->hw_unit);
}
static u64 rapl_event_update(struct perf_event *event)
@@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = {
.attrs = rapl_pmu_attrs,
};
-EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
-EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
-EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
-EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
+static ssize_t rapl_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *page)
+{
+ struct perf_pmu_events_attr *pmu_attr = \
+ container_of(attr, struct perf_pmu_events_attr, attr);
+
+ if (pmu_attr->event_str)
+ return sprintf(page, "%s", pmu_attr->event_str);
+
+ return 0;
+}
+
+RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
+RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02");
+RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03");
+RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04");
-EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
-EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
-EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
-EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules");
+RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules");
/*
* we compute in 0.23 nJ increments regardless of MSR
*/
-EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
-EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10");
+RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10");
static struct attribute *rapl_events_srv_attr[] = {
EVENT_PTR(rapl_cores),
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
index 10b8d3eaaf15..c635b8b49e93 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
@@ -840,7 +840,6 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
box->phys_id = phys_id;
box->pci_dev = pdev;
box->pmu = pmu;
- uncore_box_init(box);
pci_set_drvdata(pdev, box);
raw_spin_lock(&uncore_box_lock);
@@ -1004,10 +1003,8 @@ static int uncore_cpu_starting(int cpu)
pmu = &type->pmus[j];
box = *per_cpu_ptr(pmu->box, cpu);
/* called by uncore_cpu_init? */
- if (box && box->phys_id >= 0) {
- uncore_box_init(box);
+ if (box && box->phys_id >= 0)
continue;
- }
for_each_online_cpu(k) {
exist = *per_cpu_ptr(pmu->box, k);
@@ -1023,10 +1020,8 @@ static int uncore_cpu_starting(int cpu)
}
}
- if (box) {
+ if (box)
box->phys_id = phys_id;
- uncore_box_init(box);
- }
}
}
return 0;
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
index 18eb78bbdd10..6c8c1e7e69d8 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
@@ -17,7 +17,7 @@
#define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data) (data & 0xff)
#define UNCORE_EXTRA_PCI_DEV 0xff
-#define UNCORE_EXTRA_PCI_DEV_MAX 2
+#define UNCORE_EXTRA_PCI_DEV_MAX 3
/* support up to 8 sockets */
#define UNCORE_SOCKET_MAX 8
@@ -257,6 +257,14 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
return box->pmu->type->num_counters;
}
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+ if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+ if (box->pmu->type->ops->init_box)
+ box->pmu->type->ops->init_box(box);
+ }
+}
+
static inline void uncore_disable_box(struct intel_uncore_box *box)
{
if (box->pmu->type->ops->disable_box)
@@ -265,6 +273,8 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
static inline void uncore_enable_box(struct intel_uncore_box *box)
{
+ uncore_box_init(box);
+
if (box->pmu->type->ops->enable_box)
box->pmu->type->ops->enable_box(box);
}
@@ -287,14 +297,6 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
return box->pmu->type->ops->read_counter(box, event);
}
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
- if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
- if (box->pmu->type->ops->init_box)
- box->pmu->type->ops->init_box(box);
- }
-}
-
static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
return (box->phys_id < 0);
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index 745b158e9a65..21af6149edf2 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void)
enum {
SNBEP_PCI_QPI_PORT0_FILTER,
SNBEP_PCI_QPI_PORT1_FILTER,
+ HSWEP_PCI_PCU_3,
};
static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
@@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void)
{
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
+
+ /* Detect 6-8 core systems with only two SBOXes */
+ if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) {
+ u32 capid4;
+
+ pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3],
+ 0x94, &capid4);
+ if (((capid4 >> 6) & 0x3) == 0)
+ hswep_uncore_sbox.num_boxes = 2;
+ }
+
uncore_msr_uncores = hswep_msr_uncores;
}
@@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = {
.driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
SNBEP_PCI_QPI_PORT1_FILTER),
},
+ { /* PCU.3 (for Capability registers) */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0),
+ .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV,
+ HSWEP_PCI_PCU_3),
+ },
{ /* end: all zeroes */ }
};
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 2142376dc8c6..8b7b0a51e742 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -674,7 +674,7 @@ static inline void *alloc_tramp(unsigned long size)
}
static inline void tramp_free(void *tramp)
{
- module_free(NULL, tramp);
+ module_memfree(tramp);
}
#else
/* Trampolines can only be created if modules are supported */
diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
index 6307a0f0cf17..705ef8d48e2d 100644
--- a/arch/x86/kernel/irq.c
+++ b/arch/x86/kernel/irq.c
@@ -127,7 +127,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
seq_puts(p, " Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
- seq_printf(p, "%*s: ", prec, "THR");
+ seq_printf(p, "%*s: ", prec, "HYP");
for_each_online_cpu(j)
seq_printf(p, "%10u ", irq_stats(j)->irq_hv_callback_count);
seq_puts(p, " Hypervisor callback interrupts\n");
diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
index f7e3cd50ece0..98f654d466e5 100644
--- a/arch/x86/kernel/kprobes/core.c
+++ b/arch/x86/kernel/kprobes/core.c
@@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
regs->flags &= ~X86_EFLAGS_IF;
trace_hardirqs_off();
regs->ip = (unsigned long)(jp->entry);
+
+ /*
+ * jprobes use jprobe_return() which skips the normal return
+ * path of the function, and this messes up the accounting of the
+ * function graph tracer.
+ *
+ * Pause function graph tracing while performing the jprobe function.
+ */
+ pause_graph_tracing();
return 1;
}
NOKPROBE_SYMBOL(setjmp_pre_handler);
@@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
u8 *addr = (u8 *) (regs->ip - 1);
struct jprobe *jp = container_of(p, struct jprobe, kp);
+ void *saved_sp = kcb->jprobe_saved_sp;
if ((addr > (u8 *) jprobe_return) &&
(addr < (u8 *) jprobe_return_end)) {
- if (stack_addr(regs) != kcb->jprobe_saved_sp) {
+ if (stack_addr(regs) != saved_sp) {
struct pt_regs *saved_regs = &kcb->jprobe_saved_regs;
printk(KERN_ERR
"current sp %p does not match saved sp %p\n",
- stack_addr(regs), kcb->jprobe_saved_sp);
+ stack_addr(regs), saved_sp);
printk(KERN_ERR "Saved registers for jprobe %p\n", jp);
show_regs(saved_regs);
printk(KERN_ERR "Current registers\n");
show_regs(regs);
BUG();
}
+ /* It's OK to start function graph tracing again */
+ unpause_graph_tracing();
*regs = kcb->jprobe_saved_regs;
- memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp),
- kcb->jprobes_stack,
- MIN_STACK_SIZE(kcb->jprobe_saved_sp));
+ memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp));
preempt_enable_no_resched();
return 1;
}
diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c
index e309cc5c276e..781861cc5ee8 100644
--- a/arch/x86/kernel/perf_regs.c
+++ b/arch/x86/kernel/perf_regs.c
@@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_32;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
#else /* CONFIG_X86_64 */
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \
(1ULL << PERF_REG_X86_ES) | \
@@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task)
else
return PERF_SAMPLE_REGS_ABI_64;
}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ struct pt_regs *user_regs = task_pt_regs(current);
+
+ /*
+ * If we're in an NMI that interrupted task_pt_regs setup, then
+ * we can't sample user regs at all. This check isn't really
+ * sufficient, though, as we could be in an NMI inside an interrupt
+ * that happened during task_pt_regs setup.
+ */
+ if (regs->sp > (unsigned long)&user_regs->r11 &&
+ regs->sp <= (unsigned long)(user_regs + 1)) {
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
+ regs_user->regs = NULL;
+ return;
+ }
+
+ /*
+ * RIP, flags, and the argument registers are usually saved.
+ * orig_ax is probably okay, too.
+ */
+ regs_user_copy->ip = user_regs->ip;
+ regs_user_copy->cx = user_regs->cx;
+ regs_user_copy->dx = user_regs->dx;
+ regs_user_copy->si = user_regs->si;
+ regs_user_copy->di = user_regs->di;
+ regs_user_copy->r8 = user_regs->r8;
+ regs_user_copy->r9 = user_regs->r9;
+ regs_user_copy->r10 = user_regs->r10;
+ regs_user_copy->r11 = user_regs->r11;
+ regs_user_copy->orig_ax = user_regs->orig_ax;
+ regs_user_copy->flags = user_regs->flags;
+
+ /*
+ * Don't even try to report the "rest" regs.
+ */
+ regs_user_copy->bx = -1;
+ regs_user_copy->bp = -1;
+ regs_user_copy->r12 = -1;
+ regs_user_copy->r13 = -1;
+ regs_user_copy->r14 = -1;
+ regs_user_copy->r15 = -1;
+
+ /*
+ * For this to be at all useful, we need a reasonable guess for
+ * sp and the ABI. Be careful: we're in NMI context, and we're
+ * considering current to be the current task, so we should
+ * be careful not to look at any other percpu variables that might
+ * change during context switches.
+ */
+ if (IS_ENABLED(CONFIG_IA32_EMULATION) &&
+ task_thread_info(current)->status & TS_COMPAT) {
+ /* Easy case: we're in a compat syscall. */
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_32;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
+ } else if (user_regs->orig_ax != -1) {
+ /*
+ * We're probably in a 64-bit syscall.
+ * Warning: this code is severely racy. At least it's better
+ * than just blindly copying user_regs.
+ */
+ regs_user->abi = PERF_SAMPLE_REGS_ABI_64;
+ regs_user_copy->sp = this_cpu_read(old_rsp);
+ regs_user_copy->cs = __USER_CS;
+ regs_user_copy->ss = __USER_DS;
+ regs_user_copy->cx = -1; /* usually contains garbage */
+ } else {
+ /* We're probably in an interrupt or exception. */
+ regs_user->abi = user_64bit_mode(user_regs) ?
+ PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32;
+ regs_user_copy->sp = user_regs->sp;
+ regs_user_copy->cs = user_regs->cs;
+ regs_user_copy->ss = user_regs->ss;
+ }
+
+ regs_user->regs = regs_user_copy;
+}
#endif /* CONFIG_X86_32 */
diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
index 4e942f31b1a7..7fc5e843f247 100644
--- a/arch/x86/kernel/tls.c
+++ b/arch/x86/kernel/tls.c
@@ -29,7 +29,28 @@ static int get_free_idx(void)
static bool tls_desc_okay(const struct user_desc *info)
{
- if (LDT_empty(info))
+ /*
+ * For historical reasons (i.e. no one ever documented how any
+ * of the segmentation APIs work), user programs can and do
+ * assume that a struct user_desc that's all zeros except for
+ * entry_number means "no segment at all". This never actually
+ * worked. In fact, up to Linux 3.19, a struct user_desc like
+ * this would create a 16-bit read-write segment with base and
+ * limit both equal to zero.
+ *
+ * That was close enough to "no segment at all" until we
+ * hardened this function to disallow 16-bit TLS segments. Fix
+ * it up by interpreting these zeroed segments the way that they
+ * were almost certainly intended to be interpreted.
+ *
+ * The correct way to ask for "no segment at all" is to specify
+ * a user_desc that satisfies LDT_empty. To keep everything
+ * working, we accept both.
+ *
+ * Note that there's a similar kludge in modify_ldt -- look at
+ * the distinction between modes 1 and 0x11.
+ */
+ if (LDT_empty(info) || LDT_zero(info))
return true;
/*
@@ -71,7 +92,7 @@ static void set_tls_desc(struct task_struct *p, int idx,
cpu = get_cpu();
while (n-- > 0) {
- if (LDT_empty(info))
+ if (LDT_empty(info) || LDT_zero(info))
desc->a = desc->b = 0;
else
fill_ldt(desc, info);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b7e50bba3bbb..505449700e0c 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -617,7 +617,7 @@ static unsigned long quick_pit_calibrate(void)
goto success;
}
}
- pr_err("Fast TSC calibration failed\n");
+ pr_info("Fast TSC calibration failed\n");
return 0;
success:
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 169b09d76ddd..de12c1d379f1 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2348,7 +2348,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
* Not recognized on AMD in compat mode (but is recognized in legacy
* mode).
*/
- if ((ctxt->mode == X86EMUL_MODE_PROT32) && (efer & EFER_LMA)
+ if ((ctxt->mode != X86EMUL_MODE_PROT64) && (efer & EFER_LMA)
&& !vendor_intel(ctxt))
return emulate_ud(ctxt);
@@ -2359,25 +2359,13 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
setup_syscalls_segments(ctxt, &cs, &ss);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
- switch (ctxt->mode) {
- case X86EMUL_MODE_PROT32:
- if ((msr_data & 0xfffc) == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- case X86EMUL_MODE_PROT64:
- if (msr_data == 0x0)
- return emulate_gp(ctxt, 0);
- break;
- default:
- break;
- }
+ if ((msr_data & 0xfffc) == 0x0)
+ return emulate_gp(ctxt, 0);
ctxt->eflags &= ~(EFLG_VM | EFLG_IF);
- cs_sel = (u16)msr_data;
- cs_sel &= ~SELECTOR_RPL_MASK;
+ cs_sel = (u16)msr_data & ~SELECTOR_RPL_MASK;
ss_sel = cs_sel + 8;
- ss_sel &= ~SELECTOR_RPL_MASK;
- if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
+ if (efer & EFER_LMA) {
cs.d = 0;
cs.l = 1;
}
@@ -2386,10 +2374,11 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
- ctxt->_eip = msr_data;
+ ctxt->_eip = (efer & EFER_LMA) ? msr_data : (u32)msr_data;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
- *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
+ *reg_write(ctxt, VCPU_REGS_RSP) = (efer & EFER_LMA) ? msr_data :
+ (u32)msr_data;
return X86EMUL_CONTINUE;
}
@@ -3791,8 +3780,8 @@ static const struct opcode group5[] = {
};
static const struct opcode group6[] = {
- DI(Prot, sldt),
- DI(Prot, str),
+ DI(Prot | DstMem, sldt),
+ DI(Prot | DstMem, str),
II(Prot | Priv | SrcMem16, em_lldt, lldt),
II(Prot | Priv | SrcMem16, em_ltr, ltr),
N, N, N, N,
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 4f0c0b954686..d52dcf0776ea 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -192,6 +192,9 @@ static void recalculate_apic_map(struct kvm *kvm)
u16 cid, lid;
u32 ldr, aid;
+ if (!kvm_apic_present(vcpu))
+ continue;
+
aid = kvm_apic_id(apic);
ldr = kvm_apic_get_reg(apic, APIC_LDR);
cid = apic_cluster_id(new, ldr);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 10fbed126b11..f83fc6c5e0ba 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4448,7 +4448,7 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
* zap all shadow pages.
*/
if (unlikely(kvm_current_mmio_generation(kvm) == 0)) {
- printk_ratelimited(KERN_INFO "kvm: zapping shadow pages for mmio generation wraparound\n");
+ printk_ratelimited(KERN_DEBUG "kvm: zapping shadow pages for mmio generation wraparound\n");
kvm_mmu_invalidate_zap_all_pages(kvm);
}
}
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index feb852b04598..d4c58d884838 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5840,53 +5840,10 @@ static __init int hardware_setup(void)
memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
- vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
- vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
- vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
-
- memcpy(vmx_msr_bitmap_legacy_x2apic,
- vmx_msr_bitmap_legacy, PAGE_SIZE);
- memcpy(vmx_msr_bitmap_longmode_x2apic,
- vmx_msr_bitmap_longmode, PAGE_SIZE);
-
- if (enable_apicv) {
- for (msr = 0x800; msr <= 0x8ff; msr++)
- vmx_disable_intercept_msr_read_x2apic(msr);
-
- /* According SDM, in x2apic mode, the whole id reg is used.
- * But in KVM, it only use the highest eight bits. Need to
- * intercept it */
- vmx_enable_intercept_msr_read_x2apic(0x802);
- /* TMCCT */
- vmx_enable_intercept_msr_read_x2apic(0x839);
- /* TPR */
- vmx_disable_intercept_msr_write_x2apic(0x808);
- /* EOI */
- vmx_disable_intercept_msr_write_x2apic(0x80b);
- /* SELF-IPI */
- vmx_disable_intercept_msr_write_x2apic(0x83f);
- }
-
- if (enable_ept) {
- kvm_mmu_set_mask_ptes(0ull,
- (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
- (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
- 0ull, VMX_EPT_EXECUTABLE_MASK);
- ept_set_mmio_spte_mask();
- kvm_enable_tdp();
- } else
- kvm_disable_tdp();
-
- update_ple_window_actual_max();
-
if (setup_vmcs_config(&vmcs_config) < 0) {
r = -EIO;
goto out7;
- }
+ }
if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX);
@@ -5945,6 +5902,49 @@ static __init int hardware_setup(void)
if (nested)
nested_vmx_setup_ctls_msrs();
+ vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
+ vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+ vmx_disable_intercept_for_msr(MSR_IA32_BNDCFGS, true);
+
+ memcpy(vmx_msr_bitmap_legacy_x2apic,
+ vmx_msr_bitmap_legacy, PAGE_SIZE);
+ memcpy(vmx_msr_bitmap_longmode_x2apic,
+ vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+ if (enable_apicv) {
+ for (msr = 0x800; msr <= 0x8ff; msr++)
+ vmx_disable_intercept_msr_read_x2apic(msr);
+
+ /* According to the SDM, in x2apic mode the whole id reg is used.
+ * But in KVM, it only uses the highest eight bits. Need to
+ * intercept it */
+ vmx_enable_intercept_msr_read_x2apic(0x802);
+ /* TMCCT */
+ vmx_enable_intercept_msr_read_x2apic(0x839);
+ /* TPR */
+ vmx_disable_intercept_msr_write_x2apic(0x808);
+ /* EOI */
+ vmx_disable_intercept_msr_write_x2apic(0x80b);
+ /* SELF-IPI */
+ vmx_disable_intercept_msr_write_x2apic(0x83f);
+ }
+
+ if (enable_ept) {
+ kvm_mmu_set_mask_ptes(0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_ACCESS_BIT : 0ull,
+ (enable_ept_ad_bits) ? VMX_EPT_DIRTY_BIT : 0ull,
+ 0ull, VMX_EPT_EXECUTABLE_MASK);
+ ept_set_mmio_spte_mask();
+ kvm_enable_tdp();
+ } else
+ kvm_disable_tdp();
+
+ update_ple_window_actual_max();
+
return alloc_kvm_area();
out7:
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 2480978b31cc..1313ae6b478b 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -28,7 +28,7 @@
/* Verify next sizeof(t) bytes can be on the same instruction */
#define validate_next(t, insn, n) \
- ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr)
+ ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr)
#define __get_next(t, insn) \
({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; })
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 38dcec403b46..e3ff27a5b634 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -898,6 +898,8 @@ mm_fault_error(struct pt_regs *regs, unsigned long error_code,
if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|
VM_FAULT_HWPOISON_LARGE))
do_sigbus(regs, error_code, address, fault);
+ else if (fault & VM_FAULT_SIGSEGV)
+ bad_area_nosemaphore(regs, error_code, address);
else
BUG();
}
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index a97ee0801475..079c3b6a3ff1 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -43,7 +43,7 @@ uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
[_PAGE_CACHE_MODE_WT] = _PAGE_PCD,
[_PAGE_CACHE_MODE_WP] = _PAGE_PCD,
};
-EXPORT_SYMBOL_GPL(__cachemode2pte_tbl);
+EXPORT_SYMBOL(__cachemode2pte_tbl);
uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(0)] = _PAGE_CACHE_MODE_WB,
[__pte2cm_idx(_PAGE_PWT)] = _PAGE_CACHE_MODE_WC,
@@ -54,7 +54,7 @@ uint8_t __pte2cachemode_tbl[8] = {
[__pte2cm_idx(_PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
[__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
};
-EXPORT_SYMBOL_GPL(__pte2cachemode_tbl);
+EXPORT_SYMBOL(__pte2cachemode_tbl);
static unsigned long __initdata pgt_buf_start;
static unsigned long __initdata pgt_buf_end;
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
static unsigned long __init get_new_step_size(unsigned long step_size)
{
/*
- * Explain why we shift by 5 and why we don't have to worry about
- * 'step_size << 5' overflowing:
- *
- * initial mapped size is PMD_SIZE (2M).
+ * Initial mapped size is PMD_SIZE (2M).
* We can not set step_size to be PUD_SIZE (1G) yet.
* In worse case, when we cross the 1G boundary, and
* PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
- * to map 1G range with PTE. Use 5 as shift for now.
+ * to map 1G range with PTE. Hence we use one less than the
+ * difference of page table level shifts.
*
- * Don't need to worry about overflow, on 32bit, when step_size
- * is 0, round_down() returns 0 for start, and that turns it
- * into 0x100000000ULL.
+ * Don't need to worry about overflow in the top-down case, on 32bit,
+ * when step_size is 0, round_down() returns 0 for start, and that
+ * turns it into 0x100000000ULL.
+ * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+ * needs to be taken into consideration by the code below.
*/
- return step_size << 5;
+ return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
}
/**
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
unsigned long step_size;
unsigned long addr;
unsigned long mapped_ram_size = 0;
- unsigned long new_mapped_ram_size;
/* xen has big range in reserved near end of ram, skip it at first.*/
addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
start = map_start;
} else
start = map_start;
- new_mapped_ram_size = init_range_memory_mapping(start,
+ mapped_ram_size += init_range_memory_mapping(start,
last_start);
last_start = start;
min_pfn_mapped = last_start >> PAGE_SHIFT;
- /* only increase step_size after big range get mapped */
- if (new_mapped_ram_size > mapped_ram_size)
+ if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
- mapped_ram_size += new_mapped_ram_size;
}
if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
static void __init memory_map_bottom_up(unsigned long map_start,
unsigned long map_end)
{
- unsigned long next, new_mapped_ram_size, start;
+ unsigned long next, start;
unsigned long mapped_ram_size = 0;
/* step_size need to be small so pgt_buf from BRK could cover it */
unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
* for page table.
*/
while (start < map_end) {
- if (map_end - start > step_size) {
+ if (step_size && map_end - start > step_size) {
next = round_up(start + 1, step_size);
if (next > map_end)
next = map_end;
- } else
+ } else {
next = map_end;
+ }
- new_mapped_ram_size = init_range_memory_mapping(start, next);
+ mapped_ram_size += init_range_memory_mapping(start, next);
start = next;
- if (new_mapped_ram_size > mapped_ram_size)
+ if (mapped_ram_size >= step_size)
step_size = get_new_step_size(step_size);
- mapped_ram_size += new_mapped_ram_size;
}
}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 67ebf5751222..c439ec478216 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -349,6 +349,12 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
return MPX_INVALID_BOUNDS_DIR;
/*
+ * 32-bit binaries on 64-bit kernels are currently
+ * unsupported.
+ */
+ if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
+ return MPX_INVALID_BOUNDS_DIR;
+ /*
* The bounds directory pointer is stored in a register
* only accessible if we first do an xsave.
*/
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index edf299c8ff6c..7ac68698406c 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -234,8 +234,13 @@ void pat_init(void)
PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
/* Boot CPU check */
- if (!boot_pat_state)
+ if (!boot_pat_state) {
rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
+ if (!boot_pat_state) {
+ pat_disable("PAT read returns always zero, disabled.");
+ return;
+ }
+ }
wrmsrl(MSR_IA32_CR_PAT, pat);
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 9b18ef315a55..349c0d32cc0b 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -216,7 +216,7 @@ static void pcibios_allocate_bridge_resources(struct pci_dev *dev)
continue;
if (r->parent) /* Already allocated */
continue;
- if (!r->start || pci_claim_resource(dev, idx) < 0) {
+ if (!r->start || pci_claim_bridge_resource(dev, idx) < 0) {
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index c489ef2c1a39..9098d880c476 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -458,6 +458,7 @@ int __init pci_xen_hvm_init(void)
* just how GSIs get registered.
*/
__acpi_register_gsi = acpi_register_gsi_xen_hvm;
+ __acpi_unregister_gsi = NULL;
#endif
#ifdef CONFIG_PCI_MSI
@@ -471,52 +472,6 @@ int __init pci_xen_hvm_init(void)
}
#ifdef CONFIG_XEN_DOM0
-static __init void xen_setup_acpi_sci(void)
-{
- int rc;
- int trigger, polarity;
- int gsi = acpi_sci_override_gsi;
- int irq = -1;
- int gsi_override = -1;
-
- if (!gsi)
- return;
-
- rc = acpi_get_override_irq(gsi, &trigger, &polarity);
- if (rc) {
- printk(KERN_WARNING "xen: acpi_get_override_irq failed for acpi"
- " sci, rc=%d\n", rc);
- return;
- }
- trigger = trigger ? ACPI_LEVEL_SENSITIVE : ACPI_EDGE_SENSITIVE;
- polarity = polarity ? ACPI_ACTIVE_LOW : ACPI_ACTIVE_HIGH;
-
- printk(KERN_INFO "xen: sci override: global_irq=%d trigger=%d "
- "polarity=%d\n", gsi, trigger, polarity);
-
- /* Before we bind the GSI to a Linux IRQ, check whether
- * we need to override it with bus_irq (IRQ) value. Usually for
- * IRQs below IRQ_LEGACY_IRQ this holds IRQ == GSI, as so:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 9 low level)
- * but there are oddballs where the IRQ != GSI:
- * ACPI: INT_SRC_OVR (bus 0 bus_irq 9 global_irq 20 low level)
- * which ends up being: gsi_to_irq[9] == 20
- * (which is what acpi_gsi_to_irq ends up calling when starting the
- * the ACPI interpreter and keels over since IRQ 9 has not been
- * setup as we had setup IRQ 20 for it).
- */
- if (acpi_gsi_to_irq(gsi, &irq) == 0) {
- /* Use the provided value if it's valid. */
- if (irq >= 0)
- gsi_override = irq;
- }
-
- gsi = xen_register_gsi(gsi, gsi_override, trigger, polarity);
- printk(KERN_INFO "xen: acpi sci %d\n", gsi);
-
- return;
-}
-
int __init pci_xen_initial_domain(void)
{
int irq;
@@ -527,8 +482,8 @@ int __init pci_xen_initial_domain(void)
x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs;
pci_msi_ignore_mask = 1;
#endif
- xen_setup_acpi_sci();
__acpi_register_gsi = acpi_register_gsi_xen;
+ __acpi_unregister_gsi = NULL;
/* Pre-allocate legacy irqs */
for (irq = 0; irq < nr_legacy_irqs(); irq++) {
int trigger, polarity;
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
deleted file mode 100644
index 23210baade2d..000000000000
--- a/arch/x86/tools/calc_run_size.pl
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/perl
-#
-# Calculate the amount of space needed to run the kernel, including room for
-# the .bss and .brk sections.
-#
-# Usage:
-# objdump -h a.out | perl calc_run_size.pl
-use strict;
-
-my $mem_size = 0;
-my $file_offset = 0;
-
-my $sections=" *[0-9]+ \.(?:bss|brk) +";
-while (<>) {
- if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
- my $size = hex($1);
- my $offset = hex($2);
- $mem_size += $size;
- if ($file_offset == 0) {
- $file_offset = $offset;
- } elsif ($file_offset != $offset) {
- # BFD linker shows the same file offset in ELF.
- # Gold linker shows them as consecutive.
- next if ($file_offset + $mem_size == $offset + $size);
-
- printf STDERR "file_offset: 0x%lx\n", $file_offset;
- printf STDERR "mem_size: 0x%lx\n", $mem_size;
- printf STDERR "offset: 0x%lx\n", $offset;
- printf STDERR "size: 0x%lx\n", $size;
-
- die ".bss and .brk are non-contiguous\n";
- }
- }
-}
-
-if ($file_offset == 0) {
- die "Never found .bss or .brk file offset\n";
-}
-printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/tools/calc_run_size.sh b/arch/x86/tools/calc_run_size.sh
new file mode 100644
index 000000000000..1a4c17bb3910
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.sh
@@ -0,0 +1,42 @@
+#!/bin/sh
+#
+# Calculate the amount of space needed to run the kernel, including room for
+# the .bss and .brk sections.
+#
+# Usage:
+# objdump -h a.out | sh calc_run_size.sh
+
+NUM='\([0-9a-fA-F]*[ \t]*\)'
+OUT=$(sed -n 's/^[ \t0-9]*.b[sr][sk][ \t]*'"$NUM$NUM$NUM$NUM"'.*/\1\4/p')
+if [ -z "$OUT" ] ; then
+ echo "Never found .bss or .brk file offset" >&2
+ exit 1
+fi
+
+OUT=$(echo ${OUT# })
+sizeA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetA=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+sizeB=$(printf "%d" 0x${OUT%% *})
+OUT=${OUT#* }
+offsetB=$(printf "%d" 0x${OUT%% *})
+
+run_size=$(( $offsetA + $sizeA + $sizeB ))
+
+# BFD linker shows the same file offset in ELF.
+if [ "$offsetA" -ne "$offsetB" ] ; then
+ # Gold linker shows them as consecutive.
+ endB=$(( $offsetB + $sizeB ))
+ if [ "$endB" != "$run_size" ] ; then
+ printf "sizeA: 0x%x\n" $sizeA >&2
+ printf "offsetA: 0x%x\n" $offsetA >&2
+ printf "sizeB: 0x%x\n" $sizeB >&2
+ printf "offsetB: 0x%x\n" $offsetB >&2
+ echo ".bss and .brk are non-contiguous" >&2
+ exit 1
+ fi
+fi
+
+printf "%d\n" $run_size
+exit 0
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 531d4269e2e3..bd16d6c370ec 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -34,7 +34,7 @@ typedef asmlinkage void (*sys_call_ptr_t)(void);
extern asmlinkage void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/um/sys_call_table_64.c b/arch/x86/um/sys_call_table_64.c
index 20c3649d0691..5cdfa9db2217 100644
--- a/arch/x86/um/sys_call_table_64.c
+++ b/arch/x86/um/sys_call_table_64.c
@@ -47,7 +47,7 @@ typedef void (*sys_call_ptr_t)(void);
extern void sys_ni_syscall(void);
-const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
+const sys_call_ptr_t sys_call_table[] ____cacheline_aligned = {
/*
* Smells like a compiler bug -- it doesn't work
* when the & below is removed.
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index 009495b9ab4b..1c9f750c3859 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
struct linux_binprm;
-/* Put the vdso above the (randomized) stack with another randomized offset.
- This way there is no hole in the middle of address space.
- To save memory make sure it is still in the same PTE as the stack top.
- This doesn't give that many random bits.
-
- Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset. This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top. This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
static unsigned long vdso_addr(unsigned long start, unsigned len)
{
#ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
#else
unsigned long addr, end;
unsigned offset;
- end = (start + PMD_SIZE - 1) & PMD_MASK;
+
+ /*
+ * Round up the start address. It can start out unaligned as a result
+ * of stack start randomization.
+ */
+ start = PAGE_ALIGN(start);
+
+ /* Round the lowest possible end address up to a PMD boundary. */
+ end = (start + len + PMD_SIZE - 1) & PMD_MASK;
if (end >= TASK_SIZE_MAX)
end = TASK_SIZE_MAX;
end -= len;
- /* This loses some more bits than a modulo, but is cheaper */
- offset = get_random_int() & (PTRS_PER_PTE - 1);
- addr = start + (offset << PAGE_SHIFT);
- if (addr >= end)
- addr = end;
+
+ if (end > start) {
+ offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+ addr = start + (offset << PAGE_SHIFT);
+ } else {
+ addr = start;
+ }
/*
- * page-align it here so that get_unmapped_area doesn't
- * align it wrongfully again to the next page. addr can come in 4K
- * unaligned here as a result of stack start randomization.
+ * Forcibly align the final address in case we have a hardware
+ * issue that requires alignment for performance reasons.
*/
- addr = PAGE_ALIGN(addr);
addr = align_vdso_addr(addr);
return addr;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 6bf3a13e3e0f..78a881b7fc41 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -40,6 +40,7 @@
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/memory.h>
+#include <xen/interface/nmi.h>
#include <xen/interface/xen-mca.h>
#include <xen/features.h>
#include <xen/page.h>
@@ -66,6 +67,7 @@
#include <asm/reboot.h>
#include <asm/stackprotector.h>
#include <asm/hypervisor.h>
+#include <asm/mach_traps.h>
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/pat.h>
@@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = {
.emergency_restart = xen_emergency_restart,
};
+static unsigned char xen_get_nmi_reason(void)
+{
+ unsigned char reason = 0;
+
+ /* Construct a value which looks like it came from port 0x61. */
+ if (test_bit(_XEN_NMIREASON_io_error,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_IOCHK;
+ if (test_bit(_XEN_NMIREASON_pci_serr,
+ &HYPERVISOR_shared_info->arch.nmi_reason))
+ reason |= NMI_REASON_SERR;
+
+ return reason;
+}
+
static void __init xen_boot_params_init_edd(void)
{
#if IS_ENABLED(CONFIG_EDD)
@@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void)
pv_info = xen_info;
pv_init_ops = xen_init_ops;
pv_apic_ops = xen_apic_ops;
- if (!xen_pvh_domain())
+ if (!xen_pvh_domain()) {
pv_cpu_ops = xen_cpu_ops;
+ x86_platform.get_nmi_reason = xen_get_nmi_reason;
+ }
+
if (xen_feature(XENFEAT_auto_translated_physmap))
x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
else
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index edbc7a63fd73..70fb5075c901 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void)
return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT);
}
-/* Only to be called in case of a race for a page just allocated! */
-static void free_p2m_page(void *p)
+static void __ref free_p2m_page(void *p)
{
- BUG_ON(!slab_is_available());
+ if (unlikely(!slab_is_available())) {
+ free_bootmem((unsigned long)p, PAGE_SIZE);
+ return;
+ }
+
free_page((unsigned long)p);
}
@@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m)
p2m_missing_pte : p2m_identity_pte;
for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
pmdp = populate_extra_pmd(
- (unsigned long)(p2m + pfn + i * PTRS_PER_PTE));
+ (unsigned long)(p2m + pfn) + i * PMD_SIZE);
set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
}
}
@@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine);
* a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual
* pmd. In case of PAE/x86-32 there are multiple pmds to allocate!
*/
-static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
+static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg)
{
pte_t *ptechk;
- pte_t *pteret = ptep;
pte_t *pte_newpg[PMDS_PER_MID_PAGE];
pmd_t *pmdp;
unsigned int level;
@@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
if (ptechk == pte_pg) {
set_pmd(pmdp,
__pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE));
- if (vaddr == (addr & ~(PMD_SIZE - 1)))
- pteret = pte_offset_kernel(pmdp, addr);
pte_newpg[i] = NULL;
}
@@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg)
vaddr += PMD_SIZE;
}
- return pteret;
+ return lookup_address(addr, &level);
}
/*
@@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn)
if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
/* PMD level is missing, allocate a new one */
- ptep = alloc_p2m_pmd(addr, ptep, pte_pg);
+ ptep = alloc_p2m_pmd(addr, pte_pg);
if (!ptep)
return false;
}
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index dfd77dec8e2b..865e56cea7a0 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size)
unsigned long __ref xen_chk_extra_mem(unsigned long pfn)
{
int i;
- unsigned long addr = PFN_PHYS(pfn);
+ phys_addr_t addr = PFN_PHYS(pfn);
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
if (addr >= xen_extra_mem[i].start &&
@@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void)
int i;
for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) {
+ if (!xen_extra_mem[i].size)
+ continue;
pfn_s = PFN_DOWN(xen_extra_mem[i].start);
pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size);
for (pfn = pfn_s; pfn < pfn_e; pfn++)
@@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn)
* as a fallback if the remapping fails.
*/
static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
- unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity,
- unsigned long *released)
+ unsigned long end_pfn, unsigned long nr_pages, unsigned long *released)
{
- unsigned long len = 0;
unsigned long pfn, end;
int ret;
WARN_ON(start_pfn > end_pfn);
+ /* Release pages first. */
end = min(end_pfn, nr_pages);
for (pfn = start_pfn; pfn < end; pfn++) {
unsigned long mfn = pfn_to_mfn(pfn);
@@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn,
WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret);
if (ret == 1) {
+ (*released)++;
if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY))
break;
- len++;
} else
break;
}
- /* Need to release pages first */
- *released += len;
- *identity += set_phys_range_identity(start_pfn, end_pfn);
+ set_phys_range_identity(start_pfn, end_pfn);
}
/*
@@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn)
}
/* Update kernel mapping, but not for highmem. */
- if ((pfn << PAGE_SHIFT) >= __pa(high_memory))
+ if (pfn >= PFN_UP(__pa(high_memory - 1)))
return;
if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT),
@@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk(
unsigned long ident_pfn_iter, remap_pfn_iter;
unsigned long ident_end_pfn = start_pfn + size;
unsigned long left = size;
- unsigned long ident_cnt = 0;
unsigned int i, chunk;
WARN_ON(size == 0);
@@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
xen_remap_mfn = mfn;
/* Set identity map */
- ident_cnt += set_phys_range_identity(ident_pfn_iter,
- ident_pfn_iter + chunk);
+ set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk);
left -= chunk;
}
@@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk(
static unsigned long __init xen_set_identity_and_remap_chunk(
const struct e820entry *list, size_t map_size, unsigned long start_pfn,
unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn,
- unsigned long *identity, unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
unsigned long pfn;
unsigned long i = 0;
@@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Do not remap pages beyond the current allocation */
if (cur_pfn >= nr_pages) {
/* Identity map remaining pages */
- *identity += set_phys_range_identity(cur_pfn,
- cur_pfn + size);
+ set_phys_range_identity(cur_pfn, cur_pfn + size);
break;
}
if (cur_pfn + size > nr_pages)
@@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
if (!remap_range_size) {
pr_warning("Unable to find available pfn range, not remapping identity pages\n");
xen_set_identity_and_release_chunk(cur_pfn,
- cur_pfn + left, nr_pages, identity, released);
+ cur_pfn + left, nr_pages, released);
break;
}
/* Adjust size to fit in current e820 RAM region */
@@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
/* Update variables to reflect new mappings. */
i += size;
remap_pfn += size;
- *identity += size;
+ *remapped += size;
}
/*
@@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk(
static void __init xen_set_identity_and_remap(
const struct e820entry *list, size_t map_size, unsigned long nr_pages,
- unsigned long *released)
+ unsigned long *released, unsigned long *remapped)
{
phys_addr_t start = 0;
- unsigned long identity = 0;
unsigned long last_pfn = nr_pages;
const struct e820entry *entry;
unsigned long num_released = 0;
+ unsigned long num_remapped = 0;
int i;
/*
@@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap(
last_pfn = xen_set_identity_and_remap_chunk(
list, map_size, start_pfn,
end_pfn, nr_pages, last_pfn,
- &identity, &num_released);
+ &num_released, &num_remapped);
start = end;
}
}
*released = num_released;
+ *remapped = num_remapped;
- pr_info("Set %ld page(s) to 1-1 mapping\n", identity);
pr_info("Released %ld page(s)\n", num_released);
}
@@ -586,6 +582,7 @@ char * __init xen_memory_setup(void)
struct xen_memory_map memmap;
unsigned long max_pages;
unsigned long extra_pages = 0;
+ unsigned long remapped_pages;
int i;
int op;
@@ -635,9 +632,10 @@ char * __init xen_memory_setup(void)
* underlying RAM.
*/
xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn,
- &xen_released_pages);
+ &xen_released_pages, &remapped_pages);
extra_pages += xen_released_pages;
+ extra_pages += remapped_pages;
/*
* Clamp the amount of extra memory to a EXTRA_MEM_RATIO
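To illustrate the xen_update_mem_tables hunk above, which switches from comparing shifted byte addresses to comparing page frame numbers against PFN_UP(__pa(high_memory - 1)): a minimal userspace sketch of the 32-bit overflow that a pfn-space comparison avoids. PAGE_SHIFT and the sample values below are placeholders chosen for the example, not taken from the patch.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint32_t pfn = 0x100200;               /* a pfn beyond 4 GiB of RAM */
	uint32_t high_memory_pa = 0x38000000;  /* ~896 MiB lowmem boundary */

	/* Fragile on 32-bit: the left shift wraps, so the test can wrongly
	 * claim the page is below high_memory. */
	uint32_t shifted = pfn << PAGE_SHIFT;  /* wraps to 0x00200000 here */
	printf("byte-address compare says highmem: %d\n",
	       shifted >= high_memory_pa);

	/* Safe: compare page frame numbers instead of byte addresses. */
	uint32_t high_pfn = (high_memory_pa + (1u << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
	printf("pfn compare says highmem:          %d\n", pfn >= high_pfn);
	return 0;
}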
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index f473d268d387..69087341d9ae 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent =
struct xen_clock_event_device {
struct clock_event_device evt;
- char *name;
+ char name[16];
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
@@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu)
if (evt->irq >= 0) {
unbind_from_irqhandler(evt->irq, NULL);
evt->irq = -1;
- kfree(per_cpu(xen_clock_events, cpu).name);
- per_cpu(xen_clock_events, cpu).name = NULL;
}
}
void xen_setup_timer(int cpu)
{
- char *name;
- struct clock_event_device *evt;
+ struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu);
+ struct clock_event_device *evt = &xevt->evt;
int irq;
- evt = &per_cpu(xen_clock_events, cpu).evt;
WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
if (evt->irq >= 0)
xen_teardown_timer(cpu);
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);
- name = kasprintf(GFP_KERNEL, "timer%d", cpu);
- if (!name)
- name = "<timer kasprintf failed>";
+ snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu);
irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER|
IRQF_FORCE_RESUME|IRQF_EARLY_RESUME,
- name, NULL);
+ xevt->name, NULL);
(void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX);
memcpy(evt, xen_clockevent, sizeof(*evt));
evt->cpumask = cpumask_of(cpu);
evt->irq = irq;
- per_cpu(xen_clock_events, cpu).name = name;
}
void xen_setup_cpu_clockevents(void)
{
- BUG_ON(preemptible());
-
clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt));
}
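The xen/time.c hunks above drop the kasprintf()/kfree() pair in favour of a name[16] buffer embedded in the per-CPU structure and filled with snprintf(). A minimal userspace sketch of that idiom, with invented struct and field names, showing why no allocation-failure path is needed:

#include <stdio.h>

/* Illustrative only: the name lives inside the per-CPU structure, so there
 * is nothing to allocate, nothing to free, and no failure case to handle. */
struct clock_event_slot {
	char name[16];
	int irq;
};

int main(void)
{
	struct clock_event_slot slot = { .irq = -1 };
	int cpu = 3;

	/* snprintf never overflows the buffer; "timer%d" fits comfortably
	 * in 16 bytes for any plausible CPU number. */
	snprintf(slot.name, sizeof(slot.name), "timer%d", cpu);
	printf("%s (irq=%d)\n", slot.name, slot.irq);
	return 0;
}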
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index b57c4f91f487..9e3571a6535c 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -117,6 +117,8 @@ good_area:
if (unlikely(fault & VM_FAULT_ERROR)) {
if (fault & VM_FAULT_OOM)
goto out_of_memory;
+ else if (fault & VM_FAULT_SIGSEGV)
+ goto bad_area;
else if (fault & VM_FAULT_SIGBUS)
goto do_sigbus;
BUG();
diff --git a/block/blk-core.c b/block/blk-core.c
index 30f6153a40c2..3ad405571dcc 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q)
}
EXPORT_SYMBOL_GPL(blk_queue_bypass_end);
+void blk_set_queue_dying(struct request_queue *q)
+{
+ queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+
+ if (q->mq_ops)
+ blk_mq_wake_waiters(q);
+ else {
+ struct request_list *rl;
+
+ blk_queue_for_each_rl(rl, q) {
+ if (rl->rq_pool) {
+ wake_up(&rl->wait[BLK_RW_SYNC]);
+ wake_up(&rl->wait[BLK_RW_ASYNC]);
+ }
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(blk_set_queue_dying);
+
/**
* blk_cleanup_queue - shutdown a request queue
* @q: request queue to shutdown
@@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q)
/* mark @q DYING, no new request or merges will be allowed afterwards */
mutex_lock(&q->sysfs_lock);
- queue_flag_set_unlocked(QUEUE_FLAG_DYING, q);
+ blk_set_queue_dying(q);
spin_lock_irq(lock);
/*
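blk_set_queue_dying() above follows a common pattern: publish the new queue state first, then wake every sleeper so it re-checks that state. A rough userspace analogue using a mutex, a flag and a condition-variable broadcast; the kernel code uses its own wait queues, and all names here are invented for the sketch:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool dying;

static void *waiter(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!dying)                 /* waiters always re-check the flag */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("waiter %ld saw dying\n", (long)arg);
	return NULL;
}

int main(void)
{
	pthread_t t[2];
	for (long i = 0; i < 2; i++)
		pthread_create(&t[i], NULL, waiter, (void *)i);

	pthread_mutex_lock(&lock);
	dying = true;                  /* set the state first ... */
	pthread_cond_broadcast(&cond); /* ... then wake everyone, as the hunk does */
	pthread_mutex_unlock(&lock);

	for (int i = 0; i < 2; i++)
		pthread_join(t[i], NULL);
	return 0;
}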
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index 1630a20d5dcf..6774a0e69867 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -15,6 +15,26 @@
static void blk_mq_sysfs_release(struct kobject *kobj)
{
+ struct request_queue *q;
+
+ q = container_of(kobj, struct request_queue, mq_kobj);
+ free_percpu(q->queue_ctx);
+}
+
+static void blk_mq_ctx_release(struct kobject *kobj)
+{
+ struct blk_mq_ctx *ctx;
+
+ ctx = container_of(kobj, struct blk_mq_ctx, kobj);
+ kobject_put(&ctx->queue->mq_kobj);
+}
+
+static void blk_mq_hctx_release(struct kobject *kobj)
+{
+ struct blk_mq_hw_ctx *hctx;
+
+ hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
+ kfree(hctx);
}
struct blk_mq_ctx_sysfs_entry {
@@ -318,13 +338,13 @@ static struct kobj_type blk_mq_ktype = {
static struct kobj_type blk_mq_ctx_ktype = {
.sysfs_ops = &blk_mq_sysfs_ops,
.default_attrs = default_ctx_attrs,
- .release = blk_mq_sysfs_release,
+ .release = blk_mq_ctx_release,
};
static struct kobj_type blk_mq_hw_ktype = {
.sysfs_ops = &blk_mq_hw_sysfs_ops,
.default_attrs = default_hw_ctx_attrs,
- .release = blk_mq_sysfs_release,
+ .release = blk_mq_hctx_release,
};
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
@@ -355,6 +375,7 @@ static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
return ret;
hctx_for_each_ctx(hctx, ctx, i) {
+ kobject_get(&q->mq_kobj);
ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
if (ret)
break;
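The blk-mq-sysfs.c change above splits the release callbacks and has each ctx kobject take a reference on the queue's mq_kobj, so the per-cpu context memory is only freed once the last child object is gone. A small userspace sketch of that parent-pinned-by-children lifetime rule; the refcounting and names are simplified inventions, not the kobject API:

#include <stdio.h>
#include <stdlib.h>

struct parent {
	int refs;
	int *payload;
};

static void parent_get(struct parent *p) { p->refs++; }

static void parent_put(struct parent *p)
{
	if (--p->refs == 0) {
		free(p->payload);      /* analogous to free_percpu(queue_ctx) */
		printf("parent payload freed\n");
	}
}

int main(void)
{
	struct parent p = { .refs = 1, .payload = malloc(sizeof(int)) };

	parent_get(&p);                /* child pins the parent */
	parent_put(&p);                /* original ref dropped: payload stays */
	parent_put(&p);                /* child's release drops the last ref */
	return 0;
}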
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index 32e8dbb9ad1c..60c9d4a93fe4 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
}
/*
- * Wakeup all potentially sleeping on normal (non-reserved) tags
+ * Wakeup all potentially sleeping on tags
*/
-static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
+void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
struct blk_mq_bitmap_tags *bt;
int i, wake_index;
@@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
wake_index = bt_index_inc(wake_index);
}
+
+ if (include_reserve) {
+ bt = &tags->breserved_tags;
+ if (waitqueue_active(&bt->bs[0].wait))
+ wake_up(&bt->bs[0].wait);
+ }
}
/*
@@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
atomic_dec(&tags->active_queues);
- blk_mq_tag_wakeup_all(tags);
+ blk_mq_tag_wakeup_all(tags, false);
}
/*
@@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
* static and should never need resizing.
*/
bt_update_count(&tags->bitmap_tags, tdepth);
- blk_mq_tag_wakeup_all(tags);
+ blk_mq_tag_wakeup_all(tags, false);
return 0;
}
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 6206ed17ef76..a6fa0fc9d41a 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag);
extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
enum {
BLK_MQ_TAG_CACHE_MIN = 1,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index da1ab5641227..9ee3b87c4498 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
wake_up_all(&q->mq_freeze_wq);
}
-static void blk_mq_freeze_queue_start(struct request_queue *q)
+void blk_mq_freeze_queue_start(struct request_queue *q)
{
bool freeze;
@@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q)
blk_mq_run_queues(q, false);
}
}
+EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start);
static void blk_mq_freeze_queue_wait(struct request_queue *q)
{
@@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q)
blk_mq_freeze_queue_wait(q);
}
-static void blk_mq_unfreeze_queue(struct request_queue *q)
+void blk_mq_unfreeze_queue(struct request_queue *q)
{
bool wake;
@@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q)
wake_up_all(&q->mq_freeze_wq);
}
}
+EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue);
+
+void blk_mq_wake_waiters(struct request_queue *q)
+{
+ struct blk_mq_hw_ctx *hctx;
+ unsigned int i;
+
+ queue_for_each_hw_ctx(q, hctx, i)
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_wakeup_all(hctx->tags, true);
+
+ /*
+ * If we are called because the queue has now been marked as
+ * dying, we need to ensure that processes currently waiting on
+ * the queue are notified as well.
+ */
+ wake_up_all(&q->mq_freeze_wq);
+}
bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx)
{
@@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp,
ctx = alloc_data.ctx;
}
blk_mq_put_ctx(ctx);
- if (!rq)
+ if (!rq) {
+ blk_mq_queue_exit(q);
return ERR_PTR(-EWOULDBLOCK);
+ }
return rq;
}
EXPORT_SYMBOL(blk_mq_alloc_request);
@@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq)
}
EXPORT_SYMBOL(blk_mq_complete_request);
+int blk_mq_request_started(struct request *rq)
+{
+ return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags);
+}
+EXPORT_SYMBOL_GPL(blk_mq_request_started);
+
void blk_mq_start_request(struct request *rq)
{
struct request_queue *q = rq->q;
@@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head)
}
EXPORT_SYMBOL(blk_mq_add_to_requeue_list);
+void blk_mq_cancel_requeue_work(struct request_queue *q)
+{
+ cancel_work_sync(&q->requeue_work);
+}
+EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work);
+
void blk_mq_kick_requeue_list(struct request_queue *q)
{
kblockd_schedule_work(&q->requeue_work);
}
EXPORT_SYMBOL(blk_mq_kick_requeue_list);
+void blk_mq_abort_requeue_list(struct request_queue *q)
+{
+ unsigned long flags;
+ LIST_HEAD(rq_list);
+
+ spin_lock_irqsave(&q->requeue_lock, flags);
+ list_splice_init(&q->requeue_list, &rq_list);
+ spin_unlock_irqrestore(&q->requeue_lock, flags);
+
+ while (!list_empty(&rq_list)) {
+ struct request *rq;
+
+ rq = list_first_entry(&rq_list, struct request, queuelist);
+ list_del_init(&rq->queuelist);
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
+}
+EXPORT_SYMBOL(blk_mq_abort_requeue_list);
+
static inline bool is_flush_request(struct request *rq,
struct blk_flush_queue *fq, unsigned int tag)
{
@@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved)
break;
}
}
-
+
static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx,
struct request *rq, void *priv, bool reserved)
{
struct blk_mq_timeout_data *data = priv;
- if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags))
+ if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) {
+ /*
+ * If a request wasn't started before the queue was
+ * marked dying, kill it here or it'll go unnoticed.
+ */
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_complete_request(rq);
+ }
+ return;
+ }
+ if (rq->cmd_flags & REQ_NO_TIMEOUT)
return;
if (time_after_eq(jiffies, rq->deadline)) {
@@ -1577,10 +1641,8 @@ static void blk_mq_free_hw_queues(struct request_queue *q,
struct blk_mq_hw_ctx *hctx;
unsigned int i;
- queue_for_each_hw_ctx(q, hctx, i) {
+ queue_for_each_hw_ctx(q, hctx, i)
free_cpumask_var(hctx->cpumask);
- kfree(hctx);
- }
}
static int blk_mq_init_hctx(struct request_queue *q,
@@ -1601,7 +1663,6 @@ static int blk_mq_init_hctx(struct request_queue *q,
hctx->queue = q;
hctx->queue_num = hctx_idx;
hctx->flags = set->flags;
- hctx->cmd_size = set->cmd_size;
blk_mq_init_cpu_notifier(&hctx->cpu_notifier,
blk_mq_hctx_notify, hctx);
@@ -1939,11 +2000,9 @@ void blk_mq_free_queue(struct request_queue *q)
percpu_ref_exit(&q->mq_usage_counter);
- free_percpu(q->queue_ctx);
kfree(q->queue_hw_ctx);
kfree(q->mq_map);
- q->queue_ctx = NULL;
q->queue_hw_ctx = NULL;
q->mq_map = NULL;
diff --git a/block/blk-mq.h b/block/blk-mq.h
index 206230e64f79..4f4f943c22c3 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q);
void blk_mq_clone_flush_request(struct request *flush_rq,
struct request *orig_rq);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
+void blk_mq_wake_waiters(struct request_queue *q);
/*
* CPU hotplug helpers
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 56c025894cdf..246dfb16c3d9 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -190,6 +190,9 @@ void blk_add_timer(struct request *req)
struct request_queue *q = req->q;
unsigned long expiry;
+ if (req->cmd_flags & REQ_NO_TIMEOUT)
+ return;
+
/* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */
if (!q->mq_ops && !q->rq_timed_out_fn)
return;
diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c
index 9b3c54c1cbe8..3dd101144a58 100644
--- a/crypto/aes_generic.c
+++ b/crypto/aes_generic.c
@@ -1475,3 +1475,4 @@ module_exit(aes_fini);
MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS_CRYPTO("aes");
+MODULE_ALIAS_CRYPTO("aes-generic");
diff --git a/crypto/af_alg.c b/crypto/af_alg.c
index 1fa7bc31be63..4665b79c729a 100644
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -455,6 +455,9 @@ void af_alg_complete(struct crypto_async_request *req, int err)
{
struct af_alg_completion *completion = req->data;
+ if (err == -EINPROGRESS)
+ return;
+
completion->err = err;
complete(&completion->completion);
}
diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c
index b4485a108389..6f5bebc9bf01 100644
--- a/crypto/ansi_cprng.c
+++ b/crypto/ansi_cprng.c
@@ -477,3 +477,4 @@ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)");
module_init(prng_mod_init);
module_exit(prng_mod_fini);
MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("ansi_cprng");
diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c
index 7bd71f02d0dd..87b392a77a93 100644
--- a/crypto/blowfish_generic.c
+++ b/crypto/blowfish_generic.c
@@ -139,3 +139,4 @@ module_exit(blowfish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Blowfish Cipher Algorithm");
MODULE_ALIAS_CRYPTO("blowfish");
+MODULE_ALIAS_CRYPTO("blowfish-generic");
diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c
index 1b74c5a3e891..a02286bf319e 100644
--- a/crypto/camellia_generic.c
+++ b/crypto/camellia_generic.c
@@ -1099,3 +1099,4 @@ module_exit(camellia_fini);
MODULE_DESCRIPTION("Camellia Cipher Algorithm");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("camellia");
+MODULE_ALIAS_CRYPTO("camellia-generic");
diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c
index 84c86db67ec7..df5c72629383 100644
--- a/crypto/cast5_generic.c
+++ b/crypto/cast5_generic.c
@@ -550,3 +550,4 @@ module_exit(cast5_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast5 Cipher Algorithm");
MODULE_ALIAS_CRYPTO("cast5");
+MODULE_ALIAS_CRYPTO("cast5-generic");
diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c
index f408f0bd8de2..058c8d755d03 100644
--- a/crypto/cast6_generic.c
+++ b/crypto/cast6_generic.c
@@ -292,3 +292,4 @@ module_exit(cast6_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cast6 Cipher Algorithm");
MODULE_ALIAS_CRYPTO("cast6");
+MODULE_ALIAS_CRYPTO("cast6-generic");
diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c
index 2a062025749d..06f1b60f02b2 100644
--- a/crypto/crc32c_generic.c
+++ b/crypto/crc32c_generic.c
@@ -171,4 +171,5 @@ MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>");
MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crc32c");
+MODULE_ALIAS_CRYPTO("crc32c-generic");
MODULE_SOFTDEP("pre: crc32c");
diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c
index 08bb4f504520..c1229614c7e3 100644
--- a/crypto/crct10dif_generic.c
+++ b/crypto/crct10dif_generic.c
@@ -125,3 +125,4 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>");
MODULE_DESCRIPTION("T10 DIF CRC calculation.");
MODULE_LICENSE("GPL");
MODULE_ALIAS_CRYPTO("crct10dif");
+MODULE_ALIAS_CRYPTO("crct10dif-generic");
diff --git a/crypto/des_generic.c b/crypto/des_generic.c
index 42912948776b..a71720544d11 100644
--- a/crypto/des_generic.c
+++ b/crypto/des_generic.c
@@ -983,8 +983,6 @@ static struct crypto_alg des_algs[2] = { {
.cia_decrypt = des3_ede_decrypt } }
} };
-MODULE_ALIAS_CRYPTO("des3_ede");
-
static int __init des_generic_mod_init(void)
{
return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs));
@@ -1001,4 +999,7 @@ module_exit(des_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms");
MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>");
-MODULE_ALIAS("des");
+MODULE_ALIAS_CRYPTO("des");
+MODULE_ALIAS_CRYPTO("des-generic");
+MODULE_ALIAS_CRYPTO("des3_ede");
+MODULE_ALIAS_CRYPTO("des3_ede-generic");
diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c
index 4e97fae9666f..bac70995e064 100644
--- a/crypto/ghash-generic.c
+++ b/crypto/ghash-generic.c
@@ -173,3 +173,4 @@ module_exit(ghash_mod_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("GHASH Message Digest Algorithm");
MODULE_ALIAS_CRYPTO("ghash");
+MODULE_ALIAS_CRYPTO("ghash-generic");
diff --git a/crypto/krng.c b/crypto/krng.c
index 67c88b331210..0224841b6579 100644
--- a/crypto/krng.c
+++ b/crypto/krng.c
@@ -63,3 +63,4 @@ module_exit(krng_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kernel Random Number Generator");
MODULE_ALIAS_CRYPTO("stdrng");
+MODULE_ALIAS_CRYPTO("krng");
diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c
index 3d0f9df30ac9..f550b5d94630 100644
--- a/crypto/salsa20_generic.c
+++ b/crypto/salsa20_generic.c
@@ -249,3 +249,4 @@ module_exit(salsa20_generic_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm");
MODULE_ALIAS_CRYPTO("salsa20");
+MODULE_ALIAS_CRYPTO("salsa20-generic");
diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c
index a53b5e2af335..94970a794975 100644
--- a/crypto/serpent_generic.c
+++ b/crypto/serpent_generic.c
@@ -667,3 +667,4 @@ MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Ci
MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>");
MODULE_ALIAS_CRYPTO("tnepres");
MODULE_ALIAS_CRYPTO("serpent");
+MODULE_ALIAS_CRYPTO("serpent-generic");
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 039e58cfa155..a3e50c37eb6f 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -154,3 +154,4 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha1");
+MODULE_ALIAS_CRYPTO("sha1-generic");
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 5eb21b120033..b001ff5c2efc 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -385,4 +385,6 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm");
MODULE_ALIAS_CRYPTO("sha224");
+MODULE_ALIAS_CRYPTO("sha224-generic");
MODULE_ALIAS_CRYPTO("sha256");
+MODULE_ALIAS_CRYPTO("sha256-generic");
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 8d0b19ed4f4b..1c3c3767e079 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -289,4 +289,6 @@ MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms");
MODULE_ALIAS_CRYPTO("sha384");
+MODULE_ALIAS_CRYPTO("sha384-generic");
MODULE_ALIAS_CRYPTO("sha512");
+MODULE_ALIAS_CRYPTO("sha512-generic");
diff --git a/crypto/tea.c b/crypto/tea.c
index 495be2d0077d..b70b441c7d1e 100644
--- a/crypto/tea.c
+++ b/crypto/tea.c
@@ -270,6 +270,7 @@ static void __exit tea_mod_fini(void)
crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs));
}
+MODULE_ALIAS_CRYPTO("tea");
MODULE_ALIAS_CRYPTO("xtea");
MODULE_ALIAS_CRYPTO("xeta");
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 6e5651c66cf8..321bc6ff2a9d 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -676,6 +676,7 @@ static void __exit tgr192_mod_fini(void)
crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs));
}
+MODULE_ALIAS_CRYPTO("tgr192");
MODULE_ALIAS_CRYPTO("tgr160");
MODULE_ALIAS_CRYPTO("tgr128");
diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c
index 523ad8c4e359..ebf7a3efb572 100644
--- a/crypto/twofish_generic.c
+++ b/crypto/twofish_generic.c
@@ -212,3 +212,4 @@ module_exit(twofish_mod_fini);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION ("Twofish Cipher Algorithm");
MODULE_ALIAS_CRYPTO("twofish");
+MODULE_ALIAS_CRYPTO("twofish-generic");
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 0de42eb3d040..7ee5a043a988 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1167,6 +1167,7 @@ static void __exit wp512_mod_fini(void)
crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs));
}
+MODULE_ALIAS_CRYPTO("wp512");
MODULE_ALIAS_CRYPTO("wp384");
MODULE_ALIAS_CRYPTO("wp256");
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 694d5a70d6ce..c70d6e45dc10 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -134,8 +134,6 @@ source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
-source "drivers/soc/Kconfig"
-
source "drivers/clk/Kconfig"
source "drivers/hwspinlock/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 67d2334dc41e..527a6da8d539 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/
obj-y += tty/
obj-y += char/
-# gpu/ comes after char for AGP vs DRM startup
+# iommu/ comes before gpu as gpu drivers use iommu controllers
+obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
+
+# gpu/ comes after char for AGP vs DRM startup and after iommu
obj-y += gpu/
obj-$(CONFIG_CONNECTOR) += connector/
@@ -141,7 +144,6 @@ obj-y += clk/
obj-$(CONFIG_MAILBOX) += mailbox/
obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
-obj-$(CONFIG_IOMMU_SUPPORT) += iommu/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/
diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c
index 1fdf5e07a1c7..1020b1b53a17 100644
--- a/drivers/acpi/acpi_processor.c
+++ b/drivers/acpi/acpi_processor.c
@@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
acpi_status status;
int ret;
- if (pr->apic_id == -1)
+ if (pr->phys_id == -1)
return -ENODEV;
status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta);
@@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr)
cpu_maps_update_begin();
cpu_hotplug_begin();
- ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id);
+ ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id);
if (ret)
goto out;
ret = arch_register_cpu(pr->id);
if (ret) {
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
goto out;
}
@@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device)
union acpi_object object = { 0 };
struct acpi_buffer buffer = { sizeof(union acpi_object), &object };
struct acpi_processor *pr = acpi_driver_data(device);
- int apic_id, cpu_index, device_declaration = 0;
+ int phys_id, cpu_index, device_declaration = 0;
acpi_status status = AE_OK;
static int cpu0_initialized;
unsigned long long value;
@@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device)
pr->acpi_id = value;
}
- apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id);
- if (apic_id < 0)
- acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n");
- pr->apic_id = apic_id;
+ phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id);
+ if (phys_id < 0)
+ acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n");
+ pr->phys_id = phys_id;
- cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id);
+ cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id);
if (!cpu0_initialized && !acpi_has_cpu_in_madt()) {
cpu0_initialized = 1;
- /* Handle UP system running SMP kernel, with no LAPIC in MADT */
+ /*
+ * Handle UP system running SMP kernel, with no CPU
+ * entry in MADT
+ */
if ((cpu_index == -1) && (num_online_cpus() == 1))
cpu_index = 0;
}
@@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device)
/* Remove the CPU. */
arch_unregister_cpu(pr->id);
- acpi_unmap_lsapic(pr->id);
+ acpi_unmap_cpu(pr->id);
cpu_hotplug_done();
cpu_maps_update_done();
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index c2daa85fc9f7..c0d44d394ca3 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device)
device->power.state = ACPI_STATE_UNKNOWN;
if (!acpi_device_is_present(device))
- return 0;
+ return -ENXIO;
result = acpi_device_get_power(device, &state);
if (result)
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
index a27d31d1ba24..9dcf83682e36 100644
--- a/drivers/acpi/int340x_thermal.c
+++ b/drivers/acpi/int340x_thermal.c
@@ -14,10 +14,10 @@
#include "internal.h"
-#define DO_ENUMERATION 0x01
+#define INT3401_DEVICE 0X01
static const struct acpi_device_id int340x_thermal_device_ids[] = {
- {"INT3400", DO_ENUMERATION },
- {"INT3401"},
+ {"INT3400"},
+ {"INT3401", INT3401_DEVICE},
{"INT3402"},
{"INT3403"},
{"INT3404"},
@@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev,
const struct acpi_device_id *id)
{
#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
- if (id->driver_data == DO_ENUMERATION)
+ acpi_create_platform_device(adev);
+#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE)
+ /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */
+ if (id->driver_data == INT3401_DEVICE)
acpi_create_platform_device(adev);
#endif
return 1;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 5277a0ee5704..b1def411c0b8 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -512,7 +512,6 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- dev->irq = 0;
dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index 342942f90a10..02e48394276c 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id)
unsigned long madt_end, entry;
static struct acpi_table_madt *madt;
static int read_madt;
- int apic_id = -1;
+ int phys_id = -1; /* CPU hardware ID */
if (!read_madt) {
if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
@@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id)
}
if (!madt)
- return apic_id;
+ return phys_id;
entry = (unsigned long)madt;
madt_end = entry + madt->header.length;
@@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id)
struct acpi_subtable_header *header =
(struct acpi_subtable_header *)entry;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
- if (!map_lapic_id(header, acpi_id, &apic_id))
+ if (!map_lapic_id(header, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
- if (!map_x2apic_id(header, type, acpi_id, &apic_id))
+ if (!map_x2apic_id(header, type, acpi_id, &phys_id))
break;
} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
- if (!map_lsapic_id(header, type, acpi_id, &apic_id))
+ if (!map_lsapic_id(header, type, acpi_id, &phys_id))
break;
}
entry += header->length;
}
- return apic_id;
+ return phys_id;
}
static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
@@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
union acpi_object *obj;
struct acpi_subtable_header *header;
- int apic_id = -1;
+ int phys_id = -1;
if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
goto exit;
@@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
header = (struct acpi_subtable_header *)obj->buffer.pointer;
if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
- map_lapic_id(header, acpi_id, &apic_id);
+ map_lapic_id(header, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
- map_lsapic_id(header, type, acpi_id, &apic_id);
+ map_lsapic_id(header, type, acpi_id, &phys_id);
else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
- map_x2apic_id(header, type, acpi_id, &apic_id);
+ map_x2apic_id(header, type, acpi_id, &phys_id);
exit:
kfree(buffer.pointer);
- return apic_id;
+ return phys_id;
}
-int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id)
+int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = map_mat_entry(handle, type, acpi_id);
- if (apic_id == -1)
- apic_id = map_madt_entry(type, acpi_id);
+ phys_id = map_mat_entry(handle, type, acpi_id);
+ if (phys_id == -1)
+ phys_id = map_madt_entry(type, acpi_id);
- return apic_id;
+ return phys_id;
}
-int acpi_map_cpuid(int apic_id, u32 acpi_id)
+int acpi_map_cpuid(int phys_id, u32 acpi_id)
{
#ifdef CONFIG_SMP
int i;
#endif
- if (apic_id == -1) {
+ if (phys_id == -1) {
/*
* On UP processor, there is no _MAT or MADT table.
- * So above apic_id is always set to -1.
+ * So above phys_id is always set to -1.
*
* BIOS may define multiple CPU handles even for UP processor.
* For example,
@@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
* Processor (CPU3, 0x03, 0x00000410, 0x06) {}
* }
*
- * Ignores apic_id and always returns 0 for the processor
+ * Ignores phys_id and always returns 0 for the processor
* handle with acpi id 0 if nr_cpu_ids is 1.
* This should be the case if SMP tables are not found.
* Return -1 for other CPU's handle.
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
if (nr_cpu_ids <= 1 && acpi_id == 0)
return acpi_id;
else
- return apic_id;
+ return phys_id;
}
#ifdef CONFIG_SMP
for_each_possible_cpu(i) {
- if (cpu_physical_id(i) == apic_id)
+ if (cpu_physical_id(i) == phys_id)
return i;
}
#else
/* In UP kernel, only processor 0 is valid */
- if (apic_id == 0)
- return apic_id;
+ if (phys_id == 0)
+ return phys_id;
#endif
return -1;
}
int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
{
- int apic_id;
+ int phys_id;
- apic_id = acpi_get_apicid(handle, type, acpi_id);
+ phys_id = acpi_get_phys_id(handle, type, acpi_id);
- return acpi_map_cpuid(apic_id, acpi_id);
+ return acpi_map_cpuid(phys_id, acpi_id);
}
EXPORT_SYMBOL_GPL(acpi_get_cpuid);
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index 499536504698..87b704e41877 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -985,8 +985,6 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
state->flags = 0;
switch (cx->type) {
case ACPI_STATE_C1:
- if (cx->entry_method != ACPI_CSTATE_FFH)
- state->flags |= CPUIDLE_FLAG_TIME_INVALID;
state->enter = acpi_idle_enter_c1;
state->enter_dead = acpi_idle_play_dead;
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 16914cc30882..dc4d8960684a 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device)
if (device->wakeup.flags.valid)
acpi_power_resources_list_free(&device->wakeup.resources);
- if (!device->flags.power_manageable)
+ if (!device->power.flags.power_resources)
return;
for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
@@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device)
device->power.flags.power_resources)
device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1;
- if (acpi_bus_init_power(device)) {
- acpi_free_power_resources_lists(device);
+ if (acpi_bus_init_power(device))
device->flags.power_manageable = 0;
- }
}
static void acpi_bus_get_flags(struct acpi_device *device)
@@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device)
/* Skip devices that are not present. */
if (!acpi_device_is_present(device)) {
device->flags.visited = false;
+ device->flags.power_manageable = 0;
return;
}
if (device->handler)
goto ok;
if (!device->flags.initialized) {
- acpi_bus_update_power(device, NULL);
+ device->flags.power_manageable =
+ device->power.states[ACPI_STATE_D0].flags.valid;
+ if (acpi_bus_init_power(device))
+ device->flags.power_manageable = 0;
+
device->flags.initialized = true;
}
device->flags.visited = false;
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 1eaadff2e198..032db459370f 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -505,6 +505,33 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
DMI_MATCH(DMI_PRODUCT_NAME, "HP ENVY 15 Notebook PC"),
},
},
+
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 870Z5E/880Z5E/680Z5E",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "870Z5E/880Z5E/680Z5E"),
+ },
+ },
+ {
+ .callback = video_disable_native_backlight,
+ .ident = "SAMSUNG 370R4E/370R4V/370R5E/3570RE/370R5V",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"),
+ },
+ },
+
+ {
+ /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */
+ .callback = video_disable_native_backlight,
+ .ident = "Dell XPS15 L521X",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"),
+ },
+ },
{}
};
diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig
index a3a13605a9c4..5f601553b9b0 100644
--- a/drivers/ata/Kconfig
+++ b/drivers/ata/Kconfig
@@ -835,6 +835,7 @@ config PATA_AT32
config PATA_AT91
tristate "PATA support for AT91SAM9260"
depends on ARM && SOC_AT91SAM9
+ depends on !ARCH_MULTIPLATFORM
help
This option enables support for IDE devices on the Atmel AT91SAM9260 SoC.
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 49f1e6890587..33bb06e006c9 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
{ PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
- { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
{ PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index feeb8f1e2fe8..cbcd20810355 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap)
* xgene_ahci_qc_issue - Issue commands to the device
* @qc: Command to issue
*
- * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
- * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
- * state machine goes into the CMFatalErrorUpdate state and locks up. By
- * restarting the dma engine, it removes the controller out of lock up state.
+ * Due to Hardware errata for IDENTIFY DEVICE command and PACKET
+ * command of ATAPI protocol set, the controller cannot clear the BSY bit
+ * after receiving the PIO setup FIS. This results in the DMA state machine
+ * going into the CMFatalErrorUpdate state and locking up. By restarting the
+ * DMA engine, the controller is brought out of the locked-up state.
*/
static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
{
@@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
struct xgene_ahci_context *ctx = hpriv->plat_data;
int rc = 0;
- if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+ if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) ||
+ (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET)))
xgene_ahci_restart_engine(ap);
rc = ahci_qc_issue(qc);
@@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
*
* Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP
*/
- id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
+ id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8));
return 0;
}
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 97683e45ab04..61a9c07e0dff 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep)
devslp = readl(port_mmio + PORT_DEVSLP);
if (!(devslp & PORT_DEVSLP_DSP)) {
- dev_err(ap->host->dev, "port does not support device sleep\n");
+ dev_info(ap->host->dev, "port does not support device sleep\n");
return;
}
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 5c84fb5c3372..d1a05f9bb91f 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
/* devices that don't properly handle queued TRIM commands */
- { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
- { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
+ ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
+ /*
+ * As defined, the DRAT (Deterministic Read After Trim) and RZAT
+ * (Return Zero After Trim) flags in the ATA Command Set are
+ * unreliable in the sense that they only define what happens if
+ * the device successfully executed the DSM TRIM command. TRIM
+ * is only advisory, however, and the device is free to silently
+ * ignore all or parts of the request.
+ *
+ * Whitelist drives that are known to reliably return zeroes
+ * after TRIM.
+ */
+
+ /*
+ * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude
+ * that model before whitelisting all other intel SSDs.
+ */
+ { "INTEL*SSDSC2MH*", NULL, 0, },
+
+ { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
+ { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, },
/*
* Some WD SATA-I drives spin up and down erratically when the link
@@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
return NULL;
for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
- tag = tag < max_queue ? tag : 0;
+ if (ap->flags & ATA_FLAG_LOWTAG)
+ tag = i;
+ else
+ tag = tag < max_queue ? tag : 0;
/* the last tag is reserved for internal command. */
if (tag == ATA_TAG_INTERNAL)
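The blacklist entries added above use shell-style globs such as "Micron_M[56]*" and "ST[1248][0248]0[FH]*" matched against the reported model string. As a rough illustration of the pattern semantics only: libata uses its own in-kernel matcher, and the sample model strings below are merely plausible examples.

#include <fnmatch.h>
#include <stdio.h>

int main(void)
{
	const char *patterns[] = { "Micron_M[56]*", "Crucial_CT*SSD*",
				   "ST[1248][0248]0[FH]*" };
	const char *models[]   = { "Micron_M550_MTFDDAK256MAY",
				   "Crucial_CT960M500SSD1",
				   "ST480HM000" };

	for (int p = 0; p < 3; p++)
		for (int m = 0; m < 3; m++)
			if (fnmatch(patterns[p], models[m], 0) == 0)
				printf("\"%s\" matches \"%s\"\n",
				       models[m], patterns[p]);
	return 0;
}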
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index 3dbec8954c86..8d00c2638bed 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command)
return NULL;
}
+EXPORT_SYMBOL_GPL(ata_get_cmd_descript);
/**
* ata_eh_link_report - report error handling to user
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index e364e86e84d7..6abd17a85b13 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
rbuf[15] = lowest_aligned;
if (ata_id_has_trim(args->id)) {
- rbuf[14] |= 0x80; /* TPE */
+ rbuf[14] |= 0x80; /* LBPME */
- if (ata_id_has_zero_after_trim(args->id))
- rbuf[14] |= 0x40; /* TPRZ */
+ if (ata_id_has_zero_after_trim(args->id) &&
+ dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) {
+ ata_dev_info(dev, "Enabling discard_zeroes_data\n");
+ rbuf[14] |= 0x40; /* LBPRZ */
+ }
}
}
-
return 0;
}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index db90aa35cb71..2e86e3b85266 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap)
DPRINTK("ENTER\n");
cancel_delayed_work_sync(&ap->sff_pio_task);
+
+ /*
+ * We wanna reset the HSM state to IDLE. If we do so without
+ * grabbing the port lock, critical sections protected by it which
+ * expect the HSM state to stay stable may get surprised. For
+ * example, we may set IDLE in between the time
+ * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls
+ * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG().
+ */
+ spin_lock_irq(ap->lock);
ap->hsm_task_state = HSM_ST_IDLE;
+ spin_unlock_irq(ap->lock);
+
ap->sff_pio_task_link = NULL;
if (ata_msg_ctl(ap))
diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c
index c7ddef89e7b0..8e8248179d20 100644
--- a/drivers/ata/sata_dwc_460ex.c
+++ b/drivers/ata/sata_dwc_460ex.c
@@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
if (err) {
dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns"
" %d\n", __func__, err);
- goto error_out;
+ return err;
}
/* Enable DMA */
@@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq)
sata_dma_regs);
return 0;
-
-error_out:
- dma_dwc_exit(hsdev);
-
- return err;
}
static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val)
@@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
char *ver = (char *)&versionr;
u8 *base = NULL;
int err = 0;
- int irq, rc;
+ int irq;
struct ata_host *host;
struct ata_port_info pi = sata_dwc_port_info[0];
const struct ata_port_info *ppi[] = { &pi, NULL };
@@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev)
if (irq == NO_IRQ) {
dev_err(&ofdev->dev, "no SATA DMA irq\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Get physical SATA DMA register base address */
@@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev)
dev_err(&ofdev->dev, "ioremap failed for AHBDMA register"
" address\n");
err = -ENODEV;
- goto error_out;
+ goto error_iomap;
}
/* Save dev for later use in dev_xxx() routines */
host_pvt.dwc_dev = &ofdev->dev;
/* Initialize AHB DMAC */
- dma_dwc_init(hsdev, irq);
+ err = dma_dwc_init(hsdev, irq);
+ if (err)
+ goto error_dma_iomap;
/* Enable SATA Interrupts */
sata_dwc_enable_interrupts(hsdev);
@@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
* device discovery process, invoking our port_start() handler &
* error_handler() to execute a dummy Softreset EH session
*/
- rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
-
- if (rc != 0)
+ err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht);
+ if (err)
dev_err(&ofdev->dev, "failed to activate host");
dev_set_drvdata(&ofdev->dev, host);
@@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev)
error_out:
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
-
+error_dma_iomap:
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
error_iomap:
iounmap(base);
error_kmalloc:
@@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev)
/* Free SATA DMA resources */
dma_dwc_exit(hsdev);
+ iounmap((void __iomem *)host_pvt.sata_dma_regs);
iounmap(hsdev->reg_base);
kfree(hsdev);
kfree(host);
diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c
index d81b20ddb527..ea655949023f 100644
--- a/drivers/ata/sata_sil24.c
+++ b/drivers/ata/sata_sil24.c
@@ -246,7 +246,7 @@ enum {
/* host flags */
SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA |
ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA |
- ATA_FLAG_AN | ATA_FLAG_PMP,
+ ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG,
SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */
IRQ_STAT_4PORTS = 0xf,
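sata_sil24 now sets ATA_FLAG_LOWTAG, which the ata_qc_new() hunk earlier honours by always scanning from tag 0 instead of round-robining from last_tag + 1. A compact sketch of the two selection policies; the constants and helper names below are invented for the example:

#include <stdbool.h>
#include <stdio.h>

#define MAX_QUEUE 8
#define INTERNAL_TAG (MAX_QUEUE - 1)

static int pick_tag(bool lowtag, unsigned last_tag, const bool *in_use)
{
	for (unsigned i = 0, tag = last_tag + 1; i < MAX_QUEUE; i++, tag++) {
		if (lowtag)
			tag = i;               /* always start from the bottom */
		else
			tag = tag < MAX_QUEUE ? tag : 0;

		if (tag == INTERNAL_TAG)       /* reserved for internal commands */
			continue;
		if (!in_use[tag])
			return tag;
	}
	return -1;
}

int main(void)
{
	bool in_use[MAX_QUEUE] = { [0] = true, [1] = true, [2] = true };

	printf("round-robin picks %d\n", pick_tag(false, 5, in_use));
	printf("lowtag picks      %d\n", pick_tag(true, 5, in_use));
	return 0;
}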
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 6a103a35ea9b..0d8780c04a5e 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -2088,7 +2088,7 @@ EXPORT_SYMBOL_GPL(of_genpd_del_provider);
* Returns a valid pointer to struct generic_pm_domain on success or ERR_PTR()
* on failure.
*/
-static struct generic_pm_domain *of_genpd_get_from_provider(
+struct generic_pm_domain *of_genpd_get_from_provider(
struct of_phandle_args *genpdspec)
{
struct generic_pm_domain *genpd = ERR_PTR(-ENOENT);
@@ -2108,6 +2108,7 @@ static struct generic_pm_domain *of_genpd_get_from_provider(
return genpd;
}
+EXPORT_SYMBOL_GPL(of_genpd_get_from_provider);
/**
* genpd_dev_pm_detach - Detach a device from its PM domain.
diff --git a/drivers/base/power/opp.c b/drivers/base/power/opp.c
index d24dd614a0bd..106c69359306 100644
--- a/drivers/base/power/opp.c
+++ b/drivers/base/power/opp.c
@@ -108,6 +108,14 @@ static LIST_HEAD(dev_opp_list);
/* Lock to allow exclusive modification to the device and opp lists */
static DEFINE_MUTEX(dev_opp_list_lock);
+#define opp_rcu_lockdep_assert() \
+do { \
+ rcu_lockdep_assert(rcu_read_lock_held() || \
+ lockdep_is_held(&dev_opp_list_lock), \
+ "Missing rcu_read_lock() or " \
+ "dev_opp_list_lock protection"); \
+} while (0)
+
/**
* find_device_opp() - find device_opp struct using device pointer
* @dev: device pointer used to lookup device OPPs
@@ -208,9 +216,7 @@ EXPORT_SYMBOL_GPL(dev_pm_opp_get_freq);
* This function returns the number of available opps if there are any,
* else returns 0 if none or the corresponding error value.
*
- * Locking: This function must be called under rcu_read_lock(). This function
- * internally references two RCU protected structures: device_opp and opp which
- * are safe as long as we are under a common RCU locked section.
+ * Locking: This function takes rcu_read_lock().
*/
int dev_pm_opp_get_opp_count(struct device *dev)
{
@@ -218,11 +224,14 @@ int dev_pm_opp_get_opp_count(struct device *dev)
struct dev_pm_opp *temp_opp;
int count = 0;
+ rcu_read_lock();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
- int r = PTR_ERR(dev_opp);
- dev_err(dev, "%s: device OPP not found (%d)\n", __func__, r);
- return r;
+ count = PTR_ERR(dev_opp);
+ dev_err(dev, "%s: device OPP not found (%d)\n",
+ __func__, count);
+ goto out_unlock;
}
list_for_each_entry_rcu(temp_opp, &dev_opp->opp_list, node) {
@@ -230,6 +239,8 @@ int dev_pm_opp_get_opp_count(struct device *dev)
count++;
}
+out_unlock:
+ rcu_read_unlock();
return count;
}
EXPORT_SYMBOL_GPL(dev_pm_opp_get_opp_count);
@@ -267,6 +278,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_exact(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
dev_opp = find_device_opp(dev);
if (IS_ERR(dev_opp)) {
int r = PTR_ERR(dev_opp);
@@ -313,6 +326,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_ceil(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -361,6 +376,8 @@ struct dev_pm_opp *dev_pm_opp_find_freq_floor(struct device *dev,
struct device_opp *dev_opp;
struct dev_pm_opp *temp_opp, *opp = ERR_PTR(-ERANGE);
+ opp_rcu_lockdep_assert();
+
if (!dev || !freq) {
dev_err(dev, "%s: Invalid argument freq=%p\n", __func__, freq);
return ERR_PTR(-EINVAL);
@@ -783,9 +800,15 @@ void of_free_opp_table(struct device *dev)
/* Check for existing list for 'dev' */
dev_opp = find_device_opp(dev);
- if (WARN(IS_ERR(dev_opp), "%s: dev_opp: %ld\n", dev_name(dev),
- PTR_ERR(dev_opp)))
+ if (IS_ERR(dev_opp)) {
+ int error = PTR_ERR(dev_opp);
+ if (error != -ENODEV)
+ WARN(1, "%s: dev_opp: %d\n",
+ IS_ERR_OR_NULL(dev) ?
+ "Invalid device" : dev_name(dev),
+ error);
return;
+ }
/* Hold our list modification lock here */
mutex_lock(&dev_opp_list_lock);
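opp_rcu_lockdep_assert() above is wrapped in do { ... } while (0) so the multi-statement macro expands to a single statement and composes safely with unbraced if/else. A minimal standalone illustration of that macro idiom; the check itself is a plain assert here, not the RCU lockdep test:

#include <assert.h>
#include <stdio.h>

#define check_locked(held)						\
do {									\
	assert(held);							\
	fprintf(stderr, "lock check passed\n");				\
} while (0)

int main(void)
{
	int have_lock = 1;

	if (have_lock)
		check_locked(have_lock);   /* expands to exactly one statement */
	else
		puts("not locked");
	return 0;
}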
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ae9f615382f6..aa2224aa7caa 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -530,7 +530,7 @@ static int null_add_dev(void)
goto out_cleanup_queues;
nullb->q = blk_mq_init_queue(&nullb->tag_set);
- if (!nullb->q) {
+ if (IS_ERR(nullb->q)) {
rv = -ENOMEM;
goto out_cleanup_tags;
}
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index b1d5d8797315..d826bf3e62c8 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -106,7 +106,7 @@ struct nvme_queue {
dma_addr_t cq_dma_addr;
u32 __iomem *q_db;
u16 q_depth;
- u16 cq_vector;
+ s16 cq_vector;
u16 sq_head;
u16 sq_tail;
u16 cq_head;
@@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx,
cmd->fn = handler;
cmd->ctx = ctx;
cmd->aborted = 0;
+ blk_mq_start_request(blk_mq_rq_from_pdu(cmd));
}
/* Special values must be less than 0x1000 */
@@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx,
if (unlikely(status)) {
if (!(status & NVME_SC_DNR || blk_noretry_request(req))
&& (jiffies - req->start_time) < req->timeout) {
+ unsigned long flags;
+
blk_mq_requeue_request(req);
- blk_mq_kick_requeue_list(req->q);
+ spin_lock_irqsave(req->q->queue_lock, flags);
+ if (!blk_queue_stopped(req->q))
+ blk_mq_kick_requeue_list(req->q);
+ spin_unlock_irqrestore(req->q->queue_lock, flags);
return;
}
req->errors = nvme_error_status(status);
@@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
}
}
- blk_mq_start_request(req);
-
nvme_set_info(cmd, iod, req_completion);
spin_lock_irq(&nvmeq->q_lock);
if (req->cmd_flags & REQ_DISCARD)
@@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev)
if (IS_ERR(req))
return PTR_ERR(req);
+ req->cmd_flags |= REQ_NO_TIMEOUT;
cmd_info = blk_mq_rq_to_pdu(req);
nvme_set_info(cmd_info, req, async_req_completion);
@@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req)
struct nvme_command cmd;
if (!nvmeq->qid || cmd_rq->aborted) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev_list_lock, flags);
if (work_busy(&dev->reset_work))
- return;
+ goto out;
list_del_init(&dev->node);
dev_warn(&dev->pci_dev->dev,
"I/O %d QID %d timeout, reset controller\n",
req->tag, nvmeq->qid);
dev->reset_workfn = nvme_reset_failed_dev;
queue_work(nvme_workq, &dev->reset_work);
+ out:
+ spin_unlock_irqrestore(&dev_list_lock, flags);
return;
}
@@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx,
void *ctx;
nvme_completion_fn fn;
struct nvme_cmd_info *cmd;
- static struct nvme_completion cqe = {
- .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1),
- };
+ struct nvme_completion cqe;
+
+ if (!blk_mq_request_started(req))
+ return;
cmd = blk_mq_rq_to_pdu(req);
if (cmd->ctx == CMD_CTX_CANCELLED)
return;
+ if (blk_queue_dying(req->q))
+ cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1);
+ else
+ cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1);
+
+
dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n",
req->tag, nvmeq->qid);
ctx = cancel_cmd_info(cmd, &fn);
@@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved)
struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req);
struct nvme_queue *nvmeq = cmd->nvmeq;
- dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
- nvmeq->qid);
- if (nvmeq->dev->initialized)
- nvme_abort_req(req);
-
/*
* The aborted req will be completed on receiving the abort req.
* We enable the timer again. If hit twice, it'll cause a device reset,
* as the device then is in a faulty state.
*/
- return BLK_EH_RESET_TIMER;
+ int ret = BLK_EH_RESET_TIMER;
+
+ dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag,
+ nvmeq->qid);
+
+ spin_lock_irq(&nvmeq->q_lock);
+ if (!nvmeq->dev->initialized) {
+ /*
+ * Force cancelled command frees the request, which requires we
+ * return BLK_EH_NOT_HANDLED.
+ */
+ nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved);
+ ret = BLK_EH_NOT_HANDLED;
+ } else
+ nvme_abort_req(req);
+ spin_unlock_irq(&nvmeq->q_lock);
+
+ return ret;
}
static void nvme_free_queue(struct nvme_queue *nvmeq)
@@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
*/
static int nvme_suspend_queue(struct nvme_queue *nvmeq)
{
- int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
+ int vector;
spin_lock_irq(&nvmeq->q_lock);
+ if (nvmeq->cq_vector == -1) {
+ spin_unlock_irq(&nvmeq->q_lock);
+ return 1;
+ }
+ vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
nvmeq->dev->online_queues--;
+ nvmeq->cq_vector = -1;
spin_unlock_irq(&nvmeq->q_lock);
irq_set_affinity_hint(vector, NULL);
@@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
adapter_delete_sq(dev, qid);
adapter_delete_cq(dev, qid);
}
+ if (!qid && dev->admin_q)
+ blk_mq_freeze_queue_start(dev->admin_q);
nvme_clear_queue(nvmeq);
}
static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
- int depth, int vector)
+ int depth)
{
struct device *dmadev = &dev->pci_dev->dev;
struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL);
@@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
nvmeq->cq_phase = 1;
nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
nvmeq->q_depth = depth;
- nvmeq->cq_vector = vector;
nvmeq->qid = qid;
dev->queue_count++;
dev->queues[qid] = nvmeq;
@@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid)
struct nvme_dev *dev = nvmeq->dev;
int result;
+ nvmeq->cq_vector = qid - 1;
result = adapter_alloc_cq(dev, qid, nvmeq);
if (result < 0)
return result;
@@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = {
.timeout = nvme_timeout,
};
+static void nvme_dev_remove_admin(struct nvme_dev *dev)
+{
+ if (dev->admin_q && !blk_queue_dying(dev->admin_q)) {
+ blk_cleanup_queue(dev->admin_q);
+ blk_mq_free_tag_set(&dev->admin_tagset);
+ }
+}
+
static int nvme_alloc_admin_tags(struct nvme_dev *dev)
{
if (!dev->admin_q) {
@@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev)
return -ENOMEM;
dev->admin_q = blk_mq_init_queue(&dev->admin_tagset);
- if (!dev->admin_q) {
+ if (IS_ERR(dev->admin_q)) {
blk_mq_free_tag_set(&dev->admin_tagset);
return -ENOMEM;
}
- }
+ if (!blk_get_queue(dev->admin_q)) {
+ nvme_dev_remove_admin(dev);
+ return -ENODEV;
+ }
+ } else
+ blk_mq_unfreeze_queue(dev->admin_q);
return 0;
}
-static void nvme_free_admin_tags(struct nvme_dev *dev)
-{
- if (dev->admin_q)
- blk_mq_free_tag_set(&dev->admin_tagset);
-}
-
static int nvme_configure_admin_queue(struct nvme_dev *dev)
{
int result;
@@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
nvmeq = dev->queues[0];
if (!nvmeq) {
- nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0);
+ nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH);
if (!nvmeq)
return -ENOMEM;
}
@@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev)
if (result)
goto free_nvmeq;
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto free_nvmeq;
-
+ nvmeq->cq_vector = 0;
result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
if (result)
- goto free_tags;
+ goto free_nvmeq;
return result;
- free_tags:
- nvme_free_admin_tags(dev);
free_nvmeq:
nvme_free_queues(dev, 0);
return result;
@@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev)
unsigned i;
for (i = dev->queue_count; i <= dev->max_qid; i++)
- if (!nvme_alloc_queue(dev, i, dev->q_depth, i - 1))
+ if (!nvme_alloc_queue(dev, i, dev->q_depth))
break;
for (i = dev->online_queues; i <= dev->queue_count - 1; i++)
@@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
break;
if (!schedule_timeout(ADMIN_TIMEOUT) ||
fatal_signal_pending(current)) {
+ /*
+ * Disable the controller first since we can't trust it
+ * at this point, but leave the admin queue enabled
+ * until all queue deletion requests are flushed.
+ * FIXME: This may take a while if there are more h/w
+ * queues than admin tags.
+ */
set_current_state(TASK_RUNNING);
-
nvme_disable_ctrl(dev, readq(&dev->bar->cap));
- nvme_disable_queue(dev, 0);
-
- send_sig(SIGKILL, dq->worker->task, 1);
+ nvme_clear_queue(dev->queues[0]);
flush_kthread_worker(dq->worker);
+ nvme_disable_queue(dev, 0);
return;
}
}
@@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work)
{
struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
cmdinfo.work);
- allow_signal(SIGKILL);
if (nvme_delete_sq(nvmeq))
nvme_del_queue_end(nvmeq);
}
@@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev)
kthread_stop(tmp);
}
+static void nvme_freeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ blk_mq_freeze_queue_start(ns->queue);
+
+ spin_lock(ns->queue->queue_lock);
+ queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue);
+ spin_unlock(ns->queue->queue_lock);
+
+ blk_mq_cancel_requeue_work(ns->queue);
+ blk_mq_stop_hw_queues(ns->queue);
+ }
+}
+
+static void nvme_unfreeze_queues(struct nvme_dev *dev)
+{
+ struct nvme_ns *ns;
+
+ list_for_each_entry(ns, &dev->namespaces, list) {
+ queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue);
+ blk_mq_unfreeze_queue(ns->queue);
+ blk_mq_start_stopped_hw_queues(ns->queue, true);
+ blk_mq_kick_requeue_list(ns->queue);
+ }
+}
+
static void nvme_dev_shutdown(struct nvme_dev *dev)
{
int i;
@@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
dev->initialized = 0;
nvme_dev_list_remove(dev);
- if (dev->bar)
+ if (dev->bar) {
+ nvme_freeze_queues(dev);
csts = readl(&dev->bar->csts);
+ }
if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) {
for (i = dev->queue_count - 1; i >= 0; i--) {
struct nvme_queue *nvmeq = dev->queues[i];
@@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev)
nvme_dev_unmap(dev);
}
-static void nvme_dev_remove_admin(struct nvme_dev *dev)
-{
- if (dev->admin_q && !blk_queue_dying(dev->admin_q))
- blk_cleanup_queue(dev->admin_q);
-}
-
static void nvme_dev_remove(struct nvme_dev *dev)
{
struct nvme_ns *ns;
@@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev)
list_for_each_entry(ns, &dev->namespaces, list) {
if (ns->disk->flags & GENHD_FL_UP)
del_gendisk(ns->disk);
- if (!blk_queue_dying(ns->queue))
+ if (!blk_queue_dying(ns->queue)) {
+ blk_mq_abort_requeue_list(ns->queue);
blk_cleanup_queue(ns->queue);
+ }
}
}
@@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref)
nvme_free_namespaces(dev);
nvme_release_instance(dev);
blk_mq_free_tag_set(&dev->tagset);
+ blk_put_queue(dev->admin_q);
kfree(dev->queues);
kfree(dev->entry);
kfree(dev);
@@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev)
}
nvme_init_queue(dev->queues[0], 0);
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+ goto disable;
result = nvme_setup_io_queues(dev);
if (result)
- goto disable;
+ goto free_tags;
nvme_set_irq_hints(dev);
return result;
+ free_tags:
+ nvme_dev_remove_admin(dev);
disable:
nvme_disable_queue(dev, 0);
nvme_dev_list_remove(dev);
@@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev)
dev->reset_workfn = nvme_remove_disks;
queue_work(nvme_workq, &dev->reset_work);
spin_unlock(&dev_list_lock);
+ } else {
+ nvme_unfreeze_queues(dev);
+ nvme_set_irq_hints(dev);
}
dev->initialized = 1;
return 0;
@@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev)
pci_set_drvdata(pdev, NULL);
flush_work(&dev->reset_work);
misc_deregister(&dev->miscdev);
- nvme_dev_remove(dev);
nvme_dev_shutdown(dev);
+ nvme_dev_remove(dev);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
- nvme_free_admin_tags(dev);
nvme_release_prp_pools(dev);
kref_put(&dev->kref, nvme_free_dev);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 3ec85dfce124..8a86b62466f7 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2098,32 +2098,26 @@ static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
* If an image has a non-zero parent overlap, get a reference to its
* parent.
*
- * We must get the reference before checking for the overlap to
- * coordinate properly with zeroing the parent overlap in
- * rbd_dev_v2_parent_info() when an image gets flattened. We
- * drop it again if there is no overlap.
- *
* Returns true if the rbd device has a parent with a non-zero
* overlap and a reference for it was successfully taken, or
* false otherwise.
*/
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
- int counter;
+ int counter = 0;
if (!rbd_dev->parent_spec)
return false;
- counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
- if (counter > 0 && rbd_dev->parent_overlap)
- return true;
-
- /* Image was flattened, but parent is not yet torn down */
+ down_read(&rbd_dev->header_rwsem);
+ if (rbd_dev->parent_overlap)
+ counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
+ up_read(&rbd_dev->header_rwsem);
if (counter < 0)
rbd_warn(rbd_dev, "parent reference overflow");
- return false;
+ return counter > 0;
}
/*
@@ -4239,7 +4233,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
*/
if (rbd_dev->parent_overlap) {
rbd_dev->parent_overlap = 0;
- smp_mb();
rbd_dev_parent_put(rbd_dev);
pr_info("%s: clone image has been flattened\n",
rbd_dev->disk->disk_name);
@@ -4285,7 +4278,6 @@ static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
* treat it specially.
*/
rbd_dev->parent_overlap = overlap;
- smp_mb();
if (!overlap) {
/* A null parent_spec indicates it's the initial probe */
@@ -5114,10 +5106,7 @@ static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
struct rbd_image_header *header;
- /* Drop parent reference unless it's already been done (or none) */
-
- if (rbd_dev->parent_overlap)
- rbd_dev_parent_put(rbd_dev);
+ rbd_dev_parent_put(rbd_dev);
/* Free dynamic fields from the header, then zero it out */
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 7ef7c098708f..cdfbd21e3597 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev)
goto out_put_disk;
q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
- if (!q) {
+ if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 860da40b78ef..0ce5e2d65a06 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -1312,6 +1312,9 @@ static int cci_probe(void)
if (!np)
return -ENODEV;
+ if (!of_device_is_available(np))
+ return -ENODEV;
+
cci_config = of_match_node(arm_cci_matches, np)->data;
if (!cci_config)
return -ENODEV;
diff --git a/drivers/bus/mvebu-mbus.c b/drivers/bus/mvebu-mbus.c
index eb7682dc123b..81bf297f1034 100644
--- a/drivers/bus/mvebu-mbus.c
+++ b/drivers/bus/mvebu-mbus.c
@@ -210,12 +210,25 @@ static void mvebu_mbus_disable_window(struct mvebu_mbus_state *mbus,
}
/* Checks whether the given window number is available */
+
+/* On Armada XP, 375 and 38x, MBus window 13 has the remap
+ * capability, like windows 0 to 7. However, the mvebu-mbus driver
+ * does not currently take this special case into account, which means
+ * that when window 13 is actually used, the remap registers are left
+ * at 0, making the device using this MBus window unavailable. The
+ * quick fix for stable is to not use window 13. A follow-up patch
+ * will handle this window correctly.
+ */
static int mvebu_mbus_window_is_free(struct mvebu_mbus_state *mbus,
const int win)
{
void __iomem *addr = mbus->mbuswins_base +
mbus->soc->win_cfg_offset(win);
u32 ctrl = readl(addr + WIN_CTRL_OFF);
+
+ if (win == 13)
+ return false;
+
return !(ctrl & WIN_CTRL_ENABLE);
}
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index 19db03667650..dcbbb4ea3cc1 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -417,6 +417,6 @@ static void __exit agp_ali_cleanup(void)
module_init(agp_ali_init);
module_exit(agp_ali_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 3b47ed0310e1..0ef350010766 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -813,6 +813,6 @@ static void __exit agp_amd64_cleanup(void)
module_init(agp_amd64_mod_init);
module_exit(agp_amd64_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>, Andi Kleen");
+MODULE_AUTHOR("Dave Jones, Andi Kleen");
module_param(agp_try_unsupported, bool, 0);
MODULE_LICENSE("GPL");
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index 18a7a6baa304..75a9786a77e6 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -579,6 +579,6 @@ static void __exit agp_ati_cleanup(void)
module_init(agp_ati_init);
module_exit(agp_ati_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 317c28ce8328..38ffb281df97 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -356,7 +356,7 @@ static __init int agp_setup(char *s)
__setup("agp=", agp_setup);
#endif
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Jeff Hartmann");
MODULE_DESCRIPTION("AGP GART driver");
MODULE_LICENSE("GPL and additional rights");
MODULE_ALIAS_MISCDEV(AGPGART_MINOR);
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index f9b9ca5d31b7..0a21daed5b62 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -920,5 +920,5 @@ static void __exit agp_intel_cleanup(void)
module_init(agp_intel_init);
module_exit(agp_intel_cleanup);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/intel-gtt.c b/drivers/char/agp/intel-gtt.c
index f3334829e55a..92aa43fa8d70 100644
--- a/drivers/char/agp/intel-gtt.c
+++ b/drivers/char/agp/intel-gtt.c
@@ -1438,5 +1438,5 @@ void intel_gmch_remove(void)
}
EXPORT_SYMBOL(intel_gmch_remove);
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones, Various @Intel");
MODULE_LICENSE("GPL and additional rights");
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index a1861b75eb31..6c8d39cb566e 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -1,7 +1,7 @@
/*
* Nvidia AGPGART routines.
* Based upon a 2.4 agpgart diff by the folks from NVIDIA, and hacked up
- * to work in 2.5 by Dave Jones <davej@redhat.com>
+ * to work in 2.5 by Dave Jones.
*/
#include <linux/module.h>
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 228f20cddc05..a4961d35e940 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -595,4 +595,4 @@ module_init(agp_via_init);
module_exit(agp_via_cleanup);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Dave Jones <davej@redhat.com>");
+MODULE_AUTHOR("Dave Jones");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 5fa83f751378..6b65fa4e0c55 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -199,18 +199,6 @@ struct bmc_device {
int guid_set;
char name[16];
struct kref usecount;
-
- /* bmc device attributes */
- struct device_attribute device_id_attr;
- struct device_attribute provides_dev_sdrs_attr;
- struct device_attribute revision_attr;
- struct device_attribute firmware_rev_attr;
- struct device_attribute version_attr;
- struct device_attribute add_dev_support_attr;
- struct device_attribute manufacturer_id_attr;
- struct device_attribute product_id_attr;
- struct device_attribute guid_attr;
- struct device_attribute aux_firmware_rev_attr;
};
#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
@@ -2252,7 +2240,7 @@ static ssize_t device_id_show(struct device *dev,
return snprintf(buf, 10, "%u\n", bmc->id.device_id);
}
-DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
+static DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
static ssize_t provides_device_sdrs_show(struct device *dev,
struct device_attribute *attr,
@@ -2263,7 +2251,8 @@ static ssize_t provides_device_sdrs_show(struct device *dev,
return snprintf(buf, 10, "%u\n",
(bmc->id.device_revision & 0x80) >> 7);
}
-DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
+static DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show,
+ NULL);
static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2273,7 +2262,7 @@ static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
return snprintf(buf, 20, "%u\n",
bmc->id.device_revision & 0x0F);
}
-DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
+static DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
static ssize_t firmware_revision_show(struct device *dev,
struct device_attribute *attr,
@@ -2284,7 +2273,7 @@ static ssize_t firmware_revision_show(struct device *dev,
return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
bmc->id.firmware_revision_2);
}
-DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
+static DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
static ssize_t ipmi_version_show(struct device *dev,
struct device_attribute *attr,
@@ -2296,7 +2285,7 @@ static ssize_t ipmi_version_show(struct device *dev,
ipmi_version_major(&bmc->id),
ipmi_version_minor(&bmc->id));
}
-DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
+static DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
static ssize_t add_dev_support_show(struct device *dev,
struct device_attribute *attr,
@@ -2307,7 +2296,8 @@ static ssize_t add_dev_support_show(struct device *dev,
return snprintf(buf, 10, "0x%02x\n",
bmc->id.additional_device_support);
}
-DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
+static DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show,
+ NULL);
static ssize_t manufacturer_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2317,7 +2307,7 @@ static ssize_t manufacturer_id_show(struct device *dev,
return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
}
-DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
+static DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
static ssize_t product_id_show(struct device *dev,
struct device_attribute *attr,
@@ -2327,7 +2317,7 @@ static ssize_t product_id_show(struct device *dev,
return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
}
-DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
+static DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
static ssize_t aux_firmware_rev_show(struct device *dev,
struct device_attribute *attr,
@@ -2341,7 +2331,7 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
bmc->id.aux_firmware_revision[1],
bmc->id.aux_firmware_revision[0]);
}
-DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
+static DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -2352,7 +2342,7 @@ static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
(long long) bmc->guid[0],
(long long) bmc->guid[8]);
}
-DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
+static DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
static struct attribute *bmc_dev_attrs[] = {
&dev_attr_device_id.attr,
@@ -2392,10 +2382,10 @@ cleanup_bmc_device(struct kref *ref)
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (bmc->guid_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
platform_device_unregister(&bmc->pdev);
}
@@ -2422,16 +2412,14 @@ static int create_bmc_files(struct bmc_device *bmc)
int err;
if (bmc->id.aux_firmware_revision_set) {
- bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
err = device_create_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
if (err)
goto out;
}
if (bmc->guid_set) {
- bmc->guid_attr.attr.name = "guid";
err = device_create_file(&bmc->pdev.dev,
- &bmc->guid_attr);
+ &dev_attr_guid);
if (err)
goto out_aux_firm;
}
@@ -2441,7 +2429,7 @@ static int create_bmc_files(struct bmc_device *bmc)
out_aux_firm:
if (bmc->id.aux_firmware_revision_set)
device_remove_file(&bmc->pdev.dev,
- &bmc->aux_firmware_rev_attr);
+ &dev_attr_aux_firmware_revision);
out:
return err;
}
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index e178ac27e73c..982b96323f82 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -52,6 +52,7 @@
#include <linux/dmi.h>
#include <linux/kthread.h>
#include <linux/acpi.h>
+#include <linux/ctype.h>
#define PFX "ipmi_ssif: "
#define DEVICE_NAME "ipmi_ssif"
@@ -968,7 +969,8 @@ static void sender(void *send_info,
do_gettimeofday(&t);
pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
- msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
+ msg->data[0], msg->data[1],
+ (long) t.tv_sec, (long) t.tv_usec);
}
}
diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c
index 32f7c1b36204..2f13bd5246b5 100644
--- a/drivers/clk/at91/clk-slow.c
+++ b/drivers/clk/at91/clk-slow.c
@@ -70,6 +70,7 @@ struct clk_sam9x5_slow {
#define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw)
+static struct clk *slow_clk;
static int clk_slow_osc_prepare(struct clk_hw *hw)
{
@@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;
return clk;
}
@@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc,
clk = clk_register(NULL, &slowck->hw);
if (IS_ERR(clk))
kfree(slowck);
+ else
+ slow_clk = clk;
return clk;
}
@@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np,
of_clk_add_provider(np, of_clk_src_simple_get, clk);
}
+
+/*
+ * FIXME: Not all slow clk users properly claim the clock (get + prepare +
+ * enable) before using it.
+ * If all of the users that do claim it properly decide that they no longer
+ * need it (or are removed), it gets disabled while the faulty users still
+ * rely on it, and the system hangs.
+ * Prevent this clock from being disabled until all users properly
+ * request it.
+ * Once that is done, this function and the slow_clk variable should be removed.
+ */
+static int __init of_at91_clk_slow_retain(void)
+{
+ if (!slow_clk)
+ return 0;
+
+ __clk_get(slow_clk);
+ clk_prepare_enable(slow_clk);
+
+ return 0;
+}
+arch_initcall(of_at91_clk_slow_retain);
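For reference, the claim sequence the FIXME above refers to (get + prepare + enable) looks roughly like this in a consumer driver. This is a minimal sketch; the device pointer and the "slowck" connection id are illustrative, not taken from the patch:

	#include <linux/clk.h>

	struct clk *slow;
	int ret;

	slow = devm_clk_get(&pdev->dev, "slowck");	/* get */
	if (IS_ERR(slow))
		return PTR_ERR(slow);

	ret = clk_prepare_enable(slow);			/* prepare + enable */
	if (ret)
		return ret;

	/* ... use the clock ... */

	clk_disable_unprepare(slow);			/* on error or remove */

Once every user claims the clock this way, the arch_initcall workaround and the slow_clk variable can be dropped, as the comment notes.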
diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c
index 21784e4eb3f0..440ef81ab15c 100644
--- a/drivers/clk/berlin/bg2q.c
+++ b/drivers/clk/berlin/bg2q.c
@@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = {
{ "pbridge", "perif", 15, CLK_IGNORE_UNUSED },
{ "sdio", "perif", 16, CLK_IGNORE_UNUSED },
{ "nfc", "perif", 18 },
- { "smemc", "perif", 19 },
{ "pcie", "perif", 22 },
};
diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c
index b6e6c85507a5..0a47d6f49cd6 100644
--- a/drivers/clk/clk-ppc-corenet.c
+++ b/drivers/clk/clk-ppc-corenet.c
@@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = {
{}
};
-static struct platform_driver ppc_corenet_clk_driver __initdata = {
+static struct platform_driver ppc_corenet_clk_driver = {
.driver = {
.name = "ppc_corenet_clock",
.of_match_table = ppc_clk_ids,
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index f4963b7d4e17..d48ac71c6c8b 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
new_rate = clk->ops->determine_rate(clk->hw, rate,
&best_parent_rate,
&parent_hw);
- parent = parent_hw->clk;
+ parent = parent_hw ? parent_hw->clk : NULL;
} else if (clk->ops->round_rate) {
new_rate = clk->ops->round_rate(clk->hw, rate,
&best_parent_rate);
diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c
index 75c8c45ef728..8539c4fd34cc 100644
--- a/drivers/clk/rockchip/clk-cpu.c
+++ b/drivers/clk/rockchip/clk-cpu.c
@@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
unsigned long alt_prate, alt_div;
+ unsigned long flags;
alt_prate = clk_get_rate(cpuclk->alt_parent);
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
/*
* If the old parent clock speed is less than the clock speed
@@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk,
cpuclk->reg_base + reg_data->core_reg);
}
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
@@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
{
const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data;
const struct rockchip_cpuclk_rate_table *rate;
+ unsigned long flags;
rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate);
if (!rate) {
@@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
return -EINVAL;
}
- spin_lock(cpuclk->lock);
+ spin_lock_irqsave(cpuclk->lock, flags);
if (ndata->old_rate < ndata->new_rate)
rockchip_cpuclk_set_dividers(cpuclk, rate);
@@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk,
if (ndata->old_rate > ndata->new_rate)
rockchip_cpuclk_set_dividers(cpuclk, rate);
- spin_unlock(cpuclk->lock);
+ spin_unlock_irqrestore(cpuclk->lock, flags);
return 0;
}
diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c
index c54078960847..7eb684c50d42 100644
--- a/drivers/clk/rockchip/clk-rk3188.c
+++ b/drivers/clk/rockchip/clk-rk3188.c
@@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" };
PNAME(mux_mac_p) = { "gpll", "dpll" };
PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" };
+static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = {
+ [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
+ RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates),
+ [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4),
+ RK2928_MODE_CON, 4, 4, 0, NULL),
+ [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8),
+ RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+ [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12),
+ RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates),
+};
+
static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = {
[apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0),
RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates),
@@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = {
/* hclk_peri gates */
GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS),
GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS),
- GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS),
+ GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS),
GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS),
GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS),
- GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS),
- GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS),
+ GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS),
+ GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS),
GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS),
GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS),
GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS),
@@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = {
GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS),
GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
- GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS),
+ GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(5), 14, GFLAGS),
GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS),
@@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = {
GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS),
GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS),
- GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS),
+ GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED,
+ RK2928_CLKGATE_CON(7), 3, GFLAGS),
GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS),
GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS),
@@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np)
static void __init rk3066a_clk_init(struct device_node *np)
{
rk3188_common_clk_init(np);
- rockchip_clk_register_plls(rk3188_pll_clks,
- ARRAY_SIZE(rk3188_pll_clks),
+ rockchip_clk_register_plls(rk3066_pll_clks,
+ ARRAY_SIZE(rk3066_pll_clks),
RK3066_GRF_SOC_STATUS);
rockchip_clk_register_branches(rk3066a_clk_branches,
ARRAY_SIZE(rk3066a_clk_branches));
diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c
index ac6be7c0132d..11194b8329fe 100644
--- a/drivers/clk/rockchip/clk-rk3288.c
+++ b/drivers/clk/rockchip/clk-rk3288.c
@@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = {
}
static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = {
- RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4),
- RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4),
+ RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3),
+ RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3),
};
static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = {
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 6a79fc4f900c..095c1774592c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -462,7 +462,7 @@ static void __init arch_counter_register(unsigned type)
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
diff --git a/drivers/clocksource/bcm_kona_timer.c b/drivers/clocksource/bcm_kona_timer.c
index 0595dc6c453e..f1e33d08dd83 100644
--- a/drivers/clocksource/bcm_kona_timer.c
+++ b/drivers/clocksource/bcm_kona_timer.c
@@ -68,9 +68,8 @@ static void kona_timer_disable_and_clear(void __iomem *base)
}
static void
-kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
+kona_timer_get_counter(void __iomem *timer_base, uint32_t *msw, uint32_t *lsw)
{
- void __iomem *base = IOMEM(timer_base);
int loop_limit = 4;
/*
@@ -86,9 +85,9 @@ kona_timer_get_counter(void *timer_base, uint32_t *msw, uint32_t *lsw)
*/
while (--loop_limit) {
- *msw = readl(base + KONA_GPTIMER_STCHI_OFFSET);
- *lsw = readl(base + KONA_GPTIMER_STCLO_OFFSET);
- if (*msw == readl(base + KONA_GPTIMER_STCHI_OFFSET))
+ *msw = readl(timer_base + KONA_GPTIMER_STCHI_OFFSET);
+ *lsw = readl(timer_base + KONA_GPTIMER_STCLO_OFFSET);
+ if (*msw == readl(timer_base + KONA_GPTIMER_STCHI_OFFSET))
break;
}
if (!loop_limit) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 9403061a2acc..83564c9cfdbe 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -97,8 +97,8 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
writel_relaxed(value, reg_base + offset);
if (likely(offset >= EXYNOS4_MCT_L_BASE(0))) {
- stat_addr = (offset & ~EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
- switch (offset & EXYNOS4_MCT_L_MASK) {
+ stat_addr = (offset & EXYNOS4_MCT_L_MASK) + MCT_L_WSTAT_OFFSET;
+ switch (offset & ~EXYNOS4_MCT_L_MASK) {
case MCT_L_TCON_OFFSET:
mask = 1 << 3; /* L_TCON write status */
break;
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 0f665b8f2461..f150ca82bfaf 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -428,7 +428,7 @@ static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
ced->features = CLOCK_EVT_FEAT_PERIODIC;
ced->features |= CLOCK_EVT_FEAT_ONESHOT;
ced->rating = 200;
- ced->cpumask = cpumask_of(0);
+ ced->cpumask = cpu_possible_mask;
ced->set_next_event = sh_tmu_clock_event_next;
ced->set_mode = sh_tmu_clock_event_mode;
ced->suspend = sh_tmu_clock_event_suspend;
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index f56147a1daed..fde97d6e31d6 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -211,6 +211,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
/* OPPs might be populated at runtime, don't check for error here */
of_init_opp_table(cpu_dev);
+ /*
+ * But we need the OPP table to function, so if it is not there yet,
+ * give the platform code a chance to provide it for us.
+ */
+ ret = dev_pm_opp_get_opp_count(cpu_dev);
+ if (ret <= 0) {
+ pr_debug("OPP table is not ready, deferring probe\n");
+ ret = -EPROBE_DEFER;
+ goto out_free_opp;
+ }
+
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
ret = -ENOMEM;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index a09a29c312a9..46bed4f81cde 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -2028,6 +2028,12 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
/* Don't start any governor operations if we are entering suspend */
if (cpufreq_suspended)
return 0;
+ /*
+ * The governor might not be initialized here if an ACPI _PPC change
+ * notification happened, so check for it.
+ */
+ if (!policy->governor)
+ return -EINVAL;
if (policy->governor->max_transition_latency &&
policy->cpuinfo.transition_latency >
diff --git a/drivers/cpuidle/governors/ladder.c b/drivers/cpuidle/governors/ladder.c
index 37263d9a1051..401c0106ed34 100644
--- a/drivers/cpuidle/governors/ladder.c
+++ b/drivers/cpuidle/governors/ladder.c
@@ -79,12 +79,7 @@ static int ladder_select_state(struct cpuidle_driver *drv,
last_state = &ldev->states[last_idx];
- if (!(drv->states[last_idx].flags & CPUIDLE_FLAG_TIME_INVALID)) {
- last_residency = cpuidle_get_last_residency(dev) - \
- drv->states[last_idx].exit_latency;
- }
- else
- last_residency = last_state->threshold.promotion_time + 1;
+ last_residency = cpuidle_get_last_residency(dev) - drv->states[last_idx].exit_latency;
/* consider promotion */
if (last_idx < drv->state_count - 1 &&
diff --git a/drivers/cpuidle/governors/menu.c b/drivers/cpuidle/governors/menu.c
index 659d7b0c9ebf..40580794e23d 100644
--- a/drivers/cpuidle/governors/menu.c
+++ b/drivers/cpuidle/governors/menu.c
@@ -396,8 +396,8 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* power state and occurrence of the wakeup event.
*
* If the entered idle state didn't support residency measurements,
- * we are basically lost in the dark how much time passed.
- * As a compromise, assume we slept for the whole expected time.
+ * we use the reported value anyway if it is short; if it is long,
+ * we truncate it to the whole expected time.
*
* Any measured amount of time will include the exit latency.
* Since we are interested in when the wakeup begun, not when it
@@ -405,22 +405,17 @@ static void menu_update(struct cpuidle_driver *drv, struct cpuidle_device *dev)
* the measured amount of time is less than the exit latency,
* assume the state was never reached and the exit latency is 0.
*/
- if (unlikely(target->flags & CPUIDLE_FLAG_TIME_INVALID)) {
- /* Use timer value as is */
- measured_us = data->next_timer_us;
- } else {
- /* Use measured value */
- measured_us = cpuidle_get_last_residency(dev);
+ /* measured value */
+ measured_us = cpuidle_get_last_residency(dev);
- /* Deduct exit latency */
- if (measured_us > target->exit_latency)
- measured_us -= target->exit_latency;
+ /* Deduct exit latency */
+ if (measured_us > target->exit_latency)
+ measured_us -= target->exit_latency;
- /* Make sure our coefficients do not exceed unity */
- if (measured_us > data->next_timer_us)
- measured_us = data->next_timer_us;
- }
+ /* Make sure our coefficients do not exceed unity */
+ if (measured_us > data->next_timer_us)
+ measured_us = data->next_timer_us;
/* Update our correction ratio */
new_factor = data->correction_factor[data->bucket];
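To make the clamping above concrete, a short worked example (the numbers are illustrative, not from the patch): if cpuidle_get_last_residency() reports 120 us, the state's exit_latency is 30 us, and next_timer_us was 80 us, then:

	measured_us = 120;		/* reported residency */
	measured_us -= 30;		/* deduct exit latency -> 90 us */
	if (measured_us > 80)		/* clamp to next_timer_us */
		measured_us = 80;

so the value fed into the correction factor never exceeds the predicted sleep length, keeping the ratio at or below unity.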
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index 380478562b7d..5c062548957c 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata)
dw->regs = chip->regs;
chip->dw = dw;
- pm_runtime_enable(chip->dev);
pm_runtime_get_sync(chip->dev);
dw_params = dma_read_byaddr(chip->regs, DW_PARAMS);
@@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip)
}
pm_runtime_put_sync_suspend(chip->dev);
- pm_runtime_disable(chip->dev);
return 0;
}
EXPORT_SYMBOL_GPL(dw_dma_remove);
diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c
index a630161473a4..32ea1aca7a0e 100644
--- a/drivers/dma/dw/platform.c
+++ b/drivers/dma/dw/platform.c
@@ -15,6 +15,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/clk.h>
+#include <linux/pm_runtime.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
@@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev)
if (err)
return err;
+ pm_runtime_enable(&pdev->dev);
+
err = dw_dma_probe(chip, pdata);
if (err)
goto err_dw_dma_probe;
@@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev)
return 0;
err_dw_dma_probe:
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
return err;
}
@@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev)
of_dma_controller_free(pdev->dev.of_node);
dw_dma_remove(chip);
+ pm_runtime_disable(&pdev->dev);
clk_disable_unprepare(chip->clk);
return 0;
diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c
index 55d4803d71b0..3d9e08f7e823 100644
--- a/drivers/gpio/gpio-crystalcove.c
+++ b/drivers/gpio/gpio-crystalcove.c
@@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data)
for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) {
if (pending & BIT(gpio)) {
virq = irq_find_mapping(cg->chip.irqdomain, gpio);
- generic_handle_irq(virq);
+ handle_nested_irq(virq);
}
}
diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c
index 978b51eae2ec..ce3c1558cb0a 100644
--- a/drivers/gpio/gpio-dln2.c
+++ b/drivers/gpio/gpio-dln2.c
@@ -47,13 +47,6 @@
#define DLN2_GPIO_MAX_PINS 32
-struct dln2_irq_work {
- struct work_struct work;
- struct dln2_gpio *dln2;
- int pin;
- int type;
-};
-
struct dln2_gpio {
struct platform_device *pdev;
struct gpio_chip gpio;
@@ -64,10 +57,12 @@ struct dln2_gpio {
*/
DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS);
- DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS);
- struct dln2_irq_work *irq_work;
+ /* active IRQs - not synced to hardware */
+ DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS);
+ /* active IRQs - synced to hardware */
+ DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS);
+ int irq_type[DLN2_GPIO_MAX_PINS];
+ struct mutex irq_lock;
};
struct dln2_gpio_pin {
@@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin)
return !!ret;
}
-static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
- unsigned int pin, int value)
+static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2,
+ unsigned int pin, int value)
{
struct dln2_gpio_pin_val req = {
.pin = cpu_to_le16(pin),
.value = value,
};
- dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
- sizeof(req));
+ return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req,
+ sizeof(req));
}
#define DLN2_GPIO_DIRECTION_IN 0
@@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
int value)
{
+ struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio);
+ int ret;
+
+ ret = dln2_gpio_pin_set_out_val(dln2, offset, value);
+ if (ret < 0)
+ return ret;
+
return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT);
}
@@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin,
&req, sizeof(req));
}
-static void dln2_irq_work(struct work_struct *w)
-{
- struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work);
- struct dln2_gpio *dln2 = iw->dln2;
- u8 type = iw->type & DLN2_GPIO_EVENT_MASK;
-
- if (test_bit(iw->pin, dln2->irqs_enabled))
- dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0);
- else
- dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0);
-}
-
-static void dln2_irq_enable(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- int pin = irqd_to_hwirq(irqd);
-
- set_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
-}
-
-static void dln2_irq_disable(struct irq_data *irqd)
+static void dln2_irq_unmask(struct irq_data *irqd)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- clear_bit(pin, dln2->irqs_enabled);
- schedule_work(&dln2->irq_work[pin].work);
+ set_bit(pin, dln2->unmasked_irqs);
}
static void dln2_irq_mask(struct irq_data *irqd)
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd)
struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
int pin = irqd_to_hwirq(irqd);
- set_bit(pin, dln2->irqs_masked);
-}
-
-static void dln2_irq_unmask(struct irq_data *irqd)
-{
- struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
- struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
- struct device *dev = dln2->gpio.dev;
- int pin = irqd_to_hwirq(irqd);
-
- if (test_and_clear_bit(pin, dln2->irqs_pending)) {
- int irq;
-
- irq = irq_find_mapping(dln2->gpio.irqdomain, pin);
- if (!irq) {
- dev_err(dev, "pin %d not mapped to IRQ\n", pin);
- return;
- }
-
- generic_handle_irq(irq);
- }
+ clear_bit(pin, dln2->unmasked_irqs);
}
static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
@@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH;
break;
case IRQ_TYPE_LEVEL_LOW:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW;
break;
case IRQ_TYPE_EDGE_BOTH:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE;
break;
case IRQ_TYPE_EDGE_RISING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING;
break;
case IRQ_TYPE_EDGE_FALLING:
- dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING;
+ dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING;
break;
default:
return -EINVAL;
@@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type)
return 0;
}
+static void dln2_irq_bus_lock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+
+ mutex_lock(&dln2->irq_lock);
+}
+
+static void dln2_irq_bus_unlock(struct irq_data *irqd)
+{
+ struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd);
+ struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio);
+ int pin = irqd_to_hwirq(irqd);
+ int enabled, unmasked;
+ unsigned type;
+ int ret;
+
+ enabled = test_bit(pin, dln2->enabled_irqs);
+ unmasked = test_bit(pin, dln2->unmasked_irqs);
+
+ if (enabled != unmasked) {
+ if (unmasked) {
+ type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK;
+ set_bit(pin, dln2->enabled_irqs);
+ } else {
+ type = DLN2_GPIO_EVENT_NONE;
+ clear_bit(pin, dln2->enabled_irqs);
+ }
+
+ ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0);
+ if (ret)
+ dev_err(dln2->gpio.dev, "failed to set event\n");
+ }
+
+ mutex_unlock(&dln2->irq_lock);
+}
+
static struct irq_chip dln2_gpio_irqchip = {
.name = "dln2-irq",
- .irq_enable = dln2_irq_enable,
- .irq_disable = dln2_irq_disable,
.irq_mask = dln2_irq_mask,
.irq_unmask = dln2_irq_unmask,
.irq_set_type = dln2_irq_set_type,
+ .irq_bus_lock = dln2_irq_bus_lock,
+ .irq_bus_sync_unlock = dln2_irq_bus_unlock,
};
static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
@@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo,
return;
}
- if (!test_bit(pin, dln2->irqs_enabled))
- return;
- if (test_bit(pin, dln2->irqs_masked)) {
- set_bit(pin, dln2->irqs_pending);
- return;
- }
-
- switch (dln2->irq_work[pin].type) {
+ switch (dln2->irq_type[pin]) {
case DLN2_GPIO_EVENT_CHANGE_RISING:
if (event->value)
generic_handle_irq(irq);
@@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
struct dln2_gpio *dln2;
struct device *dev = &pdev->dev;
int pins;
- int i, ret;
+ int ret;
pins = dln2_gpio_get_pin_count(pdev);
if (pins < 0) {
@@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev)
if (!dln2)
return -ENOMEM;
- dln2->irq_work = devm_kcalloc(&pdev->dev, pins,
- sizeof(struct dln2_irq_work), GFP_KERNEL);
- if (!dln2->irq_work)
- return -ENOMEM;
- for (i = 0; i < pins; i++) {
- INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work);
- dln2->irq_work[i].pin = i;
- dln2->irq_work[i].dln2 = dln2;
- }
+ mutex_init(&dln2->irq_lock);
dln2->pdev = pdev;
@@ -529,11 +510,8 @@ out:
static int dln2_gpio_remove(struct platform_device *pdev)
{
struct dln2_gpio *dln2 = platform_get_drvdata(pdev);
- int i;
dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV);
- for (i = 0; i < dln2->gpio.ngpio; i++)
- flush_work(&dln2->irq_work[i].work);
gpiochip_remove(&dln2->gpio);
return 0;
diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c
index 09daaf2aeb56..3a5a71050559 100644
--- a/drivers/gpio/gpio-grgpio.c
+++ b/drivers/gpio/gpio-grgpio.c
@@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev)
err = gpiochip_add(gc);
if (err) {
dev_err(&ofdev->dev, "Could not add gpiochip\n");
- irq_domain_remove(priv->domain);
+ if (priv->domain)
+ irq_domain_remove(priv->domain);
return err;
}
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 604dbe60bdee..08261f2b3a82 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data)
return false;
ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags);
- if (ret < 0)
- return false;
+ if (ret < 0) {
+ /* We've found the gpio chip, but the translation failed.
+ * Return true to stop looking and return the translation
+ * error via out_gpio.
+ */
+ gg_data->out_gpio = ERR_PTR(ret);
+ return true;
+ }
gg_data->out_gpio = gpiochip_get_desc(gc, ret);
return true;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 2ac1800b58bb..f62aa115d79a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev,
return status;
}
-static const DEVICE_ATTR(value, 0644,
+static DEVICE_ATTR(value, 0644,
gpio_value_show, gpio_value_store);
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
@@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev,
return status ? : size;
}
-static const DEVICE_ATTR(active_low, 0644,
+static DEVICE_ATTR(active_low, 0644,
gpio_active_low_show, gpio_active_low_store);
-static const struct attribute *gpio_attrs[] = {
+static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr,
+ int n)
+{
+ struct device *dev = container_of(kobj, struct device, kobj);
+ struct gpio_desc *desc = dev_get_drvdata(dev);
+ umode_t mode = attr->mode;
+ bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags);
+
+ if (attr == &dev_attr_direction.attr) {
+ if (!show_direction)
+ mode = 0;
+ } else if (attr == &dev_attr_edge.attr) {
+ if (gpiod_to_irq(desc) < 0)
+ mode = 0;
+ if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags))
+ mode = 0;
+ }
+
+ return mode;
+}
+
+static struct attribute *gpio_attrs[] = {
+ &dev_attr_direction.attr,
+ &dev_attr_edge.attr,
&dev_attr_value.attr,
&dev_attr_active_low.attr,
NULL,
};
-static const struct attribute_group gpio_attr_group = {
- .attrs = (struct attribute **) gpio_attrs,
+static const struct attribute_group gpio_group = {
+ .attrs = gpio_attrs,
+ .is_visible = gpio_is_visible,
+};
+
+static const struct attribute_group *gpio_groups[] = {
+ &gpio_group,
+ NULL
};
/*
@@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev,
}
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
-static const struct attribute *gpiochip_attrs[] = {
+static struct attribute *gpiochip_attrs[] = {
&dev_attr_base.attr,
&dev_attr_label.attr,
&dev_attr_ngpio.attr,
NULL,
};
-
-static const struct attribute_group gpiochip_attr_group = {
- .attrs = (struct attribute **) gpiochip_attrs,
-};
+ATTRIBUTE_GROUPS(gpiochip);
/*
* /sys/class/gpio/export ... write-only
@@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
goto fail_unlock;
}
- if (!desc->chip->direction_input || !desc->chip->direction_output)
- direction_may_change = false;
+ if (desc->chip->direction_input && desc->chip->direction_output &&
+ direction_may_change) {
+ set_bit(FLAG_SYSFS_DIR, &desc->flags);
+ }
+
spin_unlock_irqrestore(&gpio_lock, flags);
offset = gpio_chip_hwgpio(desc);
if (desc->chip->names && desc->chip->names[offset])
ioname = desc->chip->names[offset];
- dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
- desc, ioname ? ioname : "gpio%u",
- desc_to_gpio(desc));
+ dev = device_create_with_groups(&gpio_class, desc->chip->dev,
+ MKDEV(0, 0), desc, gpio_groups,
+ ioname ? ioname : "gpio%u",
+ desc_to_gpio(desc));
if (IS_ERR(dev)) {
status = PTR_ERR(dev);
goto fail_unlock;
}
- status = sysfs_create_group(&dev->kobj, &gpio_attr_group);
- if (status)
- goto fail_unregister_device;
-
- if (direction_may_change) {
- status = device_create_file(dev, &dev_attr_direction);
- if (status)
- goto fail_unregister_device;
- }
-
- if (gpiod_to_irq(desc) >= 0 && (direction_may_change ||
- !test_bit(FLAG_IS_OUT, &desc->flags))) {
- status = device_create_file(dev, &dev_attr_edge);
- if (status)
- goto fail_unregister_device;
- }
-
set_bit(FLAG_EXPORT, &desc->flags);
mutex_unlock(&sysfs_lock);
return 0;
-fail_unregister_device:
- device_unregister(dev);
fail_unlock:
mutex_unlock(&sysfs_lock);
gpiod_dbg(desc, "%s: status %d\n", __func__, status);
@@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc)
dev = class_find_device(&gpio_class, NULL, desc, match_export);
if (dev) {
gpio_setup_irq(desc, dev, 0);
+ clear_bit(FLAG_SYSFS_DIR, &desc->flags);
clear_bit(FLAG_EXPORT, &desc->flags);
} else
status = -ENODEV;
@@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip)
/* use chip->base for the ID; it's already known to be unique */
mutex_lock(&sysfs_lock);
- dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
- "gpiochip%d", chip->base);
- if (!IS_ERR(dev)) {
- status = sysfs_create_group(&dev->kobj,
- &gpiochip_attr_group);
- } else
+ dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0),
+ chip, gpiochip_groups,
+ "gpiochip%d", chip->base);
+ if (IS_ERR(dev))
status = PTR_ERR(dev);
+ else
+ status = 0;
chip->exported = (status == 0);
mutex_unlock(&sysfs_lock);
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 487afe6f22fc..568aa2b6bdb0 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip)
base = gpiochip_find_base(chip->ngpio);
if (base < 0) {
status = base;
- goto unlock;
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
}
chip->base = base;
}
status = gpiochip_add_to_list(chip);
+ if (status) {
+ spin_unlock_irqrestore(&gpio_lock, flags);
+ goto err_free_descs;
+ }
- if (status == 0) {
- for (id = 0; id < chip->ngpio; id++) {
- struct gpio_desc *desc = &descs[id];
- desc->chip = chip;
-
- /* REVISIT: most hardware initializes GPIOs as
- * inputs (often with pullups enabled) so power
- * usage is minimized. Linux code should set the
- * gpio direction first thing; but until it does,
- * and in case chip->get_direction is not set,
- * we may expose the wrong direction in sysfs.
- */
- desc->flags = !chip->direction_input
- ? (1 << FLAG_IS_OUT)
- : 0;
- }
+ for (id = 0; id < chip->ngpio; id++) {
+ struct gpio_desc *desc = &descs[id];
+
+ desc->chip = chip;
+
+ /* REVISIT: most hardware initializes GPIOs as inputs (often
+ * with pullups enabled) so power usage is minimized. Linux
+ * code should set the gpio direction first thing; but until
+ * it does, and in case chip->get_direction is not set, we may
+ * expose the wrong direction in sysfs.
+ */
+ desc->flags = !chip->direction_input ? (1 << FLAG_IS_OUT) : 0;
}
chip->desc = descs;
@@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip)
of_gpiochip_add(chip);
acpi_gpiochip_add(chip);
- if (status)
- goto fail;
-
status = gpiochip_export(chip);
if (status)
- goto fail;
+ goto err_remove_chip;
pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__,
chip->base, chip->base + chip->ngpio - 1,
@@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip)
return 0;
-unlock:
+err_remove_chip:
+ acpi_gpiochip_remove(chip);
+ of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
+ list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
-fail:
- kfree(descs);
chip->desc = NULL;
+err_free_descs:
+ kfree(descs);
/* failures here can mean systems won't boot... */
pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__,
@@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip)
unsigned long flags;
unsigned id;
- acpi_gpiochip_remove(chip);
-
- spin_lock_irqsave(&gpio_lock, flags);
+ gpiochip_unexport(chip);
gpiochip_irqchip_remove(chip);
+
+ acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
of_gpiochip_remove(chip);
+ spin_lock_irqsave(&gpio_lock, flags);
for (id = 0; id < chip->ngpio; id++) {
if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags))
dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n");
@@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip)
list_del(&chip->list);
spin_unlock_irqrestore(&gpio_lock, flags);
- gpiochip_unexport(chip);
kfree(chip->desc);
chip->desc = NULL;
diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h
index e3a52113a541..550a5eafbd38 100644
--- a/drivers/gpio/gpiolib.h
+++ b/drivers/gpio/gpiolib.h
@@ -77,6 +77,7 @@ struct gpio_desc {
#define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */
#define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */
+#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */
#define ID_SHIFT 16 /* add new flags before this one */
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 66e40398b3d3..e620807418ea 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o
obj-$(CONFIG_DRM_TTM) += ttm/
obj-$(CONFIG_DRM_TDFX) += tdfx/
obj-$(CONFIG_DRM_R128) += r128/
+obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
obj-$(CONFIG_DRM_RADEON)+= radeon/
obj-$(CONFIG_DRM_MGA) += mga/
obj-$(CONFIG_DRM_I810) += i810/
@@ -67,4 +68,3 @@ obj-$(CONFIG_DRM_IMX) += imx/
obj-y += i2c/
obj-y += panel/
obj-y += bridge/
-obj-$(CONFIG_HSA_AMD) += amd/amdkfd/
diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile
index be6246de5091..307a309110e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/Makefile
+++ b/drivers/gpu/drm/amd/amdkfd/Makefile
@@ -8,7 +8,6 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \
kfd_pasid.o kfd_doorbell.o kfd_flat_memory.o \
kfd_process.o kfd_queue.o kfd_mqd_manager.o \
kfd_kernel_queue.o kfd_packet_manager.o \
- kfd_process_queue_manager.o kfd_device_queue_manager.o \
- kfd_interrupt.o
+ kfd_process_queue_manager.o kfd_device_queue_manager.o
obj-$(CONFIG_HSA_AMD) += amdkfd.o
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 4f7b275f2f7b..fcfdf23e1913 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -31,7 +31,6 @@
#include <uapi/linux/kfd_ioctl.h>
#include <linux/time.h>
#include <linux/mm.h>
-#include <linux/uaccess.h>
#include <uapi/asm-generic/mman-common.h>
#include <asm/processor.h>
#include "kfd_priv.h"
@@ -121,27 +120,20 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
- process->is_32bit_user_mode = is_32bit_user_mode;
-
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
- kfd_init_apertures(process);
-
return 0;
}
-static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_get_version_args args;
+ struct kfd_ioctl_get_version_args *args = data;
int err = 0;
- args.major_version = KFD_IOCTL_MAJOR_VERSION;
- args.minor_version = KFD_IOCTL_MINOR_VERSION;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- err = -EFAULT;
+ args->major_version = KFD_IOCTL_MAJOR_VERSION;
+ args->minor_version = KFD_IOCTL_MINOR_VERSION;
return err;
}
@@ -225,10 +217,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties,
return 0;
}
-static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
- void __user *arg)
+static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
+ void *data)
{
- struct kfd_ioctl_create_queue_args args;
+ struct kfd_ioctl_create_queue_args *args = data;
struct kfd_dev *dev;
int err = 0;
unsigned int queue_id;
@@ -237,16 +229,13 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
memset(&q_properties, 0, sizeof(struct queue_properties));
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
pr_debug("kfd: creating queue ioctl\n");
- err = set_queue_properties_from_user(&q_properties, &args);
+ err = set_queue_properties_from_user(&q_properties, args);
if (err)
return err;
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -254,7 +243,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto err_bind_process;
}
@@ -267,33 +256,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p,
if (err != 0)
goto err_create_queue;
- args.queue_id = queue_id;
+ args->queue_id = queue_id;
/* Return gpu_id as doorbell offset for mmap usage */
- args.doorbell_offset = args.gpu_id << PAGE_SHIFT;
-
- if (copy_to_user(arg, &args, sizeof(args))) {
- err = -EFAULT;
- goto err_copy_args_out;
- }
+ args->doorbell_offset = args->gpu_id << PAGE_SHIFT;
mutex_unlock(&p->mutex);
- pr_debug("kfd: queue id %d was created successfully\n", args.queue_id);
+ pr_debug("kfd: queue id %d was created successfully\n", args->queue_id);
pr_debug("ring buffer address == 0x%016llX\n",
- args.ring_base_address);
+ args->ring_base_address);
pr_debug("read ptr address == 0x%016llX\n",
- args.read_pointer_address);
+ args->read_pointer_address);
pr_debug("write ptr address == 0x%016llX\n",
- args.write_pointer_address);
+ args->write_pointer_address);
return 0;
-err_copy_args_out:
- pqm_destroy_queue(&p->pqm, queue_id);
err_create_queue:
err_bind_process:
mutex_unlock(&p->mutex);
@@ -301,99 +283,90 @@ err_bind_process:
}
static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_destroy_queue_args args;
-
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
+ struct kfd_ioctl_destroy_queue_args *args = data;
pr_debug("kfd: destroying queue id %d for PASID %d\n",
- args.queue_id,
+ args->queue_id,
p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_destroy_queue(&p->pqm, args.queue_id);
+ retval = pqm_destroy_queue(&p->pqm, args->queue_id);
mutex_unlock(&p->mutex);
return retval;
}
static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
- void __user *arg)
+ void *data)
{
int retval;
- struct kfd_ioctl_update_queue_args args;
+ struct kfd_ioctl_update_queue_args *args = data;
struct queue_properties properties;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
+ if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) {
pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n");
return -EINVAL;
}
- if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) {
+ if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) {
pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n");
return -EINVAL;
}
- if ((args.ring_base_address) &&
+ if ((args->ring_base_address) &&
(!access_ok(VERIFY_WRITE,
- (const void __user *) args.ring_base_address,
+ (const void __user *) args->ring_base_address,
sizeof(uint64_t)))) {
pr_err("kfd: can't access ring base address\n");
return -EFAULT;
}
- if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) {
+ if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) {
pr_err("kfd: ring size must be a power of 2 or 0\n");
return -EINVAL;
}
- properties.queue_address = args.ring_base_address;
- properties.queue_size = args.ring_size;
- properties.queue_percent = args.queue_percentage;
- properties.priority = args.queue_priority;
+ properties.queue_address = args->ring_base_address;
+ properties.queue_size = args->ring_size;
+ properties.queue_percent = args->queue_percentage;
+ properties.priority = args->queue_priority;
pr_debug("kfd: updating queue id %d for PASID %d\n",
- args.queue_id, p->pasid);
+ args->queue_id, p->pasid);
mutex_lock(&p->mutex);
- retval = pqm_update_queue(&p->pqm, args.queue_id, &properties);
+ retval = pqm_update_queue(&p->pqm, args->queue_id, &properties);
mutex_unlock(&p->mutex);
return retval;
}
-static long kfd_ioctl_set_memory_policy(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_set_memory_policy(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_set_memory_policy_args args;
+ struct kfd_ioctl_set_memory_policy_args *args = data;
struct kfd_dev *dev;
int err = 0;
struct kfd_process_device *pdd;
enum cache_policy default_policy, alternate_policy;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
- && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
+ if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT
+ && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) {
return -EINVAL;
}
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
@@ -401,23 +374,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep,
pdd = kfd_bind_process_to_device(dev, p);
if (IS_ERR(pdd)) {
- err = PTR_ERR(pdd);
+ err = -ESRCH;
goto out;
}
- default_policy = (args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
alternate_policy =
- (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
+ (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT)
? cache_policy_coherent : cache_policy_noncoherent;
if (!dev->dqm->set_cache_memory_policy(dev->dqm,
&pdd->qpd,
default_policy,
alternate_policy,
- (void __user *)args.alternate_aperture_base,
- args.alternate_aperture_size))
+ (void __user *)args->alternate_aperture_base,
+ args->alternate_aperture_size))
err = -EINVAL;
out:
@@ -426,53 +399,44 @@ out:
return err;
}
-static long kfd_ioctl_get_clock_counters(struct file *filep,
- struct kfd_process *p, void __user *arg)
+static int kfd_ioctl_get_clock_counters(struct file *filep,
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_clock_counters_args args;
+ struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
struct timespec time;
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- dev = kfd_device_by_id(args.gpu_id);
+ dev = kfd_device_by_id(args->gpu_id);
if (dev == NULL)
return -EINVAL;
/* Reading GPU clock counter from KGD */
- args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
+ args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd);
/* No access to rdtsc. Using raw monotonic time */
getrawmonotonic(&time);
- args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time);
get_monotonic_boottime(&time);
- args.system_clock_counter = (uint64_t)timespec_to_ns(&time);
+ args->system_clock_counter = (uint64_t)timespec_to_ns(&time);
/* Since the counter is in nano-seconds we use 1GHz frequency */
- args.system_clock_freq = 1000000000;
-
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
+ args->system_clock_freq = 1000000000;
return 0;
}
static int kfd_ioctl_get_process_apertures(struct file *filp,
- struct kfd_process *p, void __user *arg)
+ struct kfd_process *p, void *data)
{
- struct kfd_ioctl_get_process_apertures_args args;
+ struct kfd_ioctl_get_process_apertures_args *args = data;
struct kfd_process_device_apertures *pAperture;
struct kfd_process_device *pdd;
dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid);
- if (copy_from_user(&args, arg, sizeof(args)))
- return -EFAULT;
-
- args.num_of_nodes = 0;
+ args->num_of_nodes = 0;
mutex_lock(&p->mutex);
@@ -481,7 +445,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
/* Run over all pdd of the process */
pdd = kfd_get_first_process_device_data(p);
do {
- pAperture = &args.process_apertures[args.num_of_nodes];
+ pAperture =
+ &args->process_apertures[args->num_of_nodes];
pAperture->gpu_id = pdd->dev->id;
pAperture->lds_base = pdd->lds_base;
pAperture->lds_limit = pdd->lds_limit;
@@ -491,7 +456,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
pAperture->scratch_limit = pdd->scratch_limit;
dev_dbg(kfd_device,
- "node id %u\n", args.num_of_nodes);
+ "node id %u\n", args->num_of_nodes);
dev_dbg(kfd_device,
"gpu id %u\n", pdd->dev->id);
dev_dbg(kfd_device,
@@ -507,80 +472,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp,
dev_dbg(kfd_device,
"scratch_limit %llX\n", pdd->scratch_limit);
- args.num_of_nodes++;
+ args->num_of_nodes++;
} while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL &&
- (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS));
+ (args->num_of_nodes < NUM_OF_SUPPORTED_GPUS));
}
mutex_unlock(&p->mutex);
- if (copy_to_user(arg, &args, sizeof(args)))
- return -EFAULT;
-
return 0;
}
+#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \
+ [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl}
+
+/** Ioctl table */
+static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION,
+ kfd_ioctl_get_version, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE,
+ kfd_ioctl_create_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE,
+ kfd_ioctl_destroy_queue, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY,
+ kfd_ioctl_set_memory_policy, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS,
+ kfd_ioctl_get_clock_counters, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES,
+ kfd_ioctl_get_process_apertures, 0),
+
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE,
+ kfd_ioctl_update_queue, 0),
+};
+
+#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
+
static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct kfd_process *process;
- long err = -EINVAL;
+ amdkfd_ioctl_t *func;
+ const struct amdkfd_ioctl_desc *ioctl = NULL;
+ unsigned int nr = _IOC_NR(cmd);
+ char stack_kdata[128];
+ char *kdata = NULL;
+ unsigned int usize, asize;
+ int retcode = -EINVAL;
+
+ if (nr >= AMDKFD_CORE_IOCTL_COUNT)
+ goto err_i1;
+
+ if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) {
+ u32 amdkfd_size;
+
+ ioctl = &amdkfd_ioctls[nr];
- dev_dbg(kfd_device,
- "ioctl cmd 0x%x (#%d), arg 0x%lx\n",
- cmd, _IOC_NR(cmd), arg);
+ amdkfd_size = _IOC_SIZE(ioctl->cmd);
+ usize = asize = _IOC_SIZE(cmd);
+ if (amdkfd_size > asize)
+ asize = amdkfd_size;
+
+ cmd = ioctl->cmd;
+ } else
+ goto err_i1;
+
+ dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg);
process = kfd_get_process(current);
- if (IS_ERR(process))
- return PTR_ERR(process);
+ if (IS_ERR(process)) {
+ dev_dbg(kfd_device, "no process\n");
+ goto err_i1;
+ }
- switch (cmd) {
- case KFD_IOC_GET_VERSION:
- err = kfd_ioctl_get_version(filep, process, (void __user *)arg);
- break;
- case KFD_IOC_CREATE_QUEUE:
- err = kfd_ioctl_create_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_DESTROY_QUEUE:
- err = kfd_ioctl_destroy_queue(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_SET_MEMORY_POLICY:
- err = kfd_ioctl_set_memory_policy(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_CLOCK_COUNTERS:
- err = kfd_ioctl_get_clock_counters(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_GET_PROCESS_APERTURES:
- err = kfd_ioctl_get_process_apertures(filep, process,
- (void __user *)arg);
- break;
-
- case KFD_IOC_UPDATE_QUEUE:
- err = kfd_ioctl_update_queue(filep, process,
- (void __user *)arg);
- break;
-
- default:
- dev_err(kfd_device,
- "unknown ioctl cmd 0x%x, arg 0x%lx)\n",
- cmd, arg);
- err = -EINVAL;
- break;
+ /* Do not trust userspace, use our own definition */
+ func = ioctl->func;
+
+ if (unlikely(!func)) {
+ dev_dbg(kfd_device, "no function\n");
+ retcode = -EINVAL;
+ goto err_i1;
}
- if (err < 0)
- dev_err(kfd_device,
- "ioctl error %ld for ioctl cmd 0x%x (#%d)\n",
- err, cmd, _IOC_NR(cmd));
+ if (cmd & (IOC_IN | IOC_OUT)) {
+ if (asize <= sizeof(stack_kdata)) {
+ kdata = stack_kdata;
+ } else {
+ kdata = kmalloc(asize, GFP_KERNEL);
+ if (!kdata) {
+ retcode = -ENOMEM;
+ goto err_i1;
+ }
+ }
+ if (asize > usize)
+ memset(kdata + usize, 0, asize - usize);
+ }
- return err;
+ if (cmd & IOC_IN) {
+ if (copy_from_user(kdata, (void __user *)arg, usize) != 0) {
+ retcode = -EFAULT;
+ goto err_i1;
+ }
+ } else if (cmd & IOC_OUT) {
+ memset(kdata, 0, usize);
+ }
+
+ retcode = func(filep, process, kdata);
+
+ if (cmd & IOC_OUT)
+ if (copy_to_user((void __user *)arg, kdata, usize) != 0)
+ retcode = -EFAULT;
+
+err_i1:
+ if (!ioctl)
+ dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
+ task_pid_nr(current), cmd, nr);
+
+ if (kdata != stack_kdata)
+ kfree(kdata);
+
+ if (retcode)
+ dev_dbg(kfd_device, "ret = %d\n", retcode);
+
+ return retcode;
}
static int kfd_mmap(struct file *filp, struct vm_area_struct *vma)
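The rework above collapses the per-ioctl copy_from_user/copy_to_user calls into one marshalling step in kfd_ioctl(): the dispatcher looks up the handler in amdkfd_ioctls[], copies the user buffer into stack_kdata (or a kmalloc'd buffer for larger arguments), calls the handler with a kernel-space pointer, and copies the result back for IOC_OUT commands. A minimal userspace sketch of a call through this path, assuming the AMDKFD_IOC_GET_VERSION command and the struct kfd_ioctl_get_version_args layout from the uapi header in this series:

/* Hedged example, not part of the patch: query the KFD ioctl interface version. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

int main(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0)
		return 1;
	if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
		printf("KFD ioctl interface %u.%u\n",
		       args.major_version, args.minor_version);
	close(fd);
	return 0;
}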
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 43884ebd4303..25bc47f3c1cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -26,6 +26,7 @@
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
+#include "kfd_pm4_headers.h"
#define MQD_SIZE_ALIGNED 768
@@ -169,9 +170,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
kfd->shared_resources = *gpu_resources;
/* calculate max size of mqds needed for queues */
- size = max_num_of_processes *
- max_num_of_queues_per_process *
- kfd->device_info->mqd_size_aligned;
+ size = max_num_of_queues_per_device *
+ kfd->device_info->mqd_size_aligned;
/* add another 512KB for all other allocations on gart */
size += 512 * 1024;
@@ -192,13 +192,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
goto kfd_topology_add_device_error;
}
- if (kfd_interrupt_init(kfd)) {
- dev_err(kfd_device,
- "Error initializing interrupts for device (%x:%x)\n",
- kfd->pdev->vendor, kfd->pdev->device);
- goto kfd_interrupt_error;
- }
-
if (!device_iommu_pasid_init(kfd)) {
dev_err(kfd_device,
"Error initializing iommuv2 for device (%x:%x)\n",
@@ -237,8 +230,6 @@ dqm_start_error:
device_queue_manager_error:
amd_iommu_free_device(kfd->pdev);
device_iommu_pasid_error:
- kfd_interrupt_exit(kfd);
-kfd_interrupt_error:
kfd_topology_remove_device(kfd);
kfd_topology_add_device_error:
kfd2kgd->fini_sa_manager(kfd->kgd);
@@ -254,7 +245,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
if (kfd->init_complete) {
device_queue_manager_uninit(kfd->dqm);
amd_iommu_free_device(kfd->pdev);
- kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
}
@@ -296,13 +286,5 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
- if (kfd->init_complete) {
- spin_lock(&kfd->interrupt_lock);
-
- if (kfd->interrupts_active
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
- schedule_work(&kfd->interrupt_work);
-
- spin_unlock(&kfd->interrupt_lock);
- }
+ /* Process interrupts / schedule work as necessary */
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 924e90c072e5..0d8694f015c1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -161,6 +161,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
{
int bit = qpd->vmid - KFD_VMID_START_OFFSET;
+ /* Release the vmid mapping */
+ set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
+
set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
qpd->vmid = 0;
q->properties.vmid = 0;
@@ -180,6 +183,13 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
if (retval != 0) {
@@ -204,6 +214,14 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
list_add(&q->list, &qpd->queues_list);
dqm->queue_count++;
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
}
@@ -272,6 +290,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
return retval;
}
+ pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
+ q->pipe,
+ q->queue);
+
+ retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ q->queue, (uint32_t __user *) q->properties.write_ptr);
+ if (retval != 0) {
+ deallocate_hqd(dqm, q);
+ mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ return retval;
+ }
+
return 0;
}
@@ -311,6 +341,15 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
dqm->queue_count--;
+
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -320,6 +359,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
struct mqd_manager *mqd;
+ bool prev_active = false;
BUG_ON(!dqm || !q || !q->mqd);
@@ -330,10 +370,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
return -ENOMEM;
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
if (q->properties.is_active == true)
+ prev_active = true;
+
+ /*
+ * Check active state vs. the previous state
+ * and modify counter accordingly
+ */
+ retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ if ((q->properties.is_active == true) && (prev_active == false))
dqm->queue_count++;
- else
+ else if ((q->properties.is_active == false) && (prev_active == true))
dqm->queue_count--;
if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
@@ -517,10 +565,14 @@ static int init_pipelines(struct device_queue_manager *dqm,
for (i = 0; i < pipes_num; i++) {
inx = i + first_pipe;
+ /*
+ * HPD buffer on GTT is allocated by amdkfd, no need to waste
+ * space in GTT for pipelines we don't initialize
+ */
pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
/* = log2(bytes/4)-1 */
- kfd2kgd->init_pipeline(dqm->dev->kgd, i,
+ kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
}
@@ -536,7 +588,7 @@ static int init_scheduler(struct device_queue_manager *dqm)
pr_debug("kfd: In %s\n", __func__);
- retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
+ retval = init_pipelines(dqm, get_pipes_num(dqm), get_first_pipe(dqm));
if (retval != 0)
return retval;
@@ -728,6 +780,21 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("kfd: In func %s\n", __func__);
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new kernel queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ mutex_unlock(&dqm->lock);
+ return -EPERM;
+ }
+
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
list_add(&kq->list, &qpd->priv_queue_list);
dqm->queue_count++;
qpd->is_debug = true;
@@ -751,6 +818,13 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count--;
qpd->is_debug = false;
execute_queues_cpsch(dqm, false);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type.
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
mutex_unlock(&dqm->lock);
}
@@ -769,6 +843,13 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
mutex_lock(&dqm->lock);
+ if (dqm->total_queue_count >= max_num_of_queues_per_device) {
+ pr_warn("amdkfd: Can't create new usermode queue because %d queues were already created\n",
+ dqm->total_queue_count);
+ retval = -EPERM;
+ goto out;
+ }
+
mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
if (mqd == NULL) {
mutex_unlock(&dqm->lock);
@@ -786,6 +867,15 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
retval = execute_queues_cpsch(dqm, false);
}
+ /*
+ * Unconditionally increment this counter, regardless of the queue's
+ * type or whether the queue is active.
+ */
+ dqm->total_queue_count++;
+
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
out:
mutex_unlock(&dqm->lock);
return retval;
@@ -906,6 +996,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ /*
+ * Unconditionally decrement this counter, regardless of the queue's
+ * type
+ */
+ dqm->total_queue_count--;
+ pr_debug("Total of %d queues are accountable so far\n",
+ dqm->total_queue_count);
+
mutex_unlock(&dqm->lock);
return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index c3f189e8ae35..52035bf0c1cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -130,6 +130,7 @@ struct device_queue_manager {
struct list_head queues;
unsigned int processes_count;
unsigned int queue_count;
+ unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
unsigned int vmid_bitmap;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index 66df4da01c29..e64aa99e5e41 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -299,13 +299,13 @@ int kfd_init_apertures(struct kfd_process *process)
struct kfd_dev *dev;
struct kfd_process_device *pdd;
- mutex_lock(&process->mutex);
-
/*Iterating over all devices*/
while ((dev = kfd_topology_enum_kfd_devices(id)) != NULL &&
id < NUM_OF_SUPPORTED_GPUS) {
pdd = kfd_get_process_device_data(dev, process, 1);
+ if (!pdd)
+ return -1;
/*
* For 64 bit process aperture will be statically reserved in
@@ -348,8 +348,6 @@ int kfd_init_apertures(struct kfd_process *process)
id++;
}
- mutex_unlock(&process->mutex);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
deleted file mode 100644
index 5b999095a1f7..000000000000
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ /dev/null
@@ -1,176 +0,0 @@
-/*
- * Copyright 2014 Advanced Micro Devices, Inc.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-/*
- * KFD Interrupts.
- *
- * AMD GPUs deliver interrupts by pushing an interrupt description onto the
- * interrupt ring and then sending an interrupt. KGD receives the interrupt
- * in ISR and sends us a pointer to each new entry on the interrupt ring.
- *
- * We generally can't process interrupt-signaled events from ISR, so we call
- * out to each interrupt client module (currently only the scheduler) to ask if
- * each interrupt is interesting. If they return true, then it requires further
- * processing so we copy it to an internal interrupt ring and call each
- * interrupt client again from a work-queue.
- *
- * There's no acknowledgment for the interrupts we use. The hardware simply
- * queues a new interrupt each time without waiting.
- *
- * The fixed-size internal queue means that it's possible for us to lose
- * interrupts because we have no back-pressure to the hardware.
- */
-
-#include <linux/slab.h>
-#include <linux/device.h>
-#include "kfd_priv.h"
-
-#define KFD_INTERRUPT_RING_SIZE 256
-
-static void interrupt_wq(struct work_struct *);
-
-int kfd_interrupt_init(struct kfd_dev *kfd)
-{
- void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE,
- kfd->device_info->ih_ring_entry_size,
- GFP_KERNEL);
- if (!interrupt_ring)
- return -ENOMEM;
-
- kfd->interrupt_ring = interrupt_ring;
- kfd->interrupt_ring_size =
- KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size;
- atomic_set(&kfd->interrupt_ring_wptr, 0);
- atomic_set(&kfd->interrupt_ring_rptr, 0);
-
- spin_lock_init(&kfd->interrupt_lock);
-
- INIT_WORK(&kfd->interrupt_work, interrupt_wq);
-
- kfd->interrupts_active = true;
-
- /*
- * After this function returns, the interrupt will be enabled. This
- * barrier ensures that the interrupt running on a different processor
- * sees all the above writes.
- */
- smp_wmb();
-
- return 0;
-}
-
-void kfd_interrupt_exit(struct kfd_dev *kfd)
-{
- /*
- * Stop the interrupt handler from writing to the ring and scheduling
- * workqueue items. The spinlock ensures that any interrupt running
- * after we have unlocked sees interrupts_active = false.
- */
- unsigned long flags;
-
- spin_lock_irqsave(&kfd->interrupt_lock, flags);
- kfd->interrupts_active = false;
- spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
-
- /*
- * Flush_scheduled_work ensures that there are no outstanding
- * work-queue items that will access interrupt_ring. New work items
- * can't be created because we stopped interrupt handling above.
- */
- flush_scheduled_work();
-
- kfree(kfd->interrupt_ring);
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with dequeue_ih_ring_entry.
- */
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry)
-{
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
-
- if ((rptr - wptr) % kfd->interrupt_ring_size ==
- kfd->device_info->ih_ring_entry_size) {
- /* This is very bad, the system is likely to hang. */
- dev_err_ratelimited(kfd_chardev(),
- "Interrupt ring overflow, dropping interrupt.\n");
- return false;
- }
-
- memcpy(kfd->interrupt_ring + wptr, ih_ring_entry,
- kfd->device_info->ih_ring_entry_size);
-
- wptr = (wptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
- smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */
- atomic_set(&kfd->interrupt_ring_wptr, wptr);
-
- return true;
-}
-
-/*
- * This assumes that it can't be called concurrently with itself
- * but only with enqueue_ih_ring_entry.
- */
-static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry)
-{
- /*
- * Assume that wait queues have an implicit barrier, i.e. anything that
- * happened in the ISR before it queued work is visible.
- */
-
- unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr);
- unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr);
-
- if (rptr == wptr)
- return false;
-
- memcpy(ih_ring_entry, kfd->interrupt_ring + rptr,
- kfd->device_info->ih_ring_entry_size);
-
- rptr = (rptr + kfd->device_info->ih_ring_entry_size) %
- kfd->interrupt_ring_size;
-
- /*
- * Ensure the rptr write update is not visible until
- * memcpy has finished reading.
- */
- smp_mb();
- atomic_set(&kfd->interrupt_ring_rptr, rptr);
-
- return true;
-}
-
-static void interrupt_wq(struct work_struct *work)
-{
- struct kfd_dev *dev = container_of(work, struct kfd_dev,
- interrupt_work);
-
- uint32_t ih_ring_entry[DIV_ROUND_UP(
- dev->device_info->ih_ring_entry_size,
- sizeof(uint32_t))];
-
- while (dequeue_ih_ring_entry(dev, ih_ring_entry))
- ;
-}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 95d5af138e6e..a8be6df85347 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -50,15 +50,10 @@ module_param(sched_policy, int, 0444);
MODULE_PARM_DESC(sched_policy,
"Kernel cmdline parameter that defines the amdkfd scheduling policy");
-int max_num_of_processes = KFD_MAX_NUM_OF_PROCESSES_DEFAULT;
-module_param(max_num_of_processes, int, 0444);
-MODULE_PARM_DESC(max_num_of_processes,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported processes");
-
-int max_num_of_queues_per_process = KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT;
-module_param(max_num_of_queues_per_process, int, 0444);
-MODULE_PARM_DESC(max_num_of_queues_per_process,
- "Kernel cmdline parameter that defines the amdkfd maximum number of supported queues per process");
+int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
+module_param(max_num_of_queues_per_device, int, 0444);
+MODULE_PARM_DESC(max_num_of_queues_per_device,
+ "Maximum number of supported queues per device (1 = Minimum, 4096 = default)");
bool kgd2kfd_init(unsigned interface_version,
const struct kfd2kgd_calls *f2g,
@@ -100,16 +95,10 @@ static int __init kfd_module_init(void)
}
/* Verify module parameters */
- if ((max_num_of_processes < 0) ||
- (max_num_of_processes > KFD_MAX_NUM_OF_PROCESSES)) {
- pr_err("kfd: max_num_of_processes must be between 0 to KFD_MAX_NUM_OF_PROCESSES\n");
- return -1;
- }
-
- if ((max_num_of_queues_per_process < 0) ||
- (max_num_of_queues_per_process >
- KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)) {
- pr_err("kfd: max_num_of_queues_per_process must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_PROCESS\n");
+ if ((max_num_of_queues_per_device < 0) ||
+ (max_num_of_queues_per_device >
+ KFD_MAX_NUM_OF_QUEUES_PER_DEVICE)) {
+ pr_err("kfd: max_num_of_queues_per_device must be between 0 to KFD_MAX_NUM_OF_QUEUES_PER_DEVICE\n");
return -1;
}
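With the per-process and per-process-queue limits replaced by a single per-device limit, the tunable becomes max_num_of_queues_per_device. A hedged usage example, assuming the driver is built as the amdkfd module (as in the Makefile above):

	modprobe amdkfd max_num_of_queues_per_device=1024

or, when built in, the equivalent kernel command line option amdkfd.max_num_of_queues_per_device=1024.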
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index adc31474e786..4c3828cf45bf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -184,7 +184,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd,
uint32_t queue_id)
{
- return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address,
+ return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address,
pipe_id, queue_id);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
index 71699ad97d74..6cfe7f1f18cf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c
@@ -30,9 +30,9 @@ static DEFINE_MUTEX(pasid_mutex);
int kfd_pasid_init(void)
{
- pasid_limit = max_num_of_processes;
+ pasid_limit = KFD_MAX_NUM_OF_PROCESSES;
- pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL);
+ pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL);
if (!pasid_bitmap)
return -ENOMEM;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index f9fb81e3bb09..96dc10e8904a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -52,20 +52,19 @@
#define kfd_alloc_struct(ptr_to_struct) \
((typeof(ptr_to_struct)) kzalloc(sizeof(*ptr_to_struct), GFP_KERNEL))
-/* Kernel module parameter to specify maximum number of supported processes */
-extern int max_num_of_processes;
-
-#define KFD_MAX_NUM_OF_PROCESSES_DEFAULT 32
#define KFD_MAX_NUM_OF_PROCESSES 512
+#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
/*
- * Kernel module parameter to specify maximum number of supported queues
- * per process
+ * Kernel module parameter to specify maximum number of supported queues per
+ * device
*/
-extern int max_num_of_queues_per_process;
+extern int max_num_of_queues_per_device;
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS_DEFAULT 128
-#define KFD_MAX_NUM_OF_QUEUES_PER_PROCESS 1024
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
+#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE \
+ (KFD_MAX_NUM_OF_PROCESSES * \
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
#define KFD_KERNEL_QUEUE_SIZE 2048
@@ -135,22 +134,10 @@ struct kfd_dev {
struct kgd2kfd_shared_resources shared_resources;
- void *interrupt_ring;
- size_t interrupt_ring_size;
- atomic_t interrupt_ring_rptr;
- atomic_t interrupt_ring_wptr;
- struct work_struct interrupt_work;
- spinlock_t interrupt_lock;
-
/* QCM Device instance */
struct device_queue_manager *dqm;
bool init_complete;
- /*
- * Interrupts of interest to KFD are copied
- * from the HW ring into a SW ring.
- */
- bool interrupts_active;
};
/* KGD2KFD callbacks */
@@ -463,6 +450,24 @@ struct kfd_process {
bool is_32bit_user_mode;
};
+/**
+ * Ioctl function type.
+ *
+ * \param filep pointer to file structure.
+ * \param p amdkfd process pointer.
+ * \param data pointer to arg that was copied from user.
+ */
+typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p,
+ void *data);
+
+struct amdkfd_ioctl_desc {
+ unsigned int cmd;
+ int flags;
+ amdkfd_ioctl_t *func;
+ unsigned int cmd_drv;
+ const char *name;
+};
+
void kfd_process_create_wq(void);
void kfd_process_destroy_wq(void);
struct kfd_process *kfd_create_process(const struct task_struct *);
@@ -513,10 +518,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev);
struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx);
/* Interrupts */
-int kfd_interrupt_init(struct kfd_dev *dev);
-void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index b85eb0b830b4..3c76ef05cbcf 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -26,6 +26,8 @@
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/notifier.h>
+#include <linux/compat.h>
+
struct mm_struct;
#include "kfd_priv.h"
@@ -285,8 +287,15 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_process_pqm_init;
+ /* init process apertures */
+ process->is_32bit_user_mode = is_compat_task();
+ if (kfd_init_apertures(process) != 0)
+ goto err_init_apretures;
+
return process;
+err_init_apretures:
+ pqm_uninit(&process->pqm);
err_process_pqm_init:
hash_del_rcu(&process->kfd_processes);
synchronize_rcu();
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 47526780d736..f37cf5efe642 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -54,11 +54,11 @@ static int find_available_queue_slot(struct process_queue_manager *pqm,
pr_debug("kfd: in %s\n", __func__);
found = find_first_zero_bit(pqm->queue_slot_bitmap,
- max_num_of_queues_per_process);
+ KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
pr_debug("kfd: the new slot id %lu\n", found);
- if (found >= max_num_of_queues_per_process) {
+ if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_info("amdkfd: Can not open more queues for process with pasid %d\n",
pqm->process->pasid);
return -ENOMEM;
@@ -76,7 +76,7 @@ int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
INIT_LIST_HEAD(&pqm->queues);
pqm->queue_slot_bitmap =
- kzalloc(DIV_ROUND_UP(max_num_of_queues_per_process,
+ kzalloc(DIV_ROUND_UP(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
BITS_PER_BYTE), GFP_KERNEL);
if (pqm->queue_slot_bitmap == NULL)
return -ENOMEM;
@@ -203,6 +203,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->kq = NULL;
retval = dev->dqm->create_queue(dev->dqm, q, &pdd->qpd,
&q->properties.vmid);
+ pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -222,7 +223,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("kfd: error dqm create queue\n");
+ pr_debug("Error dqm create queue\n");
goto err_create_queue;
}
@@ -241,7 +242,10 @@ int pqm_create_queue(struct process_queue_manager *pqm,
err_create_queue:
kfree(pqn);
err_allocate_pqn:
+ /* if the queues list is empty, unregister the process from the device */
clear_bit(*qid, pqm->queue_slot_bitmap);
+ if (list_empty(&pqm->queues))
+ dev->dqm->unregister_process(dev->dqm, &pdd->qpd);
return retval;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 5733e2859e8a..cca1708fd811 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -700,8 +700,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.simd_per_cu);
sysfs_show_32bit_prop(buffer, "max_slots_scratch_cu",
dev->node_props.max_slots_scratch_cu);
- sysfs_show_32bit_prop(buffer, "engine_id",
- dev->node_props.engine_id);
sysfs_show_32bit_prop(buffer, "vendor_id",
dev->node_props.vendor_id);
sysfs_show_32bit_prop(buffer, "device_id",
@@ -715,6 +713,12 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->gpu->kgd));
sysfs_show_64bit_prop(buffer, "local_mem_size",
kfd2kgd->get_vmem_size(dev->gpu->kgd));
+
+ sysfs_show_32bit_prop(buffer, "fw_version",
+ kfd2kgd->get_fw_version(
+ dev->gpu->kgd,
+ KGD_ENGINE_MEC1));
+
}
ret = sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
@@ -917,7 +921,7 @@ static int kfd_build_sysfs_node_tree(void)
uint32_t i = 0;
list_for_each_entry(dev, &topology_device_list, list) {
- ret = kfd_build_sysfs_node_entry(dev, 0);
+ ret = kfd_build_sysfs_node_entry(dev, i);
if (ret < 0)
return ret;
i++;
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 9c729dd8dd50..96a512208fad 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -45,6 +45,17 @@ enum kgd_memory_pool {
KGD_POOL_FRAMEBUFFER = 3,
};
+enum kgd_engine_type {
+ KGD_ENGINE_PFP = 1,
+ KGD_ENGINE_ME,
+ KGD_ENGINE_CE,
+ KGD_ENGINE_MEC1,
+ KGD_ENGINE_MEC2,
+ KGD_ENGINE_RLC,
+ KGD_ENGINE_SDMA,
+ KGD_ENGINE_MAX
+};
+
struct kgd2kfd_shared_resources {
/* Bit n == 1 means VMID n is available for KFD. */
unsigned int compute_vmid_bitmap;
@@ -137,6 +148,8 @@ struct kgd2kfd_calls {
*
* @hqd_destroy: Destructs and preempts the queue assigned to that hqd slot.
*
+ * @get_fw_version: Returns FW versions from the header
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -170,12 +183,14 @@ struct kfd2kgd_calls {
int (*hqd_load)(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
- bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address,
+ bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type,
unsigned int timeout, uint32_t pipe_id,
uint32_t queue_id);
+ uint16_t (*get_fw_version)(struct kgd_dev *kgd,
+ enum kgd_engine_type type);
};
bool kgd2kfd_init(unsigned interface_version,
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4a78a773151c..bbdbe4721573 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -61,7 +61,7 @@ drm_atomic_helper_plane_changed(struct drm_atomic_state *state,
struct drm_crtc_state *crtc_state;
if (plane->state->crtc) {
- crtc_state = state->crtc_states[drm_crtc_index(plane->crtc)];
+ crtc_state = state->crtc_states[drm_crtc_index(plane->state->crtc)];
if (WARN_ON(!crtc_state))
return;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 52ce26d6b4fb..dc386ebe5193 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -145,6 +145,31 @@ int drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper, struct drm_
}
EXPORT_SYMBOL(drm_fb_helper_add_one_connector);
+static void remove_from_modeset(struct drm_mode_set *set,
+ struct drm_connector *connector)
+{
+ int i, j;
+
+ for (i = 0; i < set->num_connectors; i++) {
+ if (set->connectors[i] == connector)
+ break;
+ }
+
+ if (i == set->num_connectors)
+ return;
+
+ for (j = i + 1; j < set->num_connectors; j++) {
+ set->connectors[j - 1] = set->connectors[j];
+ }
+ set->num_connectors--;
+
+ /* because i915 is pissy about this..
+ * TODO maybe need to make sure we set it back to !=NULL somewhere?
+ */
+ if (set->num_connectors == 0)
+ set->fb = NULL;
+}
+
int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
struct drm_connector *connector)
{
@@ -167,6 +192,11 @@ int drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
}
fb_helper->connector_count--;
kfree(fb_helper_connector);
+
+ /* also cleanup dangling references to the connector: */
+ for (i = 0; i < fb_helper->crtc_count; i++)
+ remove_from_modeset(&fb_helper->crtc_info[i].mode_set, connector);
+
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_remove_one_connector);
@@ -741,7 +771,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
int i, j, rc = 0;
int start;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
@@ -915,7 +947,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
int ret = 0;
int i;
- drm_modeset_lock_all(dev);
+ if (__drm_modeset_lock_all(dev, !!oops_in_progress)) {
+ return -EBUSY;
+ }
if (!drm_fb_helper_is_bound(fb_helper)) {
drm_modeset_unlock_all(dev);
return -EBUSY;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index f5a5f18efa5b..4d79dad9d44f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -830,6 +830,8 @@ drm_get_last_vbltimestamp(struct drm_device *dev, int crtc,
* vblank events since the system was booted, including lost events due to
* modesetting activity.
*
+ * This is the legacy version of drm_crtc_vblank_count().
+ *
* Returns:
* The software vblank counter.
*/
@@ -844,6 +846,25 @@ u32 drm_vblank_count(struct drm_device *dev, int crtc)
EXPORT_SYMBOL(drm_vblank_count);
/**
+ * drm_crtc_vblank_count - retrieve "cooked" vblank counter value
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ *
+ * This is the native KMS version of drm_vblank_count().
+ *
+ * Returns:
+ * The software vblank counter.
+ */
+u32 drm_crtc_vblank_count(struct drm_crtc *crtc)
+{
+ return drm_vblank_count(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_vblank_count);
+
+/**
* drm_vblank_count_and_time - retrieve "cooked" vblank counter value
* and the system timestamp corresponding to that vblank counter value.
*
@@ -904,6 +925,8 @@ static void send_vblank_event(struct drm_device *dev,
*
* Updates sequence # and timestamp on event, and sends it to userspace.
* Caller must hold event lock.
+ *
+ * This is the legacy version of drm_crtc_send_vblank_event().
*/
void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e)
@@ -923,6 +946,23 @@ void drm_send_vblank_event(struct drm_device *dev, int crtc,
EXPORT_SYMBOL(drm_send_vblank_event);
/**
+ * drm_crtc_send_vblank_event - helper to send vblank event after pageflip
+ * @crtc: the source CRTC of the vblank event
+ * @e: the event to send
+ *
+ * Updates sequence # and timestamp on event, and sends it to userspace.
+ * Caller must hold event lock.
+ *
+ * This is the native KMS version of drm_send_vblank_event().
+ */
+void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e)
+{
+ drm_send_vblank_event(crtc->dev, drm_crtc_index(crtc), e);
+}
+EXPORT_SYMBOL(drm_crtc_send_vblank_event);
+
+/**
* drm_vblank_enable - enable the vblank interrupt on a CRTC
* @dev: DRM device
* @crtc: CRTC in question
@@ -1594,6 +1634,8 @@ static void drm_handle_vblank_events(struct drm_device *dev, int crtc)
*
* Drivers should call this routine in their vblank interrupt handlers to
* update the vblank counter and send any signals that may be pending.
+ *
+ * This is the legacy version of drm_crtc_handle_vblank().
*/
bool drm_handle_vblank(struct drm_device *dev, int crtc)
{
@@ -1670,3 +1712,21 @@ bool drm_handle_vblank(struct drm_device *dev, int crtc)
return true;
}
EXPORT_SYMBOL(drm_handle_vblank);
+
+/**
+ * drm_crtc_handle_vblank - handle a vblank event
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ *
+ * This is the native KMS version of drm_handle_vblank().
+ *
+ * Returns:
+ * True if the event was successfully handled, false on failure.
+ */
+bool drm_crtc_handle_vblank(struct drm_crtc *crtc)
+{
+ return drm_handle_vblank(crtc->dev, drm_crtc_index(crtc));
+}
+EXPORT_SYMBOL(drm_crtc_handle_vblank);
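The new drm_crtc_* wrappers added here take a struct drm_crtc directly instead of a (dev, pipe index) pair. A hedged sketch of a driver-side vblank interrupt handler using them; my_crtc_state, my_vblank_irq and pending_event are hypothetical driver-private names, not from this patch:

#include <linux/interrupt.h>
#include <drm/drmP.h>

struct my_crtc_state {				/* hypothetical driver state */
	struct drm_crtc base;
	struct drm_pending_vblank_event *pending_event;
};

static irqreturn_t my_vblank_irq(int irq, void *arg)
{
	struct my_crtc_state *st = arg;
	struct drm_crtc *crtc = &st->base;
	unsigned long flags;

	/* Native KMS replacement for drm_handle_vblank(dev, pipe) */
	drm_crtc_handle_vblank(crtc);

	if (st->pending_event) {
		/* Caller must hold event_lock, as the kerneldoc above notes */
		spin_lock_irqsave(&crtc->dev->event_lock, flags);
		drm_crtc_send_vblank_event(crtc, st->pending_event);
		st->pending_event = NULL;
		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
	}

	return IRQ_HANDLED;
}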
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 121470a83d1a..1bcbe07cecfc 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -645,18 +645,6 @@ static int exynos_drm_init(void)
if (!is_exynos)
return -ENODEV;
- /*
- * Register device object only in case of Exynos SoC.
- *
- * Below codes resolves temporarily infinite loop issue incurred
- * by Exynos drm driver when using multi-platform kernel.
- * So these codes will be replaced with more generic way later.
- */
- if (!of_machine_is_compatible("samsung,exynos3") &&
- !of_machine_is_compatible("samsung,exynos4") &&
- !of_machine_is_compatible("samsung,exynos5"))
- return -ENODEV;
-
exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
NULL, 0);
if (IS_ERR(exynos_drm_pdev))
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 5765a161abdd..98051e8e855a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata)
static void hdmiphy_conf_reset(struct hdmi_context *hdata)
{
- u8 buffer[2];
u32 reg;
clk_disable_unprepare(hdata->res.sclk_hdmi);
@@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata)
clk_prepare_enable(hdata->res.sclk_hdmi);
/* operation mode */
- buffer[0] = 0x1f;
- buffer[1] = 0x00;
-
- if (hdata->hdmiphy_port)
- i2c_master_send(hdata->hdmiphy_port, buffer, 2);
+ hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE,
+ HDMI_PHY_ENABLE_MODE_SET);
if (hdata->type == HDMI_TYPE13)
reg = HDMI_V13_PHY_RSTOUT;
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 820b76234ef4..064ed6597def 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos)
static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
{
struct mixer_context *mixer_ctx = mgr_to_mixer(mgr);
+ int err;
mutex_lock(&mixer_ctx->mixer_mutex);
if (!mixer_ctx->powered) {
@@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
}
mutex_unlock(&mixer_ctx->mixer_mutex);
- drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+ if (err < 0) {
+ DRM_DEBUG_KMS("failed to acquire vblank counter\n");
+ return;
+ }
atomic_set(&mixer_ctx->wait_vsync_event, 1);
@@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data)
return ret;
}
- pm_runtime_enable(dev);
-
return 0;
}
@@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data)
struct mixer_context *ctx = dev_get_drvdata(dev);
mixer_mgr_remove(&ctx->manager);
-
- pm_runtime_disable(dev);
}
static const struct component_ops mixer_component_ops = {
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index d4762799351d..a9041d1a8ff0 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -32,6 +32,8 @@
struct tda998x_priv {
struct i2c_client *cec;
struct i2c_client *hdmi;
+ struct mutex mutex;
+ struct delayed_work dwork;
uint16_t rev;
uint8_t current_page;
int dpms;
@@ -402,9 +404,10 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
uint8_t addr = REG2ADDR(reg);
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return ret;
+ goto out;
ret = i2c_master_send(client, &addr, sizeof(addr));
if (ret < 0)
@@ -414,10 +417,12 @@ reg_read_range(struct tda998x_priv *priv, uint16_t reg, char *buf, int cnt)
if (ret < 0)
goto fail;
- return ret;
+ goto out;
fail:
dev_err(&client->dev, "Error %d reading from 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
return ret;
}
@@ -431,13 +436,16 @@ reg_write_range(struct tda998x_priv *priv, uint16_t reg, uint8_t *p, int cnt)
buf[0] = REG2ADDR(reg);
memcpy(&buf[1], p, cnt);
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, cnt + 1);
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static int
@@ -459,13 +467,16 @@ reg_write(struct tda998x_priv *priv, uint16_t reg, uint8_t val)
uint8_t buf[] = {REG2ADDR(reg), val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -475,13 +486,16 @@ reg_write16(struct tda998x_priv *priv, uint16_t reg, uint16_t val)
uint8_t buf[] = {REG2ADDR(reg), val >> 8, val};
int ret;
+ mutex_lock(&priv->mutex);
ret = set_page(priv, reg);
if (ret < 0)
- return;
+ goto out;
ret = i2c_master_send(client, buf, sizeof(buf));
if (ret < 0)
dev_err(&client->dev, "Error %d writing to 0x%x\n", ret, reg);
+out:
+ mutex_unlock(&priv->mutex);
}
static void
@@ -536,6 +550,17 @@ tda998x_reset(struct tda998x_priv *priv)
reg_write(priv, REG_MUX_VP_VIP_OUT, 0x24);
}
+/* handle HDMI connect/disconnect */
+static void tda998x_hpd(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct tda998x_priv *priv =
+ container_of(dwork, struct tda998x_priv, dwork);
+
+ if (priv->encoder && priv->encoder->dev)
+ drm_kms_helper_hotplug_event(priv->encoder->dev);
+}
+
/*
* only 2 interrupts may occur: screen plug/unplug and EDID read
*/
@@ -559,8 +584,7 @@ static irqreturn_t tda998x_irq_thread(int irq, void *data)
priv->wq_edid_wait = 0;
wake_up(&priv->wq_edid);
} else if (cec != 0) { /* HPD change */
- if (priv->encoder && priv->encoder->dev)
- drm_helper_hpd_irq_event(priv->encoder->dev);
+ schedule_delayed_work(&priv->dwork, HZ/10);
}
return IRQ_HANDLED;
}
@@ -1170,8 +1194,10 @@ static void tda998x_destroy(struct tda998x_priv *priv)
/* disable all IRQs and free the IRQ handler */
cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (priv->hdmi->irq)
+ if (priv->hdmi->irq) {
free_irq(priv->hdmi->irq, priv);
+ cancel_delayed_work_sync(&priv->dwork);
+ }
i2c_unregister_device(priv->cec);
}
@@ -1255,6 +1281,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
struct device_node *np = client->dev.of_node;
u32 video;
int rev_lo, rev_hi, ret;
+ unsigned short cec_addr;
priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(2) | VIP_CNTRL_0_SWAP_B(3);
priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(0) | VIP_CNTRL_1_SWAP_D(1);
@@ -1262,12 +1289,16 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
priv->current_page = 0xff;
priv->hdmi = client;
- priv->cec = i2c_new_dummy(client->adapter, 0x34);
+ /* CEC I2C address bound to TDA998x I2C addr by configuration pins */
+ cec_addr = 0x34 + (client->addr & 0x03);
+ priv->cec = i2c_new_dummy(client->adapter, cec_addr);
if (!priv->cec)
return -ENODEV;
priv->dpms = DRM_MODE_DPMS_OFF;
+ mutex_init(&priv->mutex); /* protect the page access */
+
/* wake up the device: */
cec_write(priv, REG_CEC_ENAMODS,
CEC_ENAMODS_EN_RXSENS | CEC_ENAMODS_EN_HDMI);
@@ -1323,8 +1354,9 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
if (client->irq) {
int irqf_trigger;
- /* init read EDID waitqueue */
+ /* init read EDID waitqueue and HPD work */
init_waitqueue_head(&priv->wq_edid);
+ INIT_DELAYED_WORK(&priv->dwork, tda998x_hpd);
/* clear pending interrupts */
reg_read(priv, REG_INT_FLAGS_0);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f990ab4c3efb..7643300828c3 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -462,19 +462,13 @@ void intel_detect_pch(struct drm_device *dev)
} else if (id == INTEL_PCH_LPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(IS_HSW_ULT(dev));
- } else if (IS_BROADWELL(dev)) {
- dev_priv->pch_type = PCH_LPT;
- dev_priv->pch_id =
- INTEL_PCH_LPT_LP_DEVICE_ID_TYPE;
- DRM_DEBUG_KMS("This is Broadwell, assuming "
- "LynxPoint LP PCH\n");
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(IS_HSW_ULT(dev) || IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_LPT;
DRM_DEBUG_KMS("Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev));
- WARN_ON(!IS_HSW_ULT(dev));
+ WARN_ON(!IS_HASWELL(dev) && !IS_BROADWELL(dev));
+ WARN_ON(!IS_HSW_ULT(dev) && !IS_BDW_ULT(dev));
} else if (id == INTEL_PCH_SPT_DEVICE_ID_TYPE) {
dev_priv->pch_type = PCH_SPT;
DRM_DEBUG_KMS("Found SunrisePoint PCH\n");
@@ -811,6 +805,8 @@ int i915_reset(struct drm_device *dev)
if (!i915.reset)
return 0;
+ intel_reset_gt_powersave(dev);
+
mutex_lock(&dev->struct_mutex);
i915_gem_reset(dev);
@@ -880,7 +876,7 @@ int i915_reset(struct drm_device *dev)
* of re-init after reset.
*/
if (INTEL_INFO(dev)->gen > 5)
- intel_reset_gt_powersave(dev);
+ intel_enable_gt_powersave(dev);
} else {
mutex_unlock(&dev->struct_mutex);
}
@@ -1584,7 +1580,7 @@ static struct drm_driver driver = {
.gem_prime_import = i915_gem_prime_import,
.dumb_create = i915_gem_dumb_create,
- .dumb_map_offset = i915_gem_dumb_map_offset,
+ .dumb_map_offset = i915_gem_mmap_gtt,
.dumb_destroy = drm_gem_dumb_destroy,
.ioctls = i915_ioctls,
.fops = &i915_driver_fops,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 63bcda5541ec..9d7a7155bf02 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1756,8 +1756,6 @@ struct drm_i915_private {
*/
struct workqueue_struct *dp_wq;
- uint32_t bios_vgacntr;
-
/* Abstract the submission mechanism (legacy ringbuffer or execlists) away */
struct {
int (*do_execbuf)(struct drm_device *dev, struct drm_file *file,
@@ -2161,8 +2159,7 @@ struct drm_i915_cmd_table {
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
(INTEL_DEVID(dev) & 0xFF00) == 0x0C00)
#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
- ((INTEL_DEVID(dev) & 0xf) == 0x2 || \
- (INTEL_DEVID(dev) & 0xf) == 0x6 || \
+ ((INTEL_DEVID(dev) & 0xf) == 0x6 || \
(INTEL_DEVID(dev) & 0xf) == 0xe))
#define IS_BDW_GT3(dev) (IS_BROADWELL(dev) && \
(INTEL_DEVID(dev) & 0x00F0) == 0x0020)
@@ -2501,9 +2498,8 @@ void i915_vma_move_to_active(struct i915_vma *vma,
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int i915_gem_dumb_map_offset(struct drm_file *file_priv,
- struct drm_device *dev, uint32_t handle,
- uint64_t *offset);
+int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
+ uint32_t handle, uint64_t *offset);
/**
* Returns true if seq1 is later than seq2.
*/
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 4a9faea626db..5f614828d365 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -401,7 +401,6 @@ static int
i915_gem_create(struct drm_file *file,
struct drm_device *dev,
uint64_t size,
- bool dumb,
uint32_t *handle_p)
{
struct drm_i915_gem_object *obj;
@@ -417,7 +416,6 @@ i915_gem_create(struct drm_file *file,
if (obj == NULL)
return -ENOMEM;
- obj->base.dumb = dumb;
ret = drm_gem_handle_create(file, &obj->base, &handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(&obj->base);
@@ -437,7 +435,7 @@ i915_gem_dumb_create(struct drm_file *file,
args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
args->size = args->pitch * args->height;
return i915_gem_create(file, dev,
- args->size, true, &args->handle);
+ args->size, &args->handle);
}
/**
@@ -450,7 +448,7 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_create *args = data;
return i915_gem_create(file, dev,
- args->size, false, &args->handle);
+ args->size, &args->handle);
}
static inline int
@@ -1050,6 +1048,7 @@ int
i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_pwrite *args = data;
struct drm_i915_gem_object *obj;
int ret;
@@ -1069,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return -EFAULT;
}
+ intel_runtime_pm_get(dev_priv);
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
- return ret;
+ goto put_rpm;
obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (&obj->base == NULL) {
@@ -1123,6 +1124,9 @@ out:
drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
+put_rpm:
+ intel_runtime_pm_put(dev_priv);
+
return ret;
}
@@ -1840,10 +1844,10 @@ static void i915_gem_object_free_mmap_offset(struct drm_i915_gem_object *obj)
drm_gem_free_mmap_offset(&obj->base);
}
-static int
+int
i915_gem_mmap_gtt(struct drm_file *file,
struct drm_device *dev,
- uint32_t handle, bool dumb,
+ uint32_t handle,
uint64_t *offset)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -1860,13 +1864,6 @@ i915_gem_mmap_gtt(struct drm_file *file,
goto unlock;
}
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(obj->base.dumb || obj->base.import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
if (obj->base.size > dev_priv->gtt.mappable_end) {
ret = -E2BIG;
goto out;
@@ -1891,15 +1888,6 @@ unlock:
return ret;
}
-int
-i915_gem_dumb_map_offset(struct drm_file *file,
- struct drm_device *dev,
- uint32_t handle,
- uint64_t *offset)
-{
- return i915_gem_mmap_gtt(file, dev, handle, true, offset);
-}
-
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
@@ -1921,7 +1909,7 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
{
struct drm_i915_gem_mmap_gtt *args = data;
- return i915_gem_mmap_gtt(file, dev, args->handle, false, &args->offset);
+ return i915_gem_mmap_gtt(file, dev, args->handle, &args->offset);
}
static inline int
@@ -3160,6 +3148,13 @@ static void i965_write_fence_reg(struct drm_device *dev, int reg,
u32 size = i915_gem_obj_ggtt_size(obj);
uint64_t val;
+ /* Adjust fence size to match tiled area */
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ uint32_t row_size = obj->stride *
+ (obj->tiling_mode == I915_TILING_Y ? 32 : 8);
+ size = (size / row_size) * row_size;
+ }
+
val = (uint64_t)((i915_gem_obj_ggtt_offset(obj) + size - 4096) &
0xfffff000) << 32;
val |= i915_gem_obj_ggtt_offset(obj) & 0xfffff000;
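Worked example with assumed numbers (not taken from the patch): for an X-tiled object with a 4096-byte stride, row_size = 4096 * 8 = 32768 bytes; if the GGTT allocation is 1052672 bytes (1 MiB of tiled rows plus 4 KiB of padding), the fence end is trimmed to (1052672 / 32768) * 32768 = 1048576 bytes, so the fence covers only complete tile rows rather than the padded allocation.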
@@ -4896,25 +4891,18 @@ i915_gem_init_hw(struct drm_device *dev)
for (i = 0; i < NUM_L3_SLICES(dev); i++)
i915_gem_l3_remap(&dev_priv->ring[RCS], i);
- /*
- * XXX: Contexts should only be initialized once. Doing a switch to the
- * default context switch however is something we'd like to do after
- * reset or thaw (the latter may not actually be necessary for HW, but
- * goes with our code better). Context switching requires rings (for
- * the do_switch), but before enabling PPGTT. So don't move this.
- */
- ret = i915_gem_context_enable(dev_priv);
+ ret = i915_ppgtt_init_hw(dev);
if (ret && ret != -EIO) {
- DRM_ERROR("Context enable failed %d\n", ret);
+ DRM_ERROR("PPGTT enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
-
- return ret;
}
- ret = i915_ppgtt_init_hw(dev);
+ ret = i915_gem_context_enable(dev_priv);
if (ret && ret != -EIO) {
- DRM_ERROR("PPGTT enable failed %d\n", ret);
+ DRM_ERROR("Context enable failed %d\n", ret);
i915_gem_cleanup_ringbuffer(dev);
+
+ return ret;
}
return ret;
@@ -5167,7 +5155,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
if (!mutex_is_locked(mutex))
return false;
-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index d17ff435f276..d011ec82ef1e 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -473,7 +473,12 @@ mi_set_context(struct intel_engine_cs *ring,
u32 hw_flags)
{
u32 flags = hw_flags | MI_MM_SPACE_GTT;
- int ret;
+ const int num_rings =
+ /* Use an extended w/a on ivb+ if signalling from other rings */
+ i915_semaphore_is_enabled(ring->dev) ?
+ hweight32(INTEL_INFO(ring->dev)->ring_mask) - 1 :
+ 0;
+ int len, i, ret;
/* w/a: If Flush TLB Invalidation Mode is enabled, driver must do a TLB
* invalidation prior to MI_SET_CONTEXT. On GEN6 we don't set the value
@@ -490,15 +495,31 @@ mi_set_context(struct intel_engine_cs *ring,
if (!IS_HASWELL(ring->dev) && INTEL_INFO(ring->dev)->gen < 8)
flags |= (MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN);
- ret = intel_ring_begin(ring, 6);
+
+ len = 4;
+ if (INTEL_INFO(ring->dev)->gen >= 7)
+ len += 2 + (num_rings ? 4*num_rings + 2 : 0);
+
+ ret = intel_ring_begin(ring, len);
if (ret)
return ret;
/* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_DISABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
+ }
intel_ring_emit(ring, MI_NOOP);
intel_ring_emit(ring, MI_SET_CONTEXT);
@@ -510,10 +531,21 @@ mi_set_context(struct intel_engine_cs *ring,
*/
intel_ring_emit(ring, MI_NOOP);
- if (INTEL_INFO(ring->dev)->gen >= 7)
+ if (INTEL_INFO(ring->dev)->gen >= 7) {
+ if (num_rings) {
+ struct intel_engine_cs *signaller;
+
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(num_rings));
+ for_each_ring(signaller, to_i915(ring->dev), i) {
+ if (signaller == ring)
+ continue;
+
+ intel_ring_emit(ring, RING_PSMI_CTL(signaller->mmio_base));
+ intel_ring_emit(ring, _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE));
+ }
+ }
intel_ring_emit(ring, MI_ARB_ON_OFF | MI_ARB_ENABLE);
- else
- intel_ring_emit(ring, MI_NOOP);
+ }
intel_ring_advance(ring);
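Worked example for the new length computation (assumed configuration, not from the patch): on a gen7 part with three rings populated and semaphores enabled, num_rings = hweight32(ring_mask) - 1 = 2, so len = 4 + 2 + (4*2 + 2) = 16 dwords — the 4-dword MI_SET_CONTEXT sequence, the MI_ARB_ON_OFF pair, and an MI_LOAD_REGISTER_IMM header plus one RING_PSMI_CTL register/value pair per other ring, emitted both before and after the context switch.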
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f06027ba3ee5..11738316394a 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -121,9 +121,6 @@ eb_lookup_vmas(struct eb_vmas *eb,
goto err;
}
- WARN_ONCE(obj->base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
drm_gem_object_reference(&obj->base);
list_add_tail(&obj->obj_exec_link, &objects);
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 981834b0f9b6..b051a238baf9 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -281,13 +281,34 @@ void gen6_enable_rps_interrupts(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
spin_lock_irq(&dev_priv->irq_lock);
+
WARN_ON(dev_priv->rps.pm_iir);
WARN_ON(I915_READ(gen6_pm_iir(dev_priv)) & dev_priv->pm_rps_events);
dev_priv->rps.interrupts_enabled = true;
+ I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) |
+ dev_priv->pm_rps_events);
gen6_enable_pm_irq(dev_priv, dev_priv->pm_rps_events);
+
spin_unlock_irq(&dev_priv->irq_lock);
}
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask)
+{
+ /*
+ * SNB,IVB can hard hang (and VLV,CHV may) on a looping batchbuffer
+ * if GEN6_PM_UP_EI_EXPIRED is masked.
+ *
+ * TODO: verify if this can be reproduced on VLV,CHV.
+ */
+ if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv))
+ mask &= ~GEN6_PM_RP_UP_EI_EXPIRED;
+
+ if (INTEL_INFO(dev_priv)->gen >= 8)
+ mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP;
+
+ return mask;
+}
+
void gen6_disable_rps_interrupts(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -300,8 +321,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev)
spin_lock_irq(&dev_priv->irq_lock);
- I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ?
- ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0);
+ I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0));
__gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events);
I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) &
@@ -3307,8 +3327,10 @@ static void gen5_gt_irq_postinstall(struct drm_device *dev)
GEN5_IRQ_INIT(GT, dev_priv->gt_irq_mask, gt_irqs);
if (INTEL_INFO(dev)->gen >= 6) {
- pm_irqs |= dev_priv->pm_rps_events;
-
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS
+ * itself is enabled/disabled.
+ */
if (HAS_VEBOX(dev))
pm_irqs |= PM_VEBOX_USER_INTERRUPT;
@@ -3520,7 +3542,11 @@ static void gen8_gt_irq_postinstall(struct drm_i915_private *dev_priv)
dev_priv->pm_irq_mask = 0xffffffff;
GEN8_IRQ_INIT_NDX(GT, 0, ~gt_interrupts[0], gt_interrupts[0]);
GEN8_IRQ_INIT_NDX(GT, 1, ~gt_interrupts[1], gt_interrupts[1]);
- GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, dev_priv->pm_rps_events);
+ /*
+ * RPS interrupts will get enabled/disabled on demand when RPS itself
+ * is enabled/disabled.
+ */
+ GEN8_IRQ_INIT_NDX(GT, 2, dev_priv->pm_irq_mask, 0);
GEN8_IRQ_INIT_NDX(GT, 3, ~gt_interrupts[3], gt_interrupts[3]);
}
@@ -3609,7 +3635,7 @@ static void vlv_display_irq_uninstall(struct drm_i915_private *dev_priv)
vlv_display_irq_reset(dev_priv);
- dev_priv->irq_mask = 0;
+ dev_priv->irq_mask = ~0;
}
static void valleyview_irq_uninstall(struct drm_device *dev)
@@ -3715,8 +3741,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3726,6 +3750,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev,
if (I915_READ16(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
@@ -3897,8 +3922,6 @@ static bool i915_handle_vblank(struct drm_device *dev,
if ((iir & flip_pending) == 0)
goto check_page_flip;
- intel_prepare_page_flip(dev, plane);
-
/* We detect FlipDone by looking for the change in PendingFlip from '1'
* to '0' on the following vblank, i.e. IIR has the Pendingflip
* asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence
@@ -3908,6 +3931,7 @@ static bool i915_handle_vblank(struct drm_device *dev,
if (I915_READ(ISR) & flip_pending)
goto check_page_flip;
+ intel_prepare_page_flip(dev, plane);
intel_finish_page_flip(dev, pipe);
return true;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index eefdc238f70b..172de3b3433b 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -395,6 +395,7 @@
#define PIPE_CONTROL_STORE_DATA_INDEX (1<<21)
#define PIPE_CONTROL_CS_STALL (1<<20)
#define PIPE_CONTROL_TLB_INVALIDATE (1<<18)
+#define PIPE_CONTROL_MEDIA_STATE_CLEAR (1<<16)
#define PIPE_CONTROL_QW_WRITE (1<<14)
#define PIPE_CONTROL_POST_SYNC_OP_MASK (3<<14)
#define PIPE_CONTROL_DEPTH_STALL (1<<13)
@@ -1128,6 +1129,7 @@ enum punit_power_well {
#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE))
#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE))
#define GEN6_NOSYNC 0
+#define RING_PSMI_CTL(base) ((base)+0x50)
#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
@@ -1458,6 +1460,7 @@ enum punit_power_well {
#define GEN6_BLITTER_FBC_NOTIFY (1<<3)
#define GEN6_RC_SLEEP_PSMI_CONTROL 0x2050
+#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0)
#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12)
#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1<<10)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index fb3e3d429191..e7a16f119a29 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -9815,7 +9815,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (obj->tiling_mode != work->old_fb_obj->tiling_mode)
/* vlv: DISPLAY_FLIP fails to change tiling */
ring = NULL;
- } else if (IS_IVYBRIDGE(dev)) {
+ } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) {
ring = &dev_priv->ring[BCS];
} else if (INTEL_INFO(dev)->gen >= 7) {
ring = obj->ring;
@@ -13057,11 +13057,7 @@ static void i915_disable_vga(struct drm_device *dev)
vga_put(dev->pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- /*
- * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming
- * from S3 without preserving (some of?) the other bits.
- */
- I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE);
+ I915_WRITE(vga_reg, VGA_DISP_DISABLE);
POSTING_READ(vga_reg);
}
@@ -13146,8 +13142,6 @@ void intel_modeset_init(struct drm_device *dev)
intel_shared_dpll_init(dev);
- /* save the BIOS value before clobbering it */
- dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev));
/* Just disable it once at startup */
i915_disable_vga(dev);
intel_setup_outputs(dev);
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 25fdbb16d4e0..3b40a17b8852 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -794,6 +794,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask);
void gen6_reset_rps_interrupts(struct drm_device *dev);
void gen6_enable_rps_interrupts(struct drm_device *dev);
void gen6_disable_rps_interrupts(struct drm_device *dev);
+u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask);
void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv);
void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv);
static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 4d63839bd9b4..dfb783a8f2c3 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -962,7 +962,7 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
WARN_ON(panel->backlight.max == 0);
- if (panel->backlight.level == 0) {
+ if (panel->backlight.level <= panel->backlight.min) {
panel->backlight.level = panel->backlight.max;
if (panel->backlight.device)
panel->backlight.device->props.brightness =
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 1f4b56e273c8..bf814a64582a 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4363,16 +4363,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val)
mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED);
mask &= dev_priv->pm_rps_events;
- /* IVB and SNB hard hangs on looping batchbuffer
- * if GEN6_PM_UP_EI_EXPIRED is masked.
- */
- if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev))
- mask |= GEN6_PM_RP_UP_EI_EXPIRED;
-
- if (IS_GEN8(dev_priv->dev))
- mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP;
-
- return ~mask;
+ return gen6_sanitize_rps_pm_mask(dev_priv, ~mask);
}
/* gen6_set_rps is called to update the frequency request, but should also be
@@ -4441,7 +4432,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
return;
/* Mask turbo interrupt so that they will not come in between */
- I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMINTRMSK,
+ gen6_sanitize_rps_pm_mask(dev_priv, ~0));
vlv_force_gfx_clock(dev_priv, true);
@@ -6191,6 +6183,20 @@ void intel_cleanup_gt_powersave(struct drm_device *dev)
valleyview_cleanup_gt_powersave(dev);
}
+static void gen6_suspend_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ flush_delayed_work(&dev_priv->rps.delayed_resume_work);
+
+ /*
+ * TODO: disable RPS interrupts on GEN9+ too once RPS support
+ * is added for it.
+ */
+ if (INTEL_INFO(dev)->gen < 9)
+ gen6_disable_rps_interrupts(dev);
+}
+
/**
* intel_suspend_gt_powersave - suspend PM work and helper threads
* @dev: drm device
@@ -6206,14 +6212,7 @@ void intel_suspend_gt_powersave(struct drm_device *dev)
if (INTEL_INFO(dev)->gen < 6)
return;
- flush_delayed_work(&dev_priv->rps.delayed_resume_work);
-
- /*
- * TODO: disable RPS interrupts on GEN9+ too once RPS support
- * is added for it.
- */
- if (INTEL_INFO(dev)->gen < 9)
- gen6_disable_rps_interrupts(dev);
+ gen6_suspend_rps(dev);
/* Force GPU to min freq during suspend */
gen6_rps_idle(dev_priv);
@@ -6316,8 +6315,11 @@ void intel_reset_gt_powersave(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (INTEL_INFO(dev)->gen < 6)
+ return;
+
+ gen6_suspend_rps(dev);
dev_priv->rps.enabled = false;
- intel_enable_gt_powersave(dev);
}
static void ibx_init_clock_gating(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 9f445e9a75d1..c7bc93d28d84 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -362,12 +362,15 @@ gen7_render_ring_flush(struct intel_engine_cs *ring,
flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+ flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
/*
* TLB invalidate requires a post-sync write.
*/
flags |= PIPE_CONTROL_QW_WRITE;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
+ flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
+
/* Workaround: we must issue a pipe_control with CS-stall bit
* set before a pipe_control command that has the state cache
* invalidate bit set. */
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index f5a78d53e297..ac6da7102fbb 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv,
vlv_power_sequencer_reset(dev_priv);
}
-static void check_power_well_state(struct drm_i915_private *dev_priv,
- struct i915_power_well *power_well)
-{
- bool enabled = power_well->ops->is_enabled(dev_priv, power_well);
-
- if (power_well->always_on || !i915.disable_power_well) {
- if (!enabled)
- goto mismatch;
-
- return;
- }
-
- if (enabled != (power_well->count > 0))
- goto mismatch;
-
- return;
-
-mismatch:
- WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n",
- power_well->name, power_well->always_on, enabled,
- power_well->count, i915.disable_power_well);
-}
-
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
power_well->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
-
- check_power_well_state(dev_priv, power_well);
}
power_domains->domain_use_count[domain]++;
@@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
power_well->hw_enabled = false;
power_well->ops->disable(dev_priv, power_well);
}
-
- check_power_well_state(dev_priv, power_well);
}
mutex_unlock(&power_domains->lock);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index aa873048308b..94a5bee69fe7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -386,9 +386,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
drm_gem_object_unreference(gpu->memptrs_bo);
}
- if (gpu->pm4)
- release_firmware(gpu->pm4);
- if (gpu->pfp)
- release_firmware(gpu->pfp);
+ release_firmware(gpu->pm4);
+ release_firmware(gpu->pfp);
msm_gpu_cleanup(&gpu->base);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index fbebb0405d76..b4e70e0e3cfa 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -141,6 +141,15 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
uint32_t hpd_ctrl;
int i, ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
ret = gpio_config(hdmi, true);
if (ret) {
dev_err(dev->dev, "failed to configure GPIOs: %d\n", ret);
@@ -164,15 +173,6 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
}
}
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_enable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to enable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
hdmi_set_mode(hdmi, false);
phy->funcs->reset(phy);
hdmi_set_mode(hdmi, true);
@@ -200,7 +200,7 @@ fail:
return ret;
}
-static int hdp_disable(struct hdmi_connector *hdmi_connector)
+static void hdp_disable(struct hdmi_connector *hdmi_connector)
{
struct hdmi *hdmi = hdmi_connector->hdmi;
const struct hdmi_platform_config *config = hdmi->config;
@@ -212,28 +212,19 @@ static int hdp_disable(struct hdmi_connector *hdmi_connector)
hdmi_set_mode(hdmi, false);
- for (i = 0; i < config->hpd_reg_cnt; i++) {
- ret = regulator_disable(hdmi->hpd_regs[i]);
- if (ret) {
- dev_err(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
- config->hpd_reg_names[i], ret);
- goto fail;
- }
- }
-
for (i = 0; i < config->hpd_clk_cnt; i++)
clk_disable_unprepare(hdmi->hpd_clks[i]);
ret = gpio_config(hdmi, false);
- if (ret) {
- dev_err(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
- goto fail;
- }
-
- return 0;
+ if (ret)
+ dev_warn(dev->dev, "failed to unconfigure GPIOs: %d\n", ret);
-fail:
- return ret;
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ dev_warn(dev->dev, "failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ }
}
static void
@@ -260,11 +251,11 @@ void hdmi_connector_irq(struct drm_connector *connector)
(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
bool detected = !!(hpd_int_status & HDMI_HPD_INT_STATUS_CABLE_DETECTED);
- DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
-
- /* ack the irq: */
+ /* ack & disable (temporarily) HPD events: */
hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
- hpd_int_ctrl | HDMI_HPD_INT_CTRL_INT_ACK);
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DBG("status=%04x, ctrl=%04x", hpd_int_status, hpd_int_ctrl);
/* detect disconnect if we are connected or vice versa: */
hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index a7672e100d8b..3449213f1e76 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -331,17 +331,8 @@ static int mdp4_crtc_atomic_check(struct drm_crtc *crtc,
struct drm_crtc_state *state)
{
struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
- struct drm_device *dev = crtc->dev;
-
DBG("%s: check", mdp4_crtc->name);
-
- if (mdp4_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
// TODO anything else to check?
-
return 0;
}
@@ -357,7 +348,7 @@ static void mdp4_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp4_crtc->name);
+ DBG("%s: event: %p", mdp4_crtc->name, crtc->state->event);
WARN_ON(mdp4_crtc->event);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 0e9a2e3a82d7..f021f960a8a2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -303,11 +303,6 @@ static int mdp5_crtc_atomic_check(struct drm_crtc *crtc,
DBG("%s: check", mdp5_crtc->name);
- if (mdp5_crtc->event) {
- dev_err(dev->dev, "already pending flip!\n");
- return -EBUSY;
- }
-
/* request a free CTL, if none is already allocated for this CRTC */
if (state->enable && !mdp5_crtc->ctl) {
mdp5_crtc->ctl = mdp5_ctlm_request(mdp5_kms->ctlm, crtc);
@@ -364,7 +359,7 @@ static void mdp5_crtc_atomic_flush(struct drm_crtc *crtc)
struct drm_device *dev = crtc->dev;
unsigned long flags;
- DBG("%s: flush", mdp5_crtc->name);
+ DBG("%s: event: %p", mdp5_crtc->name, crtc->state->event);
WARN_ON(mdp5_crtc->event);
@@ -460,10 +455,7 @@ void mdp5_crtc_set_intf(struct drm_crtc *crtc, int intf,
/* now that we know what irq's we want: */
mdp5_crtc->err.irqmask = intf2err(intf);
mdp5_crtc->vblank.irqmask = intf2vblank(intf);
-
- /* when called from modeset_init(), skip the rest until later: */
- if (!mdp5_kms)
- return;
+ mdp_irq_update(&mdp5_kms->base);
spin_lock_irqsave(&mdp5_kms->resource_lock, flags);
intf_sel = mdp5_read(mdp5_kms, REG_MDP5_DISP_INTF_SEL);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index a11f1b80c488..9f01a4f21af2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -216,17 +216,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
goto fail;
}
- /* NOTE: the vsync and error irq's are actually associated with
- * the INTF/encoder.. the easiest way to deal with this (ie. what
- * we do now) is assume a fixed relationship between crtc's and
- * encoders. I'm not sure if there is ever a need to more freely
- * assign crtcs to encoders, but if there is then we need to take
- * care of error and vblank irq's that the crtc has registered,
- * and also update user-requested vblank_mask.
- */
- encoder->possible_crtcs = BIT(0);
- mdp5_crtc_set_intf(priv->crtcs[0], 3, INTF_HDMI);
-
+ encoder->possible_crtcs = (1 << priv->num_crtcs) - 1;
priv->encoders[priv->num_encoders++] = encoder;
/* Construct bridge/connector for HDMI: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.c b/drivers/gpu/drm/msm/mdp/mdp_kms.c
index 03455b64a245..2a731722d840 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.c
@@ -42,7 +42,10 @@ static void update_irq(struct mdp_kms *mdp_kms)
mdp_kms->funcs->set_irqmask(mdp_kms, irqmask);
}
-static void update_irq_unlocked(struct mdp_kms *mdp_kms)
+/* if an mdp_irq's irqmask has changed, such as when mdp5 crtc<->encoder
+ * link changes, this must be called to figure out the new global irqmask
+ */
+void mdp_irq_update(struct mdp_kms *mdp_kms)
{
unsigned long flags;
spin_lock_irqsave(&list_lock, flags);
@@ -122,7 +125,7 @@ void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
@@ -141,5 +144,5 @@ void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
spin_unlock_irqrestore(&list_lock, flags);
if (needs_update)
- update_irq_unlocked(mdp_kms);
+ mdp_irq_update(mdp_kms);
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 99557b5ad4fd..b268ce95d394 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -75,7 +75,7 @@ void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask);
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq);
-
+void mdp_irq_update(struct mdp_kms *mdp_kms);
/*
* pixel format helpers:
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0de412e13dc..191968256c58 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -23,10 +23,41 @@ struct msm_commit {
struct drm_atomic_state *state;
uint32_t fence;
struct msm_fence_cb fence_cb;
+ uint32_t crtc_mask;
};
static void fence_cb(struct msm_fence_cb *cb);
+/* block until specified crtcs are no longer pending update, and
+ * atomically mark them as pending update
+ */
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ int ret;
+
+ spin_lock(&priv->pending_crtcs_event.lock);
+ ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
+ !(priv->pending_crtcs & crtc_mask));
+ if (ret == 0) {
+ DBG("start: %08x", crtc_mask);
+ priv->pending_crtcs |= crtc_mask;
+ }
+ spin_unlock(&priv->pending_crtcs_event.lock);
+
+ return ret;
+}
+
+/* clear specified crtcs (no longer pending update)
+ */
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+{
+ spin_lock(&priv->pending_crtcs_event.lock);
+ DBG("end: %08x", crtc_mask);
+ priv->pending_crtcs &= ~crtc_mask;
+ wake_up_all_locked(&priv->pending_crtcs_event);
+ spin_unlock(&priv->pending_crtcs_event.lock);
+}
+
static struct msm_commit *new_commit(struct drm_atomic_state *state)
{
struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
@@ -58,12 +89,27 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_helper_commit_post_planes(dev, state);
+ /* NOTE: _wait_for_vblanks() only waits for vblank on
+ * enabled CRTCs. So we end up faulting when disabling
+ * due to (potentially) unref'ing the outgoing fb's
+ * before the vblank when the disable has latched.
+ *
+ * But if it did wait on disabled (or newly disabled)
+ * CRTCs, that would be racy (i.e. we could have missed
+ * the irq). We need some way to poll for pipe shut
+ * down. Or just live with occasionally hitting the
+ * timeout in the CRTC disable path (which really should
+ * not be critical path)
+ */
+
drm_atomic_helper_wait_for_vblanks(dev, state);
drm_atomic_helper_cleanup_planes(dev, state);
drm_atomic_state_free(state);
+ end_atomic(dev->dev_private, c->crtc_mask);
+
kfree(c);
}
@@ -97,8 +143,9 @@ static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
- struct msm_commit *c;
int nplanes = dev->mode_config.num_total_plane;
+ int ncrtcs = dev->mode_config.num_crtc;
+ struct msm_commit *c;
int i, ret;
ret = drm_atomic_helper_prepare_planes(dev, state);
@@ -106,6 +153,18 @@ int msm_atomic_commit(struct drm_device *dev,
return ret;
c = new_commit(state);
+ if (!c)
+ return -ENOMEM;
+
+ /*
+ * Figure out what crtcs we have:
+ */
+ for (i = 0; i < ncrtcs; i++) {
+ struct drm_crtc *crtc = state->crtcs[i];
+ if (!crtc)
+ continue;
+ c->crtc_mask |= (1 << drm_crtc_index(crtc));
+ }
/*
* Figure out what fence to wait for:
@@ -122,6 +181,14 @@ int msm_atomic_commit(struct drm_device *dev,
}
/*
+ * Wait for pending updates on any of the same crtc's and then
+ * mark our set of crtc's as busy:
+ */
+ ret = start_atomic(dev->dev_private, c->crtc_mask);
+ if (ret)
+ return ret;
+
+ /*
* This is the point of no return - everything below never fails except
* when the hw goes bonghits. Which means we can commit the new state on
* the software side now.
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c795217e1bfc..9a61546a0b05 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -193,6 +193,7 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
priv->wq = alloc_ordered_workqueue("msm", 0);
init_waitqueue_head(&priv->fence_event);
+ init_waitqueue_head(&priv->pending_crtcs_event);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 136303818436..b69ef2d5a26c 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -96,6 +96,10 @@ struct msm_drm_private {
/* callbacks deferred until bo is inactive: */
struct list_head fence_cbs;
+ /* crtcs pending async atomic updates: */
+ uint32_t pending_crtcs;
+ wait_queue_head_t pending_crtcs_event;
+
/* registered MMUs: */
unsigned int num_mmus;
struct msm_mmu *mmus[NUM_DOMAINS];
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 94d55e526b4e..1f3af13ccede 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -190,8 +190,7 @@ fail_unlock:
fail:
if (ret) {
- if (fbi)
- framebuffer_release(fbi);
+ framebuffer_release(fbi);
if (fb) {
drm_framebuffer_unregister_private(fb);
drm_framebuffer_remove(fb);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 4a6f0e49d5b5..49dea4fb55ac 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -535,8 +535,7 @@ void msm_gem_free_object(struct drm_gem_object *obj)
drm_free_large(msm_obj->pages);
} else {
- if (msm_obj->vaddr)
- vunmap(msm_obj->vaddr);
+ vunmap(msm_obj->vaddr);
put_pages(obj);
}
diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c
index ff2b434b3db4..760947e380c9 100644
--- a/drivers/gpu/drm/nouveau/core/core/event.c
+++ b/drivers/gpu/drm/nouveau/core/core/event.c
@@ -26,7 +26,7 @@
void
nvkm_event_put(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (--event->refs[index * event->types_nr + type] == 0) {
@@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index)
void
nvkm_event_get(struct nvkm_event *event, u32 types, int index)
{
- BUG_ON(!spin_is_locked(&event->refs_lock));
+ assert_spin_locked(&event->refs_lock);
while (types) {
int type = __ffs(types); types &= ~(1 << type);
if (++event->refs[index * event->types_nr + type] == 1) {
diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c
index d1bcde55e9d7..839a32577680 100644
--- a/drivers/gpu/drm/nouveau/core/core/notify.c
+++ b/drivers/gpu/drm/nouveau/core/core/notify.c
@@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size)
struct nvkm_event *event = notify->event;
unsigned long flags;
- BUG_ON(!spin_is_locked(&event->list_lock));
+ assert_spin_locked(&event->list_lock);
BUG_ON(size != notify->size);
spin_lock_irqsave(&event->refs_lock, flags);
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
index 674da1f095b2..732922690653 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c
@@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device)
device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass;
break;
+ case 0x106:
+ device->cname = "GK208B";
+ device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
+ device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass;
+ device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass;
+ device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass;
+ device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass;
+ device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass;
+ device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
+ device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
+ device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
+ device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass;
+ device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass;
+ device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass;
+ device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass;
+ device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass;
+ device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass;
+ device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass;
+ device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass;
+ device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass;
+ device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass;
+ device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass;
+ device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass;
+ device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass;
+ device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass;
+ device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass;
+ device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass;
+ device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass;
+ device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass;
+ device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
+ break;
case 0x108:
device->cname = "GK208";
device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
index 5e58bba0dd5c..a7a890fad1e5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c
@@ -44,8 +44,10 @@ static void
pramin_fini(void *data)
{
struct priv *priv = data;
- nv_wr32(priv->bios, 0x001700, priv->bar0);
- kfree(priv);
+ if (priv) {
+ nv_wr32(priv->bios, 0x001700, priv->bar0);
+ kfree(priv);
+ }
}
static void *
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
index 00f2ca7e44a5..033a8e999497 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c
@@ -24,34 +24,71 @@
#include "nv50.h"
+struct nvaa_ram_priv {
+ struct nouveau_ram base;
+ u64 poller_base;
+};
+
static int
nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
struct nouveau_oclass *oclass, void *data, u32 datasize,
struct nouveau_object **pobject)
{
- const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */
- const u32 rsvd_tail = (1024 * 1024) >> 12; /* vbios etc */
+ u32 rsvd_head = ( 256 * 1024); /* vga memory */
+ u32 rsvd_tail = (1024 * 1024); /* vbios etc */
struct nouveau_fb *pfb = nouveau_fb(parent);
- struct nouveau_ram *ram;
+ struct nvaa_ram_priv *priv;
int ret;
- ret = nouveau_ram_create(parent, engine, oclass, &ram);
- *pobject = nv_object(ram);
+ ret = nouveau_ram_create(parent, engine, oclass, &priv);
+ *pobject = nv_object(priv);
if (ret)
return ret;
- ram->size = nv_rd32(pfb, 0x10020c);
- ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32);
+ priv->base.type = NV_MEM_TYPE_STOLEN;
+ priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
+ priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12;
- ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) -
- (rsvd_head + rsvd_tail), 1);
+ rsvd_tail += 0x1000;
+ priv->poller_base = priv->base.size - rsvd_tail;
+
+ ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12,
+ (priv->base.size - (rsvd_head + rsvd_tail)) >> 12,
+ 1);
if (ret)
return ret;
- ram->type = NV_MEM_TYPE_STOLEN;
- ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12;
- ram->get = nv50_ram_get;
- ram->put = nv50_ram_put;
+ priv->base.get = nv50_ram_get;
+ priv->base.put = nv50_ram_put;
+ return 0;
+}
+
+static int
+nvaa_ram_init(struct nouveau_object *object)
+{
+ struct nouveau_fb *pfb = nouveau_fb(object);
+ struct nvaa_ram_priv *priv = (void *)object;
+ int ret;
+ u64 dniso, hostnb, flush;
+
+ ret = nouveau_ram_init(&priv->base);
+ if (ret)
+ return ret;
+
+ dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1;
+ hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1;
+ flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1;
+
+ /* Enable NISO poller for various clients and set their associated
+ * read address, only for MCP77/78 and MCP79/7A. (fd#25701)
+ */
+ nv_wr32(pfb, 0x100c18, dniso);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001);
+ nv_wr32(pfb, 0x100c1c, hostnb);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002);
+ nv_wr32(pfb, 0x100c24, flush);
+ nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000);
+
return 0;
}
@@ -60,7 +97,7 @@ nvaa_ram_oclass = {
.ofuncs = &(struct nouveau_ofuncs) {
.ctor = nvaa_ram_ctor,
.dtor = _nouveau_ram_dtor,
- .init = _nouveau_ram_init,
+ .init = nvaa_ram_init,
.fini = _nouveau_ram_fini,
},
};
diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
index a75c35ccf25c..165401c4045c 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c
@@ -24,13 +24,6 @@
#include "nv04.h"
-static void
-nv4c_mc_msi_rearm(struct nouveau_mc *pmc)
-{
- struct nv04_mc_priv *priv = (void *)pmc;
- nv_wr08(priv, 0x088050, 0xff);
-}
-
struct nouveau_oclass *
nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.base.handle = NV_SUBDEV(MC, 0x4c),
@@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) {
.fini = _nouveau_mc_fini,
},
.intr = nv04_mc_intr,
- .msi_rearm = nv4c_mc_msi_rearm,
}.base;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 21ec561edc99..bba2960d3dfb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
* so use the DMA API for them.
*/
if (!nv_device_is_cpu_coherent(device) &&
- ttm->caching_state == tt_uncached)
+ ttm->caching_state == tt_uncached) {
ttm_dma_unpopulate(ttm_dma, dev->dev);
+ return;
+ }
#if __OS_HAS_AGP
if (drm->agp.stat == ENABLED) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 5d93902a91ab..f8042433752b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -876,7 +876,6 @@ nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
if (ret)
return ret;
- bo->gem.dumb = true;
ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
drm_gem_object_unreference_unlocked(&bo->gem);
return ret;
@@ -892,14 +891,6 @@ nouveau_display_dumb_map_offset(struct drm_file *file_priv,
gem = drm_gem_object_lookup(dev, file_priv, handle);
if (gem) {
struct nouveau_bo *bo = nouveau_gem_object(gem);
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(!(gem->dumb || gem->import_attach),
- "Illegal dumb map of accelerated buffer.\n");
-
*poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
drm_gem_object_unreference_unlocked(gem);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 28d51a22a4bf..bf0f9e21d714 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -36,7 +36,14 @@ void
nouveau_gem_object_del(struct drm_gem_object *gem)
{
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
+ struct device *dev = drm->dev->dev;
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (WARN_ON(ret < 0 && ret != -EACCES))
+ return;
if (gem->import_attach)
drm_prime_gem_destroy(gem, nvbo->bo.sg);
@@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem)
/* reset filp so nouveau_bo_del_ttm() can test for it */
gem->filp = NULL;
ttm_bo_unref(&bo);
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
}
int
@@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct nouveau_vma *vma;
+ struct device *dev = drm->dev->dev;
int ret;
if (!cli->vm)
@@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
goto out;
}
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0 && ret != -EACCES)
+ goto out;
+
ret = nouveau_bo_vma_add(nvbo, cli->vm, vma);
- if (ret) {
+ if (ret)
kfree(vma);
- goto out;
- }
+
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
} else {
vma->refcount++;
}
@@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
{
struct nouveau_cli *cli = nouveau_cli(file_priv);
struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct device *dev = drm->dev->dev;
struct nouveau_vma *vma;
int ret;
@@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
vma = nouveau_bo_vma_find(nvbo, cli->vm);
if (vma) {
- if (--vma->refcount == 0)
- nouveau_gem_object_unmap(nvbo, vma);
+ if (--vma->refcount == 0) {
+ ret = pm_runtime_get_sync(dev);
+ if (!WARN_ON(ret < 0 && ret != -EACCES)) {
+ nouveau_gem_object_unmap(nvbo, vma);
+ pm_runtime_mark_last_busy(dev);
+ pm_runtime_put_autosuspend(dev);
+ }
+ }
}
ttm_bo_unreserve(&nvbo->bo);
}
@@ -444,9 +469,6 @@ validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
list_for_each_entry(nvbo, list, entry) {
struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
- WARN_ONCE(nvbo->gem.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
b->write_domains,
b->valid_domains);
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 753a6def61e7..3d1cfcb96b6b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -28,6 +28,7 @@
#include "nouveau_ttm.h"
#include "nouveau_gem.h"
+#include "drm_legacy.h"
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
@@ -281,7 +282,7 @@ nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
struct nouveau_drm *drm = nouveau_drm(file_priv->minor->dev);
if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
- return -EINVAL;
+ return drm_legacy_mmap(filp, vma);
return ttm_bo_mmap(filp, vma, &drm->ttm.bdev);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index d59ec491dbb9..ed644a4f6f57 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
return pll;
}
/* otherwise, pick one of the plls */
- if ((rdev->family == CHIP_KAVERI) ||
- (rdev->family == CHIP_KABINI) ||
+ if ((rdev->family == CHIP_KABINI) ||
(rdev->family == CHIP_MULLINS)) {
- /* KB/KV/ML has PPLL1 and PPLL2 */
+ /* KB/ML has PPLL1 and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc)
DRM_ERROR("unable to allocate a PPLL\n");
return ATOM_PPLL_INVALID;
} else {
- /* CI has PPLL0, PPLL1, and PPLL2 */
+ /* CI/KV has PPLL0, PPLL1, and PPLL2 */
pll_in_use = radeon_get_pll_use_mask(crtc);
if (!(pll_in_use & (1 << ATOM_PPLL2)))
return ATOM_PPLL2;
@@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc)
case ATOM_PPLL0:
/* disable the ppll */
if ((rdev->family == CHIP_ARUBA) ||
+ (rdev->family == CHIP_KAVERI) ||
(rdev->family == CHIP_BONAIRE) ||
(rdev->family == CHIP_HAWAII))
atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 11ba9d21b89b..db42a670f995 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector,
struct radeon_connector_atom_dig *dig_connector;
int dp_clock;
+ if ((mode->clock > 340000) &&
+ (!radeon_connector_is_dp12_capable(connector)))
+ return MODE_CLOCK_HIGH;
+
if (!radeon_connector->con_priv)
return MODE_CLOCK_HIGH;
dig_connector = radeon_connector->con_priv;
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 6dcde3798b45..64fdae558d36 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -6033,6 +6033,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
+ WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* compute doesn't have PFP */
if (usepfp) {
/* sync PFP to ME, otherwise we might get invalid PFP reads */
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index dde5c7e29eb2..42cd0cffe210 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -816,7 +816,6 @@ void cik_sdma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -903,6 +902,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib)
void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
unsigned vm_id, uint64_t pd_addr)
{
+ u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) |
+ SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */
+
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
if (vm_id < 8) {
radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2);
@@ -943,5 +945,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000));
radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
radeon_ring_write(ring, 1 << vm_id);
+
+ radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* reference */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */
}
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ba85986febea..03003f8a6de6 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -2156,4 +2156,6 @@
#define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu
#define ATC_VM_APERTURE1_LOW_ADDR 0x3304u
+#define IH_VMID_0_LUT 0x3D40u
+
#endif
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 2fe8cfc966d9..bafdf92a5732 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 9b42001295ba..e3e9c10cfba9 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev)
pi->enable_auto_thermal_throttling = true;
pi->disable_nb_ps3_in_battery = false;
if (radeon_bapm == -1) {
- /* There are stability issues reported on with
- * bapm enabled on an asrock system.
- */
- if (rdev->pdev->subsystem_vendor == 0x1849)
- pi->bapm_enable = false;
- else
+ /* only enable bapm on KB, ML by default */
+ if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS)
pi->bapm_enable = true;
+ else
+ pi->bapm_enable = false;
} else if (radeon_bapm == 0) {
pi->bapm_enable = false;
} else {
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 360de9f1f491..aea48c89b241 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0));
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c
index 50f88611ff60..ce787a9f12c0 100644
--- a/drivers/gpu/drm/radeon/ni_dma.c
+++ b/drivers/gpu/drm/radeon/ni_dma.c
@@ -372,7 +372,6 @@ void cayman_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -463,5 +462,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_SRBM_READ_PACKET);
+ radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2));
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0); /* value */
}
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2e12e4d69253..ad7125486894 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -1133,6 +1133,23 @@
#define PACKET3_MEM_SEMAPHORE 0x39
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_PFP_SYNC_ME 0x42
#define PACKET3_SURFACE_SYNC 0x43
@@ -1272,6 +1289,13 @@
(1 << 21) | \
(((n) & 0xFFFFF) << 0))
+#define DMA_SRBM_POLL_PACKET ((9 << 28) | \
+ (1 << 27) | \
+ (1 << 26))
+
+#define DMA_SRBM_READ_PACKET ((9 << 28) | \
+ (1 << 27))
+
/* async DMA Packet types */
#define DMA_PACKET_WRITE 0x2
#define DMA_PACKET_COPY 0x3
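
The WAIT_REG_MEM field macros added to nid.h are what the cayman_vm_flush() hunk above uses to build its "wait for the invalidate to complete" packet: FUNCTION(0) selects the "always" compare, MEM_SPACE is left at 0 for register space, and ENGINE(0) has the ME rather than the PFP do the polling. A small sketch of how the fields compose into the control dword; the register offset below is only a stand-in, the real value comes from nid.h:

#include <stdint.h>
#include <stdio.h>

#define WAIT_REG_MEM_FUNCTION(x)  ((x) << 0)  /* 0 = always, 3 = ==, ... */
#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)  /* 0 = register, 1 = memory */
#define WAIT_REG_MEM_ENGINE(x)    ((x) << 8)  /* 0 = ME, 1 = PFP */

#define VM_INVALIDATE_REQUEST 0x1478  /* stand-in offset; use the nid.h value */

int main(void)
{
	/* control word emitted right after the PACKET3_WAIT_REG_MEM header */
	uint32_t ctrl = WAIT_REG_MEM_FUNCTION(0) |	/* always true */
			WAIT_REG_MEM_MEM_SPACE(0) |	/* poll a register */
			WAIT_REG_MEM_ENGINE(0);		/* ME engine */

	printf("control dword:  0x%08x\n", ctrl);
	printf("register dword: 0x%08x\n", (uint32_t)VM_INVALIDATE_REQUEST >> 2);
	return 0;
}
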
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 74f06d540591..279801ca5110 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -644,6 +644,7 @@ int r100_pci_gart_init(struct radeon_device *rdev)
return r;
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
return radeon_gart_table_ram_alloc(rdev);
}
@@ -681,11 +682,16 @@ void r100_pci_gart_disable(struct radeon_device *rdev)
WREG32(RADEON_AIC_HI_ADDR, 0);
}
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags)
+{
+ return addr;
+}
+
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+ uint64_t entry)
{
u32 *gtt = rdev->gart.ptr;
- gtt[i] = cpu_to_le32(lower_32_bits(addr));
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
void r100_pci_gart_fini(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 064ad5569cca..08d68f3e13e9 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -73,11 +73,8 @@ void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
#define R300_PTE_WRITEABLE (1 << 2)
#define R300_PTE_READABLE (1 << 3)
-void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = rdev->gart.ptr;
-
addr = (lower_32_bits(addr) >> 8) |
((upper_32_bits(addr) & 0xff) << 24);
if (flags & RADEON_GART_PAGE_READ)
@@ -86,10 +83,18 @@ void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R300_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
addr |= R300_PTE_UNSNOOPED;
+ return addr;
+}
+
+void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = rdev->gart.ptr;
+
/* on x86 we want this to be CPU endian, on powerpc
* on powerpc without HW swappers, it'll get swapped on way
* into VRAM - so no need for cpu_to_le32 on VRAM tables */
- writel(addr, ((void __iomem *)ptr) + (i * 4));
+ writel(entry, ((void __iomem *)ptr) + (i * 4));
}
int rv370_pcie_gart_init(struct radeon_device *rdev)
@@ -109,6 +114,7 @@ int rv370_pcie_gart_init(struct radeon_device *rdev)
DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
return radeon_gart_table_vram_alloc(rdev);
}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 54529b837afa..3f2a8d3febca 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -242,6 +242,7 @@ bool radeon_get_bios(struct radeon_device *rdev);
* Dummy page
*/
struct radeon_dummy_page {
+ uint64_t entry;
struct page *page;
dma_addr_t addr;
};
@@ -645,7 +646,7 @@ struct radeon_gart {
unsigned num_cpu_pages;
unsigned table_size;
struct page **pages;
- dma_addr_t *pages_addr;
+ uint64_t *pages_entry;
bool ready;
};
@@ -1847,8 +1848,9 @@ struct radeon_asic {
/* gart */
struct {
void (*tlb_flush)(struct radeon_device *rdev);
+ uint64_t (*get_page_entry)(uint64_t addr, uint32_t flags);
void (*set_page)(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
} gart;
struct {
int (*init)(struct radeon_device *rdev);
@@ -2852,7 +2854,8 @@ static inline void radeon_ring_write(struct radeon_ring *ring, uint32_t v)
#define radeon_vga_set_state(rdev, state) (rdev)->asic->vga_set_state((rdev), (state))
#define radeon_asic_reset(rdev) (rdev)->asic->asic_reset((rdev))
#define radeon_gart_tlb_flush(rdev) (rdev)->asic->gart.tlb_flush((rdev))
-#define radeon_gart_set_page(rdev, i, p, f) (rdev)->asic->gart.set_page((rdev), (i), (p), (f))
+#define radeon_gart_get_page_entry(a, f) (rdev)->asic->gart.get_page_entry((a), (f))
+#define radeon_gart_set_page(rdev, i, e) (rdev)->asic->gart.set_page((rdev), (i), (e))
#define radeon_asic_vm_init(rdev) (rdev)->asic->vm.init((rdev))
#define radeon_asic_vm_fini(rdev) (rdev)->asic->vm.fini((rdev))
#define radeon_asic_vm_copy_pages(rdev, ib, pe, src, count) ((rdev)->asic->vm.copy_pages((rdev), (ib), (pe), (src), (count)))
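
The radeon.h changes split the old gart.set_page(rdev, i, addr, flags) callback into a pure gart.get_page_entry(addr, flags) encoder and a gart.set_page(rdev, i, entry) writer, and the GART now caches encoded values in pages_entry instead of raw DMA addresses. The entry can therefore be computed once, kept while the table object is unmapped, and replayed later. A minimal sketch of the same split, using a made-up PTE layout rather than any real ASIC's:

#include <stdint.h>
#include <stdio.h>

#define PTE_VALID (1u << 0)	/* illustrative flag bits, not a real layout */
#define PTE_SNOOP (1u << 1)

/* encoder: pure function of address + flags, touches no device state */
static uint64_t gart_get_page_entry(uint64_t addr, uint32_t flags)
{
	return (addr & ~0xFFFull) | (flags & (PTE_VALID | PTE_SNOOP));
}

/* writer: stores an already-encoded entry at slot i */
static void gart_set_page(uint64_t *table, unsigned i, uint64_t entry)
{
	table[i] = entry;
}

int main(void)
{
	uint64_t table[4] = { 0 };
	uint64_t cached[4];
	unsigned i;

	/* encode once, cache, and write through while the table is mapped */
	for (i = 0; i < 4; i++) {
		cached[i] = gart_get_page_entry(0x100000ull + i * 0x1000, PTE_VALID);
		gart_set_page(table, i, cached[i]);
	}
	printf("entry 2: 0x%llx\n", (unsigned long long)table[2]);
	return 0;
}
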
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 850de57069be..ed0e10eee2dc 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -159,11 +159,13 @@ void radeon_agp_disable(struct radeon_device *rdev)
DRM_INFO("Forcing AGP to PCIE mode\n");
rdev->flags |= RADEON_IS_PCIE;
rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry;
rdev->asic->gart.set_page = &rv370_pcie_gart_set_page;
} else {
DRM_INFO("Forcing AGP to PCI mode\n");
rdev->flags |= RADEON_IS_PCI;
rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart.get_page_entry = &r100_pci_gart_get_page_entry;
rdev->asic->gart.set_page = &r100_pci_gart_set_page;
}
rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
@@ -199,6 +201,7 @@ static struct radeon_asic r100_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -265,6 +268,7 @@ static struct radeon_asic r200_asic = {
.mc_wait_for_idle = &r100_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -333,6 +337,20 @@ static struct radeon_asic_ring r300_gfx_ring = {
.set_wptr = &r100_gfx_set_wptr,
};
+static struct radeon_asic_ring rv515_gfx_ring = {
+ .ib_execute = &r100_ring_ib_execute,
+ .emit_fence = &r300_fence_ring_emit,
+ .emit_semaphore = &r100_semaphore_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ib_test = &r100_ib_test,
+ .is_lockup = &r100_gpu_is_lockup,
+ .get_rptr = &r100_gfx_get_rptr,
+ .get_wptr = &r100_gfx_get_wptr,
+ .set_wptr = &r100_gfx_set_wptr,
+};
+
static struct radeon_asic r300_asic = {
.init = &r300_init,
.fini = &r300_fini,
@@ -345,6 +363,7 @@ static struct radeon_asic r300_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &r100_pci_gart_tlb_flush,
+ .get_page_entry = &r100_pci_gart_get_page_entry,
.set_page = &r100_pci_gart_set_page,
},
.ring = {
@@ -411,6 +430,7 @@ static struct radeon_asic r300_asic_pcie = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -477,6 +497,7 @@ static struct radeon_asic r420_asic = {
.mc_wait_for_idle = &r300_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
@@ -543,6 +564,7 @@ static struct radeon_asic rs400_asic = {
.mc_wait_for_idle = &rs400_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -609,6 +631,7 @@ static struct radeon_asic rs600_asic = {
.mc_wait_for_idle = &rs600_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs600_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -677,6 +700,7 @@ static struct radeon_asic rs690_asic = {
.mc_wait_for_idle = &rs690_mc_wait_for_idle,
.gart = {
.tlb_flush = &rs400_gart_tlb_flush,
+ .get_page_entry = &rs400_gart_get_page_entry,
.set_page = &rs400_gart_set_page,
},
.ring = {
@@ -745,10 +769,11 @@ static struct radeon_asic rv515_asic = {
.mc_wait_for_idle = &rv515_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -811,10 +836,11 @@ static struct radeon_asic r520_asic = {
.mc_wait_for_idle = &r520_mc_wait_for_idle,
.gart = {
.tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .get_page_entry = &rv370_pcie_gart_get_page_entry,
.set_page = &rv370_pcie_gart_set_page,
},
.ring = {
- [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring
+ [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring
},
.irq = {
.set = &rs600_irq_set,
@@ -905,6 +931,7 @@ static struct radeon_asic r600_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -990,6 +1017,7 @@ static struct radeon_asic rv6xx_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1081,6 +1109,7 @@ static struct radeon_asic rs780_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1185,6 +1214,7 @@ static struct radeon_asic rv770_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &r600_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1303,6 +1333,7 @@ static struct radeon_asic evergreen_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1395,6 +1426,7 @@ static struct radeon_asic sumo_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1486,6 +1518,7 @@ static struct radeon_asic btc_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &evergreen_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.ring = {
@@ -1621,6 +1654,7 @@ static struct radeon_asic cayman_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1724,6 +1758,7 @@ static struct radeon_asic trinity_asic = {
.get_gpu_clock_counter = &r600_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cayman_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -1857,6 +1892,7 @@ static struct radeon_asic si_asic = {
.get_gpu_clock_counter = &si_get_gpu_clock_counter,
.gart = {
.tlb_flush = &si_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2018,6 +2054,7 @@ static struct radeon_asic ci_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
@@ -2125,6 +2162,7 @@ static struct radeon_asic kv_asic = {
.get_gpu_clock_counter = &cik_get_gpu_clock_counter,
.gart = {
.tlb_flush = &cik_pcie_gart_tlb_flush,
+ .get_page_entry = &rs600_gart_get_page_entry,
.set_page = &rs600_gart_set_page,
},
.vm = {
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 2a45d548d5ec..8d787d115653 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -67,8 +67,9 @@ bool r100_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *cp);
int r100_asic_reset(struct radeon_device *rdev);
u32 r100_get_vblank_counter(struct radeon_device *rdev, int crtc);
void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t r100_pci_gart_get_page_entry(uint64_t addr, uint32_t flags);
void r100_pci_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
void r100_ring_start(struct radeon_device *rdev, struct radeon_ring *ring);
int r100_irq_set(struct radeon_device *rdev);
int r100_irq_process(struct radeon_device *rdev);
@@ -172,8 +173,9 @@ extern void r300_fence_ring_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
extern int r300_cs_parse(struct radeon_cs_parser *p);
extern void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev);
+extern uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags);
extern void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -208,8 +210,9 @@ extern void rs400_fini(struct radeon_device *rdev);
extern int rs400_suspend(struct radeon_device *rdev);
extern int rs400_resume(struct radeon_device *rdev);
void rs400_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
int rs400_gart_init(struct radeon_device *rdev);
@@ -232,8 +235,9 @@ int rs600_irq_process(struct radeon_device *rdev);
void rs600_irq_disable(struct radeon_device *rdev);
u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc);
void rs600_gart_tlb_flush(struct radeon_device *rdev);
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags);
void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags);
+ uint64_t entry);
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs600_bandwidth_update(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 0ec65168f331..bd7519fdd3f4 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -774,6 +774,8 @@ int radeon_dummy_page_init(struct radeon_device *rdev)
rdev->dummy_page.page = NULL;
return -ENOMEM;
}
+ rdev->dummy_page.entry = radeon_gart_get_page_entry(rdev->dummy_page.addr,
+ RADEON_GART_PAGE_DUMMY);
return 0;
}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 84146d5901aa..5450fa95a47e 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -165,6 +165,19 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
radeon_bo_unpin(rdev->gart.robj);
radeon_bo_unreserve(rdev->gart.robj);
rdev->gart.table_addr = gpu_addr;
+
+ if (!r) {
+ int i;
+
+ /* We might have dropped some GART table updates while it wasn't
+ * mapped, restore all entries
+ */
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ radeon_gart_set_page(rdev, i, rdev->gart.pages_entry[i]);
+ mb();
+ radeon_gart_tlb_flush(rdev);
+ }
+
return r;
}
@@ -228,7 +241,6 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
unsigned t;
unsigned p;
int i, j;
- u64 page_base;
if (!rdev->gart.ready) {
WARN(1, "trying to unbind memory from uninitialized GART !\n");
@@ -239,14 +251,12 @@ void radeon_gart_unbind(struct radeon_device *rdev, unsigned offset,
for (i = 0; i < pages; i++, p++) {
if (rdev->gart.pages[p]) {
rdev->gart.pages[p] = NULL;
- rdev->gart.pages_addr[p] = rdev->dummy_page.addr;
- page_base = rdev->gart.pages_addr[p];
for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ rdev->gart.pages_entry[t] = rdev->dummy_page.entry;
if (rdev->gart.ptr) {
- radeon_gart_set_page(rdev, t, page_base,
- RADEON_GART_PAGE_DUMMY);
+ radeon_gart_set_page(rdev, t,
+ rdev->dummy_page.entry);
}
- page_base += RADEON_GPU_PAGE_SIZE;
}
}
}
@@ -274,7 +284,7 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
{
unsigned t;
unsigned p;
- uint64_t page_base;
+ uint64_t page_base, page_entry;
int i, j;
if (!rdev->gart.ready) {
@@ -285,14 +295,15 @@ int radeon_gart_bind(struct radeon_device *rdev, unsigned offset,
p = t / (PAGE_SIZE / RADEON_GPU_PAGE_SIZE);
for (i = 0; i < pages; i++, p++) {
- rdev->gart.pages_addr[p] = dma_addr[i];
rdev->gart.pages[p] = pagelist[i];
- if (rdev->gart.ptr) {
- page_base = rdev->gart.pages_addr[p];
- for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
- radeon_gart_set_page(rdev, t, page_base, flags);
- page_base += RADEON_GPU_PAGE_SIZE;
+ page_base = dma_addr[i];
+ for (j = 0; j < (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); j++, t++) {
+ page_entry = radeon_gart_get_page_entry(page_base, flags);
+ rdev->gart.pages_entry[t] = page_entry;
+ if (rdev->gart.ptr) {
+ radeon_gart_set_page(rdev, t, page_entry);
}
+ page_base += RADEON_GPU_PAGE_SIZE;
}
}
mb();
@@ -334,16 +345,15 @@ int radeon_gart_init(struct radeon_device *rdev)
radeon_gart_fini(rdev);
return -ENOMEM;
}
- rdev->gart.pages_addr = vzalloc(sizeof(dma_addr_t) *
- rdev->gart.num_cpu_pages);
- if (rdev->gart.pages_addr == NULL) {
+ rdev->gart.pages_entry = vmalloc(sizeof(uint64_t) *
+ rdev->gart.num_gpu_pages);
+ if (rdev->gart.pages_entry == NULL) {
radeon_gart_fini(rdev);
return -ENOMEM;
}
/* set GART entry to point to the dummy page by default */
- for (i = 0; i < rdev->gart.num_cpu_pages; i++) {
- rdev->gart.pages_addr[i] = rdev->dummy_page.addr;
- }
+ for (i = 0; i < rdev->gart.num_gpu_pages; i++)
+ rdev->gart.pages_entry[i] = rdev->dummy_page.entry;
return 0;
}
@@ -356,15 +366,15 @@ int radeon_gart_init(struct radeon_device *rdev)
*/
void radeon_gart_fini(struct radeon_device *rdev)
{
- if (rdev->gart.pages && rdev->gart.pages_addr && rdev->gart.ready) {
+ if (rdev->gart.ready) {
/* unbind pages */
radeon_gart_unbind(rdev, 0, rdev->gart.num_cpu_pages);
}
rdev->gart.ready = false;
vfree(rdev->gart.pages);
- vfree(rdev->gart.pages_addr);
+ vfree(rdev->gart.pages_entry);
rdev->gart.pages = NULL;
- rdev->gart.pages_addr = NULL;
+ rdev->gart.pages_entry = NULL;
radeon_dummy_page_fini(rdev);
}
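
radeon_gart_table_vram_pin() now replays every cached pages_entry value into the table once the BO is pinned, because updates made while the table was not mapped would otherwise be lost. A toy, self-contained sketch of that replay step; restore_table() stands in for the loop added to the pin path:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NUM_GPU_PAGES 4

/* entries kept in system memory even while the GPU-visible table is gone */
static uint64_t pages_entry[NUM_GPU_PAGES];

/* stand-in for writing one slot of the (re)mapped GART table */
static void gart_set_page(uint64_t *table, unsigned i, uint64_t entry)
{
	table[i] = entry;
}

/* stand-in for the restore loop in radeon_gart_table_vram_pin() */
static void restore_table(uint64_t *table)
{
	unsigned i;

	for (i = 0; i < NUM_GPU_PAGES; i++)
		gart_set_page(table, i, pages_entry[i]);
	/* the driver follows this with mb() and a TLB flush */
}

int main(void)
{
	uint64_t table[NUM_GPU_PAGES];
	unsigned i;

	for (i = 0; i < NUM_GPU_PAGES; i++)
		pages_entry[i] = 0x100000ull + i * 0x1000;

	memset(table, 0, sizeof(table));	/* freshly pinned, empty table */
	restore_table(table);
	printf("slot 3 after restore: 0x%llx\n", (unsigned long long)table[3]);
	return 0;
}
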
diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c
index fe48f229043e..d0b4f7d1140d 100644
--- a/drivers/gpu/drm/radeon/radeon_gem.c
+++ b/drivers/gpu/drm/radeon/radeon_gem.c
@@ -394,10 +394,9 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
return r;
}
-static int radeon_mode_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, bool dumb,
- uint64_t *offset_p)
+int radeon_mode_dumb_mmap(struct drm_file *filp,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *offset_p)
{
struct drm_gem_object *gobj;
struct radeon_bo *robj;
@@ -406,14 +405,6 @@ static int radeon_mode_mmap(struct drm_file *filp,
if (gobj == NULL) {
return -ENOENT;
}
-
- /*
- * We don't allow dumb mmaps on objects created using another
- * interface.
- */
- WARN_ONCE(dumb && !(gobj->dumb || gobj->import_attach),
- "Illegal dumb map of GPU buffer.\n");
-
robj = gem_to_radeon_bo(gobj);
if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
drm_gem_object_unreference_unlocked(gobj);
@@ -424,20 +415,12 @@ static int radeon_mode_mmap(struct drm_file *filp,
return 0;
}
-int radeon_mode_dumb_mmap(struct drm_file *filp,
- struct drm_device *dev,
- uint32_t handle, uint64_t *offset_p)
-{
- return radeon_mode_mmap(filp, dev, handle, true, offset_p);
-}
-
int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
struct drm_file *filp)
{
struct drm_radeon_gem_mmap *args = data;
- return radeon_mode_mmap(filp, dev, args->handle, false,
- &args->addr_ptr);
+ return radeon_mode_dumb_mmap(filp, dev, args->handle, &args->addr_ptr);
}
int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -593,7 +576,7 @@ error_unreserve:
error_free:
drm_free_large(vm_bos);
- if (r)
+ if (r && r != -ERESTARTSYS)
DRM_ERROR("Couldn't update BO_VA (%d)\n", r);
}
@@ -763,7 +746,6 @@ int radeon_mode_dumb_create(struct drm_file *file_priv,
return -ENOMEM;
r = drm_gem_handle_create(file_priv, gobj, &handle);
- gobj->dumb = true;
/* drop reference from allocate - handle holds it now */
drm_gem_object_unreference_unlocked(gobj);
if (r) {
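
The last radeon_gem.c hunk stops printing an error when the BO_VA update fails with -ERESTARTSYS, which is not a real failure but the usual "interrupted by a signal, the syscall will be restarted" return. A tiny sketch of that logging policy:

#include <errno.h>
#include <stdio.h>

#ifndef ERESTARTSYS
#define ERESTARTSYS 512		/* kernel-internal "restart the syscall" code */
#endif

static void report_bo_va_result(int r)
{
	/* only complain about genuine failures, not signal-driven restarts */
	if (r && r != -ERESTARTSYS)
		fprintf(stderr, "Couldn't update BO_VA (%d)\n", r);
}

int main(void)
{
	report_bo_va_result(0);			/* success: silent */
	report_bo_va_result(-ERESTARTSYS);	/* restart: silent */
	report_bo_va_result(-ENOMEM);		/* real error: logged */
	return 0;
}
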
diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c
index 065d02068ec3..bef9a0953284 100644
--- a/drivers/gpu/drm/radeon/radeon_kfd.c
+++ b/drivers/gpu/drm/radeon/radeon_kfd.c
@@ -28,6 +28,8 @@
#include "cikd.h"
#include "cik_reg.h"
#include "radeon_kfd.h"
+#include "radeon_ucode.h"
+#include <linux/firmware.h>
#define CIK_PIPE_PER_MEC (4)
@@ -49,6 +51,7 @@ static uint64_t get_vmem_size(struct kgd_dev *kgd);
static uint64_t get_gpu_clock_counter(struct kgd_dev *kgd);
static uint32_t get_max_engine_clock_in_mhz(struct kgd_dev *kgd);
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
/*
* Register access functions
@@ -69,7 +72,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
uint32_t queue_id, uint32_t __user *wptr);
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id);
static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
@@ -89,14 +92,16 @@ static const struct kfd2kgd_calls kfd2kgd = {
.init_memory = kgd_init_memory,
.init_pipeline = kgd_init_pipeline,
.hqd_load = kgd_hqd_load,
- .hqd_is_occupies = kgd_hqd_is_occupies,
+ .hqd_is_occupied = kgd_hqd_is_occupied,
.hqd_destroy = kgd_hqd_destroy,
+ .get_fw_version = get_fw_version
};
static const struct kgd2kfd_calls *kgd2kfd;
bool radeon_kfd_init(void)
{
+#if defined(CONFIG_HSA_AMD_MODULE)
bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*,
const struct kgd2kfd_calls**);
@@ -113,6 +118,17 @@ bool radeon_kfd_init(void)
}
return true;
+#elif defined(CONFIG_HSA_AMD)
+ if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) {
+ kgd2kfd = NULL;
+
+ return false;
+ }
+
+ return true;
+#else
+ return false;
+#endif
}
void radeon_kfd_fini(void)
@@ -374,6 +390,10 @@ static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
cpu_relax();
write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);
+ /* Mapping vmid to pasid also for IH block */
+ write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t),
+ pasid_mapping);
+
return 0;
}
@@ -416,7 +436,7 @@ static int kgd_init_memory(struct kgd_dev *kgd)
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
- uint32_t mec = (++pipe_id / CIK_PIPE_PER_MEC) + 1;
+ uint32_t mec = (pipe_id / CIK_PIPE_PER_MEC) + 1;
uint32_t pipe = (pipe_id % CIK_PIPE_PER_MEC);
lock_srbm(kgd, mec, pipe, 0, 0);
@@ -513,7 +533,7 @@ static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
return 0;
}
-static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address,
+static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
uint32_t pipe_id, uint32_t queue_id)
{
uint32_t act;
@@ -552,6 +572,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
if (timeout == 0) {
pr_err("kfd: cp queue preemption time out (%dms)\n",
temp);
+ release_queue(kgd);
return -ETIME;
}
msleep(20);
@@ -561,3 +582,52 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type,
release_queue(kgd);
return 0;
}
+
+static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
+{
+ struct radeon_device *rdev = (struct radeon_device *) kgd;
+ const union radeon_firmware_header *hdr;
+
+ BUG_ON(kgd == NULL || rdev->mec_fw == NULL);
+
+ switch (type) {
+ case KGD_ENGINE_PFP:
+ hdr = (const union radeon_firmware_header *) rdev->pfp_fw->data;
+ break;
+
+ case KGD_ENGINE_ME:
+ hdr = (const union radeon_firmware_header *) rdev->me_fw->data;
+ break;
+
+ case KGD_ENGINE_CE:
+ hdr = (const union radeon_firmware_header *) rdev->ce_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC1:
+ hdr = (const union radeon_firmware_header *) rdev->mec_fw->data;
+ break;
+
+ case KGD_ENGINE_MEC2:
+ hdr = (const union radeon_firmware_header *)
+ rdev->mec2_fw->data;
+ break;
+
+ case KGD_ENGINE_RLC:
+ hdr = (const union radeon_firmware_header *) rdev->rlc_fw->data;
+ break;
+
+ case KGD_ENGINE_SDMA:
+ hdr = (const union radeon_firmware_header *)
+ rdev->sdma_fw->data;
+ break;
+
+ default:
+ return 0;
+ }
+
+ if (hdr == NULL)
+ return 0;
+
+ /* Only 12 bit in use*/
+ /* Only 12 bits in use */
+ return hdr->common.ucode_version;
+}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 7d68223eb469..86fc56434b28 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -529,9 +529,6 @@ int radeon_bo_list_validate(struct radeon_device *rdev,
u32 current_domain =
radeon_mem_type_to_domain(bo->tbo.mem.mem_type);
- WARN_ONCE(bo->gem_base.dumb,
- "GPU use of dumb buffer is illegal.\n");
-
/* Check if this buffer will be moved and don't move it
* if we have moved too many buffers for this IB already.
*
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 32522cc940a1..f7da8fe96a66 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1287,8 +1287,39 @@ dpm_failed:
return ret;
}
+struct radeon_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+};
+
+/* cards with dpm stability problems */
+static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = {
+ /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */
+ { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
+ /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */
+ { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
+ { 0, 0, 0, 0 },
+};
+
int radeon_pm_init(struct radeon_device *rdev)
{
+ struct radeon_dpm_quirk *p = radeon_dpm_quirk_list;
+ bool disable_dpm = false;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ disable_dpm = true;
+ break;
+ }
+ ++p;
+ }
+
/* enable dpm on rv6xx+ */
switch (rdev->family) {
case CHIP_RV610:
@@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev)
(!(rdev->flags & RADEON_IS_IGP)) &&
(!rdev->smc_fw))
rdev->pm.pm_method = PM_METHOD_PROFILE;
+ else if (disable_dpm && (radeon_dpm == -1))
+ rdev->pm.pm_method = PM_METHOD_PROFILE;
else if (radeon_dpm == 0)
rdev->pm.pm_method = PM_METHOD_PROFILE;
else
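
The radeon_pm.c hunk introduces a quirk table keyed on the full PCI identity (vendor, device, subsystem vendor, subsystem device) and falls back from DPM to the old profile method when a listed board is found and radeon_dpm was left at its default. A standalone sketch of that table walk; the IDs are copied from the hunk, and struct pci_id merely stands in for the fields the driver reads from rdev->pdev:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PCI_VENDOR_ID_ATI 0x1002

struct pci_id {
	uint16_t vendor, device, subsys_vendor, subsys_device;
};

struct dpm_quirk {
	uint16_t chip_vendor, chip_device, subsys_vendor, subsys_device;
};

/* boards with known DPM stability problems (IDs taken from the hunk above) */
static const struct dpm_quirk quirk_list[] = {
	{ PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 },
	{ PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 },
	{ 0, 0, 0, 0 },				/* terminator */
};

static bool dpm_quirked(const struct pci_id *id)
{
	const struct dpm_quirk *p;

	for (p = quirk_list; p->chip_device != 0; p++)
		if (id->vendor == p->chip_vendor &&
		    id->device == p->chip_device &&
		    id->subsys_vendor == p->subsys_vendor &&
		    id->subsys_device == p->subsys_device)
			return true;
	return false;
}

int main(void)
{
	struct pci_id turks = { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 };

	printf("quirked: %d\n", dpm_quirked(&turks));	/* 1 */
	return 0;
}
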
diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
index 535403e0c8a2..15aee723db77 100644
--- a/drivers/gpu/drm/radeon/radeon_state.c
+++ b/drivers/gpu/drm/radeon/radeon_state.c
@@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev,
u32 format;
u32 *buffer;
const u8 __user *data;
- int size, dwords, tex_width, blit_width, spitch;
+ unsigned int size, dwords, tex_width, blit_width, spitch;
u32 height;
int i;
u32 texpitch, microtile;
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index cde48c42b30a..06d2246d07f1 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -587,10 +587,8 @@ uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr)
uint64_t result;
/* page table offset */
- result = rdev->gart.pages_addr[addr >> PAGE_SHIFT];
-
- /* in case cpu page size != gpu page size*/
- result |= addr & (~PAGE_MASK);
+ result = rdev->gart.pages_entry[addr >> RADEON_GPU_PAGE_SHIFT];
+ result &= ~RADEON_GPU_PAGE_MASK;
return result;
}
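
With pages_entry caching the fully encoded PTE, radeon_vm_map_gart() can no longer return a raw DMA address: it now indexes by GPU page and masks off the low RADEON_GPU_PAGE_MASK bits, where the per-page flag bits live, so the VM code gets a clean page-aligned address to combine with its own flags. (That is also why the explicit '& 0xFFFFFFFFFFFFF000ULL' in ni_dma.c and si_dma.c could be dropped.) A quick sketch of the arithmetic, using an illustrative entry layout:

#include <stdint.h>
#include <stdio.h>

#define GPU_PAGE_SHIFT 12
#define GPU_PAGE_MASK  ((1ull << GPU_PAGE_SHIFT) - 1)

int main(void)
{
	/* cached entry: page-aligned address in the high bits, flags in the low 12 */
	uint64_t entry = 0x00000000abcd6000ull | 0x3f;

	uint64_t addr_part = entry & ~GPU_PAGE_MASK;	/* what map_gart returns */
	uint64_t flag_part = entry & GPU_PAGE_MASK;	/* stripped before reuse */

	printf("address part: 0x%llx\n", (unsigned long long)addr_part);
	printf("flag part:    0x%llx\n", (unsigned long long)flag_part);
	return 0;
}
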
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index c5799f16aa4b..34e3235f41d2 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -212,11 +212,9 @@ void rs400_gart_fini(struct radeon_device *rdev)
#define RS400_PTE_WRITEABLE (1 << 2)
#define RS400_PTE_READABLE (1 << 3)
-void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs400_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
uint32_t entry;
- u32 *gtt = rdev->gart.ptr;
entry = (lower_32_bits(addr) & PAGE_MASK) |
((upper_32_bits(addr) & 0xff) << 4);
@@ -226,8 +224,14 @@ void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
entry |= RS400_PTE_WRITEABLE;
if (!(flags & RADEON_GART_PAGE_SNOOP))
entry |= RS400_PTE_UNSNOOPED;
- entry = cpu_to_le32(entry);
- gtt[i] = entry;
+ return entry;
+}
+
+void rs400_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ u32 *gtt = rdev->gart.ptr;
+ gtt[i] = cpu_to_le32(lower_32_bits(entry));
}
int rs400_mc_wait_for_idle(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 9acb1c3c005b..74bce91aecc1 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -625,11 +625,8 @@ static void rs600_gart_fini(struct radeon_device *rdev)
radeon_gart_table_vram_free(rdev);
}
-void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
- uint64_t addr, uint32_t flags)
+uint64_t rs600_gart_get_page_entry(uint64_t addr, uint32_t flags)
{
- void __iomem *ptr = (void *)rdev->gart.ptr;
-
addr = addr & 0xFFFFFFFFFFFFF000ULL;
addr |= R600_PTE_SYSTEM;
if (flags & RADEON_GART_PAGE_VALID)
@@ -640,7 +637,14 @@ void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
addr |= R600_PTE_WRITEABLE;
if (flags & RADEON_GART_PAGE_SNOOP)
addr |= R600_PTE_SNOOPED;
- writeq(addr, ptr + (i * 8));
+ return addr;
+}
+
+void rs600_gart_set_page(struct radeon_device *rdev, unsigned i,
+ uint64_t entry)
+{
+ void __iomem *ptr = (void *)rdev->gart.ptr;
+ writeq(entry, ptr + (i * 8));
}
int rs600_irq_set(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 60df444bd075..5d89b874a1a2 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, 0);
radeon_ring_write(ring, 1 << vm_id);
+ /* wait for the invalidate to complete */
+ radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
+ radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */
+ WAIT_REG_MEM_ENGINE(0))); /* me */
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2);
+ radeon_ring_write(ring, 0);
+ radeon_ring_write(ring, 0); /* ref */
+ radeon_ring_write(ring, 0); /* mask */
+ radeon_ring_write(ring, 0x20); /* poll interval */
+
/* sync PFP to ME, otherwise we might get invalid PFP reads */
radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
radeon_ring_write(ring, 0x0);
diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c
index f5cc777e1c5f..83207929fc62 100644
--- a/drivers/gpu/drm/radeon/si_dma.c
+++ b/drivers/gpu/drm/radeon/si_dma.c
@@ -123,7 +123,6 @@ void si_dma_vm_write_pages(struct radeon_device *rdev,
for (; ndw > 0; ndw -= 2, --count, pe += 8) {
if (flags & R600_PTE_SYSTEM) {
value = radeon_vm_map_gart(rdev, addr);
- value &= 0xFFFFFFFFFFFFF000ULL;
} else if (flags & R600_PTE_VALID) {
value = addr;
} else {
@@ -206,6 +205,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring,
radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0));
radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2));
radeon_ring_write(ring, 1 << vm_id);
+
+ /* wait for invalidate to complete */
+ radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0));
+ radeon_ring_write(ring, VM_INVALIDATE_REQUEST);
+ radeon_ring_write(ring, 0xff << 16); /* retry */
+ radeon_ring_write(ring, 1 << vm_id); /* mask */
+ radeon_ring_write(ring, 0); /* value */
+ radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */
}
/**
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 32e354b8b0ab..eff8a6444956 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev)
return ret;
}
+struct si_dpm_quirk {
+ u32 chip_vendor;
+ u32 chip_device;
+ u32 subsys_vendor;
+ u32 subsys_device;
+ u32 max_sclk;
+ u32 max_mclk;
+};
+
+/* cards with dpm stability problems */
+static struct si_dpm_quirk si_dpm_quirk_list[] = {
+ /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
+ { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { 0, 0, 0, 0 },
+};
+
static void si_apply_state_adjust_rules(struct radeon_device *rdev,
struct radeon_ps *rps)
{
@@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
u32 mclk, sclk;
u16 vddc, vddci;
u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
+ u32 max_sclk = 0, max_mclk = 0;
int i;
+ struct si_dpm_quirk *p = si_dpm_quirk_list;
+
+ /* Apply dpm quirks */
+ while (p && p->chip_device != 0) {
+ if (rdev->pdev->vendor == p->chip_vendor &&
+ rdev->pdev->device == p->chip_device &&
+ rdev->pdev->subsystem_vendor == p->subsys_vendor &&
+ rdev->pdev->subsystem_device == p->subsys_device) {
+ max_sclk = p->max_sclk;
+ max_mclk = p->max_mclk;
+ break;
+ }
+ ++p;
+ }
if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
ni_dpm_vblank_too_short(rdev))
@@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
if (ps->performance_levels[i].mclk > max_mclk_vddc)
ps->performance_levels[i].mclk = max_mclk_vddc;
}
+ if (max_mclk) {
+ if (ps->performance_levels[i].mclk > max_mclk)
+ ps->performance_levels[i].mclk = max_mclk;
+ }
+ if (max_sclk) {
+ if (ps->performance_levels[i].sclk > max_sclk)
+ ps->performance_levels[i].sclk = max_sclk;
+ }
}
/* XXX validate the min clocks required for display */
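
si_dpm.c gets the same kind of quirk table, extended with per-board sclk/mclk ceilings (0 meaning "no limit"), and si_apply_state_adjust_rules() clamps every performance level against them. A minimal sketch of the clamping step:

#include <stdint.h>
#include <stdio.h>

struct perf_level { uint32_t sclk, mclk; };

/* clamp a level against quirk ceilings; a ceiling of 0 means unrestricted */
static void clamp_level(struct perf_level *lvl, uint32_t max_sclk, uint32_t max_mclk)
{
	if (max_sclk && lvl->sclk > max_sclk)
		lvl->sclk = max_sclk;
	if (max_mclk && lvl->mclk > max_mclk)
		lvl->mclk = max_mclk;
}

int main(void)
{
	/* the PITCAIRN quirk from the hunk: no sclk limit, mclk capped at 120000 */
	struct perf_level lvl = { 92500, 125000 };

	clamp_level(&lvl, 0, 120000);
	printf("sclk %u, mclk %u\n", lvl.sclk, lvl.mclk);	/* 92500, 120000 */
	return 0;
}
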
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 4069be89e585..84999242c747 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -1632,6 +1632,23 @@
#define PACKET3_MPEG_INDEX 0x3A
#define PACKET3_COPY_DW 0x3B
#define PACKET3_WAIT_REG_MEM 0x3C
+#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0)
+ /* 0 - always
+ * 1 - <
+ * 2 - <=
+ * 3 - ==
+ * 4 - !=
+ * 5 - >=
+ * 6 - >
+ */
+#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4)
+ /* 0 - reg
+ * 1 - mem
+ */
+#define WAIT_REG_MEM_ENGINE(x) ((x) << 8)
+ /* 0 - me
+ * 1 - pfp
+ */
#define PACKET3_MEM_WRITE 0x3D
#define PACKET3_COPY_DATA 0x40
#define PACKET3_CP_DMA 0x41
@@ -1835,6 +1852,7 @@
#define DMA_PACKET_TRAP 0x7
#define DMA_PACKET_SRBM_WRITE 0x9
#define DMA_PACKET_CONSTANT_FILL 0xd
+#define DMA_PACKET_POLL_REG_MEM 0xe
#define DMA_PACKET_NOP 0xf
#define VCE_STATUS 0x20004
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 3367960286a6..978993fa3a36 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -168,7 +168,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
const struct tegra_dc_window *window)
{
unsigned h_offset, v_offset, h_size, v_size, h_dda, v_dda, bpp;
- unsigned long value;
+ unsigned long value, flags;
bool yuv, planar;
/*
@@ -181,6 +181,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
else
bpp = planar ? 1 : 2;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -273,6 +275,7 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -331,6 +334,8 @@ static int tegra_dc_setup_window(struct tegra_dc *dc, unsigned int index,
tegra_dc_window_commit(dc, index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -338,11 +343,14 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
{
struct tegra_dc *dc = to_tegra_dc(plane->crtc);
struct tegra_plane *p = to_tegra_plane(plane);
+ unsigned long flags;
u32 value;
if (!plane->crtc)
return 0;
+ spin_lock_irqsave(&dc->lock, flags);
+
value = WINDOW_A_SELECT << p->index;
tegra_dc_writel(dc, value, DC_CMD_DISPLAY_WINDOW_HEADER);
@@ -352,6 +360,8 @@ static int tegra_window_plane_disable(struct drm_plane *plane)
tegra_dc_window_commit(dc, p->index);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -699,14 +709,16 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
struct tegra_bo *bo = tegra_fb_get_plane(fb, 0);
unsigned int h_offset = 0, v_offset = 0;
struct tegra_bo_tiling tiling;
+ unsigned long value, flags;
unsigned int format, swap;
- unsigned long value;
int err;
err = tegra_fb_get_tiling(fb, &tiling);
if (err < 0)
return err;
+ spin_lock_irqsave(&dc->lock, flags);
+
tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
value = fb->offsets[0] + y * fb->pitches[0] +
@@ -752,6 +764,7 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
case TEGRA_BO_TILING_MODE_BLOCK:
DRM_ERROR("hardware doesn't support block linear mode\n");
+ spin_unlock_irqrestore(&dc->lock, flags);
return -EINVAL;
}
@@ -778,6 +791,8 @@ static int tegra_dc_set_base(struct tegra_dc *dc, int x, int y,
tegra_dc_writel(dc, value << 8, DC_CMD_STATE_CONTROL);
tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
return 0;
}
@@ -814,23 +829,32 @@ static void tegra_dc_finish_page_flip(struct tegra_dc *dc)
unsigned long flags, base;
struct tegra_bo *bo;
- if (!dc->event)
+ spin_lock_irqsave(&drm->event_lock, flags);
+
+ if (!dc->event) {
+ spin_unlock_irqrestore(&drm->event_lock, flags);
return;
+ }
bo = tegra_fb_get_plane(crtc->primary->fb, 0);
+ spin_lock_irqsave(&dc->lock, flags);
+
/* check if new start address has been latched */
+ tegra_dc_writel(dc, WINDOW_A_SELECT, DC_CMD_DISPLAY_WINDOW_HEADER);
tegra_dc_writel(dc, READ_MUX, DC_CMD_STATE_ACCESS);
base = tegra_dc_readl(dc, DC_WINBUF_START_ADDR);
tegra_dc_writel(dc, 0, DC_CMD_STATE_ACCESS);
+ spin_unlock_irqrestore(&dc->lock, flags);
+
if (base == bo->paddr + crtc->primary->fb->offsets[0]) {
- spin_lock_irqsave(&drm->event_lock, flags);
- drm_send_vblank_event(drm, dc->pipe, dc->event);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_send_vblank_event(crtc, dc->event);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
- spin_unlock_irqrestore(&drm->event_lock, flags);
}
+
+ spin_unlock_irqrestore(&drm->event_lock, flags);
}
void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
@@ -843,7 +867,7 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
if (dc->event && dc->event->base.file_priv == file) {
dc->event->base.destroy(&dc->event->base);
- drm_vblank_put(drm, dc->pipe);
+ drm_crtc_vblank_put(crtc);
dc->event = NULL;
}
@@ -853,16 +877,16 @@ void tegra_dc_cancel_page_flip(struct drm_crtc *crtc, struct drm_file *file)
static int tegra_dc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event, uint32_t page_flip_flags)
{
+ unsigned int pipe = drm_crtc_index(crtc);
struct tegra_dc *dc = to_tegra_dc(crtc);
- struct drm_device *drm = crtc->dev;
if (dc->event)
return -EBUSY;
if (event) {
- event->pipe = dc->pipe;
+ event->pipe = pipe;
dc->event = event;
- drm_vblank_get(drm, dc->pipe);
+ drm_crtc_vblank_get(crtc);
}
tegra_dc_set_base(dc, 0, 0, fb);
@@ -1127,7 +1151,7 @@ static irqreturn_t tegra_dc_irq(int irq, void *data)
/*
dev_dbg(dc->dev, "%s(): vertical blank\n", __func__);
*/
- drm_handle_vblank(dc->base.dev, dc->pipe);
+ drm_crtc_handle_vblank(&dc->base);
tegra_dc_finish_page_flip(dc);
}
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index e549afeece1f..d4f827593dfa 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -694,24 +694,28 @@ static const struct file_operations tegra_drm_fops = {
.llseek = noop_llseek,
};
-static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm, int pipe)
+static struct drm_crtc *tegra_crtc_from_pipe(struct drm_device *drm,
+ unsigned int pipe)
{
struct drm_crtc *crtc;
list_for_each_entry(crtc, &drm->mode_config.crtc_list, head) {
- struct tegra_dc *dc = to_tegra_dc(crtc);
-
- if (dc->pipe == pipe)
+ if (pipe == drm_crtc_index(crtc))
return crtc;
}
return NULL;
}
-static u32 tegra_drm_get_vblank_counter(struct drm_device *dev, int crtc)
+static u32 tegra_drm_get_vblank_counter(struct drm_device *drm, int pipe)
{
+ struct drm_crtc *crtc = tegra_crtc_from_pipe(drm, pipe);
+
+ if (!crtc)
+ return 0;
+
/* TODO: implement real hardware counter using syncpoints */
- return drm_vblank_count(dev, crtc);
+ return drm_crtc_vblank_count(crtc);
}
static int tegra_drm_enable_vblank(struct drm_device *drm, int pipe)
diff --git a/drivers/gpu/drm/tegra/gem.c b/drivers/gpu/drm/tegra/gem.c
index da32086cbeaf..8777b7f75791 100644
--- a/drivers/gpu/drm/tegra/gem.c
+++ b/drivers/gpu/drm/tegra/gem.c
@@ -216,32 +216,58 @@ static void tegra_bo_free(struct drm_device *drm, struct tegra_bo *bo)
}
}
-static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
+ struct scatterlist *s;
+ struct sg_table *sgt;
+ unsigned int i;
+
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages))
return PTR_ERR(bo->pages);
- bo->num_pages = size >> PAGE_SHIFT;
-
- bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
- if (IS_ERR(bo->sgt)) {
- drm_gem_put_pages(&bo->gem, bo->pages, false, false);
- return PTR_ERR(bo->sgt);
+ bo->num_pages = bo->gem.size >> PAGE_SHIFT;
+
+ sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
+ if (IS_ERR(sgt))
+ goto put_pages;
+
+ /*
+ * Fake up the SG table so that dma_map_sg() can be used to flush the
+ * pages associated with it. Note that this relies on the fact that
+ * the DMA API doesn't hook into IOMMU on Tegra, therefore mapping is
+ * only cache maintenance.
+ *
+ * TODO: Replace this by drm_clflash_sg() once it can be implemented
+ * without relying on symbols that are not exported.
+ */
+ for_each_sg(sgt->sgl, s, sgt->nents, i)
+ sg_dma_address(s) = sg_phys(s);
+
+ if (dma_map_sg(drm->dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE) == 0) {
+ sgt = ERR_PTR(-ENOMEM);
+ goto release_sgt;
}
+ bo->sgt = sgt;
+
return 0;
+
+release_sgt:
+ sg_free_table(sgt);
+ kfree(sgt);
+put_pages:
+ drm_gem_put_pages(&bo->gem, bo->pages, false, false);
+ return PTR_ERR(sgt);
}
-static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
- size_t size)
+static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo)
{
struct tegra_drm *tegra = drm->dev_private;
int err;
if (tegra->domain) {
- err = tegra_bo_get_pages(drm, bo, size);
+ err = tegra_bo_get_pages(drm, bo);
if (err < 0)
return err;
@@ -251,6 +277,8 @@ static int tegra_bo_alloc(struct drm_device *drm, struct tegra_bo *bo,
return err;
}
} else {
+ size_t size = bo->gem.size;
+
bo->vaddr = dma_alloc_writecombine(drm->dev, size, &bo->paddr,
GFP_KERNEL | __GFP_NOWARN);
if (!bo->vaddr) {
@@ -274,7 +302,7 @@ struct tegra_bo *tegra_bo_create(struct drm_device *drm, size_t size,
if (IS_ERR(bo))
return bo;
- err = tegra_bo_alloc(drm, bo, size);
+ err = tegra_bo_alloc(drm, bo);
if (err < 0)
goto release;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7b5d22110f25..6c6b655defcf 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -406,11 +406,9 @@ int vmw_3d_resource_inc(struct vmw_private *dev_priv,
if (unlikely(ret != 0))
--dev_priv->num_3d_resources;
} else if (unhide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) &
~SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
}
mutex_unlock(&dev_priv->release_mutex);
@@ -433,13 +431,10 @@ void vmw_3d_resource_dec(struct vmw_private *dev_priv,
mutex_lock(&dev_priv->release_mutex);
if (unlikely(--dev_priv->num_3d_resources == 0))
vmw_release_device(dev_priv);
- else if (hide_svga) {
- mutex_lock(&dev_priv->hw_mutex);
+ else if (hide_svga)
vmw_write(dev_priv, SVGA_REG_ENABLE,
vmw_read(dev_priv, SVGA_REG_ENABLE) |
SVGA_REG_ENABLE_HIDE);
- mutex_unlock(&dev_priv->hw_mutex);
- }
n3d = (int32_t) dev_priv->num_3d_resources;
mutex_unlock(&dev_priv->release_mutex);
@@ -600,12 +595,14 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->dev = dev;
dev_priv->vmw_chipset = chipset;
dev_priv->last_read_seqno = (uint32_t) -100;
- mutex_init(&dev_priv->hw_mutex);
mutex_init(&dev_priv->cmdbuf_mutex);
mutex_init(&dev_priv->release_mutex);
mutex_init(&dev_priv->binding_mutex);
rwlock_init(&dev_priv->resource_lock);
ttm_lock_init(&dev_priv->reservation_sem);
+ spin_lock_init(&dev_priv->hw_lock);
+ spin_lock_init(&dev_priv->waiter_lock);
+ spin_lock_init(&dev_priv->cap_lock);
for (i = vmw_res_context; i < vmw_res_max; ++i) {
idr_init(&dev_priv->res_idr[i]);
@@ -626,14 +623,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->enable_fb = enable_fbdev;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
svga_id = vmw_read(dev_priv, SVGA_REG_ID);
if (svga_id != SVGA_ID_2) {
ret = -ENOSYS;
DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
- mutex_unlock(&dev_priv->hw_mutex);
goto out_err0;
}
@@ -683,10 +677,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->prim_bb_mem = dev_priv->vram_size;
ret = vmw_dma_masks(dev_priv);
- if (unlikely(ret != 0)) {
- mutex_unlock(&dev_priv->hw_mutex);
+ if (unlikely(ret != 0))
goto out_err0;
- }
/*
* Limit back buffer size to VRAM size. Remove this once
@@ -695,8 +687,6 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
if (dev_priv->prim_bb_mem > dev_priv->vram_size)
dev_priv->prim_bb_mem = dev_priv->vram_size;
- mutex_unlock(&dev_priv->hw_mutex);
-
vmw_print_capabilities(dev_priv->capabilities);
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
@@ -1160,9 +1150,7 @@ static int vmw_master_set(struct drm_device *dev,
if (unlikely(ret != 0))
return ret;
vmw_kms_save_vga(dev_priv);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 0);
- mutex_unlock(&dev_priv->hw_mutex);
}
if (active) {
@@ -1196,9 +1184,7 @@ out_no_active_lock:
if (!dev_priv->enable_fb) {
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
return ret;
}
@@ -1233,9 +1219,7 @@ static void vmw_master_drop(struct drm_device *dev,
DRM_ERROR("Unable to clean VRAM on master drop.\n");
vmw_kms_restore_vga(dev_priv);
vmw_3d_resource_dec(dev_priv, true);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_TRACES, 1);
- mutex_unlock(&dev_priv->hw_mutex);
}
dev_priv->active_master = &dev_priv->fbdev_master;
@@ -1367,10 +1351,8 @@ static void vmw_pm_complete(struct device *kdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct vmw_private *dev_priv = vmw_priv(dev);
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
(void) vmw_read(dev_priv, SVGA_REG_ID);
- mutex_unlock(&dev_priv->hw_mutex);
/**
* Reclaim 3d reference held by fbdev and potentially
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 4ee799b43d5d..d26a6daa9719 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -399,7 +399,8 @@ struct vmw_private {
uint32_t memory_size;
bool has_gmr;
bool has_mob;
- struct mutex hw_mutex;
+ spinlock_t hw_lock;
+ spinlock_t cap_lock;
/*
* VGA registers.
@@ -449,8 +450,9 @@ struct vmw_private {
atomic_t marker_seq;
wait_queue_head_t fence_queue;
wait_queue_head_t fifo_queue;
- int fence_queue_waiters; /* Protected by hw_mutex */
- int goal_queue_waiters; /* Protected by hw_mutex */
+ spinlock_t waiter_lock;
+ int fence_queue_waiters; /* Protected by waiter_lock */
+ int goal_queue_waiters; /* Protected by waiter_lock */
atomic_t fifo_queue_waiters;
uint32_t last_read_seqno;
spinlock_t irq_lock;
@@ -553,20 +555,35 @@ static inline struct vmw_master *vmw_master(struct drm_master *master)
return (struct vmw_master *) master->driver_priv;
}
+/*
+ * The locking here is fine-grained, so that it is performed once
+ * for every read- and write operation. This is of course costly, but we
+ * don't perform much register access in the timing critical paths anyway.
+ * Instead we have the extra benefit of being sure that we don't forget
+ * the hw lock around register accesses.
+ */
static inline void vmw_write(struct vmw_private *dev_priv,
unsigned int offset, uint32_t value)
{
+ unsigned long irq_flags;
+
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
outl(value, dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
}
static inline uint32_t vmw_read(struct vmw_private *dev_priv,
unsigned int offset)
{
- uint32_t val;
+ unsigned long irq_flags;
+ u32 val;
+ spin_lock_irqsave(&dev_priv->hw_lock, irq_flags);
outl(offset, dev_priv->io_start + VMWGFX_INDEX_PORT);
val = inl(dev_priv->io_start + VMWGFX_VALUE_PORT);
+ spin_unlock_irqrestore(&dev_priv->hw_lock, irq_flags);
+
return val;
}
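
vmw_write() and vmw_read() talk to the SVGA device through an index/value port pair, so the two outl()s (and the outl()+inl() on the read side) must stay paired; converting from the coarse hw_mutex to a per-access hw_lock spinlock keeps that pairing atomic while making the accessors usable from atomic context, which is what lets the fence code drop its ping worker below. A userspace sketch of the same pattern, with a mutex standing in for the irqsave spinlock and an array standing in for the I/O ports:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static pthread_mutex_t hw_lock = PTHREAD_MUTEX_INITIALIZER;
static uint32_t selected_index;		/* stands in for the INDEX port */
static uint32_t regs[16];		/* stands in for the VALUE port backing store */

static void hw_write(unsigned int offset, uint32_t value)
{
	pthread_mutex_lock(&hw_lock);	/* keep index + value writes paired */
	selected_index = offset;
	regs[selected_index] = value;
	pthread_mutex_unlock(&hw_lock);
}

static uint32_t hw_read(unsigned int offset)
{
	uint32_t val;

	pthread_mutex_lock(&hw_lock);
	selected_index = offset;
	val = regs[selected_index];
	pthread_mutex_unlock(&hw_lock);
	return val;
}

int main(void)
{
	hw_write(3, 0xdeadbeef);
	printf("reg 3 = 0x%x\n", hw_read(3));
	return 0;
}
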
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
index b7594cb758af..945f1e0dad92 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
@@ -35,7 +35,7 @@ struct vmw_fence_manager {
struct vmw_private *dev_priv;
spinlock_t lock;
struct list_head fence_list;
- struct work_struct work, ping_work;
+ struct work_struct work;
u32 user_fence_size;
u32 fence_size;
u32 event_fence_action_size;
@@ -134,14 +134,6 @@ static const char *vmw_fence_get_timeline_name(struct fence *f)
return "svga";
}
-static void vmw_fence_ping_func(struct work_struct *work)
-{
- struct vmw_fence_manager *fman =
- container_of(work, struct vmw_fence_manager, ping_work);
-
- vmw_fifo_ping_host(fman->dev_priv, SVGA_SYNC_GENERIC);
-}
-
static bool vmw_fence_enable_signaling(struct fence *f)
{
struct vmw_fence_obj *fence =
@@ -155,11 +147,7 @@ static bool vmw_fence_enable_signaling(struct fence *f)
if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
return false;
- if (mutex_trylock(&dev_priv->hw_mutex)) {
- vmw_fifo_ping_host_locked(dev_priv, SVGA_SYNC_GENERIC);
- mutex_unlock(&dev_priv->hw_mutex);
- } else
- schedule_work(&fman->ping_work);
+ vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
return true;
}
@@ -305,7 +293,6 @@ struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
INIT_LIST_HEAD(&fman->fence_list);
INIT_LIST_HEAD(&fman->cleanup_list);
INIT_WORK(&fman->work, &vmw_fence_work_func);
- INIT_WORK(&fman->ping_work, &vmw_fence_ping_func);
fman->fifo_down = true;
fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
@@ -323,7 +310,6 @@ void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
bool lists_empty;
(void) cancel_work_sync(&fman->work);
- (void) cancel_work_sync(&fman->ping_work);
spin_lock_irqsave(&fman->lock, irq_flags);
lists_empty = list_empty(&fman->fence_list) &&
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index 09e10aefcd8e..39f2b03888e7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -44,10 +44,10 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
if (!dev_priv->has_mob)
return false;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_3D);
result = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return (result != 0);
}
@@ -120,7 +120,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
DRM_INFO("height %d\n", vmw_read(dev_priv, SVGA_REG_HEIGHT));
DRM_INFO("bpp %d\n", vmw_read(dev_priv, SVGA_REG_BITS_PER_PIXEL));
- mutex_lock(&dev_priv->hw_mutex);
dev_priv->enable_state = vmw_read(dev_priv, SVGA_REG_ENABLE);
dev_priv->config_done_state = vmw_read(dev_priv, SVGA_REG_CONFIG_DONE);
dev_priv->traces_state = vmw_read(dev_priv, SVGA_REG_TRACES);
@@ -143,7 +142,6 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
mb();
vmw_write(dev_priv, SVGA_REG_CONFIG_DONE, 1);
- mutex_unlock(&dev_priv->hw_mutex);
max = ioread32(fifo_mem + SVGA_FIFO_MAX);
min = ioread32(fifo_mem + SVGA_FIFO_MIN);
@@ -160,31 +158,28 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
return vmw_fifo_send_fence(dev_priv, &dummy);
}
-void vmw_fifo_ping_host_locked(struct vmw_private *dev_priv, uint32_t reason)
+void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
+ static DEFINE_SPINLOCK(ping_lock);
+ unsigned long irq_flags;
+ /*
+ * The ping_lock is needed because we don't have an atomic
+ * test-and-set of the SVGA_FIFO_BUSY register.
+ */
+ spin_lock_irqsave(&ping_lock, irq_flags);
if (unlikely(ioread32(fifo_mem + SVGA_FIFO_BUSY) == 0)) {
iowrite32(1, fifo_mem + SVGA_FIFO_BUSY);
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
}
-}
-
-void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
-{
- mutex_lock(&dev_priv->hw_mutex);
-
- vmw_fifo_ping_host_locked(dev_priv, reason);
-
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock_irqrestore(&ping_lock, irq_flags);
}
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
{
__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
- mutex_lock(&dev_priv->hw_mutex);
-
vmw_write(dev_priv, SVGA_REG_SYNC, SVGA_SYNC_GENERIC);
while (vmw_read(dev_priv, SVGA_REG_BUSY) != 0)
;
@@ -198,7 +193,6 @@ void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
vmw_write(dev_priv, SVGA_REG_TRACES,
dev_priv->traces_state);
- mutex_unlock(&dev_priv->hw_mutex);
vmw_marker_queue_takedown(&fifo->marker_queue);
if (likely(fifo->static_buffer != NULL)) {
@@ -271,7 +265,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
return vmw_fifo_wait_noirq(dev_priv, bytes,
interruptible, timeout);
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_add_return(1, &dev_priv->fifo_queue_waiters) > 0) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
outl(SVGA_IRQFLAG_FIFO_PROGRESS,
@@ -280,7 +274,7 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
if (interruptible)
ret = wait_event_interruptible_timeout
@@ -296,14 +290,14 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
else if (likely(ret > 0))
ret = 0;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (atomic_dec_and_test(&dev_priv->fifo_queue_waiters)) {
spin_lock_irqsave(&dev_priv->irq_lock, irq_flags);
dev_priv->irq_mask &= ~SVGA_IRQFLAG_FIFO_PROGRESS;
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
return ret;
}
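
vmw_fifo_ping_host() no longer needs the hw_mutex (or a worker that existed only to take it from atomic context): a local ping_lock turns the read-then-set of SVGA_FIFO_BUSY into an effective test-and-set, so only the caller that flips it from 0 to 1 pokes SVGA_REG_SYNC. The driver uses a spinlock because the flag lives in FIFO I/O memory; as a portable illustration of the same idea, here is a compare-and-swap version in C11:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint fifo_busy;		/* stands in for the SVGA_FIFO_BUSY word */

/* returns true for the one caller that actually has to ping the host */
static bool ping_host(void)
{
	unsigned int expected = 0;

	if (atomic_compare_exchange_strong(&fifo_busy, &expected, 1)) {
		/* the driver writes SVGA_REG_SYNC here */
		return true;
	}
	return false;			/* someone else already marked it busy */
}

int main(void)
{
	printf("first ping:  %d\n", ping_host());	/* 1 */
	printf("second ping: %d\n", ping_host());	/* 0 */
	return 0;
}
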
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index 37881ecf5d7a..69c8ce23123c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -135,13 +135,13 @@ static int vmw_fill_compat_cap(struct vmw_private *dev_priv, void *bounce,
(pair_offset + max_size * sizeof(SVGA3dCapPair)) / sizeof(u32);
compat_cap->header.type = SVGA3DCAPS_RECORD_DEVCAPS;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < max_size; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
compat_cap->pairs[i][0] = i;
compat_cap->pairs[i][1] = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
return 0;
}
@@ -191,12 +191,12 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
if (num > SVGA3D_DEVCAP_MAX)
num = SVGA3D_DEVCAP_MAX;
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->cap_lock);
for (i = 0; i < num; ++i) {
vmw_write(dev_priv, SVGA_REG_DEV_CAP, i);
*bounce32++ = vmw_read(dev_priv, SVGA_REG_DEV_CAP);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->cap_lock);
} else if (gb_objects) {
ret = vmw_fill_compat_cap(dev_priv, bounce, size);
if (unlikely(ret != 0))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
index 0c423766c441..9fe9827ee499 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
@@ -62,13 +62,8 @@ irqreturn_t vmw_irq_handler(int irq, void *arg)
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
- uint32_t busy;
- mutex_lock(&dev_priv->hw_mutex);
- busy = vmw_read(dev_priv, SVGA_REG_BUSY);
- mutex_unlock(&dev_priv->hw_mutex);
-
- return (busy == 0);
+ return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}
void vmw_update_seqno(struct vmw_private *dev_priv,
@@ -184,7 +179,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->fence_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -195,12 +190,12 @@ void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->fence_queue_waiters == 0) {
unsigned long irq_flags;
@@ -209,13 +204,13 @@ void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (dev_priv->goal_queue_waiters++ == 0) {
unsigned long irq_flags;
@@ -226,12 +221,12 @@ void vmw_goal_waiter_add(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
- mutex_lock(&dev_priv->hw_mutex);
+ spin_lock(&dev_priv->waiter_lock);
if (--dev_priv->goal_queue_waiters == 0) {
unsigned long irq_flags;
@@ -240,7 +235,7 @@ void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
spin_unlock_irqrestore(&dev_priv->irq_lock, irq_flags);
}
- mutex_unlock(&dev_priv->hw_mutex);
+ spin_unlock(&dev_priv->waiter_lock);
}
int vmw_wait_seqno(struct vmw_private *dev_priv,
@@ -315,9 +310,7 @@ void vmw_irq_uninstall(struct drm_device *dev)
if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
return;
- mutex_lock(&dev_priv->hw_mutex);
vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
- mutex_unlock(&dev_priv->hw_mutex);
status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 3725b521d931..8725b79e7847 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1828,9 +1828,7 @@ vmw_du_connector_detect(struct drm_connector *connector, bool force)
struct vmw_private *dev_priv = vmw_priv(dev);
struct vmw_display_unit *du = vmw_connector_to_du(connector);
- mutex_lock(&dev_priv->hw_mutex);
num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
- mutex_unlock(&dev_priv->hw_mutex);
return ((vmw_connector_to_du(connector)->unit < num_displays &&
du->pref_active) ?
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 230b6f887cd8..dfdc26970022 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -27,7 +27,8 @@ if HID
config HID_BATTERY_STRENGTH
bool "Battery level reporting for HID devices"
- depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY
+ depends on HID
+ select POWER_SUPPLY
default n
---help---
This option adds support for reporting battery strength (for HID devices
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index c3d0ac1a0988..8b638792cb43 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) },
{ HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7460f3402298..9243359c1821 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -526,6 +526,7 @@
#define USB_DEVICE_ID_KYE_GPEN_560 0x5003
#define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010
#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011
+#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a
#define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013
#define USB_VENDOR_ID_LABTEC 0x1020
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index e0a0f06ac5ef..9505605b6e22 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = {
USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
+ USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO),
+ HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE,
USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI),
HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE },
{}
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index b92bf01a1ae8..158fcf577fae 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
}
break;
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) {
rdesc = mousepen_i608x_rdesc_fixed;
*rsize = sizeof(mousepen_i608x_rdesc_fixed);
@@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id)
switch (id->product) {
case USB_DEVICE_ID_KYE_EASYPEN_I405X:
case USB_DEVICE_ID_KYE_MOUSEPEN_I608X:
+ case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2:
case USB_DEVICE_ID_KYE_EASYPEN_M610X:
ret = kye_tablet_enable(hdev);
if (ret) {
@@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_MOUSEPEN_I608X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
+ USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_KYE_EASYPEN_M610X) },
{ HID_USB_DEVICE(USB_VENDOR_ID_KYE,
USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) },
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index c917ab61aafa..5bc6d80d5be7 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
switch (data[0]) {
case REPORT_ID_DJ_SHORT:
+ if (size != DJREPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev, "DJ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_dj_event(hdev, report, data, size);
case REPORT_ID_HIDPP_SHORT:
- /* intentional fallthrough */
+ if (size != HIDPP_REPORT_SHORT_LENGTH) {
+ dev_err(&hdev->dev,
+ "Short HID++ report of bad size (%d)", size);
+ return false;
+ }
+ return logi_dj_hidpp_event(hdev, report, data, size);
case REPORT_ID_HIDPP_LONG:
+ if (size != HIDPP_REPORT_LONG_LENGTH) {
+ dev_err(&hdev->dev,
+ "Long HID++ report of bad size (%d)", size);
+ return false;
+ }
return logi_dj_hidpp_event(hdev, report, data, size);
}
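
The logi_dj_raw_event() hunk above now checks each report's size against the fixed length expected for its report ID before dispatching it, instead of assuming well-formed input. A minimal standalone sketch of that validate-then-dispatch idea (the IDs, lengths and handlers below are made up):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ID_SHORT  0x10                  /* hypothetical report IDs and lengths */
#define ID_LONG   0x11
#define LEN_SHORT 7
#define LEN_LONG  20

static bool handle_short(const uint8_t *d, size_t n) { (void)d; return n == LEN_SHORT; }
static bool handle_long(const uint8_t *d, size_t n)  { (void)d; return n == LEN_LONG; }

static bool dispatch(const uint8_t *data, size_t size)
{
        switch (data[0]) {
        case ID_SHORT:
                if (size != LEN_SHORT)
                        return false;           /* malformed: drop, don't parse */
                return handle_short(data, size);
        case ID_LONG:
                if (size != LEN_LONG)
                        return false;
                return handle_long(data, size);
        }
        return false;                           /* unknown report ID */
}

int main(void)
{
        uint8_t good[LEN_SHORT] = { ID_SHORT };
        uint8_t bad[3] = { ID_SHORT };
        printf("%d %d\n", dispatch(good, sizeof(good)), dispatch(bad, sizeof(bad)));
        return 0;
}
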
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 2f420c0b6609..a93cefe0e522 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
(report->rap.sub_id == 0x41);
}
+/**
+ * hidpp_prefix_name() prefixes the given name with "Logitech ".
+ */
+static void hidpp_prefix_name(char **name, int name_length)
+{
+#define PREFIX_LENGTH 9 /* "Logitech " */
+
+ int new_length;
+ char *new_name;
+
+ if (name_length > PREFIX_LENGTH &&
+ strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0)
+ /* The prefix is already in the name */
+ return;
+
+ new_length = PREFIX_LENGTH + name_length;
+ new_name = kzalloc(new_length, GFP_KERNEL);
+ if (!new_name)
+ return;
+
+ snprintf(new_name, new_length, "Logitech %s", *name);
+
+ kfree(*name);
+
+ *name = new_name;
+}
+
/* -------------------------------------------------------------------------- */
/* HIDP++ 1.0 commands */
/* -------------------------------------------------------------------------- */
@@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
return NULL;
memcpy(name, &response.rap.params[2], len);
+
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, len + 1);
+
return name;
}
@@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp)
index += ret;
}
+ /* include the terminating '\0' */
+ hidpp_prefix_name(&name, __name_length + 1);
+
return name;
}
@@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
switch (data[0]) {
case 0x02:
+ if (size < 2) {
+ hid_err(hdev, "Received HID report of bad size (%d)",
+ size);
+ return 1;
+ }
if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
input_event(wd->input, EV_KEY, BTN_LEFT,
!!(data[1] & 0x01));
input_event(wd->input, EV_KEY, BTN_RIGHT,
!!(data[1] & 0x02));
input_sync(wd->input);
+ return 0;
} else {
if (size < 21)
return 1;
return wtp_mouse_raw_xy_event(hidpp, &data[7]);
}
case REPORT_ID_HIDPP_LONG:
+ /* size is already checked in hidpp_raw_event. */
if ((report->fap.feature_index != wd->mt_feature_index) ||
(report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
return 1;
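
hidpp_prefix_name() above rebuilds the device name with a "Logitech " prefix unless it is already present, keeping the old name if allocation fails. The same logic in a small standalone sketch (userspace malloc/free in place of kzalloc/kfree):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PREFIX "Logitech "
#define PREFIX_LENGTH 9

static void prefix_name(char **name, int name_length)
{
        if (name_length > PREFIX_LENGTH &&
            strncmp(*name, PREFIX, PREFIX_LENGTH) == 0)
                return;                         /* prefix already there */

        int new_length = PREFIX_LENGTH + name_length;
        char *new_name = calloc(1, new_length);
        if (!new_name)
                return;                         /* keep the old name on failure */

        snprintf(new_name, new_length, PREFIX "%s", *name);
        free(*name);
        *name = new_name;
}

int main(void)
{
        char *n = strdup("K750");               /* hypothetical device name */
        prefix_name(&n, strlen(n) + 1);         /* length includes the terminating '\0' */
        puts(n);                                /* prints "Logitech K750" */
        free(n);
        return 0;
}
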
diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c
index 1a07e07d99a0..47d7e74231e5 100644
--- a/drivers/hid/hid-roccat-pyra.c
+++ b/drivers/hid/hid-roccat-pyra.c
@@ -35,6 +35,8 @@ static struct class *pyra_class;
static void profile_activated(struct pyra_device *pyra,
unsigned int new_profile)
{
+ if (new_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return;
pyra->actual_profile = new_profile;
pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi;
}
@@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp,
if (off != 0 || count != PYRA_SIZE_SETTINGS)
return -EINVAL;
- mutex_lock(&pyra->pyra_lock);
-
settings = (struct pyra_settings const *)buf;
+ if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings))
+ return -EINVAL;
+
+ mutex_lock(&pyra->pyra_lock);
retval = pyra_set_settings(usb_dev, settings);
if (retval) {
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index d32037cbf9db..d43e967e7533 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid)
static void i2c_hid_stop(struct hid_device *hid)
{
- struct i2c_client *client = hid->driver_data;
- struct i2c_hid *ihid = i2c_get_clientdata(client);
-
hid->claimed = 0;
-
- i2c_hid_free_buffers(ihid);
}
static int i2c_hid_open(struct hid_device *hid)
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index dc89be90b35e..b27b3d33ebab 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -124,6 +124,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT },
+ { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT },
{ USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS },
{ USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 6529c09c46f0..a7de26d1ac80 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -574,6 +574,16 @@ config SENSORS_IIO_HWMON
for those channels specified in the map. This map can be provided
either via platform data or the device tree bindings.
+config SENSORS_I5500
+ tristate "Intel 5500/5520/X58 temperature sensor"
+ depends on X86 && PCI
+ help
+ If you say yes here you get support for the temperature
+ sensor inside the Intel 5500, 5520 and X58 chipsets.
+
+ This driver can also be built as a module. If so, the module
+ will be called i5500_temp.
+
config SENSORS_CORETEMP
tristate "Intel Core/Core2/Atom temperature sensor"
depends on X86
diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile
index 67280643bcf0..6c941472e707 100644
--- a/drivers/hwmon/Makefile
+++ b/drivers/hwmon/Makefile
@@ -68,6 +68,7 @@ obj-$(CONFIG_SENSORS_GPIO_FAN) += gpio-fan.o
obj-$(CONFIG_SENSORS_HIH6130) += hih6130.o
obj-$(CONFIG_SENSORS_HTU21) += htu21.o
obj-$(CONFIG_SENSORS_ULTRA45) += ultra45_env.o
+obj-$(CONFIG_SENSORS_I5500) += i5500_temp.o
obj-$(CONFIG_SENSORS_I5K_AMB) += i5k_amb.o
obj-$(CONFIG_SENSORS_IBMAEM) += ibmaem.o
obj-$(CONFIG_SENSORS_IBMPEX) += ibmpex.o
diff --git a/drivers/hwmon/i5500_temp.c b/drivers/hwmon/i5500_temp.c
new file mode 100644
index 000000000000..3e3ccbf18b4e
--- /dev/null
+++ b/drivers/hwmon/i5500_temp.c
@@ -0,0 +1,149 @@
+/*
+ * i5500_temp - Driver for Intel 5500/5520/X58 chipset thermal sensor
+ *
+ * Copyright (C) 2012, 2014 Jean Delvare <jdelvare@suse.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/jiffies.h>
+#include <linux/device.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+#include <linux/err.h>
+#include <linux/mutex.h>
+
+/* Register definitions from datasheet */
+#define REG_TSTHRCATA 0xE2
+#define REG_TSCTRL 0xE8
+#define REG_TSTHRRPEX 0xEB
+#define REG_TSTHRLO 0xEC
+#define REG_TSTHRHI 0xEE
+#define REG_CTHINT 0xF0
+#define REG_TSFSC 0xF3
+#define REG_CTSTS 0xF4
+#define REG_TSTHRRQPI 0xF5
+#define REG_CTCTRL 0xF7
+#define REG_TSTIMER 0xF8
+
+/*
+ * Sysfs stuff
+ */
+
+/* Sensor resolution : 0.5 degree C */
+static ssize_t show_temp(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ long temp;
+ u16 tsthrhi;
+ s8 tsfsc;
+
+ pci_read_config_word(pdev, REG_TSTHRHI, &tsthrhi);
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ temp = ((long)tsthrhi - tsfsc) * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_thresh(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int reg = to_sensor_dev_attr(devattr)->index;
+ long temp;
+ u16 tsthr;
+
+ pci_read_config_word(pdev, reg, &tsthr);
+ temp = tsthr * 500;
+
+ return sprintf(buf, "%ld\n", temp);
+}
+
+static ssize_t show_alarm(struct device *dev,
+ struct device_attribute *devattr, char *buf)
+{
+ struct pci_dev *pdev = to_pci_dev(dev->parent);
+ int nr = to_sensor_dev_attr(devattr)->index;
+ u8 ctsts;
+
+ pci_read_config_byte(pdev, REG_CTSTS, &ctsts);
+ return sprintf(buf, "%u\n", (unsigned int)ctsts & (1 << nr));
+}
+
+static DEVICE_ATTR(temp1_input, S_IRUGO, show_temp, NULL);
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, show_thresh, NULL, 0xE2);
+static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IRUGO, show_thresh, NULL, 0xEC);
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO, show_thresh, NULL, 0xEE);
+static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, show_alarm, NULL, 0);
+static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO, show_alarm, NULL, 1);
+
+static struct attribute *i5500_temp_attrs[] = {
+ &dev_attr_temp1_input.attr,
+ &sensor_dev_attr_temp1_crit.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_hyst.dev_attr.attr,
+ &sensor_dev_attr_temp1_max.dev_attr.attr,
+ &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr,
+ &sensor_dev_attr_temp1_max_alarm.dev_attr.attr,
+ NULL
+};
+
+ATTRIBUTE_GROUPS(i5500_temp);
+
+static const struct pci_device_id i5500_temp_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x3438) },
+ { 0 },
+};
+
+MODULE_DEVICE_TABLE(pci, i5500_temp_ids);
+
+static int i5500_temp_probe(struct pci_dev *pdev,
+ const struct pci_device_id *id)
+{
+ int err;
+ struct device *hwmon_dev;
+ u32 tstimer;
+ s8 tsfsc;
+
+ err = pci_enable_device(pdev);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to enable device\n");
+ return err;
+ }
+
+ pci_read_config_byte(pdev, REG_TSFSC, &tsfsc);
+ pci_read_config_dword(pdev, REG_TSTIMER, &tstimer);
+ if (tsfsc == 0x7F && tstimer == 0x07D30D40) {
+ dev_notice(&pdev->dev, "Sensor seems to be disabled\n");
+ return -ENODEV;
+ }
+
+ hwmon_dev = devm_hwmon_device_register_with_groups(&pdev->dev,
+ "intel5500", NULL,
+ i5500_temp_groups);
+ return PTR_ERR_OR_ZERO(hwmon_dev);
+}
+
+static struct pci_driver i5500_temp_driver = {
+ .name = "i5500_temp",
+ .id_table = i5500_temp_ids,
+ .probe = i5500_temp_probe,
+};
+
+module_pci_driver(i5500_temp_driver);
+
+MODULE_AUTHOR("Jean Delvare <jdelvare@suse.de>");
+MODULE_DESCRIPTION("Intel 5500/5520/X58 chipset thermal sensor driver");
+MODULE_LICENSE("GPL");
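
show_temp() above derives the reading from two config-space registers: TSTHRHI minus the signed TSFSC fuse offset, at 0.5 degree C per LSB, reported in millidegrees as hwmon expects. A tiny standalone sketch of just that conversion (the sample register values are made up):

#include <stdint.h>
#include <stdio.h>

/* Convert TSTHRHI/TSFSC register values to millidegrees Celsius. */
static long i5500_millidegrees(uint16_t tsthrhi, int8_t tsfsc)
{
        return ((long)tsthrhi - tsfsc) * 500;   /* 0.5 degree C resolution */
}

int main(void)
{
        /* hypothetical sample: tsthrhi=0x7C, tsfsc=4 -> (124 - 4) * 500 = 60000 (60.0 C) */
        printf("%ld\n", i5500_millidegrees(0x7C, 4));
        return 0;
}
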
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 31e8308ba899..ab838d9e28b6 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -881,6 +881,7 @@ config I2C_XLR
config I2C_RCAR
tristate "Renesas R-Car I2C Controller"
depends on ARCH_SHMOBILE || COMPILE_TEST
+ select I2C_SLAVE
help
If you say yes to this option, support will be included for the
R-Car I2C controller.
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index bff20a589621..958c8db4ec30 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -785,14 +785,16 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
int ret;
pm_runtime_get_sync(&adap->dev);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (retry = 0; retry < adap->retries; retry++) {
ret = s3c24xx_i2c_doxfer(i2c, msgs, num);
if (ret != -EAGAIN) {
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return ret;
}
@@ -802,7 +804,7 @@ static int s3c24xx_i2c_xfer(struct i2c_adapter *adap,
udelay(100);
}
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
pm_runtime_put(&adap->dev);
return -EREMOTEIO;
}
@@ -1197,7 +1199,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
clk_prepare_enable(i2c->clk);
ret = s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
if (ret != 0) {
dev_err(&pdev->dev, "I2C controller init failed\n");
return ret;
@@ -1210,6 +1212,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
i2c->irq = ret = platform_get_irq(pdev, 0);
if (ret <= 0) {
dev_err(&pdev->dev, "cannot find IRQ\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1218,6 +1221,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret != 0) {
dev_err(&pdev->dev, "cannot claim IRQ %d\n", i2c->irq);
+ clk_unprepare(i2c->clk);
return ret;
}
}
@@ -1225,6 +1229,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
ret = s3c24xx_i2c_register_cpufreq(i2c);
if (ret < 0) {
dev_err(&pdev->dev, "failed to register cpufreq notifier\n");
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1241,6 +1246,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
if (ret < 0) {
dev_err(&pdev->dev, "failed to add bus to i2c core\n");
s3c24xx_i2c_deregister_cpufreq(i2c);
+ clk_unprepare(i2c->clk);
return ret;
}
@@ -1262,6 +1268,8 @@ static int s3c24xx_i2c_remove(struct platform_device *pdev)
{
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ clk_unprepare(i2c->clk);
+
pm_runtime_disable(&i2c->adap.dev);
pm_runtime_disable(&pdev->dev);
@@ -1293,13 +1301,16 @@ static int s3c24xx_i2c_resume_noirq(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct s3c24xx_i2c *i2c = platform_get_drvdata(pdev);
+ int ret;
if (!IS_ERR(i2c->sysreg))
regmap_write(i2c->sysreg, EXYNOS5_SYS_I2C_CFG, i2c->sys_i2c_cfg);
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
s3c24xx_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 440d5dbc8b5f..007818b3e174 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -139,6 +139,7 @@ struct sh_mobile_i2c_data {
int pos;
int sr;
bool send_stop;
+ bool stop_after_dma;
struct resource *res;
struct dma_chan *dma_tx;
@@ -407,7 +408,7 @@ static int sh_mobile_i2c_isr_tx(struct sh_mobile_i2c_data *pd)
if (pd->pos == pd->msg->len) {
/* Send stop if we haven't yet (DMA case) */
- if (pd->send_stop && (iic_rd(pd, ICCR) & ICCR_BBSY))
+ if (pd->send_stop && pd->stop_after_dma)
i2c_op(pd, OP_TX_STOP, 0);
return 1;
}
@@ -449,6 +450,13 @@ static int sh_mobile_i2c_isr_rx(struct sh_mobile_i2c_data *pd)
real_pos = pd->pos - 2;
if (pd->pos == pd->msg->len) {
+ if (pd->stop_after_dma) {
+ /* Simulate PIO end condition after DMA transfer */
+ i2c_op(pd, OP_RX_STOP, 0);
+ pd->pos++;
+ break;
+ }
+
if (real_pos < 0) {
i2c_op(pd, OP_RX_STOP, 0);
break;
@@ -536,6 +544,7 @@ static void sh_mobile_i2c_dma_callback(void *data)
sh_mobile_i2c_dma_unmap(pd);
pd->pos = pd->msg->len;
+ pd->stop_after_dma = true;
iic_set_clr(pd, ICIC, 0, ICIC_TDMAE | ICIC_RDMAE);
}
@@ -726,6 +735,7 @@ static int sh_mobile_i2c_xfer(struct i2c_adapter *adapter,
bool do_start = pd->send_stop || !i;
msg = &msgs[i];
pd->send_stop = i == num - 1 || msg->flags & I2C_M_STOP;
+ pd->stop_after_dma = false;
err = start_ch(pd, msg, do_start);
if (err)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 39d25a8cb1ad..e9eae57a2b50 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -2972,6 +2972,7 @@ trace:
}
EXPORT_SYMBOL(i2c_smbus_xfer);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
{
int ret;
@@ -3019,6 +3020,7 @@ int i2c_slave_unregister(struct i2c_client *client)
return ret;
}
EXPORT_SYMBOL_GPL(i2c_slave_unregister);
+#endif
MODULE_AUTHOR("Simon G. Vogl <simon@tk.uni-linz.ac.at>");
MODULE_DESCRIPTION("I2C-Bus main module");
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 6631400b5f02..cf9b09db092f 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -74,7 +74,7 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
@@ -92,7 +92,7 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
struct eeprom_data *eeprom;
unsigned long flags;
- if (off + count >= attr->size)
+ if (off + count > attr->size)
return -EFBIG;
eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
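
The bounds check above changes ">=" to ">" so that an access ending exactly at the end of the EEPROM image is still allowed, while anything that would run past it is rejected. A quick worked check of that boundary (the 256-byte size is just an example):

#include <stdbool.h>
#include <stdio.h>

/* Reject only accesses that run past the end of the region. */
static bool in_range(unsigned long off, unsigned long count, unsigned long size)
{
        return off + count <= size;
}

int main(void)
{
        /* off=0, count=256 on a 256-byte image: valid with '>', was rejected with '>=' */
        printf("%d\n", in_range(0, 256, 256));  /* 1 */
        printf("%d\n", in_range(1, 256, 256));  /* 0: would overrun by one byte */
        return 0;
}
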
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index e37412da15f5..b99de00e57b8 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val)
case ad7998:
return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG,
val);
- default:
+ case ad7992:
+ case ad7993:
+ case ad7994:
return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG,
val);
+ default:
+ /* Will be written when doing a conversion */
+ st->config = val;
+ return 0;
}
}
@@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st)
case ad7997:
case ad7998:
return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG);
- default:
+ case ad7992:
+ case ad7993:
+ case ad7994:
return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG);
+ default:
+ /* No readback support */
+ return st->config;
}
}
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 866fe904cba2..90c8cb727cc7 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -449,6 +449,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
if (val2 == NULL)
val2 = &unused;
+ if (!iio_channel_has_info(chan->channel, info))
+ return -EINVAL;
+
if (chan->indio_dev->info->read_raw_multi) {
ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
chan->channel, INDIO_MAX_RAW_ELEMENTS,
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index 57ecc5b204f3..9117b7a2d5f8 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_
struct mlx4_dev *dev = to_mdev(qp->device)->dev;
int err = 0;
- if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
ib_flow = flow_attr + 1;
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 8afa28e4570e..18d4b2c8fe55 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -28,6 +28,13 @@
#include <linux/cdev.h>
#include "input-compat.h"
+enum evdev_clock_type {
+ EV_CLK_REAL = 0,
+ EV_CLK_MONO,
+ EV_CLK_BOOT,
+ EV_CLK_MAX
+};
+
struct evdev {
int open;
struct input_handle handle;
@@ -49,12 +56,32 @@ struct evdev_client {
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
- int clkid;
+ int clk_type;
bool revoked;
unsigned int bufsize;
struct input_event buffer[];
};
+static int evdev_set_clk_type(struct evdev_client *client, unsigned int clkid)
+{
+ switch (clkid) {
+
+ case CLOCK_REALTIME:
+ client->clk_type = EV_CLK_REAL;
+ break;
+ case CLOCK_MONOTONIC:
+ client->clk_type = EV_CLK_MONO;
+ break;
+ case CLOCK_BOOTTIME:
+ client->clk_type = EV_CLK_BOOT;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/* flush queued events of type @type, caller must hold client->buffer_lock */
static void __evdev_flush_queue(struct evdev_client *client, unsigned int type)
{
@@ -108,8 +135,11 @@ static void evdev_queue_syn_dropped(struct evdev_client *client)
struct input_event ev;
ktime_t time;
- time = (client->clkid == CLOCK_MONOTONIC) ?
- ktime_get() : ktime_get_real();
+ time = client->clk_type == EV_CLK_REAL ?
+ ktime_get_real() :
+ client->clk_type == EV_CLK_MONO ?
+ ktime_get() :
+ ktime_get_boottime();
ev.time = ktime_to_timeval(time);
ev.type = EV_SYN;
@@ -159,7 +189,7 @@ static void __pass_event(struct evdev_client *client,
static void evdev_pass_values(struct evdev_client *client,
const struct input_value *vals, unsigned int count,
- ktime_t mono, ktime_t real)
+ ktime_t *ev_time)
{
struct evdev *evdev = client->evdev;
const struct input_value *v;
@@ -169,8 +199,7 @@ static void evdev_pass_values(struct evdev_client *client,
if (client->revoked)
return;
- event.time = ktime_to_timeval(client->clkid == CLOCK_MONOTONIC ?
- mono : real);
+ event.time = ktime_to_timeval(ev_time[client->clk_type]);
/* Interrupts are disabled, just acquire the lock. */
spin_lock(&client->buffer_lock);
@@ -198,21 +227,22 @@ static void evdev_events(struct input_handle *handle,
{
struct evdev *evdev = handle->private;
struct evdev_client *client;
- ktime_t time_mono, time_real;
+ ktime_t ev_time[EV_CLK_MAX];
- time_mono = ktime_get();
- time_real = ktime_mono_to_real(time_mono);
+ ev_time[EV_CLK_MONO] = ktime_get();
+ ev_time[EV_CLK_REAL] = ktime_mono_to_real(ev_time[EV_CLK_MONO]);
+ ev_time[EV_CLK_BOOT] = ktime_mono_to_any(ev_time[EV_CLK_MONO],
+ TK_OFFS_BOOT);
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
- evdev_pass_values(client, vals, count, time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
- evdev_pass_values(client, vals, count,
- time_mono, time_real);
+ evdev_pass_values(client, vals, count, ev_time);
rcu_read_unlock();
}
@@ -877,10 +907,8 @@ static long evdev_do_ioctl(struct file *file, unsigned int cmd,
case EVIOCSCLOCKID:
if (copy_from_user(&i, p, sizeof(unsigned int)))
return -EFAULT;
- if (i != CLOCK_MONOTONIC && i != CLOCK_REALTIME)
- return -EINVAL;
- client->clkid = i;
- return 0;
+
+ return evdev_set_clk_type(client, i);
case EVIOCGKEYCODE:
return evdev_handle_get_keycode(dev, p);
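
With the clock-type table above, EVIOCSCLOCKID now accepts CLOCK_BOOTTIME in addition to CLOCK_REALTIME and CLOCK_MONOTONIC, and event timestamps are taken from the matching clock. A minimal userspace sketch of selecting the boot-time clock on an evdev node (the device path is only an example):

#include <fcntl.h>
#include <linux/input.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/dev/input/event0", O_RDONLY);   /* example node */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        int clkid = CLOCK_BOOTTIME;     /* also advances across suspend */
        if (ioctl(fd, EVIOCSCLOCKID, &clkid) < 0)
                perror("EVIOCSCLOCKID"); /* older kernels reject CLOCK_BOOTTIME */

        struct input_event ev;
        if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
                printf("%ld.%06ld type=%u code=%u value=%d\n",
                       (long)ev.time.tv_sec, (long)ev.time.tv_usec,
                       ev.type, ev.code, ev.value);
        close(fd);
        return 0;
}
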
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 04217c2e345c..213e3a1903ee 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1974,18 +1974,22 @@ static unsigned int input_estimate_events_per_packet(struct input_dev *dev)
events = mt_slots + 1; /* count SYN_MT_REPORT and SYN_REPORT */
- for (i = 0; i < ABS_CNT; i++) {
- if (test_bit(i, dev->absbit)) {
- if (input_is_mt_axis(i))
- events += mt_slots;
- else
- events++;
+ if (test_bit(EV_ABS, dev->evbit)) {
+ for (i = 0; i < ABS_CNT; i++) {
+ if (test_bit(i, dev->absbit)) {
+ if (input_is_mt_axis(i))
+ events += mt_slots;
+ else
+ events++;
+ }
}
}
- for (i = 0; i < REL_CNT; i++)
- if (test_bit(i, dev->relbit))
- events++;
+ if (test_bit(EV_REL, dev->evbit)) {
+ for (i = 0; i < REL_CNT; i++)
+ if (test_bit(i, dev->relbit))
+ events++;
+ }
/* Make room for KEY and MSC events */
events += 7;
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig
index 96ee26c555e0..a5d9b3f3c871 100644
--- a/drivers/input/keyboard/Kconfig
+++ b/drivers/input/keyboard/Kconfig
@@ -559,6 +559,7 @@ config KEYBOARD_SH_KEYSC
config KEYBOARD_STMPE
tristate "STMPE keypad support"
depends on MFD_STMPE
+ depends on OF
select INPUT_MATRIXKMAP
help
Say Y here if you want to use the keypad controller on STMPE I/O
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index d4dd78a7d56b..883d6aed5b9a 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -35,9 +35,13 @@
struct gpio_button_data {
const struct gpio_keys_button *button;
struct input_dev *input;
- struct timer_list timer;
- struct work_struct work;
- unsigned int timer_debounce; /* in msecs */
+
+ struct timer_list release_timer;
+ unsigned int release_delay; /* in msecs, for IRQ-only buttons */
+
+ struct delayed_work work;
+ unsigned int software_debounce; /* in msecs, for GPIO-driven buttons */
+
unsigned int irq;
spinlock_t lock;
bool disabled;
@@ -116,11 +120,14 @@ static void gpio_keys_disable_button(struct gpio_button_data *bdata)
{
if (!bdata->disabled) {
/*
- * Disable IRQ and possible debouncing timer.
+ * Disable IRQ and associated timer/work structure.
*/
disable_irq(bdata->irq);
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
+
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
bdata->disabled = true;
}
@@ -343,7 +350,7 @@ static void gpio_keys_gpio_report_event(struct gpio_button_data *bdata)
static void gpio_keys_gpio_work_func(struct work_struct *work)
{
struct gpio_button_data *bdata =
- container_of(work, struct gpio_button_data, work);
+ container_of(work, struct gpio_button_data, work.work);
gpio_keys_gpio_report_event(bdata);
@@ -351,13 +358,6 @@ static void gpio_keys_gpio_work_func(struct work_struct *work)
pm_relax(bdata->input->dev.parent);
}
-static void gpio_keys_gpio_timer(unsigned long _data)
-{
- struct gpio_button_data *bdata = (struct gpio_button_data *)_data;
-
- schedule_work(&bdata->work);
-}
-
static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
{
struct gpio_button_data *bdata = dev_id;
@@ -366,11 +366,10 @@ static irqreturn_t gpio_keys_gpio_isr(int irq, void *dev_id)
if (bdata->button->wakeup)
pm_stay_awake(bdata->input->dev.parent);
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
- else
- schedule_work(&bdata->work);
+
+ mod_delayed_work(system_wq,
+ &bdata->work,
+ msecs_to_jiffies(bdata->software_debounce));
return IRQ_HANDLED;
}
@@ -408,7 +407,7 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
input_event(input, EV_KEY, button->code, 1);
input_sync(input);
- if (!bdata->timer_debounce) {
+ if (!bdata->release_delay) {
input_event(input, EV_KEY, button->code, 0);
input_sync(input);
goto out;
@@ -417,9 +416,9 @@ static irqreturn_t gpio_keys_irq_isr(int irq, void *dev_id)
bdata->key_pressed = true;
}
- if (bdata->timer_debounce)
- mod_timer(&bdata->timer,
- jiffies + msecs_to_jiffies(bdata->timer_debounce));
+ if (bdata->release_delay)
+ mod_timer(&bdata->release_timer,
+ jiffies + msecs_to_jiffies(bdata->release_delay));
out:
spin_unlock_irqrestore(&bdata->lock, flags);
return IRQ_HANDLED;
@@ -429,10 +428,10 @@ static void gpio_keys_quiesce_key(void *data)
{
struct gpio_button_data *bdata = data;
- if (bdata->timer_debounce)
- del_timer_sync(&bdata->timer);
-
- cancel_work_sync(&bdata->work);
+ if (gpio_is_valid(bdata->button->gpio))
+ cancel_delayed_work_sync(&bdata->work);
+ else
+ del_timer_sync(&bdata->release_timer);
}
static int gpio_keys_setup_key(struct platform_device *pdev,
@@ -466,23 +465,25 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
button->debounce_interval * 1000);
/* use timer if gpiolib doesn't provide debounce */
if (error < 0)
- bdata->timer_debounce =
+ bdata->software_debounce =
button->debounce_interval;
}
- irq = gpio_to_irq(button->gpio);
- if (irq < 0) {
- error = irq;
- dev_err(dev,
- "Unable to get irq number for GPIO %d, error %d\n",
- button->gpio, error);
- return error;
+ if (button->irq) {
+ bdata->irq = button->irq;
+ } else {
+ irq = gpio_to_irq(button->gpio);
+ if (irq < 0) {
+ error = irq;
+ dev_err(dev,
+ "Unable to get irq number for GPIO %d, error %d\n",
+ button->gpio, error);
+ return error;
+ }
+ bdata->irq = irq;
}
- bdata->irq = irq;
- INIT_WORK(&bdata->work, gpio_keys_gpio_work_func);
- setup_timer(&bdata->timer,
- gpio_keys_gpio_timer, (unsigned long)bdata);
+ INIT_DELAYED_WORK(&bdata->work, gpio_keys_gpio_work_func);
isr = gpio_keys_gpio_isr;
irqflags = IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING;
@@ -499,8 +500,8 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
return -EINVAL;
}
- bdata->timer_debounce = button->debounce_interval;
- setup_timer(&bdata->timer,
+ bdata->release_delay = button->debounce_interval;
+ setup_timer(&bdata->release_timer,
gpio_keys_irq_timer, (unsigned long)bdata);
isr = gpio_keys_irq_isr;
@@ -510,7 +511,7 @@ static int gpio_keys_setup_key(struct platform_device *pdev,
input_set_capability(input, button->type ?: EV_KEY, button->code);
/*
- * Install custom action to cancel debounce timer and
+ * Install custom action to cancel release timer and
* workqueue item.
*/
error = devm_add_action(&pdev->dev, gpio_keys_quiesce_key, bdata);
@@ -618,33 +619,30 @@ gpio_keys_get_devtree_pdata(struct device *dev)
i = 0;
for_each_child_of_node(node, pp) {
- int gpio = -1;
enum of_gpio_flags flags;
button = &pdata->buttons[i++];
- if (!of_find_property(pp, "gpios", NULL)) {
- button->irq = irq_of_parse_and_map(pp, 0);
- if (button->irq == 0) {
- i--;
- pdata->nbuttons--;
- dev_warn(dev, "Found button without gpios or irqs\n");
- continue;
- }
- } else {
- gpio = of_get_gpio_flags(pp, 0, &flags);
- if (gpio < 0) {
- error = gpio;
+ button->gpio = of_get_gpio_flags(pp, 0, &flags);
+ if (button->gpio < 0) {
+ error = button->gpio;
+ if (error != -ENOENT) {
if (error != -EPROBE_DEFER)
dev_err(dev,
"Failed to get gpio flags, error: %d\n",
error);
return ERR_PTR(error);
}
+ } else {
+ button->active_low = flags & OF_GPIO_ACTIVE_LOW;
}
- button->gpio = gpio;
- button->active_low = flags & OF_GPIO_ACTIVE_LOW;
+ button->irq = irq_of_parse_and_map(pp, 0);
+
+ if (!gpio_is_valid(button->gpio) && !button->irq) {
+ dev_err(dev, "Found button without gpios or irqs\n");
+ return ERR_PTR(-EINVAL);
+ }
if (of_property_read_u32(pp, "linux,code", &button->code)) {
dev_err(dev, "Button without keycode: 0x%x\n",
@@ -659,6 +657,8 @@ gpio_keys_get_devtree_pdata(struct device *dev)
button->wakeup = !!of_get_property(pp, "gpio-key,wakeup", NULL);
+ button->can_disable = !!of_get_property(pp, "linux,can-disable", NULL);
+
if (of_property_read_u32(pp, "debounce-interval",
&button->debounce_interval))
button->debounce_interval = 5;
diff --git a/drivers/input/keyboard/hil_kbd.c b/drivers/input/keyboard/hil_kbd.c
index 610a8af795a1..5b152f25a8e1 100644
--- a/drivers/input/keyboard/hil_kbd.c
+++ b/drivers/input/keyboard/hil_kbd.c
@@ -473,7 +473,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -482,7 +482,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
@@ -491,7 +491,7 @@ static int hil_dev_connect(struct serio *serio, struct serio_driver *drv)
if (error)
goto bail1;
- init_completion(&dev->cmd_done);
+ reinit_completion(&dev->cmd_done);
serio_write(serio, 0);
serio_write(serio, 0);
serio_write(serio, HIL_PKT_CMD >> 8);
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index ef5e67fb567e..fe6e3f22eed7 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -45,13 +45,14 @@
#define STMPE_KEYPAD_MAX_ROWS 8
#define STMPE_KEYPAD_MAX_COLS 8
#define STMPE_KEYPAD_ROW_SHIFT 3
-#define STMPE_KEYPAD_KEYMAP_SIZE \
+#define STMPE_KEYPAD_KEYMAP_MAX_SIZE \
(STMPE_KEYPAD_MAX_ROWS * STMPE_KEYPAD_MAX_COLS)
/**
* struct stmpe_keypad_variant - model-specific attributes
* @auto_increment: whether the KPC_DATA_BYTE register address
* auto-increments on multiple read
+ * @set_pullup: whether the pins need to have their pull-ups set
* @num_data: number of data bytes
* @num_normal_data: number of normal keys' data bytes
* @max_cols: maximum number of columns supported
@@ -61,6 +62,7 @@
*/
struct stmpe_keypad_variant {
bool auto_increment;
+ bool set_pullup;
int num_data;
int num_normal_data;
int max_cols;
@@ -81,6 +83,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2401] = {
.auto_increment = false,
+ .set_pullup = true,
.num_data = 3,
.num_normal_data = 2,
.max_cols = 8,
@@ -90,6 +93,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
[STMPE2403] = {
.auto_increment = true,
+ .set_pullup = true,
.num_data = 5,
.num_normal_data = 3,
.max_cols = 8,
@@ -99,16 +103,30 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
},
};
+/**
+ * struct stmpe_keypad - STMPE keypad state container
+ * @stmpe: pointer to parent STMPE device
+ * @input: spawned input device
+ * @variant: STMPE variant
+ * @debounce_ms: debounce interval, in ms. Maximum is
+ * %STMPE_KEYPAD_MAX_DEBOUNCE.
+ * @scan_count: number of key scanning cycles to confirm key data.
+ * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
+ * @no_autorepeat: disable key autorepeat
+ * @rows: bitmask for the rows
+ * @cols: bitmask for the columns
+ * @keymap: the keymap
+ */
struct stmpe_keypad {
struct stmpe *stmpe;
struct input_dev *input;
const struct stmpe_keypad_variant *variant;
- const struct stmpe_keypad_platform_data *plat;
-
+ unsigned int debounce_ms;
+ unsigned int scan_count;
+ bool no_autorepeat;
unsigned int rows;
unsigned int cols;
-
- unsigned short keymap[STMPE_KEYPAD_KEYMAP_SIZE];
+ unsigned short keymap[STMPE_KEYPAD_KEYMAP_MAX_SIZE];
};
static int stmpe_keypad_read_data(struct stmpe_keypad *keypad, u8 *data)
@@ -171,7 +189,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
unsigned int col_gpios = variant->col_gpios;
unsigned int row_gpios = variant->row_gpios;
struct stmpe *stmpe = keypad->stmpe;
+ u8 pureg = stmpe->regs[STMPE_IDX_GPPUR_LSB];
unsigned int pins = 0;
+ unsigned int pu_pins = 0;
+ int ret;
int i;
/*
@@ -188,8 +209,10 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
for (i = 0; i < variant->max_cols; i++) {
int num = __ffs(col_gpios);
- if (keypad->cols & (1 << i))
+ if (keypad->cols & (1 << i)) {
pins |= 1 << num;
+ pu_pins |= 1 << num;
+ }
col_gpios &= ~(1 << num);
}
@@ -203,20 +226,43 @@ static int stmpe_keypad_altfunc_init(struct stmpe_keypad *keypad)
row_gpios &= ~(1 << num);
}
- return stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ ret = stmpe_set_altfunc(stmpe, pins, STMPE_BLOCK_KEYPAD);
+ if (ret)
+ return ret;
+
+ /*
+ * On STMPE24xx, set pin bias to pull-up on all keypad input
+ * pins (columns); these incidentally happen to be at most 8 pins
+ * placed at GPIO0-7, so only the LSB of the pull-up register
+ * ever needs to be written.
+ */
+ if (variant->set_pullup) {
+ u8 val;
+
+ ret = stmpe_reg_read(stmpe, pureg);
+ if (ret)
+ return ret;
+
+ /* Do not touch unused pins, may be used for GPIO */
+ val = ret & ~pu_pins;
+ val |= pu_pins;
+
+ ret = stmpe_reg_write(stmpe, pureg, val);
+ }
+
+ return 0;
}
static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
{
- const struct stmpe_keypad_platform_data *plat = keypad->plat;
const struct stmpe_keypad_variant *variant = keypad->variant;
struct stmpe *stmpe = keypad->stmpe;
int ret;
- if (plat->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
+ if (keypad->debounce_ms > STMPE_KEYPAD_MAX_DEBOUNCE)
return -EINVAL;
- if (plat->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
+ if (keypad->scan_count > STMPE_KEYPAD_MAX_SCAN_COUNT)
return -EINVAL;
ret = stmpe_enable(stmpe, STMPE_BLOCK_KEYPAD);
@@ -245,7 +291,7 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
ret = stmpe_set_bits(stmpe, STMPE_KPC_CTRL_MSB,
STMPE_KPC_CTRL_MSB_SCAN_COUNT,
- plat->scan_count << 4);
+ keypad->scan_count << 4);
if (ret < 0)
return ret;
@@ -253,17 +299,18 @@ static int stmpe_keypad_chip_init(struct stmpe_keypad *keypad)
STMPE_KPC_CTRL_LSB_SCAN |
STMPE_KPC_CTRL_LSB_DEBOUNCE,
STMPE_KPC_CTRL_LSB_SCAN |
- (plat->debounce_ms << 1));
+ (keypad->debounce_ms << 1));
}
-static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
+static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad,
+ u32 used_rows, u32 used_cols)
{
int row, col;
- for (row = 0; row < STMPE_KEYPAD_MAX_ROWS; row++) {
- for (col = 0; col < STMPE_KEYPAD_MAX_COLS; col++) {
+ for (row = 0; row < used_rows; row++) {
+ for (col = 0; col < used_cols; col++) {
int code = MATRIX_SCAN_CODE(row, col,
- STMPE_KEYPAD_ROW_SHIFT);
+ STMPE_KEYPAD_ROW_SHIFT);
if (keypad->keymap[code] != KEY_RESERVED) {
keypad->rows |= 1 << row;
keypad->cols |= 1 << col;
@@ -272,51 +319,17 @@ static void stmpe_keypad_fill_used_pins(struct stmpe_keypad *keypad)
}
}
-#ifdef CONFIG_OF
-static const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- struct device_node *np = dev->of_node;
- struct stmpe_keypad_platform_data *plat;
-
- if (!np)
- return ERR_PTR(-ENODEV);
-
- plat = devm_kzalloc(dev, sizeof(*plat), GFP_KERNEL);
- if (!plat)
- return ERR_PTR(-ENOMEM);
-
- of_property_read_u32(np, "debounce-interval", &plat->debounce_ms);
- of_property_read_u32(np, "st,scan-count", &plat->scan_count);
-
- plat->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
-
- return plat;
-}
-#else
-static inline const struct stmpe_keypad_platform_data *
-stmpe_keypad_of_probe(struct device *dev)
-{
- return ERR_PTR(-EINVAL);
-}
-#endif
-
static int stmpe_keypad_probe(struct platform_device *pdev)
{
struct stmpe *stmpe = dev_get_drvdata(pdev->dev.parent);
- const struct stmpe_keypad_platform_data *plat;
+ struct device_node *np = pdev->dev.of_node;
struct stmpe_keypad *keypad;
struct input_dev *input;
+ u32 rows;
+ u32 cols;
int error;
int irq;
- plat = stmpe->pdata->keypad;
- if (!plat) {
- plat = stmpe_keypad_of_probe(&pdev->dev);
- if (IS_ERR(plat))
- return PTR_ERR(plat);
- }
-
irq = platform_get_irq(pdev, 0);
if (irq < 0)
return irq;
@@ -326,6 +339,13 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
if (!keypad)
return -ENOMEM;
+ keypad->stmpe = stmpe;
+ keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
+
+ of_property_read_u32(np, "debounce-interval", &keypad->debounce_ms);
+ of_property_read_u32(np, "st,scan-count", &keypad->scan_count);
+ keypad->no_autorepeat = of_property_read_bool(np, "st,no-autorepeat");
+
input = devm_input_allocate_device(&pdev->dev);
if (!input)
return -ENOMEM;
@@ -334,23 +354,22 @@ static int stmpe_keypad_probe(struct platform_device *pdev)
input->id.bustype = BUS_I2C;
input->dev.parent = &pdev->dev;
- error = matrix_keypad_build_keymap(plat->keymap_data, NULL,
- STMPE_KEYPAD_MAX_ROWS,
- STMPE_KEYPAD_MAX_COLS,
+ error = matrix_keypad_parse_of_params(&pdev->dev, &rows, &cols);
+ if (error)
+ return error;
+
+ error = matrix_keypad_build_keymap(NULL, NULL, rows, cols,
keypad->keymap, input);
if (error)
return error;
input_set_capability(input, EV_MSC, MSC_SCAN);
- if (!plat->no_autorepeat)
+ if (!keypad->no_autorepeat)
__set_bit(EV_REP, input->evbit);
- stmpe_keypad_fill_used_pins(keypad);
+ stmpe_keypad_fill_used_pins(keypad, rows, cols);
- keypad->stmpe = stmpe;
- keypad->plat = plat;
keypad->input = input;
- keypad->variant = &stmpe_keypad_variants[stmpe->partnum];
error = stmpe_keypad_chip_init(keypad);
if (error < 0)
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index d125a019383f..d88d73d83552 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -881,6 +881,34 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
unsigned char *pkt,
unsigned char pkt_id)
{
+ /*
+ * packet-fmt b7 b6 b5 b4 b3 b2 b1 b0
+ * Byte0 TWO & MULTI L 1 R M 1 Y0-2 Y0-1 Y0-0
+ * Byte0 NEW L 1 X1-5 1 1 Y0-2 Y0-1 Y0-0
+ * Byte1 Y0-10 Y0-9 Y0-8 Y0-7 Y0-6 Y0-5 Y0-4 Y0-3
+ * Byte2 X0-11 1 X0-10 X0-9 X0-8 X0-7 X0-6 X0-5
+ * Byte3 X1-11 1 X0-4 X0-3 1 X0-2 X0-1 X0-0
+ * Byte4 TWO X1-10 TWO X1-9 X1-8 X1-7 X1-6 X1-5 X1-4
+ * Byte4 MULTI X1-10 TWO X1-9 X1-8 X1-7 X1-6 Y1-5 1
+ * Byte4 NEW X1-10 TWO X1-9 X1-8 X1-7 X1-6 0 0
+ * Byte5 TWO & NEW Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 Y1-5 Y1-4
+ * Byte5 MULTI Y1-10 0 Y1-9 Y1-8 Y1-7 Y1-6 F-1 F-0
+ * L: Left button
+ * R / M: Non-clickpads: Right / Middle button
+ * Clickpads: When > 2 fingers are down, and some fingers
+ * are in the button area, then the 2 coordinates reported
+ * are for fingers outside the button area and these report
+ * extra fingers being present in the right / left button
+ * area. Note these fingers are not added to the F field!
+ * so if a TWO packet is received and R = 1 then there are
+ * 3 fingers down, etc.
+ * TWO: 1: Two touches present, byte 0/4/5 are in TWO fmt
+ * 0: If byte 4 bit 0 is 1, then byte 0/4/5 are in MULTI fmt
+ * otherwise byte 0 bit 4 must be set and byte 0/4/5 are
+ * in NEW fmt
+ * F: Number of fingers - 3, 0 means 3 fingers, 1 means 4 ...
+ */
+
mt[0].x = ((pkt[2] & 0x80) << 4);
mt[0].x |= ((pkt[2] & 0x3F) << 5);
mt[0].x |= ((pkt[3] & 0x30) >> 1);
@@ -919,18 +947,21 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
static int alps_get_mt_count(struct input_mt_pos *mt)
{
- int i;
+ int i, fingers = 0;
- for (i = 0; i < MAX_TOUCHES && mt[i].x != 0 && mt[i].y != 0; i++)
- /* empty */;
+ for (i = 0; i < MAX_TOUCHES; i++) {
+ if (mt[i].x != 0 || mt[i].y != 0)
+ fingers++;
+ }
- return i;
+ return fingers;
}
static int alps_decode_packet_v7(struct alps_fields *f,
unsigned char *p,
struct psmouse *psmouse)
{
+ struct alps_data *priv = psmouse->private;
unsigned char pkt_id;
pkt_id = alps_get_packet_id_v7(p);
@@ -938,19 +969,52 @@ static int alps_decode_packet_v7(struct alps_fields *f,
return 0;
if (pkt_id == V7_PACKET_ID_UNKNOWN)
return -1;
+ /*
+ * NEW packets are sent to indicate a discontinuity in the finger
+ * coordinate reporting. Specifically a finger may have moved from
+ * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
+ * us.
+ *
+ * NEW packets have 3 problems:
+ * 1) They do not contain middle / right button info (on non clickpads)
+ * this can be worked around by preserving the old button state
+ * 2) They do not contain an accurate finger count, and they are
+ * typically sent when the number of fingers changes. We cannot use
+ * the old finger count as it may not match the number of
+ * touch coordinates available in the NEW packet
+ * 3) Their x data for the second touch is inaccurate leading to
+ * a possible jump of the x coordinate by 16 units when the first
+ * non NEW packet comes in
+ * Since problems 2 & 3 cannot be worked around, just ignore them.
+ */
+ if (pkt_id == V7_PACKET_ID_NEW)
+ return 1;
alps_get_finger_coordinate_v7(f->mt, p, pkt_id);
- if (pkt_id == V7_PACKET_ID_TWO || pkt_id == V7_PACKET_ID_MULTI) {
- f->left = (p[0] & 0x80) >> 7;
+ if (pkt_id == V7_PACKET_ID_TWO)
+ f->fingers = alps_get_mt_count(f->mt);
+ else /* pkt_id == V7_PACKET_ID_MULTI */
+ f->fingers = 3 + (p[5] & 0x03);
+
+ f->left = (p[0] & 0x80) >> 7;
+ if (priv->flags & ALPS_BUTTONPAD) {
+ if (p[0] & 0x20)
+ f->fingers++;
+ if (p[0] & 0x10)
+ f->fingers++;
+ } else {
f->right = (p[0] & 0x20) >> 5;
f->middle = (p[0] & 0x10) >> 4;
}
- if (pkt_id == V7_PACKET_ID_TWO)
- f->fingers = alps_get_mt_count(f->mt);
- else if (pkt_id == V7_PACKET_ID_MULTI)
- f->fingers = 3 + (p[5] & 0x03);
+ /* Sometimes a single touch is reported in mt[1] rather than mt[0] */
+ if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
+ f->mt[0].x = f->mt[1].x;
+ f->mt[0].y = f->mt[1].y;
+ f->mt[1].x = 0;
+ f->mt[1].y = 0;
+ }
return 0;
}
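
Per the packet-format comment above, a V7 MULTI packet encodes "fingers minus 3" in the two low bits of byte 5, and on clickpads bits 5 and 4 of byte 0 flag extra fingers resting in the button areas rather than buttons. A small standalone decode of just that finger count (the sample bytes are made up):

#include <stdint.h>
#include <stdio.h>

/* Finger count from a V7 MULTI packet; clickpad button-area fingers included. */
static int v7_multi_fingers(const uint8_t *p, int is_clickpad)
{
        int fingers = 3 + (p[5] & 0x03);        /* F field: 0 means 3 fingers */

        if (is_clickpad) {
                if (p[0] & 0x20)                /* extra finger in one button area */
                        fingers++;
                if (p[0] & 0x10)                /* extra finger in the other button area */
                        fingers++;
        }
        return fingers;
}

int main(void)
{
        uint8_t pkt[6] = { 0x20, 0, 0, 0, 0, 0x01 };    /* hypothetical sample packet */
        printf("%d\n", v7_multi_fingers(pkt, 1));       /* 3 + 1 (F field) + 1 (button area) = 5 */
        return 0;
}
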
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index f2b978026407..6e22682c8255 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1097,6 +1097,8 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
* Asus UX31 0x361f00 20, 15, 0e clickpad
* Asus UX32VD 0x361f02 00, 15, 0e clickpad
* Avatar AVIU-145A2 0x361f00 ? clickpad
+ * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
+ * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
* Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
* Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
* Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1475,6 +1477,20 @@ static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
},
},
+ {
+ /* Fujitsu LIFEBOOK E554 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E554"),
+ },
+ },
+ {
+ /* Fujitsu LIFEBOOK E544 does not work with crc_enabled == 0 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E544"),
+ },
+ },
#endif
{ }
};
@@ -1520,6 +1536,8 @@ static int elantech_set_properties(struct elantech_data *etd)
case 7:
case 8:
case 9:
+ case 10:
+ case 13:
etd->hw_version = 4;
break;
default:
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index f9472920d986..23e26e0768b5 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,8 +135,9 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
1232, 5710, 1156, 4696
},
{
- (const char * const []){"LEN0034", "LEN0036", "LEN0039",
- "LEN2002", "LEN2004", NULL},
+ (const char * const []){"LEN0034", "LEN0036", "LEN0037",
+ "LEN0039", "LEN2002", "LEN2004",
+ NULL},
1024, 5112, 2024, 4832
},
{
@@ -165,7 +166,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0034", /* T431s, L440, L540, T540, W540, X1 Carbon 2nd */
"LEN0035", /* X240 */
"LEN0036", /* T440 */
- "LEN0037",
+ "LEN0037", /* X1 Carbon 2nd */
"LEN0038",
"LEN0039", /* T440s */
"LEN0041",
diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c
index 30c8b6998808..354d47ecd66a 100644
--- a/drivers/input/mouse/trackpoint.c
+++ b/drivers/input/mouse/trackpoint.c
@@ -227,6 +227,7 @@ TRACKPOINT_INT_ATTR(thresh, TP_THRESH, TP_DEF_THRESH);
TRACKPOINT_INT_ATTR(upthresh, TP_UP_THRESH, TP_DEF_UP_THRESH);
TRACKPOINT_INT_ATTR(ztime, TP_Z_TIME, TP_DEF_Z_TIME);
TRACKPOINT_INT_ATTR(jenks, TP_JENKS_CURV, TP_DEF_JENKS_CURV);
+TRACKPOINT_INT_ATTR(drift_time, TP_DRIFT_TIME, TP_DEF_DRIFT_TIME);
TRACKPOINT_BIT_ATTR(press_to_select, TP_TOGGLE_PTSON, TP_MASK_PTSON, 0,
TP_DEF_PTSON);
@@ -246,6 +247,7 @@ static struct attribute *trackpoint_attrs[] = {
&psmouse_attr_upthresh.dattr.attr,
&psmouse_attr_ztime.dattr.attr,
&psmouse_attr_jenks.dattr.attr,
+ &psmouse_attr_drift_time.dattr.attr,
&psmouse_attr_press_to_select.dattr.attr,
&psmouse_attr_skipback.dattr.attr,
&psmouse_attr_ext_dev.dattr.attr,
@@ -312,6 +314,7 @@ static int trackpoint_sync(struct psmouse *psmouse, bool in_power_on_state)
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, upthresh);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, ztime);
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, jenks);
+ TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, drift_time);
/* toggles */
TRACKPOINT_UPDATE(in_power_on_state, psmouse, tp, press_to_select);
@@ -332,6 +335,7 @@ static void trackpoint_defaults(struct trackpoint_data *tp)
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, upthresh);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, ztime);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, jenks);
+ TRACKPOINT_SET_POWER_ON_DEFAULT(tp, drift_time);
TRACKPOINT_SET_POWER_ON_DEFAULT(tp, inertia);
/* toggles */
diff --git a/drivers/input/mouse/trackpoint.h b/drivers/input/mouse/trackpoint.h
index ecd0547964a5..5617ed3a7d7a 100644
--- a/drivers/input/mouse/trackpoint.h
+++ b/drivers/input/mouse/trackpoint.h
@@ -70,6 +70,9 @@
#define TP_UP_THRESH 0x5A /* Used to generate a 'click' on Z-axis */
#define TP_Z_TIME 0x5E /* How sharp of a press */
#define TP_JENKS_CURV 0x5D /* Minimum curvature for double click */
+#define TP_DRIFT_TIME 0x5F /* How long a 'hands off' condition */
+ /* must last (x*107ms) for drift */
+ /* correction to occur */
/*
* Toggling Flag bits
@@ -120,6 +123,7 @@
#define TP_DEF_UP_THRESH 0xFF
#define TP_DEF_Z_TIME 0x26
#define TP_DEF_JENKS_CURV 0x87
+#define TP_DEF_DRIFT_TIME 0x05
/* Toggles */
#define TP_DEF_MB 0x00
@@ -137,6 +141,7 @@ struct trackpoint_data
unsigned char draghys, mindrag;
unsigned char thresh, upthresh;
unsigned char ztime, jenks;
+ unsigned char drift_time;
/* toggles */
unsigned char press_to_select;
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index c66d1b53843e..c11556563ef0 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -152,6 +152,14 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
},
},
{
+ /* Medion Akoya E7225 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Medion"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Akoya E7225"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "1.0"),
+ },
+ },
+ {
/* Blue FB5601 */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "blue"),
@@ -415,6 +423,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
},
},
{
+ /* Acer Aspire 7738 */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"),
+ },
+ },
+ {
/* Gericom Bellagio */
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
@@ -745,6 +760,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = {
{ }
};
+/*
+ * Some laptops need keyboard reset before probing for the trackpad to get
+ * it detected, initialised & finally work.
+ */
+static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = {
+ {
+ /* Gigabyte P35 v2 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"),
+ },
+ },
+ {
+ /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X3"),
+ },
+ },
+ {
+ /* Gigabyte P34 - Elantech touchpad */
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P34"),
+ },
+ },
+ { }
+};
+
#endif /* CONFIG_X86 */
#ifdef CONFIG_PNP
@@ -1040,6 +1084,9 @@ static int __init i8042_platform_init(void)
if (dmi_check_system(i8042_dmi_dritek_table))
i8042_dritek = true;
+ if (dmi_check_system(i8042_dmi_kbdreset_table))
+ i8042_kbdreset = true;
+
/*
* A20 was already enabled during early kernel init. But some buggy
* BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 924e4bf357fb..986a71c614b0 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -67,6 +67,10 @@ static bool i8042_notimeout;
module_param_named(notimeout, i8042_notimeout, bool, 0);
MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042");
+static bool i8042_kbdreset;
+module_param_named(kbdreset, i8042_kbdreset, bool, 0);
+MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port");
+
#ifdef CONFIG_X86
static bool i8042_dritek;
module_param_named(dritek, i8042_dritek, bool, 0);
@@ -790,6 +794,16 @@ static int __init i8042_check_aux(void)
return -1;
/*
+ * Reset keyboard (needed on some laptops to successfully detect
+ * touchpad, e.g., some Gigabyte laptop models with Elantech
+ * touchpads).
+ */
+ if (i8042_kbdreset) {
+ pr_warn("Attempting to reset device connected to KBD port\n");
+ i8042_kbd_write(NULL, (unsigned char) 0xff);
+ }
+
+/*
* Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and
* used it for a PCI card or something else.
*/
diff --git a/drivers/input/touchscreen/atmel_mxt_ts.c b/drivers/input/touchscreen/atmel_mxt_ts.c
index bb070206223c..95ee92a91bd2 100644
--- a/drivers/input/touchscreen/atmel_mxt_ts.c
+++ b/drivers/input/touchscreen/atmel_mxt_ts.c
@@ -99,13 +99,9 @@
#define MXT_T6_STATUS_COMSERR (1 << 2)
/* MXT_GEN_POWER_T7 field */
-struct t7_config {
- u8 idle;
- u8 active;
-} __packed;
-
-#define MXT_POWER_CFG_RUN 0
-#define MXT_POWER_CFG_DEEPSLEEP 1
+#define MXT_POWER_IDLEACQINT 0
+#define MXT_POWER_ACTVACQINT 1
+#define MXT_POWER_ACTV2IDLETO 2
/* MXT_GEN_ACQUIRE_T8 field */
#define MXT_ACQUIRE_CHRGTIME 0
@@ -117,6 +113,7 @@ struct t7_config {
#define MXT_ACQUIRE_ATCHCALSTHR 7
/* MXT_TOUCH_MULTI_T9 field */
+#define MXT_TOUCH_CTRL 0
#define MXT_T9_ORIENT 9
#define MXT_T9_RANGE 18
@@ -256,7 +253,6 @@ struct mxt_data {
bool update_input;
u8 last_message_count;
u8 num_touchids;
- struct t7_config t7_cfg;
/* Cached parameters from object table */
u16 T5_address;
@@ -672,6 +668,20 @@ static void mxt_proc_t6_messages(struct mxt_data *data, u8 *msg)
data->t6_status = status;
}
+static int mxt_write_object(struct mxt_data *data,
+ u8 type, u8 offset, u8 val)
+{
+ struct mxt_object *object;
+ u16 reg;
+
+ object = mxt_get_object(data, type);
+ if (!object || offset >= mxt_obj_size(object))
+ return -EINVAL;
+
+ reg = object->start_address;
+ return mxt_write_reg(data->client, reg + offset, val);
+}
+
static void mxt_input_button(struct mxt_data *data, u8 *message)
{
struct input_dev *input = data->input_dev;
@@ -1742,60 +1752,6 @@ err_free_object_table:
return error;
}
-static int mxt_set_t7_power_cfg(struct mxt_data *data, u8 sleep)
-{
- struct device *dev = &data->client->dev;
- int error;
- struct t7_config *new_config;
- struct t7_config deepsleep = { .active = 0, .idle = 0 };
-
- if (sleep == MXT_POWER_CFG_DEEPSLEEP)
- new_config = &deepsleep;
- else
- new_config = &data->t7_cfg;
-
- error = __mxt_write_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), new_config);
- if (error)
- return error;
-
- dev_dbg(dev, "Set T7 ACTV:%d IDLE:%d\n",
- new_config->active, new_config->idle);
-
- return 0;
-}
-
-static int mxt_init_t7_power_cfg(struct mxt_data *data)
-{
- struct device *dev = &data->client->dev;
- int error;
- bool retry = false;
-
-recheck:
- error = __mxt_read_reg(data->client, data->T7_address,
- sizeof(data->t7_cfg), &data->t7_cfg);
- if (error)
- return error;
-
- if (data->t7_cfg.active == 0 || data->t7_cfg.idle == 0) {
- if (!retry) {
- dev_dbg(dev, "T7 cfg zero, resetting\n");
- mxt_soft_reset(data);
- retry = true;
- goto recheck;
- } else {
- dev_dbg(dev, "T7 cfg zero after reset, overriding\n");
- data->t7_cfg.active = 20;
- data->t7_cfg.idle = 100;
- return mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
- }
- }
-
- dev_dbg(dev, "Initialized power cfg: ACTV %d, IDLE %d\n",
- data->t7_cfg.active, data->t7_cfg.idle);
- return 0;
-}
-
static int mxt_configure_objects(struct mxt_data *data,
const struct firmware *cfg)
{
@@ -1809,12 +1765,6 @@ static int mxt_configure_objects(struct mxt_data *data,
dev_warn(dev, "Error %d updating config\n", error);
}
- error = mxt_init_t7_power_cfg(data);
- if (error) {
- dev_err(dev, "Failed to initialize power cfg\n");
- return error;
- }
-
error = mxt_initialize_t9_input_device(data);
if (error)
return error;
@@ -2093,15 +2043,16 @@ static const struct attribute_group mxt_attr_group = {
static void mxt_start(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_RUN);
-
- /* Recalibrate since chip has been in deep sleep */
- mxt_t6_command(data, MXT_COMMAND_CALIBRATE, 1, false);
+ /* Touch enable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0x83);
}
static void mxt_stop(struct mxt_data *data)
{
- mxt_set_t7_power_cfg(data, MXT_POWER_CFG_DEEPSLEEP);
+ /* Touch disable */
+ mxt_write_object(data,
+ MXT_TOUCH_MULTI_T9, MXT_TOUCH_CTRL, 0);
}
static int mxt_input_open(struct input_dev *dev)
@@ -2266,6 +2217,8 @@ static int __maybe_unused mxt_resume(struct device *dev)
struct mxt_data *data = i2c_get_clientdata(client);
struct input_dev *input_dev = data->input_dev;
+ mxt_soft_reset(data);
+
mutex_lock(&input_dev->mutex);
if (input_dev->users)
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 3793fcc7e5db..d4c24fb7704f 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -850,9 +850,11 @@ static int edt_ft5x06_ts_identify(struct i2c_client *client,
}
#define EDT_ATTR_CHECKSET(name, reg) \
+do { \
if (pdata->name >= edt_ft5x06_attr_##name.limit_low && \
pdata->name <= edt_ft5x06_attr_##name.limit_high) \
- edt_ft5x06_register_write(tsdata, reg, pdata->name)
+ edt_ft5x06_register_write(tsdata, reg, pdata->name); \
+} while (0)
#define EDT_GET_PROP(name, reg) { \
u32 val; \
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 1232336b960e..40dfbc0444c0 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb,
if (action != BUS_NOTIFY_REMOVED_DEVICE)
return 0;
- /*
- * If the device is still attached to a device driver we can't
- * tear down the domain yet as DMA mappings may still be in use.
- * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that.
- */
- if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL)
- return 0;
-
domain = find_domain(dev);
if (!domain)
return 0;
@@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
domain_remove_one_dev_info(old_domain, dev);
else
domain_remove_dev_info(old_domain);
+
+ if (!domain_type_is_vm_or_si(old_domain) &&
+ list_empty(&old_domain->devices))
+ domain_exit(old_domain);
}
}
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index 68dfb0fd5ee9..748693192c20 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd,
static u64 ipmmu_page_prot(unsigned int prot, u64 type)
{
- u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
+ u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF
| ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV
| ARM_VMSA_PTE_NS | type;
@@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type)
if (prot & IOMMU_CACHE)
pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT;
- if (prot & IOMMU_EXEC)
- pgprot &= ~ARM_VMSA_PTE_XN;
+ if (prot & IOMMU_NOEXEC)
+ pgprot |= ARM_VMSA_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
/* If no access create a faulting entry to avoid TLB fills. */
pgprot &= ~ARM_VMSA_PTE_PAGE;
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
index b2023af384b9..6a8b1ec4a48a 100644
--- a/drivers/iommu/rockchip-iommu.c
+++ b/drivers/iommu/rockchip-iommu.c
@@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = {
.remove = rk_iommu_remove,
.driver = {
.name = "rk_iommu",
- .owner = THIS_MODULE,
.of_match_table = of_match_ptr(rk_iommu_dt_ids),
},
};
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index f722a0c466cf..c48da057dbb1 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -315,6 +315,7 @@ static const struct iommu_ops gart_iommu_ops = {
.attach_dev = gart_iommu_attach_dev,
.detach_dev = gart_iommu_detach_dev,
.map = gart_iommu_map,
+ .map_sg = default_iommu_map_sg,
.unmap = gart_iommu_unmap,
.iova_to_phys = gart_iommu_iova_to_phys,
.pgsize_bitmap = GART_IOMMU_PGSIZES,
@@ -395,7 +396,7 @@ static int tegra_gart_probe(struct platform_device *pdev)
do_gart_setup(gart, NULL);
gart_handle = gart;
- bus_set_iommu(&platform_bus_type, &gart_iommu_ops);
+
return 0;
}
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index d111ac779c40..63cd031b2c28 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -28,7 +28,7 @@
#define AT91_AIC_IRQ_MIN_PRIORITY 0
#define AT91_AIC_IRQ_MAX_PRIORITY 7
-#define AT91_AIC_SRCTYPE GENMASK(7, 6)
+#define AT91_AIC_SRCTYPE GENMASK(6, 5)
#define AT91_AIC_SRCTYPE_LOW (0 << 5)
#define AT91_AIC_SRCTYPE_FALLING (1 << 5)
#define AT91_AIC_SRCTYPE_HIGH (2 << 5)
@@ -74,7 +74,7 @@ int aic_common_set_type(struct irq_data *d, unsigned type, unsigned *val)
return -EINVAL;
}
- *val &= AT91_AIC_SRCTYPE;
+ *val &= ~AT91_AIC_SRCTYPE;
*val |= aic_type;
return 0;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 86e4684adeb1..d8996bdf0f61 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1053,7 +1053,7 @@ static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
* of two entries. No, the architecture doesn't let you
* express an ITT with a single entry.
*/
- nr_ites = max(2, roundup_pow_of_two(nvecs));
+ nr_ites = max(2UL, roundup_pow_of_two(nvecs));
sz = nr_ites * its->ite_size;
sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
itt = kmalloc(sz, GFP_KERNEL);
diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c
index 29b8f21b74d0..6bc2deb73d53 100644
--- a/drivers/irqchip/irq-hip04.c
+++ b/drivers/irqchip/irq-hip04.c
@@ -381,7 +381,7 @@ hip04_of_init(struct device_node *node, struct device_node *parent)
* It will be refined as each CPU probes its ID.
*/
for (i = 0; i < NR_HIP04_CPU_IF; i++)
- hip04_cpu_map[i] = 0xff;
+ hip04_cpu_map[i] = 0xffff;
/*
* Find out how many interrupts are supported.
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
index 7e342df6a62f..0b0d2c00a2df 100644
--- a/drivers/irqchip/irq-mtk-sysirq.c
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -137,9 +137,9 @@ static int __init mtk_sysirq_of_init(struct device_node *node,
return -ENOMEM;
chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
- if (!chip_data->intpol_base) {
+ if (IS_ERR(chip_data->intpol_base)) {
pr_err("mtk_sysirq: unable to map sysirq register\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(chip_data->intpol_base);
goto out_free;
}
diff --git a/drivers/irqchip/irq-omap-intc.c b/drivers/irqchip/irq-omap-intc.c
index 28718d3e8281..c03f140acbae 100644
--- a/drivers/irqchip/irq-omap-intc.c
+++ b/drivers/irqchip/irq-omap-intc.c
@@ -263,7 +263,7 @@ static int __init omap_init_irq_of(struct device_node *node)
return ret;
}
-static int __init omap_init_irq_legacy(u32 base)
+static int __init omap_init_irq_legacy(u32 base, struct device_node *node)
{
int j, irq_base;
@@ -277,7 +277,7 @@ static int __init omap_init_irq_legacy(u32 base)
irq_base = 0;
}
- domain = irq_domain_add_legacy(NULL, omap_nr_irqs, irq_base, 0,
+ domain = irq_domain_add_legacy(node, omap_nr_irqs, irq_base, 0,
&irq_domain_simple_ops, NULL);
omap_irq_soft_reset();
@@ -301,10 +301,26 @@ static int __init omap_init_irq(u32 base, struct device_node *node)
{
int ret;
- if (node)
+ /*
+ * FIXME legacy OMAP DMA driver sitting under arch/arm/plat-omap/dma.c
+ * depends is still not ready for linear IRQ domains; because of that
+ * we need to temporarily "blacklist" OMAP2 and OMAP3 devices from using
+ * linear IRQ Domain until that driver is finally fixed.
+ */
+ if (of_device_is_compatible(node, "ti,omap2-intc") ||
+ of_device_is_compatible(node, "ti,omap3-intc")) {
+ struct resource res;
+
+ if (of_address_to_resource(node, 0, &res))
+ return -ENOMEM;
+
+ base = res.start;
+ ret = omap_init_irq_legacy(base, node);
+ } else if (node) {
ret = omap_init_irq_of(node);
- else
- ret = omap_init_irq_legacy(base);
+ } else {
+ ret = omap_init_irq_legacy(base, NULL);
+ }
if (ret == 0)
omap_irq_enable_protection();
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index a82e542ffc21..0b380603a578 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci)
byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/
byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00";
byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00";
- byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00";
+ byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00";
byte force_mt_info = false;
byte dir;
dword d;
diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c
index 26515c27ea8c..25e419752a7b 100644
--- a/drivers/leds/leds-netxbig.c
+++ b/drivers/leds/leds-netxbig.c
@@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev,
led_dat->sata = 0;
led_dat->cdev.brightness = LED_OFF;
led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME;
- /*
- * If available, expose the SATA activity blink capability through
- * a "sata" sysfs attribute.
- */
- if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
- led_dat->cdev.groups = netxbig_led_groups;
led_dat->mode_addr = template->mode_addr;
led_dat->mode_val = template->mode_val;
led_dat->bright_addr = template->bright_addr;
led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1;
led_dat->timer = pdata->timer;
led_dat->num_timer = pdata->num_timer;
+ /*
+ * If available, expose the SATA activity blink capability through
+ * a "sata" sysfs attribute.
+ */
+ if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE)
+ led_dat->cdev.groups = netxbig_led_groups;
return led_classdev_register(&pdev->dev, &led_dat->cdev);
}
diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h
index f956ef26c0ce..fb7493dcfb79 100644
--- a/drivers/mcb/mcb-internal.h
+++ b/drivers/mcb/mcb-internal.h
@@ -7,6 +7,7 @@
#define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45
#define CHAMELEON_FILENAME_LEN 12
#define CHAMELEONV2_MAGIC 0xabce
+#define CHAM_HEADER_SIZE 0x200
enum chameleon_descriptor_type {
CHAMELEON_DTYPE_GENERAL = 0x0,
diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c
index b59181965643..5e1bd5db02c8 100644
--- a/drivers/mcb/mcb-pci.c
+++ b/drivers/mcb/mcb-pci.c
@@ -17,6 +17,7 @@
struct priv {
struct mcb_bus *bus;
+ phys_addr_t mapbase;
void __iomem *base;
};
@@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev)
static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
+ struct resource *res;
struct priv *priv;
- phys_addr_t mapbase;
int ret;
int num_cells;
unsigned long flags;
@@ -47,19 +48,21 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return -ENODEV;
}
- mapbase = pci_resource_start(pdev, 0);
- if (!mapbase) {
+ priv->mapbase = pci_resource_start(pdev, 0);
+ if (!priv->mapbase) {
dev_err(&pdev->dev, "No PCI resource\n");
goto err_start;
}
- ret = pci_request_region(pdev, 0, KBUILD_MODNAME);
- if (ret) {
- dev_err(&pdev->dev, "Failed to request PCI BARs\n");
+ res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE,
+ KBUILD_MODNAME);
+ if (IS_ERR(res)) {
+ dev_err(&pdev->dev, "Failed to request PCI memory\n");
+ ret = PTR_ERR(res);
goto err_start;
}
- priv->base = pci_iomap(pdev, 0, 0);
+ priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE);
if (!priv->base) {
dev_err(&pdev->dev, "Cannot ioremap\n");
ret = -ENOMEM;
@@ -84,7 +87,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
priv->bus->get_irq = mcb_pci_get_irq;
- ret = chameleon_parse_cells(priv->bus, mapbase, priv->base);
+ ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base);
if (ret < 0)
goto err_drvdata;
num_cells = ret;
@@ -93,8 +96,10 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
mcb_bus_add_devices(priv->bus);
+ return 0;
+
err_drvdata:
- pci_iounmap(pdev, priv->base);
+ iounmap(priv->base);
err_ioremap:
pci_release_region(pdev, 0);
err_start:
@@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev)
struct priv *priv = pci_get_drvdata(pdev);
mcb_release_bus(priv->bus);
+
+ iounmap(priv->base);
+ release_region(priv->mapbase, CHAM_HEADER_SIZE);
+ pci_disable_device(pdev);
}
static const struct pci_device_id mcb_pci_tbl[] = {
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 9fc616c2755e..c1c010498a21 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -94,6 +94,9 @@ struct cache_disk_superblock {
} __packed;
struct dm_cache_metadata {
+ atomic_t ref_count;
+ struct list_head list;
+
struct block_device *bdev;
struct dm_block_manager *bm;
struct dm_space_map *metadata_sm;
@@ -669,10 +672,10 @@ static void unpack_value(__le64 value_le, dm_oblock_t *block, unsigned *flags)
/*----------------------------------------------------------------*/
-struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
- sector_t data_block_size,
- bool may_format_device,
- size_t policy_hint_size)
+static struct dm_cache_metadata *metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
{
int r;
struct dm_cache_metadata *cmd;
@@ -680,9 +683,10 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
DMERR("could not allocate metadata struct");
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
+ atomic_set(&cmd->ref_count, 1);
init_rwsem(&cmd->root_lock);
cmd->bdev = bdev;
cmd->data_block_size = data_block_size;
@@ -705,10 +709,96 @@ struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
return cmd;
}
+/*
+ * We keep a little list of ref counted metadata objects to prevent two
+ * different target instances creating separate bufio instances. This is
+ * an issue if a table is reloaded before the suspend.
+ */
+static DEFINE_MUTEX(table_lock);
+static LIST_HEAD(table);
+
+static struct dm_cache_metadata *lookup(struct block_device *bdev)
+{
+ struct dm_cache_metadata *cmd;
+
+ list_for_each_entry(cmd, &table, list)
+ if (cmd->bdev == bdev) {
+ atomic_inc(&cmd->ref_count);
+ return cmd;
+ }
+
+ return NULL;
+}
+
+static struct dm_cache_metadata *lookup_or_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd, *cmd2;
+
+ mutex_lock(&table_lock);
+ cmd = lookup(bdev);
+ mutex_unlock(&table_lock);
+
+ if (cmd)
+ return cmd;
+
+ cmd = metadata_open(bdev, data_block_size, may_format_device, policy_hint_size);
+ if (!IS_ERR(cmd)) {
+ mutex_lock(&table_lock);
+ cmd2 = lookup(bdev);
+ if (cmd2) {
+ mutex_unlock(&table_lock);
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ return cmd2;
+ }
+ list_add(&cmd->list, &table);
+ mutex_unlock(&table_lock);
+ }
+
+ return cmd;
+}
+
+static bool same_params(struct dm_cache_metadata *cmd, sector_t data_block_size)
+{
+ if (cmd->data_block_size != data_block_size) {
+ DMERR("data_block_size (%llu) different from that in metadata (%llu)\n",
+ (unsigned long long) data_block_size,
+ (unsigned long long) cmd->data_block_size);
+ return false;
+ }
+
+ return true;
+}
+
+struct dm_cache_metadata *dm_cache_metadata_open(struct block_device *bdev,
+ sector_t data_block_size,
+ bool may_format_device,
+ size_t policy_hint_size)
+{
+ struct dm_cache_metadata *cmd = lookup_or_open(bdev, data_block_size,
+ may_format_device, policy_hint_size);
+
+ if (!IS_ERR(cmd) && !same_params(cmd, data_block_size)) {
+ dm_cache_metadata_close(cmd);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return cmd;
+}
+
void dm_cache_metadata_close(struct dm_cache_metadata *cmd)
{
- __destroy_persistent_data_objects(cmd);
- kfree(cmd);
+ if (atomic_dec_and_test(&cmd->ref_count)) {
+ mutex_lock(&table_lock);
+ list_del(&cmd->list);
+ mutex_unlock(&table_lock);
+
+ __destroy_persistent_data_objects(cmd);
+ kfree(cmd);
+ }
}
/*
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1e96d7889f51..e1650539cc2f 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -221,7 +221,13 @@ struct cache {
struct list_head need_commit_migrations;
sector_t migration_threshold;
wait_queue_head_t migration_wait;
- atomic_t nr_migrations;
+ atomic_t nr_allocated_migrations;
+
+ /*
+ * The number of in flight migrations that are performing
+ * background io. eg, promotion, writeback.
+ */
+ atomic_t nr_io_migrations;
wait_queue_head_t quiescing_wait;
atomic_t quiescing;
@@ -258,7 +264,6 @@ struct cache {
struct dm_deferred_set *all_io_ds;
mempool_t *migration_pool;
- struct dm_cache_migration *next_migration;
struct dm_cache_policy *policy;
unsigned policy_nr_args;
@@ -350,10 +355,31 @@ static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cel
dm_bio_prison_free_cell(cache->prison, cell);
}
+static struct dm_cache_migration *alloc_migration(struct cache *cache)
+{
+ struct dm_cache_migration *mg;
+
+ mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ if (mg) {
+ mg->cache = cache;
+ atomic_inc(&mg->cache->nr_allocated_migrations);
+ }
+
+ return mg;
+}
+
+static void free_migration(struct dm_cache_migration *mg)
+{
+ if (atomic_dec_and_test(&mg->cache->nr_allocated_migrations))
+ wake_up(&mg->cache->migration_wait);
+
+ mempool_free(mg, mg->cache->migration_pool);
+}
+
static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
if (!p->mg) {
- p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
+ p->mg = alloc_migration(cache);
if (!p->mg)
return -ENOMEM;
}
@@ -382,7 +408,7 @@ static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
free_prison_cell(cache, p->cell1);
if (p->mg)
- mempool_free(p->mg, cache->migration_pool);
+ free_migration(p->mg);
}
static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
@@ -854,24 +880,14 @@ static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
* Migration covers moving data from the origin device to the cache, or
* vice versa.
*--------------------------------------------------------------*/
-static void free_migration(struct dm_cache_migration *mg)
-{
- mempool_free(mg, mg->cache->migration_pool);
-}
-
-static void inc_nr_migrations(struct cache *cache)
+static void inc_io_migrations(struct cache *cache)
{
- atomic_inc(&cache->nr_migrations);
+ atomic_inc(&cache->nr_io_migrations);
}
-static void dec_nr_migrations(struct cache *cache)
+static void dec_io_migrations(struct cache *cache)
{
- atomic_dec(&cache->nr_migrations);
-
- /*
- * Wake the worker in case we're suspending the target.
- */
- wake_up(&cache->migration_wait);
+ atomic_dec(&cache->nr_io_migrations);
}
static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
@@ -894,11 +910,10 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
wake_worker(cache);
}
-static void cleanup_migration(struct dm_cache_migration *mg)
+static void free_io_migration(struct dm_cache_migration *mg)
{
- struct cache *cache = mg->cache;
+ dec_io_migrations(mg->cache);
free_migration(mg);
- dec_nr_migrations(cache);
}
static void migration_failure(struct dm_cache_migration *mg)
@@ -923,7 +938,7 @@ static void migration_failure(struct dm_cache_migration *mg)
cell_defer(cache, mg->new_ocell, true);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
static void migration_success_pre_commit(struct dm_cache_migration *mg)
@@ -934,7 +949,7 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
if (mg->writeback) {
clear_dirty(cache, mg->old_oblock, mg->cblock);
cell_defer(cache, mg->old_ocell, false);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
} else if (mg->demote) {
@@ -944,14 +959,14 @@ static void migration_success_pre_commit(struct dm_cache_migration *mg)
mg->old_oblock);
if (mg->promote)
cell_defer(cache, mg->new_ocell, true);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
} else {
if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
policy_remove_mapping(cache->policy, mg->new_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
return;
}
}
@@ -984,7 +999,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
} else {
if (mg->invalidate)
policy_remove_mapping(cache->policy, mg->old_oblock);
- cleanup_migration(mg);
+ free_io_migration(mg);
}
} else {
@@ -999,7 +1014,7 @@ static void migration_success_post_commit(struct dm_cache_migration *mg)
bio_endio(mg->new_ocell->holder, 0);
cell_defer(cache, mg->new_ocell, false);
}
- cleanup_migration(mg);
+ free_io_migration(mg);
}
}
@@ -1251,7 +1266,7 @@ static void promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = cell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1275,7 +1290,7 @@ static void writeback(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1302,7 +1317,7 @@ static void demote_then_promote(struct cache *cache, struct prealloc *structs,
mg->new_ocell = new_ocell;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1330,7 +1345,7 @@ static void invalidate(struct cache *cache, struct prealloc *structs,
mg->new_ocell = NULL;
mg->start_jiffies = jiffies;
- inc_nr_migrations(cache);
+ inc_io_migrations(cache);
quiesce_migration(mg);
}
@@ -1412,7 +1427,7 @@ static void process_discard_bio(struct cache *cache, struct prealloc *structs,
static bool spare_migration_bandwidth(struct cache *cache)
{
- sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
+ sector_t current_volume = (atomic_read(&cache->nr_io_migrations) + 1) *
cache->sectors_per_block;
return current_volume < cache->migration_threshold;
}
@@ -1764,7 +1779,7 @@ static void stop_quiescing(struct cache *cache)
static void wait_for_migrations(struct cache *cache)
{
- wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
+ wait_event(cache->migration_wait, !atomic_read(&cache->nr_allocated_migrations));
}
static void stop_worker(struct cache *cache)
@@ -1876,9 +1891,6 @@ static void destroy(struct cache *cache)
{
unsigned i;
- if (cache->next_migration)
- mempool_free(cache->next_migration, cache->migration_pool);
-
if (cache->migration_pool)
mempool_destroy(cache->migration_pool);
@@ -2424,7 +2436,8 @@ static int cache_create(struct cache_args *ca, struct cache **result)
INIT_LIST_HEAD(&cache->quiesced_migrations);
INIT_LIST_HEAD(&cache->completed_migrations);
INIT_LIST_HEAD(&cache->need_commit_migrations);
- atomic_set(&cache->nr_migrations, 0);
+ atomic_set(&cache->nr_allocated_migrations, 0);
+ atomic_set(&cache->nr_io_migrations, 0);
init_waitqueue_head(&cache->migration_wait);
init_waitqueue_head(&cache->quiescing_wait);
@@ -2487,8 +2500,6 @@ static int cache_create(struct cache_args *ca, struct cache **result)
goto bad;
}
- cache->next_migration = NULL;
-
cache->need_tick_bio = true;
cache->sized = false;
cache->invalidate = false;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 8735543eacdb..07705ee181e3 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1127,6 +1127,24 @@ static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
schedule_zero(tc, virt_block, data_dest, cell, bio);
}
+static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
+
+static void check_for_space(struct pool *pool)
+{
+ int r;
+ dm_block_t nr_free;
+
+ if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE)
+ return;
+
+ r = dm_pool_get_free_block_count(pool->pmd, &nr_free);
+ if (r)
+ return;
+
+ if (nr_free)
+ set_pool_mode(pool, PM_WRITE);
+}
+
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
@@ -1141,6 +1159,8 @@ static int commit(struct pool *pool)
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
+ else
+ check_for_space(pool);
return r;
}
@@ -1159,8 +1179,6 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks)
}
}
-static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
-
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
int r;
@@ -2155,7 +2173,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode)
pool->process_cell = process_cell_read_only;
pool->process_discard_cell = process_discard_cell;
pool->process_prepared_mapping = process_prepared_mapping;
- pool->process_prepared_discard = process_prepared_discard_passdown;
+ pool->process_prepared_discard = process_prepared_discard;
if (!pool->pf.error_if_no_space && no_space_timeout)
queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout);
@@ -3367,6 +3385,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
+ if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
+ dm_device_name(pool->pool_md));
+ return -EINVAL;
+ }
+
if (!strcasecmp(argv[0], "create_thin"))
r = process_create_thin_mesg(argc, argv, pool);
@@ -3814,6 +3838,8 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
r = -EINVAL;
goto bad;
}
+ atomic_set(&tc->refcount, 1);
+ init_completion(&tc->can_destroy);
list_add_tail_rcu(&tc->list, &tc->pool->active_thins);
spin_unlock_irqrestore(&tc->pool->lock, flags);
/*
@@ -3826,9 +3852,6 @@ static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
dm_put(pool_md);
- atomic_set(&tc->refcount, 1);
- init_completion(&tc->can_destroy);
-
return 0;
bad:
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 4c06585bf165..2caf5b374649 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -206,6 +206,9 @@ struct mapped_device {
/* zero-length flush that will be cloned and submitted to targets */
struct bio flush_bio;
+ /* the number of internal suspends */
+ unsigned internal_suspend_count;
+
struct dm_stats stats;
};
@@ -899,7 +902,7 @@ static void disable_write_same(struct mapped_device *md)
static void clone_endio(struct bio *bio, int error)
{
- int r = 0;
+ int r = error;
struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
struct dm_io *io = tio->io;
struct mapped_device *md = tio->io->md;
@@ -2928,7 +2931,7 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
{
struct dm_table *map = NULL;
- if (dm_suspended_internally_md(md))
+ if (md->internal_suspend_count++)
return; /* nested internal suspend */
if (dm_suspended_md(md)) {
@@ -2953,7 +2956,9 @@ static void __dm_internal_suspend(struct mapped_device *md, unsigned suspend_fla
static void __dm_internal_resume(struct mapped_device *md)
{
- if (!dm_suspended_internally_md(md))
+ BUG_ON(!md->internal_suspend_count);
+
+ if (--md->internal_suspend_count)
return; /* resume from nested internal suspend */
if (dm_suspended_md(md))
diff --git a/drivers/media/pci/cx23885/cx23885-cards.c b/drivers/media/pci/cx23885/cx23885-cards.c
index db99ca2613ba..06931f6fa26c 100644
--- a/drivers/media/pci/cx23885/cx23885-cards.c
+++ b/drivers/media/pci/cx23885/cx23885-cards.c
@@ -614,7 +614,7 @@ struct cx23885_board cx23885_boards[] = {
.portb = CX23885_MPEG_DVB,
},
[CX23885_BOARD_HAUPPAUGE_HVR4400] = {
- .name = "Hauppauge WinTV-HVR4400",
+ .name = "Hauppauge WinTV-HVR4400/HVR5500",
.porta = CX23885_ANALOG_VIDEO,
.portb = CX23885_MPEG_DVB,
.portc = CX23885_MPEG_DVB,
@@ -622,6 +622,10 @@ struct cx23885_board cx23885_boards[] = {
.tuner_addr = 0x60, /* 0xc0 >> 1 */
.tuner_bus = 1,
},
+ [CX23885_BOARD_HAUPPAUGE_STARBURST] = {
+ .name = "Hauppauge WinTV Starburst",
+ .portb = CX23885_MPEG_DVB,
+ },
[CX23885_BOARD_AVERMEDIA_HC81R] = {
.name = "AVerTV Hybrid Express Slim HC81R",
.tuner_type = TUNER_XC2028,
@@ -936,19 +940,19 @@ struct cx23885_subid cx23885_subids[] = {
}, {
.subvendor = 0x0070,
.subdevice = 0xc108,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-4400 (Model 121xxx, Hybrid DVB-T/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc138,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc12a,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_STARBURST, /* Hauppauge WinTV Starburst (Model 121x00, DVB-S2, IR) */
}, {
.subvendor = 0x0070,
.subdevice = 0xc1f8,
- .card = CX23885_BOARD_HAUPPAUGE_HVR4400,
+ .card = CX23885_BOARD_HAUPPAUGE_HVR4400, /* Hauppauge WinTV HVR-5500 (Model 121xxx, Hybrid DVB-T/C/S2, IR) */
}, {
.subvendor = 0x1461,
.subdevice = 0xd939,
@@ -1545,8 +1549,9 @@ void cx23885_gpio_setup(struct cx23885_dev *dev)
cx_write(GPIO_ISM, 0x00000000);/* INTERRUPTS active low*/
break;
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
/* GPIO-8 tda10071 demod reset */
- /* GPIO-9 si2165 demod reset */
+ /* GPIO-9 si2165 demod reset (only HVR4400/HVR5500)*/
/* Put the parts into reset and back */
cx23885_gpio_enable(dev, GPIO_8 | GPIO_9, 1);
@@ -1872,6 +1877,7 @@ void cx23885_card_setup(struct cx23885_dev *dev)
case CX23885_BOARD_HAUPPAUGE_HVR1850:
case CX23885_BOARD_HAUPPAUGE_HVR1290:
case CX23885_BOARD_HAUPPAUGE_HVR4400:
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
case CX23885_BOARD_HAUPPAUGE_IMPACTVCBE:
if (dev->i2c_bus[0].i2c_rc == 0)
hauppauge_eeprom(dev, eeprom+0xc0);
@@ -1980,6 +1986,11 @@ void cx23885_card_setup(struct cx23885_dev *dev)
ts2->ts_clk_en_val = 0x1; /* Enable TS_CLK */
ts2->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ ts1->gen_ctrl_val = 0xc; /* Serial bus + punctured clock */
+ ts1->ts_clk_en_val = 0x1; /* Enable TS_CLK */
+ ts1->src_sel_val = CX23885_SRC_SEL_PARALLEL_MPEG_VIDEO;
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_T982:
ts1->gen_ctrl_val = 0x5; /* Parallel */
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 1d9d0f86ca8c..1ad49946d7fa 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -2049,11 +2049,11 @@ static void cx23885_finidev(struct pci_dev *pci_dev)
cx23885_shutdown(dev);
- pci_disable_device(pci_dev);
-
/* unregister stuff */
free_irq(pci_dev->irq, dev);
+ pci_disable_device(pci_dev);
+
cx23885_dev_unregister(dev);
vb2_dma_sg_cleanup_ctx(dev->alloc_ctx);
v4l2_ctrl_handler_free(&dev->ctrl_handler);
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index c47d18270cfc..a9c450d4b54e 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1710,6 +1710,17 @@ static int dvb_register(struct cx23885_tsport *port)
break;
}
break;
+ case CX23885_BOARD_HAUPPAUGE_STARBURST:
+ i2c_bus = &dev->i2c_bus[0];
+ fe0->dvb.frontend = dvb_attach(tda10071_attach,
+ &hauppauge_tda10071_config,
+ &i2c_bus->i2c_adap);
+ if (fe0->dvb.frontend != NULL) {
+ dvb_attach(a8293_attach, fe0->dvb.frontend,
+ &i2c_bus->i2c_adap,
+ &hauppauge_a8293_config);
+ }
+ break;
case CX23885_BOARD_DVBSKY_T9580:
case CX23885_BOARD_DVBSKY_S950:
i2c_bus = &dev->i2c_bus[0];
diff --git a/drivers/media/pci/cx23885/cx23885.h b/drivers/media/pci/cx23885/cx23885.h
index f55cd12da0fd..36f2f96c40e4 100644
--- a/drivers/media/pci/cx23885/cx23885.h
+++ b/drivers/media/pci/cx23885/cx23885.h
@@ -99,6 +99,7 @@
#define CX23885_BOARD_DVBSKY_S950 49
#define CX23885_BOARD_DVBSKY_S952 50
#define CX23885_BOARD_DVBSKY_T982 51
+#define CX23885_BOARD_HAUPPAUGE_STARBURST 52
#define GPIO_0 0x00000001
#define GPIO_1 0x00000002
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index b463fe172d16..3fe9047ef466 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -602,10 +602,13 @@ isp_video_querycap(struct file *file, void *fh, struct v4l2_capability *cap)
strlcpy(cap->card, video->video.name, sizeof(cap->card));
strlcpy(cap->bus_info, "media", sizeof(cap->bus_info));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
+ | V4L2_CAP_STREAMING | V4L2_CAP_DEVICE_CAPS;
+
if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
else
- cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/atmel-isi.c b/drivers/media/platform/soc_camera/atmel-isi.c
index 8efe40337608..6d885239b16a 100644
--- a/drivers/media/platform/soc_camera/atmel-isi.c
+++ b/drivers/media/platform/soc_camera/atmel-isi.c
@@ -760,8 +760,9 @@ static int isi_camera_querycap(struct soc_camera_host *ici,
{
strcpy(cap->driver, "atmel-isi");
strcpy(cap->card, "Atmel Image Sensor Interface");
- cap->capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_STREAMING);
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx2_camera.c b/drivers/media/platform/soc_camera/mx2_camera.c
index ce72bd26a6ac..192377f55840 100644
--- a/drivers/media/platform/soc_camera/mx2_camera.c
+++ b/drivers/media/platform/soc_camera/mx2_camera.c
@@ -1256,7 +1256,8 @@ static int mx2_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, MX2_CAM_DRIVER_DESCRIPTION, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/mx3_camera.c b/drivers/media/platform/soc_camera/mx3_camera.c
index a60c3bb0e4cc..0b3299dee05d 100644
--- a/drivers/media/platform/soc_camera/mx3_camera.c
+++ b/drivers/media/platform/soc_camera/mx3_camera.c
@@ -967,7 +967,8 @@ static int mx3_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "i.MX3x Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index e6b93281f246..16f65ecb70a3 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1427,7 +1427,8 @@ static int omap1_cam_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, "OMAP1 Camera", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/pxa_camera.c b/drivers/media/platform/soc_camera/pxa_camera.c
index 951226af0eba..8d6e343fec0f 100644
--- a/drivers/media/platform/soc_camera/pxa_camera.c
+++ b/drivers/media/platform/soc_camera/pxa_camera.c
@@ -1576,7 +1576,8 @@ static int pxa_camera_querycap(struct soc_camera_host *ici,
{
/* cap->name is set by the friendly caller:-> */
strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
return 0;
}
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 0c1f55648106..9f1473c0a0cf 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -1799,7 +1799,9 @@ static int rcar_vin_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
index 8b27b3eb2b25..71787702d4a2 100644
--- a/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
+++ b/drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
@@ -1652,7 +1652,9 @@ static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
struct v4l2_capability *cap)
{
strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
- cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
+ cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;
+
return 0;
}
diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
index 0f345b1f9014..f327c49d7e09 100644
--- a/drivers/media/usb/dvb-usb/cxusb.c
+++ b/drivers/media/usb/dvb-usb/cxusb.c
@@ -2232,7 +2232,7 @@ static struct dvb_usb_device_properties cxusb_mygica_t230_properties = {
{
"Mygica T230 DVB-T/T2/C",
{ NULL },
- { &cxusb_table[22], NULL },
+ { &cxusb_table[20], NULL },
},
}
};
diff --git a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
index 1b158f1167ed..536210b39428 100644
--- a/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
+++ b/drivers/media/usb/pvrusb2/pvrusb2-v4l2.c
@@ -89,16 +89,6 @@ static int vbi_nr[PVR_NUM] = {[0 ... PVR_NUM-1] = -1};
module_param_array(vbi_nr, int, NULL, 0444);
MODULE_PARM_DESC(vbi_nr, "Offset for device's vbi dev minor");
-static struct v4l2_capability pvr_capability ={
- .driver = "pvrusb2",
- .card = "Hauppauge WinTV pvr-usb2",
- .bus_info = "usb",
- .version = LINUX_VERSION_CODE,
- .capabilities = (V4L2_CAP_VIDEO_CAPTURE |
- V4L2_CAP_TUNER | V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
- V4L2_CAP_READWRITE),
-};
-
static struct v4l2_fmtdesc pvr_fmtdesc [] = {
{
.index = 0,
@@ -160,10 +150,22 @@ static int pvr2_querycap(struct file *file, void *priv, struct v4l2_capability *
struct pvr2_v4l2_fh *fh = file->private_data;
struct pvr2_hdw *hdw = fh->channel.mc_head->hdw;
- memcpy(cap, &pvr_capability, sizeof(struct v4l2_capability));
+ strlcpy(cap->driver, "pvrusb2", sizeof(cap->driver));
strlcpy(cap->bus_info, pvr2_hdw_get_bus_info(hdw),
sizeof(cap->bus_info));
strlcpy(cap->card, pvr2_hdw_get_desc(hdw), sizeof(cap->card));
+ cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_TUNER |
+ V4L2_CAP_AUDIO | V4L2_CAP_RADIO |
+ V4L2_CAP_READWRITE | V4L2_CAP_DEVICE_CAPS;
+ switch (fh->pdi->devbase.vfl_type) {
+ case VFL_TYPE_GRABBER:
+ cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO;
+ break;
+ case VFL_TYPE_RADIO:
+ cap->device_caps = V4L2_CAP_RADIO;
+ break;
+ }
+ cap->device_caps |= V4L2_CAP_TUNER | V4L2_CAP_READWRITE;
return 0;
}
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index d09a8916e940..bc08a829bc13 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -3146,27 +3146,26 @@ static int vb2_thread(void *data)
prequeue--;
} else {
call_void_qop(q, wait_finish, q);
- ret = vb2_internal_dqbuf(q, &fileio->b, 0);
+ if (!threadio->stop)
+ ret = vb2_internal_dqbuf(q, &fileio->b, 0);
call_void_qop(q, wait_prepare, q);
dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
}
- if (threadio->stop)
- break;
- if (ret)
+ if (ret || threadio->stop)
break;
try_to_freeze();
vb = q->bufs[fileio->b.index];
if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
- ret = threadio->fnc(vb, threadio->priv);
- if (ret)
- break;
+ if (threadio->fnc(vb, threadio->priv))
+ break;
call_void_qop(q, wait_finish, q);
if (set_timestamp)
v4l2_get_timestamp(&fileio->b.timestamp);
- ret = vb2_internal_qbuf(q, &fileio->b);
+ if (!threadio->stop)
+ ret = vb2_internal_qbuf(q, &fileio->b);
call_void_qop(q, wait_prepare, q);
- if (ret)
+ if (ret || threadio->stop)
break;
}
@@ -3235,11 +3234,11 @@ int vb2_thread_stop(struct vb2_queue *q)
threadio->stop = true;
vb2_internal_streamoff(q, q->type);
call_void_qop(q, wait_prepare, q);
+ err = kthread_stop(threadio->thread);
q->fileio = NULL;
fileio->req.count = 0;
vb2_reqbufs(q, &fileio->req);
kfree(fileio);
- err = kthread_stop(threadio->thread);
threadio->thread = NULL;
kfree(threadio);
q->fileio = NULL;
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 52a0c2f6264f..ae498b53ee40 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id)
return ret;
}
- ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
+ ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO,
+ da9052_subdev_info,
ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL);
if (ret) {
dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret);
diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c
index dbdd0faeb6ce..210d1f85679e 100644
--- a/drivers/mfd/rtsx_usb.c
+++ b/drivers/mfd/rtsx_usb.c
@@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf)
#ifdef CONFIG_PM
static int rtsx_usb_suspend(struct usb_interface *intf, pm_message_t message)
{
- struct rtsx_ucr *ucr =
- (struct rtsx_ucr *)usb_get_intfdata(intf);
-
dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n",
__func__, message.event);
- /*
- * Call to make sure LED is off during suspend to save more power.
- * It is NOT a permanent state and could be turned on anytime later.
- * Thus no need to call turn_on when resunming.
- */
- mutex_lock(&ucr->dev_mutex);
- rtsx_usb_turn_off_led(ucr);
- mutex_unlock(&ucr->dev_mutex);
-
return 0;
}
diff --git a/drivers/mfd/stmpe.c b/drivers/mfd/stmpe.c
index e2f9df1c0c36..2d7fae94c861 100644
--- a/drivers/mfd/stmpe.c
+++ b/drivers/mfd/stmpe.c
@@ -519,6 +519,7 @@ static const u8 stmpe1601_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1601_REG_GPIO_SET_DIR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE1601_REG_GPIO_RE_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE1601_REG_GPIO_FE_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1601_REG_GPIO_PU_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE1601_REG_GPIO_AF_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1601_REG_INT_EN_GPIO_MASK_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE1601_REG_INT_STA_GPIO_MSB,
@@ -667,6 +668,7 @@ static const u8 stmpe1801_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE1801_REG_GPIO_SET_DIR_LOW,
[STMPE_IDX_GPRER_LSB] = STMPE1801_REG_GPIO_RE_LOW,
[STMPE_IDX_GPFER_LSB] = STMPE1801_REG_GPIO_FE_LOW,
+ [STMPE_IDX_GPPUR_LSB] = STMPE1801_REG_GPIO_PULL_UP_LOW,
[STMPE_IDX_IEGPIOR_LSB] = STMPE1801_REG_INT_EN_GPIO_MASK_LOW,
[STMPE_IDX_ISGPIOR_LSB] = STMPE1801_REG_INT_STA_GPIO_LOW,
};
@@ -750,6 +752,8 @@ static const u8 stmpe24xx_regs[] = {
[STMPE_IDX_GPDR_LSB] = STMPE24XX_REG_GPDR_LSB,
[STMPE_IDX_GPRER_LSB] = STMPE24XX_REG_GPRER_LSB,
[STMPE_IDX_GPFER_LSB] = STMPE24XX_REG_GPFER_LSB,
+ [STMPE_IDX_GPPUR_LSB] = STMPE24XX_REG_GPPUR_LSB,
+ [STMPE_IDX_GPPDR_LSB] = STMPE24XX_REG_GPPDR_LSB,
[STMPE_IDX_GPAFR_U_MSB] = STMPE24XX_REG_GPAFR_U_MSB,
[STMPE_IDX_IEGPIOR_LSB] = STMPE24XX_REG_IEGPIOR_LSB,
[STMPE_IDX_ISGPIOR_MSB] = STMPE24XX_REG_ISGPIOR_MSB,
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index bee0abf82040..84adb46b3e2f 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -188,6 +188,7 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE1601_REG_GPIO_ED_MSB 0x8A
#define STMPE1601_REG_GPIO_RE_LSB 0x8D
#define STMPE1601_REG_GPIO_FE_LSB 0x8F
+#define STMPE1601_REG_GPIO_PU_LSB 0x91
#define STMPE1601_REG_GPIO_AF_U_MSB 0x92
#define STMPE1601_SYS_CTRL_ENABLE_GPIO (1 << 3)
@@ -276,6 +277,8 @@ int stmpe_remove(struct stmpe *stmpe);
#define STMPE24XX_REG_GPEDR_MSB 0x8C
#define STMPE24XX_REG_GPRER_LSB 0x91
#define STMPE24XX_REG_GPFER_LSB 0x94
+#define STMPE24XX_REG_GPPUR_LSB 0x97
+#define STMPE24XX_REG_GPPDR_LSB 0x9a
#define STMPE24XX_REG_GPAFR_U_MSB 0x9B
#define STMPE24XX_SYS_CTRL_ENABLE_GPIO (1 << 3)
diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c
index 0d256cb002eb..d6b764349f9d 100644
--- a/drivers/mfd/tps65218.c
+++ b/drivers/mfd/tps65218.c
@@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg,
}
EXPORT_SYMBOL_GPL(tps65218_clear_bits);
+static const struct regmap_range tps65218_yes_ranges[] = {
+ regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2),
+ regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS),
+};
+
+static const struct regmap_access_table tps65218_volatile_table = {
+ .yes_ranges = tps65218_yes_ranges,
+ .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges),
+};
+
static struct regmap_config tps65218_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
.cache_type = REGCACHE_RBTREE,
+ .volatile_table = &tps65218_volatile_table,
};
static const struct regmap_irq tps65218_irqs[] = {
@@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = {
.num_regs = 2,
.mask_base = TPS65218_REG_INT_MASK1,
+ .status_base = TPS65218_REG_INT1,
};
static const struct of_device_id of_tps65218_match_table[] = {
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 51fd6b524371..d1b55fe62817 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master,
return 0;
}
+static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ struct cxl_context *ctx = vma->vm_file->private_data;
+ unsigned long address = (unsigned long)vmf->virtual_address;
+ u64 area, offset;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+
+ pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n",
+ __func__, ctx->pe, address, offset);
+
+ if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
+ area = ctx->afu->psn_phys;
+ if (offset > ctx->afu->adapter->ps_size)
+ return VM_FAULT_SIGBUS;
+ } else {
+ area = ctx->psn_phys;
+ if (offset > ctx->psn_size)
+ return VM_FAULT_SIGBUS;
+ }
+
+ mutex_lock(&ctx->status_mutex);
+
+ if (ctx->status != STARTED) {
+ mutex_unlock(&ctx->status_mutex);
+ pr_devel("%s: Context not started, failing problem state access\n", __func__);
+ return VM_FAULT_SIGBUS;
+ }
+
+ vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT);
+
+ mutex_unlock(&ctx->status_mutex);
+
+ return VM_FAULT_NOPAGE;
+}
+
+static const struct vm_operations_struct cxl_mmap_vmops = {
+ .fault = cxl_mmap_fault,
+};
+
/*
* Map a per-context mmio space into the given vma.
*/
@@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma)
u64 len = vma->vm_end - vma->vm_start;
len = min(len, ctx->psn_size);
- if (ctx->afu->current_mode == CXL_MODE_DEDICATED) {
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size);
- }
+ if (ctx->afu->current_mode != CXL_MODE_DEDICATED) {
+ /* make sure there is a valid per process space for this AFU */
+ if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
+ pr_devel("AFU doesn't support mmio space\n");
+ return -EINVAL;
+ }
- /* make sure there is a valid per process space for this AFU */
- if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) {
- pr_devel("AFU doesn't support mmio space\n");
- return -EINVAL;
+ /* Can't mmap until the AFU is enabled */
+ if (!ctx->afu->enabled)
+ return -EBUSY;
}
- /* Can't mmap until the AFU is enabled */
- if (!ctx->afu->enabled)
- return -EBUSY;
-
pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__,
ctx->psn_phys, ctx->pe , ctx->master);
+ vma->vm_flags |= VM_IO | VM_PFNMAP;
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- return vm_iomap_memory(vma, ctx->psn_phys, len);
+ vma->vm_ops = &cxl_mmap_vmops;
+ return 0;
}
/*
@@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx)
afu_release_irqs(ctx);
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
wake_up_all(&ctx->wq);
-
- /* Release Problem State Area mapping */
- mutex_lock(&ctx->mapping_lock);
- if (ctx->mapping)
- unmap_mapping_range(ctx->mapping, 0, 0, 1);
- mutex_unlock(&ctx->mapping_lock);
}
/*
@@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu)
* created and torn down after the IDR removed
*/
__detach_context(ctx);
+
+ /*
+ * We are force detaching - remove any active PSA mappings so
+ * userspace cannot interfere with the card if it comes back.
+ * Easiest way to exercise this is to unbind and rebind the
+ * driver via sysfs while it is in use.
+ */
+ mutex_lock(&ctx->mapping_lock);
+ if (ctx->mapping)
+ unmap_mapping_range(ctx->mapping, 0, 0, 1);
+ mutex_unlock(&ctx->mapping_lock);
}
mutex_unlock(&afu->contexts_lock);
}
diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c
index e9f2f10dbb37..b15d8113877c 100644
--- a/drivers/misc/cxl/file.c
+++ b/drivers/misc/cxl/file.c
@@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx,
pr_devel("%s: pe: %i\n", __func__, ctx->pe);
- mutex_lock(&ctx->status_mutex);
- if (ctx->status != OPENED) {
- rc = -EIO;
- goto out;
- }
-
+ /* Do this outside the status_mutex to avoid a circular dependency with
+ * the locking in cxl_mmap_fault() */
if (copy_from_user(&work, uwork,
sizeof(struct cxl_ioctl_start_work))) {
rc = -EFAULT;
goto out;
}
+ mutex_lock(&ctx->status_mutex);
+ if (ctx->status != OPENED) {
+ rc = -EIO;
+ goto out;
+ }
+
/*
* if any of the reserved fields are set or any of the unused
* flags are set it's invalid
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index ff2755062b44..06ff0a2ec960 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
struct mei_me_hw *hw = to_me_hw(dev);
u32 hcsr = mei_hcsr_read(hw);
+ /* H_RST may be found lit before reset is started,
+ * for example if preceding reset flow hasn't completed.
+ * In that case asserting H_RST will be ignored, therefore
+ * we need to clean H_RST bit to start a successful reset sequence.
+ */
+ if ((hcsr & H_RST) == H_RST) {
+ dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
+ hcsr &= ~H_RST;
+ mei_me_reg_write(hw, H_CSR, hcsr);
+ hcsr = mei_hcsr_read(hw);
+ }
+
hcsr |= H_RST | H_IG | H_IS;
if (intr_enable)
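The hunk above boils down to a read-check-clear-reread sequence on H_CSR before asserting reset: if H_RST is still lit from an aborted flow, writing it again produces no 0 -> 1 edge and the reset never starts. A minimal user-space sketch of that sequence against a fake register (the H_RST value and the csr_read/csr_write helpers are illustrative stand-ins, not the mei register layout):

#include <stdio.h>

#define H_RST 0x00000010u                /* illustrative bit, not the real H_CSR layout */

static unsigned int csr = H_RST;         /* pretend a previous reset never completed */

static unsigned int csr_read(void)            { return csr; }
static void         csr_write(unsigned int v) { csr = v; }

int main(void)
{
	unsigned int hcsr = csr_read();

	/* if H_RST is already lit, asserting it again gives no 0 -> 1 edge,
	 * so clear it first and re-read before starting the real reset */
	if (hcsr & H_RST) {
		printf("H_RST already set = 0x%08x, clearing it\n", hcsr);
		hcsr &= ~H_RST;
		csr_write(hcsr);
		hcsr = csr_read();
	}

	hcsr |= H_RST;                   /* now the reset request is a real transition */
	csr_write(hcsr);
	printf("final H_CSR = 0x%08x\n", csr_read());
	return 0;
}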
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 02ad79229f65..7466ce098e60 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -886,7 +886,7 @@ static int mmc_select_bus_width(struct mmc_card *card)
unsigned idx, bus_width = 0;
int err = 0;
- if (!mmc_can_ext_csd(card) &&
+ if (!mmc_can_ext_csd(card) ||
!(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
return 0;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index e3e56d35f0ee..970314e0aac8 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = {
{ "INT33BB" , "3" , &sdhci_acpi_slot_int_sd },
{ "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio },
{ "INT3436" , NULL, &sdhci_acpi_slot_int_sdio },
+ { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio },
{ "PNP0D40" },
{ },
};
@@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = {
{ "INT33BB" },
{ "INT33C6" },
{ "INT3436" },
+ { "INT344D" },
{ "PNP0D40" },
{ },
};
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 03427755b902..4f38554ce679 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = {
.subdevice = PCI_ANY_ID,
.driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc,
},
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_EMMC,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SDIO,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio,
+ },
+
+ {
+ .vendor = PCI_VENDOR_ID_INTEL,
+ .device = PCI_DEVICE_ID_INTEL_SPT_SD,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd,
+ },
+
{
.vendor = PCI_VENDOR_ID_O2,
.device = PCI_DEVICE_ID_O2_8120,
diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h
index d57c3d169914..1ec684d06d54 100644
--- a/drivers/mmc/host/sdhci-pci.h
+++ b/drivers/mmc/host/sdhci-pci.h
@@ -21,6 +21,9 @@
#define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5
#define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6
#define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7
+#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b
+#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c
+#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d
/*
* PCI registers
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 45238871192d..ca3424e7ef71 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (IS_ERR(host))
return PTR_ERR(host);
- if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
- ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
- if (ret < 0)
- goto err_mbus_win;
- }
-
-
pltfm_host = sdhci_priv(host);
pltfm_host->priv = pxa;
@@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
if (!IS_ERR(pxa->clk_core))
clk_prepare_enable(pxa->clk_core);
+ if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) {
+ ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info());
+ if (ret < 0)
+ goto err_mbus_win;
+ }
+
/* enable 1/8V DDR capable */
host->mmc->caps |= MMC_CAP_1_8V_DDR;
@@ -396,11 +395,11 @@ err_add_host:
pm_runtime_disable(&pdev->dev);
err_of_parse:
err_cd_req:
+err_mbus_win:
clk_disable_unprepare(pxa->clk_io);
if (!IS_ERR(pxa->clk_core))
clk_disable_unprepare(pxa->clk_core);
err_clk_get:
-err_mbus_win:
sdhci_pltfm_free(pdev);
return ret;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index cbb245b58538..f1a488ee432f 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host)
del_timer_sync(&host->tuning_timer);
host->flags &= ~SDHCI_NEEDS_RETUNING;
- host->mmc->max_blk_count =
- (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
}
sdhci_enable_card_detection(host);
}
@@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
spin_unlock_irq(&host->lock);
mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
spin_lock_irq(&host->lock);
+
+ if (mode != MMC_POWER_OFF)
+ sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
+ else
+ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
+
return;
}
@@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
sdhci_runtime_pm_get(host);
+ present = mmc_gpio_get_cd(host->mmc);
+
spin_lock_irqsave(&host->lock, flags);
WARN_ON(host->mrq != NULL);
@@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
* zero: cd-gpio is used, and card is removed
* one: cd-gpio is used, and card is present
*/
- present = mmc_gpio_get_cd(host->mmc);
if (present < 0) {
/* If polling, assume that the card is always present. */
if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
@@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc)
return !(present_state & SDHCI_DATA_LVL_MASK);
}
+static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
+{
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&host->lock, flags);
+ host->flags |= SDHCI_HS400_TUNING;
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ return 0;
+}
+
static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
{
struct sdhci_host *host = mmc_priv(mmc);
@@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
int tuning_loop_counter = MAX_TUNING_LOOP;
int err = 0;
unsigned long flags;
+ unsigned int tuning_count = 0;
+ bool hs400_tuning;
sdhci_runtime_pm_get(host);
spin_lock_irqsave(&host->lock, flags);
+ hs400_tuning = host->flags & SDHCI_HS400_TUNING;
+ host->flags &= ~SDHCI_HS400_TUNING;
+
+ if (host->tuning_mode == SDHCI_TUNING_MODE_1)
+ tuning_count = host->tuning_count;
+
/*
* The Host Controller needs tuning only in case of SDR104 mode
* and for SDR50 mode when Use Tuning for SDR50 is set in the
@@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
* tuning function has to be executed.
*/
switch (host->timing) {
+ /* HS400 tuning is done in HS200 mode */
case MMC_TIMING_MMC_HS400:
+ err = -EINVAL;
+ goto out_unlock;
+
case MMC_TIMING_MMC_HS200:
+ /*
+ * Periodic re-tuning for HS400 is not expected to be needed, so
+ * disable it here.
+ */
+ if (hs400_tuning)
+ tuning_count = 0;
+ break;
+
case MMC_TIMING_UHS_SDR104:
break;
@@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
/* FALLTHROUGH */
default:
- spin_unlock_irqrestore(&host->lock, flags);
- sdhci_runtime_pm_put(host);
- return 0;
+ goto out_unlock;
}
if (host->ops->platform_execute_tuning) {
@@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
}
out:
- /*
- * If this is the very first time we are here, we start the retuning
- * timer. Since only during the first time, SDHCI_NEEDS_RETUNING
- * flag won't be set, we check this condition before actually starting
- * the timer.
- */
- if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count &&
- (host->tuning_mode == SDHCI_TUNING_MODE_1)) {
+ host->flags &= ~SDHCI_NEEDS_RETUNING;
+
+ if (tuning_count) {
host->flags |= SDHCI_USING_RETUNING_TIMER;
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
- /* Tuning mode 1 limits the maximum data length to 4MB */
- mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size;
- } else if (host->flags & SDHCI_USING_RETUNING_TIMER) {
- host->flags &= ~SDHCI_NEEDS_RETUNING;
- /* Reload the new initial value for timer */
- mod_timer(&host->tuning_timer, jiffies +
- host->tuning_count * HZ);
+ mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ);
}
/*
@@ -2070,6 +2092,7 @@ out:
sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
+out_unlock:
spin_unlock_irqrestore(&host->lock, flags);
sdhci_runtime_pm_put(host);
@@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc)
{
struct sdhci_host *host = mmc_priv(mmc);
unsigned long flags;
+ int present;
/* First check if client has provided their own card event */
if (host->ops->card_event)
host->ops->card_event(host);
+ present = sdhci_do_get_cd(host);
+
spin_lock_irqsave(&host->lock, flags);
/* Check host->mrq first in case we are runtime suspended */
- if (host->mrq && !sdhci_do_get_cd(host)) {
+ if (host->mrq && !present) {
pr_err("%s: Card removed during transfer!\n",
mmc_hostname(host->mmc));
pr_err("%s: Resetting controller.\n",
@@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = {
.hw_reset = sdhci_hw_reset,
.enable_sdio_irq = sdhci_enable_sdio_irq,
.start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
+ .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
.execute_tuning = sdhci_execute_tuning,
.card_event = sdhci_card_event,
.card_busy = sdhci_card_busy,
@@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host)
mmc->max_segs = SDHCI_MAX_SEGS;
/*
- * Maximum number of sectors in one transfer. Limited by DMA boundary
- * size (512KiB).
+ * Maximum number of sectors in one transfer. Limited by SDMA boundary
+ * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
+ * is less anyway.
*/
mmc->max_req_size = 524288;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 184c434ae305..0dceba1a2ba1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1648,7 +1648,7 @@ static int __bond_release_one(struct net_device *bond_dev,
/* slave is not a slave or master is not master of this slave */
if (!(slave_dev->flags & IFF_SLAVE) ||
!netdev_has_upper_dev(slave_dev, bond_dev)) {
- netdev_err(bond_dev, "cannot release %s\n",
+ netdev_dbg(bond_dev, "cannot release %s\n",
slave_dev->name);
return -EINVAL;
}
diff --git a/drivers/net/caif/caif_virtio.c b/drivers/net/caif/caif_virtio.c
index a5fefb9059c5..b306210b02b7 100644
--- a/drivers/net/caif/caif_virtio.c
+++ b/drivers/net/caif/caif_virtio.c
@@ -257,7 +257,6 @@ static int cfv_rx_poll(struct napi_struct *napi, int quota)
struct vringh_kiov *riov = &cfv->ctx.riov;
unsigned int skb_len;
-again:
do {
skb = NULL;
@@ -322,7 +321,6 @@ exit:
napi_schedule_prep(napi)) {
vringh_notify_disable_kern(cfv->vr_rx);
__napi_schedule(napi);
- goto again;
}
break;
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index f94a9fa60488..c672c4dcffac 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -615,6 +615,9 @@ static void c_can_stop(struct net_device *dev)
c_can_irq_control(priv, false);
+ /* put ctrl to init on stop to end ongoing transmission */
+ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT);
+
/* deactivate pins */
pinctrl_pm_select_sleep_state(dev->dev.parent);
priv->can.state = CAN_STATE_STOPPED;
diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c
index f363972cd77d..e36d10520e24 100644
--- a/drivers/net/can/c_can/c_can_platform.c
+++ b/drivers/net/can/c_can/c_can_platform.c
@@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable)
mask = 1 << raminit->bits.start | 1 << raminit->bits.done;
regmap_read(raminit->syscon, raminit->reg, &ctrl);
- /* We clear the done and start bit first. The start bit is
+ /* We clear the start bit first. The start bit is
* looking at the 0 -> 1 transition, but is not self clearing;
- * And we clear the init done bit as well.
* NOTE: DONE must be written with 1 to clear it.
+ * We can't clear the DONE bit here using regmap_update_bits()
+ * as it will bypass the write if initial condition is START:0 DONE:1
+ * e.g. on DRA7 which needs START pulse.
*/
- ctrl &= ~(1 << raminit->bits.start);
- ctrl |= 1 << raminit->bits.done;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ ctrl &= ~mask; /* START = 0, DONE = 0 */
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
- ctrl &= ~(1 << raminit->bits.done);
- c_can_hw_raminit_wait_syscon(priv, mask, ctrl);
+ /* check if START bit is 0. Ignore DONE bit for now
+ * as it can be either 0 or 1.
+ */
+ c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl);
if (enable) {
- /* Set start bit and wait for the done bit. */
+ /* Clear DONE bit & set START bit. */
ctrl |= 1 << raminit->bits.start;
- regmap_write(raminit->syscon, raminit->reg, ctrl);
-
+ /* DONE must be written with 1 to clear it */
+ ctrl |= 1 << raminit->bits.done;
+ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl);
+ /* prevent further clearing of DONE bit */
+ ctrl &= ~(1 << raminit->bits.done);
/* clear START bit if start pulse is needed */
if (raminit->needs_pulse) {
ctrl &= ~(1 << raminit->bits.start);
- regmap_write(raminit->syscon, raminit->reg, ctrl);
+ regmap_update_bits(raminit->syscon, raminit->reg,
+ mask, ctrl);
}
ctrl |= 1 << raminit->bits.done;
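The reasoning in the comment above hinges on regmap_update_bits() being a read-modify-write that skips the bus write when the computed value already matches the register, which is why a write-one-to-clear DONE bit cannot be cleared through it from the START:0 DONE:1 state. A small user-space model of that behaviour, assuming made-up bit positions (START = bit 0, DONE = bit 1) and a fake register instead of the syscon:

#include <stdio.h>

#define START 0x1
#define DONE  0x2                         /* write-one-to-clear in the real hardware */

static unsigned int reg = DONE;           /* initial condition: START:0 DONE:1 */

static void reg_write(unsigned int v) { printf("bus write 0x%x\n", v); reg = v; }

/* simplified regmap_update_bits(): only write when something actually changes */
static void update_bits(unsigned int mask, unsigned int val)
{
	unsigned int cur = reg;
	unsigned int new = (cur & ~mask) | (val & mask);

	if (new != cur)
		reg_write(new);
	else
		printf("bus write skipped, value 0x%x unchanged\n", cur);
}

int main(void)
{
	/* naive attempt to clear DONE by writing 1 to it: the computed value
	 * equals the current one, so the write is bypassed and DONE stays set */
	update_bits(DONE, DONE);

	/* what the patch does instead: clear both bits in the cached value
	 * first, which forces a real write on the next update */
	update_bits(START | DONE, 0);
	return 0;
}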
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 3ec8f6f25e5f..847c1f813261 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev,
if (dev->flags & IFF_UP)
return -EBUSY;
cm = nla_data(data[IFLA_CAN_CTRLMODE]);
- if (cm->flags & ~priv->ctrlmode_supported)
+
+ /* check whether changed bits are allowed to be modified */
+ if (cm->mask & ~priv->ctrlmode_supported)
return -EOPNOTSUPP;
+
+ /* clear bits to be modified and copy the flag values */
priv->ctrlmode &= ~cm->mask;
- priv->ctrlmode |= cm->flags;
+ priv->ctrlmode |= (cm->flags & cm->mask);
/* CAN_CTRLMODE_FD can only be set when driver supports FD */
if (priv->ctrlmode & CAN_CTRLMODE_FD)
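The IFLA_CAN_CTRLMODE fix above treats cm->mask as "which bits the request touches" and cm->flags as "their requested values": the mask, not the flags, is validated against the supported set, and only masked flag bits are copied in. A self-contained sketch of that update rule with invented mode bits (MODE_* and supported are placeholders, not the uapi constants):

#include <stdio.h>
#include <errno.h>

#define MODE_LOOPBACK   0x01u
#define MODE_LISTENONLY 0x02u
#define MODE_FD         0x04u   /* pretend the driver does not support this one */

static unsigned int supported = MODE_LOOPBACK | MODE_LISTENONLY;

static int change_ctrlmode(unsigned int *ctrlmode, unsigned int mask, unsigned int flags)
{
	/* reject a request that tries to *modify* an unsupported bit ... */
	if (mask & ~supported)
		return -EOPNOTSUPP;

	/* ... then clear the selected bits and copy only the masked flag values */
	*ctrlmode &= ~mask;
	*ctrlmode |= flags & mask;
	return 0;
}

int main(void)
{
	unsigned int mode = MODE_LISTENONLY;

	/* flags may carry stray bits (MODE_FD here); only masked bits are applied */
	change_ctrlmode(&mode, MODE_LOOPBACK, MODE_LOOPBACK | MODE_FD);
	printf("mode = 0x%x\n", mode);   /* 0x3, MODE_FD left untouched */
	return 0;
}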
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index d7bc462aafdc..244529881be9 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void)
priv->can.data_bittiming_const = &m_can_data_bittiming_const;
priv->can.do_set_mode = m_can_set_mode;
priv->can.do_get_berr_counter = m_can_get_berr_counter;
+
+ /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */
+ priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO;
+
+ /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */
priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
CAN_CTRLMODE_LISTENONLY |
CAN_CTRLMODE_BERR_REPORTING |
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 541fb7a05625..7af379ca861b 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev,
skb = alloc_can_err_skb(priv->netdev, &cf);
if (skb) {
cf->can_id |= CAN_ERR_RESTARTED;
- netif_rx(skb);
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
} else {
netdev_err(priv->netdev,
"No memory left for err_skb\n");
@@ -587,7 +587,7 @@ static int kvaser_usb_simple_msg_async(struct kvaser_usb_net_priv *priv,
usb_sndbulkpipe(dev->udev,
dev->bulk_out->bEndpointAddress),
buf, msg->len,
- kvaser_usb_simple_msg_callback, priv);
+ kvaser_usb_simple_msg_callback, netdev);
usb_anchor_urb(urb, &priv->tx_submitted);
err = usb_submit_urb(urb, GFP_ATOMIC);
@@ -662,11 +662,6 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv = dev->nets[channel];
stats = &priv->netdev->stats;
- if (status & M16C_STATE_BUS_RESET) {
- kvaser_usb_unlink_tx_urbs(priv);
- return;
- }
-
skb = alloc_can_err_skb(priv->netdev, &cf);
if (!skb) {
stats->rx_dropped++;
@@ -677,7 +672,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
netdev_dbg(priv->netdev, "Error status: 0x%02x\n", status);
- if (status & M16C_STATE_BUS_OFF) {
+ if (status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) {
cf->can_id |= CAN_ERR_BUSOFF;
priv->can.can_stats.bus_off++;
@@ -703,9 +698,7 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
}
new_state = CAN_STATE_ERROR_PASSIVE;
- }
-
- if (status == M16C_STATE_BUS_ERROR) {
+ } else if (status & M16C_STATE_BUS_ERROR) {
if ((priv->can.state < CAN_STATE_ERROR_WARNING) &&
((txerr >= 96) || (rxerr >= 96))) {
cf->can_id |= CAN_ERR_CRTL;
@@ -715,7 +708,8 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv->can.can_stats.error_warning++;
new_state = CAN_STATE_ERROR_WARNING;
- } else if (priv->can.state > CAN_STATE_ERROR_ACTIVE) {
+ } else if ((priv->can.state > CAN_STATE_ERROR_ACTIVE) &&
+ ((txerr < 96) && (rxerr < 96))) {
cf->can_id |= CAN_ERR_PROT;
cf->data[2] = CAN_ERR_PROT_ACTIVE;
@@ -770,10 +764,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev,
priv->can.state = new_state;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
@@ -805,10 +798,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
stats->rx_over_errors++;
stats->rx_errors++;
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
}
@@ -887,10 +879,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev,
cf->can_dlc);
}
- netif_rx(skb);
-
stats->rx_packets++;
stats->rx_bytes += cf->can_dlc;
+ netif_rx(skb);
}
static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev,
@@ -1246,6 +1237,9 @@ static int kvaser_usb_close(struct net_device *netdev)
if (err)
netdev_warn(netdev, "Cannot stop device, error %d\n", err);
+ /* reset tx contexts */
+ kvaser_usb_unlink_tx_urbs(priv);
+
priv->can.state = CAN_STATE_STOPPED;
close_candev(priv->netdev);
@@ -1294,12 +1288,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (!urb) {
netdev_err(netdev, "No memory left for URBs\n");
stats->tx_dropped++;
- goto nourbmem;
+ dev_kfree_skb(skb);
+ return NETDEV_TX_OK;
}
buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC);
if (!buf) {
stats->tx_dropped++;
+ dev_kfree_skb(skb);
goto nobufmem;
}
@@ -1334,6 +1330,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
}
}
+ /* This should never happen; it implies a flow control bug */
if (!context) {
netdev_warn(netdev, "cannot find free context\n");
ret = NETDEV_TX_BUSY;
@@ -1364,9 +1361,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb,
if (unlikely(err)) {
can_free_echo_skb(netdev, context->echo_index);
- skb = NULL; /* set to NULL to avoid double free in
- * dev_kfree_skb(skb) */
-
atomic_dec(&priv->active_tx_urbs);
usb_unanchor_urb(urb);
@@ -1388,8 +1382,6 @@ releasebuf:
kfree(buf);
nobufmem:
usb_free_urb(urb);
-nourbmem:
- dev_kfree_skb(skb);
return ret;
}
@@ -1502,6 +1494,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf,
struct kvaser_usb_net_priv *priv;
int i, err;
+ err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel);
+ if (err)
+ return err;
+
netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS);
if (!netdev) {
dev_err(&intf->dev, "Cannot alloc candev\n");
@@ -1588,7 +1584,7 @@ static int kvaser_usb_probe(struct usb_interface *intf,
{
struct kvaser_usb *dev;
int err = -ENOMEM;
- int i;
+ int i, retry = 3;
dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -1606,10 +1602,15 @@ static int kvaser_usb_probe(struct usb_interface *intf,
usb_set_intfdata(intf, dev);
- for (i = 0; i < MAX_NET_DEVICES; i++)
- kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i);
+ /* On some x86 laptops, plugging a Kvaser device again after
+ * an unplug makes the firmware always ignore the very first
+ * command. For such a case, provide some room for retries
+ * instead of completely exiting the driver.
+ */
+ do {
+ err = kvaser_usb_get_software_info(dev);
+ } while (--retry && err == -ETIMEDOUT);
- err = kvaser_usb_get_software_info(dev);
if (err) {
dev_err(&intf->dev,
"Cannot get software infos, error %d\n", err);
diff --git a/drivers/net/ethernet/8390/ne2k-pci.c b/drivers/net/ethernet/8390/ne2k-pci.c
index 89c8d9fc97de..57e97910c728 100644
--- a/drivers/net/ethernet/8390/ne2k-pci.c
+++ b/drivers/net/ethernet/8390/ne2k-pci.c
@@ -246,13 +246,13 @@ static int ne2k_pci_init_one(struct pci_dev *pdev,
if (!ioaddr || ((pci_resource_flags (pdev, 0) & IORESOURCE_IO) == 0)) {
dev_err(&pdev->dev, "no I/O resource at PCI BAR #0\n");
- return -ENODEV;
+ goto err_out;
}
if (request_region (ioaddr, NE_IO_EXTENT, DRV_NAME) == NULL) {
dev_err(&pdev->dev, "I/O resource 0x%x @ 0x%lx busy\n",
NE_IO_EXTENT, ioaddr);
- return -EBUSY;
+ goto err_out;
}
reg0 = inb(ioaddr);
@@ -392,6 +392,8 @@ err_out_free_netdev:
free_netdev (dev);
err_out_free_res:
release_region (ioaddr, NE_IO_EXTENT);
+err_out:
+ pci_disable_device(pdev);
return -ENODEV;
}
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index df76050d0a9d..eadcb053807e 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -156,18 +156,6 @@ source "drivers/net/ethernet/realtek/Kconfig"
source "drivers/net/ethernet/renesas/Kconfig"
source "drivers/net/ethernet/rdc/Kconfig"
source "drivers/net/ethernet/rocker/Kconfig"
-
-config S6GMAC
- tristate "S6105 GMAC ethernet support"
- depends on XTENSA_VARIANT_S6000
- select PHYLIB
- ---help---
- This driver supports the on chip ethernet device on the
- S6105 xtensa processor.
-
- To compile this driver as a module, choose M here. The module
- will be called s6gmac.
-
source "drivers/net/ethernet/samsung/Kconfig"
source "drivers/net/ethernet/seeq/Kconfig"
source "drivers/net/ethernet/silan/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index bf56f8b36e90..1367afcd0a8b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -66,7 +66,6 @@ obj-$(CONFIG_NET_VENDOR_REALTEK) += realtek/
obj-$(CONFIG_SH_ETH) += renesas/
obj-$(CONFIG_NET_VENDOR_RDC) += rdc/
obj-$(CONFIG_NET_VENDOR_ROCKER) += rocker/
-obj-$(CONFIG_S6GMAC) += s6gmac.o
obj-$(CONFIG_NET_VENDOR_SAMSUNG) += samsung/
obj-$(CONFIG_NET_VENDOR_SEEQ) += seeq/
obj-$(CONFIG_NET_VENDOR_SILAN) += silan/
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 1fcd5568a352..f3470d96837a 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev)
}
db->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(db->clk))
+ if (IS_ERR(db->clk)) {
+ ret = PTR_ERR(db->clk);
goto out;
+ }
clk_prepare_enable(db->clk);
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 3498760dc22a..760c72c6e2ac 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -1170,10 +1170,6 @@ tx_request_irq_error:
init_error:
free_skbufs(dev);
alloc_skbuf_error:
- if (priv->phydev) {
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
phy_error:
return ret;
}
@@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev)
int ret;
unsigned long int flags;
- /* Stop and disconnect the PHY */
- if (priv->phydev) {
+ /* Stop the PHY */
+ if (priv->phydev)
phy_stop(priv->phydev);
- phy_disconnect(priv->phydev);
- priv->phydev = NULL;
- }
netif_stop_queue(dev);
napi_disable(&priv->napi);
@@ -1525,6 +1518,10 @@ err_free_netdev:
static int altera_tse_remove(struct platform_device *pdev)
{
struct net_device *ndev = platform_get_drvdata(pdev);
+ struct altera_tse_private *priv = netdev_priv(ndev);
+
+ if (priv->phydev)
+ phy_disconnect(priv->phydev);
platform_set_drvdata(pdev, NULL);
altera_tse_mdio_destroy(ndev);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 75b08c63d39f..29a09271b64a 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -767,16 +767,17 @@
#define MTL_Q_RQOMR 0x40
#define MTL_Q_RQMPOCR 0x44
#define MTL_Q_RQDR 0x4c
+#define MTL_Q_RQFCR 0x50
#define MTL_Q_IER 0x70
#define MTL_Q_ISR 0x74
/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
#define MTL_Q_RQOMR_EHFC_INDEX 7
#define MTL_Q_RQOMR_EHFC_WIDTH 1
-#define MTL_Q_RQOMR_RFA_INDEX 8
-#define MTL_Q_RQOMR_RFA_WIDTH 3
-#define MTL_Q_RQOMR_RFD_INDEX 13
-#define MTL_Q_RQOMR_RFD_WIDTH 3
#define MTL_Q_RQOMR_RQS_INDEX 16
#define MTL_Q_RQOMR_RQS_WIDTH 9
#define MTL_Q_RQOMR_RSF_INDEX 5
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index 53f5f66ec2ee..4c66cd1d1e60 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -2079,10 +2079,10 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
for (i = 0; i < pdata->rx_q_count; i++) {
/* Activate flow control when less than 4k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA, 2);
/* De-activate flow control when more than 6k left in fifo */
- XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFD, 4);
+ XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD, 4);
}
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index e398eda07298..c8af3ce3ea38 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx)
schedule_work(&alx->reset_wk);
}
-static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
+static int alx_clean_rx_irq(struct alx_priv *alx, int budget)
{
struct alx_rx_queue *rxq = &alx->rxq;
struct alx_rrd *rrd;
struct alx_buffer *rxb;
struct sk_buff *skb;
u16 length, rfd_cleaned = 0;
+ int work = 0;
- while (budget > 0) {
+ while (work < budget) {
rrd = &rxq->rrd[rxq->rrd_read_idx];
if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT)))
break;
@@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
ALX_GET_FIELD(le32_to_cpu(rrd->word0),
RRD_NOR) != 1) {
alx_schedule_reset(alx);
- return 0;
+ return work;
}
rxb = &rxq->bufs[rxq->read_idx];
@@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget)
}
napi_gro_receive(&alx->napi, skb);
- budget--;
+ work++;
next_pkt:
if (++rxq->read_idx == alx->rx_ringsz)
@@ -258,21 +259,22 @@ next_pkt:
if (rfd_cleaned)
alx_refill_rx_ring(alx, GFP_ATOMIC);
- return budget > 0;
+ return work;
}
static int alx_poll(struct napi_struct *napi, int budget)
{
struct alx_priv *alx = container_of(napi, struct alx_priv, napi);
struct alx_hw *hw = &alx->hw;
- bool complete = true;
unsigned long flags;
+ bool tx_complete;
+ int work;
- complete = alx_clean_tx_irq(alx) &&
- alx_clean_rx_irq(alx, budget);
+ tx_complete = alx_clean_tx_irq(alx);
+ work = alx_clean_rx_irq(alx, budget);
- if (!complete)
- return 1;
+ if (!tx_complete || work == budget)
+ return budget;
napi_complete(&alx->napi);
@@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
alx_post_write(hw);
- return 0;
+ return work;
}
static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr)
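The alx_poll() rework matters because of the NAPI contract: returning the full budget means "more work, keep me scheduled", while returning less both permits and normally accompanies napi_complete(). A user-space model of that accounting (pending, clean_rx() and the printed napi_complete() are simulations, not the driver's structures):

#include <stdio.h>
#include <stdbool.h>

static int pending = 10;        /* packets waiting in a pretend RX ring */

static int clean_rx(int budget)
{
	int work = 0;

	while (work < budget && pending > 0) {
		pending--;
		work++;
	}
	return work;
}

/* model of the poll callback contract used by the patch */
static int poll(int budget, bool tx_complete)
{
	int work = clean_rx(budget);

	if (!tx_complete || work == budget)
		return budget;               /* stay on the poll list */

	printf("napi_complete()\n");         /* all done, re-enable interrupts */
	return work;
}

int main(void)
{
	printf("poll -> %d\n", poll(4, true));   /* 4: budget exhausted */
	printf("poll -> %d\n", poll(4, true));   /* 4 */
	printf("poll -> %d\n", poll(4, true));   /* 2: completes */
	return 0;
}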
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 05c6af6c418f..3007d95fbb9f 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
bgmac->int_status = 0;
}
- if (handled < weight)
+ if (handled < weight) {
napi_complete(napi);
-
- bgmac_chip_intrs_on(bgmac);
+ bgmac_chip_intrs_on(bgmac);
+ }
return handled;
}
@@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core)
if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM)
bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n");
+ netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
+
err = bgmac_mii_register(bgmac);
if (err) {
bgmac_err(bgmac, "Cannot register MDIO\n");
@@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core)
netif_carrier_off(net_dev);
- netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT);
-
return 0;
err_mii_unregister:
@@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core)
{
struct bgmac *bgmac = bcma_get_drvdata(core);
- netif_napi_del(&bgmac->napi);
unregister_netdev(bgmac->net_dev);
bgmac_mii_unregister(bgmac);
+ netif_napi_del(&bgmac->napi);
bgmac_dma_free(bgmac);
bcma_set_drvdata(core, NULL);
free_netdev(bgmac->net_dev);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 1d1147c93d59..e468ed3f210f 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -3175,7 +3175,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
}
#endif
if (!bnx2x_fp_lock_napi(fp))
- return work_done;
+ return budget;
for_each_cos_in_tx_queue(fp, cos)
if (bnx2x_tx_queue_has_work(fp->txdata_ptr[cos]))
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 9f5e38769a29..72eef9fc883e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12553,9 +12553,11 @@ static int bnx2x_get_phys_port_id(struct net_device *netdev,
return 0;
}
-static bool bnx2x_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t bnx2x_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
static const struct net_device_ops bnx2x_netdev_ops = {
@@ -12589,7 +12591,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
#endif
.ndo_get_phys_port_id = bnx2x_get_phys_port_id,
.ndo_set_vf_link_state = bnx2x_set_vf_link_state,
- .ndo_gso_check = bnx2x_gso_check,
+ .ndo_features_check = bnx2x_features_check,
};
static int bnx2x_set_coherency_mask(struct bnx2x *bp)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index bb48a610b72a..96bf01ba32dd 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp)
}
static void tg3_irq_quiesce(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
int i;
@@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp)
tp->irq_sync = 1;
smp_mb();
+ spin_unlock_bh(&tp->lock);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+
+ spin_lock_bh(&tp->lock);
}
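This tg3 hunk and the tg3_chip_reset() one below apply the same pattern: tp->lock must be dropped around synchronize_irq(), because that call can sleep until an interrupt handler, which may itself need the lock, has returned. A minimal pthread sketch of the shape of that fix (the names and the wait_for_handlers() stub are illustrative, and the user-space mutex is only an analogy for the driver's spinlock):

#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int irq_sync;                      /* state normally protected by 'lock' */

/* stand-in for synchronize_irq(): may sleep until a handler that also
 * takes 'lock' has returned, so it must not be called with 'lock' held */
static void wait_for_handlers(void)
{
	puts("waiting for in-flight handlers");
}

/* called with 'lock' held; returns with 'lock' held, like tg3_irq_quiesce() */
static void quiesce(void)
{
	irq_sync = 1;                     /* publish the request under the lock */

	pthread_mutex_unlock(&lock);      /* drop it around the blocking wait ... */
	wait_for_handlers();
	pthread_mutex_lock(&lock);        /* ... and retake it for the caller */
}

int main(void)
{
	pthread_mutex_lock(&lock);
	quiesce();
	pthread_mutex_unlock(&lock);
	printf("irq_sync = %d\n", irq_sync);
	return 0;
}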
/* Fully shutdown all tg3 driver activity elsewhere in the system.
@@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp)
/* tp->lock is held. */
static int tg3_chip_reset(struct tg3 *tp)
+ __releases(tp->lock)
+ __acquires(tp->lock)
{
u32 val;
void (*write_op)(struct tg3 *, u32, u32);
@@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp)
}
smp_mb();
+ tg3_full_unlock(tp);
+
for (i = 0; i < tp->irq_cnt; i++)
synchronize_irq(tp->napi[i].irq_vec);
+ tg3_full_lock(tp, 0);
+
if (tg3_asic_rev(tp) == ASIC_REV_57780) {
val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
@@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque)
{
struct tg3 *tp = (struct tg3 *) __opaque;
- if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
- goto restart_timer;
-
spin_lock(&tp->lock);
+ if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) {
+ spin_unlock(&tp->lock);
+ goto restart_timer;
+ }
+
if (tg3_asic_rev(tp) == ASIC_REV_5717 ||
tg3_flag(tp, 57765_CLASS))
tg3_chk_missed_msi(tp);
@@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work)
struct tg3 *tp = container_of(work, struct tg3, reset_task);
int err;
+ rtnl_lock();
tg3_full_lock(tp, 0);
if (!netif_running(tp->dev)) {
tg3_flag_clear(tp, RESET_TASK_PENDING);
tg3_full_unlock(tp);
+ rtnl_unlock();
return;
}
@@ -11138,6 +11154,7 @@ out:
tg3_phy_start(tp);
tg3_flag_clear(tp, RESET_TASK_PENDING);
+ rtnl_unlock();
}
static int tg3_request_irq(struct tg3 *tp, int irq_num)
@@ -17800,23 +17817,6 @@ static int tg3_init_one(struct pci_dev *pdev,
goto err_out_apeunmap;
}
- /*
- * Reset chip in case UNDI or EFI driver did not shutdown
- * DMA self test will enable WDMAC and we'll see (spurious)
- * pending DMA on the PCI bus at that point.
- */
- if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
- (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
- tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
- tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
- }
-
- err = tg3_test_dma(tp);
- if (err) {
- dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
- goto err_out_apeunmap;
- }
-
intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
@@ -17861,6 +17861,23 @@ static int tg3_init_one(struct pci_dev *pdev,
sndmbx += 0xc;
}
+ /*
+ * Reset chip in case UNDI or EFI driver did not shutdown
+ * DMA self test will enable WDMAC and we'll see (spurious)
+ * pending DMA on the PCI bus at that point.
+ */
+ if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
+ (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
+ tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
+ tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
+ }
+
+ err = tg3_test_dma(tp);
+ if (err) {
+ dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
+ goto err_out_apeunmap;
+ }
+
tg3_init_coal(tp);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
index 7d6aa8c87df8..619083a860a4 100644
--- a/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
+++ b/drivers/net/ethernet/brocade/bna/bnad_debugfs.c
@@ -172,7 +172,7 @@ bnad_get_debug_drvinfo(struct bnad *bnad, void *buffer, u32 len)
/* Retrieve flash partition info */
fcomp.comp_status = 0;
- init_completion(&fcomp.comp);
+ reinit_completion(&fcomp.comp);
spin_lock_irqsave(&bnad->bna_lock, flags);
ret = bfa_nw_flash_get_attr(&bnad->bna.flash, &drvinfo->flash_attr,
bnad_cb_completion, &fcomp);
diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c
index 55eb7f2af2b4..7ef55f5fa664 100644
--- a/drivers/net/ethernet/cadence/at91_ether.c
+++ b/drivers/net/ethernet/cadence/at91_ether.c
@@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
res = PTR_ERR(lp->pclk);
goto err_free_dev;
}
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
lp->hclk = ERR_PTR(-ENOENT);
lp->tx_clk = ERR_PTR(-ENOENT);
@@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev)
err_out_unregister_netdev:
unregister_netdev(dev);
err_disable_clock:
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
err_free_dev:
free_netdev(dev);
return res;
@@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev)
kfree(lp->mii_bus->irq);
mdiobus_free(lp->mii_bus);
unregister_netdev(dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
free_netdev(dev);
return 0;
@@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg)
netif_stop_queue(net_dev);
netif_device_detach(net_dev);
- clk_disable(lp->pclk);
+ clk_disable_unprepare(lp->pclk);
}
return 0;
}
@@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev)
struct macb *lp = netdev_priv(net_dev);
if (netif_running(net_dev)) {
- clk_enable(lp->pclk);
+ clk_prepare_enable(lp->pclk);
netif_device_attach(net_dev);
netif_start_queue(net_dev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index d00a751f0588..6049f70e110c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -96,6 +96,9 @@ struct port_info {
s16 xact_addr_filt; /* index of our MAC address filter */
u16 rss_size; /* size of VI's RSS table slice */
u8 pidx; /* index into adapter port[] */
+ s8 mdio_addr;
+ u8 port_type; /* firmware port type */
+ u8 mod_type; /* firmware module type */
u8 port_id; /* physical port ID */
u8 nqsets; /* # of "Queue Sets" */
u8 first_qset; /* index of first "Queue Set" */
@@ -522,6 +525,7 @@ static inline struct adapter *netdev2adap(const struct net_device *dev)
* is "contracted" to provide for the common code.
*/
void t4vf_os_link_changed(struct adapter *, int, int);
+void t4vf_os_portmod_changed(struct adapter *, int);
/*
* SGE function prototype declarations.
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index aa74ec34a467..a936ee8958c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -44,6 +44,7 @@
#include <linux/etherdevice.h>
#include <linux/debugfs.h>
#include <linux/ethtool.h>
+#include <linux/mdio.h>
#include "t4vf_common.h"
#include "t4vf_defs.h"
@@ -210,6 +211,38 @@ void t4vf_os_link_changed(struct adapter *adapter, int pidx, int link_ok)
}
/*
+ * The port module type has changed on the indicated "port" (Virtual
+ * Interface).
+ */
+void t4vf_os_portmod_changed(struct adapter *adapter, int pidx)
+{
+ static const char * const mod_str[] = {
+ NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
+ };
+ const struct net_device *dev = adapter->port[pidx];
+ const struct port_info *pi = netdev_priv(dev);
+
+ if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+ dev_info(adapter->pdev_dev, "%s: port module unplugged\n",
+ dev->name);
+ else if (pi->mod_type < ARRAY_SIZE(mod_str))
+ dev_info(adapter->pdev_dev, "%s: %s port module inserted\n",
+ dev->name, mod_str[pi->mod_type]);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
+ dev_info(adapter->pdev_dev, "%s: unsupported optical port "
+ "module inserted\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
+ dev_info(adapter->pdev_dev, "%s: unknown port module inserted,"
+ "forcing TWINAX\n", dev->name);
+ else if (pi->mod_type == FW_PORT_MOD_TYPE_ERROR)
+ dev_info(adapter->pdev_dev, "%s: transceiver module error\n",
+ dev->name);
+ else
+ dev_info(adapter->pdev_dev, "%s: unknown module type %d "
+ "inserted\n", dev->name, pi->mod_type);
+}
+
+/*
* Net device operations.
* ======================
*/
@@ -1193,24 +1226,103 @@ static void cxgb4vf_poll_controller(struct net_device *dev)
* state of the port to which we're linked.
*/
-/*
- * Return current port link settings.
- */
-static int cxgb4vf_get_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
-{
- const struct port_info *pi = netdev_priv(dev);
+static unsigned int t4vf_from_fw_linkcaps(enum fw_port_type type,
+ unsigned int caps)
+{
+ unsigned int v = 0;
+
+ if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
+ type == FW_PORT_TYPE_BT_XAUI) {
+ v |= SUPPORTED_TP;
+ if (caps & FW_PORT_CAP_SPEED_100M)
+ v |= SUPPORTED_100baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+ v |= SUPPORTED_Backplane;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseKX_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseKX4_Full;
+ } else if (type == FW_PORT_TYPE_KR)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+ else if (type == FW_PORT_TYPE_BP_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
+ else if (type == FW_PORT_TYPE_BP4_AP)
+ v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
+ SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
+ SUPPORTED_10000baseKX4_Full;
+ else if (type == FW_PORT_TYPE_FIBER_XFI ||
+ type == FW_PORT_TYPE_FIBER_XAUI ||
+ type == FW_PORT_TYPE_SFP ||
+ type == FW_PORT_TYPE_QSFP_10G ||
+ type == FW_PORT_TYPE_QSA) {
+ v |= SUPPORTED_FIBRE;
+ if (caps & FW_PORT_CAP_SPEED_1G)
+ v |= SUPPORTED_1000baseT_Full;
+ if (caps & FW_PORT_CAP_SPEED_10G)
+ v |= SUPPORTED_10000baseT_Full;
+ } else if (type == FW_PORT_TYPE_BP40_BA ||
+ type == FW_PORT_TYPE_QSFP) {
+ v |= SUPPORTED_40000baseSR4_Full;
+ v |= SUPPORTED_FIBRE;
+ }
+
+ if (caps & FW_PORT_CAP_ANEG)
+ v |= SUPPORTED_Autoneg;
+ return v;
+}
+
+static int cxgb4vf_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+ const struct port_info *p = netdev_priv(dev);
+
+ if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+ p->port_type == FW_PORT_TYPE_BT_XFI ||
+ p->port_type == FW_PORT_TYPE_BT_XAUI)
+ cmd->port = PORT_TP;
+ else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
+ p->port_type == FW_PORT_TYPE_FIBER_XAUI)
+ cmd->port = PORT_FIBRE;
+ else if (p->port_type == FW_PORT_TYPE_SFP ||
+ p->port_type == FW_PORT_TYPE_QSFP_10G ||
+ p->port_type == FW_PORT_TYPE_QSA ||
+ p->port_type == FW_PORT_TYPE_QSFP) {
+ if (p->mod_type == FW_PORT_MOD_TYPE_LR ||
+ p->mod_type == FW_PORT_MOD_TYPE_SR ||
+ p->mod_type == FW_PORT_MOD_TYPE_ER ||
+ p->mod_type == FW_PORT_MOD_TYPE_LRM)
+ cmd->port = PORT_FIBRE;
+ else if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
+ p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
+ cmd->port = PORT_DA;
+ else
+ cmd->port = PORT_OTHER;
+ } else
+ cmd->port = PORT_OTHER;
- cmd->supported = pi->link_cfg.supported;
- cmd->advertising = pi->link_cfg.advertising;
+ if (p->mdio_addr >= 0) {
+ cmd->phy_address = p->mdio_addr;
+ cmd->transceiver = XCVR_EXTERNAL;
+ cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+ MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+ } else {
+ cmd->phy_address = 0; /* not really, but no better option */
+ cmd->transceiver = XCVR_INTERNAL;
+ cmd->mdio_support = 0;
+ }
+
+ cmd->supported = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.supported);
+ cmd->advertising = t4vf_from_fw_linkcaps(p->port_type,
+ p->link_cfg.advertising);
ethtool_cmd_speed_set(cmd,
- netif_carrier_ok(dev) ? pi->link_cfg.speed : -1);
+ netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
cmd->duplex = DUPLEX_FULL;
-
- cmd->port = (cmd->supported & SUPPORTED_TP) ? PORT_TP : PORT_FIBRE;
- cmd->phy_address = pi->port_id;
- cmd->transceiver = XCVR_EXTERNAL;
- cmd->autoneg = pi->link_cfg.autoneg;
+ cmd->autoneg = p->link_cfg.autoneg;
cmd->maxtxpkt = 0;
cmd->maxrxpkt = 0;
return 0;
@@ -2318,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter)
*/
n10g = 0;
for_each_port(adapter, pidx)
- n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
+ n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg);
/*
* We default to 1 queue per non-10G port and up to # of cores queues
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 8d3237f5e364..b9debb4f29a3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -230,7 +230,7 @@ struct adapter_params {
static inline bool is_10g_port(const struct link_config *lc)
{
- return (lc->supported & SUPPORTED_10000baseT_Full) != 0;
+ return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
}
static inline bool is_x_10g_port(const struct link_config *lc)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 02e8833b7797..60426cf890a7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -245,6 +245,10 @@ static int hash_mac_addr(const u8 *addr)
return a & 0x3f;
}
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+ FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_SPEED_40G | \
+ FW_PORT_CAP_SPEED_100G | FW_PORT_CAP_ANEG)
+
/**
* init_link_config - initialize a link's SW state
* @lc: structure holding the link state
@@ -259,8 +263,8 @@ static void init_link_config(struct link_config *lc, unsigned int caps)
lc->requested_speed = 0;
lc->speed = 0;
lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
- if (lc->supported & SUPPORTED_Autoneg) {
- lc->advertising = lc->supported;
+ if (lc->supported & FW_PORT_CAP_ANEG) {
+ lc->advertising = lc->supported & ADVERT_MASK;
lc->autoneg = AUTONEG_ENABLE;
lc->requested_fc |= PAUSE_AUTONEG;
} else {
@@ -280,7 +284,6 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
struct fw_vi_cmd vi_cmd, vi_rpl;
struct fw_port_cmd port_cmd, port_rpl;
int v;
- u32 word;
/*
* Execute a VI Read command to get our Virtual Interface information
@@ -319,19 +322,13 @@ int t4vf_port_init(struct adapter *adapter, int pidx)
if (v)
return v;
- v = 0;
- word = be16_to_cpu(port_rpl.u.info.pcap);
- if (word & FW_PORT_CAP_SPEED_100M)
- v |= SUPPORTED_100baseT_Full;
- if (word & FW_PORT_CAP_SPEED_1G)
- v |= SUPPORTED_1000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_10G)
- v |= SUPPORTED_10000baseT_Full;
- if (word & FW_PORT_CAP_SPEED_40G)
- v |= SUPPORTED_40000baseSR4_Full;
- if (word & FW_PORT_CAP_ANEG)
- v |= SUPPORTED_Autoneg;
- init_link_config(&pi->link_cfg, v);
+ v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype);
+ pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ?
+ FW_PORT_CMD_MDIOADDR_G(v) : -1;
+ pi->port_type = FW_PORT_CMD_PTYPE_G(v);
+ pi->mod_type = FW_PORT_MOD_TYPE_NA;
+
+ init_link_config(&pi->link_cfg, be16_to_cpu(port_rpl.u.info.pcap));
return 0;
}
@@ -1491,7 +1488,7 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
*/
const struct fw_port_cmd *port_cmd =
(const struct fw_port_cmd *)rpl;
- u32 word;
+ u32 stat, mod;
int action, port_id, link_ok, speed, fc, pidx;
/*
@@ -1509,21 +1506,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
port_id = FW_PORT_CMD_PORTID_G(
be32_to_cpu(port_cmd->op_to_portid));
- word = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
- link_ok = (word & FW_PORT_CMD_LSTATUS_F) != 0;
+ stat = be32_to_cpu(port_cmd->u.info.lstatus_to_modtype);
+ link_ok = (stat & FW_PORT_CMD_LSTATUS_F) != 0;
speed = 0;
fc = 0;
- if (word & FW_PORT_CMD_RXPAUSE_F)
+ if (stat & FW_PORT_CMD_RXPAUSE_F)
fc |= PAUSE_RX;
- if (word & FW_PORT_CMD_TXPAUSE_F)
+ if (stat & FW_PORT_CMD_TXPAUSE_F)
fc |= PAUSE_TX;
- if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
+ if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_100M))
speed = 100;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_1G))
speed = 1000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_10G))
speed = 10000;
- else if (word & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
+ else if (stat & FW_PORT_CMD_LSPEED_V(FW_PORT_CAP_SPEED_40G))
speed = 40000;
/*
@@ -1540,12 +1537,21 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
continue;
lc = &pi->link_cfg;
+
+ mod = FW_PORT_CMD_MODTYPE_G(stat);
+ if (mod != pi->mod_type) {
+ pi->mod_type = mod;
+ t4vf_os_portmod_changed(adapter, pidx);
+ }
+
if (link_ok != lc->link_ok || speed != lc->speed ||
fc != lc->fc) {
/* something changed */
lc->link_ok = link_ok;
lc->speed = speed;
lc->fc = fc;
+ lc->supported =
+ be16_to_cpu(port_cmd->u.info.pcap);
t4vf_os_link_changed(adapter, pidx, link_ok);
}
}
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 868d0f605d60..e356afa44e7d 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1060,10 +1060,14 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3);
}
- if ((netdev->features & NETIF_F_RXCSUM) && !csum_not_calc) {
- skb->csum = htons(checksum);
- skb->ip_summed = CHECKSUM_COMPLETE;
- }
+ /* Hardware does not provide the whole packet checksum. It only
+ * provides a pseudo checksum. Since hw validates the packet
+ * checksum but does not provide us the checksum value, use
+ * CHECKSUM_UNNECESSARY.
+ */
+ if ((netdev->features & NETIF_F_RXCSUM) && tcp_udp_csum_ok &&
+ ipv4_csum_ok)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
if (vlan_stripped)
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
@@ -1331,7 +1335,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
int err;
if (!enic_poll_lock_napi(&enic->rq[rq]))
- return work_done;
+ return budget;
/* Service RQ
*/
@@ -1612,7 +1616,7 @@ static int enic_open(struct net_device *netdev)
if (vnic_rq_desc_used(&enic->rq[i]) == 0) {
netdev_err(netdev, "Unable to alloc receive buffers\n");
err = -ENOMEM;
- goto err_out_notify_unset;
+ goto err_out_free_rq;
}
}
@@ -1645,7 +1649,9 @@ static int enic_open(struct net_device *netdev)
return 0;
-err_out_notify_unset:
+err_out_free_rq:
+ for (i = 0; i < enic->rq_count; i++)
+ vnic_rq_clean(&enic->rq[i], enic_free_rq_buf);
enic_dev_notify_unset(enic);
err_out_free_intr:
enic_free_intr(enic);
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index a379c3e4b57f..13d00a38a5bd 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget)
* break out of while loop if there are no more
* packets waiting
*/
- if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) {
- napi_complete(napi);
- int_enable = dnet_readl(bp, INTR_ENB);
- int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
- dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
- }
+ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16))
+ break;
cmd_word = dnet_readl(bp, RX_LEN_FIFO);
pkt_len = cmd_word & 0xFFFF;
@@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget)
"size %u.\n", dev->name, pkt_len);
}
- budget -= npackets;
-
if (npackets < budget) {
/* We processed all packets available. Tell NAPI it can
- * stop polling then re-enable rx interrupts */
+ * stop polling then re-enable rx interrupts.
+ */
napi_complete(napi);
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
- return 0;
}
- /* There are still packets waiting */
- return 1;
+ return npackets;
}
static irqreturn_t dnet_interrupt(int irq, void *dev_id)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 196073110e32..d48806b5cd88 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
* distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload
* is expected to work across all types of IP tunnels once exported. Skyhawk
* supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN
- * offloads in hw_enc_features only when a VxLAN port is added. Note this only
- * ensures that other tunnels work fine while VxLAN offloads are not enabled.
+ * offloads in hw_enc_features only when a VxLAN port is added. If other (non
+ * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for
+ * those other tunnels are unexported on the fly through ndo_features_check().
*
* Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack
* adds more than one port, disable offloads and don't re-enable them again
@@ -4459,9 +4460,45 @@ done:
adapter->vxlan_port_count--;
}
-static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t be_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ struct be_adapter *adapter = netdev_priv(dev);
+ u8 l4_hdr = 0;
+
+ /* The code below restricts offload features for some tunneled packets.
+ * Offload features for normal (non tunnel) packets are unchanged.
+ */
+ if (!skb->encapsulation ||
+ !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS))
+ return features;
+
+ /* It's an encapsulated packet and VxLAN offloads are enabled. We
+ * should disable tunnel offload features if it's not a VxLAN packet,
+ * as tunnel offloads have been enabled only for VxLAN. This is done to
+ * allow other tunneled traffic like GRE to work fine while VxLAN
+ * offloads are configured in Skyhawk-R.
+ */
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if (l4_hdr != IPPROTO_UDP ||
+ skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
+ skb->inner_protocol != htons(ETH_P_TEB) ||
+ skb_inner_mac_header(skb) - skb_transport_header(skb) !=
+ sizeof(struct udphdr) + sizeof(struct vxlanhdr))
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+
+ return features;
}
#endif
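The new be_features_check() above keeps the tunnel offloads only for frames that really look like VxLAN: UDP outer transport, inner protocol ETH_P_TEB, and exactly a udphdr plus vxlanhdr between the transport and inner MAC headers; anything else has its checksum/GSO bits stripped so the stack falls back to software. A simplified, user-space rendering of that decision (struct pkt and the F_* bits are stand-ins for the sk_buff fields and netdev feature flags, not the real API):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define F_CSUM 0x1u
#define F_GSO  0x2u

/* a few fields standing in for the sk_buff state that the real check reads */
struct pkt {
	bool     encapsulated;
	uint8_t  l4_proto;          /* transport protocol of the outer header */
	uint16_t inner_proto;       /* 0x6558 = ETH_P_TEB for VxLAN payloads */
	int      tunnel_hdr_len;    /* bytes between transport and inner MAC header */
};

static unsigned int features_check(const struct pkt *p, unsigned int features)
{
	if (!p->encapsulated)
		return features;        /* plain traffic keeps all offloads */

	/* only UDP-encapsulated Ethernet with a VxLAN-sized header keeps the
	 * tunnel offloads; everything else falls back to software */
	if (p->l4_proto != 17 || p->inner_proto != 0x6558 ||
	    p->tunnel_hdr_len != 8 + 8)     /* udphdr + vxlanhdr */
		return features & ~(F_CSUM | F_GSO);

	return features;
}

int main(void)
{
	struct pkt gre   = { true, 47, 0x6558, 4 };
	struct pkt vxlan = { true, 17, 0x6558, 16 };

	printf("gre   -> 0x%x\n", features_check(&gre,   F_CSUM | F_GSO)); /* 0x0 */
	printf("vxlan -> 0x%x\n", features_check(&vxlan, F_CSUM | F_GSO)); /* 0x3 */
	return 0;
}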
@@ -4492,7 +4529,7 @@ static const struct net_device_ops be_netdev_ops = {
#ifdef CONFIG_BE2NET_VXLAN
.ndo_add_vxlan_port = be_add_vxlan_port,
.ndo_del_vxlan_port = be_del_vxlan_port,
- .ndo_gso_check = be_gso_check,
+ .ndo_features_check = be_features_check,
#endif
};
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 469691ad4a1e..40132929daf7 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -424,6 +424,8 @@ struct bufdesc_ex {
* (40ns * 6).
*/
#define FEC_QUIRK_BUG_CAPTURE (1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO (1 << 11)
struct fec_enet_priv_tx_q {
int index;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 5ebdf8dc8a31..bba87775419d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = {
.driver_data = 0,
}, {
.name = "imx28-fec",
- .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
+ .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+ FEC_QUIRK_SINGLE_MDIO,
}, {
.name = "imx6q-fec",
.driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
@@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
int err = -ENXIO, i;
/*
- * The dual fec interfaces are not equivalent with enet-mac.
+ * The i.MX28 dual fec interfaces are not equal.
* Here are the differences:
*
* - fec0 supports MII & RMII modes while fec1 only supports RMII
@@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
* mdio interface in board design, and need to be configured by
* fec0 mii_bus.
*/
- if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) {
+ if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
/* fec1 uses fec0 mii_bus */
if (mii_cnt && fec0_mii_bus) {
fep->mii_bus = fec0_mii_bus;
@@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
mii_cnt++;
/* save fec0 mii_bus */
- if (fep->quirks & FEC_QUIRK_ENET_MAC)
+ if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
fec0_mii_bus = fep->mii_bus;
return 0;
@@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev)
pdev->id_entry = of_id->data;
fep->quirks = pdev->id_entry->driver_data;
+ fep->netdev = ndev;
fep->num_rx_queues = num_rx_qs;
fep->num_tx_queues = num_tx_qs;
diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig
index 5b8300a32bf5..4d61ef50b465 100644
--- a/drivers/net/ethernet/intel/Kconfig
+++ b/drivers/net/ethernet/intel/Kconfig
@@ -281,6 +281,17 @@ config I40E_DCB
If unsure, say N.
+config I40E_FCOE
+ bool "Fibre Channel over Ethernet (FCoE)"
+ default n
+ depends on I40E && DCB && FCOE
+ ---help---
+ Say Y here if you want to use Fibre Channel over Ethernet (FCoE)
+ in the driver. This will create new netdev for exclusive FCoE
+ use with XL710 FCoE offloads enabled.
+
+ If unsure, say N.
+
config I40EVF
tristate "Intel(R) XL710 X710 Virtual Function Ethernet support"
depends on PCI_MSI
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 781065eb5431..e9c3a87e5b11 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic)
mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr);
} else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) &&
(mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) &&
- !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
+ (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) {
/* enable/disable MDI/MDI-X auto-switching. */
mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG,
nic->mii.force_media ? 0 : NCONFIG_AUTO_SWITCH);
diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile
index 4b94ddb29c24..c40581999121 100644
--- a/drivers/net/ethernet/intel/i40e/Makefile
+++ b/drivers/net/ethernet/intel/i40e/Makefile
@@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \
i40e_virtchnl_pf.o
i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
-i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
+i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o
diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
index 433a55886ad2..cb0de455683e 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c
@@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
if (desc_n >= ring->count || desc_n < 0) {
dev_info(&pf->pdev->dev,
"descriptor %d not found\n", desc_n);
- return;
+ goto out;
}
if (!is_rx_ring) {
txd = I40E_TX_DESC(ring, desc_n);
@@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
} else {
dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n");
}
+
+out:
kfree(ring);
}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
index 045b5c4b98b3..ad802dd0f67a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h
@@ -78,7 +78,7 @@ do { \
} while (0)
typedef enum i40e_status_code i40e_status;
-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#ifdef CONFIG_I40E_FCOE
#define I40E_FCOE
-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
+#endif
#endif /* _I40E_OSDEP_H_ */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
index 04b441460bbd..cecb340898fe 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
@@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head);
}
+#define WB_STRIDE 0x3
+
/**
* i40e_clean_tx_irq - Reclaim resources after transmit completes
* @tx_ring: tx ring to clean
@@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
tx_ring->q_vector->tx.total_bytes += total_bytes;
tx_ring->q_vector->tx.total_packets += total_packets;
+ /* check to see if there are any non-cache aligned descriptors
+ * waiting to be written back, and kick the hardware to force
+ * them to be written back in case of napi polling
+ */
+ if (budget &&
+ !((i & WB_STRIDE) == WB_STRIDE) &&
+ !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
+ (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
+ tx_ring->arm_wb = true;
+ else
+ tx_ring->arm_wb = false;
+
if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) {
/* schedule immediate reset if we believe we hung */
dev_info(tx_ring->dev, "Detected Tx Unit Hang\n"
@@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
dev_info(tx_ring->dev,
- "tx hang detected on queue %d, resetting adapter\n",
+ "tx hang detected on queue %d, reset requested\n",
tx_ring->queue_index);
- tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev);
+ /* do not fire the reset immediately; wait for the stack to
+ * decide we are truly stuck. This also prevents every queue from
+ * simultaneously requesting a reset.
+ */
- /* the adapter is about to reset, no point in enabling stuff */
- return true;
+ /* the adapter is about to reset, no point in enabling polling */
+ budget = 1;
}
netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
@@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
}
}
- return budget > 0;
+ return !!budget;
+}
+
+/**
+ * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+ u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK
+ /* allow 00 to be written to the index */;
+
+ wr32(&vsi->back->hw,
+ I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+ val);
}
/**
@@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
* so the total length of IPv4 header is IHL*4 bytes
* The UDP_0 bit *may* be set if the *inner* header is UDP
*/
- if (ipv4_tunnel &&
- (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) &&
- !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) {
+ if (ipv4_tunnel) {
skb->transport_header = skb->mac_header +
sizeof(struct ethhdr) +
(ip_hdr(skb)->ihl * 4);
@@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
skb->protocol == htons(ETH_P_8021AD))
? VLAN_HLEN : 0;
- rx_udp_csum = udp_csum(skb);
- iph = ip_hdr(skb);
- csum = csum_tcpudp_magic(
- iph->saddr, iph->daddr,
- (skb->len - skb_transport_offset(skb)),
- IPPROTO_UDP, rx_udp_csum);
+ if ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
+ (udp_hdr(skb)->check != 0)) {
+ rx_udp_csum = udp_csum(skb);
+ iph = ip_hdr(skb);
+ csum = csum_tcpudp_magic(
+ iph->saddr, iph->daddr,
+ (skb->len - skb_transport_offset(skb)),
+ IPPROTO_UDP, rx_udp_csum);
- if (udp_hdr(skb)->check != csum)
- goto checksum_fail;
+ if (udp_hdr(skb)->check != csum)
+ goto checksum_fail;
+
+ } /* else it's GRE and so no outer UDP header */
}
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
struct i40e_vsi *vsi = q_vector->vsi;
struct i40e_ring *ring;
bool clean_complete = true;
+ bool arm_wb = false;
int budget_per_ring;
if (test_bit(__I40E_DOWN, &vsi->state)) {
@@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
/* Since the actual Tx work is minimal, we can give the Tx a larger
* budget and be more aggressive about cleaning up the Tx descriptors.
*/
- i40e_for_each_ring(ring, q_vector->tx)
+ i40e_for_each_ring(ring, q_vector->tx) {
clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+ arm_wb |= ring->arm_wb;
+ }
/* We attempt to distribute budget to each Rx queue fairly, but don't
* allow the budget to go below 1 because that would exit polling early.
@@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring);
/* If work not completed, return budget and polling will return */
- if (!clean_complete)
+ if (!clean_complete) {
+ if (arm_wb)
+ i40e_force_wb(vsi, q_vector);
return budget;
+ }
/* Work is done so exit the polling mode and re-enable the interrupt */
napi_complete(napi);
@@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb,
if (err < 0)
return err;
- if (protocol == htons(ETH_P_IP)) {
- iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb);
+ ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) : ipv6_hdr(skb);
+
+ if (iph->version == 4) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
iph->tot_len = 0;
iph->check = 0;
tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
0, IPPROTO_TCP, 0);
- } else if (skb_is_gso_v6(skb)) {
-
- ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb)
- : ipv6_hdr(skb);
+ } else if (ipv6h->version == 6) {
tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb);
ipv6h->payload_len = 0;
tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr,
@@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
}
} else if (tx_flags & I40E_TX_FLAGS_IPV6) {
- if (tx_flags & I40E_TX_FLAGS_TSO) {
- *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6;
+ if (tx_flags & I40E_TX_FLAGS_TSO)
ip_hdr(skb)->check = 0;
- } else {
- *cd_tunneling |=
- I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM;
- }
}
/* Now set the ctx descriptor fields */
@@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags,
((skb_inner_network_offset(skb) -
skb_transport_offset(skb)) >> 1) <<
I40E_TXD_CTX_QW0_NATLEN_SHIFT;
-
+ if (this_ip_hdr->version == 6) {
+ tx_flags &= ~I40E_TX_FLAGS_IPV4;
+ tx_flags |= I40E_TX_FLAGS_IPV6;
+ }
} else {
network_hdr_len = skb_network_header_len(skb);
this_ip_hdr = ip_hdr(skb);
@@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
/* Place RS bit on last descriptor of any packet that spans across the
* 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
*/
-#define WB_STRIDE 0x3
if (((i & WB_STRIDE) != WB_STRIDE) &&
(first <= &tx_ring->tx_bi[i]) &&
(first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
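The arm_wb logic above relies on the same WB_STRIDE alignment rule used when placing the RS bit: completed descriptors are only reported back in groups of four, so if cleaning stops on an index that is not stride-aligned, finished work can sit unreported until the next interrupt. A standalone sketch of that index test (hypothetical helper name, same 0x3 mask as the patch):

/* Sketch only: when does the last cleaned index end on a write-back
 * stride boundary? */
#include <stdbool.h>
#include <stdio.h>

#define WB_STRIDE 0x3

static bool may_need_forced_writeback(unsigned int last_cleaned_index)
{
	/* indices 3, 7, 11, ... are stride-aligned; anything else can leave
	 * completed but not yet written back descriptors behind */
	return (last_cleaned_index & WB_STRIDE) != WB_STRIDE;
}

int main(void)
{
	for (unsigned int i = 0; i < 8; i++)
		printf("index %u -> force wb: %s\n", i,
		       may_need_forced_writeback(i) ? "yes" : "no");
	return 0;
}

When that condition holds and polling has not completed, i40e_force_wb() triggers a software interrupt so the pending descriptors are flushed instead of lingering.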
diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
index e60d3accb2e2..18b00231d2f1 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
@@ -241,6 +241,7 @@ struct i40e_ring {
unsigned long last_rx_timestamp;
bool ring_active; /* is ring online or not */
+ bool arm_wb; /* do something to arm write back */
/* stats structs */
struct i40e_queue_stats stats;
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index 051ea94bdcd3..0f69ef81751a 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
u32 swmask = mask;
u32 fwmask = mask << 16;
s32 ret_val = 0;
- s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+ s32 i = 0, timeout = 200;
while (i < timeout) {
if (igb_get_hw_semaphore(hw)) {
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index a62fc38f045e..1c75829eb166 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -192,6 +192,10 @@ static char mv643xx_eth_driver_version[] = "1.4";
#define IS_TSO_HEADER(txq, addr) \
((addr >= txq->tso_hdrs_dma) && \
(addr < txq->tso_hdrs_dma + txq->tx_ring_size * TSO_HEADER_SIZE))
+
+#define DESC_DMA_MAP_SINGLE 0
+#define DESC_DMA_MAP_PAGE 1
+
/*
* RX/TX descriptors.
*/
@@ -362,6 +366,7 @@ struct tx_queue {
dma_addr_t tso_hdrs_dma;
struct tx_desc *tx_desc_area;
+ char *tx_desc_mapping; /* array to track the type of the dma mapping */
dma_addr_t tx_desc_dma;
int tx_desc_area_size;
@@ -750,6 +755,7 @@ txq_put_data_tso(struct net_device *dev, struct tx_queue *txq,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
desc->l4i_chk = 0;
desc->byte_cnt = length;
@@ -879,14 +885,13 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
skb_frag_t *this_frag;
int tx_index;
struct tx_desc *desc;
- void *addr;
this_frag = &skb_shinfo(skb)->frags[frag];
- addr = page_address(this_frag->page.p) + this_frag->page_offset;
tx_index = txq->tx_curr_desc++;
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_PAGE;
/*
* The last fragment will generate an interrupt
@@ -902,8 +907,9 @@ static void txq_submit_frag_skb(struct tx_queue *txq, struct sk_buff *skb)
desc->l4i_chk = 0;
desc->byte_cnt = skb_frag_size(this_frag);
- desc->buf_ptr = dma_map_single(mp->dev->dev.parent, addr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ desc->buf_ptr = skb_frag_dma_map(mp->dev->dev.parent,
+ this_frag, 0, desc->byte_cnt,
+ DMA_TO_DEVICE);
}
}
@@ -936,6 +942,7 @@ static int txq_submit_skb(struct tx_queue *txq, struct sk_buff *skb,
if (txq->tx_curr_desc == txq->tx_ring_size)
txq->tx_curr_desc = 0;
desc = &txq->tx_desc_area[tx_index];
+ txq->tx_desc_mapping[tx_index] = DESC_DMA_MAP_SINGLE;
if (nr_frags) {
txq_submit_frag_skb(txq, skb);
@@ -1047,9 +1054,12 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
int tx_index;
struct tx_desc *desc;
u32 cmd_sts;
+ char desc_dma_map;
tx_index = txq->tx_used_desc;
desc = &txq->tx_desc_area[tx_index];
+ desc_dma_map = txq->tx_desc_mapping[tx_index];
+
cmd_sts = desc->cmd_sts;
if (cmd_sts & BUFFER_OWNED_BY_DMA) {
@@ -1065,9 +1075,19 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
reclaimed++;
txq->tx_desc_count--;
- if (!IS_TSO_HEADER(txq, desc->buf_ptr))
- dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
- desc->byte_cnt, DMA_TO_DEVICE);
+ if (!IS_TSO_HEADER(txq, desc->buf_ptr)) {
+
+ if (desc_dma_map == DESC_DMA_MAP_PAGE)
+ dma_unmap_page(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_single(mp->dev->dev.parent,
+ desc->buf_ptr,
+ desc->byte_cnt,
+ DMA_TO_DEVICE);
+ }
if (cmd_sts & TX_ENABLE_INTERRUPT) {
struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
@@ -1996,6 +2016,7 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
struct tx_queue *txq = mp->txq + index;
struct tx_desc *tx_desc;
int size;
+ int ret;
int i;
txq->index = index;
@@ -2048,18 +2069,34 @@ static int txq_init(struct mv643xx_eth_private *mp, int index)
nexti * sizeof(struct tx_desc);
}
+ txq->tx_desc_mapping = kcalloc(txq->tx_ring_size, sizeof(char),
+ GFP_KERNEL);
+ if (!txq->tx_desc_mapping) {
+ ret = -ENOMEM;
+ goto err_free_desc_area;
+ }
+
/* Allocate DMA buffers for TSO MAC/IP/TCP headers */
txq->tso_hdrs = dma_alloc_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
&txq->tso_hdrs_dma, GFP_KERNEL);
if (txq->tso_hdrs == NULL) {
- dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
- txq->tx_desc_area, txq->tx_desc_dma);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_free_desc_mapping;
}
skb_queue_head_init(&txq->tx_skb);
return 0;
+
+err_free_desc_mapping:
+ kfree(txq->tx_desc_mapping);
+err_free_desc_area:
+ if (index == 0 && size <= mp->tx_desc_sram_size)
+ iounmap(txq->tx_desc_area);
+ else
+ dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
+ txq->tx_desc_area, txq->tx_desc_dma);
+ return ret;
}
static void txq_deinit(struct tx_queue *txq)
@@ -2077,6 +2114,8 @@ static void txq_deinit(struct tx_queue *txq)
else
dma_free_coherent(mp->dev->dev.parent, txq->tx_desc_area_size,
txq->tx_desc_area, txq->tx_desc_dma);
+ kfree(txq->tx_desc_mapping);
+
if (txq->tso_hdrs)
dma_free_coherent(mp->dev->dev.parent,
txq->tx_ring_size * TSO_HEADER_SIZE,
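The new tx_desc_mapping array exists because fragment buffers are now mapped with skb_frag_dma_map(), which is a page mapping, while linear data and TSO headers still use dma_map_single(); each kind has to be released with its matching unmap call at reclaim time. A compact sketch of that per-descriptor bookkeeping (fake_txq and the two enum values are illustrative, not the driver structures):

/* Sketch only: remember how each descriptor was mapped so the matching
 * unmap routine can be picked when the descriptor is reclaimed. */
#include <stdio.h>

enum map_kind { MAP_SINGLE = 0, MAP_PAGE = 1 };

struct fake_txq {
	enum map_kind mapping[4];	/* one entry per descriptor slot */
};

static void reclaim_one(const struct fake_txq *q, int idx)
{
	if (q->mapping[idx] == MAP_PAGE)
		printf("desc %d: dma_unmap_page()\n", idx);
	else
		printf("desc %d: dma_unmap_single()\n", idx);
}

int main(void)
{
	struct fake_txq q = {
		.mapping = { MAP_SINGLE, MAP_PAGE, MAP_PAGE, MAP_SINGLE },
	};

	for (int i = 0; i < 4; i++)
		reclaim_one(&q, i);
	return 0;
}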
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 190cbd931f6b..ac6a8f1eea6c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad
{
int err;
- if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
+ if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN ||
+ priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC)
return 0; /* do nothing */
err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn,
@@ -2365,9 +2366,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
}
-static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t mlx4_en_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -2400,7 +2403,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
@@ -2434,7 +2437,7 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
#ifdef CONFIG_MLX4_EN_VXLAN
.ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
.ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
- .ndo_gso_check = mlx4_en_gso_check,
+ .ndo_features_check = mlx4_en_features_check,
#endif
};
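Switching from .ndo_gso_check to .ndo_features_check changes the shape of the hook: instead of a boolean verdict, the driver returns the subset of offload features it can honour for this particular skb (here delegated to vxlan_features_check()), and the core can fall back to software handling for whatever was masked off. A small sketch of that narrowing pattern with made-up flag values (F_TSO and F_CSUM are not the real netdev feature bits):

/* Sketch only: a features_check-style hook narrows the offered feature
 * mask instead of answering yes/no. */
#include <stdbool.h>
#include <stdio.h>

#define F_TSO	(1u << 0)
#define F_CSUM	(1u << 1)

static unsigned int my_features_check(bool encapsulated, unsigned int features)
{
	/* pretend the hardware cannot offload encapsulated traffic at all */
	if (encapsulated)
		features &= ~(F_TSO | F_CSUM);
	return features;
}

int main(void)
{
	printf("plain: 0x%x\n", my_features_check(false, F_TSO | F_CSUM)); /* 0x3 */
	printf("encap: 0x%x\n", my_features_check(true,  F_TSO | F_CSUM)); /* 0x0 */
	return 0;
}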
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index a308d41e4de0..e3357bf523df 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -962,7 +962,17 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
tx_desc->ctrl.owner_opcode = op_own;
if (send_doorbell) {
wmb();
- iowrite32(ring->doorbell_qpn,
+ /* Since there is no iowrite*_native() that writes the
+ * value as is, without byteswapping - using the one
+ * that doesn't do byteswapping in the relevant arch
+ * endianness.
+ */
+#if defined(__LITTLE_ENDIAN)
+ iowrite32(
+#else
+ iowrite32be(
+#endif
+ ring->doorbell_qpn,
ring->bf.uar->map + MLX4_SEND_DOORBELL);
} else {
ring->xmit_more++;
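The doorbell fix above boils down to an endianness choice: the queue pair number must reach the device register exactly as stored, and since there is no swap-free iowrite helper, the patch picks iowrite32() on little-endian builds and iowrite32be() on big-endian ones, each being the variant that performs no byte swap on that architecture. The compile-time selection can be sketched with userspace stand-ins (fake_iowrite32/fake_iowrite32be are placeholders, not the kernel accessors):

/* Sketch only: pick the accessor that does not swap bytes on the build
 * architecture, so the value reaches the device unmodified. */
#include <stdint.h>
#include <stdio.h>

static void fake_iowrite32(uint32_t v)   { printf("iowrite32(0x%x)\n", v); }
static void fake_iowrite32be(uint32_t v) { printf("iowrite32be(0x%x)\n", v); }

static void ring_doorbell(uint32_t qpn)
{
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__)
	fake_iowrite32(qpn);	/* little-endian host: the plain write does no swap */
#else
	fake_iowrite32be(qpn);	/* big-endian host: the "be" accessor is the no-swap one */
#endif
}

int main(void)
{
	ring_doorbell(0x1234);
	return 0;
}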
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 943cbd47d832..6e08352ec994 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev,
struct mlx4_dev_cap *dev_cap)
{
if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED &&
- dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS &&
- dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC)
+ dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN;
else
dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE;
@@ -1829,7 +1828,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_dev_cap(dev, &dev_cap);
if (err) {
mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n");
- goto err_stop_fw;
+ return err;
}
choose_steering_mode(dev, &dev_cap);
@@ -1860,7 +1859,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
&init_hca);
if ((long long) icm_size < 0) {
err = icm_size;
- goto err_stop_fw;
+ return err;
}
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
@@ -1874,7 +1873,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
if (err)
- goto err_stop_fw;
+ return err;
err = mlx4_INIT_HCA(dev, &init_hca);
if (err) {
@@ -1886,7 +1885,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
err = mlx4_query_func(dev, &dev_cap);
if (err < 0) {
mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n");
- goto err_stop_fw;
+ goto err_close;
} else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) {
dev->caps.num_eqs = dev_cap.max_eqs;
dev->caps.reserved_eqs = dev_cap.reserved_eqs;
@@ -2006,11 +2005,6 @@ err_free_icm:
if (!mlx4_is_slave(dev))
mlx4_free_icms(dev);
-err_stop_fw:
- if (!mlx4_is_slave(dev)) {
- mlx4_UNMAP_FA(dev);
- mlx4_free_icm(dev, priv->fw.fw_icm, 0);
- }
return err;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c
index d6f549685c0f..7094a9c70fd5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mr.c
@@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free);
void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr)
{
mlx4_mtt_cleanup(dev, &mr->mtt);
+ mr->mtt.order = -1;
}
EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup);
@@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr,
{
int err;
- mpt_entry->start = cpu_to_be64(iova);
- mpt_entry->length = cpu_to_be64(size);
- mpt_entry->entity_size = cpu_to_be32(page_shift);
-
err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt);
if (err)
return err;
+ mpt_entry->start = cpu_to_be64(mr->iova);
+ mpt_entry->length = cpu_to_be64(mr->size);
+ mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift);
+
mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK |
MLX4_MPT_PD_FLAG_EN_INV);
mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE |
diff --git a/drivers/net/ethernet/micrel/ksz884x.c b/drivers/net/ethernet/micrel/ksz884x.c
index f1ebed6c63b1..2fa6ae026e4f 100644
--- a/drivers/net/ethernet/micrel/ksz884x.c
+++ b/drivers/net/ethernet/micrel/ksz884x.c
@@ -2303,12 +2303,6 @@ static inline int port_chk_force_flow_ctrl(struct ksz_hw *hw, int p)
/* Spanning Tree */
-static inline void port_cfg_dis_learn(struct ksz_hw *hw, int p, int set)
-{
- port_cfg(hw, p,
- KS8842_PORT_CTRL_2_OFFSET, PORT_LEARN_DISABLE, set);
-}
-
static inline void port_cfg_rx(struct ksz_hw *hw, int p, int set)
{
port_cfg(hw, p,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index af099057f0e9..71af98bb72cb 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
(void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd),
&mgp->cmd_bus, GFP_KERNEL);
- if (mgp->cmd == NULL)
+ if (!mgp->cmd) {
+ status = -ENOMEM;
goto abort_with_enabled;
+ }
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index f5e4b820128b..db0c7a9aee60 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
if (sp->s2io_entries[i].in_use == MSIX_FLG) {
if (sp->s2io_entries[i].type ==
MSIX_RING_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-RX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-RX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_ring_handle,
@@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp)
sp->s2io_entries[i].arg);
} else if (sp->s2io_entries[i].type ==
MSIX_ALARM_TYPE) {
- sprintf(sp->desc[i], "%s:MSI-X-%d-TX",
+ snprintf(sp->desc[i],
+ sizeof(sp->desc[i]),
+ "%s:MSI-X-%d-TX",
dev->name, i);
err = request_irq(sp->entries[i].vector,
s2io_msix_fifo_handle,
@@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
"%s: UDP Fragmentation Offload(UFO) enabled\n",
dev->name);
/* Initialize device name */
- sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name);
+ snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name,
+ sp->product_name);
if (vlan_tag_strip)
sp->vlan_strip_flag = 1;
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 613037584d08..c531c8ae1be4 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2388,7 +2388,10 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
work_done = netxen_process_rcv_ring(sds_ring, budget);
- if ((work_done < budget) && tx_complete) {
+ if (!tx_complete)
+ work_done = budget;
+
+ if (work_done < budget) {
napi_complete(&sds_ring->napi);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c
index c2f09af5c25b..4847713211ca 100644
--- a/drivers/net/ethernet/qlogic/qla3xxx.c
+++ b/drivers/net/ethernet/qlogic/qla3xxx.c
@@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
{
int i = 0;
- while (i < 10) {
- if (i)
- ssleep(1);
-
+ do {
if (ql_sem_lock(qdev,
QL_DRVR_SEM_MASK,
(QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index)
@@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev)
"driver lock acquired\n");
return 1;
}
- }
+ ssleep(1);
+ } while (++i < 10);
netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n");
return 0;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 1aa25b13ace1..2528c3fb6b90 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -505,9 +505,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
}
-static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
+static netdev_features_t qlcnic_features_check(struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features)
{
- return vxlan_gso_check(skb);
+ return vxlan_features_check(skb, features);
}
#endif
@@ -532,7 +534,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
#ifdef CONFIG_QLCNIC_VXLAN
.ndo_add_vxlan_port = qlcnic_add_vxlan_port,
.ndo_del_vxlan_port = qlcnic_del_vxlan_port,
- .ndo_gso_check = qlcnic_gso_check,
+ .ndo_features_check = qlcnic_features_check,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
.ndo_poll_controller = qlcnic_poll_controller,
@@ -2603,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
} else {
dev_err(&pdev->dev,
"%s: failed. Please Reboot\n", __func__);
+ err = -ENODEV;
goto err_out_free_hw;
}
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 6d0b9dfac313..78bb4ceb1cdd 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -787,10 +787,10 @@ static struct net_device *rtl8139_init_board(struct pci_dev *pdev)
if (rc)
goto err_out;
+ disable_dev_on_err = 1;
rc = pci_request_regions (pdev, DRV_NAME);
if (rc)
goto err_out;
- disable_dev_on_err = 1;
pci_set_master (pdev);
@@ -1110,6 +1110,7 @@ static int rtl8139_init_one(struct pci_dev *pdev,
return 0;
err_out:
+ netif_napi_del(&tp->napi);
__rtl8139_cleanup_dev (dev);
pci_disable_device (pdev);
return i;
@@ -1124,6 +1125,7 @@ static void rtl8139_remove_one(struct pci_dev *pdev)
assert (dev != NULL);
cancel_delayed_work_sync(&tp->thread);
+ netif_napi_del(&tp->napi);
unregister_netdev (dev);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index c29ba80ae02b..04283fe0e6a7 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -396,6 +396,9 @@ static const u16 sh_eth_offset_fast_sh3_sh2[SH_ETH_MAX_REGISTER_OFFSET] = {
[TSU_ADRL31] = 0x01fc,
};
+static void sh_eth_rcv_snd_disable(struct net_device *ndev);
+static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev);
+
static bool sh_eth_is_gether(struct sh_eth_private *mdp)
{
return mdp->reg_offset == sh_eth_offset_gigabit;
@@ -473,6 +476,7 @@ static struct sh_eth_cpu_data r8a777x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
.apr = 1,
.mpr = 1,
@@ -495,6 +499,9 @@ static struct sh_eth_cpu_data r8a779x_data = {
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE |
EESR_ECI,
+ .fdr_value = 0x00000f0f,
+
+ .trscer_err_mask = DESC_I_RINT8,
.apr = 1,
.mpr = 1,
@@ -856,6 +863,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd)
if (!cd->eesr_err_check)
cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK;
+
+ if (!cd->trscer_err_mask)
+ cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK;
}
static int sh_eth_check_reset(struct net_device *ndev)
@@ -1113,6 +1123,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
mdp->cur_rx = 0;
mdp->cur_tx = 0;
@@ -1126,7 +1137,6 @@ static void sh_eth_ring_format(struct net_device *ndev)
/* skb */
mdp->rx_skbuff[i] = NULL;
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[i] = skb;
if (skb == NULL)
break;
sh_eth_set_receive_align(skb);
@@ -1135,9 +1145,15 @@ static void sh_eth_ring_format(struct net_device *ndev)
rxdesc = &mdp->rx_ring[i];
/* The size of the buffer is a multiple of 16 bytes. */
rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
- dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
- DMA_FROM_DEVICE);
- rxdesc->addr = virt_to_phys(skb->data);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[i] = skb;
+ rxdesc->addr = dma_addr;
rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
/* Rx descriptor address set */
@@ -1294,7 +1310,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
/* Frame recv control (enable multiple-packets per rx irq) */
sh_eth_write(ndev, RMCR_RNC, RMCR);
- sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER);
+ sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER);
if (mdp->cd->bculr)
sh_eth_write(ndev, 0x800, BCULR); /* Burst cycle set */
@@ -1309,8 +1325,10 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
RFLR);
sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
- if (start)
+ if (start) {
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ }
/* PAUSE Prohibition */
val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
@@ -1349,6 +1367,33 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start)
return ret;
}
+static void sh_eth_dev_exit(struct net_device *ndev)
+{
+ struct sh_eth_private *mdp = netdev_priv(ndev);
+ int i;
+
+ /* Deactivate all TX descriptors, so DMA should stop at next
+ * packet boundary if it's currently running
+ */
+ for (i = 0; i < mdp->num_tx_ring; i++)
+ mdp->tx_ring[i].status &= ~cpu_to_edmac(mdp, TD_TACT);
+
+ /* Disable TX FIFO egress to MAC */
+ sh_eth_rcv_snd_disable(ndev);
+
+ /* Stop RX DMA at next packet boundary */
+ sh_eth_write(ndev, 0, EDRRR);
+
+ /* Aside from TX DMA, we can't tell when the hardware is
+ * really stopped, so we need to reset to make sure.
+ * Before doing that, wait for long enough to *probably*
+ * finish transmitting the last packet and poll stats.
+ */
+ msleep(2); /* max frame time at 10 Mbps < 1250 us */
+ sh_eth_get_stats(ndev);
+ sh_eth_reset(ndev);
+}
+
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
@@ -1393,6 +1438,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
u16 pkt_len = 0;
u32 desc_status;
int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
+ dma_addr_t dma_addr;
boguscnt = min(boguscnt, *quota);
limit = boguscnt;
@@ -1440,9 +1486,9 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
mdp->rx_skbuff[entry] = NULL;
if (mdp->cd->rpadir)
skb_reserve(skb, NET_IP_ALIGN);
- dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
- ALIGN(mdp->rx_buf_sz, 16),
- DMA_FROM_DEVICE);
+ dma_unmap_single(&ndev->dev, rxdesc->addr,
+ ALIGN(mdp->rx_buf_sz, 16),
+ DMA_FROM_DEVICE);
skb_put(skb, pkt_len);
skb->protocol = eth_type_trans(skb, ndev);
netif_receive_skb(skb);
@@ -1462,15 +1508,20 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
if (mdp->rx_skbuff[entry] == NULL) {
skb = netdev_alloc_skb(ndev, skbuff_size);
- mdp->rx_skbuff[entry] = skb;
if (skb == NULL)
break; /* Better luck next round. */
sh_eth_set_receive_align(skb);
- dma_map_single(&ndev->dev, skb->data,
- rxdesc->buffer_length, DMA_FROM_DEVICE);
+ dma_addr = dma_map_single(&ndev->dev, skb->data,
+ rxdesc->buffer_length,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&ndev->dev, dma_addr)) {
+ kfree_skb(skb);
+ break;
+ }
+ mdp->rx_skbuff[entry] = skb;
skb_checksum_none_assert(skb);
- rxdesc->addr = virt_to_phys(skb->data);
+ rxdesc->addr = dma_addr;
}
if (entry >= mdp->num_rx_ring - 1)
rxdesc->status |=
@@ -1566,7 +1617,6 @@ ignore_link:
if (intr_status & EESR_RFRMER) {
/* Receive Frame Overflow int */
ndev->stats.rx_frame_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Abort\n");
}
}
@@ -1585,13 +1635,11 @@ ignore_link:
if (intr_status & EESR_RDE) {
/* Receive Descriptor Empty int */
ndev->stats.rx_over_errors++;
- netif_err(mdp, rx_err, ndev, "Receive Descriptor Empty\n");
}
if (intr_status & EESR_RFE) {
/* Receive FIFO Overflow int */
ndev->stats.rx_fifo_errors++;
- netif_err(mdp, rx_err, ndev, "Receive FIFO Overflow\n");
}
if (!mdp->cd->no_ade && (intr_status & EESR_ADE)) {
@@ -1646,7 +1694,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
if (intr_status & (EESR_RX_CHECK | cd->tx_check | cd->eesr_err_check))
ret = IRQ_HANDLED;
else
- goto other_irq;
+ goto out;
+
+ if (unlikely(!mdp->irq_enabled)) {
+ sh_eth_write(ndev, 0, EESIPR);
+ goto out;
+ }
if (intr_status & EESR_RX_CHECK) {
if (napi_schedule_prep(&mdp->napi)) {
@@ -1677,7 +1730,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
sh_eth_error(ndev, intr_status);
}
-other_irq:
+out:
spin_unlock(&mdp->lock);
return ret;
@@ -1705,7 +1758,8 @@ static int sh_eth_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
/* Reenable Rx interrupts */
- sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
+ if (mdp->irq_enabled)
+ sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
out:
return budget - quota;
}
@@ -1820,6 +1874,9 @@ static int sh_eth_get_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_ethtool_gset(mdp->phydev, ecmd);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1834,6 +1891,9 @@ static int sh_eth_set_settings(struct net_device *ndev,
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
/* disable tx and rx */
@@ -1868,6 +1928,9 @@ static int sh_eth_nway_reset(struct net_device *ndev)
unsigned long flags;
int ret;
+ if (!mdp->phydev)
+ return -ENODEV;
+
spin_lock_irqsave(&mdp->lock, flags);
ret = phy_start_aneg(mdp->phydev);
spin_unlock_irqrestore(&mdp->lock, flags);
@@ -1952,40 +2015,50 @@ static int sh_eth_set_ringparam(struct net_device *ndev,
return -EINVAL;
if (netif_running(ndev)) {
+ netif_device_detach(ndev);
netif_tx_disable(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
- sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+
+ /* Serialise with the interrupt handler and NAPI, then
+ * disable interrupts. We have to clear the
+ * irq_enabled flag first to ensure that interrupts
+ * won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
synchronize_irq(ndev->irq);
- }
+ napi_synchronize(&mdp->napi);
+ sh_eth_write(ndev, 0x0000, EESIPR);
- /* Free all the skbuffs in the Rx queue. */
- sh_eth_ring_free(ndev);
- /* Free DMA buffer */
- sh_eth_free_dma_buffer(mdp);
+ sh_eth_dev_exit(ndev);
+
+ /* Free all the skbuffs in the Rx queue. */
+ sh_eth_ring_free(ndev);
+ /* Free DMA buffer */
+ sh_eth_free_dma_buffer(mdp);
+ }
/* Set new parameters */
mdp->num_rx_ring = ring->rx_pending;
mdp->num_tx_ring = ring->tx_pending;
- ret = sh_eth_ring_init(ndev);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_ring_init failed.\n", __func__);
- return ret;
- }
- ret = sh_eth_dev_init(ndev, false);
- if (ret < 0) {
- netdev_err(ndev, "%s: sh_eth_dev_init failed.\n", __func__);
- return ret;
- }
-
if (netif_running(ndev)) {
+ ret = sh_eth_ring_init(ndev);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_ring_init failed.\n",
+ __func__);
+ return ret;
+ }
+ ret = sh_eth_dev_init(ndev, false);
+ if (ret < 0) {
+ netdev_err(ndev, "%s: sh_eth_dev_init failed.\n",
+ __func__);
+ return ret;
+ }
+
+ mdp->irq_enabled = true;
sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);
/* Setting the Rx mode will start the Rx process. */
sh_eth_write(ndev, EDRRR_R, EDRRR);
- netif_wake_queue(ndev);
+ netif_device_attach(ndev);
}
return 0;
@@ -2101,6 +2174,9 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
}
spin_unlock_irqrestore(&mdp->lock, flags);
+ if (skb_padto(skb, ETH_ZLEN))
+ return NETDEV_TX_OK;
+
entry = mdp->cur_tx % mdp->num_tx_ring;
mdp->tx_skbuff[entry] = skb;
txdesc = &mdp->tx_ring[entry];
@@ -2110,10 +2186,11 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb->len + 2);
txdesc->addr = dma_map_single(&ndev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- if (skb->len < ETH_ZLEN)
- txdesc->buffer_length = ETH_ZLEN;
- else
- txdesc->buffer_length = skb->len;
+ if (dma_mapping_error(&ndev->dev, txdesc->addr)) {
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
+ }
+ txdesc->buffer_length = skb->len;
if (entry >= mdp->num_tx_ring - 1)
txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
@@ -2165,24 +2242,26 @@ static int sh_eth_close(struct net_device *ndev)
netif_stop_queue(ndev);
- /* Disable interrupts by clearing the interrupt mask. */
+ /* Serialise with the interrupt handler and NAPI, then disable
+ * interrupts. We have to clear the irq_enabled flag first to
+ * ensure that interrupts won't be re-enabled.
+ */
+ mdp->irq_enabled = false;
+ synchronize_irq(ndev->irq);
+ napi_disable(&mdp->napi);
sh_eth_write(ndev, 0x0000, EESIPR);
- /* Stop the chip's Tx and Rx processes. */
- sh_eth_write(ndev, 0, EDTRR);
- sh_eth_write(ndev, 0, EDRRR);
+ sh_eth_dev_exit(ndev);
- sh_eth_get_stats(ndev);
/* PHY Disconnect */
if (mdp->phydev) {
phy_stop(mdp->phydev);
phy_disconnect(mdp->phydev);
+ mdp->phydev = NULL;
}
free_irq(ndev->irq, ndev);
- napi_disable(&mdp->napi);
-
/* Free all the skbuffs in the Rx queue. */
sh_eth_ring_free(ndev);
@@ -2410,7 +2489,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev)
struct sh_eth_private *mdp = netdev_priv(ndev);
int i, ret;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return 0;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) {
@@ -2433,7 +2512,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0);
int i;
- if (unlikely(!mdp->cd->tsu))
+ if (!mdp->cd->tsu)
return;
for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) {
@@ -2443,8 +2522,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev)
}
}
-/* Multicast reception directions set */
-static void sh_eth_set_multicast_list(struct net_device *ndev)
+/* Update promiscuous flag and multicast filter */
+static void sh_eth_set_rx_mode(struct net_device *ndev)
{
struct sh_eth_private *mdp = netdev_priv(ndev);
u32 ecmr_bits;
@@ -2455,7 +2534,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
/* Initial condition is MCT = 1, PRM = 0.
* Depending on ndev->flags, set PRM or clear MCT
*/
- ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT;
+ ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM;
+ if (mdp->cd->tsu)
+ ecmr_bits |= ECMR_MCT;
if (!(ndev->flags & IFF_MULTICAST)) {
sh_eth_tsu_purge_mcast(ndev);
@@ -2484,9 +2565,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
}
}
}
- } else {
- /* Normal, unicast/broadcast-only mode. */
- ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT;
}
/* update the ethernet mode */
@@ -2694,6 +2772,7 @@ static const struct net_device_ops sh_eth_netdev_ops = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_tx_timeout = sh_eth_tx_timeout,
.ndo_do_ioctl = sh_eth_do_ioctl,
.ndo_validate_addr = eth_validate_addr,
@@ -2706,7 +2785,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = {
.ndo_stop = sh_eth_close,
.ndo_start_xmit = sh_eth_start_xmit,
.ndo_get_stats = sh_eth_get_stats,
- .ndo_set_rx_mode = sh_eth_set_multicast_list,
+ .ndo_set_rx_mode = sh_eth_set_rx_mode,
.ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid,
.ndo_tx_timeout = sh_eth_tx_timeout,
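The irq_enabled flag threaded through the sh_eth changes above implements a small shutdown handshake: teardown clears the flag first, synchronises with the interrupt handler and NAPI, and only then masks EESIPR, while the handler and poller refuse to re-enable interrupts once the flag is clear. A condensed, single-threaded sketch of that ordering (fake_eth and the helpers are stand-ins for the driver state; the real code additionally needs synchronize_irq()/napi_synchronize() between the steps):

/* Sketch only: clear a software flag before masking the hardware source,
 * and let the late-running poller honour the flag instead of re-enabling. */
#include <stdbool.h>
#include <stdio.h>

struct fake_eth {
	bool irq_enabled;	/* software permission to (re)enable interrupts */
	bool hw_irq_masked;	/* stands in for the EESIPR mask register */
};

static void napi_poll_done(struct fake_eth *p)
{
	/* re-enable interrupts only if teardown has not started */
	if (p->irq_enabled)
		p->hw_irq_masked = false;
}

static void teardown(struct fake_eth *p)
{
	p->irq_enabled = false;		/* step 1: forbid re-enabling */
	/* step 2: wait for in-flight handlers (omitted in this sketch) */
	p->hw_irq_masked = true;	/* step 3: mask the hardware source */
}

int main(void)
{
	struct fake_eth p = { .irq_enabled = true, .hw_irq_masked = false };

	teardown(&p);
	napi_poll_done(&p);	/* runs late, but leaves interrupts masked */
	printf("interrupts masked: %s\n", p.hw_irq_masked ? "yes" : "no");
	return 0;
}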
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index 22301bf9c21d..332d3c16d483 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -369,6 +369,8 @@ enum DESC_I_BIT {
DESC_I_RINT1 = 0x0001,
};
+#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2)
+
/* RPADIR */
enum RPADIR_BIT {
RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000,
@@ -470,6 +472,9 @@ struct sh_eth_cpu_data {
unsigned long tx_check;
unsigned long eesr_err_check;
+ /* Error mask */
+ unsigned long trscer_err_mask;
+
/* hardware features */
unsigned long irq_flags; /* IRQ configuration flags */
unsigned no_psr:1; /* EtherC DO NOT have PSR */
@@ -508,6 +513,7 @@ struct sh_eth_private {
u32 rx_buf_sz; /* Based on MTU+slack. */
int edmac_endian;
struct napi_struct napi;
+ bool irq_enabled;
/* MII transceiver section. */
u32 phy_id; /* PHY ID */
struct mii_bus *mii_bus; /* MDIO bus control */
diff --git a/drivers/net/ethernet/s6gmac.c b/drivers/net/ethernet/s6gmac.c
deleted file mode 100644
index f537cbea20e5..000000000000
--- a/drivers/net/ethernet/s6gmac.c
+++ /dev/null
@@ -1,1058 +0,0 @@
-/*
- * Ethernet driver for S6105 on chip network device
- * (c)2008 emlix GmbH http://www.emlix.com
- * Authors: Oskar Schirmer <oskar@scara.com>
- * Daniel Gloeckner <dg@emlix.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/types.h>
-#include <linux/delay.h>
-#include <linux/spinlock.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/if.h>
-#include <linux/stddef.h>
-#include <linux/mii.h>
-#include <linux/phy.h>
-#include <linux/platform_device.h>
-#include <variant/hardware.h>
-#include <variant/dmac.h>
-
-#define DRV_NAME "s6gmac"
-#define DRV_PRMT DRV_NAME ": "
-
-
-/* register declarations */
-
-#define S6_GMAC_MACCONF1 0x000
-#define S6_GMAC_MACCONF1_TXENA 0
-#define S6_GMAC_MACCONF1_SYNCTX 1
-#define S6_GMAC_MACCONF1_RXENA 2
-#define S6_GMAC_MACCONF1_SYNCRX 3
-#define S6_GMAC_MACCONF1_TXFLOWCTRL 4
-#define S6_GMAC_MACCONF1_RXFLOWCTRL 5
-#define S6_GMAC_MACCONF1_LOOPBACK 8
-#define S6_GMAC_MACCONF1_RESTXFUNC 16
-#define S6_GMAC_MACCONF1_RESRXFUNC 17
-#define S6_GMAC_MACCONF1_RESTXMACCTRL 18
-#define S6_GMAC_MACCONF1_RESRXMACCTRL 19
-#define S6_GMAC_MACCONF1_SIMULRES 30
-#define S6_GMAC_MACCONF1_SOFTRES 31
-#define S6_GMAC_MACCONF2 0x004
-#define S6_GMAC_MACCONF2_FULL 0
-#define S6_GMAC_MACCONF2_CRCENA 1
-#define S6_GMAC_MACCONF2_PADCRCENA 2
-#define S6_GMAC_MACCONF2_LENGTHFCHK 4
-#define S6_GMAC_MACCONF2_HUGEFRAMENA 5
-#define S6_GMAC_MACCONF2_IFMODE 8
-#define S6_GMAC_MACCONF2_IFMODE_NIBBLE 1
-#define S6_GMAC_MACCONF2_IFMODE_BYTE 2
-#define S6_GMAC_MACCONF2_IFMODE_MASK 3
-#define S6_GMAC_MACCONF2_PREAMBLELEN 12
-#define S6_GMAC_MACCONF2_PREAMBLELEN_MASK 0x0F
-#define S6_GMAC_MACIPGIFG 0x008
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP 0
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP_MASK 0x7F
-#define S6_GMAC_MACIPGIFG_MINIFGENFORCE 8
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP2 16
-#define S6_GMAC_MACIPGIFG_B2BINTERPGAP1 24
-#define S6_GMAC_MACHALFDUPLEX 0x00C
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN 0
-#define S6_GMAC_MACHALFDUPLEX_COLLISWIN_MASK 0x3F
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX 12
-#define S6_GMAC_MACHALFDUPLEX_RETXMAX_MASK 0x0F
-#define S6_GMAC_MACHALFDUPLEX_EXCESSDEF 16
-#define S6_GMAC_MACHALFDUPLEX_NOBACKOFF 17
-#define S6_GMAC_MACHALFDUPLEX_BPNOBCKOF 18
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBENA 19
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTRN 20
-#define S6_GMAC_MACHALFDUPLEX_ALTBEBTR_MASK 0x0F
-#define S6_GMAC_MACMAXFRAMELEN 0x010
-#define S6_GMAC_MACMIICONF 0x020
-#define S6_GMAC_MACMIICONF_CSEL 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV10 0
-#define S6_GMAC_MACMIICONF_CSEL_DIV12 1
-#define S6_GMAC_MACMIICONF_CSEL_DIV14 2
-#define S6_GMAC_MACMIICONF_CSEL_DIV18 3
-#define S6_GMAC_MACMIICONF_CSEL_DIV24 4
-#define S6_GMAC_MACMIICONF_CSEL_DIV34 5
-#define S6_GMAC_MACMIICONF_CSEL_DIV68 6
-#define S6_GMAC_MACMIICONF_CSEL_DIV168 7
-#define S6_GMAC_MACMIICONF_CSEL_MASK 7
-#define S6_GMAC_MACMIICONF_PREAMBLESUPR 4
-#define S6_GMAC_MACMIICONF_SCANAUTOINCR 5
-#define S6_GMAC_MACMIICMD 0x024
-#define S6_GMAC_MACMIICMD_READ 0
-#define S6_GMAC_MACMIICMD_SCAN 1
-#define S6_GMAC_MACMIIADDR 0x028
-#define S6_GMAC_MACMIIADDR_REG 0
-#define S6_GMAC_MACMIIADDR_REG_MASK 0x1F
-#define S6_GMAC_MACMIIADDR_PHY 8
-#define S6_GMAC_MACMIIADDR_PHY_MASK 0x1F
-#define S6_GMAC_MACMIICTRL 0x02C
-#define S6_GMAC_MACMIISTAT 0x030
-#define S6_GMAC_MACMIIINDI 0x034
-#define S6_GMAC_MACMIIINDI_BUSY 0
-#define S6_GMAC_MACMIIINDI_SCAN 1
-#define S6_GMAC_MACMIIINDI_INVAL 2
-#define S6_GMAC_MACINTERFSTAT 0x03C
-#define S6_GMAC_MACINTERFSTAT_LINKFAIL 3
-#define S6_GMAC_MACINTERFSTAT_EXCESSDEF 9
-#define S6_GMAC_MACSTATADDR1 0x040
-#define S6_GMAC_MACSTATADDR2 0x044
-
-#define S6_GMAC_FIFOCONF0 0x048
-#define S6_GMAC_FIFOCONF0_HSTRSTWT 0
-#define S6_GMAC_FIFOCONF0_HSTRSTSR 1
-#define S6_GMAC_FIFOCONF0_HSTRSTFR 2
-#define S6_GMAC_FIFOCONF0_HSTRSTST 3
-#define S6_GMAC_FIFOCONF0_HSTRSTFT 4
-#define S6_GMAC_FIFOCONF0_WTMENREQ 8
-#define S6_GMAC_FIFOCONF0_SRFENREQ 9
-#define S6_GMAC_FIFOCONF0_FRFENREQ 10
-#define S6_GMAC_FIFOCONF0_STFENREQ 11
-#define S6_GMAC_FIFOCONF0_FTFENREQ 12
-#define S6_GMAC_FIFOCONF0_WTMENRPLY 16
-#define S6_GMAC_FIFOCONF0_SRFENRPLY 17
-#define S6_GMAC_FIFOCONF0_FRFENRPLY 18
-#define S6_GMAC_FIFOCONF0_STFENRPLY 19
-#define S6_GMAC_FIFOCONF0_FTFENRPLY 20
-#define S6_GMAC_FIFOCONF1 0x04C
-#define S6_GMAC_FIFOCONF2 0x050
-#define S6_GMAC_FIFOCONF2_CFGLWM 0
-#define S6_GMAC_FIFOCONF2_CFGHWM 16
-#define S6_GMAC_FIFOCONF3 0x054
-#define S6_GMAC_FIFOCONF3_CFGFTTH 0
-#define S6_GMAC_FIFOCONF3_CFGHWMFT 16
-#define S6_GMAC_FIFOCONF4 0x058
-#define S6_GMAC_FIFOCONF_RSV_PREVDROP 0
-#define S6_GMAC_FIFOCONF_RSV_RUNT 1
-#define S6_GMAC_FIFOCONF_RSV_FALSECAR 2
-#define S6_GMAC_FIFOCONF_RSV_CODEERR 3
-#define S6_GMAC_FIFOCONF_RSV_CRCERR 4
-#define S6_GMAC_FIFOCONF_RSV_LENGTHERR 5
-#define S6_GMAC_FIFOCONF_RSV_LENRANGE 6
-#define S6_GMAC_FIFOCONF_RSV_OK 7
-#define S6_GMAC_FIFOCONF_RSV_MULTICAST 8
-#define S6_GMAC_FIFOCONF_RSV_BROADCAST 9
-#define S6_GMAC_FIFOCONF_RSV_DRIBBLE 10
-#define S6_GMAC_FIFOCONF_RSV_CTRLFRAME 11
-#define S6_GMAC_FIFOCONF_RSV_PAUSECTRL 12
-#define S6_GMAC_FIFOCONF_RSV_UNOPCODE 13
-#define S6_GMAC_FIFOCONF_RSV_VLANTAG 14
-#define S6_GMAC_FIFOCONF_RSV_LONGEVENT 15
-#define S6_GMAC_FIFOCONF_RSV_TRUNCATED 16
-#define S6_GMAC_FIFOCONF_RSV_MASK 0x3FFFF
-#define S6_GMAC_FIFOCONF5 0x05C
-#define S6_GMAC_FIFOCONF5_DROPLT64 18
-#define S6_GMAC_FIFOCONF5_CFGBYTM 19
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE 20
-#define S6_GMAC_FIFOCONF5_RXDROPSIZE_MASK 0xF
-
-#define S6_GMAC_STAT_REGS 0x080
-#define S6_GMAC_STAT_SIZE_MIN 12
-#define S6_GMAC_STATTR64 0x080
-#define S6_GMAC_STATTR64_SIZE 18
-#define S6_GMAC_STATTR127 0x084
-#define S6_GMAC_STATTR127_SIZE 18
-#define S6_GMAC_STATTR255 0x088
-#define S6_GMAC_STATTR255_SIZE 18
-#define S6_GMAC_STATTR511 0x08C
-#define S6_GMAC_STATTR511_SIZE 18
-#define S6_GMAC_STATTR1K 0x090
-#define S6_GMAC_STATTR1K_SIZE 18
-#define S6_GMAC_STATTRMAX 0x094
-#define S6_GMAC_STATTRMAX_SIZE 18
-#define S6_GMAC_STATTRMGV 0x098
-#define S6_GMAC_STATTRMGV_SIZE 18
-#define S6_GMAC_STATRBYT 0x09C
-#define S6_GMAC_STATRBYT_SIZE 24
-#define S6_GMAC_STATRPKT 0x0A0
-#define S6_GMAC_STATRPKT_SIZE 18
-#define S6_GMAC_STATRFCS 0x0A4
-#define S6_GMAC_STATRFCS_SIZE 12
-#define S6_GMAC_STATRMCA 0x0A8
-#define S6_GMAC_STATRMCA_SIZE 18
-#define S6_GMAC_STATRBCA 0x0AC
-#define S6_GMAC_STATRBCA_SIZE 22
-#define S6_GMAC_STATRXCF 0x0B0
-#define S6_GMAC_STATRXCF_SIZE 18
-#define S6_GMAC_STATRXPF 0x0B4
-#define S6_GMAC_STATRXPF_SIZE 12
-#define S6_GMAC_STATRXUO 0x0B8
-#define S6_GMAC_STATRXUO_SIZE 12
-#define S6_GMAC_STATRALN 0x0BC
-#define S6_GMAC_STATRALN_SIZE 12
-#define S6_GMAC_STATRFLR 0x0C0
-#define S6_GMAC_STATRFLR_SIZE 16
-#define S6_GMAC_STATRCDE 0x0C4
-#define S6_GMAC_STATRCDE_SIZE 12
-#define S6_GMAC_STATRCSE 0x0C8
-#define S6_GMAC_STATRCSE_SIZE 12
-#define S6_GMAC_STATRUND 0x0CC
-#define S6_GMAC_STATRUND_SIZE 12
-#define S6_GMAC_STATROVR 0x0D0
-#define S6_GMAC_STATROVR_SIZE 12
-#define S6_GMAC_STATRFRG 0x0D4
-#define S6_GMAC_STATRFRG_SIZE 12
-#define S6_GMAC_STATRJBR 0x0D8
-#define S6_GMAC_STATRJBR_SIZE 12
-#define S6_GMAC_STATRDRP 0x0DC
-#define S6_GMAC_STATRDRP_SIZE 12
-#define S6_GMAC_STATTBYT 0x0E0
-#define S6_GMAC_STATTBYT_SIZE 24
-#define S6_GMAC_STATTPKT 0x0E4
-#define S6_GMAC_STATTPKT_SIZE 18
-#define S6_GMAC_STATTMCA 0x0E8
-#define S6_GMAC_STATTMCA_SIZE 18
-#define S6_GMAC_STATTBCA 0x0EC
-#define S6_GMAC_STATTBCA_SIZE 18
-#define S6_GMAC_STATTXPF 0x0F0
-#define S6_GMAC_STATTXPF_SIZE 12
-#define S6_GMAC_STATTDFR 0x0F4
-#define S6_GMAC_STATTDFR_SIZE 12
-#define S6_GMAC_STATTEDF 0x0F8
-#define S6_GMAC_STATTEDF_SIZE 12
-#define S6_GMAC_STATTSCL 0x0FC
-#define S6_GMAC_STATTSCL_SIZE 12
-#define S6_GMAC_STATTMCL 0x100
-#define S6_GMAC_STATTMCL_SIZE 12
-#define S6_GMAC_STATTLCL 0x104
-#define S6_GMAC_STATTLCL_SIZE 12
-#define S6_GMAC_STATTXCL 0x108
-#define S6_GMAC_STATTXCL_SIZE 12
-#define S6_GMAC_STATTNCL 0x10C
-#define S6_GMAC_STATTNCL_SIZE 13
-#define S6_GMAC_STATTPFH 0x110
-#define S6_GMAC_STATTPFH_SIZE 12
-#define S6_GMAC_STATTDRP 0x114
-#define S6_GMAC_STATTDRP_SIZE 12
-#define S6_GMAC_STATTJBR 0x118
-#define S6_GMAC_STATTJBR_SIZE 12
-#define S6_GMAC_STATTFCS 0x11C
-#define S6_GMAC_STATTFCS_SIZE 12
-#define S6_GMAC_STATTXCF 0x120
-#define S6_GMAC_STATTXCF_SIZE 12
-#define S6_GMAC_STATTOVR 0x124
-#define S6_GMAC_STATTOVR_SIZE 12
-#define S6_GMAC_STATTUND 0x128
-#define S6_GMAC_STATTUND_SIZE 12
-#define S6_GMAC_STATTFRG 0x12C
-#define S6_GMAC_STATTFRG_SIZE 12
-#define S6_GMAC_STATCARRY(n) (0x130 + 4*(n))
-#define S6_GMAC_STATCARRYMSK(n) (0x138 + 4*(n))
-#define S6_GMAC_STATCARRY1_RDRP 0
-#define S6_GMAC_STATCARRY1_RJBR 1
-#define S6_GMAC_STATCARRY1_RFRG 2
-#define S6_GMAC_STATCARRY1_ROVR 3
-#define S6_GMAC_STATCARRY1_RUND 4
-#define S6_GMAC_STATCARRY1_RCSE 5
-#define S6_GMAC_STATCARRY1_RCDE 6
-#define S6_GMAC_STATCARRY1_RFLR 7
-#define S6_GMAC_STATCARRY1_RALN 8
-#define S6_GMAC_STATCARRY1_RXUO 9
-#define S6_GMAC_STATCARRY1_RXPF 10
-#define S6_GMAC_STATCARRY1_RXCF 11
-#define S6_GMAC_STATCARRY1_RBCA 12
-#define S6_GMAC_STATCARRY1_RMCA 13
-#define S6_GMAC_STATCARRY1_RFCS 14
-#define S6_GMAC_STATCARRY1_RPKT 15
-#define S6_GMAC_STATCARRY1_RBYT 16
-#define S6_GMAC_STATCARRY1_TRMGV 25
-#define S6_GMAC_STATCARRY1_TRMAX 26
-#define S6_GMAC_STATCARRY1_TR1K 27
-#define S6_GMAC_STATCARRY1_TR511 28
-#define S6_GMAC_STATCARRY1_TR255 29
-#define S6_GMAC_STATCARRY1_TR127 30
-#define S6_GMAC_STATCARRY1_TR64 31
-#define S6_GMAC_STATCARRY2_TDRP 0
-#define S6_GMAC_STATCARRY2_TPFH 1
-#define S6_GMAC_STATCARRY2_TNCL 2
-#define S6_GMAC_STATCARRY2_TXCL 3
-#define S6_GMAC_STATCARRY2_TLCL 4
-#define S6_GMAC_STATCARRY2_TMCL 5
-#define S6_GMAC_STATCARRY2_TSCL 6
-#define S6_GMAC_STATCARRY2_TEDF 7
-#define S6_GMAC_STATCARRY2_TDFR 8
-#define S6_GMAC_STATCARRY2_TXPF 9
-#define S6_GMAC_STATCARRY2_TBCA 10
-#define S6_GMAC_STATCARRY2_TMCA 11
-#define S6_GMAC_STATCARRY2_TPKT 12
-#define S6_GMAC_STATCARRY2_TBYT 13
-#define S6_GMAC_STATCARRY2_TFRG 14
-#define S6_GMAC_STATCARRY2_TUND 15
-#define S6_GMAC_STATCARRY2_TOVR 16
-#define S6_GMAC_STATCARRY2_TXCF 17
-#define S6_GMAC_STATCARRY2_TFCS 18
-#define S6_GMAC_STATCARRY2_TJBR 19
-
-#define S6_GMAC_HOST_PBLKCTRL 0x140
-#define S6_GMAC_HOST_PBLKCTRL_TXENA 0
-#define S6_GMAC_HOST_PBLKCTRL_RXENA 1
-#define S6_GMAC_HOST_PBLKCTRL_TXSRES 2
-#define S6_GMAC_HOST_PBLKCTRL_RXSRES 3
-#define S6_GMAC_HOST_PBLKCTRL_TXBSIZ 8
-#define S6_GMAC_HOST_PBLKCTRL_RXBSIZ 12
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_16 4
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_32 5
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_64 6
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_128 7
-#define S6_GMAC_HOST_PBLKCTRL_SIZ_MASK 0xF
-#define S6_GMAC_HOST_PBLKCTRL_STATENA 16
-#define S6_GMAC_HOST_PBLKCTRL_STATAUTOZ 17
-#define S6_GMAC_HOST_PBLKCTRL_STATCLEAR 18
-#define S6_GMAC_HOST_PBLKCTRL_RGMII 19
-#define S6_GMAC_HOST_INTMASK 0x144
-#define S6_GMAC_HOST_INTSTAT 0x148
-#define S6_GMAC_HOST_INT_TXBURSTOVER 3
-#define S6_GMAC_HOST_INT_TXPREWOVER 4
-#define S6_GMAC_HOST_INT_RXBURSTUNDER 5
-#define S6_GMAC_HOST_INT_RXPOSTRFULL 6
-#define S6_GMAC_HOST_INT_RXPOSTRUNDER 7
-#define S6_GMAC_HOST_RXFIFOHWM 0x14C
-#define S6_GMAC_HOST_CTRLFRAMXP 0x150
-#define S6_GMAC_HOST_DSTADDRLO(n) (0x160 + 8*(n))
-#define S6_GMAC_HOST_DSTADDRHI(n) (0x164 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKLO(n) (0x180 + 8*(n))
-#define S6_GMAC_HOST_DSTMASKHI(n) (0x184 + 8*(n))
-
-#define S6_GMAC_BURST_PREWR 0x1B0
-#define S6_GMAC_BURST_PREWR_LEN 0
-#define S6_GMAC_BURST_PREWR_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_PREWR_CFE 20
-#define S6_GMAC_BURST_PREWR_PPE 21
-#define S6_GMAC_BURST_PREWR_FCS 22
-#define S6_GMAC_BURST_PREWR_PAD 23
-#define S6_GMAC_BURST_POSTRD 0x1D0
-#define S6_GMAC_BURST_POSTRD_LEN 0
-#define S6_GMAC_BURST_POSTRD_LEN_MASK ((1 << 20) - 1)
-#define S6_GMAC_BURST_POSTRD_DROP 20
-
-
-/* data handling */
-
-#define S6_NUM_TX_SKB 8 /* must be larger than TX fifo size */
-#define S6_NUM_RX_SKB 16
-#define S6_MAX_FRLEN 1536
-
-struct s6gmac {
- u32 reg;
- u32 tx_dma;
- u32 rx_dma;
- u32 io;
- u8 tx_chan;
- u8 rx_chan;
- spinlock_t lock;
- u8 tx_skb_i, tx_skb_o;
- u8 rx_skb_i, rx_skb_o;
- struct sk_buff *tx_skb[S6_NUM_TX_SKB];
- struct sk_buff *rx_skb[S6_NUM_RX_SKB];
- unsigned long carry[sizeof(struct net_device_stats) / sizeof(long)];
- unsigned long stats[sizeof(struct net_device_stats) / sizeof(long)];
- struct phy_device *phydev;
- struct {
- struct mii_bus *bus;
- int irq[PHY_MAX_ADDR];
- } mii;
- struct {
- unsigned int mbit;
- u8 giga;
- u8 isup;
- u8 full;
- } link;
-};
-
-static void s6gmac_rx_fillfifo(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct sk_buff *skb;
- while ((((u8)(pd->rx_skb_i - pd->rx_skb_o)) < S6_NUM_RX_SKB) &&
- (!s6dmac_fifo_full(pd->rx_dma, pd->rx_chan)) &&
- (skb = netdev_alloc_skb(dev, S6_MAX_FRLEN + 2))) {
- pd->rx_skb[(pd->rx_skb_i++) % S6_NUM_RX_SKB] = skb;
- s6dmac_put_fifo_cache(pd->rx_dma, pd->rx_chan,
- pd->io, (u32)skb->data, S6_MAX_FRLEN);
- }
-}
-
-static void s6gmac_rx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 pfx;
- struct sk_buff *skb;
- while (((u8)(pd->rx_skb_i - pd->rx_skb_o)) >
- s6dmac_pending_count(pd->rx_dma, pd->rx_chan)) {
- skb = pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB];
- pfx = readl(pd->reg + S6_GMAC_BURST_POSTRD);
- if (pfx & (1 << S6_GMAC_BURST_POSTRD_DROP)) {
- dev_kfree_skb_irq(skb);
- } else {
- skb_put(skb, (pfx >> S6_GMAC_BURST_POSTRD_LEN)
- & S6_GMAC_BURST_POSTRD_LEN_MASK);
- skb->protocol = eth_type_trans(skb, dev);
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- netif_rx(skb);
- }
- }
-}
-
-static void s6gmac_tx_interrupt(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- while (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >
- s6dmac_pending_count(pd->tx_dma, pd->tx_chan)) {
- dev_kfree_skb_irq(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- }
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
-}
-
-struct s6gmac_statinf {
- unsigned reg_size : 4; /* 0: unused */
- unsigned reg_off : 6;
- unsigned net_index : 6;
-};
-
-#define S6_STATS_B (8 * sizeof(u32))
-#define S6_STATS_C(b, r, f) [b] = { \
- BUILD_BUG_ON_ZERO(r##_SIZE < S6_GMAC_STAT_SIZE_MIN) + \
- BUILD_BUG_ON_ZERO((r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1)) \
- >= (1<<4)) + \
- r##_SIZE - (S6_GMAC_STAT_SIZE_MIN - 1), \
- BUILD_BUG_ON_ZERO(((unsigned)((r - S6_GMAC_STAT_REGS) / sizeof(u32))) \
- >= ((1<<6)-1)) + \
- (r - S6_GMAC_STAT_REGS) / sizeof(u32), \
- BUILD_BUG_ON_ZERO((offsetof(struct net_device_stats, f)) \
- % sizeof(unsigned long)) + \
- BUILD_BUG_ON_ZERO((((unsigned)(offsetof(struct net_device_stats, f)) \
- / sizeof(unsigned long)) >= (1<<6))) + \
- BUILD_BUG_ON_ZERO((sizeof(((struct net_device_stats *)0)->f) \
- != sizeof(unsigned long))) + \
- (offsetof(struct net_device_stats, f)) / sizeof(unsigned long)},
-
-static const struct s6gmac_statinf statinf[2][S6_STATS_B] = { {
- S6_STATS_C(S6_GMAC_STATCARRY1_RBYT, S6_GMAC_STATRBYT, rx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY1_RPKT, S6_GMAC_STATRPKT, rx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFCS, S6_GMAC_STATRFCS, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RMCA, S6_GMAC_STATRMCA, multicast)
- S6_STATS_C(S6_GMAC_STATCARRY1_RALN, S6_GMAC_STATRALN, rx_frame_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFLR, S6_GMAC_STATRFLR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RCDE, S6_GMAC_STATRCDE, rx_missed_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RUND, S6_GMAC_STATRUND, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_ROVR, S6_GMAC_STATROVR, rx_length_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RFRG, S6_GMAC_STATRFRG, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RJBR, S6_GMAC_STATRJBR, rx_crc_errors)
- S6_STATS_C(S6_GMAC_STATCARRY1_RDRP, S6_GMAC_STATRDRP, rx_dropped)
-}, {
- S6_STATS_C(S6_GMAC_STATCARRY2_TBYT, S6_GMAC_STATTBYT, tx_bytes)
- S6_STATS_C(S6_GMAC_STATCARRY2_TPKT, S6_GMAC_STATTPKT, tx_packets)
- S6_STATS_C(S6_GMAC_STATCARRY2_TEDF, S6_GMAC_STATTEDF, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TXCL, S6_GMAC_STATTXCL, tx_aborted_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TNCL, S6_GMAC_STATTNCL, collisions)
- S6_STATS_C(S6_GMAC_STATCARRY2_TDRP, S6_GMAC_STATTDRP, tx_dropped)
- S6_STATS_C(S6_GMAC_STATCARRY2_TJBR, S6_GMAC_STATTJBR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFCS, S6_GMAC_STATTFCS, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TOVR, S6_GMAC_STATTOVR, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TUND, S6_GMAC_STATTUND, tx_errors)
- S6_STATS_C(S6_GMAC_STATCARRY2_TFRG, S6_GMAC_STATTFRG, tx_errors)
-} };
-
-static void s6gmac_stats_collect(struct s6gmac *pd,
- const struct s6gmac_statinf *inf)
-{
- int b;
- for (b = 0; b < S6_STATS_B; b++) {
- if (inf[b].reg_size) {
- pd->stats[inf[b].net_index] +=
- readl(pd->reg + S6_GMAC_STAT_REGS
- + sizeof(u32) * inf[b].reg_off);
- }
- }
-}
-
-static void s6gmac_stats_carry(struct s6gmac *pd,
- const struct s6gmac_statinf *inf, u32 mask)
-{
- int b;
- while (mask) {
- b = fls(mask) - 1;
- mask &= ~(1 << b);
- pd->carry[inf[b].net_index] += (1 << inf[b].reg_size);
- }
-}
-
-static inline u32 s6gmac_stats_pending(struct s6gmac *pd, int carry)
-{
- int r = readl(pd->reg + S6_GMAC_STATCARRY(carry)) &
- ~readl(pd->reg + S6_GMAC_STATCARRYMSK(carry));
- return r;
-}
-
-static inline void s6gmac_stats_interrupt(struct s6gmac *pd, int carry)
-{
- u32 mask;
- mask = s6gmac_stats_pending(pd, carry);
- if (mask) {
- writel(mask, pd->reg + S6_GMAC_STATCARRY(carry));
- s6gmac_stats_carry(pd, &statinf[carry][0], mask);
- }
-}
-
-static irqreturn_t s6gmac_interrupt(int irq, void *dev_id)
-{
- struct net_device *dev = (struct net_device *)dev_id;
- struct s6gmac *pd = netdev_priv(dev);
- if (!dev)
- return IRQ_NONE;
- spin_lock(&pd->lock);
- if (s6dmac_termcnt_irq(pd->rx_dma, pd->rx_chan))
- s6gmac_rx_interrupt(dev);
- s6gmac_rx_fillfifo(dev);
- if (s6dmac_termcnt_irq(pd->tx_dma, pd->tx_chan))
- s6gmac_tx_interrupt(dev);
- s6gmac_stats_interrupt(pd, 0);
- s6gmac_stats_interrupt(pd, 1);
- spin_unlock(&pd->lock);
- return IRQ_HANDLED;
-}
-
-static inline void s6gmac_set_dstaddr(struct s6gmac *pd, int n,
- u32 addrlo, u32 addrhi, u32 masklo, u32 maskhi)
-{
- writel(addrlo, pd->reg + S6_GMAC_HOST_DSTADDRLO(n));
- writel(addrhi, pd->reg + S6_GMAC_HOST_DSTADDRHI(n));
- writel(masklo, pd->reg + S6_GMAC_HOST_DSTMASKLO(n));
- writel(maskhi, pd->reg + S6_GMAC_HOST_DSTMASKHI(n));
-}
-
-static inline void s6gmac_stop_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- writel(0, pd->reg + S6_GMAC_MACCONF1);
-}
-
-static inline void s6gmac_init_device(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int is_rgmii = !!(pd->phydev->supported
- & (SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half));
-#if 0
- writel(1 << S6_GMAC_MACCONF1_SYNCTX |
- 1 << S6_GMAC_MACCONF1_SYNCRX |
- 1 << S6_GMAC_MACCONF1_TXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RXFLOWCTRL |
- 1 << S6_GMAC_MACCONF1_RESTXFUNC |
- 1 << S6_GMAC_MACCONF1_RESRXFUNC |
- 1 << S6_GMAC_MACCONF1_RESTXMACCTRL |
- 1 << S6_GMAC_MACCONF1_RESRXMACCTRL,
- pd->reg + S6_GMAC_MACCONF1);
-#endif
- writel(1 << S6_GMAC_MACCONF1_SOFTRES, pd->reg + S6_GMAC_MACCONF1);
- udelay(1000);
- writel(1 << S6_GMAC_MACCONF1_TXENA | 1 << S6_GMAC_MACCONF1_RXENA,
- pd->reg + S6_GMAC_MACCONF1);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXSRES |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXSRES,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
- writel(1 << S6_GMAC_MACCONF1_TXENA |
- 1 << S6_GMAC_MACCONF1_RXENA |
- (dev->flags & IFF_LOOPBACK ? 1 : 0)
- << S6_GMAC_MACCONF1_LOOPBACK,
- pd->reg + S6_GMAC_MACCONF1);
- writel(dev->mtu && (dev->mtu < (S6_MAX_FRLEN - ETH_HLEN-ETH_FCS_LEN)) ?
- dev->mtu+ETH_HLEN+ETH_FCS_LEN : S6_MAX_FRLEN,
- pd->reg + S6_GMAC_MACMAXFRAMELEN);
- writel((pd->link.full ? 1 : 0) << S6_GMAC_MACCONF2_FULL |
- 1 << S6_GMAC_MACCONF2_PADCRCENA |
- 1 << S6_GMAC_MACCONF2_LENGTHFCHK |
- (pd->link.giga ?
- S6_GMAC_MACCONF2_IFMODE_BYTE :
- S6_GMAC_MACCONF2_IFMODE_NIBBLE)
- << S6_GMAC_MACCONF2_IFMODE |
- 7 << S6_GMAC_MACCONF2_PREAMBLELEN,
- pd->reg + S6_GMAC_MACCONF2);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR1);
- writel(0, pd->reg + S6_GMAC_MACSTATADDR2);
- writel(1 << S6_GMAC_FIFOCONF0_WTMENREQ |
- 1 << S6_GMAC_FIFOCONF0_SRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FRFENREQ |
- 1 << S6_GMAC_FIFOCONF0_STFENREQ |
- 1 << S6_GMAC_FIFOCONF0_FTFENREQ,
- pd->reg + S6_GMAC_FIFOCONF0);
- writel(128 << S6_GMAC_FIFOCONF3_CFGFTTH |
- 128 << S6_GMAC_FIFOCONF3_CFGHWMFT,
- pd->reg + S6_GMAC_FIFOCONF3);
- writel((S6_GMAC_FIFOCONF_RSV_MASK & ~(
- 1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_OK |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED)) |
- 1 << S6_GMAC_FIFOCONF5_DROPLT64 |
- pd->link.giga << S6_GMAC_FIFOCONF5_CFGBYTM |
- 1 << S6_GMAC_FIFOCONF5_RXDROPSIZE,
- pd->reg + S6_GMAC_FIFOCONF5);
- writel(1 << S6_GMAC_FIFOCONF_RSV_RUNT |
- 1 << S6_GMAC_FIFOCONF_RSV_CRCERR |
- 1 << S6_GMAC_FIFOCONF_RSV_DRIBBLE |
- 1 << S6_GMAC_FIFOCONF_RSV_CTRLFRAME |
- 1 << S6_GMAC_FIFOCONF_RSV_PAUSECTRL |
- 1 << S6_GMAC_FIFOCONF_RSV_UNOPCODE |
- 1 << S6_GMAC_FIFOCONF_RSV_TRUNCATED,
- pd->reg + S6_GMAC_FIFOCONF4);
- s6gmac_set_dstaddr(pd, 0,
- 0xFFFFFFFF, 0x0000FFFF, 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 1,
- dev->dev_addr[5] |
- dev->dev_addr[4] << 8 |
- dev->dev_addr[3] << 16 |
- dev->dev_addr[2] << 24,
- dev->dev_addr[1] |
- dev->dev_addr[0] << 8,
- 0xFFFFFFFF, 0x0000FFFF);
- s6gmac_set_dstaddr(pd, 2,
- 0x00000000, 0x00000100, 0x00000000, 0x00000100);
- s6gmac_set_dstaddr(pd, 3,
- 0x00000000, 0x00000000, 0x00000000, 0x00000000);
- writel(1 << S6_GMAC_HOST_PBLKCTRL_TXENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_RXENA |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_TXBSIZ |
- S6_GMAC_HOST_PBLKCTRL_SIZ_128 << S6_GMAC_HOST_PBLKCTRL_RXBSIZ |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATENA |
- 1 << S6_GMAC_HOST_PBLKCTRL_STATCLEAR |
- is_rgmii << S6_GMAC_HOST_PBLKCTRL_RGMII,
- pd->reg + S6_GMAC_HOST_PBLKCTRL);
-}
-
-static void s6mii_enable(struct s6gmac *pd)
-{
- writel(readl(pd->reg + S6_GMAC_MACCONF1) &
- ~(1 << S6_GMAC_MACCONF1_SOFTRES),
- pd->reg + S6_GMAC_MACCONF1);
- writel((readl(pd->reg + S6_GMAC_MACMIICONF)
- & ~(S6_GMAC_MACMIICONF_CSEL_MASK << S6_GMAC_MACMIICONF_CSEL))
- | (S6_GMAC_MACMIICONF_CSEL_DIV168 << S6_GMAC_MACMIICONF_CSEL),
- pd->reg + S6_GMAC_MACMIICONF);
-}
-
-static int s6mii_busy(struct s6gmac *pd, int tmo)
-{
- while (readl(pd->reg + S6_GMAC_MACMIIINDI)) {
- if (--tmo == 0)
- return -ETIME;
- udelay(64);
- }
- return 0;
-}
-
-static int s6mii_read(struct mii_bus *bus, int phy_addr, int regnum)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(1 << S6_GMAC_MACMIICMD_READ, pd->reg + S6_GMAC_MACMIICMD);
- writel(0, pd->reg + S6_GMAC_MACMIICMD);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return (u16)readl(pd->reg + S6_GMAC_MACMIISTAT);
-}
-
-static int s6mii_write(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- writel(phy_addr << S6_GMAC_MACMIIADDR_PHY |
- regnum << S6_GMAC_MACMIIADDR_REG,
- pd->reg + S6_GMAC_MACMIIADDR);
- writel(value, pd->reg + S6_GMAC_MACMIICTRL);
- if (s6mii_busy(pd, 256))
- return -ETIME;
- return 0;
-}
-
-static int s6mii_reset(struct mii_bus *bus)
-{
- struct s6gmac *pd = bus->priv;
- s6mii_enable(pd);
- if (s6mii_busy(pd, PHY_INIT_TIMEOUT))
- return -ETIME;
- return 0;
-}
-
-static void s6gmac_set_rgmii_txclock(struct s6gmac *pd)
-{
- u32 pllsel = readl(S6_REG_GREG1 + S6_GREG1_PLLSEL);
- pllsel &= ~(S6_GREG1_PLLSEL_GMAC_MASK << S6_GREG1_PLLSEL_GMAC);
- switch (pd->link.mbit) {
- case 10:
- pllsel |= S6_GREG1_PLLSEL_GMAC_2500KHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 100:
- pllsel |= S6_GREG1_PLLSEL_GMAC_25MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- case 1000:
- pllsel |= S6_GREG1_PLLSEL_GMAC_125MHZ << S6_GREG1_PLLSEL_GMAC;
- break;
- default:
- return;
- }
- writel(pllsel, S6_REG_GREG1 + S6_GREG1_PLLSEL);
-}
-
-static inline void s6gmac_linkisup(struct net_device *dev, int isup)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
-
- pd->link.full = phydev->duplex;
- pd->link.giga = (phydev->speed == 1000);
- if (pd->link.mbit != phydev->speed) {
- pd->link.mbit = phydev->speed;
- s6gmac_set_rgmii_txclock(pd);
- }
- pd->link.isup = isup;
- if (isup)
- netif_carrier_on(dev);
- phy_print_status(phydev);
-}
-
-static void s6gmac_adjust_link(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct phy_device *phydev = pd->phydev;
- if (pd->link.isup &&
- (!phydev->link ||
- (pd->link.mbit != phydev->speed) ||
- (pd->link.full != phydev->duplex))) {
- pd->link.isup = 0;
- netif_tx_disable(dev);
- if (!phydev->link) {
- netif_carrier_off(dev);
- phy_print_status(phydev);
- }
- }
- if (!pd->link.isup && phydev->link) {
- if (pd->link.full != phydev->duplex) {
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- if (phydev->duplex)
- maccfg |= 1 << S6_GMAC_MACCONF2_FULL;
- else
- maccfg &= ~(1 << S6_GMAC_MACCONF2_FULL);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (pd->link.giga != (phydev->speed == 1000)) {
- u32 fifocfg = readl(pd->reg + S6_GMAC_FIFOCONF5);
- u32 maccfg = readl(pd->reg + S6_GMAC_MACCONF2);
- maccfg &= ~(S6_GMAC_MACCONF2_IFMODE_MASK
- << S6_GMAC_MACCONF2_IFMODE);
- if (phydev->speed == 1000) {
- fifocfg |= 1 << S6_GMAC_FIFOCONF5_CFGBYTM;
- maccfg |= S6_GMAC_MACCONF2_IFMODE_BYTE
- << S6_GMAC_MACCONF2_IFMODE;
- } else {
- fifocfg &= ~(1 << S6_GMAC_FIFOCONF5_CFGBYTM);
- maccfg |= S6_GMAC_MACCONF2_IFMODE_NIBBLE
- << S6_GMAC_MACCONF2_IFMODE;
- }
- writel(fifocfg, pd->reg + S6_GMAC_FIFOCONF5);
- writel(maccfg, pd->reg + S6_GMAC_MACCONF2);
- }
-
- if (!s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_wake_queue(dev);
- s6gmac_linkisup(dev, 1);
- }
-}
-
-static inline int s6gmac_phy_start(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- int i = 0;
- struct phy_device *p = NULL;
- while ((i < PHY_MAX_ADDR) && (!(p = pd->mii.bus->phy_map[i])))
- i++;
- p = phy_connect(dev, dev_name(&p->dev), &s6gmac_adjust_link,
- PHY_INTERFACE_MODE_RGMII);
- if (IS_ERR(p)) {
- printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name);
- return PTR_ERR(p);
- }
- p->supported &= PHY_GBIT_FEATURES;
- p->advertising = p->supported;
- pd->phydev = p;
- return 0;
-}
-
-static inline void s6gmac_init_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- u32 mask;
- mask = 1 << S6_GMAC_STATCARRY1_RDRP |
- 1 << S6_GMAC_STATCARRY1_RJBR |
- 1 << S6_GMAC_STATCARRY1_RFRG |
- 1 << S6_GMAC_STATCARRY1_ROVR |
- 1 << S6_GMAC_STATCARRY1_RUND |
- 1 << S6_GMAC_STATCARRY1_RCDE |
- 1 << S6_GMAC_STATCARRY1_RFLR |
- 1 << S6_GMAC_STATCARRY1_RALN |
- 1 << S6_GMAC_STATCARRY1_RMCA |
- 1 << S6_GMAC_STATCARRY1_RFCS |
- 1 << S6_GMAC_STATCARRY1_RPKT |
- 1 << S6_GMAC_STATCARRY1_RBYT;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(0));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(0));
- mask = 1 << S6_GMAC_STATCARRY2_TDRP |
- 1 << S6_GMAC_STATCARRY2_TNCL |
- 1 << S6_GMAC_STATCARRY2_TXCL |
- 1 << S6_GMAC_STATCARRY2_TEDF |
- 1 << S6_GMAC_STATCARRY2_TPKT |
- 1 << S6_GMAC_STATCARRY2_TBYT |
- 1 << S6_GMAC_STATCARRY2_TFRG |
- 1 << S6_GMAC_STATCARRY2_TUND |
- 1 << S6_GMAC_STATCARRY2_TOVR |
- 1 << S6_GMAC_STATCARRY2_TFCS |
- 1 << S6_GMAC_STATCARRY2_TJBR;
- writel(mask, pd->reg + S6_GMAC_STATCARRY(1));
- writel(~mask, pd->reg + S6_GMAC_STATCARRYMSK(1));
-}
-
-static inline void s6gmac_init_dmac(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- s6dmac_disable_chan(pd->tx_dma, pd->tx_chan);
- s6dmac_disable_chan(pd->rx_dma, pd->rx_chan);
- s6dmac_disable_error_irqs(pd->tx_dma, 1 << S6_HIFDMA_GMACTX);
- s6dmac_disable_error_irqs(pd->rx_dma, 1 << S6_HIFDMA_GMACRX);
-}
-
-static int s6gmac_tx(struct sk_buff *skb, struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
-
- spin_lock_irqsave(&pd->lock, flags);
- writel(skb->len << S6_GMAC_BURST_PREWR_LEN |
- 0 << S6_GMAC_BURST_PREWR_CFE |
- 1 << S6_GMAC_BURST_PREWR_PPE |
- 1 << S6_GMAC_BURST_PREWR_FCS |
- ((skb->len < ETH_ZLEN) ? 1 : 0) << S6_GMAC_BURST_PREWR_PAD,
- pd->reg + S6_GMAC_BURST_PREWR);
- s6dmac_put_fifo_cache(pd->tx_dma, pd->tx_chan,
- (u32)skb->data, pd->io, skb->len);
- if (s6dmac_fifo_full(pd->tx_dma, pd->tx_chan))
- netif_stop_queue(dev);
- if (((u8)(pd->tx_skb_i - pd->tx_skb_o)) >= S6_NUM_TX_SKB) {
- printk(KERN_ERR "GMAC BUG: skb tx ring overflow [%x, %x]\n",
- pd->tx_skb_o, pd->tx_skb_i);
- BUG();
- }
- pd->tx_skb[(pd->tx_skb_i++) % S6_NUM_TX_SKB] = skb;
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static void s6gmac_tx_timeout(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_tx_interrupt(dev);
- spin_unlock_irqrestore(&pd->lock, flags);
-}
-
-static int s6gmac_open(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- phy_read_status(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- pd->link.mbit = 0;
- s6gmac_linkisup(dev, pd->phydev->link);
- s6gmac_init_device(dev);
- s6gmac_init_stats(dev);
- s6gmac_init_dmac(dev);
- s6gmac_rx_fillfifo(dev);
- s6dmac_enable_chan(pd->rx_dma, pd->rx_chan,
- 2, 1, 0, 1, 0, 0, 0, 7, -1, 2, 0, 1);
- s6dmac_enable_chan(pd->tx_dma, pd->tx_chan,
- 2, 0, 1, 0, 0, 0, 0, 7, -1, 2, 0, 1);
- writel(0 << S6_GMAC_HOST_INT_TXBURSTOVER |
- 0 << S6_GMAC_HOST_INT_TXPREWOVER |
- 0 << S6_GMAC_HOST_INT_RXBURSTUNDER |
- 0 << S6_GMAC_HOST_INT_RXPOSTRFULL |
- 0 << S6_GMAC_HOST_INT_RXPOSTRUNDER,
- pd->reg + S6_GMAC_HOST_INTMASK);
- spin_unlock_irqrestore(&pd->lock, flags);
- phy_start(pd->phydev);
- netif_start_queue(dev);
- return 0;
-}
-
-static int s6gmac_stop(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- unsigned long flags;
- netif_stop_queue(dev);
- phy_stop(pd->phydev);
- spin_lock_irqsave(&pd->lock, flags);
- s6gmac_init_dmac(dev);
- s6gmac_stop_device(dev);
- while (pd->tx_skb_i != pd->tx_skb_o)
- dev_kfree_skb(pd->tx_skb[(pd->tx_skb_o++) % S6_NUM_TX_SKB]);
- while (pd->rx_skb_i != pd->rx_skb_o)
- dev_kfree_skb(pd->rx_skb[(pd->rx_skb_o++) % S6_NUM_RX_SKB]);
- spin_unlock_irqrestore(&pd->lock, flags);
- return 0;
-}
-
-static struct net_device_stats *s6gmac_stats(struct net_device *dev)
-{
- struct s6gmac *pd = netdev_priv(dev);
- struct net_device_stats *st = (struct net_device_stats *)&pd->stats;
- int i;
- do {
- unsigned long flags;
- spin_lock_irqsave(&pd->lock, flags);
- for (i = 0; i < ARRAY_SIZE(pd->stats); i++)
- pd->stats[i] =
- pd->carry[i] << (S6_GMAC_STAT_SIZE_MIN - 1);
- s6gmac_stats_collect(pd, &statinf[0][0]);
- s6gmac_stats_collect(pd, &statinf[1][0]);
- i = s6gmac_stats_pending(pd, 0) |
- s6gmac_stats_pending(pd, 1);
- spin_unlock_irqrestore(&pd->lock, flags);
- } while (i);
- st->rx_errors = st->rx_crc_errors +
- st->rx_frame_errors +
- st->rx_length_errors +
- st->rx_missed_errors;
- st->tx_errors += st->tx_aborted_errors;
- return st;
-}
-
-static int s6gmac_probe(struct platform_device *pdev)
-{
- struct net_device *dev;
- struct s6gmac *pd;
- int res;
- unsigned long i;
- struct mii_bus *mb;
-
- dev = alloc_etherdev(sizeof(*pd));
- if (!dev)
- return -ENOMEM;
-
- dev->open = s6gmac_open;
- dev->stop = s6gmac_stop;
- dev->hard_start_xmit = s6gmac_tx;
- dev->tx_timeout = s6gmac_tx_timeout;
- dev->watchdog_timeo = HZ;
- dev->get_stats = s6gmac_stats;
- dev->irq = platform_get_irq(pdev, 0);
- pd = netdev_priv(dev);
- memset(pd, 0, sizeof(*pd));
- spin_lock_init(&pd->lock);
- pd->reg = platform_get_resource(pdev, IORESOURCE_MEM, 0)->start;
- i = platform_get_resource(pdev, IORESOURCE_DMA, 0)->start;
- pd->tx_dma = DMA_MASK_DMAC(i);
- pd->tx_chan = DMA_INDEX_CHNL(i);
- i = platform_get_resource(pdev, IORESOURCE_DMA, 1)->start;
- pd->rx_dma = DMA_MASK_DMAC(i);
- pd->rx_chan = DMA_INDEX_CHNL(i);
- pd->io = platform_get_resource(pdev, IORESOURCE_IO, 0)->start;
- res = request_irq(dev->irq, s6gmac_interrupt, 0, dev->name, dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "irq request failed: %d\n", dev->irq);
- goto errirq;
- }
- res = register_netdev(dev);
- if (res) {
- printk(KERN_ERR DRV_PRMT "error registering device %s\n",
- dev->name);
- goto errdev;
- }
- mb = mdiobus_alloc();
- if (!mb) {
- printk(KERN_ERR DRV_PRMT "error allocating mii bus\n");
- res = -ENOMEM;
- goto errmii;
- }
- mb->name = "s6gmac_mii";
- mb->read = s6mii_read;
- mb->write = s6mii_write;
- mb->reset = s6mii_reset;
- mb->priv = pd;
- snprintf(mb->id, MII_BUS_ID_SIZE, "%s-%x", pdev->name, pdev->id);
- mb->phy_mask = ~(1 << 0);
- mb->irq = &pd->mii.irq[0];
- for (i = 0; i < PHY_MAX_ADDR; i++) {
- int n = platform_get_irq(pdev, i + 1);
- if (n < 0)
- n = PHY_POLL;
- pd->mii.irq[i] = n;
- }
- mdiobus_register(mb);
- pd->mii.bus = mb;
- res = s6gmac_phy_start(dev);
- if (res)
- return res;
- platform_set_drvdata(pdev, dev);
- return 0;
-errmii:
- unregister_netdev(dev);
-errdev:
- free_irq(dev->irq, dev);
-errirq:
- free_netdev(dev);
- return res;
-}
-
-static int s6gmac_remove(struct platform_device *pdev)
-{
- struct net_device *dev = platform_get_drvdata(pdev);
- if (dev) {
- struct s6gmac *pd = netdev_priv(dev);
- mdiobus_unregister(pd->mii.bus);
- unregister_netdev(dev);
- free_irq(dev->irq, dev);
- free_netdev(dev);
- }
- return 0;
-}
-
-static struct platform_driver s6gmac_driver = {
- .probe = s6gmac_probe,
- .remove = s6gmac_remove,
- .driver = {
- .name = "s6gmac",
- },
-};
-
-module_platform_driver(s6gmac_driver);
-
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("S6105 on chip Ethernet driver");
-MODULE_AUTHOR("Oskar Schirmer <oskar@scara.com>");
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 698494481d18..b1a271853d85 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
/* allocate memory for RX skbuff array */
rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize,
sizeof(dma_addr_t), GFP_KERNEL);
- if (rx_ring->rx_skbuff_dma == NULL)
- goto dmamem_err;
+ if (!rx_ring->rx_skbuff_dma) {
+ dma_free_coherent(priv->device,
+ rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
+ rx_ring->dma_rx, rx_ring->dma_rx_phy);
+ goto error;
+ }
rx_ring->rx_skbuff = kmalloc_array(rx_rsize,
sizeof(struct sk_buff *), GFP_KERNEL);
- if (rx_ring->rx_skbuff == NULL)
- goto rxbuff_err;
+ if (!rx_ring->rx_skbuff) {
+ kfree(rx_ring->rx_skbuff_dma);
+ goto error;
+ }
/* initialise the buffers */
for (desc_index = 0; desc_index < rx_rsize; desc_index++) {
@@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no,
err_init_rx_buffers:
while (--desc_index >= 0)
free_rx_ring(priv->device, rx_ring, desc_index);
- kfree(rx_ring->rx_skbuff);
-rxbuff_err:
- kfree(rx_ring->rx_skbuff_dma);
-dmamem_err:
- dma_free_coherent(priv->device,
- rx_rsize * sizeof(struct sxgbe_rx_norm_desc),
- rx_ring->dma_rx, rx_ring->dma_rx_phy);
error:
return -ENOMEM;
}
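
The init_rx_ring change above drops the fall-through error labels and instead releases resources right where an allocation fails. A minimal userspace sketch of that style — hypothetical names, with plain malloc/free standing in for the kernel's DMA and skbuff allocators:

#include <stdlib.h>

struct rx_ring {
	void *dma;         /* stands in for the coherent descriptor area */
	long *skbuff_dma;  /* stands in for the DMA-address array */
	void **skbuff;     /* stands in for the skbuff pointer array */
};

/* Allocate the ring pieces in order; on failure free only what was
 * already acquired, then report the error to the caller. */
static int init_ring(struct rx_ring *r, size_t n)
{
	r->dma = malloc(n * 64);
	if (!r->dma)
		return -1;

	r->skbuff_dma = malloc(n * sizeof(*r->skbuff_dma));
	if (!r->skbuff_dma) {
		free(r->dma);          /* undo the first allocation */
		return -1;
	}

	r->skbuff = malloc(n * sizeof(*r->skbuff));
	if (!r->skbuff) {
		free(r->skbuff_dma);   /* undo the second allocation */
		free(r->dma);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct rx_ring r;
	return init_ring(&r, 16) ? 1 : 0;
}
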
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
index 866560ea9e18..b02eed12bfc5 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c
@@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
}
}
- /* Get MAC address if available (DT) */
- if (mac)
- ether_addr_copy(priv->dev->dev_addr, mac);
-
priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr);
if (!priv) {
pr_err("%s: main driver probe failed\n", __func__);
@@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev)
goto err_drv_remove;
}
+ /* Get MAC address if available (DT) */
+ if (mac)
+ ether_addr_copy(priv->dev->dev_addr, mac);
+
/* Get the TX/RX IRQ numbers */
for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) {
priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 118a427d1942..cf62ff4c8c56 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -1671,7 +1671,7 @@ static void stmmac_init_tx_coalesce(struct stmmac_priv *priv)
* 0 on success and an appropriate (-)ve integer as defined in errno.h
* file on failure.
*/
-static int stmmac_hw_setup(struct net_device *dev)
+static int stmmac_hw_setup(struct net_device *dev, bool init_ptp)
{
struct stmmac_priv *priv = netdev_priv(dev);
int ret;
@@ -1708,9 +1708,11 @@ static int stmmac_hw_setup(struct net_device *dev)
stmmac_mmc_setup(priv);
- ret = stmmac_init_ptp(priv);
- if (ret && ret != -EOPNOTSUPP)
- pr_warn("%s: failed PTP initialisation\n", __func__);
+ if (init_ptp) {
+ ret = stmmac_init_ptp(priv);
+ if (ret && ret != -EOPNOTSUPP)
+ pr_warn("%s: failed PTP initialisation\n", __func__);
+ }
#ifdef CONFIG_DEBUG_FS
ret = stmmac_init_fs(dev);
@@ -1787,7 +1789,7 @@ static int stmmac_open(struct net_device *dev)
goto init_error;
}
- ret = stmmac_hw_setup(dev);
+ ret = stmmac_hw_setup(dev, true);
if (ret < 0) {
pr_err("%s: Hw setup failed\n", __func__);
goto init_error;
@@ -2776,6 +2778,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
* @addr: iobase memory address
* Description: this is the main probe function used to
* call the alloc_etherdev, allocate the priv structure.
+ * Return:
+ * on success the new private structure is returned, otherwise an error
+ * pointer.
*/
struct stmmac_priv *stmmac_dvr_probe(struct device *device,
struct plat_stmmacenet_data *plat_dat,
@@ -2787,7 +2792,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
ndev = alloc_etherdev(sizeof(struct stmmac_priv));
if (!ndev)
- return NULL;
+ return ERR_PTR(-ENOMEM);
SET_NETDEV_DEV(ndev, device);
@@ -3036,7 +3041,7 @@ int stmmac_resume(struct net_device *ndev)
netif_device_attach(ndev);
init_dma_desc_rings(ndev, GFP_ATOMIC);
- stmmac_hw_setup(ndev);
+ stmmac_hw_setup(ndev, false);
stmmac_init_tx_coalesce(priv);
napi_enable(&priv->napi);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 4032b170fe24..3039de2465ba 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -430,7 +430,6 @@ static struct platform_driver stmmac_pltfr_driver = {
.remove = stmmac_pltfr_remove,
.driver = {
.name = STMMAC_RESOURCE_NAME,
- .owner = THIS_MODULE,
.pm = &stmmac_pltfr_pm_ops,
.of_match_table = of_match_ptr(stmmac_dt_ids),
},
diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c
index 45c408ef67d0..d2835bf7b4fb 100644
--- a/drivers/net/ethernet/sun/sunvnet.c
+++ b/drivers/net/ethernet/sun/sunvnet.c
@@ -1201,6 +1201,7 @@ static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb)
segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
if (IS_ERR(segs)) {
dev->stats.tx_dropped++;
+ dev_kfree_skb_any(skb);
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index c560f9aeb55d..a39131f494ec 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
/* Clear all mcast from ALE */
cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS <<
- priv->host_port);
+ priv->host_port, -1);
/* Flood All Unicast Packets to Host port */
cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
@@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
{
struct cpsw_priv *priv = netdev_priv(ndev);
+ int vid;
+
+ if (priv->data.dual_emac)
+ vid = priv->slaves[priv->emac_port].port_vlan;
+ else
+ vid = priv->data.default_vlan;
if (ndev->flags & IFF_PROMISC) {
/* Enable promiscuous mode */
@@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
/* Clear all mcast from ALE */
- cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
+ cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port,
+ vid);
if (!netdev_mc_empty(ndev)) {
struct netdev_hw_addr *ha;
@@ -757,6 +764,14 @@ requeue:
static irqreturn_t cpsw_interrupt(int irq, void *dev_id)
{
struct cpsw_priv *priv = dev_id;
+ int value = irq - priv->irqs_table[0];
+
+ /* NOTICE: Ending IRQ here. The trick with the 'value' variable above
+ * is to make sure we will always write the correct value to the EOI
+ * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2
+ * for TX Interrupt and 3 for MISC Interrupt.
+ */
+ cpdma_ctlr_eoi(priv->dma, value);
cpsw_intr_disable(priv);
if (priv->irq_enabled == true) {
@@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
int num_tx, num_rx;
num_tx = cpdma_chan_process(priv->txch, 128);
- if (num_tx)
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
num_rx = cpdma_chan_process(priv->rxch, budget);
if (num_rx < budget) {
@@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
prim_cpsw->irq_enabled = true;
@@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
napi_enable(&priv->napi);
cpdma_ctlr_start(priv->dma);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
prim_cpsw = cpsw_get_slave_priv(priv, 0);
if (prim_cpsw->irq_enabled == false) {
@@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev)
cpdma_chan_start(priv->txch);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p)
@@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev)
cpsw_interrupt(ndev->irq, priv);
cpdma_ctlr_int_ctrl(priv->dma, true);
cpsw_intr_enable(priv);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX);
- cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX);
-
}
#endif
@@ -1630,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
unsigned short vid)
{
int ret;
- int unreg_mcast_mask;
+ int unreg_mcast_mask = 0;
+ u32 port_mask;
- if (priv->ndev->flags & IFF_ALLMULTI)
- unreg_mcast_mask = ALE_ALL_PORTS;
- else
- unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ if (priv->data.dual_emac) {
+ port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST;
+
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = port_mask;
+ } else {
+ port_mask = ALE_ALL_PORTS;
- ret = cpsw_ale_add_vlan(priv->ale, vid,
- ALE_ALL_PORTS << priv->host_port,
- 0, ALE_ALL_PORTS << priv->host_port,
+ if (priv->ndev->flags & IFF_ALLMULTI)
+ unreg_mcast_mask = ALE_ALL_PORTS;
+ else
+ unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
+ }
+
+ ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask,
unreg_mcast_mask << priv->host_port);
if (ret != 0)
return ret;
@@ -1650,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
goto clean_vid;
ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast,
- ALE_ALL_PORTS << priv->host_port,
- ALE_VLAN, vid, 0);
+ port_mask, ALE_VLAN, vid, 0);
if (ret != 0)
goto clean_vlan_ucast;
return 0;
@@ -1672,6 +1683,19 @@ static int cpsw_ndo_vlan_rx_add_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ /* In dual EMAC, reserved VLAN id should not be used for
+ * creating VLAN interfaces as this can break the dual
+ * EMAC port separation
+ */
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "Adding vlanid %d to vlan filter\n", vid);
return cpsw_add_vlan_ale_entry(priv, vid);
}
@@ -1685,6 +1709,15 @@ static int cpsw_ndo_vlan_rx_kill_vid(struct net_device *ndev,
if (vid == priv->data.default_vlan)
return 0;
+ if (priv->data.dual_emac) {
+ int i;
+
+ for (i = 0; i < priv->data.slaves; i++) {
+ if (vid == priv->slaves[i].port_vlan)
+ return -EINVAL;
+ }
+ }
+
dev_info(priv->dev, "removing vlanid %d from vlan filter\n", vid);
ret = cpsw_ale_del_vlan(priv->ale, vid, 0);
if (ret != 0)
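
The cpsw_interrupt change above derives the EOI code from the position of the firing IRQ within the driver's IRQ table, so RX_THRESH, RX, TX and MISC map to 0..3. A tiny sketch of that index derivation — hypothetical numbers, assuming the four IRQs are handed over consecutively as the comment implies:

#include <assert.h>
#include <stdio.h>

/* IRQ numbers as they might have been passed to the driver, in the
 * fixed order RX_THRESH, RX, TX, MISC. */
static const int irqs_table[4] = { 40, 41, 42, 43 };

/* The EOI value is the offset of the firing IRQ from the first entry:
 * 0 for RX_THRESH, 1 for RX, 2 for TX, 3 for MISC. */
static int irq_to_eoi(int irq)
{
	int value = irq - irqs_table[0];

	assert(value >= 0 && value < 4);
	return value;
}

int main(void)
{
	printf("eoi for irq 41 = %d\n", irq_to_eoi(41));	/* prints 1 */
	return 0;
}
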
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 097ebe7077ac..5246b3a18ff8 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry,
cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE);
}
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid)
{
u32 ale_entry[ALE_ENTRY_WORDS];
int ret, idx;
@@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask)
if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR)
continue;
+ /* If the vid passed is -1 then remove all multicast entries from
+ * the table irrespective of the vlan id; if a valid vlan id is
+ * passed then remove only multicast entries added for that vlan id.
+ * If the vlan id doesn't match then move on to the next entry.
+ */
+ if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid)
+ continue;
+
if (cpsw_ale_get_mcast(ale_entry)) {
u8 addr[6];
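
The new vid parameter turns the flush into a per-VLAN operation: -1 keeps the old "flush everything" behaviour, any other value skips entries whose VLAN id differs. A small self-contained sketch of that filter rule, with a hypothetical entry type standing in for the ALE table:

#include <stdio.h>

struct mcast_entry {
	int vid;
	int in_use;
};

/* Flush multicast entries; vid == -1 means all VLANs, otherwise only
 * entries recorded for that VLAN id are removed. */
static void flush_multicast(struct mcast_entry *tbl, int n, int vid)
{
	for (int i = 0; i < n; i++) {
		if (!tbl[i].in_use)
			continue;
		if (vid != -1 && tbl[i].vid != vid)
			continue;	/* different VLAN, keep it */
		tbl[i].in_use = 0;	/* drop this entry */
	}
}

int main(void)
{
	struct mcast_entry tbl[] = { {1, 1}, {2, 1}, {1, 1} };

	flush_multicast(tbl, 3, 1);
	for (int i = 0; i < 3; i++)
		printf("entry %d in_use=%d\n", i, tbl[i].in_use);
	return 0;
}
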
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index c0d4127aa549..af1e7ecd87c6 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale);
int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout);
int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask);
-int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask);
+int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid);
int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port,
int flags, u16 vid);
int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port,
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index ea712512c7d1..5fae4354722c 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -62,6 +62,7 @@
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
+#include <linux/of_mdio.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
@@ -343,9 +344,7 @@ struct emac_priv {
u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS];
u32 rx_addr_type;
const char *phy_id;
-#ifdef CONFIG_OF
struct device_node *phy_node;
-#endif
struct phy_device *phydev;
spinlock_t lock;
/*platform specific members*/
@@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv)
if (priv->int_disable)
priv->int_disable();
+ /* NOTE: Rx Threshold and Misc interrupts are not enabled */
+
+ /* ack rxen only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_RXEN);
+
+ /* ack txen- only then a new pulse will be generated */
+ emac_write(EMAC_DM646X_MACEOIVECTOR,
+ EMAC_DM646X_MAC_EOI_C0_TXEN);
+
local_irq_restore(flags);
} else {
@@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv)
* register */
/* NOTE: Rx Threshold and Misc interrupts are not enabled */
-
- /* ack rxen only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_RXEN);
-
- /* ack txen- only then a new pulse will be generated */
- emac_write(EMAC_DM646X_MACEOIVECTOR,
- EMAC_DM646X_MAC_EOI_C0_TXEN);
-
} else {
/* Set DM644x control registers for interrupt control */
emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1);
@@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev)
int i = 0;
struct emac_priv *priv = netdev_priv(ndev);
- pm_runtime_get(&priv->pdev->dev);
+ ret = pm_runtime_get_sync(&priv->pdev->dev);
+ if (ret < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, ret);
+ return ret;
+ }
netif_carrier_off(ndev);
for (cnt = 0; cnt < ETH_ALEN; cnt++)
@@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev)
cpdma_ctlr_start(priv->dma);
priv->phydev = NULL;
+
+ if (priv->phy_node) {
+ priv->phydev = of_phy_connect(ndev, priv->phy_node,
+ &emac_adjust_link, 0, 0);
+ if (!priv->phydev) {
+ dev_err(emac_dev, "could not connect to phy %s\n",
+ priv->phy_node->full_name);
+ ret = -ENODEV;
+ goto err;
+ }
+ }
+
/* use the first phy on the bus if pdata did not give us a phy id */
- if (!priv->phy_id) {
+ if (!priv->phydev && !priv->phy_id) {
struct device *phy;
phy = bus_find_device(&mdio_bus_type, NULL, NULL,
@@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev)
priv->phy_id = dev_name(phy);
}
- if (priv->phy_id && *priv->phy_id) {
+ if (!priv->phydev && priv->phy_id && *priv->phy_id) {
priv->phydev = phy_connect(ndev, priv->phy_id,
&emac_adjust_link,
PHY_INTERFACE_MODE_MII);
@@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev)
"(mii_bus:phy_addr=%s, id=%x)\n",
priv->phydev->drv->name, dev_name(&priv->phydev->dev),
priv->phydev->phy_id);
- } else {
+ }
+
+ if (!priv->phydev) {
/* No PHY , fix the link, speed and duplex settings */
dev_notice(emac_dev, "no phy, defaulting to 100/full\n");
priv->link = 1;
@@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
struct emac_priv *priv = netdev_priv(ndev);
u32 mac_control;
u32 stats_clear_mask;
+ int err;
+
+ err = pm_runtime_get_sync(&priv->pdev->dev);
+ if (err < 0) {
+ pm_runtime_put_noidle(&priv->pdev->dev);
+ dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, err);
+ return &ndev->stats;
+ }
/* update emac hardware stats and reset the registers*/
@@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev)
ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN);
emac_write(EMAC_TXUNDERRUN, stats_clear_mask);
+ pm_runtime_put(&priv->pdev->dev);
+
return &ndev->stats;
}
@@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
static int davinci_emac_probe(struct platform_device *pdev)
{
int rc = 0;
- struct resource *res;
+ struct resource *res, *res_ctrl;
struct net_device *ndev;
struct emac_priv *priv;
unsigned long hw_ram_addr;
@@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
return -EBUSY;
}
emac_bus_frequency = clk_get_rate(emac_clk);
+ devm_clk_put(&pdev->dev, emac_clk);
/* TODO: Probe PHY here if possible */
@@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev)
rc = PTR_ERR(priv->remap_addr);
goto no_pdata;
}
+
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_ctrl) {
+ priv->ctrl_base =
+ devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(priv->ctrl_base))
+ goto no_pdata;
+ } else {
+ priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
+ }
+
priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset;
ndev->base_addr = (unsigned long)priv->remap_addr;
- priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset;
-
hw_ram_addr = pdata->hw_ram_addr;
if (!hw_ram_addr)
hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset;
@@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev)
ndev->ethtool_ops = &ethtool_ops;
netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT);
+ pm_runtime_enable(&pdev->dev);
+ rc = pm_runtime_get_sync(&pdev->dev);
+ if (rc < 0) {
+ pm_runtime_put_noidle(&pdev->dev);
+ dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n",
+ __func__, rc);
+ goto no_cpdma_chan;
+ }
+
/* register the network device */
SET_NETDEV_DEV(ndev, &pdev->dev);
rc = register_netdev(ndev);
if (rc) {
dev_err(&pdev->dev, "error in register_netdev\n");
rc = -ENODEV;
+ pm_runtime_put(&pdev->dev);
goto no_cpdma_chan;
}
@@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev)
"(regs: %p, irq: %d)\n",
(void *)priv->emac_base_phys, ndev->irq);
}
-
- pm_runtime_enable(&pdev->dev);
- pm_runtime_resume(&pdev->dev);
+ pm_runtime_put(&pdev->dev);
return 0;
@@ -2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = {
.hw_ram_addr = 0x01e20000,
};
+static const struct emac_platform_data dm816_emac_data = {
+ .version = EMAC_VERSION_2,
+};
+
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
{.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
+ {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 9c2d91ea0af4..dbcbf0c5bcfa 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map temac regs.\n");
+ rc = -ENOMEM;
goto nodev;
}
@@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "llink-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ rc = -ENODEV;
goto err_iounmap;
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet.h b/drivers/net/ethernet/xilinx/xilinx_axienet.h
index 44b8d2bad8c3..4c9b4fa1d3c1 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet.h
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet.h
@@ -388,7 +388,6 @@ struct axidma_bd {
* @dma_err_tasklet: Tasklet structure to process Axi DMA errors
* @tx_irq: Axidma TX IRQ number
* @rx_irq: Axidma RX IRQ number
- * @temac_type: axienet type to identify between soft and hard temac
* @phy_type: Phy type to identify between MII/GMII/RGMII/SGMII/1000 Base-X
* @options: AxiEthernet option word
* @last_link: Phy link state in which the PHY was negotiated earlier
@@ -431,7 +430,6 @@ struct axienet_local {
int tx_irq;
int rx_irq;
- u32 temac_type;
u32 phy_type;
u32 options; /* Current options word */
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index 4ea2d4e6f1d1..a6d2860b712c 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op)
lp->regs = of_iomap(op->dev.of_node, 0);
if (!lp->regs) {
dev_err(&op->dev, "could not map Axi Ethernet regs.\n");
+ ret = -ENOMEM;
goto nodev;
}
/* Setup checksum offload, but default to off if not specified */
@@ -1555,10 +1556,6 @@ static int axienet_of_probe(struct platform_device *op)
if ((be32_to_cpup(p)) >= 0x4000)
lp->jumbo_support = 1;
}
- p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,temac-type",
- NULL);
- if (p)
- lp->temac_type = be32_to_cpup(p);
p = (__be32 *) of_get_property(op->dev.of_node, "xlnx,phy-type", NULL);
if (p)
lp->phy_type = be32_to_cpup(p);
@@ -1567,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op)
np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0);
if (!np) {
dev_err(&op->dev, "could not find DMA node\n");
+ ret = -ENODEV;
goto err_iounmap;
}
lp->dma_regs = of_iomap(np, 0);
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 24858799c204..9d4ce388510a 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev)
res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(dev, "no IRQ found\n");
+ rc = -ENXIO;
goto error;
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 2f48f790c9b4..384ca4f4de4a 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -590,6 +590,7 @@ struct nvsp_message {
#define NETVSC_RECEIVE_BUFFER_ID 0xcafe
+#define NETVSC_SEND_BUFFER_ID 0
#define NETVSC_PACKET_SIZE 4096
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index dd867e6cabd6..9f49c0129a78 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -161,8 +161,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
/* Deal with the send buffer we may have setup.
* If we got a send section size, it means we received a
- * SendsendBufferComplete msg (ie sent
- * NvspMessage1TypeSendReceiveBuffer msg) therefore, we need
+ * NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE msg (ie sent
+ * NVSP_MSG1_TYPE_SEND_SEND_BUF msg) therefore, we need
* to send a revoke msg here
*/
if (net_device->send_section_size) {
@@ -172,7 +172,8 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
revoke_packet->hdr.msg_type =
NVSP_MSG1_TYPE_REVOKE_SEND_BUF;
- revoke_packet->msg.v1_msg.revoke_recv_buf.id = 0;
+ revoke_packet->msg.v1_msg.revoke_send_buf.id =
+ NETVSC_SEND_BUFFER_ID;
ret = vmbus_sendpacket(net_device->dev->channel,
revoke_packet,
@@ -204,7 +205,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
net_device->send_buf_gpadl_handle = 0;
}
if (net_device->send_buf) {
- /* Free up the receive buffer */
+ /* Free up the send buffer */
vfree(net_device->send_buf);
net_device->send_buf = NULL;
}
@@ -339,9 +340,9 @@ static int netvsc_init_buf(struct hv_device *device)
init_packet = &net_device->channel_init_pkt;
memset(init_packet, 0, sizeof(struct nvsp_message));
init_packet->hdr.msg_type = NVSP_MSG1_TYPE_SEND_SEND_BUF;
- init_packet->msg.v1_msg.send_recv_buf.gpadl_handle =
+ init_packet->msg.v1_msg.send_send_buf.gpadl_handle =
net_device->send_buf_gpadl_handle;
- init_packet->msg.v1_msg.send_recv_buf.id = 0;
+ init_packet->msg.v1_msg.send_send_buf.id = NETVSC_SEND_BUFFER_ID;
/* Send the gpadl notification request */
ret = vmbus_sendpacket(device->channel, init_packet,
@@ -364,7 +365,7 @@ static int netvsc_init_buf(struct hv_device *device)
netdev_err(ndev, "Unable to complete send buffer "
"initialization with NetVsp - status %d\n",
init_packet->msg.v1_msg.
- send_recv_buf_complete.status);
+ send_send_buf_complete.status);
ret = -EINVAL;
goto cleanup;
}
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index a14d87783245..2e195289ddf4 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -377,9 +377,11 @@ static int ipvlan_process_v6_outbound(struct sk_buff *skb)
};
dst = ip6_route_output(dev_net(dev), NULL, &fl6);
- if (IS_ERR(dst))
+ if (dst->error) {
+ ret = dst->error;
+ dst_release(dst);
goto err;
-
+ }
skb_dst_drop(skb);
skb_dst_set(skb, dst);
err = ip6_local_out(skb);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index c530de1e63f5..3ad8ca76196d 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -88,6 +88,7 @@ struct kszphy_priv {
static const struct kszphy_type ksz8021_type = {
.led_mode_reg = MII_KSZPHY_CTRL_2,
+ .has_broadcast_disable = true,
.has_rmii_ref_clk_sel = true,
};
@@ -258,19 +259,6 @@ static int kszphy_config_init(struct phy_device *phydev)
return 0;
}
-static int ksz8021_config_init(struct phy_device *phydev)
-{
- int rc;
-
- rc = kszphy_config_init(phydev);
- if (rc)
- return rc;
-
- rc = kszphy_broadcast_disable(phydev);
-
- return rc < 0 ? rc : 0;
-}
-
static int ksz9021_load_values_from_of(struct phy_device *phydev,
struct device_node *of_node, u16 reg,
char *field1, char *field2,
@@ -584,7 +572,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
@@ -601,7 +589,7 @@ static struct phy_driver ksphy_driver[] = {
.flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
.driver_data = &ksz8021_type,
.probe = kszphy_probe,
- .config_init = ksz8021_config_init,
+ .config_init = kszphy_config_init,
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = kszphy_ack_interrupt,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 93e224217e24..f7ff493f1e73 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind)
static void team_notify_peers_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, notify_peers.dw.work);
@@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work)
schedule_delayed_work(&team->notify_peers.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->notify_peers.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->notify_peers.count_pending))
+ if (val)
schedule_delayed_work(&team->notify_peers.dw,
msecs_to_jiffies(team->notify_peers.interval));
}
@@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team)
static void team_mcast_rejoin_work(struct work_struct *work)
{
struct team *team;
+ int val;
team = container_of(work, struct team, mcast_rejoin.dw.work);
@@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work)
schedule_delayed_work(&team->mcast_rejoin.dw, 0);
return;
}
+ val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
+ if (val < 0) {
+ rtnl_unlock();
+ return;
+ }
call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
rtnl_unlock();
- if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
+ if (val)
schedule_delayed_work(&team->mcast_rejoin.dw,
msecs_to_jiffies(team->mcast_rejoin.interval));
}
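
Both workers now use the "decrement only if still positive" result to decide between bailing out and rescheduling, which avoids driving the pending counter negative when a notification run is cancelled concurrently. A minimal C11 sketch of that primitive and the caller pattern — hypothetical names, with stdatomic standing in for the kernel's atomic_t:

#include <stdatomic.h>
#include <stdio.h>

/* Decrement *v only if it is currently positive; return the new value,
 * or a negative number if no decrement happened. */
static int dec_if_positive(atomic_int *v)
{
	int old = atomic_load(v);

	while (old > 0) {
		if (atomic_compare_exchange_weak(v, &old, old - 1))
			return old - 1;
		/* old was reloaded by the failed exchange; retry */
	}
	return old - 1;
}

int main(void)
{
	atomic_int pending = 2;
	int val;

	while ((val = dec_if_positive(&pending)) >= 0) {
		printf("notified, %d still pending\n", val);
		if (!val)
			break;	/* last one: do not reschedule */
	}
	return 0;
}
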
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index dcb6d33141e0..1e9cdca37014 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length)
awd.done = 0;
urb->context = &awd;
- status = usb_submit_urb(urb, GFP_NOIO);
+ status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
// something went wrong
usb_free_urb(urb);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index b8a82b86f909..602dc6668c3a 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -56,6 +56,8 @@ struct qmi_wwan_state {
/* default ethernet address used by the modem */
static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
+static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};
+
/* Make up an ethernet header if the packet doesn't have one.
*
* A firmware bug common among several devices cause them to send raw
@@ -332,10 +334,12 @@ next_desc:
usb_driver_release_interface(driver, info->data);
}
- /* Never use the same address on both ends of the link, even
- * if the buggy firmware told us to.
+ /* Never use the same address on both ends of the link, even if the
+ * buggy firmware told us to. Or, if the device is assigned the
+ * well-known buggy firmware MAC address, replace it with a random
+ * address.
+ */
- if (ether_addr_equal(dev->net->dev_addr, default_modem_addr))
+ if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) ||
+ ether_addr_equal(dev->net->dev_addr, buggy_fw_addr))
eth_hw_addr_random(dev->net);
/* make MAC addr easily distinguishable from an IP header */
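
The check above treats 00:a0:c6:00:00:00 the same way as the modem's default address: either one is replaced with a locally administered random MAC so both ends of the link never share an address. A small sketch of that replacement logic, with hypothetical helpers and rand() standing in for the kernel's random MAC generator:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define ETH_ALEN 6

static const unsigned char default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
static const unsigned char buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6};

static int addr_equal(const unsigned char *a, const unsigned char *b)
{
	return memcmp(a, b, ETH_ALEN) == 0;
}

/* Replace a known-bad address with a random, locally administered,
 * unicast MAC (bit 1 of the first octet set, bit 0 cleared). */
static void fixup_mac(unsigned char *mac)
{
	if (!addr_equal(mac, default_modem_addr) &&
	    !addr_equal(mac, buggy_fw_addr))
		return;

	for (int i = 0; i < ETH_ALEN; i++)
		mac[i] = rand() & 0xff;
	mac[0] &= 0xfe;		/* clear the multicast bit */
	mac[0] |= 0x02;		/* set the locally administered bit */
}

int main(void)
{
	unsigned char mac[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00};

	fixup_mac(mac);
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}
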
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 2d1c77e81836..bf405f134d3a 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data)
index &= ~3;
}
- generic_ocp_read(tp, index, sizeof(tmp), &tmp, type);
-
- data |= __le32_to_cpu(tmp) & ~mask;
tmp = __cpu_to_le32(data);
generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type);
@@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data)
ocp_reg_write(tp, OCP_SRAM_DATA, data);
}
-static u16 sram_read(struct r8152 *tp, u16 addr)
-{
- ocp_reg_write(tp, OCP_SRAM_ADDR, addr);
- return ocp_reg_read(tp, OCP_SRAM_DATA);
-}
-
static int read_mii_word(struct net_device *netdev, int phy_id, int reg)
{
struct r8152 *tp = netdev_priv(netdev);
@@ -1897,6 +1885,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev)
netif_wake_queue(netdev);
}
+static netdev_features_t
+rtl8152_features_check(struct sk_buff *skb, struct net_device *dev,
+ netdev_features_t features)
+{
+ u32 mss = skb_shinfo(skb)->gso_size;
+ int max_offset = mss ? GTTCPHO_MAX : TCPHO_MAX;
+ int offset = skb_transport_offset(skb);
+
+ if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset)
+ features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
+ else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz)
+ features &= ~NETIF_F_GSO_MASK;
+
+ return features;
+}
+
static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb,
struct net_device *netdev)
{
@@ -2502,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
data = ocp_reg_read(tp, OCP_POWER_CFG);
data |= EN_10M_PLLOFF;
ocp_reg_write(tp, OCP_POWER_CFG, data);
- data = sram_read(tp, SRAM_IMPEDANCE);
- data &= ~RX_DRIVING_MASK;
- sram_write(tp, SRAM_IMPEDANCE, data);
+ sram_write(tp, SRAM_IMPEDANCE, 0x0b13);
ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR);
ocp_data |= PFM_PWM_SWITCH;
ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data);
- data = sram_read(tp, SRAM_LPF_CFG);
- data |= LPF_AUTO_TUNE;
- sram_write(tp, SRAM_LPF_CFG, data);
+ /* Enable LPF corner auto tune */
+ sram_write(tp, SRAM_LPF_CFG, 0xf70f);
- data = sram_read(tp, SRAM_10M_AMP1);
- data |= GDAC_IB_UPALL;
- sram_write(tp, SRAM_10M_AMP1, data);
- data = sram_read(tp, SRAM_10M_AMP2);
- data |= AMP_DN;
- sram_write(tp, SRAM_10M_AMP2, data);
+ /* Adjust 10M Amplitude */
+ sram_write(tp, SRAM_10M_AMP1, 0x00af);
+ sram_write(tp, SRAM_10M_AMP2, 0x0208);
set_bit(PHY_RESET, &tp->flags);
}
@@ -3706,6 +3704,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
.ndo_set_mac_address = rtl8152_set_mac_address,
.ndo_change_mtu = rtl8152_change_mtu,
.ndo_validate_addr = eth_validate_addr,
+ .ndo_features_check = rtl8152_features_check,
};
static void r8152b_get_version(struct r8152 *tp)
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b8bd7191572d..5ca97713bfb3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -760,7 +760,6 @@ static int virtnet_poll(struct napi_struct *napi, int budget)
container_of(napi, struct receive_queue, napi);
unsigned int r, received = 0;
-again:
received += virtnet_receive(rq, budget - received);
/* Out of packets? */
@@ -771,7 +770,6 @@ again:
napi_schedule_prep(napi)) {
virtqueue_disable_cb(rq->vq);
__napi_schedule(napi);
- goto again;
}
}
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 49d9f2291998..7fbd89fbe107 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1579,8 +1579,10 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
bool udp_sum = !udp_get_no_check6_tx(vs->sock->sk);
skb = udp_tunnel_handle_offloads(skb, udp_sum);
- if (IS_ERR(skb))
- return -EINVAL;
+ if (IS_ERR(skb)) {
+ err = -EINVAL;
+ goto err;
+ }
skb_scrub_packet(skb, xnet);
@@ -1590,12 +1592,16 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
- return err;
+ if (unlikely(err)) {
+ kfree_skb(skb);
+ goto err;
+ }
skb = vlan_hwaccel_push_inside(skb);
- if (WARN_ON(!skb))
- return -ENOMEM;
+ if (WARN_ON(!skb)) {
+ err = -ENOMEM;
+ goto err;
+ }
vxh = (struct vxlanhdr *) __skb_push(skb, sizeof(*vxh));
vxh->vx_flags = htonl(VXLAN_FLAGS);
@@ -1606,6 +1612,9 @@ static int vxlan6_xmit_skb(struct vxlan_sock *vs,
udp_tunnel6_xmit_skb(vs->sock, dst, skb, dev, saddr, daddr, prio,
ttl, src_port, dst_port);
return 0;
+err:
+ dst_release(dst);
+ return err;
}
#endif
@@ -1621,7 +1630,7 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
skb = udp_tunnel_handle_offloads(skb, udp_sum);
if (IS_ERR(skb))
- return -EINVAL;
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ VXLAN_HLEN + sizeof(struct iphdr)
@@ -1629,8 +1638,10 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
/* Need space for new headers (invalidates iph ptr) */
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (WARN_ON(!skb))
@@ -1776,9 +1787,12 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
tos, ttl, df, src_port, dst_port,
htonl(vni << 8),
!net_eq(vxlan->net, dev_net(vxlan->dev)));
-
- if (err < 0)
+ if (err < 0) {
+ /* skb is already freed. */
+ skb = NULL;
goto rt_tx_error;
+ }
+
iptunnel_xmit_stats(err, &dev->stats, dev->tstats);
#if IS_ENABLED(CONFIG_IPV6)
} else {
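
vxlan6_xmit_skb now funnels its failure paths through a single err label that releases the route, while each branch frees the skb only if it still owns it, so nothing is leaked on error. A compact sketch of that goto-error shape — hypothetical names, plain userspace resources standing in for the dst and skb:

#include <stdlib.h>

struct route { int refcnt; };
struct buf   { char data[64]; };

static void route_release(struct route *rt) { rt->refcnt--; }

/* Every failure after the route lookup goes through the err label,
 * which drops the route reference exactly once; the buffer is freed
 * only in the branch that still owns it. */
static int xmit(struct route *rt, struct buf *b, int make_room_fails)
{
	int err;

	if (make_room_fails) {
		err = -12;	/* -ENOMEM */
		free(b);	/* we still own the buffer here */
		goto err;
	}

	/* success: the lower layer would consume both; freeing stands in */
	free(b);
	route_release(rt);
	return 0;

err:
	route_release(rt);
	return err;
}

int main(void)
{
	struct route rt = { .refcnt = 1 };
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	xmit(&rt, b, 1);	/* exercise the failure path */
	return rt.refcnt;	/* 0: the route reference was dropped */
}
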
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 9a72640237cb..62b0bf4fdf6b 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -285,6 +285,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
__ath_cancel_work(sc);
+ disable_irq(sc->irq);
tasklet_disable(&sc->intr_tq);
tasklet_disable(&sc->bcon_tasklet);
spin_lock_bh(&sc->sc_pcu_lock);
@@ -331,6 +332,7 @@ static int ath_reset_internal(struct ath_softc *sc, struct ath9k_channel *hchan)
r = -EIO;
out:
+ enable_irq(sc->irq);
spin_unlock_bh(&sc->sc_pcu_lock);
tasklet_enable(&sc->bcon_tasklet);
tasklet_enable(&sc->intr_tq);
@@ -512,9 +514,6 @@ irqreturn_t ath_isr(int irq, void *dev)
if (!ah || test_bit(ATH_OP_INVALID, &common->op_flags))
return IRQ_NONE;
- if (!AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
- return IRQ_NONE;
-
/* shared irq, not for us */
if (!ath9k_hw_intrpend(ah))
return IRQ_NONE;
@@ -529,7 +528,7 @@ irqreturn_t ath_isr(int irq, void *dev)
ath9k_debug_sync_cause(sc, sync_cause);
status &= ah->imask; /* discard unasked-for bits */
- if (AR_SREV_9100(ah) && test_bit(ATH_OP_HW_RESET, &common->op_flags))
+ if (test_bit(ATH_OP_HW_RESET, &common->op_flags))
return IRQ_HANDLED;
/*
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index 3c06e9365949..9880dae2a569 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -1070,7 +1070,7 @@ static int brcmf_ops_sdio_probe(struct sdio_func *func,
*/
if ((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_KEEP_POWER) &&
((sdio_get_host_pm_caps(sdiodev->func[1]) & MMC_PM_WAKE_SDIO_IRQ) ||
- (sdiodev->pdata->oob_irq_supported)))
+ (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)))
bus_if->wowl_supported = true;
#endif
@@ -1167,7 +1167,7 @@ static int brcmf_ops_sdio_resume(struct device *dev)
struct brcmf_sdio_dev *sdiodev = bus_if->bus_priv.sdio;
brcmf_dbg(SDIO, "Enter\n");
- if (sdiodev->pdata->oob_irq_supported)
+ if (sdiodev->pdata && sdiodev->pdata->oob_irq_supported)
disable_irq_wake(sdiodev->pdata->oob_irq_nr);
brcmf_sdio_wd_timer(sdiodev->bus, BRCMF_WD_POLL_MS);
atomic_set(&sdiodev->suspend, false);
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index 91c0cb3c368e..21de4fe6cf2d 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -65,7 +65,8 @@ config IPW2100_DEBUG
config IPW2200
tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
- depends on PCI && CFG80211 && CFG80211_WEXT
+ depends on PCI && CFG80211
+ select CFG80211_WEXT
select WIRELESS_EXT
select WEXT_SPY
select WEXT_PRIV
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index e5be2d21868f..a5f9198d5747 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -69,8 +69,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 10
-#define IWL3160_UCODE_API_MAX 10
+#define IWL7260_UCODE_API_MAX 12
+#define IWL3160_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 10
@@ -105,7 +105,7 @@
#define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
#define IWL7265D_FW_PRE "iwlwifi-7265D-"
-#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
+#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode"
#define NVM_HW_SECTION_NUM_FAMILY_7000 0
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index bf0a95cb7153..3668fc57e770 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -69,7 +69,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL8000_UCODE_API_MAX 10
+#define IWL8000_UCODE_API_MAX 12
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 10
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 38de1513e4de..850b85a47806 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -1323,10 +1323,10 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
try_again:
/* try next, if any */
- kfree(pieces);
release_firmware(ucode_raw);
if (iwl_request_firmware(drv, false))
goto out_unbind;
+ kfree(pieces);
return;
out_free_fw:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index 9564ae173d06..1f7f15eb86da 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -310,6 +310,7 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
#define FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE (0x01000000)
#define FH_MEM_TFDIB_REG1_ADDR_BITSHIFT 28
+#define FH_MEM_TB_MAX_LENGTH (0x00020000)
/* TFDB Area - TFDs buffer table */
#define FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK (0xFFFFFFFF)
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
index f2a047f6bb3e..660ddb1b7d8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h
@@ -243,6 +243,10 @@ enum iwl_ucode_tlv_flag {
* @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif.
* @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
* longer than the passive one, which is essential for fragmented scan.
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
+ * regardless of the band or the number of the probes. FW will calculate
+ * the actual dwell time.
+ * @IWL_UCODE_TLV_API_SINGLE_SCAN_EBS: EBS is supported for single scans too.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
@@ -253,6 +257,8 @@ enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6),
IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7),
IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8),
+ IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13),
+ IWL_UCODE_TLV_API_SINGLE_SCAN_EBS = BIT(16),
};
/**
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 1f2acf47bfb2..cfc0e65b34a5 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -653,8 +653,11 @@ enum iwl_scan_channel_flags {
};
/* iwl_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
- * @flags: enum iwl_scan_channel_flgs
- * @non_ebs_ratio: how many regular scan iteration before EBS
+ * @flags: enum iwl_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be full scan (and so on).
*/
struct iwl_scan_channel_opt {
__le16 flags;
@@ -672,6 +675,7 @@ struct iwl_scan_channel_opt {
* @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
* @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
* and DS parameter set IEs into probe requests.
+ * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
*/
enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0),
@@ -681,6 +685,7 @@ enum iwl_mvm_lmac_scan_flags {
IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4),
IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5),
IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6),
+ IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9),
};
enum iwl_scan_priority {
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 31a5b3f4266c..20915587c820 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -1004,8 +1004,13 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
lockdep_assert_held(&mvm->mutex);
- /* disallow low power states when the FW is down */
- iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
+ /*
+ * Disallow low power states when the FW is down by taking
+ * the UCODE_DOWN ref. In case of ongoing hw restart the
+ * ref is already taken, so don't take it again.
+ */
+ if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
+ iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
/* async_handlers_wk is now blocked */
@@ -1023,6 +1028,12 @@ void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
/* the fw is stopped, the aux sta is dead: clean up driver state */
iwl_mvm_del_aux_sta(mvm);
+ /*
+ * Clear IN_HW_RESTART flag when stopping the hw (as restart_complete()
+ * won't be called in this case).
+ */
+ clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
+
mvm->ucode_loaded = false;
}
@@ -3332,18 +3343,16 @@ static void iwl_mvm_mac_flush(struct ieee80211_hw *hw,
msk |= mvmsta->tfd_queue_msk;
}
- if (drop) {
- if (iwl_mvm_flush_tx_path(mvm, msk, true))
- IWL_ERR(mvm, "flush request fail\n");
- mutex_unlock(&mvm->mutex);
- } else {
- mutex_unlock(&mvm->mutex);
+ msk &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
- /* this can take a while, and we may need/want other operations
- * to succeed while doing this, so do it without the mutex held
- */
- iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
- }
+ if (iwl_mvm_flush_tx_path(mvm, msk, true))
+ IWL_ERR(mvm, "flush request fail\n");
+ mutex_unlock(&mvm->mutex);
+
+ /* this can take a while, and we may need/want other operations
+ * to succeed while doing this, so do it without the mutex held
+ */
+ iwl_trans_wait_tx_queue_empty(mvm->trans, msk);
}
const struct ieee80211_ops iwl_mvm_hw_ops = {
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index e5294d01181e..844bf7c4c8de 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -72,6 +72,8 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
+#define IWL_DENSE_EBS_SCAN_RATIO 5
+#define IWL_SPARSE_EBS_SCAN_RATIO 1
struct iwl_mvm_scan_params {
u32 max_out_time;
@@ -171,15 +173,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid,
* already included in the probe template, so we need to set only
* req->n_ssids - 1 bits in addition to the first bit.
*/
-static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids)
+static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band, int n_ssids)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 10;
if (band == IEEE80211_BAND_2GHZ)
return 20 + 3 * (n_ssids + 1);
return 10 + 2 * (n_ssids + 1);
}
-static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
+static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm,
+ enum ieee80211_band band)
{
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL)
+ return 110;
return band == IEEE80211_BAND_2GHZ ? 100 + 20 : 100 + 10;
}
@@ -331,7 +339,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
*/
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
u32 passive_dwell =
- iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ);
+ iwl_mvm_get_passive_dwell(mvm,
+ IEEE80211_BAND_2GHZ);
params->max_out_time = passive_dwell;
} else {
params->passive_fragmented = true;
@@ -348,8 +357,8 @@ not_bound:
params->dwell[band].passive = frag_passive_dwell;
else
params->dwell[band].passive =
- iwl_mvm_get_passive_dwell(band);
- params->dwell[band].active = iwl_mvm_get_active_dwell(band,
+ iwl_mvm_get_passive_dwell(mvm, band);
+ params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band,
n_ssids);
}
}
@@ -1098,6 +1107,12 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
return iwl_umac_scan_stop(mvm, IWL_UMAC_SCAN_UID_SCHED_SCAN,
notify);
+ if (mvm->scan_status == IWL_MVM_SCAN_NONE)
+ return 0;
+
+ if (iwl_mvm_is_radio_killed(mvm))
+ goto out;
+
if (mvm->scan_status != IWL_MVM_SCAN_SCHED &&
(!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) ||
mvm->scan_status != IWL_MVM_SCAN_OS)) {
@@ -1134,6 +1149,7 @@ int iwl_mvm_scan_offload_stop(struct iwl_mvm *mvm, bool notify)
if (mvm->scan_status == IWL_MVM_SCAN_OS)
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
+out:
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (notify) {
@@ -1290,18 +1306,6 @@ iwl_mvm_build_generic_unified_scan_cmd(struct iwl_mvm *mvm,
cmd->scan_prio = cpu_to_le32(IWL_SCAN_PRIORITY_HIGH);
cmd->iter_num = cpu_to_le32(1);
- if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
- mvm->last_ebs_successful) {
- cmd->channel_opt[0].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- cmd->channel_opt[1].flags =
- cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
- IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
- IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
- }
-
if (iwl_mvm_rrm_scan_needed(mvm))
cmd->scan_flags |=
cpu_to_le32(IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
@@ -1376,6 +1380,22 @@ int iwl_mvm_unified_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0;
cmd->schedule[1].full_scan_mul = 0;
+ if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_SINGLE_SCAN_EBS &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
for (i = 1; i <= req->req.n_ssids; i++)
ssid_bitmap |= BIT(i);
@@ -1448,6 +1468,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
if (iwl_mvm_scan_pass_all(mvm, req))
flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL;
+ else
+ flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH;
if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0)
flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION;
@@ -1474,6 +1496,22 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
cmd->schedule[1].iterations = 0xff;
cmd->schedule[1].full_scan_mul = IWL_FULL_SCAN_MULTIPLIER;
+ if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_EBS_SUPPORT &&
+ mvm->last_ebs_successful) {
+ cmd->channel_opt[0].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[0].non_ebs_ratio =
+ cpu_to_le16(IWL_DENSE_EBS_SCAN_RATIO);
+ cmd->channel_opt[1].flags =
+ cpu_to_le16(IWL_SCAN_CHANNEL_FLAG_EBS |
+ IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
+ IWL_SCAN_CHANNEL_FLAG_CACHE_ADD);
+ cmd->channel_opt[1].non_ebs_ratio =
+ cpu_to_le16(IWL_SPARSE_EBS_SCAN_RATIO);
+ }
+
iwl_mvm_lmac_scan_cfg_channels(mvm, req->channels, req->n_channels,
ssid_bitmap, cmd);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 4f15d9decc81..c59d07567d90 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -90,8 +90,6 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
if (ieee80211_is_probe_resp(fc))
tx_flags |= TX_CMD_FLG_TSF;
- else if (ieee80211_is_back_req(fc))
- tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
@@ -100,6 +98,15 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
u8 *qc = ieee80211_get_qos_ctl(hdr);
tx_cmd->tid_tspec = qc[0] & 0xf;
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
+ } else if (ieee80211_is_back_req(fc)) {
+ struct ieee80211_bar *bar = (void *)skb->data;
+ u16 control = le16_to_cpu(bar->control);
+
+ tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
+ tx_cmd->tid_tspec = (control &
+ IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
+ IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
+ WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
} else {
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
@@ -108,8 +115,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
- /* tid_tspec will default to 0 = BE when QOS isn't enabled */
- ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ /* Default to 0 (BE) when tid_tspec is set to IWL_TID_NON_QOS */
+ if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
+ ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
+ else
+ ac = tid_to_mac80211_ac[0];
+
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS;
diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c
index e56e77ef5d2e..917431e30f74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/utils.c
+++ b/drivers/net/wireless/iwlwifi/mvm/utils.c
@@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm)
if (num_of_ant(mvm->fw->valid_rx_ant) == 1)
return false;
- if (!mvm->cfg->rx_with_siso_diversity)
+ if (mvm->cfg->rx_with_siso_diversity)
return false;
ieee80211_iterate_active_interfaces_atomic(
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 3ee8e3848876..d5aadb00dd9e 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,7 +367,11 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
/* 3165 Series */
{IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
{IWL_PCI_DEVICE(0x3165, 0x4210, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -523,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
else if (cfg == &iwl7265_n_cfg)
cfg_7265d = &iwl7265d_n_cfg;
if (cfg_7265d &&
- (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D)
+ (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) {
cfg = cfg_7265d;
+ iwl_trans->cfg = cfg_7265d;
+ }
#endif
pci_set_drvdata(pdev, iwl_trans);
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d79a1f44b8e..523fe0c88dcb 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -614,7 +614,7 @@ static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
{
u8 *v_addr;
dma_addr_t p_addr;
- u32 offset, chunk_sz = section->len;
+ u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
int ret = 0;
IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
@@ -1012,16 +1012,21 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Stop the device, and put it in low power state */
iwl_pcie_apm_stop(trans);
- /* Upon stop, the APM issues an interrupt if HW RF kill is set.
- * Clean again the interrupt here
+ /* stop and reset the on-board processor */
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ udelay(20);
+
+ /*
+ * Upon stop, the APM issues an interrupt if HW RF kill is set.
+ * This is a bug in certain versions of the hardware.
+ * Certain devices also keep sending HW RF kill interrupt all
+ * the time, unless the interrupt is ACKed even if the interrupt
+ * should be masked. Re-ACK all the interrupts here.
*/
spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
- /* stop and reset the on-board processor */
- iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
- udelay(20);
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 846a2e6e34d8..c70efb9a6e78 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -666,7 +666,8 @@ tx_status_ok:
}
static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
- u8 *entry, int rxring_idx, int desc_idx)
+ struct sk_buff *new_skb, u8 *entry,
+ int rxring_idx, int desc_idx)
{
struct rtl_priv *rtlpriv = rtl_priv(hw);
struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
@@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
u8 tmp_one = 1;
struct sk_buff *skb;
+ if (likely(new_skb)) {
+ skb = new_skb;
+ goto remap;
+ }
skb = dev_alloc_skb(rtlpci->rxbuffersize);
if (!skb)
return 0;
- rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
+remap:
/* just set skb->cb to mapping addr for pci_unmap_single use */
*((dma_addr_t *)skb->cb) =
pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
@@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw,
bufferaddress = *((dma_addr_t *)skb->cb);
if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress))
return 0;
+ rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb;
if (rtlpriv->use_new_trx_flow) {
rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false,
HW_DESC_RX_PREPARE,
@@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
/*rx pkt */
struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[
rtlpci->rx_ring[rxring_idx].idx];
+ struct sk_buff *new_skb;
if (rtlpriv->use_new_trx_flow) {
rx_remained_cnt =
@@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb),
rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE);
+ /* get a new skb - if fail, old one will be reused */
+ new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
+ if (unlikely(!new_skb)) {
+ pr_err("Allocation of new skb failed in %s\n",
+ __func__);
+ goto no_new;
+ }
if (rtlpriv->use_new_trx_flow) {
buffer_desc =
&rtlpci->rx_ring[rxring_idx].buffer_desc
@@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
schedule_work(&rtlpriv->works.lps_change_work);
}
end:
+ skb = new_skb;
+no_new:
if (rtlpriv->use_new_trx_flow) {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc,
rxring_idx,
- rtlpci->rx_ring[rxring_idx].idx);
+ rtlpci->rx_ring[rxring_idx].idx);
} else {
- _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx,
+ _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc,
+ rxring_idx,
rtlpci->rx_ring[rxring_idx].idx);
-
if (rtlpci->rx_ring[rxring_idx].idx ==
rtlpci->rxringcount - 1)
rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc,
@@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
rtlpci->rx_ring[rxring_idx].idx = 0;
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
@@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx)
for (i = 0; i < rtlpci->rxringcount; i++) {
entry = &rtlpci->rx_ring[rxring_idx].desc[i];
- if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry,
+ if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry,
rxring_idx, i))
return -ENOMEM;
}
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index efbaf2ae1999..794204e34fba 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -737,6 +737,7 @@ static void connect(struct backend_info *be)
}
queue->remaining_credit = credit_bytes;
+ queue->credit_usec = credit_usec;
err = connect_rings(be, queue);
if (err) {
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 22bcb4e12e2a..d8c10764f130 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -88,10 +88,8 @@ struct netfront_cb {
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
struct netfront_stats {
- u64 rx_packets;
- u64 tx_packets;
- u64 rx_bytes;
- u64 tx_bytes;
+ u64 packets;
+ u64 bytes;
struct u64_stats_sync syncp;
};
@@ -160,7 +158,8 @@ struct netfront_info {
struct netfront_queue *queues;
/* Statistics */
- struct netfront_stats __percpu *stats;
+ struct netfront_stats __percpu *rx_stats;
+ struct netfront_stats __percpu *tx_stats;
atomic_t rx_gso_checksum_fixup;
};
@@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
unsigned short id;
struct netfront_info *np = netdev_priv(dev);
- struct netfront_stats *stats = this_cpu_ptr(np->stats);
+ struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats);
struct xen_netif_tx_request *tx;
char *data = skb->data;
RING_IDX i;
@@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
if (notify)
notify_remote_via_irq(queue->tx_irq);
- u64_stats_update_begin(&stats->syncp);
- stats->tx_bytes += skb->len;
- stats->tx_packets++;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->bytes += skb->len;
+ tx_stats->packets++;
+ u64_stats_update_end(&tx_stats->syncp);
/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
xennet_tx_buf_gc(queue);
@@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
static int handle_incoming_queue(struct netfront_queue *queue,
struct sk_buff_head *rxq)
{
- struct netfront_stats *stats = this_cpu_ptr(queue->info->stats);
+ struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats);
int packets_dropped = 0;
struct sk_buff *skb;
@@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue,
continue;
}
- u64_stats_update_begin(&stats->syncp);
- stats->rx_packets++;
- stats->rx_bytes += skb->len;
- u64_stats_update_end(&stats->syncp);
+ u64_stats_update_begin(&rx_stats->syncp);
+ rx_stats->packets++;
+ rx_stats->bytes += skb->len;
+ u64_stats_update_end(&rx_stats->syncp);
/* Pass it up. */
napi_gro_receive(&queue->napi, skb);
@@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
int cpu;
for_each_possible_cpu(cpu) {
- struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
+ struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu);
+ struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu);
u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
unsigned int start;
do {
- start = u64_stats_fetch_begin_irq(&stats->syncp);
+ start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
+ tx_packets = tx_stats->packets;
+ tx_bytes = tx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
- rx_packets = stats->rx_packets;
- tx_packets = stats->tx_packets;
- rx_bytes = stats->rx_bytes;
- tx_bytes = stats->tx_bytes;
- } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ do {
+ start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
+ rx_packets = rx_stats->packets;
+ rx_bytes = rx_stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
tot->rx_packets += rx_packets;
tot->tx_packets += tx_packets;
@@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = {
#endif
};
+static void xennet_free_netdev(struct net_device *netdev)
+{
+ struct netfront_info *np = netdev_priv(netdev);
+
+ free_percpu(np->rx_stats);
+ free_percpu(np->tx_stats);
+ free_netdev(netdev);
+}
+
static struct net_device *xennet_create_dev(struct xenbus_device *dev)
{
int err;
@@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
np->queues = NULL;
err = -ENOMEM;
- np->stats = netdev_alloc_pcpu_stats(struct netfront_stats);
- if (np->stats == NULL)
+ np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->rx_stats == NULL)
+ goto exit;
+ np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats);
+ if (np->tx_stats == NULL)
goto exit;
netdev->netdev_ops = &xennet_netdev_ops;
@@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev)
return netdev;
exit:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
return ERR_PTR(err);
}
@@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev,
return 0;
fail:
- free_netdev(netdev);
+ xennet_free_netdev(netdev);
dev_set_drvdata(&dev->dev, NULL);
return err;
}
@@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev)
info->queues = NULL;
}
- free_percpu(info->stats);
-
- free_netdev(info->netdev);
+ xennet_free_netdev(info->netdev);
return 0;
}
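The rx/tx statistics split above keeps one u64_stats_sync per direction and per CPU, and readers take a snapshot with the begin/retry loop seen in xennet_get_stats64(). As a rough, self-contained model of why that loop is needed (illustrative only: the simplified sequence counter below is not the kernel's u64_stats_sync, a single writer per counter is assumed, and real code also needs memory barriers):

/* Minimal userspace model of a sequence-counter protected 64-bit counter. */
#include <stdint.h>

struct stats {
	volatile unsigned int seq;	/* even = stable, odd = update in progress */
	uint64_t packets;
	uint64_t bytes;
};

/* Writer side (one writer per struct, e.g. per-CPU): bump seq around the update. */
static void stats_update(struct stats *s, uint64_t len)
{
	s->seq++;			/* begin: counter becomes odd */
	s->packets++;
	s->bytes += len;
	s->seq++;			/* end: counter becomes even again */
}

/* Reader side: retry the snapshot if an update overlapped the read. */
static void stats_read(const struct stats *s, uint64_t *packets, uint64_t *bytes)
{
	unsigned int start;

	do {
		while ((start = s->seq) & 1)
			;		/* update in progress, wait for an even value */
		*packets = s->packets;
		*bytes = s->bytes;
	} while (s->seq != start);	/* an update slipped in: take the snapshot again */
}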
diff --git a/drivers/of/overlay.c b/drivers/of/overlay.c
index ea63fbd228ed..352b4f28f82c 100644
--- a/drivers/of/overlay.c
+++ b/drivers/of/overlay.c
@@ -114,17 +114,6 @@ static int of_overlay_apply_single_device_node(struct of_overlay *ov,
ret = of_overlay_apply_one(ov, tchild, child);
if (ret)
return ret;
-
- /* The properties are already copied, now do the child nodes */
- for_each_child_of_node(child, grandchild) {
- ret = of_overlay_apply_single_device_node(ov, tchild, grandchild);
- if (ret) {
- pr_err("%s: Failed to apply single node @%s/%s\n",
- __func__, tchild->full_name,
- grandchild->name);
- return ret;
- }
- }
}
return ret;
diff --git a/drivers/of/platform.c b/drivers/of/platform.c
index 5b33c6a21807..b0d50d70a8a1 100644
--- a/drivers/of/platform.c
+++ b/drivers/of/platform.c
@@ -188,7 +188,7 @@ static void of_dma_configure(struct device *dev)
size = dev->coherent_dma_mask;
} else {
offset = PFN_DOWN(paddr - dma_addr);
- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset);
+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset);
}
dev->dma_pfn_offset = offset;
@@ -566,6 +566,10 @@ static int of_platform_notify(struct notifier_block *nb,
if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS))
return NOTIFY_OK; /* not for us */
+ /* already populated? (driver using of_populate manually) */
+ if (of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* pdev_parent may be NULL when no bus platform device */
pdev_parent = of_find_device_by_node(rd->dn->parent);
pdev = of_platform_device_create(rd->dn, NULL,
@@ -581,6 +585,11 @@ static int of_platform_notify(struct notifier_block *nb,
break;
case OF_RECONFIG_CHANGE_REMOVE:
+
+ /* already depopulated? */
+ if (!of_node_check_flag(rd->dn, OF_POPULATED))
+ return NOTIFY_OK;
+
/* find our device by node */
pdev = of_find_device_by_node(rd->dn);
if (pdev == NULL)
diff --git a/drivers/of/unittest-data/tests-overlay.dtsi b/drivers/of/unittest-data/tests-overlay.dtsi
index 75976da22b2e..a2b687d5f324 100644
--- a/drivers/of/unittest-data/tests-overlay.dtsi
+++ b/drivers/of/unittest-data/tests-overlay.dtsi
@@ -176,5 +176,60 @@
};
};
+ overlay10 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest10 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <10>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest101 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
+
+ overlay11 {
+ fragment@0 {
+ target-path = "/testcase-data/overlay-node/test-bus";
+ __overlay__ {
+
+ /* suppress DTC warning */
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest11 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <11>;
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ test-selftest111 {
+ compatible = "selftest";
+ status = "okay";
+ reg = <1>;
+ };
+
+ };
+ };
+ };
+ };
};
};
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 844838e11ef1..41a4a138f53b 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -978,6 +978,9 @@ static int selftest_probe(struct platform_device *pdev)
}
dev_dbg(dev, "%s for node @%s\n", __func__, np->full_name);
+
+ of_platform_populate(np, NULL, NULL, &pdev->dev);
+
return 0;
}
@@ -1385,6 +1388,39 @@ static void of_selftest_overlay_8(void)
selftest(1, "overlay test %d passed\n", 8);
}
+/* test insertion of a bus with parent devices */
+static void of_selftest_overlay_10(void)
+{
+ int ret;
+ char *child_path;
+
+ /* device should disable */
+ ret = of_selftest_apply_overlay_check(10, 10, 0, 1);
+ if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 10))
+ return;
+
+ child_path = kasprintf(GFP_KERNEL, "%s/test-selftest101",
+ selftest_path(10));
+ if (selftest(child_path, "overlay test %d failed; kasprintf\n", 10))
+ return;
+
+ ret = of_path_platform_device_exists(child_path);
+ kfree(child_path);
+ if (selftest(ret, "overlay test %d failed; no child device\n", 10))
+ return;
+}
+
+/* test insertion of a bus with parent devices (and revert) */
+static void of_selftest_overlay_11(void)
+{
+ int ret;
+
+ /* device should disable */
+ ret = of_selftest_apply_revert_overlay_check(11, 11, 0, 1);
+ if (selftest(ret == 0, "overlay test %d failed; overlay application\n", 11))
+ return;
+}
+
static void __init of_selftest_overlay(void)
{
struct device_node *bus_np = NULL;
@@ -1433,6 +1469,9 @@ static void __init of_selftest_overlay(void)
of_selftest_overlay_6();
of_selftest_overlay_8();
+ of_selftest_overlay_10();
+ of_selftest_overlay_11();
+
out:
of_node_put(bus_np);
}
diff --git a/drivers/parisc/lba_pci.c b/drivers/parisc/lba_pci.c
index 37e71ff6408d..dceb9ddfd99a 100644
--- a/drivers/parisc/lba_pci.c
+++ b/drivers/parisc/lba_pci.c
@@ -694,9 +694,8 @@ lba_fixup_bus(struct pci_bus *bus)
int i;
/* PCI-PCI Bridge */
pci_read_bridge_bases(bus);
- for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
- pci_claim_resource(bus->self, i);
- }
+ for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++)
+ pci_claim_bridge_resource(bus->self, i);
} else {
/* Host-PCI Bridge */
int err;
diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c
index 73aef51a28f0..8fb16188cd82 100644
--- a/drivers/pci/bus.c
+++ b/drivers/pci/bus.c
@@ -228,6 +228,49 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
}
EXPORT_SYMBOL(pci_bus_alloc_resource);
+/*
+ * The @idx resource of @dev should be a PCI-PCI bridge window. If this
+ * resource fits inside a window of an upstream bridge, do nothing. If it
+ * overlaps an upstream window but extends outside it, clip the resource so
+ * it fits completely inside.
+ */
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx)
+{
+ struct pci_bus *bus = dev->bus;
+ struct resource *res = &dev->resource[idx];
+ struct resource orig_res = *res;
+ struct resource *r;
+ int i;
+
+ pci_bus_for_each_resource(bus, r, i) {
+ resource_size_t start, end;
+
+ if (!r)
+ continue;
+
+ if (resource_type(res) != resource_type(r))
+ continue;
+
+ start = max(r->start, res->start);
+ end = min(r->end, res->end);
+
+ if (start > end)
+ continue; /* no overlap */
+
+ if (res->start == start && res->end == end)
+ return false; /* no change */
+
+ res->start = start;
+ res->end = end;
+ dev_printk(KERN_DEBUG, &dev->dev, "%pR clipped to %pR\n",
+ &orig_res, res);
+
+ return true;
+ }
+
+ return false;
+}
+
void __weak pcibios_resource_survey_bus(struct pci_bus *bus) { }
/**
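pci_bus_clip_resource() above is, at its core, an interval intersection: shrink the child bridge window so it lies inside the overlapping upstream window, and report whether anything changed. A minimal standalone sketch of that rule, assuming inclusive [start, end] ranges like struct resource (the names below are illustrative, not kernel API):

#include <stdbool.h>
#include <stdint.h>

struct window {
	uint64_t start, end;		/* inclusive range */
};

/* Returns true if child was clipped, false if it already fit inside parent
 * or does not overlap parent at all (in which case clipping cannot help). */
static bool clip_window(struct window *child, const struct window *parent)
{
	uint64_t start = child->start > parent->start ? child->start : parent->start;
	uint64_t end   = child->end   < parent->end   ? child->end   : parent->end;

	if (start > end)
		return false;		/* no overlap */
	if (start == child->start && end == child->end)
		return false;		/* already contained, nothing to do */

	child->start = start;
	child->end = end;
	return true;
}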
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cab05f31223f..e9d4fd861ba1 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3271,7 +3271,8 @@ static int pci_parent_bus_reset(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (pci_is_root_bus(dev->bus) || dev->subordinate || !dev->bus->self)
+ if (pci_is_root_bus(dev->bus) || dev->subordinate ||
+ !dev->bus->self || dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3305,7 +3306,8 @@ static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
struct pci_dev *pdev;
- if (dev->subordinate || !dev->slot)
+ if (dev->subordinate || !dev->slot ||
+ dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
list_for_each_entry(pdev, &dev->bus->devices, bus_list)
@@ -3557,6 +3559,20 @@ int pci_try_reset_function(struct pci_dev *dev)
}
EXPORT_SYMBOL_GPL(pci_try_reset_function);
+/* Do any devices on or below this bus prevent a bus reset? */
+static bool pci_bus_resetable(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_bus_lock(struct pci_bus *bus)
{
@@ -3607,6 +3623,22 @@ unlock:
return 0;
}
+/* Do any devices on or below this slot prevent a bus reset? */
+static bool pci_slot_resetable(struct pci_slot *slot)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &slot->bus->devices, bus_list) {
+ if (!dev->slot || dev->slot != slot)
+ continue;
+ if (dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET ||
+ (dev->subordinate && !pci_bus_resetable(dev->subordinate)))
+ return false;
+ }
+
+ return true;
+}
+
/* Lock devices from the top of the tree down */
static void pci_slot_lock(struct pci_slot *slot)
{
@@ -3728,7 +3760,7 @@ static int pci_slot_reset(struct pci_slot *slot, int probe)
{
int rc;
- if (!slot)
+ if (!slot || !pci_slot_resetable(slot))
return -ENOTTY;
if (!probe)
@@ -3820,7 +3852,7 @@ EXPORT_SYMBOL_GPL(pci_try_reset_slot);
static int pci_bus_reset(struct pci_bus *bus, int probe)
{
- if (!bus->self)
+ if (!bus->self || !pci_bus_resetable(bus))
return -ENOTTY;
if (probe)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 8aff29a804ff..d54632a1db43 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -208,6 +208,7 @@ void __pci_bus_size_bridges(struct pci_bus *bus,
void __pci_bus_assign_resources(const struct pci_bus *bus,
struct list_head *realloc_head,
struct list_head *fail_head);
+bool pci_bus_clip_resource(struct pci_dev *dev, int idx);
/**
* pci_ari_enabled - query ARI forwarding status
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index ed6f89b6efe5..e52356aa09b8 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -3028,6 +3028,20 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
quirk_broken_intx_masking);
+static void quirk_no_bus_reset(struct pci_dev *dev)
+{
+ dev->dev_flags |= PCI_DEV_FLAGS_NO_BUS_RESET;
+}
+
+/*
+ * Atheros AR93xx chips do not behave after a bus reset. The device will
+ * throw a Link Down error on AER-capable systems and regardless of AER,
+ * config space of the device is never accessible again and typically
+ * causes the system to hang or reset when access is attempted.
+ * http://www.spinics.net/lists/linux-pci/msg34797.html
+ */
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATHEROS, 0x0030, quirk_no_bus_reset);
+
#ifdef CONFIG_ACPI
/*
* Apple: Shutdown Cactus Ridge Thunderbolt controller.
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 0482235eee92..e3e17f3c0f0f 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -530,9 +530,8 @@ EXPORT_SYMBOL(pci_setup_cardbus);
config space writes, so it's quite possible that an I/O window of
the bridge will have some undesirable address (e.g. 0) after the
first write. Ditto 64-bit prefetchable MMIO. */
-static void pci_setup_bridge_io(struct pci_bus *bus)
+static void pci_setup_bridge_io(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
unsigned long io_mask;
@@ -545,7 +544,7 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
io_mask = PCI_IO_1K_RANGE_MASK;
/* Set up the top and bottom of the PCI I/O segment for this bus. */
- res = bus->resource[0];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 0];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_IO) {
pci_read_config_word(bridge, PCI_IO_BASE, &l);
@@ -568,15 +567,14 @@ static void pci_setup_bridge_io(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
}
-static void pci_setup_bridge_mmio(struct pci_bus *bus)
+static void pci_setup_bridge_mmio(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l;
/* Set up the top and bottom of the PCI Memory segment for this bus. */
- res = bus->resource[1];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 1];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_MEM) {
l = (region.start >> 16) & 0xfff0;
@@ -588,9 +586,8 @@ static void pci_setup_bridge_mmio(struct pci_bus *bus)
pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
}
-static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
+static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
{
- struct pci_dev *bridge = bus->self;
struct resource *res;
struct pci_bus_region region;
u32 l, bu, lu;
@@ -602,7 +599,7 @@ static void pci_setup_bridge_mmio_pref(struct pci_bus *bus)
/* Set up PREF base/limit. */
bu = lu = 0;
- res = bus->resource[2];
+ res = &bridge->resource[PCI_BRIDGE_RESOURCES + 2];
pcibios_resource_to_bus(bridge->bus, &region, res);
if (res->flags & IORESOURCE_PREFETCH) {
l = (region.start >> 16) & 0xfff0;
@@ -630,13 +627,13 @@ static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
&bus->busn_res);
if (type & IORESOURCE_IO)
- pci_setup_bridge_io(bus);
+ pci_setup_bridge_io(bridge);
if (type & IORESOURCE_MEM)
- pci_setup_bridge_mmio(bus);
+ pci_setup_bridge_mmio(bridge);
if (type & IORESOURCE_PREFETCH)
- pci_setup_bridge_mmio_pref(bus);
+ pci_setup_bridge_mmio_pref(bridge);
pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
}
@@ -649,6 +646,41 @@ void pci_setup_bridge(struct pci_bus *bus)
__pci_setup_bridge(bus, type);
}
+
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
+{
+ if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
+ return 0;
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed the window */
+
+ if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ return 0;
+
+ if (!pci_bus_clip_resource(bridge, i))
+ return -EINVAL; /* clipping didn't change anything */
+
+ switch (i - PCI_BRIDGE_RESOURCES) {
+ case 0:
+ pci_setup_bridge_io(bridge);
+ break;
+ case 1:
+ pci_setup_bridge_mmio(bridge);
+ break;
+ case 2:
+ pci_setup_bridge_mmio_pref(bridge);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (pci_claim_resource(bridge, i) == 0)
+ return 0; /* claimed a smaller window */
+
+ return -EINVAL;
+}
+
/* Check whether the bridge supports optional I/O and
prefetchable memory ranges. If not, the respective
base/limit registers must be read-only and read as 0. */
diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c
index e34da13885e8..27fa62ce6136 100644
--- a/drivers/phy/phy-miphy28lp.c
+++ b/drivers/phy/phy-miphy28lp.c
@@ -1050,7 +1050,8 @@ static int miphy28lp_init(struct phy *phy)
ret = miphy28lp_init_usb3(miphy_phy);
break;
default:
- return -EINVAL;
+ ret = -EINVAL;
+ break;
}
mutex_unlock(&miphy_dev->miphy_mutex);
diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c
index c96e8183a8ff..efe724f97e02 100644
--- a/drivers/phy/phy-omap-control.c
+++ b/drivers/phy/phy-omap-control.c
@@ -29,10 +29,9 @@
/**
* omap_control_pcie_pcs - set the PCS delay count
* @dev: the control module device
- * @id: index of the pcie PHY (should be 1 or 2)
* @delay: 8 bit delay value
*/
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+void omap_control_pcie_pcs(struct device *dev, u8 delay)
{
u32 val;
struct omap_control_phy *control_phy;
@@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
val = readl(control_phy->pcie_pcs);
val &= ~(OMAP_CTRL_PCIE_PCS_MASK <<
- (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT));
- val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+ OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
+ val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT);
writel(val, control_phy->pcie_pcs);
}
EXPORT_SYMBOL_GPL(omap_control_pcie_pcs);
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index fb02a67c9181..a2b08f3ccb03 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev)
else
data->num_phys = 3;
- if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy"))
+ if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") ||
+ of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy"))
data->disc_thresh = 3;
else
data->disc_thresh = 2;
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 1387b4d4afe3..465de2c800f2 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -82,7 +82,6 @@ struct ti_pipe3 {
struct clk *refclk;
struct clk *div_clk;
struct pipe3_dpll_map *dpll_map;
- u8 id;
};
static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -217,8 +216,13 @@ static int ti_pipe3_init(struct phy *x)
u32 val;
int ret = 0;
+ /*
+ * Set pcie_pcs register to 0x96 for proper functioning of phy
+ * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
+ * 18-1804.
+ */
if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
- omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1);
+ omap_control_pcie_pcs(phy->control_dev, 0x96);
return 0;
}
@@ -347,8 +351,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
}
if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
- if (of_property_read_u8(node, "id", &phy->id) < 0)
- phy->id = 1;
clk = devm_clk_get(phy->dev, "dpll_ref");
if (IS_ERR(clk)) {
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index e4f65510c87e..89dca77ca038 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev)
if (pctldev == NULL)
return;
- mutex_lock(&pinctrldev_list_mutex);
mutex_lock(&pctldev->mutex);
-
pinctrl_remove_device_debugfs(pctldev);
+ mutex_unlock(&pctldev->mutex);
if (!IS_ERR(pctldev->p))
pinctrl_put(pctldev->p);
+ mutex_lock(&pinctrldev_list_mutex);
+ mutex_lock(&pctldev->mutex);
/* TODO: check that no pinmuxes are still active? */
list_del(&pctldev->node);
/* Destroy descriptor tree */
diff --git a/drivers/pinctrl/pinctrl-at91.c b/drivers/pinctrl/pinctrl-at91.c
index dfd021e8268f..f4cd0b9b2438 100644
--- a/drivers/pinctrl/pinctrl-at91.c
+++ b/drivers/pinctrl/pinctrl-at91.c
@@ -177,7 +177,7 @@ struct at91_pinctrl {
struct device *dev;
struct pinctrl_dev *pctl;
- int nbanks;
+ int nactive_banks;
uint32_t *mux_mask;
int nmux;
@@ -653,12 +653,18 @@ static int pin_check_config(struct at91_pinctrl *info, const char *name,
int mux;
/* check if it's a valid config */
- if (pin->bank >= info->nbanks) {
+ if (pin->bank >= gpio_banks) {
dev_err(info->dev, "%s: pin conf %d bank_id %d >= nbanks %d\n",
- name, index, pin->bank, info->nbanks);
+ name, index, pin->bank, gpio_banks);
return -EINVAL;
}
+ if (!gpio_chips[pin->bank]) {
+ dev_err(info->dev, "%s: pin conf %d bank_id %d not enabled\n",
+ name, index, pin->bank);
+ return -ENXIO;
+ }
+
if (pin->pin >= MAX_NB_GPIO_PER_BANK) {
dev_err(info->dev, "%s: pin conf %d pin_bank_id %d >= %d\n",
name, index, pin->pin, MAX_NB_GPIO_PER_BANK);
@@ -981,7 +987,8 @@ static void at91_pinctrl_child_count(struct at91_pinctrl *info,
for_each_child_of_node(np, child) {
if (of_device_is_compatible(child, gpio_compat)) {
- info->nbanks++;
+ if (of_device_is_available(child))
+ info->nactive_banks++;
} else {
info->nfunctions++;
info->ngroups += of_get_child_count(child);
@@ -1003,11 +1010,11 @@ static int at91_pinctrl_mux_mask(struct at91_pinctrl *info,
}
size /= sizeof(*list);
- if (!size || size % info->nbanks) {
- dev_err(info->dev, "wrong mux mask array should be by %d\n", info->nbanks);
+ if (!size || size % gpio_banks) {
+ dev_err(info->dev, "wrong mux mask array should be by %d\n", gpio_banks);
return -EINVAL;
}
- info->nmux = size / info->nbanks;
+ info->nmux = size / gpio_banks;
info->mux_mask = devm_kzalloc(info->dev, sizeof(u32) * size, GFP_KERNEL);
if (!info->mux_mask) {
@@ -1131,7 +1138,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
of_match_device(at91_pinctrl_of_match, &pdev->dev)->data;
at91_pinctrl_child_count(info, np);
- if (info->nbanks < 1) {
+ if (gpio_banks < 1) {
dev_err(&pdev->dev, "you need to specify at least one gpio-controller\n");
return -EINVAL;
}
@@ -1144,7 +1151,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
dev_dbg(&pdev->dev, "mux-mask\n");
tmp = info->mux_mask;
- for (i = 0; i < info->nbanks; i++) {
+ for (i = 0; i < gpio_banks; i++) {
for (j = 0; j < info->nmux; j++, tmp++) {
dev_dbg(&pdev->dev, "%d:%d\t0x%x\n", i, j, tmp[0]);
}
@@ -1162,7 +1169,7 @@ static int at91_pinctrl_probe_dt(struct platform_device *pdev,
if (!info->groups)
return -ENOMEM;
- dev_dbg(&pdev->dev, "nbanks = %d\n", info->nbanks);
+ dev_dbg(&pdev->dev, "nbanks = %d\n", gpio_banks);
dev_dbg(&pdev->dev, "nfunctions = %d\n", info->nfunctions);
dev_dbg(&pdev->dev, "ngroups = %d\n", info->ngroups);
@@ -1185,7 +1192,7 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
{
struct at91_pinctrl *info;
struct pinctrl_pin_desc *pdesc;
- int ret, i, j, k;
+ int ret, i, j, k, ngpio_chips_enabled = 0;
info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
if (!info)
@@ -1200,23 +1207,27 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
* to obtain references to the struct gpio_chip * for them, and we
* need this to proceed.
*/
- for (i = 0; i < info->nbanks; i++) {
- if (!gpio_chips[i]) {
- dev_warn(&pdev->dev, "GPIO chip %d not registered yet\n", i);
- devm_kfree(&pdev->dev, info);
- return -EPROBE_DEFER;
- }
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ ngpio_chips_enabled++;
+
+ if (ngpio_chips_enabled < info->nactive_banks) {
+ dev_warn(&pdev->dev,
+ "All GPIO chips are not registered yet (%d/%d)\n",
+ ngpio_chips_enabled, info->nactive_banks);
+ devm_kfree(&pdev->dev, info);
+ return -EPROBE_DEFER;
}
at91_pinctrl_desc.name = dev_name(&pdev->dev);
- at91_pinctrl_desc.npins = info->nbanks * MAX_NB_GPIO_PER_BANK;
+ at91_pinctrl_desc.npins = gpio_banks * MAX_NB_GPIO_PER_BANK;
at91_pinctrl_desc.pins = pdesc =
devm_kzalloc(&pdev->dev, sizeof(*pdesc) * at91_pinctrl_desc.npins, GFP_KERNEL);
if (!at91_pinctrl_desc.pins)
return -ENOMEM;
- for (i = 0 , k = 0; i < info->nbanks; i++) {
+ for (i = 0, k = 0; i < gpio_banks; i++) {
for (j = 0; j < MAX_NB_GPIO_PER_BANK; j++, k++) {
pdesc->number = k;
pdesc->name = kasprintf(GFP_KERNEL, "pio%c%d", i + 'A', j);
@@ -1234,8 +1245,9 @@ static int at91_pinctrl_probe(struct platform_device *pdev)
}
/* We will handle a range of GPIO pins */
- for (i = 0; i < info->nbanks; i++)
- pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
+ for (i = 0; i < gpio_banks; i++)
+ if (gpio_chips[i])
+ pinctrl_add_gpio_range(info->pctl, &gpio_chips[i]->range);
dev_info(&pdev->dev, "initialized AT91 pinctrl driver\n");
@@ -1613,9 +1625,10 @@ static void gpio_irq_handler(unsigned irq, struct irq_desc *desc)
static int at91_gpio_of_irq_setup(struct platform_device *pdev,
struct at91_gpio_chip *at91_gpio)
{
+ struct gpio_chip *gpiochip_prev = NULL;
struct at91_gpio_chip *prev = NULL;
struct irq_data *d = irq_get_irq_data(at91_gpio->pioc_virq);
- int ret;
+ int ret, i;
at91_gpio->pioc_hwirq = irqd_to_hwirq(d);
@@ -1641,24 +1654,33 @@ static int at91_gpio_of_irq_setup(struct platform_device *pdev,
return ret;
}
- /* Setup chained handler */
- if (at91_gpio->pioc_idx)
- prev = gpio_chips[at91_gpio->pioc_idx - 1];
-
/* The top level handler handles one bank of GPIOs, except
* on some SoC it can handle up to three...
* We only set up the handler for the first of the list.
*/
- if (prev && prev->next == at91_gpio)
+ gpiochip_prev = irq_get_handler_data(at91_gpio->pioc_virq);
+ if (!gpiochip_prev) {
+ /* Then register the chain on the parent IRQ */
+ gpiochip_set_chained_irqchip(&at91_gpio->chip,
+ &gpio_irqchip,
+ at91_gpio->pioc_virq,
+ gpio_irq_handler);
return 0;
+ }
- /* Then register the chain on the parent IRQ */
- gpiochip_set_chained_irqchip(&at91_gpio->chip,
- &gpio_irqchip,
- at91_gpio->pioc_virq,
- gpio_irq_handler);
+ prev = container_of(gpiochip_prev, struct at91_gpio_chip, chip);
- return 0;
+ /* we can only have 2 banks before */
+ for (i = 0; i < 2; i++) {
+ if (prev->next) {
+ prev = prev->next;
+ } else {
+ prev->next = at91_gpio;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
}
/* This structure is replicated for each GPIO block allocated at probe time */
@@ -1675,24 +1697,6 @@ static struct gpio_chip at91_gpio_template = {
.ngpio = MAX_NB_GPIO_PER_BANK,
};
-static void at91_gpio_probe_fixup(void)
-{
- unsigned i;
- struct at91_gpio_chip *at91_gpio, *last = NULL;
-
- for (i = 0; i < gpio_banks; i++) {
- at91_gpio = gpio_chips[i];
-
- /*
- * GPIO controller are grouped on some SoC:
- * PIOC, PIOD and PIOE can share the same IRQ line
- */
- if (last && last->pioc_virq == at91_gpio->pioc_virq)
- last->next = at91_gpio;
- last = at91_gpio;
- }
-}
-
static struct of_device_id at91_gpio_of_match[] = {
{ .compatible = "atmel,at91sam9x5-gpio", .data = &at91sam9x5_ops, },
{ .compatible = "atmel,at91rm9200-gpio", .data = &at91rm9200_ops },
@@ -1805,8 +1809,6 @@ static int at91_gpio_probe(struct platform_device *pdev)
gpio_chips[alias_idx] = at91_chip;
gpio_banks = max(gpio_banks, alias_idx + 1);
- at91_gpio_probe_fixup();
-
ret = at91_gpio_of_irq_setup(pdev, at91_chip);
if (ret)
goto irq_setup_err;
diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c
index ba74f0aa60c7..43eacc924b7e 100644
--- a/drivers/pinctrl/pinctrl-rockchip.c
+++ b/drivers/pinctrl/pinctrl-rockchip.c
@@ -89,6 +89,7 @@ struct rockchip_iomux {
* @reg_pull: optional separate register for additional pull settings
* @clk: clock of the gpio bank
* @irq: interrupt of the gpio bank
+ * @saved_enables: Saved content of GPIO_INTEN at suspend time.
* @pin_base: first pin number
* @nr_pins: number of pins in this bank
* @name: name of the bank
@@ -107,6 +108,7 @@ struct rockchip_pin_bank {
struct regmap *regmap_pull;
struct clk *clk;
int irq;
+ u32 saved_enables;
u32 pin_base;
u8 nr_pins;
char *name;
@@ -1396,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
{
struct irq_chip *chip = irq_get_chip(irq);
struct rockchip_pin_bank *bank = irq_get_handler_data(irq);
- u32 polarity = 0, data = 0;
u32 pend;
- bool edge_changed = false;
- unsigned long flags;
dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name);
@@ -1407,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS);
- if (bank->toggle_edge_mode) {
- polarity = readl_relaxed(bank->reg_base +
- GPIO_INT_POLARITY);
- data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
- }
-
while (pend) {
unsigned int virq;
@@ -1432,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc)
* needs manual intervention.
*/
if (bank->toggle_edge_mode & BIT(irq)) {
- if (data & BIT(irq))
- polarity &= ~BIT(irq);
- else
- polarity |= BIT(irq);
+ u32 data, data_old, polarity;
+ unsigned long flags;
- edge_changed = true;
- }
+ data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT);
+ do {
+ spin_lock_irqsave(&bank->slock, flags);
- generic_handle_irq(virq);
- }
+ polarity = readl_relaxed(bank->reg_base +
+ GPIO_INT_POLARITY);
+ if (data & BIT(irq))
+ polarity &= ~BIT(irq);
+ else
+ polarity |= BIT(irq);
+ writel(polarity,
+ bank->reg_base + GPIO_INT_POLARITY);
- if (bank->toggle_edge_mode && edge_changed) {
- /* Interrupt params should only be set with ints disabled */
- spin_lock_irqsave(&bank->slock, flags);
+ spin_unlock_irqrestore(&bank->slock, flags);
- data = readl_relaxed(bank->reg_base + GPIO_INTEN);
- writel_relaxed(0, bank->reg_base + GPIO_INTEN);
- writel(polarity, bank->reg_base + GPIO_INT_POLARITY);
- writel(data, bank->reg_base + GPIO_INTEN);
+ data_old = data;
+ data = readl_relaxed(bank->reg_base +
+ GPIO_EXT_PORT);
+ } while ((data & BIT(irq)) != (data_old & BIT(irq)));
+ }
- spin_unlock_irqrestore(&bank->slock, flags);
+ generic_handle_irq(virq);
}
chained_irq_exit(chip, desc);
@@ -1543,6 +1540,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type)
return 0;
}
+static void rockchip_irq_suspend(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN);
+ irq_reg_writel(gc, gc->wake_active, GPIO_INTEN);
+}
+
+static void rockchip_irq_resume(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ struct rockchip_pin_bank *bank = gc->private;
+
+ irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN);
+}
+
+static void rockchip_irq_disable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val &= ~d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
+static void rockchip_irq_enable(struct irq_data *d)
+{
+ struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
+ u32 val;
+
+ irq_gc_lock(gc);
+
+ val = irq_reg_readl(gc, GPIO_INTEN);
+ val |= d->mask;
+ irq_reg_writel(gc, val, GPIO_INTEN);
+
+ irq_gc_unlock(gc);
+}
+
static int rockchip_interrupts_register(struct platform_device *pdev,
struct rockchip_pinctrl *info)
{
@@ -1581,12 +1623,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev,
gc = irq_get_domain_generic_chip(bank->domain, 0);
gc->reg_base = bank->reg_base;
gc->private = bank;
- gc->chip_types[0].regs.mask = GPIO_INTEN;
+ gc->chip_types[0].regs.mask = GPIO_INTMASK;
gc->chip_types[0].regs.ack = GPIO_PORTS_EOI;
gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit;
- gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit;
- gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit;
+ gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
+ gc->chip_types[0].chip.irq_enable = rockchip_irq_enable;
+ gc->chip_types[0].chip.irq_disable = rockchip_irq_disable;
gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
+ gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend;
+ gc->chip_types[0].chip.irq_resume = rockchip_irq_resume;
gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type;
gc->wake_enabled = IRQ_MSK(bank->nr_pins);
diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c
index 7c9d51382248..9e5ec00084bb 100644
--- a/drivers/pinctrl/pinctrl-st.c
+++ b/drivers/pinctrl/pinctrl-st.c
@@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev,
struct seq_file *s, unsigned pin_id)
{
unsigned long config;
- st_pinconf_get(pctldev, pin_id, &config);
+ mutex_unlock(&pctldev->mutex);
+ st_pinconf_get(pctldev, pin_id, &config);
+ mutex_lock(&pctldev->mutex);
seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n"
"\t\t[retime:%ld,invclk:%ld,clknotdat:%ld,"
"de:%ld,rt-clk:%ld,rt-delay:%ld]",
@@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = {
static struct irq_chip st_gpio_irqchip = {
.name = "GPIO",
+ .irq_disable = st_gpio_irq_mask,
.irq_mask = st_gpio_irq_mask,
.irq_unmask = st_gpio_irq_unmask,
.irq_set_type = st_gpio_irq_set_type,
diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c
index c5cef59f5965..779950c62e53 100644
--- a/drivers/pinctrl/pinctrl-xway.c
+++ b/drivers/pinctrl/pinctrl-xway.c
@@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev)
/* load the gpio chip */
xway_chip.dev = &pdev->dev;
- of_gpiochip_add(&xway_chip);
ret = gpiochip_add(&xway_chip);
if (ret) {
- of_gpiochip_remove(&xway_chip);
dev_err(&pdev->dev, "Failed to register gpio chip\n");
return ret;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index e730935fa457..ed7017df065d 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action,
static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
{
- int i = 0;
+ int i;
const struct msm_function *func = pctrl->soc->functions;
- for (; i <= pctrl->soc->nfunctions; i++)
+ for (i = 0; i < pctrl->soc->nfunctions; i++)
if (!strcmp(func[i].name, "ps_hold")) {
pctrl->restart_nb.notifier_call = msm_ps_hold_restart;
pctrl->restart_nb.priority = 128;
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index 9411eae39a4e..3d21efe11d7b 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -2,11 +2,9 @@
* Driver for Dell laptop extras
*
* Copyright (c) Red Hat <mjg@redhat.com>
- * Copyright (c) 2014 Gabriele Mazzotta <gabriele.mzt@gmail.com>
- * Copyright (c) 2014 Pali Rohár <pali.rohar@gmail.com>
*
- * Based on documentation in the libsmbios package:
- * Copyright (C) 2005-2014 Dell Inc.
+ * Based on documentation in the libsmbios package, Copyright (C) 2005 Dell
+ * Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -34,13 +32,6 @@
#include "../../firmware/dcdbas.h"
#define BRIGHTNESS_TOKEN 0x7d
-#define KBD_LED_OFF_TOKEN 0x01E1
-#define KBD_LED_ON_TOKEN 0x01E2
-#define KBD_LED_AUTO_TOKEN 0x01E3
-#define KBD_LED_AUTO_25_TOKEN 0x02EA
-#define KBD_LED_AUTO_50_TOKEN 0x02EB
-#define KBD_LED_AUTO_75_TOKEN 0x02EC
-#define KBD_LED_AUTO_100_TOKEN 0x02F6
/* This structure will be modified by the firmware when we enter
* system management mode, hence the volatiles */
@@ -71,13 +62,6 @@ struct calling_interface_structure {
struct quirk_entry {
u8 touchpad_led;
-
- int needs_kbd_timeouts;
- /*
- * Ordered list of timeouts expressed in seconds.
- * The list must end with -1
- */
- int kbd_timeouts[];
};
static struct quirk_entry *quirks;
@@ -92,15 +76,6 @@ static int __init dmi_matched(const struct dmi_system_id *dmi)
return 1;
}
-/*
- * These values come from the Windows utility provided by Dell. If any other
- * value is used, the BIOS silently sets the timeout to 0 without any error message.
- */
-static struct quirk_entry quirk_dell_xps13_9333 = {
- .needs_kbd_timeouts = 1,
- .kbd_timeouts = { 0, 5, 15, 60, 5 * 60, 15 * 60, -1 },
-};
-
static int da_command_address;
static int da_command_code;
static int da_num_tokens;
@@ -292,15 +267,6 @@ static const struct dmi_system_id dell_quirks[] __initconst = {
},
.driver_data = &quirk_dell_vostro_v130,
},
- {
- .callback = dmi_matched,
- .ident = "Dell XPS13 9333",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
- DMI_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
- },
- .driver_data = &quirk_dell_xps13_9333,
- },
{ }
};
@@ -365,29 +331,17 @@ static void __init find_tokens(const struct dmi_header *dm, void *dummy)
}
}
-static int find_token_id(int tokenid)
+static int find_token_location(int tokenid)
{
int i;
-
for (i = 0; i < da_num_tokens; i++) {
if (da_tokens[i].tokenID == tokenid)
- return i;
+ return da_tokens[i].location;
}
return -1;
}
-static int find_token_location(int tokenid)
-{
- int id;
-
- id = find_token_id(tokenid);
- if (id == -1)
- return -1;
-
- return da_tokens[id].location;
-}
-
static struct calling_interface_buffer *
dell_send_request(struct calling_interface_buffer *buffer, int class,
int select)
@@ -408,20 +362,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
return buffer;
}
-static inline int dell_smi_error(int value)
-{
- switch (value) {
- case 0: /* Completed successfully */
- return 0;
- case -1: /* Completed with error */
- return -EIO;
- case -2: /* Function not supported */
- return -ENXIO;
- default: /* Unknown error */
- return -EINVAL;
- }
-}
-
/* Derived from information in DellWirelessCtl.cpp:
Class 17, select 11 is radio control. It returns an array of 32-bit values.
@@ -776,7 +716,7 @@ static int dell_send_intensity(struct backlight_device *bd)
else
dell_send_request(buffer, 1, 1);
- out:
+out:
release_buffer();
return ret;
}
@@ -800,7 +740,7 @@ static int dell_get_intensity(struct backlight_device *bd)
ret = buffer->output[1];
- out:
+out:
release_buffer();
return ret;
}
@@ -849,984 +789,6 @@ static void touchpad_led_exit(void)
led_classdev_unregister(&touchpad_led);
}
-/*
- * Derived from information in smbios-keyboard-ctl:
- *
- * cbClass 4
- * cbSelect 11
- * Keyboard illumination
- * cbArg1 determines the function to be performed
- *
- * cbArg1 0x0 = Get Feature Information
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of user-selectable modes
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * cbRES2, byte2 Reserved for future use
- * cbRES2, byte3 Keyboard illumination type
- * 0 Reserved
- * 1 Tasklight
- * 2 Backlight
- * 3-255 Reserved for future use
- * cbRES3, byte0 Supported auto keyboard illumination trigger bitmap.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES3, byte1 Supported timeout unit bitmap
- * bit 0 Seconds
- * bit 1 Minutes
- * bit 2 Hours
- * bit 3 Days
- * bits 4-7 Reserved for future use
- * cbRES3, byte2 Number of keyboard light brightness levels
- * cbRES4, byte0 Maximum acceptable seconds value (0 if seconds not supported).
- * cbRES4, byte1 Maximum acceptable minutes value (0 if minutes not supported).
- * cbRES4, byte2 Maximum acceptable hours value (0 if hours not supported).
- * cbRES4, byte3 Maximum acceptable days value (0 if days not supported)
- *
- * cbArg1 0x1 = Get Current State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbRES2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only One bit can be set
- * cbRES2, byte2 Currently active auto keyboard illumination triggers.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbRES2, byte3 Current Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * NOTE: A value of 0 means always on (no timeout) if any bits of RES3 byte
- * are set upon return from the [Get feature information] call.
- * cbRES3, byte0 Current setting of ALS value that turns the light on or off.
- * cbRES3, byte1 Current ALS reading
- * cbRES3, byte2 Current keyboard light level.
- *
- * cbArg1 0x2 = Set New State
- * cbRES1 Standard return codes (0, -1, -2)
- * cbArg2, word0 Bitmap of current mode state
- * bit 0 Always off (All systems)
- * bit 1 Always on (Travis ATG, Siberia)
- * bit 2 Auto: ALS-based On; ALS-based Off (Travis ATG)
- * bit 3 Auto: ALS- and input-activity-based On; input-activity based Off
- * bit 4 Auto: Input-activity-based On; input-activity based Off
- * bit 5 Auto: Input-activity-based On (illumination level 25%); input-activity based Off
- * bit 6 Auto: Input-activity-based On (illumination level 50%); input-activity based Off
- * bit 7 Auto: Input-activity-based On (illumination level 75%); input-activity based Off
- * bit 8 Auto: Input-activity-based On (illumination level 100%); input-activity based Off
- * bits 9-15 Reserved for future use
- * Note: Only One bit can be set
- * cbArg2, byte2 Desired auto keyboard illumination triggers. Must remain inactive to allow
- * keyboard to turn off automatically.
- * bit 0 Any keystroke
- * bit 1 Touchpad activity
- * bit 2 Pointing stick
- * bit 3 Any mouse
- * bits 4-7 Reserved for future use
- * cbArg2, byte3 Desired Timeout
- * bits 7:6 Timeout units indicator:
- * 00b Seconds
- * 01b Minutes
- * 10b Hours
- * 11b Days
- * bits 5:0 Timeout value (0-63) in sec/min/hr/day
- * cbArg3, byte0 Desired setting of ALS value that turns the light on or off.
- * cbArg3, byte2 Desired keyboard light level.
- */
-
-
-enum kbd_timeout_unit {
- KBD_TIMEOUT_SECONDS = 0,
- KBD_TIMEOUT_MINUTES,
- KBD_TIMEOUT_HOURS,
- KBD_TIMEOUT_DAYS,
-};
-
-enum kbd_mode_bit {
- KBD_MODE_BIT_OFF = 0,
- KBD_MODE_BIT_ON,
- KBD_MODE_BIT_ALS,
- KBD_MODE_BIT_TRIGGER_ALS,
- KBD_MODE_BIT_TRIGGER,
- KBD_MODE_BIT_TRIGGER_25,
- KBD_MODE_BIT_TRIGGER_50,
- KBD_MODE_BIT_TRIGGER_75,
- KBD_MODE_BIT_TRIGGER_100,
-};
-
-#define kbd_is_als_mode_bit(bit) \
- ((bit) == KBD_MODE_BIT_ALS || (bit) == KBD_MODE_BIT_TRIGGER_ALS)
-#define kbd_is_trigger_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_ALS && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-#define kbd_is_level_mode_bit(bit) \
- ((bit) >= KBD_MODE_BIT_TRIGGER_25 && (bit) <= KBD_MODE_BIT_TRIGGER_100)
-
-struct kbd_info {
- u16 modes;
- u8 type;
- u8 triggers;
- u8 levels;
- u8 seconds;
- u8 minutes;
- u8 hours;
- u8 days;
-};
-
-struct kbd_state {
- u8 mode_bit;
- u8 triggers;
- u8 timeout_value;
- u8 timeout_unit;
- u8 als_setting;
- u8 als_value;
- u8 level;
-};
-
-static const int kbd_tokens[] = {
- KBD_LED_OFF_TOKEN,
- KBD_LED_AUTO_25_TOKEN,
- KBD_LED_AUTO_50_TOKEN,
- KBD_LED_AUTO_75_TOKEN,
- KBD_LED_AUTO_100_TOKEN,
- KBD_LED_ON_TOKEN,
-};
-
-static u16 kbd_token_bits;
-
-static struct kbd_info kbd_info;
-static bool kbd_als_supported;
-static bool kbd_triggers_supported;
-
-static u8 kbd_mode_levels[16];
-static int kbd_mode_levels_count;
-
-static u8 kbd_previous_level;
-static u8 kbd_previous_mode_bit;
-
-static bool kbd_led_present;
-
-/*
- * NOTE: there are three ways to set the keyboard backlight level.
- * First, via kbd_state.mode_bit (assigning KBD_MODE_BIT_TRIGGER_* value).
- * Second, via kbd_state.level (assigning numerical value <= kbd_info.levels).
- * Third, via SMBIOS tokens (KBD_LED_* in kbd_tokens)
- *
- * There are laptops which support only one of these methods. If we want to
- * support as many machines as possible we need to implement all three methods.
- * The first two methods use the kbd_state structure. The third uses SMBIOS
- * tokens. If kbd_info.levels == 0, the machine does not support setting the
- * keyboard backlight level via kbd_state.level.
- */
-
-static int kbd_get_info(struct kbd_info *info)
-{
- u8 units;
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x0;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- info->modes = buffer->output[1] & 0xFFFF;
- info->type = (buffer->output[1] >> 24) & 0xFF;
- info->triggers = buffer->output[2] & 0xFF;
- units = (buffer->output[2] >> 8) & 0xFF;
- info->levels = (buffer->output[2] >> 16) & 0xFF;
-
- if (units & BIT(0))
- info->seconds = (buffer->output[3] >> 0) & 0xFF;
- if (units & BIT(1))
- info->minutes = (buffer->output[3] >> 8) & 0xFF;
- if (units & BIT(2))
- info->hours = (buffer->output[3] >> 16) & 0xFF;
- if (units & BIT(3))
- info->days = (buffer->output[3] >> 24) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static unsigned int kbd_get_max_level(void)
-{
- if (kbd_info.levels != 0)
- return kbd_info.levels;
- if (kbd_mode_levels_count > 0)
- return kbd_mode_levels_count - 1;
- return 0;
-}
-
-static int kbd_get_level(struct kbd_state *state)
-{
- int i;
-
- if (kbd_info.levels != 0)
- return state->level;
-
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < kbd_mode_levels_count; ++i)
- if (kbd_mode_levels[i] == state->mode_bit)
- return i;
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_set_level(struct kbd_state *state, u8 level)
-{
- if (kbd_info.levels != 0) {
- if (level != 0)
- kbd_previous_level = level;
- if (state->level == level)
- return 0;
- state->level = level;
- if (level != 0 && state->mode_bit == KBD_MODE_BIT_OFF)
- state->mode_bit = kbd_previous_mode_bit;
- else if (level == 0 && state->mode_bit != KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit = state->mode_bit;
- state->mode_bit = KBD_MODE_BIT_OFF;
- }
- return 0;
- }
-
- if (kbd_mode_levels_count > 0 && level < kbd_mode_levels_count) {
- if (level != 0)
- kbd_previous_level = level;
- state->mode_bit = kbd_mode_levels[level];
- return 0;
- }
-
- return -EINVAL;
-}
-
-static int kbd_get_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
-
- buffer->input[0] = 0x1;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
-
- if (ret) {
- ret = dell_smi_error(ret);
- goto out;
- }
-
- state->mode_bit = ffs(buffer->output[1] & 0xFFFF);
- if (state->mode_bit != 0)
- state->mode_bit--;
-
- state->triggers = (buffer->output[1] >> 16) & 0xFF;
- state->timeout_value = (buffer->output[1] >> 24) & 0x3F;
- state->timeout_unit = (buffer->output[1] >> 30) & 0x3;
- state->als_setting = buffer->output[2] & 0xFF;
- state->als_value = (buffer->output[2] >> 8) & 0xFF;
- state->level = (buffer->output[2] >> 16) & 0xFF;
-
- out:
- release_buffer();
- return ret;
-}
-
-static int kbd_set_state(struct kbd_state *state)
-{
- int ret;
-
- get_buffer();
- buffer->input[0] = 0x2;
- buffer->input[1] = BIT(state->mode_bit) & 0xFFFF;
- buffer->input[1] |= (state->triggers & 0xFF) << 16;
- buffer->input[1] |= (state->timeout_value & 0x3F) << 24;
- buffer->input[1] |= (state->timeout_unit & 0x3) << 30;
- buffer->input[2] = state->als_setting & 0xFF;
- buffer->input[2] |= (state->level & 0xFF) << 16;
- dell_send_request(buffer, 4, 11);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_set_state_safe(struct kbd_state *state, struct kbd_state *old)
-{
- int ret;
-
- ret = kbd_set_state(state);
- if (ret == 0)
- return 0;
-
- /*
- * When setting the new state fails, try to restore the previous one.
- * This is needed on some machines where BIOS sets a default state when
- * setting a new state fails. This default state could be all off.
- */
-
- if (kbd_set_state(old))
- pr_err("Setting old previous keyboard state failed\n");
-
- return ret;
-}
-
-static int kbd_set_token_bit(u8 bit)
-{
- int id;
- int ret;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- buffer->input[1] = da_tokens[id].value;
- dell_send_request(buffer, 1, 0);
- ret = buffer->output[0];
- release_buffer();
-
- return dell_smi_error(ret);
-}
-
-static int kbd_get_token_bit(u8 bit)
-{
- int id;
- int ret;
- int val;
-
- if (bit >= ARRAY_SIZE(kbd_tokens))
- return -EINVAL;
-
- id = find_token_id(kbd_tokens[bit]);
- if (id == -1)
- return -EINVAL;
-
- get_buffer();
- buffer->input[0] = da_tokens[id].location;
- dell_send_request(buffer, 0, 0);
- ret = buffer->output[0];
- val = buffer->output[1];
- release_buffer();
-
- if (ret)
- return dell_smi_error(ret);
-
- return (val == da_tokens[id].value);
-}
-
-static int kbd_get_first_active_token_bit(void)
-{
- int i;
- int ret;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i) {
- ret = kbd_get_token_bit(i);
- if (ret == 1)
- return i;
- }
-
- return ret;
-}
-
-static int kbd_get_valid_token_counts(void)
-{
- return hweight16(kbd_token_bits);
-}
-
-static inline int kbd_init_info(void)
-{
- struct kbd_state state;
- int ret;
- int i;
-
- ret = kbd_get_info(&kbd_info);
- if (ret)
- return ret;
-
- kbd_get_state(&state);
-
- /* NOTE: timeout value is stored in 6 bits so max value is 63 */
- if (kbd_info.seconds > 63)
- kbd_info.seconds = 63;
- if (kbd_info.minutes > 63)
- kbd_info.minutes = 63;
- if (kbd_info.hours > 63)
- kbd_info.hours = 63;
- if (kbd_info.days > 63)
- kbd_info.days = 63;
-
- /* NOTE: On tested machines ON mode did not work and caused
- * problems (turned backlight off) so do not use it
- */
- kbd_info.modes &= ~BIT(KBD_MODE_BIT_ON);
-
- kbd_previous_level = kbd_get_level(&state);
- kbd_previous_mode_bit = state.mode_bit;
-
- if (kbd_previous_level == 0 && kbd_get_max_level() != 0)
- kbd_previous_level = 1;
-
- if (kbd_previous_mode_bit == KBD_MODE_BIT_OFF) {
- kbd_previous_mode_bit =
- ffs(kbd_info.modes & ~BIT(KBD_MODE_BIT_OFF));
- if (kbd_previous_mode_bit != 0)
- kbd_previous_mode_bit--;
- }
-
- if (kbd_info.modes & (BIT(KBD_MODE_BIT_ALS) |
- BIT(KBD_MODE_BIT_TRIGGER_ALS)))
- kbd_als_supported = true;
-
- if (kbd_info.modes & (
- BIT(KBD_MODE_BIT_TRIGGER_ALS) | BIT(KBD_MODE_BIT_TRIGGER) |
- BIT(KBD_MODE_BIT_TRIGGER_25) | BIT(KBD_MODE_BIT_TRIGGER_50) |
- BIT(KBD_MODE_BIT_TRIGGER_75) | BIT(KBD_MODE_BIT_TRIGGER_100)
- ))
- kbd_triggers_supported = true;
-
- /* kbd_mode_levels[0] is reserved, see below */
- for (i = 0; i < 16; ++i)
- if (kbd_is_level_mode_bit(i) && (BIT(i) & kbd_info.modes))
- kbd_mode_levels[1 + kbd_mode_levels_count++] = i;
-
- /*
- * Find the first supported mode and assign to kbd_mode_levels[0].
- * This should be 0 (off), but we cannot depend on the BIOS to
- * support 0.
- */
- if (kbd_mode_levels_count > 0) {
- for (i = 0; i < 16; ++i) {
- if (BIT(i) & kbd_info.modes) {
- kbd_mode_levels[0] = i;
- break;
- }
- }
- kbd_mode_levels_count++;
- }
-
- return 0;
-
-}
-
-static inline void kbd_init_tokens(void)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(kbd_tokens); ++i)
- if (find_token_id(kbd_tokens[i]) != -1)
- kbd_token_bits |= BIT(i);
-}
-
-static void kbd_init(void)
-{
- int ret;
-
- ret = kbd_init_info();
- kbd_init_tokens();
-
- if (kbd_token_bits != 0 || ret == 0)
- kbd_led_present = true;
-}
-
-static ssize_t kbd_led_timeout_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool convert;
- int value;
- int ret;
- char ch;
- u8 unit;
- int i;
-
- ret = sscanf(buf, "%d %c", &value, &ch);
- if (ret < 1)
- return -EINVAL;
- else if (ret == 1)
- ch = 's';
-
- if (value < 0)
- return -EINVAL;
-
- convert = false;
-
- switch (ch) {
- case 's':
- if (value > kbd_info.seconds)
- convert = true;
- unit = KBD_TIMEOUT_SECONDS;
- break;
- case 'm':
- if (value > kbd_info.minutes)
- convert = true;
- unit = KBD_TIMEOUT_MINUTES;
- break;
- case 'h':
- if (value > kbd_info.hours)
- convert = true;
- unit = KBD_TIMEOUT_HOURS;
- break;
- case 'd':
- if (value > kbd_info.days)
- convert = true;
- unit = KBD_TIMEOUT_DAYS;
- break;
- default:
- return -EINVAL;
- }
-
- if (quirks && quirks->needs_kbd_timeouts)
- convert = true;
-
- if (convert) {
- /* Convert value from current units to seconds */
- switch (unit) {
- case KBD_TIMEOUT_DAYS:
- value *= 24;
- case KBD_TIMEOUT_HOURS:
- value *= 60;
- case KBD_TIMEOUT_MINUTES:
- value *= 60;
- unit = KBD_TIMEOUT_SECONDS;
- }
-
- if (quirks && quirks->needs_kbd_timeouts) {
- for (i = 0; quirks->kbd_timeouts[i] != -1; i++) {
- if (value <= quirks->kbd_timeouts[i]) {
- value = quirks->kbd_timeouts[i];
- break;
- }
- }
- }
-
- if (value <= kbd_info.seconds && kbd_info.seconds) {
- unit = KBD_TIMEOUT_SECONDS;
- } else if (value / 60 <= kbd_info.minutes && kbd_info.minutes) {
- value /= 60;
- unit = KBD_TIMEOUT_MINUTES;
- } else if (value / (60 * 60) <= kbd_info.hours && kbd_info.hours) {
- value /= (60 * 60);
- unit = KBD_TIMEOUT_HOURS;
- } else if (value / (60 * 60 * 24) <= kbd_info.days && kbd_info.days) {
- value /= (60 * 60 * 24);
- unit = KBD_TIMEOUT_DAYS;
- } else {
- return -EINVAL;
- }
- }
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.timeout_value = value;
- new_state.timeout_unit = unit;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t kbd_led_timeout_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
- int len;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = sprintf(buf, "%d", state.timeout_value);
-
- switch (state.timeout_unit) {
- case KBD_TIMEOUT_SECONDS:
- return len + sprintf(buf+len, "s\n");
- case KBD_TIMEOUT_MINUTES:
- return len + sprintf(buf+len, "m\n");
- case KBD_TIMEOUT_HOURS:
- return len + sprintf(buf+len, "h\n");
- case KBD_TIMEOUT_DAYS:
- return len + sprintf(buf+len, "d\n");
- default:
- return -EINVAL;
- }
-
- return len;
-}
-
-static DEVICE_ATTR(stop_timeout, S_IRUGO | S_IWUSR,
- kbd_led_timeout_show, kbd_led_timeout_store);
-
-static const char * const kbd_led_triggers[] = {
- "keyboard",
- "touchpad",
- /*"trackstick"*/ NULL, /* NOTE: trackstick is just alias for touchpad */
- "mouse",
-};
-
-static ssize_t kbd_led_triggers_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state new_state;
- struct kbd_state state;
- bool triggers_enabled = false;
- bool als_enabled = false;
- bool disable_als = false;
- bool enable_als = false;
- int trigger_bit = -1;
- char trigger[21];
- int i, ret;
-
- ret = sscanf(buf, "%20s", trigger);
- if (ret != 1)
- return -EINVAL;
-
- if (trigger[0] != '+' && trigger[0] != '-')
- return -EINVAL;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- if (kbd_als_supported)
- als_enabled = kbd_is_als_mode_bit(state.mode_bit);
-
- if (kbd_triggers_supported)
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
-
- if (kbd_als_supported) {
- if (strcmp(trigger, "+als") == 0) {
- if (als_enabled)
- return count;
- enable_als = true;
- } else if (strcmp(trigger, "-als") == 0) {
- if (!als_enabled)
- return count;
- disable_als = true;
- }
- }
-
- if (enable_als || disable_als) {
- new_state = state;
- if (enable_als) {
- if (triggers_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- } else {
- if (triggers_enabled) {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- } else {
- new_state.mode_bit = KBD_MODE_BIT_ON;
- }
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- if (kbd_triggers_supported) {
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if (strcmp(trigger+1, kbd_led_triggers[i]) != 0)
- continue;
- if (trigger[0] == '+' &&
- triggers_enabled && (state.triggers & BIT(i)))
- return count;
- if (trigger[0] == '-' &&
- (!triggers_enabled || !(state.triggers & BIT(i))))
- return count;
- trigger_bit = i;
- break;
- }
- }
-
- if (trigger_bit != -1) {
- new_state = state;
- if (trigger[0] == '+')
- new_state.triggers |= BIT(trigger_bit);
- else {
- new_state.triggers &= ~BIT(trigger_bit);
- /* NOTE: trackstick bit (2) must be disabled when
- * disabling touchpad bit (1), otherwise touchpad
- * bit (1) will not be disabled */
- if (trigger_bit == 1)
- new_state.triggers &= ~BIT(2);
- }
- if ((kbd_info.triggers & new_state.triggers) !=
- new_state.triggers)
- return -EINVAL;
- if (new_state.triggers && !triggers_enabled) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER_ALS;
- else {
- new_state.mode_bit = KBD_MODE_BIT_TRIGGER;
- kbd_set_level(&new_state, kbd_previous_level);
- }
- } else if (new_state.triggers == 0) {
- if (als_enabled)
- new_state.mode_bit = KBD_MODE_BIT_ALS;
- else
- kbd_set_level(&new_state, 0);
- }
- if (!(kbd_info.modes & BIT(new_state.mode_bit)))
- return -EINVAL;
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
- if (new_state.mode_bit != KBD_MODE_BIT_OFF)
- kbd_previous_mode_bit = new_state.mode_bit;
- return count;
- }
-
- return -EINVAL;
-}
-
-static ssize_t kbd_led_triggers_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- bool triggers_enabled;
- int level, i, ret;
- int len = 0;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- len = 0;
-
- if (kbd_triggers_supported) {
- triggers_enabled = kbd_is_trigger_mode_bit(state.mode_bit);
- level = kbd_get_level(&state);
- for (i = 0; i < ARRAY_SIZE(kbd_led_triggers); ++i) {
- if (!(kbd_info.triggers & BIT(i)))
- continue;
- if (!kbd_led_triggers[i])
- continue;
- if ((triggers_enabled || level <= 0) &&
- (state.triggers & BIT(i)))
- buf[len++] = '+';
- else
- buf[len++] = '-';
- len += sprintf(buf+len, "%s ", kbd_led_triggers[i]);
- }
- }
-
- if (kbd_als_supported) {
- if (kbd_is_als_mode_bit(state.mode_bit))
- len += sprintf(buf+len, "+als ");
- else
- len += sprintf(buf+len, "-als ");
- }
-
- if (len)
- buf[len - 1] = '\n';
-
- return len;
-}
-
-static DEVICE_ATTR(start_triggers, S_IRUGO | S_IWUSR,
- kbd_led_triggers_show, kbd_led_triggers_store);
-
-static ssize_t kbd_led_als_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u8 setting;
- int ret;
-
- ret = kstrtou8(buf, 10, &setting);
- if (ret)
- return ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- new_state = state;
- new_state.als_setting = setting;
-
- ret = kbd_set_state_safe(&new_state, &state);
- if (ret)
- return ret;
-
- return count;
-}
-
-static ssize_t kbd_led_als_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct kbd_state state;
- int ret;
-
- ret = kbd_get_state(&state);
- if (ret)
- return ret;
-
- return sprintf(buf, "%d\n", state.als_setting);
-}
-
-static DEVICE_ATTR(als_setting, S_IRUGO | S_IWUSR,
- kbd_led_als_show, kbd_led_als_store);
-
-static struct attribute *kbd_led_attrs[] = {
- &dev_attr_stop_timeout.attr,
- &dev_attr_start_triggers.attr,
- &dev_attr_als_setting.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(kbd_led);
-
-static enum led_brightness kbd_led_level_get(struct led_classdev *led_cdev)
-{
- int ret;
- u16 num;
- struct kbd_state state;
-
- if (kbd_get_max_level()) {
- ret = kbd_get_state(&state);
- if (ret)
- return 0;
- ret = kbd_get_level(&state);
- if (ret < 0)
- return 0;
- return ret;
- }
-
- if (kbd_get_valid_token_counts()) {
- ret = kbd_get_first_active_token_bit();
- if (ret < 0)
- return 0;
- for (num = kbd_token_bits; num != 0 && ret > 0; --ret)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return 0;
- return ffs(num) - 1;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
- return 0;
-}
-
-static void kbd_led_level_set(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- struct kbd_state state;
- struct kbd_state new_state;
- u16 num;
-
- if (kbd_get_max_level()) {
- if (kbd_get_state(&state))
- return;
- new_state = state;
- if (kbd_set_level(&new_state, value))
- return;
- kbd_set_state_safe(&new_state, &state);
- return;
- }
-
- if (kbd_get_valid_token_counts()) {
- for (num = kbd_token_bits; num != 0 && value > 0; --value)
- num &= num - 1; /* clear the first bit set */
- if (num == 0)
- return;
- kbd_set_token_bit(ffs(num) - 1);
- return;
- }
-
- pr_warn("Keyboard brightness level control not supported\n");
-}
-
-static struct led_classdev kbd_led = {
- .name = "dell::kbd_backlight",
- .brightness_set = kbd_led_level_set,
- .brightness_get = kbd_led_level_get,
- .groups = kbd_led_groups,
-};
-
-static int __init kbd_led_init(struct device *dev)
-{
- kbd_init();
- if (!kbd_led_present)
- return -ENODEV;
- kbd_led.max_brightness = kbd_get_max_level();
- if (!kbd_led.max_brightness) {
- kbd_led.max_brightness = kbd_get_valid_token_counts();
- if (kbd_led.max_brightness)
- kbd_led.max_brightness--;
- }
- return led_classdev_register(dev, &kbd_led);
-}
-
-static void brightness_set_exit(struct led_classdev *led_cdev,
- enum led_brightness value)
-{
- /* Don't change backlight level on exit */
-};
-
-static void kbd_led_exit(void)
-{
- if (!kbd_led_present)
- return;
- kbd_led.brightness_set = brightness_set_exit;
- led_classdev_unregister(&kbd_led);
-}
-
static int __init dell_init(void)
{
int max_intensity = 0;
@@ -1879,8 +841,6 @@ static int __init dell_init(void)
if (quirks && quirks->touchpad_led)
touchpad_led_init(&platform_device->dev);
- kbd_led_init(&platform_device->dev);
-
dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
if (dell_laptop_dir != NULL)
debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
@@ -1948,7 +908,6 @@ static void __exit dell_exit(void)
debugfs_remove_recursive(dell_laptop_dir);
if (quirks && quirks->touchpad_led)
touchpad_led_exit();
- kbd_led_exit();
i8042_remove_filter(dell_laptop_i8042_filter);
cancel_delayed_work_sync(&dell_rfkill_work);
backlight_device_unregister(dell_backlight_device);
@@ -1965,7 +924,5 @@ module_init(dell_init);
module_exit(dell_exit);
MODULE_AUTHOR("Matthew Garrett <mjg@redhat.com>");
-MODULE_AUTHOR("Gabriele Mazzotta <gabriele.mzt@gmail.com>");
-MODULE_AUTHOR("Pali Rohár <pali.rohar@gmail.com>");
MODULE_DESCRIPTION("Dell laptop driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index c71443c4f265..97b5e4ee1ca4 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -1041,6 +1041,7 @@ static const struct x86_cpu_id rapl_ids[] = {
RAPL_CPU(0x45, rapl_defaults_core),/* Haswell ULT */
RAPL_CPU(0x4C, rapl_defaults_atom),/* Braswell */
RAPL_CPU(0x4A, rapl_defaults_atom),/* Tangier */
+ RAPL_CPU(0x56, rapl_defaults_core),/* Future Xeon */
RAPL_CPU(0x5A, rapl_defaults_atom),/* Annidale */
{}
};
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index e225711bb8bc..9c48fb32f660 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -1488,7 +1488,7 @@ struct regulator *regulator_get_optional(struct device *dev, const char *id)
}
EXPORT_SYMBOL_GPL(regulator_get_optional);
-/* Locks held by regulator_put() */
+/* regulator_list_mutex lock held by regulator_put() */
static void _regulator_put(struct regulator *regulator)
{
struct regulator_dev *rdev;
@@ -1503,12 +1503,14 @@ static void _regulator_put(struct regulator *regulator)
/* remove any sysfs entries */
if (regulator->dev)
sysfs_remove_link(&rdev->dev.kobj, regulator->supply_name);
+ mutex_lock(&rdev->mutex);
kfree(regulator->supply_name);
list_del(&regulator->list);
kfree(regulator);
rdev->open_count--;
rdev->exclusive = 0;
+ mutex_unlock(&rdev->mutex);
module_put(rdev->owner);
}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index c1444c3d84c2..ff828117798f 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -405,6 +405,40 @@ static struct regulator_ops s2mps14_reg_ops;
.enable_mask = S2MPS14_ENABLE_MASK \
}
+#define regulator_desc_s2mps13_buck7(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num - 1) * 2, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
+#define regulator_desc_s2mps13_buck8_10(num, min, step, min_sel) { \
+ .name = "BUCK"#num, \
+ .id = S2MPS13_BUCK##num, \
+ .ops = &s2mps14_reg_ops, \
+ .type = REGULATOR_VOLTAGE, \
+ .owner = THIS_MODULE, \
+ .min_uV = min, \
+ .uV_step = step, \
+ .linear_min_sel = min_sel, \
+ .n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
+ .ramp_delay = S2MPS13_BUCK_RAMP_DELAY, \
+ .vsel_reg = S2MPS13_REG_B1OUT + (num) * 2 - 1, \
+ .vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
+ .enable_reg = S2MPS13_REG_B1CTRL + (num) * 2 - 1, \
+ .enable_mask = S2MPS14_ENABLE_MASK \
+}
+
static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_ldo(1, MIN_800_MV, STEP_12_5_MV, 0x00),
regulator_desc_s2mps13_ldo(2, MIN_1400_MV, STEP_50_MV, 0x0C),
@@ -452,10 +486,10 @@ static const struct regulator_desc s2mps13_regulators[] = {
regulator_desc_s2mps13_buck(4, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(5, MIN_500_MV, STEP_6_25_MV, 0x10),
regulator_desc_s2mps13_buck(6, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(7, MIN_500_MV, STEP_6_25_MV, 0x10),
- regulator_desc_s2mps13_buck(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
- regulator_desc_s2mps13_buck(10, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck7(7, MIN_500_MV, STEP_6_25_MV, 0x10),
+ regulator_desc_s2mps13_buck8_10(8, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(9, MIN_1000_MV, STEP_12_5_MV, 0x20),
+ regulator_desc_s2mps13_buck8_10(10, MIN_500_MV, STEP_6_25_MV, 0x10),
};
static int s2mps14_regulator_enable(struct regulator_dev *rdev)
@@ -570,7 +604,7 @@ static struct regulator_ops s2mps14_reg_ops = {
.enable_mask = S2MPS14_ENABLE_MASK \
}
-#define regulator_desc_s2mps14_buck(num, min, step) { \
+#define regulator_desc_s2mps14_buck(num, min, step, min_sel) { \
.name = "BUCK"#num, \
.id = S2MPS14_BUCK##num, \
.ops = &s2mps14_reg_ops, \
@@ -579,7 +613,7 @@ static struct regulator_ops s2mps14_reg_ops = {
.min_uV = min, \
.uV_step = step, \
.n_voltages = S2MPS14_BUCK_N_VOLTAGES, \
- .linear_min_sel = S2MPS14_BUCK1235_START_SEL, \
+ .linear_min_sel = min_sel, \
.ramp_delay = S2MPS14_BUCK_RAMP_DELAY, \
.vsel_reg = S2MPS14_REG_B1CTRL2 + (num - 1) * 2, \
.vsel_mask = S2MPS14_BUCK_VSEL_MASK, \
@@ -613,11 +647,16 @@ static const struct regulator_desc s2mps14_regulators[] = {
regulator_desc_s2mps14_ldo(23, MIN_800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(24, MIN_1800_MV, STEP_25_MV),
regulator_desc_s2mps14_ldo(25, MIN_1800_MV, STEP_25_MV),
- regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV),
- regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV),
- regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV),
+ regulator_desc_s2mps14_buck(1, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(2, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(3, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
+ regulator_desc_s2mps14_buck(4, MIN_1400_MV, STEP_12_5_MV,
+ S2MPS14_BUCK4_START_SEL),
+ regulator_desc_s2mps14_buck(5, MIN_600_MV, STEP_6_25_MV,
+ S2MPS14_BUCK1235_START_SEL),
};
static int s2mps14_pmic_enable_ext_control(struct s2mps11_info *s2mps11,
diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c
index eebc52cb6984..3d95c87160b3 100644
--- a/drivers/reset/reset-sunxi.c
+++ b/drivers/reset/reset-sunxi.c
@@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np)
goto err_alloc;
}
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = size * 32;
data->rcdev.ops = &sunxi_reset_ops;
@@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev)
if (IS_ERR(data->membase))
return PTR_ERR(data->membase);
+ spin_lock_init(&data->lock);
+
data->rcdev.owner = THIS_MODULE;
data->rcdev.nr_resets = resource_size(res) * 32;
data->rcdev.ops = &sunxi_reset_ops;
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b5e7c4670205..89ac1d5083c6 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -832,6 +832,7 @@ static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", S5M8767X },
{ "s2mps14-rtc", S2MPS14X },
+ { },
};
static struct platform_driver s5m_rtc_driver = {
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 91e97ec01418..4d41bf75c233 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id)
*/
static inline int ap_test_config_domain(unsigned int domain)
{
- if (!ap_configuration)
- return 1;
- return ap_test_config(ap_configuration->aqm, domain);
+ if (!ap_configuration) /* QCI not supported */
+ if (domain < 16)
+ return 1; /* then domains 0...15 are configured */
+ else
+ return 0;
+ else
+ return ap_test_config(ap_configuration->aqm, domain);
}
/**
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index f407e3763432..642c77c76b84 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1784,6 +1784,8 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxanswr");
card = CARD_FROM_CDEV(channel->ccwdev);
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, READ_CCW, sizeof(struct ccw1));
channel->ccw.count = QETH_BUFSIZE;
@@ -1834,6 +1836,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel,
QETH_DBF_TEXT(SETUP, 2, "idxactch");
iob = qeth_get_buffer(channel);
+ if (!iob)
+ return -ENOMEM;
iob->callback = idx_reply_cb;
memcpy(&channel->ccw, WRITE_CCW, sizeof(struct ccw1));
channel->ccw.count = IDX_ACTIVATE_SIZE;
@@ -2021,10 +2025,36 @@ void qeth_prepare_control_data(struct qeth_card *card, int len,
}
EXPORT_SYMBOL_GPL(qeth_prepare_control_data);
+/**
+ * qeth_send_control_data() - send control command to the card
+ * @card: qeth_card structure pointer
+ * @len: size of the command buffer
+ * @iob: qeth_cmd_buffer pointer
+ * @reply_cb: callback function pointer
+ * @cb_card: pointer to the qeth_card structure
+ * @cb_reply: pointer to the qeth_reply structure
+ * @cb_cmd: pointer to the original iob for non-IPA
+ * commands, or to the qeth_ipa_cmd structure
+ * for the IPA commands.
+ * @reply_param: private pointer passed to the callback
+ *
+ * Returns the value of the `return_code' field of the response
+ * block returned from the hardware, or other error indication.
+ * Value of zero indicates successful execution of the command.
+ *
+ * Callback function gets called one or more times, with cb_cmd
+ * pointing to the response returned by the hardware. Callback
+ * function must return non-zero if more reply blocks are expected,
+ * and zero if the last or only reply block is received. Callback
+ * function can get the value of the reply_param pointer from the
+ * field 'param' of the structure qeth_reply.
+ */
+
int qeth_send_control_data(struct qeth_card *card, int len,
struct qeth_cmd_buffer *iob,
- int (*reply_cb)(struct qeth_card *, struct qeth_reply *,
- unsigned long),
+ int (*reply_cb)(struct qeth_card *cb_card,
+ struct qeth_reply *cb_reply,
+ unsigned long cb_cmd),
void *reply_param)
{
int rc;
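For readers of this hunk: the kernel-doc added above describes the reply_cb contract used by qeth_send_control_data(). The following is a minimal illustrative sketch of such a callback, not part of the patch; the function name example_reply_cb and the use of an int pointer as reply_param are assumptions for illustration, while the types (struct qeth_card, struct qeth_reply with its 'param' field, struct qeth_ipa_cmd with hdr.return_code) are those already used in this driver.

/*
 * Editor's sketch only (not part of the patch): a minimal reply_cb that
 * follows the contract documented above.
 */
static int example_reply_cb(struct qeth_card *cb_card,
			    struct qeth_reply *cb_reply,
			    unsigned long cb_cmd)
{
	struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) cb_cmd;
	int *result = (int *) cb_reply->param;	/* private data passed as reply_param */

	*result = cmd->hdr.return_code;		/* record the hardware's return code */
	return 0;				/* 0: this was the last (or only) reply block */
}

Returning non-zero here instead would, per the documentation above, tell qeth_send_control_data() that further reply blocks are expected for this command.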
@@ -2914,9 +2944,16 @@ struct qeth_cmd_buffer *qeth_get_ipacmd_buffer(struct qeth_card *card,
struct qeth_cmd_buffer *iob;
struct qeth_ipa_cmd *cmd;
- iob = qeth_wait_for_buffer(&card->write);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ iob = qeth_get_buffer(&card->write);
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ qeth_fill_ipacmd_header(card, cmd, ipacmd, prot);
+ } else {
+ dev_warn(&card->gdev->dev,
+ "The qeth driver ran out of channel command buffers\n");
+ QETH_DBF_MESSAGE(1, "%s The qeth driver ran out of channel command buffers",
+ dev_name(&card->gdev->dev));
+ }
return iob;
}
@@ -2932,6 +2969,12 @@ void qeth_prepare_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_prepare_ipa_cmd);
+/**
+ * qeth_send_ipa_cmd() - send an IPA command
+ *
+ * See qeth_send_control_data() for explanation of the arguments.
+ */
+
int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
int (*reply_cb)(struct qeth_card *, struct qeth_reply*,
unsigned long),
@@ -2968,6 +3011,8 @@ int qeth_send_startlan(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "strtlan");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_STARTLAN, 0);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
@@ -3013,11 +3058,13 @@ static struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *card,
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETADAPTERPARMS,
QETH_PROT_IPV4);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
- cmd->data.setadapterparms.hdr.command_code = command;
- cmd->data.setadapterparms.hdr.used_total = 1;
- cmd->data.setadapterparms.hdr.seq_no = 1;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setadapterparms.hdr.cmdlength = cmdlen;
+ cmd->data.setadapterparms.hdr.command_code = command;
+ cmd->data.setadapterparms.hdr.used_total = 1;
+ cmd->data.setadapterparms.hdr.seq_no = 1;
+ }
return iob;
}
@@ -3030,6 +3077,8 @@ int qeth_query_setadapterparms(struct qeth_card *card)
QETH_CARD_TEXT(card, 3, "queryadp");
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_COMMANDS_SUPPORTED,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_setadapterparms_cb, NULL);
return rc;
}
@@ -3080,6 +3129,8 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
QETH_DBF_TEXT_(SETUP, 2, "qipassi%i", prot);
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_QIPASSIST, prot);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_send_ipa_cmd(card, iob, qeth_query_ipassists_cb, NULL);
return rc;
}
@@ -3119,6 +3170,8 @@ int qeth_query_switch_attributes(struct qeth_card *card,
return -ENOMEDIUM;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob,
qeth_query_switch_attributes_cb, sw_info);
}
@@ -3146,6 +3199,8 @@ static int qeth_query_setdiagass(struct qeth_card *card)
QETH_DBF_TEXT(SETUP, 2, "qdiagass");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_QUERY;
@@ -3197,6 +3252,8 @@ int qeth_hw_trap(struct qeth_card *card, enum qeth_diags_trap_action action)
QETH_DBF_TEXT(SETUP, 2, "diagtrap");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 80;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRAP;
@@ -4162,6 +4219,8 @@ void qeth_setadp_promisc_mode(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_PROMISC_MODE,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data + IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.mode = mode;
qeth_send_ipa_cmd(card, iob, qeth_setadp_promisc_mode_cb, NULL);
@@ -4232,6 +4291,8 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_ALTER_MAC_ADDRESS,
sizeof(struct qeth_ipacmd_setadpparms));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setadapterparms.data.change_addr.cmd = CHANGE_ADDR_READ_MAC;
cmd->data.setadapterparms.data.change_addr.addr_size = OSA_ADDR_LEN;
@@ -4345,6 +4406,8 @@ static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_set_access_ctrl));
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
access_ctrl_req->subcmd_code = isolation;
@@ -4588,6 +4651,10 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_SNMP_CONTROL,
QETH_SNMP_SETADP_CMDLENGTH + req_len);
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setadapterparms.data.snmp, &ureq->cmd, req_len);
rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len,
@@ -4599,7 +4666,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata)
if (copy_to_user(udata, qinfo.udata, qinfo.udata_len))
rc = -EFAULT;
}
-
+out:
kfree(ureq);
kfree(qinfo.udata);
return rc;
@@ -4670,6 +4737,10 @@ int qeth_query_oat_command(struct qeth_card *card, char __user *udata)
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_OAT,
sizeof(struct qeth_ipacmd_setadpparms_hdr) +
sizeof(struct qeth_query_oat));
+ if (!iob) {
+ rc = -ENOMEM;
+ goto out_free;
+ }
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
oat_req = &cmd->data.setadapterparms.data.query_oat;
oat_req->subcmd_code = oat_data.command;
@@ -4735,6 +4806,8 @@ static int qeth_query_card_info(struct qeth_card *card,
return -EOPNOTSUPP;
iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_CARD_INFO,
sizeof(struct qeth_ipacmd_setadpparms_hdr));
+ if (!iob)
+ return -ENOMEM;
return qeth_send_ipa_cmd(card, iob, qeth_query_card_info_cb,
(void *)carrier_info);
}
@@ -5060,11 +5133,23 @@ retriable:
card->options.adp.supported_funcs = 0;
card->options.sbp.supported_funcs = 0;
card->info.diagass_support = 0;
- qeth_query_ipassists(card, QETH_PROT_IPV4);
- if (qeth_is_supported(card, IPA_SETADAPTERPARMS))
- qeth_query_setadapterparms(card);
- if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST))
- qeth_query_setdiagass(card);
+ rc = qeth_query_ipassists(card, QETH_PROT_IPV4);
+ if (rc == -ENOMEM)
+ goto out;
+ if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
+ rc = qeth_query_setadapterparms(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ goto out;
+ }
+ }
+ if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
+ rc = qeth_query_setdiagass(card);
+ if (rc < 0) {
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ goto out;
+ }
+ }
return 0;
out:
dev_warn(&card->gdev->dev, "The qeth device driver failed to recover "
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index d02cd1a67943..ce87ae72edbd 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,10 +27,7 @@ static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long));
+ enum qeth_ipa_cmds);
static void qeth_l2_set_multicast_list(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -130,56 +127,71 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
return ndev;
}
-static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Sgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- /* MAC already registered, needed in couple/uncouple case */
- if (cmd->hdr.return_code == IPA_RC_L2_DUP_MAC) {
- QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s \n",
- mac, QETH_CARD_IFNAME(card));
- cmd->hdr.return_code = 0;
+ if (retcode)
+ QETH_CARD_TEXT_(card, 2, "err%04x", retcode);
+ switch (retcode) {
+ case IPA_RC_SUCCESS:
+ rc = 0;
+ break;
+ case IPA_RC_L2_UNSUPPORTED_CMD:
+ rc = -ENOSYS;
+ break;
+ case IPA_RC_L2_ADDR_TABLE_FULL:
+ rc = -ENOSPC;
+ break;
+ case IPA_RC_L2_DUP_MAC:
+ case IPA_RC_L2_DUP_LAYER3_MAC:
+ rc = -EEXIST;
+ break;
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
+ case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ rc = -EPERM;
+ break;
+ case IPA_RC_L2_MAC_NOT_FOUND:
+ rc = -ENOENT;
+ break;
+ case -ENOMEM:
+ rc = -ENOMEM;
+ break;
+ default:
+ rc = -EIO;
+ break;
}
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ return rc;
}
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
- QETH_CARD_TEXT(card, 2, "L2Sgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC,
- qeth_l2_send_setgroupmac_cb);
-}
-
-static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
- __u8 *mac;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Dgmacb");
- cmd = (struct qeth_ipa_cmd *) data;
- mac = &cmd->data.setdelmac.mac[0];
- if (cmd->hdr.return_code)
- QETH_DBF_MESSAGE(2, "Could not delete group MAC %pM on %s: %x\n",
- mac, QETH_CARD_IFNAME(card), cmd->hdr.return_code);
- return 0;
+ QETH_CARD_TEXT(card, 2, "L2Sgmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETGMAC));
+ if (rc == -EEXIST)
+ QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
+ mac, QETH_CARD_IFNAME(card));
+ else if (rc)
+ QETH_DBF_MESSAGE(2, "Could not set group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC,
- qeth_l2_send_delgroupmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELGMAC));
+ if (rc)
+ QETH_DBF_MESSAGE(2,
+ "Could not delete group MAC %pM on %s: %d\n",
+ mac, QETH_CARD_IFNAME(card), rc);
+ return rc;
}
static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
@@ -197,10 +209,11 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac, int vmac)
mc->is_vmac = vmac;
if (vmac) {
- rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- NULL);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC));
} else {
- rc = qeth_l2_send_setgroupmac(card, mac);
+ rc = qeth_setdel_makerc(card,
+ qeth_l2_send_setgroupmac(card, mac));
}
if (!rc)
@@ -218,7 +231,7 @@ static void qeth_l2_del_all_mc(struct qeth_card *card, int del)
if (del) {
if (mc->is_vmac)
qeth_l2_send_setdelmac(card, mc->mc_addr,
- IPA_CMD_DELVMAC, NULL);
+ IPA_CMD_DELVMAC);
else
qeth_l2_send_delgroupmac(card, mc->mc_addr);
}
@@ -291,6 +304,8 @@ static int qeth_l2_send_setdelvlan(struct qeth_card *card, __u16 i,
QETH_CARD_TEXT_(card, 4, "L2sdv%x", ipacmd);
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelvlan.vlan_id = i;
return qeth_send_ipa_cmd(card, iob,
@@ -313,6 +328,7 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
{
struct qeth_card *card = dev->ml_priv;
struct qeth_vlan_vid *id;
+ int rc;
QETH_CARD_TEXT_(card, 4, "aid:%d", vid);
if (!vid)
@@ -328,7 +344,11 @@ static int qeth_l2_vlan_rx_add_vid(struct net_device *dev,
id = kmalloc(sizeof(struct qeth_vlan_vid), GFP_ATOMIC);
if (id) {
id->vid = vid;
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_SETVLAN);
+ if (rc) {
+ kfree(id);
+ return rc;
+ }
spin_lock_bh(&card->vlanlock);
list_add_tail(&id->list, &card->vid_list);
spin_unlock_bh(&card->vlanlock);
@@ -343,6 +363,7 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
{
struct qeth_vlan_vid *id, *tmpid = NULL;
struct qeth_card *card = dev->ml_priv;
+ int rc = 0;
QETH_CARD_TEXT_(card, 4, "kid:%d", vid);
if (card->info.type == QETH_CARD_TYPE_OSM) {
@@ -363,11 +384,11 @@ static int qeth_l2_vlan_rx_kill_vid(struct net_device *dev,
}
spin_unlock_bh(&card->vlanlock);
if (tmpid) {
- qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
+ rc = qeth_l2_send_setdelvlan(card, vid, IPA_CMD_DELVLAN);
kfree(tmpid);
}
qeth_l2_set_multicast_list(card->dev);
- return 0;
+ return rc;
}
static int qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
@@ -539,91 +560,62 @@ out:
}
static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd,
- int (*reply_cb) (struct qeth_card *,
- struct qeth_reply*,
- unsigned long))
+ enum qeth_ipa_cmds ipacmd)
{
struct qeth_ipa_cmd *cmd;
struct qeth_cmd_buffer *iob;
QETH_CARD_TEXT(card, 2, "L2sdmac");
iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, reply_cb, NULL);
+ return qeth_send_ipa_cmd(card, iob, NULL, NULL);
}
-static int qeth_l2_send_setmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
{
- struct qeth_ipa_cmd *cmd;
+ int rc;
- QETH_CARD_TEXT(card, 2, "L2Smaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "L2er%x", cmd->hdr.return_code);
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_SETVMAC));
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (cmd->hdr.return_code) {
- case IPA_RC_L2_DUP_MAC:
- case IPA_RC_L2_DUP_LAYER3_MAC:
+ switch (rc) {
+ case -EEXIST:
dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n",
- cmd->data.setdelmac.mac);
+ "MAC address %pM already exists\n", mac);
break;
- case IPA_RC_L2_MAC_NOT_AUTH_BY_HYP:
- case IPA_RC_L2_MAC_NOT_AUTH_BY_ADP:
+ case -EPERM:
dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n",
- cmd->data.setdelmac.mac);
- break;
- default:
+ "MAC address %pM is not authorized\n", mac);
break;
}
- } else {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac,
- OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- }
- return 0;
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC,
- qeth_l2_send_setmac_cb);
-}
-
-static int qeth_l2_send_delmac_cb(struct qeth_card *card,
- struct qeth_reply *reply,
- unsigned long data)
-{
- struct qeth_ipa_cmd *cmd;
-
- QETH_CARD_TEXT(card, 2, "L2Dmaccb");
- cmd = (struct qeth_ipa_cmd *) data;
- if (cmd->hdr.return_code) {
- QETH_CARD_TEXT_(card, 2, "err%d", cmd->hdr.return_code);
- return 0;
}
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
-
- return 0;
+ return rc;
}
static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
{
+ int rc;
+
QETH_CARD_TEXT(card, 2, "L2Delmac");
if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
return 0;
- return qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC,
- qeth_l2_send_delmac_cb);
+ rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
+ IPA_CMD_DELVMAC));
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
}
static int qeth_l2_request_initial_mac(struct qeth_card *card)
@@ -651,7 +643,7 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card)
if (rc) {
QETH_DBF_MESSAGE(2, "couldn't get MAC address on "
"device %s: x%x\n", CARD_BUS_ID(card), rc);
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "1err%04x", rc);
return rc;
}
QETH_DBF_HEX(SETUP, 2, card->dev->dev_addr, OSA_ADDR_LEN);
@@ -687,7 +679,7 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p)
return -ERESTARTSYS;
}
rc = qeth_l2_send_delmac(card, &card->dev->dev_addr[0]);
- if (!rc || (rc == IPA_RC_L2_MAC_NOT_FOUND))
+ if (!rc || (rc == -ENOENT))
rc = qeth_l2_send_setmac(card, addr->sa_data);
return rc ? -EINVAL : 0;
}
@@ -996,7 +988,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -1730,6 +1722,8 @@ static void qeth_bridgeport_query_support(struct qeth_card *card)
QETH_CARD_TEXT(card, 2, "brqsuppo");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr) +
@@ -1805,6 +1799,8 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (!(card->options.sbp.supported_funcs & IPA_SBP_QUERY_BRIDGE_PORTS))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength =
sizeof(struct qeth_ipacmd_sbp_hdr);
@@ -1817,9 +1813,7 @@ int qeth_bridgeport_query_ports(struct qeth_card *card,
if (rc)
return rc;
rc = qeth_bridgeport_makerc(card, &cbctl, IPA_SBP_QUERY_BRIDGE_PORTS);
- if (rc)
- return rc;
- return 0;
+ return rc;
}
EXPORT_SYMBOL_GPL(qeth_bridgeport_query_ports);
@@ -1873,6 +1867,8 @@ int qeth_bridgeport_setrole(struct qeth_card *card, enum qeth_sbp_roles role)
if (!(card->options.sbp.supported_funcs & setcmd))
return -EOPNOTSUPP;
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETBRIDGEPORT, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.sbp.hdr.cmdlength = cmdlength;
cmd->data.sbp.hdr.command_code = setcmd;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 625227ad16ee..e2a0ee845399 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -549,6 +549,8 @@ static int qeth_l3_send_setdelmc(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setdelmc");
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
memcpy(&cmd->data.setdelipm.mac, addr->mac, OSA_ADDR_LEN);
if (addr->proto == QETH_PROT_IPV6)
@@ -588,6 +590,8 @@ static int qeth_l3_send_setdelip(struct qeth_card *card,
QETH_CARD_TEXT_(card, 4, "flags%02X", flags);
iob = qeth_get_ipacmd_buffer(card, ipacmd, addr->proto);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
if (addr->proto == QETH_PROT_IPV6) {
memcpy(cmd->data.setdelip6.ip_addr, &addr->u.a6.addr,
@@ -616,6 +620,8 @@ static int qeth_l3_send_setrouting(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "setroutg");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETRTG, prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setrtg.type = (type);
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
@@ -1049,12 +1055,14 @@ static struct qeth_cmd_buffer *qeth_l3_get_setassparms_cmd(
QETH_CARD_TEXT(card, 4, "getasscm");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SETASSPARMS, prot);
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setassparms.hdr.assist_no = ipa_func;
- cmd->data.setassparms.hdr.length = 8 + len;
- cmd->data.setassparms.hdr.command_code = cmd_code;
- cmd->data.setassparms.hdr.return_code = 0;
- cmd->data.setassparms.hdr.seq_no = 0;
+ if (iob) {
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setassparms.hdr.assist_no = ipa_func;
+ cmd->data.setassparms.hdr.length = 8 + len;
+ cmd->data.setassparms.hdr.command_code = cmd_code;
+ cmd->data.setassparms.hdr.return_code = 0;
+ cmd->data.setassparms.hdr.seq_no = 0;
+ }
return iob;
}
@@ -1090,6 +1098,8 @@ static int qeth_l3_send_simple_setassparms_ipv6(struct qeth_card *card,
QETH_CARD_TEXT(card, 4, "simassp6");
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
0, QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, 0, 0,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1108,6 +1118,8 @@ static int qeth_l3_send_simple_setassparms(struct qeth_card *card,
length = sizeof(__u32);
iob = qeth_l3_get_setassparms_cmd(card, ipa_func, cmd_code,
length, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob, length, data,
qeth_l3_default_setassparms_cb, NULL);
return rc;
@@ -1494,6 +1506,8 @@ static int qeth_l3_iqd_read_initial_mac(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1537,6 +1551,8 @@ static int qeth_l3_get_unique_id(struct qeth_card *card)
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_CREATE_ADDR,
QETH_PROT_IPV6);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
*((__u16 *) &cmd->data.create_destroy_addr.unique_id[6]) =
card->info.unique_id;
@@ -1611,6 +1627,8 @@ qeth_diags_trace(struct qeth_card *card, enum qeth_diags_trace_cmds diags_cmd)
QETH_DBF_TEXT(SETUP, 2, "diagtrac");
iob = qeth_get_ipacmd_buffer(card, IPA_CMD_SET_DIAG_ASS, 0);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.diagass.subcmd_len = 16;
cmd->data.diagass.subcmd = QETH_DIAGS_CMD_TRACE;
@@ -2442,6 +2460,8 @@ static int qeth_l3_query_arp_cache_info(struct qeth_card *card,
IPA_CMD_ASS_ARP_QUERY_INFO,
sizeof(struct qeth_arp_query_data) - sizeof(char),
prot);
+ if (!iob)
+ return -ENOMEM;
cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
cmd->data.setassparms.data.query_arp.request_bits = 0x000F;
cmd->data.setassparms.data.query_arp.reply_bits = 0;
@@ -2535,6 +2555,8 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_ADD_ENTRY,
sizeof(struct qeth_arp_cache_entry),
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
sizeof(struct qeth_arp_cache_entry),
(unsigned long) entry,
@@ -2574,6 +2596,8 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card,
IPA_CMD_ASS_ARP_REMOVE_ENTRY,
12,
QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
rc = qeth_l3_send_setassparms(card, iob,
12, (unsigned long)buf,
qeth_l3_default_setassparms_cb, NULL);
@@ -3262,6 +3286,8 @@ static const struct net_device_ops qeth_l3_osa_netdev_ops = {
static int qeth_l3_setup_netdev(struct qeth_card *card)
{
+ int rc;
+
if (card->info.type == QETH_CARD_TYPE_OSD ||
card->info.type == QETH_CARD_TYPE_OSX) {
if ((card->info.link_type == QETH_LINK_TYPE_LANE_TR) ||
@@ -3293,7 +3319,9 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
return -ENODEV;
card->dev->flags |= IFF_NOARP;
card->dev->netdev_ops = &qeth_l3_netdev_ops;
- qeth_l3_iqd_read_initial_mac(card);
+ rc = qeth_l3_iqd_read_initial_mac(card);
+ if (rc)
+ return rc;
if (card->options.hsuid[0])
memcpy(card->dev->perm_addr, card->options.hsuid, 9);
} else
@@ -3360,7 +3388,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
recover_flag = card->state;
rc = qeth_core_hardsetup_card(card);
if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
rc = -ENODEV;
goto out_remove;
}
@@ -3401,7 +3429,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
if (!card->options.sniffer) {
rc = qeth_l3_start_ipassists(card);
if (rc) {
@@ -3410,10 +3438,10 @@ contin:
}
rc = qeth_l3_setrouting_v4(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "4err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "4err%04x", rc);
rc = qeth_l3_setrouting_v6(card);
if (rc)
- QETH_DBF_TEXT_(SETUP, 2, "5err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "5err%04x", rc);
}
netif_tx_disable(card->dev);
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 3b73b96619e2..26270c351624 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
#define DRV_NAME "fnic"
#define DRV_DESCRIPTION "Cisco FCoE HBA Driver"
-#define DRV_VERSION "1.6.0.16"
+#define DRV_VERSION "1.6.0.17"
#define PFX DRV_NAME ": "
#define DFX DRV_NAME "%d: "
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2097de42a147..155b286f1a9d 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -1892,6 +1892,21 @@ int fnic_abort_cmd(struct scsi_cmnd *sc)
goto fnic_abort_cmd_end;
}
+ /* IO out of order */
+
+ if (!(CMD_FLAGS(sc) & (FNIC_IO_ABORTED | FNIC_IO_DONE))) {
+ spin_unlock_irqrestore(io_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "Issuing Host reset due to out of order IO\n");
+
+ if (fnic_host_reset(sc) == FAILED) {
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "fnic_host_reset failed.\n");
+ }
+ ret = FAILED;
+ goto fnic_abort_cmd_end;
+ }
+
CMD_STATE(sc) = FNIC_IOREQ_ABTS_COMPLETE;
/*
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df4e27cd996a..9219953ee949 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -683,6 +683,7 @@ static void ipr_init_ipr_cmnd(struct ipr_cmnd *ipr_cmd,
ipr_reinit_ipr_cmnd(ipr_cmd);
ipr_cmd->u.scratch = 0;
ipr_cmd->sibling = NULL;
+ ipr_cmd->eh_comp = NULL;
ipr_cmd->fast_done = fast_done;
init_timer(&ipr_cmd->timer);
}
@@ -848,6 +849,8 @@ static void ipr_scsi_eh_done(struct ipr_cmnd *ipr_cmd)
scsi_dma_unmap(ipr_cmd->scsi_cmd);
scsi_cmd->scsi_done(scsi_cmd);
+ if (ipr_cmd->eh_comp)
+ complete(ipr_cmd->eh_comp);
list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
}
@@ -4811,6 +4814,84 @@ static int ipr_slave_alloc(struct scsi_device *sdev)
return rc;
}
+/**
+ * ipr_match_lun - Match function for specified LUN
+ * @ipr_cmd: ipr command struct
+ * @device: device to match (sdev)
+ *
+ * Returns:
+ * 1 if command matches sdev / 0 if command does not match sdev
+ **/
+static int ipr_match_lun(struct ipr_cmnd *ipr_cmd, void *device)
+{
+ if (ipr_cmd->scsi_cmd && ipr_cmd->scsi_cmd->device == device)
+ return 1;
+ return 0;
+}
+
+/**
+ * ipr_wait_for_ops - Wait for matching commands to complete
+ * @ioa_cfg: ioa config struct
+ * @device: device to match (sdev)
+ * @match: match function to use
+ *
+ * Returns:
+ * SUCCESS / FAILED
+ **/
+static int ipr_wait_for_ops(struct ipr_ioa_cfg *ioa_cfg, void *device,
+ int (*match)(struct ipr_cmnd *, void *))
+{
+ struct ipr_cmnd *ipr_cmd;
+ int wait;
+ unsigned long flags;
+ struct ipr_hrr_queue *hrrq;
+ signed long timeout = IPR_ABORT_TASK_TIMEOUT;
+ DECLARE_COMPLETION_ONSTACK(comp);
+
+ ENTER;
+ do {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = &comp;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait) {
+ timeout = wait_for_completion_timeout(&comp, timeout);
+
+ if (!timeout) {
+ wait = 0;
+
+ for_each_hrrq(hrrq, ioa_cfg) {
+ spin_lock_irqsave(hrrq->lock, flags);
+ list_for_each_entry(ipr_cmd, &hrrq->hrrq_pending_q, queue) {
+ if (match(ipr_cmd, device)) {
+ ipr_cmd->eh_comp = NULL;
+ wait++;
+ }
+ }
+ spin_unlock_irqrestore(hrrq->lock, flags);
+ }
+
+ if (wait)
+ dev_err(&ioa_cfg->pdev->dev, "Timed out waiting for aborted commands\n");
+ LEAVE;
+ return wait ? FAILED : SUCCESS;
+ }
+ }
+ } while (wait);
+
+ LEAVE;
+ return SUCCESS;
+}
+
static int ipr_eh_host_reset(struct scsi_cmnd *cmd)
{
struct ipr_ioa_cfg *ioa_cfg;
@@ -5030,11 +5111,17 @@ static int __ipr_eh_dev_reset(struct scsi_cmnd *scsi_cmd)
static int ipr_eh_dev_reset(struct scsi_cmnd *cmd)
{
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
+
+ ioa_cfg = (struct ipr_ioa_cfg *) cmd->device->host->hostdata;
spin_lock_irq(cmd->device->host->host_lock);
rc = __ipr_eh_dev_reset(cmd);
spin_unlock_irq(cmd->device->host->host_lock);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, cmd->device, ipr_match_lun);
+
return rc;
}
@@ -5234,13 +5321,18 @@ static int ipr_eh_abort(struct scsi_cmnd *scsi_cmd)
{
unsigned long flags;
int rc;
+ struct ipr_ioa_cfg *ioa_cfg;
ENTER;
+ ioa_cfg = (struct ipr_ioa_cfg *) scsi_cmd->device->host->hostdata;
+
spin_lock_irqsave(scsi_cmd->device->host->host_lock, flags);
rc = ipr_cancel_op(scsi_cmd);
spin_unlock_irqrestore(scsi_cmd->device->host->host_lock, flags);
+ if (rc == SUCCESS)
+ rc = ipr_wait_for_ops(ioa_cfg, scsi_cmd->device, ipr_match_lun);
LEAVE;
return rc;
}
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index b4f3eec51bc9..ec03b42fa2b9 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1606,6 +1606,7 @@ struct ipr_cmnd {
struct scsi_device *sdev;
} u;
+ struct completion *eh_comp;
struct ipr_hrr_queue *hrrq;
struct ipr_ioa_cfg *ioa_cfg;
};
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 12ca291c1380..cce1cbc1a927 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
* Return target busy if we've received a non-zero retry_delay_timer
* in a FCP_RSP.
*/
- if (time_after(jiffies, fcport->retry_delay_timestamp))
+ if (fcport->retry_delay_timestamp == 0) {
+ /* retry delay not set */
+ } else if (time_after(jiffies, fcport->retry_delay_timestamp))
fcport->retry_delay_timestamp = 0;
else
goto qc24_target_busy;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index e02885451425..9b3829931f40 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -986,9 +986,9 @@ int scsi_device_get(struct scsi_device *sdev)
return -ENXIO;
if (!get_device(&sdev->sdev_gendev))
return -ENXIO;
- /* We can fail this if we're doing SCSI operations
+ /* We can fail try_module_get if we're doing SCSI operations
* from module exit (like cache flush) */
- try_module_get(sdev->host->hostt->module);
+ __module_get(sdev->host->hostt->module);
return 0;
}
@@ -1004,14 +1004,7 @@ EXPORT_SYMBOL(scsi_device_get);
*/
void scsi_device_put(struct scsi_device *sdev)
{
-#ifdef CONFIG_MODULE_UNLOAD
- struct module *module = sdev->host->hostt->module;
-
- /* The module refcount will be zero if scsi_device_get()
- * was called from a module removal routine */
- if (module && module_refcount(module) != 0)
- module_put(module);
-#endif
+ module_put(sdev->host->hostt->module);
put_device(&sdev->sdev_gendev);
}
EXPORT_SYMBOL(scsi_device_put);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 7b8b51bc29b4..4aca1b0378c2 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1623,7 +1623,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
req_opcode = cmd[3];
req_sa = get_unaligned_be16(cmd + 4);
alloc_len = get_unaligned_be32(cmd + 6);
- if (alloc_len < 4 && alloc_len > 0xffff) {
+ if (alloc_len < 4 || alloc_len > 0xffff) {
mk_sense_invalid_fld(scp, SDEB_IN_CDB, 6, -1);
return check_condition_result;
}
@@ -1631,7 +1631,7 @@ resp_rsup_opcodes(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
a_len = 8192;
else
a_len = alloc_len;
- arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_KERNEL);
+ arr = kzalloc((a_len < 256) ? 320 : a_len + 64, GFP_ATOMIC);
if (NULL == arr) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, INSUFF_RES_ASC,
INSUFF_RES_ASCQ);
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index e42fff6e8c10..8afb01604d51 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1041,7 +1041,7 @@ retry:
}
/* signal not to enter either branch of the if () below */
timeleft = 0;
- rtn = NEEDS_RETRY;
+ rtn = FAILED;
} else {
timeleft = wait_for_completion_timeout(&done, timeout);
rtn = SUCCESS;
@@ -1081,7 +1081,7 @@ retry:
rtn = FAILED;
break;
}
- } else if (!rtn) {
+ } else if (rtn != FAILED) {
scsi_abort_eh_cmnd(scmd);
rtn = FAILED;
}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9ea95dd3e260..17bb541f7cc2 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
{
struct scatterlist *first_chunk = NULL;
- gfp_t gfp_mask = mq ? GFP_NOIO : GFP_ATOMIC;
int ret;
BUG_ON(!nents);
@@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
}
ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
- first_chunk, gfp_mask, scsi_sg_alloc);
+ first_chunk, GFP_ATOMIC, scsi_sg_alloc);
if (unlikely(ret))
scsi_free_sgtable(sdb, mq);
return ret;
@@ -1144,7 +1143,17 @@ int scsi_init_io(struct scsi_cmnd *cmd)
struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
int ivecs, count;
- BUG_ON(prot_sdb == NULL);
+ if (prot_sdb == NULL) {
+ /*
+ * This can happen if someone (e.g. multipath)
+ * queues a command to a device on an adapter
+ * that does not support DIX.
+ */
+ WARN_ON_ONCE(1);
+ error = BLKPREP_KILL;
+ goto err_exit;
+ }
+
ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio);
if (scsi_alloc_sgtable(prot_sdb, ivecs, is_mq)) {
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index fedab3c21ddf..399516925d80 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2623,8 +2623,9 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
sd_config_discard(sdkp, SD_LBP_WS16);
} else { /* LBP VPD page tells us what to use */
-
- if (sdkp->lbpws)
+ if (sdkp->lbpu && sdkp->max_unmap_blocks && !sdkp->lbprz)
+ sd_config_discard(sdkp, SD_LBP_UNMAP);
+ else if (sdkp->lbpws)
sd_config_discard(sdkp, SD_LBP_WS16);
else if (sdkp->lbpws10)
sd_config_discard(sdkp, SD_LBP_WS10);
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index 7281316a5ecb..a67d37c7e3c0 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -271,7 +271,6 @@ int dw_spi_mid_init(struct dw_spi *dws)
iounmap(clk_reg);
dws->num_cs = 16;
- dws->fifo_len = 40; /* FIFO has 40 words buffer */
#ifdef CONFIG_SPI_DW_MID_DMA
dws->dma_priv = kzalloc(sizeof(struct mid_dma), GFP_KERNEL);
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index d0d5542efc06..8edcd1b84562 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -621,13 +621,13 @@ static void spi_hw_init(struct dw_spi *dws)
if (!dws->fifo_len) {
u32 fifo;
- for (fifo = 2; fifo <= 257; fifo++) {
+ for (fifo = 2; fifo <= 256; fifo++) {
dw_writew(dws, DW_SPI_TXFLTR, fifo);
if (fifo != dw_readw(dws, DW_SPI_TXFLTR))
break;
}
- dws->fifo_len = (fifo == 257) ? 0 : fifo;
+ dws->fifo_len = (fifo == 2) ? 0 : fifo - 1;
dw_writew(dws, DW_SPI_TXFLTR, 0);
}
}
@@ -673,7 +673,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
if (dws->dma_ops && dws->dma_ops->dma_init) {
ret = dws->dma_ops->dma_init(dws);
if (ret) {
- dev_warn(&master->dev, "DMA init failed\n");
+ dev_warn(dev, "DMA init failed\n");
dws->dma_inited = 0;
}
}
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index b410499cddca..aad6683db81b 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -341,7 +341,7 @@ static int img_spfi_start_dma(struct spi_master *master,
default:
rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
rxconf.src_addr_width = 1;
- rxconf.src_maxburst = 1;
+ rxconf.src_maxburst = 4;
}
dmaengine_slave_config(spfi->rx_ch, &rxconf);
@@ -368,7 +368,7 @@ static int img_spfi_start_dma(struct spi_master *master,
default:
txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
txconf.dst_addr_width = 1;
- txconf.dst_maxburst = 1;
+ txconf.dst_maxburst = 4;
break;
}
dmaengine_slave_config(spfi->tx_ch, &txconf);
@@ -390,14 +390,14 @@ static int img_spfi_start_dma(struct spi_master *master,
dma_async_issue_pending(spfi->rx_ch);
}
+ spfi_start(spfi);
+
if (xfer->tx_buf) {
spfi->tx_dma_busy = true;
dmaengine_submit(txdesc);
dma_async_issue_pending(spfi->tx_ch);
}
- spfi_start(spfi);
-
return 1;
stop_dma:
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 05c623cfb078..23822e7df6c1 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -546,8 +546,8 @@ static void giveback(struct driver_data *drv_data)
cs_deassert(drv_data);
}
- spi_finalize_current_message(drv_data->master);
drv_data->cur_chip = NULL;
+ spi_finalize_current_message(drv_data->master);
}
static void reset_sccr1(struct driver_data *drv_data)
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 239be7cbe5a8..3ab7a21445fc 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -82,7 +82,7 @@ struct sh_msiof_spi_priv {
#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
-#define MDR1_FLD_MASK 0x000000c0 /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
#define MDR1_FLD_SHIFT 2
#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
/* TMDR1 */
@@ -480,6 +480,8 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
struct device_node *np = spi->master->dev.of_node;
struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+ pm_runtime_get_sync(&p->pdev->dev);
+
if (!np) {
/*
* Use spi->controller_data for CS (same strategy as spi_gpio),
@@ -498,6 +500,9 @@ static int sh_msiof_spi_setup(struct spi_device *spi)
if (spi->cs_gpio >= 0)
gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ pm_runtime_put_sync(&p->pdev->dev);
+
return 0;
}
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index 1bf891bd321a..4f361b77c749 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -264,7 +264,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
if ((bits & (MDS_INODELOCK_LOOKUP | MDS_INODELOCK_PERM)) &&
inode->i_sb->s_root != NULL &&
- is_root_inode(inode))
+ !is_root_inode(inode))
ll_invalidate_aliases(inode);
iput(inode);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 930f6010203e..65d610abe06e 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -632,7 +632,7 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
return 0;
}
- if (cfio->fault.ft_flags & VM_FAULT_SIGBUS) {
+ if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
diff --git a/drivers/staging/media/tlg2300/Kconfig b/drivers/staging/media/tlg2300/Kconfig
index 81784c6f7b88..77d8753f6ba4 100644
--- a/drivers/staging/media/tlg2300/Kconfig
+++ b/drivers/staging/media/tlg2300/Kconfig
@@ -1,6 +1,7 @@
config VIDEO_TLG2300
tristate "Telegent TLG2300 USB video capture support (Deprecated)"
depends on VIDEO_DEV && I2C && SND && DVB_CORE
+ depends on MEDIA_USB_SUPPORT
select VIDEO_TUNER
select VIDEO_TVEEPROM
depends on RC_CORE
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 093535c6217b..120b70d72d79 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -85,23 +85,20 @@ static struct nvec_chip *nvec_power_handle;
static const struct mfd_cell nvec_devices[] = {
{
.name = "nvec-kbd",
- .id = 1,
},
{
.name = "nvec-mouse",
- .id = 1,
},
{
.name = "nvec-power",
- .id = 1,
+ .id = 0,
},
{
.name = "nvec-power",
- .id = 2,
+ .id = 1,
},
{
.name = "nvec-paz00",
- .id = 1,
},
};
@@ -891,7 +888,7 @@ static int tegra_nvec_probe(struct platform_device *pdev)
nvec_msg_free(nvec, msg);
}
- ret = mfd_add_devices(nvec->dev, -1, nvec_devices,
+ ret = mfd_add_devices(nvec->dev, 0, nvec_devices,
ARRAY_SIZE(nvec_devices), NULL, 0, NULL);
if (ret)
dev_err(nvec->dev, "error adding subdevices\n");
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 86c72ba0a0cd..f8c5fc371c4c 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -2177,7 +2177,7 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
- bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06);
+ bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
/* }} */
for (ii = 0; ii < CB_VT3253B0_AGC; ii++)
diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c
index c8f739dd346e..70f870541f92 100644
--- a/drivers/staging/vt6655/channel.c
+++ b/drivers/staging/vt6655/channel.c
@@ -182,6 +182,14 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel)
if (pDevice->byCurrentCh == uConnectionChannel)
return bResult;
+ /* Set VGA to max sensitivity */
+ if (pDevice->bUpdateBBVGA &&
+ pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) {
+ pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
+
+ BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent);
+ }
+
/* clear NAV */
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV);
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index 83e4162c0094..cd1a277d853b 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1232,7 +1232,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
head_td = priv->apCurrTD[dma_idx];
- head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
+ head_td->m_td1TD1.byTCR = 0;
head_td->pTDInfo->skb = skb;
@@ -1257,6 +1257,11 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
priv->bPWBitOn = false;
+ /* Set TSR1 & ReqCount in TxDescHead */
+ head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
+ head_td->m_td1TD1.wReqCount =
+ cpu_to_le16((u16)head_td->pTDInfo->dwReqCount);
+
head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
if (dma_idx == TYPE_AC0DMA)
@@ -1500,9 +1505,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
if (conf->enable_beacon) {
vnt_beacon_enable(priv, vif, conf);
- MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
} else {
- MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX);
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR,
+ TCR_AUTOBCNTX);
}
}
diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c
index 61c39dd7ad01..b5b0155961f2 100644
--- a/drivers/staging/vt6655/rxtx.c
+++ b/drivers/staging/vt6655/rxtx.c
@@ -1204,13 +1204,10 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType,
ptdCurr = (PSTxDesc)pHeadTD;
- ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding;
+ ptdCurr->pTDInfo->dwReqCount = cbReqCount;
ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength;
ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma;
ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma);
- /* Set TSR1 & ReqCount in TxDescHead */
- ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU);
- ptdCurr->m_td1TD1.wReqCount = cpu_to_le16((unsigned short)(cbReqCount));
return cbHeaderLength;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 55f6774f706f..aebde3289c50 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
goto reject;
}
if (!strncmp("=All", text_ptr, 4)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_ALL;
+ cmd->cmd_flags |= ICF_SENDTARGETS_ALL;
} else if (!strncmp("=iqn.", text_ptr, 5) ||
!strncmp("=eui.", text_ptr, 5)) {
- cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE;
+ cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE;
} else {
pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr);
goto reject;
@@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
return -ENOMEM;
}
/*
- * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE
+ * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE
* explicit case..
*/
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) {
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) {
text_ptr = strchr(text_in, '=');
if (!text_ptr) {
pr_err("Unable to locate '=' string in text_in:"
@@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
spin_lock(&tiqn_lock);
list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) {
- if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) &&
+ if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) &&
strcmp(tiqn->tiqn, text_ptr)) {
continue;
}
@@ -3512,7 +3512,7 @@ eob:
if (end_of_buf)
break;
- if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE)
+ if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE)
break;
}
spin_unlock(&tiqn_lock);
diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
index 09a522bae222..cbcff38ac9b7 100644
--- a/drivers/target/iscsi/iscsi_target_core.h
+++ b/drivers/target/iscsi/iscsi_target_core.h
@@ -135,8 +135,8 @@ enum cmd_flags_table {
ICF_CONTIG_MEMORY = 0x00000020,
ICF_ATTACHED_TO_RQUEUE = 0x00000040,
ICF_OOO_CMDSN = 0x00000080,
- IFC_SENDTARGETS_ALL = 0x00000100,
- IFC_SENDTARGETS_SINGLE = 0x00000200,
+ ICF_SENDTARGETS_ALL = 0x00000100,
+ ICF_SENDTARGETS_SINGLE = 0x00000200,
};
/* struct iscsi_cmd->i_state */
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 7653cfb027a2..58f49ff69b14 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth)
}
EXPORT_SYMBOL(se_dev_set_queue_depth);
-int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors)
-{
- int block_size = dev->dev_attrib.block_size;
-
- if (dev->export_count) {
- pr_err("dev[%p]: Unable to change SE Device"
- " fabric_max_sectors while export_count is %d\n",
- dev, dev->export_count);
- return -EINVAL;
- }
- if (!fabric_max_sectors) {
- pr_err("dev[%p]: Illegal ZERO value for"
- " fabric_max_sectors\n", dev);
- return -EINVAL;
- }
- if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u less than"
- " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MIN);
- return -EINVAL;
- }
- if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
- pr_err("dev[%p]: Passed fabric_max_sectors: %u"
- " greater than DA_STATUS_MAX_SECTORS_MAX:"
- " %u\n", dev, fabric_max_sectors,
- DA_STATUS_MAX_SECTORS_MAX);
- return -EINVAL;
- }
- /*
- * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
- */
- if (!block_size) {
- block_size = 512;
- pr_warn("Defaulting to 512 for zero block_size\n");
- }
- fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors,
- block_size);
-
- dev->dev_attrib.fabric_max_sectors = fabric_max_sectors;
- pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
- dev, fabric_max_sectors);
- return 0;
-}
-EXPORT_SYMBOL(se_dev_set_fabric_max_sectors);
-
int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
{
if (dev->export_count) {
@@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
dev, dev->export_count);
return -EINVAL;
}
- if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) {
+ if (optimal_sectors > dev->dev_attrib.hw_max_sectors) {
pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
- " greater than fabric_max_sectors: %u\n", dev,
- optimal_sectors, dev->dev_attrib.fabric_max_sectors);
+ " greater than hw_max_sectors: %u\n", dev,
+ optimal_sectors, dev->dev_attrib.hw_max_sectors);
return -EINVAL;
}
@@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
dev->dev_attrib.unmap_granularity_alignment =
DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT;
dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN;
- dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS;
- dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS;
xcopy_lun = &dev->xcopy_lun;
xcopy_lun->lun_se_dev = dev;
@@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev)
dev->dev_attrib.hw_max_sectors =
se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors,
dev->dev_attrib.hw_block_size);
+ dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors;
dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX);
dev->creation_time = get_jiffies_64();
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index c2aea099ea4a..d836de200a03 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
struct fd_prot fd_prot;
sense_reason_t rc;
int ret = 0;
-
+ /*
+ * We are currently limited by the number of iovecs (2048) per
+ * single vfs_[writev,readv] call.
+ */
+ if (cmd->data_length > FD_MAX_BYTES) {
+ pr_err("FILEIO: Not able to process I/O of %u bytes due to"
+ "FD_MAX_BYTES: %u iovec count limitiation\n",
+ cmd->data_length, FD_MAX_BYTES);
+ return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+ }
/*
* Call vectorized fileio functions to map struct scatterlist
* physical memory addresses to struct iovec virtual memory.
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = {
&fileio_dev_attrib_hw_block_size.attr,
&fileio_dev_attrib_block_size.attr,
&fileio_dev_attrib_hw_max_sectors.attr,
- &fileio_dev_attrib_fabric_max_sectors.attr,
&fileio_dev_attrib_optimal_sectors.attr,
&fileio_dev_attrib_hw_queue_depth.attr,
&fileio_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 3efff94fbd97..78346b850968 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev)
q = bdev_get_queue(bd);
dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
- dev->dev_attrib.hw_max_sectors = UINT_MAX;
+ dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
/*
@@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = {
&iblock_dev_attrib_hw_block_size.attr,
&iblock_dev_attrib_block_size.attr,
&iblock_dev_attrib_hw_max_sectors.attr,
- &iblock_dev_attrib_fabric_max_sectors.attr,
&iblock_dev_attrib_optimal_sectors.attr,
&iblock_dev_attrib_hw_queue_depth.attr,
&iblock_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index d56f2aaba9af..283cf786ef98 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder(
return 0;
}
+ } else if (we && registered_nexus) {
+ /*
+ * Reads are allowed for Write Exclusive locks
+ * from all registrants.
+ */
+ if (cmd->data_direction == DMA_FROM_DEVICE) {
+ pr_debug("Allowing READ CDB: 0x%02x for %s"
+ " reservation\n", cdb[0],
+ core_scsi3_pr_dump_type(pr_reg_type));
+
+ return 0;
+ }
}
pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
" for %s reservation\n", transport_dump_cmd_direction(cmd),
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 60ebd170a561..98e83ac5661b 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = {
&rd_mcp_dev_attrib_hw_block_size.attr,
&rd_mcp_dev_attrib_block_size.attr,
&rd_mcp_dev_attrib_hw_max_sectors.attr,
- &rd_mcp_dev_attrib_fabric_max_sectors.attr,
&rd_mcp_dev_attrib_optimal_sectors.attr,
&rd_mcp_dev_attrib_hw_queue_depth.attr,
&rd_mcp_dev_attrib_queue_depth.attr,
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index 11bea1952435..cd4bed7b2757 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
unsigned long long end_lba;
-
- if (sectors > dev->dev_attrib.fabric_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds fabric_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.fabric_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
- if (sectors > dev->dev_attrib.hw_max_sectors) {
- printk_ratelimited(KERN_ERR "SCSI OP %02xh with too"
- " big sectors %u exceeds backend hw_max_sectors:"
- " %u\n", cdb[0], sectors,
- dev->dev_attrib.hw_max_sectors);
- return TCM_INVALID_CDB_FIELD;
- }
check_lba:
end_lba = dev->transport->get_blocks(dev) + 1;
if (cmd->t_task_lba + sectors > end_lba) {
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index 1307600fe726..4c71657da56a 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -505,7 +505,6 @@ static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
struct se_device *dev = cmd->se_dev;
- u32 max_sectors;
int have_tp = 0;
int opt, min;
@@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
/*
* Set MAXIMUM TRANSFER LENGTH
*/
- max_sectors = min(dev->dev_attrib.fabric_max_sectors,
- dev->dev_attrib.hw_max_sectors);
- put_unaligned_be32(max_sectors, &buf[8]);
+ put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);
/*
* Set OPTIMAL TRANSFER LENGTH
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index 8bfa61c9693d..1157b559683b 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
&tcmu_dev_attrib_hw_block_size.attr,
&tcmu_dev_attrib_block_size.attr,
&tcmu_dev_attrib_hw_max_sectors.attr,
- &tcmu_dev_attrib_fabric_max_sectors.attr,
&tcmu_dev_attrib_optimal_sectors.attr,
&tcmu_dev_attrib_hw_queue_depth.attr,
&tcmu_dev_attrib_queue_depth.attr,
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index ad09e51ffae4..f65f0d109fc8 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -4,6 +4,8 @@
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*
+ * Copyright (C) 2014 Viresh Kumar <viresh.kumar@linaro.org>
+ *
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -28,6 +30,20 @@
#include <linux/cpu.h>
#include <linux/cpu_cooling.h>
+/*
+ * Cooling state <-> CPUFreq frequency
+ *
+ * Cooling states are translated to frequencies throughout this driver and this
+ * is the relation between them.
+ *
+ * Highest cooling state corresponds to lowest possible frequency.
+ *
+ * i.e.
+ * level 0 --> 1st Max Freq
+ * level 1 --> 2nd Max Freq
+ * ...
+ */
+
/**
* struct cpufreq_cooling_device - data for cooling device with cpufreq
* @id: unique integer value corresponding to each cpufreq_cooling_device
@@ -38,25 +54,27 @@
* cooling devices.
* @cpufreq_val: integer value representing the absolute value of the clipped
* frequency.
+ * @max_level: maximum cooling level. One less than total number of valid
+ * cpufreq frequencies.
* @allowed_cpus: all the cpus involved for this cpufreq_cooling_device.
+ * @node: list_head to link all cpufreq_cooling_device together.
*
- * This structure is required for keeping information of each
- * cpufreq_cooling_device registered. In order to prevent corruption of this a
- * mutex lock cooling_cpufreq_lock is used.
+ * This structure is required for keeping information of each registered
+ * cpufreq_cooling_device.
*/
struct cpufreq_cooling_device {
int id;
struct thermal_cooling_device *cool_dev;
unsigned int cpufreq_state;
unsigned int cpufreq_val;
+ unsigned int max_level;
+ unsigned int *freq_table; /* In descending order */
struct cpumask allowed_cpus;
struct list_head node;
};
static DEFINE_IDR(cpufreq_idr);
static DEFINE_MUTEX(cooling_cpufreq_lock);
-static unsigned int cpufreq_dev_count;
-
static LIST_HEAD(cpufreq_dev_list);
/**
@@ -98,120 +116,30 @@ static void release_idr(struct idr *idr, int id)
/* Below code defines functions to be used for cpufreq as cooling device */
/**
- * is_cpufreq_valid - function to check frequency transitioning capability.
- * @cpu: cpu for which check is needed.
+ * get_level - Find the level for a particular frequency
+ * @cpufreq_dev: cpufreq_dev for which the property is required
+ * @freq: Frequency
*
- * This function will check the current state of the system if
- * it is capable of changing the frequency for a given @cpu.
- *
- * Return: 0 if the system is not currently capable of changing
- * the frequency of given cpu. !0 in case the frequency is changeable.
+ * Return: level on success, THERMAL_CSTATE_INVALID on error.
*/
-static int is_cpufreq_valid(int cpu)
+static unsigned long get_level(struct cpufreq_cooling_device *cpufreq_dev,
+ unsigned int freq)
{
- struct cpufreq_policy policy;
-
- return !cpufreq_get_policy(&policy, cpu);
-}
-
-enum cpufreq_cooling_property {
- GET_LEVEL,
- GET_FREQ,
- GET_MAXL,
-};
-
-/**
- * get_property - fetch a property of interest for a give cpu.
- * @cpu: cpu for which the property is required
- * @input: query parameter
- * @output: query return
- * @property: type of query (frequency, level, max level)
- *
- * This is the common function to
- * 1. get maximum cpu cooling states
- * 2. translate frequency to cooling state
- * 3. translate cooling state to frequency
- * Note that the code may be not in good shape
- * but it is written in this way in order to:
- * a) reduce duplicate code as most of the code can be shared.
- * b) make sure the logic is consistent when translating between
- * cooling states and frequencies.
- *
- * Return: 0 on success, -EINVAL when invalid parameters are passed.
- */
-static int get_property(unsigned int cpu, unsigned long input,
- unsigned int *output,
- enum cpufreq_cooling_property property)
-{
- int i;
- unsigned long max_level = 0, level = 0;
- unsigned int freq = CPUFREQ_ENTRY_INVALID;
- int descend = -1;
- struct cpufreq_frequency_table *pos, *table =
- cpufreq_frequency_get_table(cpu);
-
- if (!output)
- return -EINVAL;
-
- if (!table)
- return -EINVAL;
-
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* get the frequency order */
- if (freq != CPUFREQ_ENTRY_INVALID && descend == -1)
- descend = freq > pos->frequency;
-
- freq = pos->frequency;
- max_level++;
- }
-
- /* No valid cpu frequency entry */
- if (max_level == 0)
- return -EINVAL;
+ unsigned long level;
- /* max_level is an index, not a counter */
- max_level--;
+ for (level = 0; level <= cpufreq_dev->max_level; level++) {
+ if (freq == cpufreq_dev->freq_table[level])
+ return level;
- /* get max level */
- if (property == GET_MAXL) {
- *output = (unsigned int)max_level;
- return 0;
+ if (freq > cpufreq_dev->freq_table[level])
+ break;
}
- if (property == GET_FREQ)
- level = descend ? input : (max_level - input);
-
- i = 0;
- cpufreq_for_each_valid_entry(pos, table) {
- /* ignore duplicate entry */
- if (freq == pos->frequency)
- continue;
-
- /* now we have a valid frequency entry */
- freq = pos->frequency;
-
- if (property == GET_LEVEL && (unsigned int)input == freq) {
- /* get level by frequency */
- *output = descend ? i : (max_level - i);
- return 0;
- }
- if (property == GET_FREQ && level == i) {
- /* get frequency by level */
- *output = freq;
- return 0;
- }
- i++;
- }
-
- return -EINVAL;
+ return THERMAL_CSTATE_INVALID;
}
/**
- * cpufreq_cooling_get_level - for a give cpu, return the cooling level.
+ * cpufreq_cooling_get_level - for a given cpu, return the cooling level.
* @cpu: cpu for which the level is required
* @freq: the frequency of interest
*
@@ -223,77 +151,21 @@ static int get_property(unsigned int cpu, unsigned long input,
*/
unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
{
- unsigned int val;
-
- if (get_property(cpu, (unsigned long)freq, &val, GET_LEVEL))
- return THERMAL_CSTATE_INVALID;
-
- return (unsigned long)val;
-}
-EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
-
-/**
- * get_cpu_frequency - get the absolute value of frequency from level.
- * @cpu: cpu for which frequency is fetched.
- * @level: cooling level
- *
- * This function matches cooling level with frequency. Based on a cooling level
- * of frequency, equals cooling state of cpu cooling device, it will return
- * the corresponding frequency.
- * e.g level=0 --> 1st MAX FREQ, level=1 ---> 2nd MAX FREQ, .... etc
- *
- * Return: 0 on error, the corresponding frequency otherwise.
- */
-static unsigned int get_cpu_frequency(unsigned int cpu, unsigned long level)
-{
- int ret = 0;
- unsigned int freq;
-
- ret = get_property(cpu, level, &freq, GET_FREQ);
- if (ret)
- return 0;
-
- return freq;
-}
-
-/**
- * cpufreq_apply_cooling - function to apply frequency clipping.
- * @cpufreq_device: cpufreq_cooling_device pointer containing frequency
- * clipping data.
- * @cooling_state: value of the cooling state.
- *
- * Function used to make sure the cpufreq layer is aware of current thermal
- * limits. The limits are applied by updating the cpufreq policy.
- *
- * Return: 0 on success, an error code otherwise (-EINVAL in case wrong
- * cooling state).
- */
-static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
- unsigned long cooling_state)
-{
- unsigned int cpuid, clip_freq;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu = cpumask_any(mask);
-
-
- /* Check if the old cooling action is same as new cooling action */
- if (cpufreq_device->cpufreq_state == cooling_state)
- return 0;
-
- clip_freq = get_cpu_frequency(cpu, cooling_state);
- if (!clip_freq)
- return -EINVAL;
-
- cpufreq_device->cpufreq_state = cooling_state;
- cpufreq_device->cpufreq_val = clip_freq;
+ struct cpufreq_cooling_device *cpufreq_dev;
- for_each_cpu(cpuid, mask) {
- if (is_cpufreq_valid(cpuid))
- cpufreq_update_policy(cpuid);
+ mutex_lock(&cooling_cpufreq_lock);
+ list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
+ if (cpumask_test_cpu(cpu, &cpufreq_dev->allowed_cpus)) {
+ mutex_unlock(&cooling_cpufreq_lock);
+ return get_level(cpufreq_dev, freq);
+ }
}
+ mutex_unlock(&cooling_cpufreq_lock);
- return 0;
+ pr_err("%s: cpu:%d not part of any cooling device\n", __func__, cpu);
+ return THERMAL_CSTATE_INVALID;
}
+EXPORT_SYMBOL_GPL(cpufreq_cooling_get_level);
/**
* cpufreq_thermal_notifier - notifier callback for cpufreq policy change.
@@ -323,11 +195,6 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
&cpufreq_dev->allowed_cpus))
continue;
- if (!cpufreq_dev->cpufreq_val)
- cpufreq_dev->cpufreq_val = get_cpu_frequency(
- cpumask_any(&cpufreq_dev->allowed_cpus),
- cpufreq_dev->cpufreq_state);
-
max_freq = cpufreq_dev->cpufreq_val;
if (policy->max != max_freq)
@@ -354,19 +221,9 @@ static int cpufreq_get_max_state(struct thermal_cooling_device *cdev,
unsigned long *state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
- struct cpumask *mask = &cpufreq_device->allowed_cpus;
- unsigned int cpu;
- unsigned int count = 0;
- int ret;
-
- cpu = cpumask_any(mask);
-
- ret = get_property(cpu, 0, &count, GET_MAXL);
- if (count > 0)
- *state = count;
-
- return ret;
+ *state = cpufreq_device->max_level;
+ return 0;
}
/**
@@ -403,8 +260,24 @@ static int cpufreq_set_cur_state(struct thermal_cooling_device *cdev,
unsigned long state)
{
struct cpufreq_cooling_device *cpufreq_device = cdev->devdata;
+ unsigned int cpu = cpumask_any(&cpufreq_device->allowed_cpus);
+ unsigned int clip_freq;
+
+ /* Requested state should not exceed max_level */
+ if (WARN_ON(state > cpufreq_device->max_level))
+ return -EINVAL;
+
+ /* Check if the old cooling action is the same as the new cooling action */
+ if (cpufreq_device->cpufreq_state == state)
+ return 0;
- return cpufreq_apply_cooling(cpufreq_device, state);
+ clip_freq = cpufreq_device->freq_table[state];
+ cpufreq_device->cpufreq_state = state;
+ cpufreq_device->cpufreq_val = clip_freq;
+
+ cpufreq_update_policy(cpu);
+
+ return 0;
}
/* Bind cpufreq callbacks to thermal cooling device ops */
@@ -419,10 +292,25 @@ static struct notifier_block thermal_cpufreq_notifier_block = {
.notifier_call = cpufreq_thermal_notifier,
};
+static unsigned int find_next_max(struct cpufreq_frequency_table *table,
+ unsigned int prev_max)
+{
+ struct cpufreq_frequency_table *pos;
+ unsigned int max = 0;
+
+ cpufreq_for_each_valid_entry(pos, table) {
+ if (pos->frequency > max && pos->frequency < prev_max)
+ max = pos->frequency;
+ }
+
+ return max;
+}
+
/**
* __cpufreq_cooling_register - helper function to create cpufreq cooling device
* @np: a valid struct device_node to the cooling device device tree node
* @clip_cpus: cpumask of cpus where the frequency constraints will happen.
+ * Normally this should be the same as cpufreq policy->related_cpus.
*
* This interface function registers the cpufreq cooling device with the name
* "thermal-cpufreq-%x". This api can support multiple instances of cpufreq
@@ -437,37 +325,42 @@ __cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
struct thermal_cooling_device *cool_dev;
- struct cpufreq_cooling_device *cpufreq_dev = NULL;
- unsigned int min = 0, max = 0;
+ struct cpufreq_cooling_device *cpufreq_dev;
char dev_name[THERMAL_NAME_LENGTH];
- int ret = 0, i;
- struct cpufreq_policy policy;
+ struct cpufreq_frequency_table *pos, *table;
+ unsigned int freq, i;
+ int ret;
- /* Verify that all the clip cpus have same freq_min, freq_max limit */
- for_each_cpu(i, clip_cpus) {
- /* continue if cpufreq policy not found and not return error */
- if (!cpufreq_get_policy(&policy, i))
- continue;
- if (min == 0 && max == 0) {
- min = policy.cpuinfo.min_freq;
- max = policy.cpuinfo.max_freq;
- } else {
- if (min != policy.cpuinfo.min_freq ||
- max != policy.cpuinfo.max_freq)
- return ERR_PTR(-EINVAL);
- }
+ table = cpufreq_frequency_get_table(cpumask_first(clip_cpus));
+ if (!table) {
+ pr_debug("%s: CPUFreq table not found\n", __func__);
+ return ERR_PTR(-EPROBE_DEFER);
}
- cpufreq_dev = kzalloc(sizeof(struct cpufreq_cooling_device),
- GFP_KERNEL);
+
+ cpufreq_dev = kzalloc(sizeof(*cpufreq_dev), GFP_KERNEL);
if (!cpufreq_dev)
return ERR_PTR(-ENOMEM);
+ /* Find max levels */
+ cpufreq_for_each_valid_entry(pos, table)
+ cpufreq_dev->max_level++;
+
+ cpufreq_dev->freq_table = kmalloc(sizeof(*cpufreq_dev->freq_table) *
+ cpufreq_dev->max_level, GFP_KERNEL);
+ if (!cpufreq_dev->freq_table) {
+ cool_dev = ERR_PTR(-ENOMEM);
+ goto free_cdev;
+ }
+
+ /* max_level is an index, not a counter */
+ cpufreq_dev->max_level--;
+
cpumask_copy(&cpufreq_dev->allowed_cpus, clip_cpus);
ret = get_idr(&cpufreq_idr, &cpufreq_dev->id);
if (ret) {
- kfree(cpufreq_dev);
- return ERR_PTR(-EINVAL);
+ cool_dev = ERR_PTR(ret);
+ goto free_table;
}
snprintf(dev_name, sizeof(dev_name), "thermal-cpufreq-%d",
@@ -475,25 +368,44 @@ __cpufreq_cooling_register(struct device_node *np,
cool_dev = thermal_of_cooling_device_register(np, dev_name, cpufreq_dev,
&cpufreq_cooling_ops);
- if (IS_ERR(cool_dev)) {
- release_idr(&cpufreq_idr, cpufreq_dev->id);
- kfree(cpufreq_dev);
- return cool_dev;
+ if (IS_ERR(cool_dev))
+ goto remove_idr;
+
+ /* Fill freq-table in descending order of frequencies */
+ for (i = 0, freq = -1; i <= cpufreq_dev->max_level; i++) {
+ freq = find_next_max(table, freq);
+ cpufreq_dev->freq_table[i] = freq;
+
+ /* Warn for duplicate entries */
+ if (!freq)
+ pr_warn("%s: table has duplicate entries\n", __func__);
+ else
+ pr_debug("%s: freq:%u KHz\n", __func__, freq);
}
+
+ cpufreq_dev->cpufreq_val = cpufreq_dev->freq_table[0];
cpufreq_dev->cool_dev = cool_dev;
- cpufreq_dev->cpufreq_state = 0;
+
mutex_lock(&cooling_cpufreq_lock);
/* Register the notifier for first cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
- cpufreq_dev_count++;
list_add(&cpufreq_dev->node, &cpufreq_dev_list);
mutex_unlock(&cooling_cpufreq_lock);
return cool_dev;
+
+remove_idr:
+ release_idr(&cpufreq_idr, cpufreq_dev->id);
+free_table:
+ kfree(cpufreq_dev->freq_table);
+free_cdev:
+ kfree(cpufreq_dev);
+
+ return cool_dev;
}
/**
@@ -554,16 +466,16 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
cpufreq_dev = cdev->devdata;
mutex_lock(&cooling_cpufreq_lock);
list_del(&cpufreq_dev->node);
- cpufreq_dev_count--;
/* Unregister the notifier for the last cpufreq cooling device */
- if (cpufreq_dev_count == 0)
+ if (list_empty(&cpufreq_dev_list))
cpufreq_unregister_notifier(&thermal_cpufreq_notifier_block,
CPUFREQ_POLICY_NOTIFIER);
mutex_unlock(&cooling_cpufreq_lock);
thermal_cooling_device_unregister(cpufreq_dev->cool_dev);
release_idr(&cpufreq_idr, cpufreq_dev->id);
+ kfree(cpufreq_dev->freq_table);
kfree(cpufreq_dev);
}
EXPORT_SYMBOL_GPL(cpufreq_cooling_unregister);
diff --git a/drivers/thermal/db8500_cpufreq_cooling.c b/drivers/thermal/db8500_cpufreq_cooling.c
index 000d53e934a0..607b62c7e611 100644
--- a/drivers/thermal/db8500_cpufreq_cooling.c
+++ b/drivers/thermal/db8500_cpufreq_cooling.c
@@ -18,7 +18,6 @@
*/
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
@@ -28,18 +27,17 @@
static int db8500_cpufreq_cooling_probe(struct platform_device *pdev)
{
struct thermal_cooling_device *cdev;
- struct cpumask mask_val;
-
- /* make sure cpufreq driver has been initialized */
- if (!cpufreq_frequency_get_table(0))
- return -EPROBE_DEFER;
-
- cpumask_set_cpu(0, &mask_val);
- cdev = cpufreq_cooling_register(&mask_val);
+ cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(cdev)) {
- dev_err(&pdev->dev, "Failed to register cooling device\n");
- return PTR_ERR(cdev);
+ int ret = PTR_ERR(cdev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register cooling device %d\n",
+ ret);
+
+ return ret;
}
platform_set_drvdata(pdev, cdev);
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 88b32f942dcf..2ccbc0788353 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -9,7 +9,6 @@
#include <linux/clk.h>
#include <linux/cpu_cooling.h>
-#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -454,15 +453,10 @@ static int imx_thermal_probe(struct platform_device *pdev)
const struct of_device_id *of_id =
of_match_device(of_imx_thermal_match, &pdev->dev);
struct imx_thermal_data *data;
- struct cpumask clip_cpus;
struct regmap *map;
int measure_freq;
int ret;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(&pdev->dev, "no cpufreq driver!");
- return -EPROBE_DEFER;
- }
data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -516,12 +510,13 @@ static int imx_thermal_probe(struct platform_device *pdev)
regmap_write(map, MISC0 + REG_SET, MISC0_REFTOP_SELBIASOFF);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
- cpumask_set_cpu(0, &clip_cpus);
- data->cdev = cpufreq_cooling_register(&clip_cpus);
+ data->cdev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cdev)) {
ret = PTR_ERR(data->cdev);
- dev_err(&pdev->dev,
- "failed to register cpufreq cooling device: %d\n", ret);
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "failed to register cpufreq cooling device: %d\n",
+ ret);
return ret;
}
@@ -613,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev)
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN);
data->mode = THERMAL_DEVICE_DISABLED;
+ clk_disable_unprepare(data->thermal_clk);
return 0;
}
@@ -622,6 +618,7 @@ static int imx_thermal_resume(struct device *dev)
struct imx_thermal_data *data = dev_get_drvdata(dev);
struct regmap *map = data->tempmon;
+ clk_prepare_enable(data->thermal_clk);
/* Enabled thermal sensor after resume */
regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
index ffe40bffaf1a..d4413698a85f 100644
--- a/drivers/thermal/int340x_thermal/Makefile
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -1,4 +1,5 @@
obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
+obj-$(CONFIG_INT340X_THERMAL) += processor_thermal_device.o
obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
index e4e61b3fb11e..2c2ec7666eb1 100644
--- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -82,7 +82,7 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
if (!acpi_has_method(handle, "_TRT"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
continue;
result = acpi_bus_get_device(trt->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
result = acpi_bus_get_device(trt->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get target ACPI device\n");
}
@@ -167,7 +163,7 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
if (!acpi_has_method(handle, "_ART"))
- return 0;
+ return -ENODEV;
status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
if (ACPI_FAILURE(status))
@@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
if (art->source) {
result = acpi_bus_get_device(art->source, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
if (art->target) {
result = acpi_bus_get_device(art->target, &adev);
- if (!result)
- acpi_create_platform_device(adev);
- else
+ if (result)
pr_warn("Failed to get source ACPI device\n");
}
}
@@ -321,8 +313,8 @@ static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
unsigned long length = 0;
int count = 0;
char __user *arg = (void __user *)__arg;
- struct trt *trts;
- struct art *arts;
+ struct trt *trts = NULL;
+ struct art *arts = NULL;
switch (cmd) {
case ACPI_THERMAL_GET_TRT_COUNT:
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
index dcb306ea14a4..65a98a97df07 100644
--- a/drivers/thermal/int340x_thermal/int3400_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -335,7 +335,6 @@ static struct platform_driver int3400_thermal_driver = {
.remove = int3400_thermal_remove,
.driver = {
.name = "int3400 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = ACPI_PTR(int3400_thermal_match),
},
};
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
index a5d08c14ba24..c5cbc3af3a05 100644
--- a/drivers/thermal/int340x_thermal/int3402_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -231,7 +231,6 @@ static struct platform_driver int3402_thermal_driver = {
.remove = int3402_thermal_remove,
.driver = {
.name = "int3402 thermal",
- .owner = THIS_MODULE,
.acpi_match_table = int3402_thermal_match,
},
};
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
index 1bfa6a69e77a..0faf500d8a77 100644
--- a/drivers/thermal/int340x_thermal/int3403_thermal.c
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -301,6 +301,8 @@ static int int3403_sensor_remove(struct int3403_priv *priv)
{
struct int3403_sensor *obj = priv->priv;
+ acpi_remove_notify_handler(priv->adev->handle,
+ ACPI_DEVICE_NOTIFY, int3403_notify);
thermal_zone_device_unregister(obj->tzone);
return 0;
}
@@ -369,6 +371,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
p = buf.pointer;
if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
printk(KERN_WARNING "Invalid PPSS data\n");
+ kfree(buf.pointer);
return -EFAULT;
}
@@ -381,6 +384,7 @@ static int int3403_cdev_add(struct int3403_priv *priv)
priv->priv = obj;
+ kfree(buf.pointer);
/* TODO: add ACPI notification support */
return result;
diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c
new file mode 100644
index 000000000000..0fe5dbbea968
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c
@@ -0,0 +1,311 @@
+/*
+ * processor_thermal_device.c
+ * Copyright (c) 2014, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/acpi.h>
+
+/* Broadwell-U/HSB thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BDW_THERMAL 0x1603
+#define PCI_DEVICE_ID_PROC_HSB_THERMAL 0x0A03
+
+/* Braswell thermal reporting device */
+#define PCI_DEVICE_ID_PROC_BSW_THERMAL 0x22DC
+
+struct power_config {
+ u32 index;
+ u32 min_uw;
+ u32 max_uw;
+ u32 tmin_us;
+ u32 tmax_us;
+ u32 step_uw;
+};
+
+struct proc_thermal_device {
+ struct device *dev;
+ struct acpi_device *adev;
+ struct power_config power_limits[2];
+};
+
+enum proc_thermal_emum_mode_type {
+ PROC_THERMAL_NONE,
+ PROC_THERMAL_PCI,
+ PROC_THERMAL_PLATFORM_DEV
+};
+
+/*
+ * We can have only one type of enumeration, PCI or Platform,
+ * not both. So we don't need instance specific data.
+ */
+static enum proc_thermal_emum_mode_type proc_thermal_emum_mode =
+ PROC_THERMAL_NONE;
+
+#define POWER_LIMIT_SHOW(index, suffix) \
+static ssize_t power_limit_##index##_##suffix##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ struct pci_dev *pci_dev; \
+ struct platform_device *pdev; \
+ struct proc_thermal_device *proc_dev; \
+\
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) { \
+ pdev = to_platform_device(dev); \
+ proc_dev = platform_get_drvdata(pdev); \
+ } else { \
+ pci_dev = to_pci_dev(dev); \
+ proc_dev = pci_get_drvdata(pci_dev); \
+ } \
+ return sprintf(buf, "%lu\n",\
+ (unsigned long)proc_dev->power_limits[index].suffix * 1000); \
+}
+
+POWER_LIMIT_SHOW(0, min_uw)
+POWER_LIMIT_SHOW(0, max_uw)
+POWER_LIMIT_SHOW(0, step_uw)
+POWER_LIMIT_SHOW(0, tmin_us)
+POWER_LIMIT_SHOW(0, tmax_us)
+
+POWER_LIMIT_SHOW(1, min_uw)
+POWER_LIMIT_SHOW(1, max_uw)
+POWER_LIMIT_SHOW(1, step_uw)
+POWER_LIMIT_SHOW(1, tmin_us)
+POWER_LIMIT_SHOW(1, tmax_us)
+
+static DEVICE_ATTR_RO(power_limit_0_min_uw);
+static DEVICE_ATTR_RO(power_limit_0_max_uw);
+static DEVICE_ATTR_RO(power_limit_0_step_uw);
+static DEVICE_ATTR_RO(power_limit_0_tmin_us);
+static DEVICE_ATTR_RO(power_limit_0_tmax_us);
+
+static DEVICE_ATTR_RO(power_limit_1_min_uw);
+static DEVICE_ATTR_RO(power_limit_1_max_uw);
+static DEVICE_ATTR_RO(power_limit_1_step_uw);
+static DEVICE_ATTR_RO(power_limit_1_tmin_us);
+static DEVICE_ATTR_RO(power_limit_1_tmax_us);
+
+static struct attribute *power_limit_attrs[] = {
+ &dev_attr_power_limit_0_min_uw.attr,
+ &dev_attr_power_limit_1_min_uw.attr,
+ &dev_attr_power_limit_0_max_uw.attr,
+ &dev_attr_power_limit_1_max_uw.attr,
+ &dev_attr_power_limit_0_step_uw.attr,
+ &dev_attr_power_limit_1_step_uw.attr,
+ &dev_attr_power_limit_0_tmin_us.attr,
+ &dev_attr_power_limit_1_tmin_us.attr,
+ &dev_attr_power_limit_0_tmax_us.attr,
+ &dev_attr_power_limit_1_tmax_us.attr,
+ NULL
+};
+
+static struct attribute_group power_limit_attribute_group = {
+ .attrs = power_limit_attrs,
+ .name = "power_limits"
+};
+
+static int proc_thermal_add(struct device *dev,
+ struct proc_thermal_device **priv)
+{
+ struct proc_thermal_device *proc_priv;
+ struct acpi_device *adev;
+ acpi_status status;
+ struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *elements, *ppcc;
+ union acpi_object *p;
+ int i;
+ int ret;
+
+ adev = ACPI_COMPANION(dev);
+ if (!adev)
+ return -ENODEV;
+
+ status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ p = buf.pointer;
+ if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
+ dev_err(dev, "Invalid PPCC data\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ if (!p->package.count) {
+ dev_err(dev, "Invalid PPCC package size\n");
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+
+ proc_priv = devm_kzalloc(dev, sizeof(*proc_priv), GFP_KERNEL);
+ if (!proc_priv) {
+ ret = -ENOMEM;
+ goto free_buffer;
+ }
+
+ proc_priv->dev = dev;
+ proc_priv->adev = adev;
+
+ for (i = 0; i < min((int)p->package.count - 1, 2); ++i) {
+ elements = &(p->package.elements[i+1]);
+ if (elements->type != ACPI_TYPE_PACKAGE ||
+ elements->package.count != 6) {
+ ret = -EFAULT;
+ goto free_buffer;
+ }
+ ppcc = elements->package.elements;
+ proc_priv->power_limits[i].index = ppcc[0].integer.value;
+ proc_priv->power_limits[i].min_uw = ppcc[1].integer.value;
+ proc_priv->power_limits[i].max_uw = ppcc[2].integer.value;
+ proc_priv->power_limits[i].tmin_us = ppcc[3].integer.value;
+ proc_priv->power_limits[i].tmax_us = ppcc[4].integer.value;
+ proc_priv->power_limits[i].step_uw = ppcc[5].integer.value;
+ }
+
+ *priv = proc_priv;
+
+ ret = sysfs_create_group(&dev->kobj,
+ &power_limit_attribute_group);
+
+free_buffer:
+ kfree(buf.pointer);
+
+ return ret;
+}
+
+void proc_thermal_remove(struct proc_thermal_device *proc_priv)
+{
+ sysfs_remove_group(&proc_priv->dev->kobj,
+ &power_limit_attribute_group);
+}
+
+static int int3401_add(struct platform_device *pdev)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PCI) {
+ dev_err(&pdev->dev, "error: enumerated as PCI dev\n");
+ return -ENODEV;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret)
+ return ret;
+
+ platform_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PLATFORM_DEV;
+
+ return 0;
+}
+
+static int int3401_remove(struct platform_device *pdev)
+{
+ proc_thermal_remove(platform_get_drvdata(pdev));
+
+ return 0;
+}
+
+static int proc_thermal_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *unused)
+{
+ struct proc_thermal_device *proc_priv;
+ int ret;
+
+ if (proc_thermal_emum_mode == PROC_THERMAL_PLATFORM_DEV) {
+ dev_err(&pdev->dev, "error: enumerated as platform dev\n");
+ return -ENODEV;
+ }
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = proc_thermal_add(&pdev->dev, &proc_priv);
+ if (ret) {
+ pci_disable_device(pdev);
+ return ret;
+ }
+
+ pci_set_drvdata(pdev, proc_priv);
+ proc_thermal_emum_mode = PROC_THERMAL_PCI;
+
+ return 0;
+}
+
+static void proc_thermal_pci_remove(struct pci_dev *pdev)
+{
+ proc_thermal_remove(pci_get_drvdata(pdev));
+ pci_disable_device(pdev);
+}
+
+static const struct pci_device_id proc_thermal_pci_ids[] = {
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BDW_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_HSB_THERMAL)},
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PROC_BSW_THERMAL)},
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, proc_thermal_pci_ids);
+
+static struct pci_driver proc_thermal_pci_driver = {
+ .name = "proc_thermal",
+ .probe = proc_thermal_pci_probe,
+ .remove = proc_thermal_pci_remove,
+ .id_table = proc_thermal_pci_ids,
+};
+
+static const struct acpi_device_id int3401_device_ids[] = {
+ {"INT3401", 0},
+ {"", 0},
+};
+MODULE_DEVICE_TABLE(acpi, int3401_device_ids);
+
+static struct platform_driver int3401_driver = {
+ .probe = int3401_add,
+ .remove = int3401_remove,
+ .driver = {
+ .name = "int3401 thermal",
+ .acpi_match_table = int3401_device_ids,
+ },
+};
+
+static int __init proc_thermal_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&int3401_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&proc_thermal_pci_driver);
+
+ return ret;
+}
+
+static void __exit proc_thermal_exit(void)
+{
+ platform_driver_unregister(&int3401_driver);
+ pci_unregister_driver(&proc_thermal_pci_driver);
+}
+
+module_init(proc_thermal_init);
+module_exit(proc_thermal_exit);
+
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Processor Thermal Reporting Device Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/intel_powerclamp.c b/drivers/thermal/intel_powerclamp.c
index e98b4249187c..6ceebd659dd4 100644
--- a/drivers/thermal/intel_powerclamp.c
+++ b/drivers/thermal/intel_powerclamp.c
@@ -688,6 +688,7 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x45},
{ X86_VENDOR_INTEL, 6, 0x46},
{ X86_VENDOR_INTEL, 6, 0x4c},
+ { X86_VENDOR_INTEL, 6, 0x56},
{}
};
MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index e145b66df444..d717f3dab6f1 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid);
*
* Return: pointer to trip points table, NULL otherwise
*/
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
struct __thermal_zone *data = tz->devdata;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 8803e693fe68..2580a4872f90 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -63,7 +63,7 @@ struct rcar_thermal_priv {
struct mutex lock;
struct list_head list;
int id;
- int ctemp;
+ u32 ctemp;
};
#define rcar_thermal_for_each_priv(pos, common) \
@@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv)
{
struct device *dev = rcar_priv_to_dev(priv);
int i;
- int ctemp, old, new;
+ u32 ctemp, old, new;
int ret = -EINVAL;
mutex_lock(&priv->lock);
@@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
int i;
int ret = -ENODEV;
int idle = IDLE_INTERVAL;
+ u32 enr_bits = 0;
common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL);
if (!common)
@@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev)
/*
* platform has IRQ support.
- * Then, drier use common register
+ * Then, driver uses common registers
*/
ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0,
@@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev)
if (IS_ERR(common->base))
return PTR_ERR(common->base);
- /* enable temperature comparation */
- rcar_thermal_common_write(common, ENR, 0x00030303);
-
idle = 0; /* polling delay is not needed */
}
@@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev)
rcar_thermal_irq_enable(priv);
list_move_tail(&priv->list, &common->head);
+
+ /* update ENR bits */
+ enr_bits |= 3 << (i * 8);
}
+ /* enable temperature comparation */
+ if (irq)
+ rcar_thermal_common_write(common, ENR, enr_bits);
+
platform_set_drvdata(pdev, common);
dev_info(dev, "%d sensor probed\n", i);
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index 1bcddfc60e91..9c6ce548e363 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -677,7 +677,6 @@ static SIMPLE_DEV_PM_OPS(rockchip_thermal_pm_ops,
static struct platform_driver rockchip_thermal_driver = {
.driver = {
.name = "rockchip-thermal",
- .owner = THIS_MODULE,
.pm = &rockchip_thermal_pm_ops,
.of_match_table = of_rockchip_thermal_match,
},
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index f760389a204c..c43306ecc0ab 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
config EXYNOS_THERMAL
tristate "Exynos thermal management unit driver"
- depends on ARCH_HAS_BANDGAP && OF
+ depends on OF
help
If you say yes here you get support for the TMU (Thermal Management
Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index b6be572704a4..6dc3815cc73f 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -347,7 +347,6 @@ void exynos_report_trigger(struct thermal_sensor_conf *conf)
int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
{
int ret;
- struct cpumask mask_val;
struct exynos_thermal_zone *th_zone;
if (!sensor_conf || !sensor_conf->read_temperature) {
@@ -367,13 +366,14 @@ int exynos_register_thermal(struct thermal_sensor_conf *sensor_conf)
* sensor
*/
if (sensor_conf->cooling_data.freq_clip_count > 0) {
- cpumask_set_cpu(0, &mask_val);
th_zone->cool_dev[th_zone->cool_dev_size] =
- cpufreq_cooling_register(&mask_val);
+ cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(th_zone->cool_dev[th_zone->cool_dev_size])) {
- dev_err(sensor_conf->dev,
- "Failed to register cpufreq cooling device\n");
- ret = -EINVAL;
+ ret = PTR_ERR(th_zone->cool_dev[th_zone->cool_dev_size]);
+ if (ret != -EPROBE_DEFER)
+ dev_err(sensor_conf->dev,
+ "Failed to register cpufreq cooling device: %d\n",
+ ret);
goto err_unregister;
}
th_zone->cool_dev_size++;
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index d44d91d681d4..d2f1e62a4232 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -927,7 +927,10 @@ static int exynos_tmu_probe(struct platform_device *pdev)
/* Register the sensor with thermal management interface */
ret = exynos_register_thermal(sensor_conf);
if (ret) {
- dev_err(&pdev->dev, "Failed to register thermal interface\n");
+ if (ret != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Failed to register thermal interface: %d\n",
+ ret);
goto err_clk;
}
data->reg_conf = sensor_conf;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 84fdf0792e27..87e0b0782023 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -930,7 +930,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
struct thermal_zone_device *pos1;
struct thermal_cooling_device *pos2;
unsigned long max_state;
- int result;
+ int result, ret;
if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE))
return -EINVAL;
@@ -947,7 +947,9 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz,
if (tz != pos1 || cdev != pos2)
return -EINVAL;
- cdev->ops->get_max_state(cdev, &max_state);
+ ret = cdev->ops->get_max_state(cdev, &max_state);
+ if (ret)
+ return ret;
/* lower default 0, upper default max_state */
lower = lower == THERMAL_NO_LIMIT ? 0 : lower;
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 9083e7520623..0531c752fbbb 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -91,7 +91,7 @@ int of_parse_thermal_zones(void);
void of_thermal_destroy_zones(void);
int of_thermal_get_ntrips(struct thermal_zone_device *);
bool of_thermal_is_trip_valid(struct thermal_zone_device *, int);
-const struct thermal_trip * const
+const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *);
#else
static inline int of_parse_thermal_zones(void) { return 0; }
@@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
{
return 0;
}
-static inline const struct thermal_trip * const
+static inline const struct thermal_trip *
of_thermal_get_trip_points(struct thermal_zone_device *tz)
{
return NULL;
diff --git a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
index 5fd03865e396..3fb054a10f6a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
+++ b/drivers/thermal/ti-soc-thermal/ti-thermal-common.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/thermal.h>
-#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpu_cooling.h>
#include <linux/of.h>
@@ -407,17 +406,17 @@ int ti_thermal_register_cpu_cooling(struct ti_bandgap *bgp, int id)
if (!data)
return -EINVAL;
- if (!cpufreq_get_current_driver()) {
- dev_dbg(bgp->dev, "no cpufreq driver yet\n");
- return -EPROBE_DEFER;
- }
-
/* Register cooling device */
data->cool_dev = cpufreq_cooling_register(cpu_present_mask);
if (IS_ERR(data->cool_dev)) {
- dev_err(bgp->dev,
- "Failed to register cpufreq cooling device\n");
- return PTR_ERR(data->cool_dev);
+ int ret = PTR_ERR(data->cool_dev);
+
+ if (ret != -EPROBE_DEFER)
+ dev_err(bgp->dev,
+ "Failed to register cpu cooling device %d\n",
+ ret);
+
+ return ret;
}
ti_bandgap_set_sensor_data(bgp, id, data);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index d2b496750d59..4ddfa60c9222 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2399,17 +2399,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
poll_wait(file, &tty->read_wait, wait);
poll_wait(file, &tty->write_wait, wait);
- if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
- mask |= POLLHUP;
if (input_available_p(tty, 1))
mask |= POLLIN | POLLRDNORM;
- else if (mask & POLLHUP) {
- tty_flush_to_ldisc(tty);
- if (input_available_p(tty, 1))
- mask |= POLLIN | POLLRDNORM;
- }
if (tty->packet && tty->link->ctrl_status)
mask |= POLLPRI | POLLIN | POLLRDNORM;
+ if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+ mask |= POLLHUP;
if (tty_hung_up_p(file))
mask |= POLLHUP;
if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 31feeb2d0a66..d1f8dc6aabcb 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv,
}
static int
-pci_wch_ch382_setup(struct serial_private *priv,
+pci_wch_ch38x_setup(struct serial_private *priv,
const struct pciserial_board *board,
struct uart_8250_port *port, int idx)
{
@@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv,
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
+#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
@@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
- /* WCH CH382 2S1P card (16750 clone) */
+ /* WCH CH382 2S1P card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
.device = PCIE_DEVICE_ID_WCH_CH382_2S1P,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
- .setup = pci_wch_ch382_setup,
+ .setup = pci_wch_ch38x_setup,
+ },
+ /* WCH CH384 4S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH384_4S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch38x_setup,
},
/*
* ASIX devices with FIFO bug
@@ -2876,6 +2885,7 @@ enum pci_board_num_t {
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_wch384_4,
};
/*
@@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = {
.base_baud = 115200,
.first_offset = 0x40,
},
+
+ [pbn_wch384_4] = {
+ .flags = FL_BASE0,
+ .num_ports = 4,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
};
static const struct pci_device_id blacklist[] = {
@@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = {
{ PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */
{ PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */
{ PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */
+ { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */
};
/*
@@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch384_4 },
+
/*
* Commtech, Inc. Fastcom adapters
*/
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index 19273e31d224..107e80722575 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1757,32 +1757,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = {
#endif
#if defined(CONFIG_ARCH_EXYNOS)
+#define EXYNOS_COMMON_SERIAL_DRV_DATA \
+ .info = &(struct s3c24xx_uart_info) { \
+ .name = "Samsung Exynos UART", \
+ .type = PORT_S3C6400, \
+ .has_divslot = 1, \
+ .rx_fifomask = S5PV210_UFSTAT_RXMASK, \
+ .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \
+ .rx_fifofull = S5PV210_UFSTAT_RXFULL, \
+ .tx_fifofull = S5PV210_UFSTAT_TXFULL, \
+ .tx_fifomask = S5PV210_UFSTAT_TXMASK, \
+ .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \
+ .def_clk_sel = S3C2410_UCON_CLKSEL0, \
+ .num_clks = 1, \
+ .clksel_mask = 0, \
+ .clksel_shift = 0, \
+ }, \
+ .def_cfg = &(struct s3c2410_uartcfg) { \
+ .ucon = S5PV210_UCON_DEFAULT, \
+ .ufcon = S5PV210_UFCON_DEFAULT, \
+ .has_fracval = 1, \
+ } \
+
static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = {
- .info = &(struct s3c24xx_uart_info) {
- .name = "Samsung Exynos4 UART",
- .type = PORT_S3C6400,
- .has_divslot = 1,
- .rx_fifomask = S5PV210_UFSTAT_RXMASK,
- .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT,
- .rx_fifofull = S5PV210_UFSTAT_RXFULL,
- .tx_fifofull = S5PV210_UFSTAT_TXFULL,
- .tx_fifomask = S5PV210_UFSTAT_TXMASK,
- .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT,
- .def_clk_sel = S3C2410_UCON_CLKSEL0,
- .num_clks = 1,
- .clksel_mask = 0,
- .clksel_shift = 0,
- },
- .def_cfg = &(struct s3c2410_uartcfg) {
- .ucon = S5PV210_UCON_DEFAULT,
- .ufcon = S5PV210_UFCON_DEFAULT,
- .has_fracval = 1,
- },
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
.fifosize = { 256, 64, 16, 16 },
};
+
+static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = {
+ EXYNOS_COMMON_SERIAL_DRV_DATA,
+ .fifosize = { 64, 256, 16, 256 },
+};
+
#define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data)
+#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data)
#else
#define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL
+#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL
#endif
static struct platform_device_id s3c24xx_serial_driver_ids[] = {
@@ -1804,6 +1815,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = {
}, {
.name = "exynos4210-uart",
.driver_data = EXYNOS4210_SERIAL_DRV_DATA,
+ }, {
+ .name = "exynos5433-uart",
+ .driver_data = EXYNOS5433_SERIAL_DRV_DATA,
},
{ },
};
@@ -1823,6 +1837,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = {
.data = (void *)S5PV210_SERIAL_DRV_DATA },
{ .compatible = "samsung,exynos4210-uart",
.data = (void *)EXYNOS4210_SERIAL_DRV_DATA },
+ { .compatible = "samsung,exynos5433-uart",
+ .data = (void *)EXYNOS5433_SERIAL_DRV_DATA },
{},
};
MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 57ca61b14670..984605bb5bf1 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -2164,7 +2164,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port)
break;
}
- dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
+ port->dev ? dev_name(port->dev) : "",
+ port->dev ? ": " : "",
drv->dev_name,
drv->tty_driver->name_base + port->line,
address, port->irq, port->uartclk / 16, uart_type(port));
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 4f35b43e2475..51f066aa375e 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty)
driver->subtype == PTY_TYPE_MASTER)
return -EIO;
+ if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN))
+ return -EBUSY;
+
tty->count++;
WARN_ON(!tty->ldisc);
@@ -2106,10 +2109,6 @@ retry_open:
retval = -ENODEV;
filp->f_flags = saved_flags;
- if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) &&
- !capable(CAP_SYS_ADMIN))
- retval = -EBUSY;
-
if (retval) {
#ifdef TTY_DEBUG_HANGUP
printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5b9825a4538a..a57dc8866fc5 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev)
if (!ci)
return -ENOMEM;
- platform_set_drvdata(pdev, ci);
ci->dev = dev;
ci->platdata = dev_get_platdata(dev);
ci->imx28_write_fix = !!(ci->platdata->flags &
@@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev)
}
}
+ platform_set_drvdata(pdev, ci);
ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED,
ci->platdata->name, ci);
if (ret)
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index c1694cff1eaf..48731d0bab35 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci)
if (!hcd)
return -ENOMEM;
+ dev_set_drvdata(ci->dev, ci);
hcd->rsrc_start = ci->hw_bank.phys;
hcd->rsrc_len = ci->hw_bank.size;
hcd->regs = ci->hw_bank.abs;
diff --git a/drivers/usb/core/otg_whitelist.h b/drivers/usb/core/otg_whitelist.h
index de0c9c9d7091..a6315abe7b7c 100644
--- a/drivers/usb/core/otg_whitelist.h
+++ b/drivers/usb/core/otg_whitelist.h
@@ -55,6 +55,11 @@ static int is_targeted(struct usb_device *dev)
le16_to_cpu(dev->descriptor.idProduct) == 0xbadd))
return 0;
+ /* OTG PET device is always targeted (see OTG 2.0 ECN 6.4.2) */
+ if ((le16_to_cpu(dev->descriptor.idVendor) == 0x1a0a &&
+ le16_to_cpu(dev->descriptor.idProduct) == 0x0200))
+ return 1;
+
/* NOTE: can't use usb_match_id() since interface caches
* aren't set up yet. this is cut/paste from that code.
*/
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 0ffb4ed0a945..41e510ae8c83 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -179,6 +179,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0b05, 0x17e0), .driver_info =
USB_QUIRK_IGNORE_REMOTE_WAKEUP },
+ /* Protocol and OTG Electrical Test Device */
+ { USB_DEVICE(0x1a0a, 0x0200), .driver_info =
+ USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL },
+
{ } /* terminating entry must be last */
};
diff --git a/drivers/usb/dwc2/core_intr.c b/drivers/usb/dwc2/core_intr.c
index ad43c5bc1ef1..02e3e2d4ea56 100644
--- a/drivers/usb/dwc2/core_intr.c
+++ b/drivers/usb/dwc2/core_intr.c
@@ -476,13 +476,13 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
u32 gintsts;
irqreturn_t retval = IRQ_NONE;
+ spin_lock(&hsotg->lock);
+
if (!dwc2_is_controller_alive(hsotg)) {
dev_warn(hsotg->dev, "Controller is dead\n");
goto out;
}
- spin_lock(&hsotg->lock);
-
gintsts = dwc2_read_common_intr(hsotg);
if (gintsts & ~GINTSTS_PRTINT)
retval = IRQ_HANDLED;
@@ -515,8 +515,8 @@ irqreturn_t dwc2_handle_common_intr(int irq, void *dev)
}
}
- spin_unlock(&hsotg->lock);
out:
+ spin_unlock(&hsotg->lock);
return retval;
}
EXPORT_SYMBOL_GPL(dwc2_handle_common_intr);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 200168ec2d75..79242008085b 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2567,7 +2567,7 @@ error:
* s3c_hsotg_ep_disable - disable given endpoint
* @ep: The endpoint to disable.
*/
-static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force)
{
struct s3c_hsotg_ep *hs_ep = our_ep(ep);
struct dwc2_hsotg *hsotg = hs_ep->parent;
@@ -2588,7 +2588,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
spin_lock_irqsave(&hsotg->lock, flags);
/* terminate all requests with shutdown */
- kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false);
+ kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force);
hsotg->fifo_map &= ~(1<<hs_ep->fifo_index);
hs_ep->fifo_index = 0;
@@ -2609,6 +2609,10 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep)
return 0;
}
+static int s3c_hsotg_ep_disable(struct usb_ep *ep)
+{
+ return s3c_hsotg_ep_disable_force(ep, false);
+}
/**
* on_list - check request is on the given endpoint
* @ep: The endpoint to check.
@@ -2924,7 +2928,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget)
/* all endpoints should be shutdown */
for (ep = 1; ep < hsotg->num_of_eps; ep++)
- s3c_hsotg_ep_disable(&hsotg->eps[ep].ep);
+ s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true);
spin_lock_irqsave(&hsotg->lock, flags);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 7c4faf738747..b642a2f998f9 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -33,6 +33,8 @@
#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
#define PCI_DEVICE_ID_INTEL_BSW 0x22B7
+#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30
+#define PCI_DEVICE_ID_INTEL_SPTH 0xa130
struct dwc3_pci {
struct device *dev;
@@ -219,6 +221,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), },
{ } /* Terminating Entry */
};
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index f03b136ecfce..8f65ab3a3b92 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (i == (request->num_mapped_sgs - 1) ||
sg_is_last(s)) {
- if (list_is_last(&req->list,
- &dep->request_list))
+ if (list_empty(&dep->request_list))
last_one = true;
chain = false;
}
@@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
if (last_one)
break;
}
+
+ if (last_one)
+ break;
} else {
dma = req->request.dma;
length = req->request.length;
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 6e04e302dc3a..a1bc3e3a0b09 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f,
value = __le16_to_cpu(ctrl->wValue);
length = __le16_to_cpu(ctrl->wLength);
- VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x "
- "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value);
+ VDBG(cdev,
+ "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n",
+ __func__, ctrl->bRequestType, ctrl->bRequest, value);
switch ((ctrl->bRequestType << 8) | ctrl->bRequest) {
case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index a90440300735..259b656c0b3e 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req)
req = midi_alloc_ep_req(ep, midi->buflen);
if (!req) {
- ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n");
+ ERROR(midi, "%s: alloc_ep_request failed\n", __func__);
return;
}
req->length = 0;
diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c
index f7b203293205..e9715845f82e 100644
--- a/drivers/usb/gadget/function/f_uac1.c
+++ b/drivers/usb/gadget/function/f_uac1.c
@@ -897,7 +897,6 @@ static void f_audio_free_inst(struct usb_function_instance *f)
struct f_uac1_opts *opts;
opts = container_of(f, struct f_uac1_opts, func_inst);
- gaudio_cleanup(opts->card);
if (opts->fn_play_alloc)
kfree(opts->fn_play);
if (opts->fn_cap_alloc)
@@ -935,6 +934,7 @@ static void f_audio_free(struct usb_function *f)
struct f_audio *audio = func_to_audio(f);
struct f_uac1_opts *opts;
+ gaudio_cleanup(&audio->card);
opts = container_of(f->fi, struct f_uac1_opts, func_inst);
kfree(audio);
mutex_lock(&opts->lock);
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index c744e4975d74..db49ec4c748e 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
kbuf = memdup_user(buf, len);
if (IS_ERR(kbuf)) {
value = PTR_ERR(kbuf);
+ kbuf = NULL;
goto free1;
}
@@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr)
data->name, len, (int) value);
free1:
mutex_unlock(&data->lock);
+ kfree (kbuf);
return value;
}
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index ce882371786b..9f93bed42052 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep,
req->using_dma = 1;
req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length)
| USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE
- | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
+ | USBA_DMA_END_BUF_EN;
- if (ep->is_in)
- req->ctrl |= USBA_DMA_END_BUF_EN;
+ if (!ep->is_in)
+ req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE;
/*
* Add this request to the queue and submit for DMA if
@@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
struct usba_ep *ep = to_usba_ep(_ep);
struct usba_udc *udc = ep->udc;
- struct usba_request *req = to_usba_req(_req);
+ struct usba_request *req;
unsigned long flags;
u32 status;
@@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req)
spin_lock_irqsave(&udc->lock, flags);
+ list_for_each_entry(req, &ep->queue, queue) {
+ if (&req->req == _req)
+ break;
+ }
+
+ if (&req->req != _req) {
+ spin_unlock_irqrestore(&udc->lock, flags);
+ return -EINVAL;
+ }
+
if (req->using_dma) {
/*
* If this request is currently being transferred,
@@ -1563,7 +1573,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep)
if ((epstatus & epctrl) & USBA_RX_BK_RDY) {
DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name);
receive_data(ep);
- usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY);
}
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index ff67ceac77c4..d4fe8d769bd6 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req)
struct bdc *bdc;
int ret = 0;
- bdc = ep->bdc;
if (!req || !ep || !ep->usb_ep.desc)
return -EINVAL;
+ bdc = ep->bdc;
+
req->usb_req.actual = 0;
req->usb_req.status = -EINPROGRESS;
req->epnum = ep->ep_num;
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index e113fd73aeae..f9a332775c47 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1581,6 +1581,10 @@ iso_stream_schedule (
else
next = (now + 2 + 7) & ~0x07; /* full frame cache */
+ /* If needed, initialize last_iso_frame so that this URB will be seen */
+ if (ehci->isoc_count == 0)
+ ehci->last_iso_frame = now >> 3;
+
/*
* Use ehci->last_iso_frame as the base. There can't be any
* TDs scheduled for earlier than that.
@@ -1600,11 +1604,11 @@ iso_stream_schedule (
*/
now2 = (now - base) & (mod - 1);
- /* Is the schedule already full? */
+ /* Is the schedule about to wrap around? */
if (unlikely(!empty && start < period)) {
- ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n",
+ ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n",
urb, stream->next_uframe, base, period, mod);
- status = -ENOSPC;
+ status = -EFBIG;
goto fail;
}
@@ -1671,10 +1675,6 @@ iso_stream_schedule (
urb->start_frame = start & (mod - 1);
if (!stream->highspeed)
urb->start_frame >>= 3;
-
- /* Make sure scan_isoc() sees these */
- if (ehci->isoc_count == 0)
- ehci->last_iso_frame = now >> 3;
return status;
fail:
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 19a9af1b4d74..ff9af29b4e9f 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev)
u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
if (IS_ERR(u_phy)) {
- err = PTR_ERR(u_phy);
+ err = -EPROBE_DEFER;
goto cleanup_clk_en;
}
hcd->usb_phy = u_phy;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index dd483c13565b..ce636466edb7 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
{
void __iomem *base;
u32 control;
- u32 fminterval;
+ u32 fminterval = 0;
+ bool no_fminterval = false;
int cnt;
if (!mmio_resource_enabled(pdev, 0))
@@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
if (base == NULL)
return;
+ /*
+ * ULi M5237 OHCI controller locks the whole system when accessing
+ * the OHCI_FMINTERVAL offset.
+ */
+ if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237)
+ no_fminterval = true;
+
control = readl(base + OHCI_CONTROL);
/* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */
@@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
}
/* software reset of the controller, preserving HcFmInterval */
- fminterval = readl(base + OHCI_FMINTERVAL);
+ if (!no_fminterval)
+ fminterval = readl(base + OHCI_FMINTERVAL);
+
writel(OHCI_HCR, base + OHCI_CMDSTATUS);
/* reset requires max 10 us delay */
@@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev)
break;
udelay(1);
}
- writel(fminterval, base + OHCI_FMINTERVAL);
+
+ if (!no_fminterval)
+ writel(fminterval, base + OHCI_FMINTERVAL);
/* Now the controller is safely in SUSPEND and nothing can wake it up */
iounmap(base);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 142b601f9563..7f76c8a12f89 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
"must be suspended extra slowly",
pdev->revision);
}
+ if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK)
+ xhci->quirks |= XHCI_BROKEN_STREAMS;
/* Fresco Logic confirms: all revisions of this chip do not
* support MSI, even though some of them claim to in their PCI
* capabilities.
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 01fcbb5eb06e..c50d8d202618 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
return -EINVAL;
}
+ if (setup == SETUP_CONTEXT_ONLY) {
+ slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
+ if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
+ SLOT_STATE_DEFAULT) {
+ xhci_dbg(xhci, "Slot already in default state\n");
+ return 0;
+ }
+ }
+
command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
if (!command)
return -ENOMEM;
diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig
index 9d68372dd9aa..b005010240e5 100644
--- a/drivers/usb/musb/Kconfig
+++ b/drivers/usb/musb/Kconfig
@@ -72,6 +72,8 @@ config USB_MUSB_DA8XX
config USB_MUSB_TUSB6010
tristate "TUSB6010"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules
config USB_MUSB_OMAP2PLUS
tristate "OMAP2430 and onwards"
@@ -85,6 +87,7 @@ config USB_MUSB_AM35X
config USB_MUSB_DSPS
tristate "TI DSPS platforms"
select USB_MUSB_AM335X_CHILD
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
depends on OF_IRQ
config USB_MUSB_BLACKFIN
@@ -93,6 +96,7 @@ config USB_MUSB_BLACKFIN
config USB_MUSB_UX500
tristate "Ux500 platforms"
+ depends on ARCH_U8500 || COMPILE_TEST
config USB_MUSB_JZ4740
tristate "JZ4740"
diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c
index a441a2de8619..178250145613 100644
--- a/drivers/usb/musb/blackfin.c
+++ b/drivers/usb/musb/blackfin.c
@@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data)
bfin_write16(addr + offset, data);
}
-static void binf_writel(void __iomem *addr, unsigned offset, u32 data)
+static void bfin_writel(void __iomem *addr, unsigned offset, u32 data)
{
bfin_write16(addr + offset, (u16)data);
}
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index f64fd964dc6d..c39a16ad7832 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -628,9 +628,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller)
ret = of_property_read_string_index(np, "dma-names", i, &str);
if (ret)
goto err;
- if (!strncmp(str, "tx", 2))
+ if (strstarts(str, "tx"))
is_tx = 1;
- else if (!strncmp(str, "rx", 2))
+ else if (strstarts(str, "rx"))
is_tx = 0;
else {
dev_err(dev, "Wrong dmatype %s\n", str);
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index ad3701a97389..48131aa8472c 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = {
{ "RxMaxPp", MUSB_RXMAXP, 16 },
{ "RxCSR", MUSB_RXCSR, 16 },
{ "RxCount", MUSB_RXCOUNT, 16 },
- { "ConfigData", MUSB_CONFIGDATA,8 },
{ "IntrRxE", MUSB_INTRRXE, 16 },
{ "IntrTxE", MUSB_INTRTXE, 16 },
{ "IntrUsbE", MUSB_INTRUSBE, 8 },
{ "DevCtl", MUSB_DEVCTL, 8 },
- { "BabbleCtl", MUSB_BABBLE_CTL,8 },
- { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
- { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
- { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
- { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
{ "VControl", 0x68, 32 },
{ "HWVers", 0x69, 16 },
- { "EPInfo", MUSB_EPINFO, 8 },
- { "RAMInfo", MUSB_RAMINFO, 8 },
{ "LinkInfo", MUSB_LINKINFO, 8 },
{ "VPLen", MUSB_VPLEN, 8 },
{ "HS_EOF1", MUSB_HS_EOF1, 8 },
@@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = {
{ "DMA_CNTLch7", 0x274, 16 },
{ "DMA_ADDRch7", 0x278, 32 },
{ "DMA_COUNTch7", 0x27C, 32 },
+#ifndef CONFIG_BLACKFIN
+ { "ConfigData", MUSB_CONFIGDATA,8 },
+ { "BabbleCtl", MUSB_BABBLE_CTL,8 },
+ { "TxFIFOsz", MUSB_TXFIFOSZ, 8 },
+ { "RxFIFOsz", MUSB_RXFIFOSZ, 8 },
+ { "TxFIFOadd", MUSB_TXFIFOADD, 16 },
+ { "RxFIFOadd", MUSB_RXFIFOADD, 16 },
+ { "EPInfo", MUSB_EPINFO, 8 },
+ { "RAMInfo", MUSB_RAMINFO, 8 },
+#endif
{ } /* Terminating Entry */
};
@@ -197,30 +199,30 @@ static ssize_t musb_test_mode_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
- if (!strncmp(buf, "force host", 9))
+ if (strstarts(buf, "force host"))
test = MUSB_TEST_FORCE_HOST;
- if (!strncmp(buf, "fifo access", 11))
+ if (strstarts(buf, "fifo access"))
test = MUSB_TEST_FIFO_ACCESS;
- if (!strncmp(buf, "force full-speed", 15))
+ if (strstarts(buf, "force full-speed"))
test = MUSB_TEST_FORCE_FS;
- if (!strncmp(buf, "force high-speed", 15))
+ if (strstarts(buf, "force high-speed"))
test = MUSB_TEST_FORCE_HS;
- if (!strncmp(buf, "test packet", 10)) {
+ if (strstarts(buf, "test packet")) {
test = MUSB_TEST_PACKET;
musb_load_testpacket(musb);
}
- if (!strncmp(buf, "test K", 6))
+ if (strstarts(buf, "test K"))
test = MUSB_TEST_K;
- if (!strncmp(buf, "test J", 6))
+ if (strstarts(buf, "test J"))
test = MUSB_TEST_J;
- if (!strncmp(buf, "test SE0 NAK", 12))
+ if (strstarts(buf, "test SE0 NAK"))
test = MUSB_TEST_SE0_NAK;
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 23d474d3d7f4..883a9adfdfff 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb)
if (musb->port_mode == MUSB_PORT_MODE_GADGET)
return;
usb_remove_hcd(musb->hcd);
- musb->hcd = NULL;
}
void musb_host_free(struct musb *musb)
diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c
index 699e38c73d82..697a741a0cb1 100644
--- a/drivers/usb/phy/phy-mv-usb.c
+++ b/drivers/usb/phy/phy-mv-usb.c
@@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg)
static void mv_otg_update_state(struct mv_otg *mvotg)
{
struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl;
- struct usb_phy *phy = &mvotg->phy;
int old_state = mvotg->phy.otg->state;
switch (old_state) {
@@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state)
{
struct mv_otg *mvotg = platform_get_drvdata(pdev);
- if (mvotg->phy.state != OTG_STATE_B_IDLE) {
+ if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) {
dev_info(&pdev->dev,
"OTG state is not B_IDLE, it is %d!\n",
- mvotg->phy.state);
+ mvotg->phy.otg->state);
return -EAGAIN;
}
diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c
index b4066a001ba0..2f9735b35338 100644
--- a/drivers/usb/phy/phy.c
+++ b/drivers/usb/phy/phy.c
@@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
{
struct usb_phy *phy;
+ if (!of_device_is_available(node))
+ return ERR_PTR(-ENODEV);
+
list_for_each_entry(phy, &phy_list, head) {
if (node != phy->dev->of_node)
continue;
@@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node)
return phy;
}
- return ERR_PTR(-ENODEV);
+ return ERR_PTR(-EPROBE_DEFER);
}
static void devm_usb_phy_release(struct device *dev, void *res)
@@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev,
spin_lock_irqsave(&phy_lock, flags);
phy = __of_usb_find_phy(node);
- if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) {
- if (!IS_ERR(phy))
- phy = ERR_PTR(-EPROBE_DEFER);
+ if (IS_ERR(phy)) {
+ devres_free(ptr);
+ goto err1;
+ }
+ if (!try_module_get(phy->dev->driver->owner)) {
+ phy = ERR_PTR(-ENODEV);
devres_free(ptr);
goto err1;
}
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index 8d7fc48b1f30..29fa1c3d0089 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -46,6 +46,8 @@ static struct console usbcons;
* ------------------------------------------------------------
*/
+static const struct tty_operations usb_console_fake_tty_ops = {
+};
/*
* The parsing of the command line works exactly like the
@@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options)
goto reset_open_count;
}
kref_init(&tty->kref);
- tty_port_tty_set(&port->port, tty);
tty->driver = usb_serial_tty_driver;
tty->index = co->index;
+ init_ldsem(&tty->ldisc_sem);
+ INIT_LIST_HEAD(&tty->tty_files);
+ kref_get(&tty->driver->kref);
+ tty->ops = &usb_console_fake_tty_ops;
if (tty_init_termios(tty)) {
retval = -ENOMEM;
- goto free_tty;
+ goto put_tty;
}
+ tty_port_tty_set(&port->port, tty);
}
/* only call the device specific open if this
@@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options)
serial->type->set_termios(tty, port, &dummy);
tty_port_tty_set(&port->port, NULL);
- kfree(tty);
+ tty_kref_put(tty);
}
set_bit(ASYNCB_INITIALIZED, &port->port.flags);
}
@@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options)
fail:
tty_port_tty_set(&port->port, NULL);
- free_tty:
- kfree(tty);
+ put_tty:
+ tty_kref_put(tty);
reset_open_count:
port->port.count = 0;
usb_autopm_put_interface(serial->interface);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 6c4eb3cf5efd..f4c56fc1a9f6 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
{ USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
{ USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
- { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
+ { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */
+ { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */
{ USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
{ USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
+ { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
{ USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
{ USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 1bd192290b08..ccf1df7c4b80 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port,
res = usb_submit_urb(port->read_urbs[index], mem_flags);
if (res) {
- if (res != -EPERM) {
+ if (res != -EPERM && res != -ENODEV) {
dev_err(&port->dev,
"%s - usb_submit_urb failed: %d\n",
__func__, res);
@@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb)
__func__, urb->status);
return;
default:
- dev_err(&port->dev, "%s - nonzero urb status: %d\n",
+ dev_dbg(&port->dev, "%s - nonzero urb status: %d\n",
__func__, urb->status);
goto resubmit;
}
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 077c714f1285..e07b15ed5814 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb)
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb)
}
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb)
}
port = serial->port[msg->portNumber];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb)
port = serial->port[0];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
@@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb)
port = serial->port[msg->port];
p_priv = usb_get_serial_port_data(port);
+ if (!p_priv)
+ goto resubmit;
/* Update handshaking pin state information */
old_dcd_state = p_priv->dcd_state;
@@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb)
if (old_dcd_state != p_priv->dcd_state && old_dcd_state)
tty_port_tty_hangup(&port->port, true);
-
+resubmit:
/* Resubmit urb so we continue receiving */
err = usb_submit_urb(urb, GFP_ATOMIC);
if (err != 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 7a4c21b4f676..efdcee15b520 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb);
#define QUALCOMM_VENDOR_ID 0x05C6
+#define SIERRA_VENDOR_ID 0x1199
+
#define CMOTECH_VENDOR_ID 0x16d8
#define CMOTECH_PRODUCT_6001 0x6001
#define CMOTECH_PRODUCT_CMU_300 0x6002
@@ -512,7 +514,7 @@ enum option_blacklist_reason {
OPTION_BLACKLIST_RESERVED_IF = 2
};
-#define MAX_BL_NUM 8
+#define MAX_BL_NUM 11
struct option_blacklist_info {
/* bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */
const unsigned long sendsetup;
@@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = {
.reserved = BIT(1) | BIT(5),
};
+static const struct option_blacklist_info sierra_mc73xx_blacklist = {
+ .sendsetup = BIT(0) | BIT(2),
+ .reserved = BIT(8) | BIT(10) | BIT(11),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
+ { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
+ .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index cb3e14780a7e..9c63897b3a56 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = {
{DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */
{DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */
{DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */
- {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */
{DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
{DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
{DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 8a6f371ed6e7..9893d696fc97 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf,
return 0;
/*
- * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is
- * broken on the ASM1051, use the number of streams to differentiate.
- * New ASM1053-s also support 32 streams, but have a different prod-id.
+ * ASMedia has a number of usb3 to sata bridge chips, at the time of
+ * this writing the following versions exist:
+ * ASM1051 - no uas support version
+ * ASM1051 - with broken (*) uas support
+ * ASM1053 - with working uas support
+ * ASM1153 - with working uas support
+ *
+ * Devices with these chips re-use a number of device-ids over the
+ * entire line, so the device-id is useless to determine if we're
+ * dealing with an ASM1051 (which we want to avoid).
+ *
+ * The ASM1153 can be identified by config.MaxPower == 0,
+	 * whereas the ASM105x models have config.MaxPower == 36.
+ *
+	 * Differentiating between the ASM1053 and ASM1051 is trickier; when
+	 * connected over USB-3 we can look at the number of streams supported:
+	 * the ASM1051 supports 32 streams, whereas early ASM1053 versions support
+	 * 16 streams. Newer ASM1053-s also support 32 streams, but have a
+	 * different prod-id.
+ *
+ * (*) ASM1051 chips do work with UAS with some disks (with the
+ * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks
*/
if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c &&
- le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) {
- if (udev->speed < USB_SPEED_SUPER) {
+ (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 ||
+ le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) {
+ if (udev->actconfig->desc.bMaxPower == 0) {
+ /* ASM1153, do nothing */
+ } else if (udev->speed < USB_SPEED_SUPER) {
/* No streams info, assume ASM1051 */
flags |= US_FL_IGNORE_UAS;
} else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
+ /* Possibly an ASM1051, disable uas */
flags |= US_FL_IGNORE_UAS;
}
}
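
The comment block above boils down to a small decision rule built from MaxPower, link speed and stream count. Below is a self-contained sketch of that rule in plain C; the function name and the integer parameters are illustrative only and are not part of the kernel driver.

#include <stdbool.h>
#include <stdio.h>

/* Should UAS be avoided for an ASMedia 174c:5106/55aa bridge? */
static bool asmedia_should_ignore_uas(int max_power, bool super_speed,
				      int max_streams)
{
	if (max_power == 0)
		return false;	/* ASM1153: UAS known to work */
	if (!super_speed)
		return true;	/* no stream info: assume a broken ASM1051 */
	if (max_streams == 32)
		return true;	/* possibly an ASM1051: play it safe */
	return false;		/* early ASM1053 (16 streams): UAS is fine */
}

int main(void)
{
	printf("%d\n", asmedia_should_ignore_uas(36, true, 32));	/* 1 */
	printf("%d\n", asmedia_should_ignore_uas(0, true, 32));	/* 0 */
	return 0;
}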
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 11c7a9676441..d684b4b8108f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -507,7 +507,7 @@ UNUSUAL_DEV( 0x04e6, 0x000c, 0x0100, 0x0100,
UNUSUAL_DEV( 0x04e6, 0x000f, 0x0000, 0x9999,
"SCM Microsystems",
"eUSB SCSI Adapter (Bus Powered)",
- USB_SC_DEVICE, USB_PR_DEVICE, usb_stor_euscsi_init,
+ USB_SC_SCSI, USB_PR_BULK, usb_stor_euscsi_init,
US_FL_SCM_MULT_TARG ),
UNUSUAL_DEV( 0x04e6, 0x0101, 0x0200, 0x0200,
@@ -1995,6 +1995,13 @@ UNUSUAL_DEV( 0x152d, 0x2329, 0x0100, 0x0100,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_RESIDUE | US_FL_SANE_SENSE ),
+/* Reported by Dmitry Nezhevenko <dion@dion.org.ua> */
+UNUSUAL_DEV( 0x152d, 0x2566, 0x0114, 0x0114,
+ "JMicron",
+ "USB to ATA/ATAPI Bridge",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_BROKEN_FUA ),
+
/* Entrega Technologies U1-SC25 (later Xircom PortGear PGSCSI)
* and Mac USB Dock USB-SCSI */
UNUSUAL_DEV( 0x1645, 0x0007, 0x0100, 0x0133,
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 18a283d6de1c..dbc00e56c7f5 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -40,6 +40,16 @@
* and don't forget to CC: the USB development list <linux-usb@vger.kernel.org>
*/
+/*
+ * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI
+ * commands in UAS mode. Observed with the 1.28 firmware; are there others?
+ */
+UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128,
+ "Apricorn",
+ "",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_IGNORE_UAS),
+
/* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */
UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999,
"Seagate",
@@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */
+UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999,
+ "Seagate",
+ "Backup Plus",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999,
+ "Seagate",
+ "Backup Plus Desk",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
"Seagate",
@@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: G. Richard Bellamy <rbellamy@pteradigm.com> */
+UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999,
+ "Seagate",
+ "BUP Fast HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
"JMicron",
@@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_REPORT_OPCODES),
-/* Most ASM1051 based devices have issues with uas, blacklist them all */
-/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
-UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
- "ASMedia",
- "ASM1051",
- USB_SC_DEVICE, USB_PR_DEVICE, NULL,
- US_FL_IGNORE_UAS),
-
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
"VIA",
@@ -104,9 +127,23 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_NO_ATA_1X),
+/* Reported-by: Takeo Nakayama <javhera@gmx.com> */
+UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999,
+ "JMicron",
+ "JMS566",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
+
/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
"Hitachi",
"External HDD",
USB_SC_DEVICE, USB_PR_DEVICE, NULL,
US_FL_IGNORE_UAS),
+
+/* Reported-by: Richard Henderson <rth@redhat.com> */
+UNUSUAL_DEV(0x4971, 0x8017, 0x0000, 0x9999,
+ "SimpleTech",
+ "External HDD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_REPORT_OPCODES),
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 255201f22126..7cc0122a18ce 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = {
static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
- u8 type;
struct vfio_pci_device *vdev;
struct iommu_group *group;
int ret;
- pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type);
- if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL)
+ if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL)
return -EINVAL;
group = iommu_group_get(&pdev->dev);
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 14419a8ccbb6..d415d69dc237 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq,
++headcount;
seg += in;
}
- heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen);
+ heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen);
*iovcount = seg;
if (unlikely(log))
*log_num = nlogs;
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 01c01cb3933f..d695b1673ae5 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd,
return 0;
}
+static int vhost_scsi_to_tcm_attr(int attr)
+{
+ switch (attr) {
+ case VIRTIO_SCSI_S_SIMPLE:
+ return TCM_SIMPLE_TAG;
+ case VIRTIO_SCSI_S_ORDERED:
+ return TCM_ORDERED_TAG;
+ case VIRTIO_SCSI_S_HEAD:
+ return TCM_HEAD_TAG;
+ case VIRTIO_SCSI_S_ACA:
+ return TCM_ACA_TAG;
+ default:
+ break;
+ }
+ return TCM_SIMPLE_TAG;
+}
+
static void tcm_vhost_submission_work(struct work_struct *work)
{
struct tcm_vhost_cmd *cmd =
@@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work)
rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess,
cmd->tvc_cdb, &cmd->tvc_sense_buf[0],
cmd->tvc_lun, cmd->tvc_exp_data_len,
- cmd->tvc_task_attr, cmd->tvc_data_direction,
- TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
- NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count);
+ vhost_scsi_to_tcm_attr(cmd->tvc_task_attr),
+ cmd->tvc_data_direction, TARGET_SCF_ACK_KREF,
+ sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr,
+ cmd->tvc_prot_sgl_count);
if (rc < 0) {
transport_send_check_condition_and_sense(se_cmd,
TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index ed71b5347a76..cb807d0ea498 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -713,9 +713,13 @@ long vhost_vring_ioctl(struct vhost_dev *d, int ioctl, void __user *argp)
r = -EFAULT;
break;
}
- if ((a.avail_user_addr & (sizeof *vq->avail->ring - 1)) ||
- (a.used_user_addr & (sizeof *vq->used->ring - 1)) ||
- (a.log_guest_addr & (sizeof *vq->used->ring - 1))) {
+
+ /* Make sure it's safe to cast pointers to vring types. */
+ BUILD_BUG_ON(__alignof__ *vq->avail > VRING_AVAIL_ALIGN_SIZE);
+ BUILD_BUG_ON(__alignof__ *vq->used > VRING_USED_ALIGN_SIZE);
+ if ((a.avail_user_addr & (VRING_AVAIL_ALIGN_SIZE - 1)) ||
+ (a.used_user_addr & (VRING_USED_ALIGN_SIZE - 1)) ||
+ (a.log_guest_addr & (sizeof(u64) - 1))) {
r = -EINVAL;
break;
}
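
The hunk above validates user-supplied ring addresses against fixed ABI alignment constants and pins the struct-alignment assumption at compile time. A minimal user-space sketch of the same mask-and-static-assert pattern follows; RING_ALIGN here is a placeholder value, not the virtio ABI constant.

#include <stdint.h>
#include <stdio.h>

#define RING_ALIGN 4u	/* placeholder alignment, must be a power of two */

_Static_assert((RING_ALIGN & (RING_ALIGN - 1)) == 0,
	       "alignment must be a power of two");

/* Reject addresses that are not RING_ALIGN-aligned, like the -EINVAL above. */
static int check_ring_addr(uint64_t addr)
{
	return (addr & (RING_ALIGN - 1)) ? -1 : 0;
}

int main(void)
{
	printf("%d %d\n", check_ring_addr(0x1000), check_ring_addr(0x1002));
	return 0;
}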
diff --git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c
index 1c29bd19e3d5..0e5fde1d3ffb 100644
--- a/drivers/video/fbdev/broadsheetfb.c
+++ b/drivers/video/fbdev/broadsheetfb.c
@@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, start_sector_addr,
data_start_addr, sector_buffer);
if (err)
- return err;
+ goto out;
}
/* now we copy our data into the right place in the sector buffer */
@@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
err = broadsheet_spiflash_read_range(par, tail_start_addr,
tail_len, sector_buffer + tail_start_addr);
if (err)
- return err;
+ goto out;
}
/* if we got here we have the full sector that we want to rewrite. */
@@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par,
/* first erase the sector */
err = broadsheet_spiflash_erase_sector(par, start_sector_addr);
if (err)
- return err;
+ goto out;
/* now write it */
err = broadsheet_spiflash_write_sector(par, start_sector_addr,
sector_buffer, sector_size);
+out:
+ kfree(sector_buffer);
return err;
}
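
The broadsheetfb change converts early returns into jumps to a single exit label so the sector buffer is freed on every path. A minimal stand-alone sketch of that single-exit cleanup style; the helper names are invented for illustration.

#include <stdlib.h>
#include <string.h>

static int fill_buffer(char *buf, size_t len)  { memset(buf, 0, len); return 0; }
static int write_buffer(char *buf, size_t len) { (void)buf; (void)len; return 0; }

static int rewrite_sector(size_t sector_size)
{
	char *sector_buffer = malloc(sector_size);
	int err;

	if (!sector_buffer)
		return -1;

	err = fill_buffer(sector_buffer, sector_size);
	if (err)
		goto out;		/* still freed below, no leak */

	err = write_buffer(sector_buffer, sector_size);
out:
	free(sector_buffer);
	return err;
}

int main(void)
{
	return rewrite_sector(4096) ? 1 : 0;
}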
diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c
index 900aa4ecd617..d6cab1fd9a47 100644
--- a/drivers/video/fbdev/core/fb_defio.c
+++ b/drivers/video/fbdev/core/fb_defio.c
@@ -83,9 +83,10 @@ int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasy
cancel_delayed_work_sync(&info->deferred_work);
/* Run it immediately */
- err = schedule_delayed_work(&info->deferred_work, 0);
+ schedule_delayed_work(&info->deferred_work, 0);
mutex_unlock(&inode->i_mutex);
- return err;
+
+ return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 87accdb59c81..ac83ef5cfd7d 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -132,7 +132,6 @@ static const struct dss_pll_hw dss_omap4_hdmi_pll_hw = {
.mX_max = 127,
.fint_min = 500000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 500000000,
.clkdco_low = 1000000000,
@@ -156,7 +155,6 @@ static const struct dss_pll_hw dss_omap5_hdmi_pll_hw = {
.mX_max = 127,
.fint_min = 620000,
.fint_max = 2500000,
- .clkdco_max = 1800000000,
.clkdco_min = 750000000,
.clkdco_low = 1500000000,
diff --git a/drivers/video/fbdev/omap2/dss/pll.c b/drivers/video/fbdev/omap2/dss/pll.c
index 50bc62c5d367..335ffac224b9 100644
--- a/drivers/video/fbdev/omap2/dss/pll.c
+++ b/drivers/video/fbdev/omap2/dss/pll.c
@@ -97,7 +97,8 @@ int dss_pll_enable(struct dss_pll *pll)
return 0;
err_enable:
- regulator_disable(pll->regulator);
+ if (pll->regulator)
+ regulator_disable(pll->regulator);
err_reg:
clk_disable_unprepare(pll->clkin);
return r;
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index d51a983075bc..5c2ccab5a958 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -342,6 +342,8 @@ static void sdi_init_output(struct platform_device *pdev)
out->output_type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
+ /* We have SDI only on OMAP3, where it's on port 1 */
+ out->port_num = 1;
out->ops.sdi = &sdi_ops;
out->owner = THIS_MODULE;
diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c
index 92cac803dee3..1085c0432158 100644
--- a/drivers/video/fbdev/simplefb.c
+++ b/drivers/video/fbdev/simplefb.c
@@ -402,7 +402,7 @@ static int __init simplefb_init(void)
if (ret)
return ret;
- if (IS_ENABLED(CONFIG_OF) && of_chosen) {
+ if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) {
for_each_child_of_node(of_chosen, np) {
if (of_device_is_compatible(np, "simple-framebuffer"))
of_platform_device_create(np, NULL, NULL);
diff --git a/drivers/video/logo/logo.c b/drivers/video/logo/logo.c
index 940cd196eef5..10fbfd8ab963 100644
--- a/drivers/video/logo/logo.c
+++ b/drivers/video/logo/logo.c
@@ -21,6 +21,21 @@ static bool nologo;
module_param(nologo, bool, 0);
MODULE_PARM_DESC(nologo, "Disables startup logo");
+/*
+ * Logos are located in the initdata, and will be freed in kernel_init.
+ * Use late_init to mark the logos as freed to prevent any further use.
+ */
+
+static bool logos_freed;
+
+static int __init fb_logo_late_init(void)
+{
+ logos_freed = true;
+ return 0;
+}
+
+late_initcall(fb_logo_late_init);
+
/* logo's are marked __initdata. Use __init_refok to tell
* modpost that it is intended that this function uses data
* marked __initdata.
@@ -29,7 +44,7 @@ const struct linux_logo * __init_refok fb_find_logo(int depth)
{
const struct linux_logo *logo = NULL;
- if (nologo)
+ if (nologo || logos_freed)
return NULL;
if (depth >= 1) {
diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c
index 2ef9529809d8..9756f21b809e 100644
--- a/drivers/virtio/virtio_pci_common.c
+++ b/drivers/virtio/virtio_pci_common.c
@@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev)
vp_free_vectors(vdev);
kfree(vp_dev->vqs);
+ vp_dev->vqs = NULL;
}
static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs,
@@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
return 0;
}
-void virtio_pci_release_dev(struct device *_d)
-{
- /*
- * No need for a release method as we allocate/free
- * all devices together with the pci devices.
- * Provide an empty one to avoid getting a warning from core.
- */
-}
-
#ifdef CONFIG_PM_SLEEP
static int virtio_pci_freeze(struct device *dev)
{
diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h
index adddb647b21d..5a497289b7e9 100644
--- a/drivers/virtio/virtio_pci_common.h
+++ b/drivers/virtio/virtio_pci_common.h
@@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev);
* - ignore the affinity request if we're using INTX
*/
int vp_set_vq_affinity(struct virtqueue *vq, int cpu);
-void virtio_pci_release_dev(struct device *);
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id);
diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c
index 6c76f0f5658c..a5486e65e04b 100644
--- a/drivers/virtio/virtio_pci_legacy.c
+++ b/drivers/virtio/virtio_pci_legacy.c
@@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = {
.set_vq_affinity = vp_set_vq_affinity,
};
+static void virtio_pci_release_dev(struct device *_d)
+{
+ struct virtio_device *vdev = dev_to_virtio(_d);
+ struct virtio_pci_device *vp_dev = to_vp_device(vdev);
+
+ /* As struct device is a kobject, it's not safe to
+ * free the memory (including the reference counter itself)
+ * until its release callback. */
+ kfree(vp_dev);
+}
+
/* the PCI probing function */
int virtio_pci_legacy_probe(struct pci_dev *pci_dev,
const struct pci_device_id *id)
@@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev)
pci_iounmap(pci_dev, vp_dev->ioaddr);
pci_release_regions(pci_dev);
pci_disable_device(pci_dev);
- kfree(vp_dev);
}
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
index 5927c0a98a74..bcfd2a22208f 100644
--- a/drivers/watchdog/cadence_wdt.c
+++ b/drivers/watchdog/cadence_wdt.c
@@ -503,7 +503,6 @@ static struct platform_driver cdns_wdt_driver = {
.shutdown = cdns_wdt_shutdown,
.driver = {
.name = "cdns-wdt",
- .owner = THIS_MODULE,
.of_match_table = cdns_wdt_of_match,
.pm = &cdns_wdt_pm_ops,
},
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index d6add516a7a7..5142bbabe027 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -52,6 +52,8 @@
#define IMX2_WDT_WRSR 0x04 /* Reset Status Register */
#define IMX2_WDT_WRSR_TOUT (1 << 1) /* -> Reset due to Timeout */
+#define IMX2_WDT_WMCR 0x08 /* Misc Register */
+
#define IMX2_WDT_MAX_TIME 128
#define IMX2_WDT_DEFAULT_TIME 60 /* in seconds */
@@ -274,6 +276,13 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
imx2_wdt_ping_if_active(wdog);
+ /*
+ * Disable the watchdog power down counter at boot. Otherwise the power
+ * down counter will pull down the #WDOG interrupt line for one clock
+ * cycle.
+ */
+ regmap_write(wdev->regmap, IMX2_WDT_WMCR, 0);
+
ret = watchdog_register_device(wdog);
if (ret) {
dev_err(&pdev->dev, "cannot register watchdog device\n");
@@ -327,18 +336,21 @@ static void imx2_wdt_shutdown(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-/* Disable watchdog if it is active during suspend */
+/* Disable watchdog if it is active or non-active but still running */
static int imx2_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdog = dev_get_drvdata(dev);
struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
- imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
- imx2_wdt_ping(wdog);
+ /* The watchdog IP block is running */
+ if (imx2_wdt_is_running(wdev)) {
+ imx2_wdt_set_timeout(wdog, IMX2_WDT_MAX_TIME);
+ imx2_wdt_ping(wdog);
- /* Watchdog has been stopped but IP block is still running */
- if (!watchdog_active(wdog) && imx2_wdt_is_running(wdev))
- del_timer_sync(&wdev->timer);
+ /* The watchdog is not active */
+ if (!watchdog_active(wdog))
+ del_timer_sync(&wdev->timer);
+ }
clk_disable_unprepare(wdev->clk);
@@ -354,15 +366,25 @@ static int imx2_wdt_resume(struct device *dev)
clk_prepare_enable(wdev->clk);
if (watchdog_active(wdog) && !imx2_wdt_is_running(wdev)) {
- /* Resumes from deep sleep we need restart
- * the watchdog again.
+ /*
+ * If the watchdog is still active and resumes
+ * from deep sleep state, need to restart the
+ * watchdog again.
*/
imx2_wdt_setup(wdog);
imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
} else if (imx2_wdt_is_running(wdev)) {
+ /* Resuming from non-deep sleep state. */
+ imx2_wdt_set_timeout(wdog, wdog->timeout);
imx2_wdt_ping(wdog);
- mod_timer(&wdev->timer, jiffies + wdog->timeout * HZ / 2);
+ /*
+ * If the watchdog is not active, restart the
+ * timer that keeps it pinged.
+ */
+ if (!watchdog_active(wdog))
+ mod_timer(&wdev->timer,
+ jiffies + wdog->timeout * HZ / 2);
}
return 0;
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
index ef6a298e8c45..1f4155ee3404 100644
--- a/drivers/watchdog/meson_wdt.c
+++ b/drivers/watchdog/meson_wdt.c
@@ -215,7 +215,6 @@ static struct platform_driver meson_wdt_driver = {
.remove = meson_wdt_remove,
.shutdown = meson_wdt_shutdown,
.driver = {
- .owner = THIS_MODULE,
.name = DRV_NAME,
.of_match_table = meson_wdt_dt_ids,
},
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index 2d3e32ebfd15..8729cf68d2fe 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
{
int ret;
int type;
- struct btrfs_tree_block_info *info;
struct btrfs_extent_inline_ref *eiref;
if (*ptr == (unsigned long)-1)
@@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
}
/* we can treat both ref types equally here */
- info = (struct btrfs_tree_block_info *)(ei + 1);
*out_root = btrfs_extent_inline_ref_offset(eb, eiref);
- *out_level = btrfs_tree_block_level(eb, info);
+
+ if (key->type == BTRFS_EXTENT_ITEM_KEY) {
+ struct btrfs_tree_block_info *info;
+
+ info = (struct btrfs_tree_block_info *)(ei + 1);
+ *out_level = btrfs_tree_block_level(eb, info);
+ } else {
+ ASSERT(key->type == BTRFS_METADATA_ITEM_KEY);
+ *out_level = (u8)key->offset;
+ }
if (ret == 1)
*ptr = (unsigned long)-1;
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 7e607416755a..0b180708bf79 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -1171,6 +1171,7 @@ struct btrfs_space_info {
struct percpu_counter total_bytes_pinned;
struct list_head list;
+ /* Protected by the spinlock 'lock'. */
struct list_head ro_bgs;
struct rw_semaphore groups_sem;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 054577bddaf2..de4e70fb3cbb 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode)
{
struct btrfs_delayed_node *delayed_node;
+ /*
+ * we don't do delayed inode updates during log recovery because it
+ * leads to enospc problems. This means we also can't do
+ * delayed inode refs
+ */
+ if (BTRFS_I(inode)->root->fs_info->log_root_recovering)
+ return -EAGAIN;
+
delayed_node = btrfs_get_or_create_delayed_node(inode);
if (IS_ERR(delayed_node))
return PTR_ERR(delayed_node);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a80b97100d90..a684086c3c81 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
struct extent_buffer *leaf;
ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
- if (ret < 0)
+ if (ret) {
+ if (ret > 0)
+ ret = -ENOENT;
goto fail;
- BUG_ON(ret); /* Corruption */
+ }
leaf = path->nodes[0];
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
@@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
btrfs_mark_buffer_dirty(leaf);
btrfs_release_path(path);
fail:
- if (ret) {
+ if (ret)
btrfs_abort_transaction(trans, root, ret);
- return ret;
- }
- return 0;
+ return ret;
}
@@ -9422,7 +9422,6 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
* are still on the list after taking the semaphore
*/
list_del_init(&block_group->list);
- list_del_init(&block_group->ro_list);
if (list_empty(&block_group->space_info->block_groups[index])) {
kobj = block_group->space_info->block_group_kobjs[index];
block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9464,6 +9463,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
btrfs_remove_free_space_cache(block_group);
spin_lock(&block_group->space_info->lock);
+ list_del_init(&block_group->ro_list);
block_group->space_info->total_bytes -= block_group->key.offset;
block_group->space_info->bytes_readonly -= block_group->key.offset;
block_group->space_info->disk_total -= block_group->key.offset * factor;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 4ebabd237153..790dbae3343c 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2190,7 +2190,7 @@ void btrfs_free_io_failure_record(struct inode *inode, u64 start, u64 end)
next = next_state(state);
- failrec = (struct io_failure_record *)state->private;
+ failrec = (struct io_failure_record *)(unsigned long)state->private;
free_extent_state(state);
kfree(failrec);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index e687bb0dc73a..8bf326affb94 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
out_fail:
btrfs_end_transaction(trans, root);
- if (drop_on_err)
+ if (drop_on_err) {
+ inode_dec_link_count(inode);
iput(inode);
+ }
btrfs_balance_delayed_items(root);
btrfs_btree_balance_dirty(root);
return err;
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index f2bb13a23f86..e427cb7ee12c 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity,
ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
flags, gen, mirror_num,
have_csum ? csum : NULL);
-skip:
if (ret)
return ret;
+skip:
len -= l;
logical += l;
physical += l;
@@ -3053,7 +3053,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
ppath = btrfs_alloc_path();
if (!ppath) {
- btrfs_free_path(ppath);
+ btrfs_free_path(path);
return -ENOMEM;
}
@@ -3065,6 +3065,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
path->search_commit_root = 1;
path->skip_locking = 1;
+ ppath->search_commit_root = 1;
+ ppath->skip_locking = 1;
/*
* trigger the readahead for extent tree csum tree and wait for
* completion. During readahead, the scrub is officially paused
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 60f7cbe815e9..6f49b2872a64 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1000,10 +1000,20 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
*/
if (fs_info->pending_changes == 0)
return 0;
+ /*
+ * A non-blocking test if the fs is frozen. We must not
+ * start a new transaction here otherwise a deadlock
+ * happens. The pending operations are delayed to the
+ * next commit after thawing.
+ */
+ if (__sb_start_write(sb, SB_FREEZE_WRITE, false))
+ __sb_end_write(sb, SB_FREEZE_WRITE);
+ else
+ return 0;
trans = btrfs_start_transaction(root, 0);
- } else {
- return PTR_ERR(trans);
}
+ if (IS_ERR(trans))
+ return PTR_ERR(trans);
}
return btrfs_commit_transaction(trans, root);
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index a605d4e2f2bc..e88b59d13439 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2118,7 +2118,7 @@ void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
unsigned long prev;
unsigned long bit;
- prev = cmpxchg(&fs_info->pending_changes, 0, 0);
+ prev = xchg(&fs_info->pending_changes, 0);
if (!prev)
return;
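
The switch from cmpxchg(&x, 0, 0) to xchg(&x, 0) fetches and clears the pending bits in one atomic step, so a bit set concurrently between reading and clearing cannot be lost. A user-space analogue of that fetch-and-clear pattern using C11 atomics:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong pending_changes;

static void apply_pending(void)
{
	/* fetch-and-clear in one step; concurrent setters are never lost */
	unsigned long prev = atomic_exchange(&pending_changes, 0);

	if (!prev)
		return;
	printf("handling pending bits: %#lx\n", prev);
}

int main(void)
{
	atomic_fetch_or(&pending_changes, 0x5UL);
	apply_pending();	/* prints 0x5 */
	apply_pending();	/* nothing left to do */
	return 0;
}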
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index f5013d92a7e6..c81c0e004588 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page,
}
}
- dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n",
+ dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n",
inode, ceph_vinop(inode), len, locked_page);
if (len > 0) {
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 6e139111fdb2..22b289a3b1c4 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -661,16 +661,16 @@ set_credits(struct TCP_Server_Info *server, const int val)
server->ops->set_credits(server, val);
}
-static inline __u64
+static inline __le64
get_next_mid64(struct TCP_Server_Info *server)
{
- return server->ops->get_next_mid(server);
+ return cpu_to_le64(server->ops->get_next_mid(server));
}
static inline __le16
get_next_mid(struct TCP_Server_Info *server)
{
- __u16 mid = get_next_mid64(server);
+ __u16 mid = server->ops->get_next_mid(server);
/*
* The value in the SMB header should be little endian for easy
* on-the-wire decoding.
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 45cb59bcc791..8b7898b7670f 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -86,21 +86,16 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
}
src_inode = file_inode(src_file.file);
+ rc = -EINVAL;
+ if (S_ISDIR(src_inode->i_mode))
+ goto out_fput;
/*
* Note: cifs case is easier than btrfs since server responsible for
* checks for proper open modes and file type and if it wants
* server could even support copy of range where source = target
*/
-
- /* so we do not deadlock racing two ioctls on same files */
- if (target_inode < src_inode) {
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_CHILD);
- } else {
- mutex_lock_nested(&src_inode->i_mutex, I_MUTEX_PARENT);
- mutex_lock_nested(&target_inode->i_mutex, I_MUTEX_CHILD);
- }
+ lock_two_nondirectories(target_inode, src_inode);
/* determine range to clone */
rc = -EINVAL;
@@ -124,13 +119,7 @@ static long cifs_ioctl_clone(unsigned int xid, struct file *dst_file,
out_unlock:
/* although unlocking in the reverse order from locking is not
strictly necessary here it is a little cleaner to be consistent */
- if (target_inode < src_inode) {
- mutex_unlock(&src_inode->i_mutex);
- mutex_unlock(&target_inode->i_mutex);
- } else {
- mutex_unlock(&target_inode->i_mutex);
- mutex_unlock(&src_inode->i_mutex);
- }
+ unlock_two_nondirectories(src_inode, target_inode);
out_fput:
fdput(src_file);
out_drop_write:
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index b333ff60781d..abae6dd2c6b9 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -926,6 +926,7 @@ cifs_NTtimeToUnix(__le64 ntutc)
/* Subtract the NTFS time offset, then convert to 1s intervals. */
s64 t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET;
+ u64 abs_t;
/*
* Unfortunately can not use normal 64 bit division on 32 bit arch, but
@@ -933,13 +934,14 @@ cifs_NTtimeToUnix(__le64 ntutc)
* to special case them
*/
if (t < 0) {
- t = -t;
- ts.tv_nsec = (long)(do_div(t, 10000000) * 100);
+ abs_t = -t;
+ ts.tv_nsec = (long)(do_div(abs_t, 10000000) * 100);
ts.tv_nsec = -ts.tv_nsec;
- ts.tv_sec = -t;
+ ts.tv_sec = -abs_t;
} else {
- ts.tv_nsec = (long)do_div(t, 10000000) * 100;
- ts.tv_sec = t;
+ abs_t = t;
+ ts.tv_nsec = (long)do_div(abs_t, 10000000) * 100;
+ ts.tv_sec = abs_t;
}
return ts;
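
The cifs fix matters because the kernel's do_div() expects an unsigned 64-bit dividend, so a negative NT timestamp has to be divided via its absolute value with the sign restored afterwards. A stand-alone user-space version of the same conversion; the epoch offset used is the usual 1601-to-1970 difference in 100 ns units.

#include <inttypes.h>
#include <stdio.h>

#define NTFS_TIME_OFFSET 116444736000000000ULL	/* 1601->1970, in 100 ns units */

struct ts { int64_t tv_sec; long tv_nsec; };

static struct ts nt_time_to_unix(uint64_t ntutc)
{
	int64_t t = (int64_t)(ntutc - NTFS_TIME_OFFSET);
	uint64_t abs_t = t < 0 ? (uint64_t)-t : (uint64_t)t;
	struct ts ts;

	/* divide the absolute value only; mirror the sign handling above */
	ts.tv_nsec = (long)(abs_t % 10000000) * 100;
	ts.tv_sec  = (int64_t)(abs_t / 10000000);
	if (t < 0) {
		ts.tv_nsec = -ts.tv_nsec;
		ts.tv_sec  = -ts.tv_sec;
	}
	return ts;
}

int main(void)
{
	struct ts ts = nt_time_to_unix(NTFS_TIME_OFFSET + 10000001ULL);

	printf("%" PRId64 ".%09ld\n", ts.tv_sec, ts.tv_nsec);	/* 1.000000100 */
	return 0;
}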
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 8eaf20a80649..c295338e0a98 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -69,7 +69,8 @@ static inline void dump_cifs_file_struct(struct file *file, char *label)
* Attempt to preload the dcache with the results from the FIND_FIRST/NEXT
*
* Find the dentry that matches "name". If there isn't one, create one. If it's
- * a negative dentry or the uniqueid changed, then drop it and recreate it.
+ * a negative dentry or the uniqueid or filetype(mode) changed,
+ * then drop it and recreate it.
*/
static void
cifs_prime_dcache(struct dentry *parent, struct qstr *name,
@@ -97,8 +98,11 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM))
fattr->cf_uniqueid = CIFS_I(inode)->uniqueid;
- /* update inode in place if i_ino didn't change */
- if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ /* update inode in place
+ * if both i_ino and i_mode didn't change */
+ if (CIFS_I(inode)->uniqueid == fattr->cf_uniqueid &&
+ (inode->i_mode & S_IFMT) ==
+ (fattr->cf_mode & S_IFMT)) {
cifs_fattr_to_inode(inode, fattr);
goto out;
}
diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
index f1cefc9763ed..689f035915cf 100644
--- a/fs/cifs/smb2misc.c
+++ b/fs/cifs/smb2misc.c
@@ -32,12 +32,14 @@
static int
check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
{
+ __u64 wire_mid = le64_to_cpu(hdr->MessageId);
+
/*
* Make sure that this really is an SMB, that it is a response,
* and that the message ids match.
*/
if ((*(__le32 *)hdr->ProtocolId == SMB2_PROTO_NUMBER) &&
- (mid == hdr->MessageId)) {
+ (mid == wire_mid)) {
if (hdr->Flags & SMB2_FLAGS_SERVER_TO_REDIR)
return 0;
else {
@@ -51,11 +53,11 @@ check_smb2_hdr(struct smb2_hdr *hdr, __u64 mid)
if (*(__le32 *)hdr->ProtocolId != SMB2_PROTO_NUMBER)
cifs_dbg(VFS, "Bad protocol string signature header %x\n",
*(unsigned int *) hdr->ProtocolId);
- if (mid != hdr->MessageId)
+ if (mid != wire_mid)
cifs_dbg(VFS, "Mids do not match: %llu and %llu\n",
- mid, hdr->MessageId);
+ mid, wire_mid);
}
- cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", hdr->MessageId);
+ cifs_dbg(VFS, "Bad SMB detected. The Mid=%llu\n", wire_mid);
return 1;
}
@@ -95,7 +97,7 @@ smb2_check_message(char *buf, unsigned int length)
{
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
struct smb2_pdu *pdu = (struct smb2_pdu *)hdr;
- __u64 mid = hdr->MessageId;
+ __u64 mid = le64_to_cpu(hdr->MessageId);
__u32 len = get_rfc1002_length(buf);
__u32 clc_len; /* calculated length */
int command;
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 93fd0586f9ec..96b5d40a2ece 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -176,10 +176,11 @@ smb2_find_mid(struct TCP_Server_Info *server, char *buf)
{
struct mid_q_entry *mid;
struct smb2_hdr *hdr = (struct smb2_hdr *)buf;
+ __u64 wire_mid = le64_to_cpu(hdr->MessageId);
spin_lock(&GlobalMid_Lock);
list_for_each_entry(mid, &server->pending_mid_q, qhead) {
- if ((mid->mid == hdr->MessageId) &&
+ if ((mid->mid == wire_mid) &&
(mid->mid_state == MID_REQUEST_SUBMITTED) &&
(mid->command == hdr->Command)) {
spin_unlock(&GlobalMid_Lock);
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index ce858477002a..70867d54fb8b 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -110,7 +110,7 @@ struct smb2_hdr {
__le16 CreditRequest; /* CreditResponse */
__le32 Flags;
__le32 NextCommand;
- __u64 MessageId; /* opaque - so can stay little endian */
+ __le64 MessageId;
__le32 ProcessId;
__u32 TreeId; /* opaque - so do not make little endian */
__u64 SessionId; /* opaque - so do not make little endian */
diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c
index 5111e7272db6..d4c5b6f109a7 100644
--- a/fs/cifs/smb2transport.c
+++ b/fs/cifs/smb2transport.c
@@ -490,7 +490,7 @@ smb2_mid_entry_alloc(const struct smb2_hdr *smb_buffer,
return temp;
else {
memset(temp, 0, sizeof(struct mid_q_entry));
- temp->mid = smb_buffer->MessageId; /* always LE */
+ temp->mid = le64_to_cpu(smb_buffer->MessageId);
temp->pid = current->pid;
temp->command = smb_buffer->Command; /* Always LE */
temp->when_alloc = jiffies;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index e5d3eadf47b1..bed43081720f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
/* fallback to generic here if not in extents fmt */
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return __generic_block_fiemap(inode, fieinfo, start, len,
- ext4_get_block);
+ return generic_block_fiemap(inode, fieinfo, start, len,
+ ext4_get_block);
if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
return -EBADR;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 513c12cf444c..8131be8c0af3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
* we determine this extent as a data or a hole according to whether the
* page cache has data or not.
*/
-static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
- loff_t endoff, loff_t *offset)
+static int ext4_find_unwritten_pgoff(struct inode *inode,
+ int whence,
+ struct ext4_map_blocks *map,
+ loff_t *offset)
{
struct pagevec pvec;
+ unsigned int blkbits;
pgoff_t index;
pgoff_t end;
+ loff_t endoff;
loff_t startoff;
loff_t lastoff;
int found = 0;
+ blkbits = inode->i_sb->s_blocksize_bits;
startoff = *offset;
lastoff = startoff;
-
+ endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits;
index = startoff >> PAGE_CACHE_SHIFT;
end = endoff >> PAGE_CACHE_SHIFT;
@@ -403,144 +408,147 @@ out:
static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct fiemap_extent_info fie;
- struct fiemap_extent ext[2];
- loff_t next;
- int i, ret = 0;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t dataoff, isize;
+ int blkbits;
+ int ret = 0;
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- fie.fi_flags = 0;
- fie.fi_extents_max = 2;
- fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
- while (1) {
- mm_segment_t old_fs = get_fs();
-
- fie.fi_extents_mapped = 0;
- memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
-
- set_fs(get_ds());
- ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
- set_fs(old_fs);
- if (ret)
+
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ dataoff = offset;
+
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ if (last != start)
+ dataoff = (loff_t)last << blkbits;
break;
+ }
- /* No extents found, EOF */
- if (!fie.fi_extents_mapped) {
- ret = -ENXIO;
+ /*
+ * If there is a delayed extent at this offset,
+ * it is treated as data.
+ */
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ if (last != start)
+ dataoff = (loff_t)last << blkbits;
break;
}
- for (i = 0; i < fie.fi_extents_mapped; i++) {
- next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
- if (offset < (loff_t)ext[i].fe_logical)
- offset = (loff_t)ext[i].fe_logical;
- /*
- * If extent is not unwritten, then it contains valid
- * data, mapped or delayed.
- */
- if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
- goto out;
+ /*
+ * If there is a unwritten extent at this offset,
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or a hole depending on
+ * whether the page cache has data for it.
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA,
+ &map, &dataoff);
+ if (unwritten)
+ break;
+ }
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
- next, &offset))
- goto out;
+ last++;
+ dataoff = (loff_t)last << blkbits;
+ } while (last <= end);
- if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
- ret = -ENXIO;
- goto out;
- }
- offset = next;
- }
- }
- if (offset > inode->i_size)
- offset = inode->i_size;
-out:
mutex_unlock(&inode->i_mutex);
- if (ret)
- return ret;
- return vfs_setpos(file, offset, maxsize);
+ if (dataoff > isize)
+ return -ENXIO;
+
+ return vfs_setpos(file, dataoff, maxsize);
}
/*
- * ext4_seek_hole() retrieves the offset for SEEK_HOLE
+ * ext4_seek_hole() retrieves the offset for SEEK_HOLE.
*/
static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
{
struct inode *inode = file->f_mapping->host;
- struct fiemap_extent_info fie;
- struct fiemap_extent ext[2];
- loff_t next;
- int i, ret = 0;
+ struct ext4_map_blocks map;
+ struct extent_status es;
+ ext4_lblk_t start, last, end;
+ loff_t holeoff, isize;
+ int blkbits;
+ int ret = 0;
mutex_lock(&inode->i_mutex);
- if (offset >= inode->i_size) {
+
+ isize = i_size_read(inode);
+ if (offset >= isize) {
mutex_unlock(&inode->i_mutex);
return -ENXIO;
}
- fie.fi_flags = 0;
- fie.fi_extents_max = 2;
- fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
- while (1) {
- mm_segment_t old_fs = get_fs();
-
- fie.fi_extents_mapped = 0;
- memset(ext, 0, sizeof(*ext));
+ blkbits = inode->i_sb->s_blocksize_bits;
+ start = offset >> blkbits;
+ last = start;
+ end = isize >> blkbits;
+ holeoff = offset;
- set_fs(get_ds());
- ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
- set_fs(old_fs);
- if (ret)
- break;
+ do {
+ map.m_lblk = last;
+ map.m_len = end - last + 1;
+ ret = ext4_map_blocks(NULL, inode, &map, 0);
+ if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
+ last += ret;
+ holeoff = (loff_t)last << blkbits;
+ continue;
+ }
- /* No extents found */
- if (!fie.fi_extents_mapped)
- break;
+ /*
+ * If there is a delayed extent at this offset,
+ * we skip it.
+ */
+ ext4_es_find_delayed_extent_range(inode, last, last, &es);
+ if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
+ last = es.es_lblk + es.es_len;
+ holeoff = (loff_t)last << blkbits;
+ continue;
+ }
- for (i = 0; i < fie.fi_extents_mapped; i++) {
- next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
- /*
- * If extent is not unwritten, then it contains valid
- * data, mapped or delayed.
- */
- if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
- if (offset < (loff_t)ext[i].fe_logical)
- goto out;
- offset = next;
+ /*
+ * If there is an unwritten extent at this offset,
+ * it is treated as data or a hole depending on
+ * whether the page cache has data for it.
+ */
+ if (map.m_flags & EXT4_MAP_UNWRITTEN) {
+ int unwritten;
+ unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
+ &map, &holeoff);
+ if (!unwritten) {
+ last += ret;
+ holeoff = (loff_t)last << blkbits;
continue;
}
- /*
- * If there is a unwritten extent at this offset,
- * it will be as a data or a hole according to page
- * cache that has data or not.
- */
- if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
- next, &offset))
- goto out;
-
- offset = next;
- if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
- goto out;
}
- }
- if (offset > inode->i_size)
- offset = inode->i_size;
-out:
+
+ /* find a hole */
+ break;
+ } while (last <= end);
+
mutex_unlock(&inode->i_mutex);
- if (ret)
- return ret;
- return vfs_setpos(file, offset, maxsize);
+ if (holeoff > isize)
+ holeoff = isize;
+
+ return vfs_setpos(file, holeoff, maxsize);
}
/*
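
The rewritten ext4_seek_data()/ext4_seek_hole() back the lseek() SEEK_DATA/SEEK_HOLE interface; from user space the behaviour looks like the probe below (the path is only an example, and ENXIO is returned when the offset is at or beyond EOF).

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/etc/hostname", O_RDONLY);	/* any existing file */
	off_t data, hole;

	if (fd < 0)
		return 1;

	data = lseek(fd, 0, SEEK_DATA);	/* first data at or after offset 0 */
	hole = lseek(fd, 0, SEEK_HOLE);	/* first hole at or after offset 0 */
	printf("data at %lld, hole at %lld\n", (long long)data, (long long)hole);

	close(fd);
	return 0;
}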
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index bf76f405a5f9..8a8ec6293b19 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb)
return -EPERM;
/*
+ * If we are not using the primary superblock/GDT copy don't resize,
+ * because the user tools have no way of handling this. Probably a
+ * bad time to do it anyways.
+ */
+ if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
+ ext4_warning(sb, "won't resize using backup superblock at %llu",
+ (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
+ return -EPERM;
+ }
+
+ /*
* We are not allowed to do online-resizing on a filesystem mounted
* with error, because it can destroy the filesystem easily.
*/
@@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
"EXT4-fs: ext4_add_new_gdb: adding group block %lu\n",
gdb_num);
- /*
- * If we are not using the primary superblock/GDT copy don't resize,
- * because the user tools have no way of handling this. Probably a
- * bad time to do it anyways.
- */
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
- le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
- ext4_warning(sb, "won't resize using backup superblock at %llu",
- (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
- return -EPERM;
- }
-
gdb_bh = sb_bread(sb, gdblock);
if (!gdb_bh)
return -EIO;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 43c92b1685cb..74c5f53595fb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
- ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
+ ext4_warning(sb, "metadata_csum and uninit_bg are "
"redundant flags; please run fsck.");
/* Check for a known checksum algorithm */
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 99d440a4a6ba..ee85cd4e136a 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -740,14 +740,15 @@ static int __init fcntl_init(void)
* Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
* is defined as O_NONBLOCK on some platforms and not on others.
*/
- BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
+ BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32(
O_RDONLY | O_WRONLY | O_RDWR |
O_CREAT | O_EXCL | O_NOCTTY |
O_TRUNC | O_APPEND | /* O_NONBLOCK | */
__O_SYNC | O_DSYNC | FASYNC |
O_DIRECT | O_LARGEFILE | O_DIRECTORY |
O_NOFOLLOW | O_NOATIME | O_CLOEXEC |
- __FMODE_EXEC | O_PATH | __O_TMPFILE
+ __FMODE_EXEC | O_PATH | __O_TMPFILE |
+ __FMODE_NONOTIFY
));
fasync_cache = kmem_cache_create("fasync_cache",
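
The widened BUILD_BUG_ON counts the distinct O_* bits so the build breaks when a flag is added without updating the check. A user-space sketch of the same count-the-bits idea with invented flag values; with GCC or Clang the popcount of a constant folds at compile time, so the check could equally live in a _Static_assert.

#include <assert.h>
#include <stdio.h>

#define FLAG_A 0x01
#define FLAG_B 0x02
#define FLAG_C 0x08
#define ALL_FLAGS (FLAG_A | FLAG_B | FLAG_C)
#define EXPECTED_FLAG_COUNT 3

int main(void)
{
	/* every flag must occupy its own bit, and none may be forgotten */
	assert(__builtin_popcount(ALL_FLAGS) == EXPECTED_FLAG_COUNT);
	printf("flag count ok\n");
	return 0;
}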
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index ba1107977f2e..ed19a7d622fa 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req)
req->in.h.pid = current->pid;
}
+void fuse_set_initialized(struct fuse_conn *fc)
+{
+ /* Make sure stores before this are seen on another CPU */
+ smp_wmb();
+ fc->initialized = 1;
+}
+
static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background)
{
return !fc->initialized || (for_background && fc->blocked);
@@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages,
if (intr)
goto out;
}
+ /* Matches smp_wmb() in fuse_set_initialized() */
+ smp_rmb();
err = -ENOTCONN;
if (!fc->connected)
@@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc,
atomic_inc(&fc->num_waiting);
wait_event(fc->blocked_waitq, fc->initialized);
+ /* Matches smp_wmb() in fuse_set_initialized() */
+ smp_rmb();
req = fuse_request_alloc(0);
if (!req)
req = get_reserved_req(fc, file);
@@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req)
}
EXPORT_SYMBOL_GPL(fuse_request_send);
+static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args)
+{
+ if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS)
+ args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE;
+
+ if (fc->minor < 9) {
+ switch (args->in.h.opcode) {
+ case FUSE_LOOKUP:
+ case FUSE_CREATE:
+ case FUSE_MKNOD:
+ case FUSE_MKDIR:
+ case FUSE_SYMLINK:
+ case FUSE_LINK:
+ args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
+ break;
+ case FUSE_GETATTR:
+ case FUSE_SETATTR:
+ args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
+ break;
+ }
+ }
+ if (fc->minor < 12) {
+ switch (args->in.h.opcode) {
+ case FUSE_CREATE:
+ args->in.args[0].size = sizeof(struct fuse_open_in);
+ break;
+ case FUSE_MKNOD:
+ args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE;
+ break;
+ }
+ }
+}
+
ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
{
struct fuse_req *req;
@@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args)
if (IS_ERR(req))
return PTR_ERR(req);
+ /* Needs to be done after fuse_get_req() so that fc->minor is valid */
+ fuse_adjust_compat(fc, args);
+
req->in.h.opcode = args->in.h.opcode;
req->in.h.nodeid = args->in.h.nodeid;
req->in.numargs = args->in.numargs;
@@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc)
if (fc->connected) {
fc->connected = 0;
fc->blocked = 0;
- fc->initialized = 1;
+ fuse_set_initialized(fc);
end_io_requests(fc);
end_queued_requests(fc);
end_polls(fc);
@@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file)
spin_lock(&fc->lock);
fc->connected = 0;
fc->blocked = 0;
- fc->initialized = 1;
+ fuse_set_initialized(fc);
end_queued_requests(fc);
end_polls(fc);
wake_up_all(&fc->blocked_waitq);
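
The fuse_set_initialized()/smp_rmb() pairing above is the usual publish pattern: every store that sets up the connection must be visible before a reader sees the initialized flag. A user-space analogue with C11 release/acquire atomics; the struct and helper names here are invented for illustration.

#include <stdatomic.h>
#include <stdio.h>

struct conn {
	int max_write;			/* plain data published by the writer */
	atomic_int initialized;		/* flag readers synchronise on */
};

static void conn_set_initialized(struct conn *c, int max_write)
{
	c->max_write = max_write;
	/* release: makes the store above visible to acquiring readers */
	atomic_store_explicit(&c->initialized, 1, memory_order_release);
}

static int conn_get_max_write(struct conn *c)
{
	/* acquire: pairs with the release store in conn_set_initialized() */
	if (!atomic_load_explicit(&c->initialized, memory_order_acquire))
		return -1;		/* not ready yet */
	return c->max_write;
}

int main(void)
{
	struct conn c = { 0 };

	printf("%d\n", conn_get_max_write(&c));		/* -1 */
	conn_set_initialized(&c, 4096);
	printf("%d\n", conn_get_max_write(&c));		/* 4096 */
	return 0;
}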
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 252b8a5de8b5..08e7b1a9d5d0 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, struct fuse_args *args,
args->in.args[0].size = name->len + 1;
args->in.args[0].value = name->name;
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(struct fuse_entry_out);
+ args->out.args[0].size = sizeof(struct fuse_entry_out);
args->out.args[0].value = outarg;
}
@@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry,
args.in.h.opcode = FUSE_CREATE;
args.in.h.nodeid = get_node_id(dir);
args.in.numargs = 2;
- args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) :
- sizeof(inarg);
+ args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.in.args[1].size = entry->d_name.len + 1;
args.in.args[1].value = entry->d_name.name;
args.out.numargs = 2;
- if (fc->minor < 9)
- args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args.out.args[0].size = sizeof(outentry);
+ args.out.args[0].size = sizeof(outentry);
args.out.args[0].value = &outentry;
args.out.args[1].size = sizeof(outopen);
args.out.args[1].value = &outopen;
@@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args,
memset(&outarg, 0, sizeof(outarg));
args->in.h.nodeid = get_node_id(dir);
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(outarg);
+ args->out.args[0].size = sizeof(outarg);
args->out.args[0].value = &outarg;
err = fuse_simple_request(fc, args);
if (err)
@@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode,
inarg.umask = current_umask();
args.in.h.opcode = FUSE_MKNOD;
args.in.numargs = 2;
- args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE :
- sizeof(inarg);
+ args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.in.args[1].size = entry->d_name.len + 1;
args.in.args[1].value = entry->d_name.name;
@@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
args.in.args[0].size = sizeof(inarg);
args.in.args[0].value = &inarg;
args.out.numargs = 1;
- if (fc->minor < 9)
- args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
- else
- args.out.args[0].size = sizeof(outarg);
+ args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err) {
@@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args,
args->in.args[0].size = sizeof(*inarg_p);
args->in.args[0].value = inarg_p;
args->out.numargs = 1;
- if (fc->minor < 9)
- args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE;
- else
- args->out.args[0].size = sizeof(*outarg_p);
+ args->out.args[0].size = sizeof(*outarg_p);
args->out.args[0].value = outarg_p;
}
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index e0fc6725d1d0..1cdfb07c1376 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc);
int fuse_do_setattr(struct inode *inode, struct iattr *attr,
struct file *file);
+void fuse_set_initialized(struct fuse_conn *fc);
+
#endif /* _FS_FUSE_I_H */
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 6749109f255d..f38256e4476e 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf)
args.in.h.opcode = FUSE_STATFS;
args.in.h.nodeid = get_node_id(dentry->d_inode);
args.out.numargs = 1;
- args.out.args[0].size =
- fc->minor < 4 ? FUSE_COMPAT_STATFS_SIZE : sizeof(outarg);
+ args.out.args[0].size = sizeof(outarg);
args.out.args[0].value = &outarg;
err = fuse_simple_request(fc, &args);
if (!err)
@@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
fc->max_write = max_t(unsigned, 4096, fc->max_write);
fc->conn_init = 1;
}
- fc->initialized = 1;
+ fuse_set_initialized(fc);
wake_up_all(&fc->blocked_waitq);
}
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index c8b148bbdc8b..3e193cb36996 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -667,7 +667,7 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
s64 change, struct gfs2_quota_data *qd,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct inode *inode = &ip->i_inode;
struct gfs2_sbd *sdp = GFS2_SB(inode);
@@ -697,16 +697,16 @@ static int gfs2_adjust_quota(struct gfs2_inode *ip, loff_t loc,
be64_add_cpu(&q.qu_value, change);
qd->qd_qb.qb_value = q.qu_value;
if (fdq) {
- if (fdq->d_fieldmask & FS_DQ_BSOFT) {
- q.qu_warn = cpu_to_be64(fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPC_SOFT) {
+ q.qu_warn = cpu_to_be64(fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_warn = q.qu_warn;
}
- if (fdq->d_fieldmask & FS_DQ_BHARD) {
- q.qu_limit = cpu_to_be64(fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPC_HARD) {
+ q.qu_limit = cpu_to_be64(fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_limit = q.qu_limit;
}
- if (fdq->d_fieldmask & FS_DQ_BCOUNT) {
- q.qu_value = cpu_to_be64(fdq->d_bcount >> sdp->sd_fsb2bb_shift);
+ if (fdq->d_fieldmask & QC_SPACE) {
+ q.qu_value = cpu_to_be64(fdq->d_space >> sdp->sd_sb.sb_bsize_shift);
qd->qd_qb.qb_value = q.qu_value;
}
}
@@ -1497,7 +1497,7 @@ static int gfs2_quota_get_xstate(struct super_block *sb,
}
static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_quota_lvb *qlvb;
@@ -1505,7 +1505,7 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
struct gfs2_holder q_gh;
int error;
- memset(fdq, 0, sizeof(struct fs_disk_quota));
+ memset(fdq, 0, sizeof(*fdq));
if (sdp->sd_args.ar_quota == GFS2_QUOTA_OFF)
return -ESRCH; /* Crazy XFS error code */
@@ -1522,12 +1522,9 @@ static int gfs2_get_dqblk(struct super_block *sb, struct kqid qid,
goto out;
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lksb.sb_lvbptr;
- fdq->d_version = FS_DQUOT_VERSION;
- fdq->d_flags = (qid.type == USRQUOTA) ? FS_USER_QUOTA : FS_GROUP_QUOTA;
- fdq->d_id = from_kqid_munged(current_user_ns(), qid);
- fdq->d_blk_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_fsb2bb_shift;
- fdq->d_blk_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_fsb2bb_shift;
- fdq->d_bcount = be64_to_cpu(qlvb->qb_value) << sdp->sd_fsb2bb_shift;
+ fdq->d_spc_hardlimit = be64_to_cpu(qlvb->qb_limit) << sdp->sd_sb.sb_bsize_shift;
+ fdq->d_spc_softlimit = be64_to_cpu(qlvb->qb_warn) << sdp->sd_sb.sb_bsize_shift;
+ fdq->d_space = be64_to_cpu(qlvb->qb_value) << sdp->sd_sb.sb_bsize_shift;
gfs2_glock_dq_uninit(&q_gh);
out:
@@ -1536,10 +1533,10 @@ out:
}
/* GFS2 only supports a subset of the XFS fields */
-#define GFS2_FIELDMASK (FS_DQ_BSOFT|FS_DQ_BHARD|FS_DQ_BCOUNT)
+#define GFS2_FIELDMASK (QC_SPC_SOFT|QC_SPC_HARD|QC_SPACE)
static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *fdq)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
struct gfs2_inode *ip = GFS2_I(sdp->sd_quota_inode);
@@ -1583,17 +1580,17 @@ static int gfs2_set_dqblk(struct super_block *sb, struct kqid qid,
goto out_i;
/* If nothing has changed, this is a no-op */
- if ((fdq->d_fieldmask & FS_DQ_BSOFT) &&
- ((fdq->d_blk_softlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
- fdq->d_fieldmask ^= FS_DQ_BSOFT;
+ if ((fdq->d_fieldmask & QC_SPC_SOFT) &&
+ ((fdq->d_spc_softlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_warn)))
+ fdq->d_fieldmask ^= QC_SPC_SOFT;
- if ((fdq->d_fieldmask & FS_DQ_BHARD) &&
- ((fdq->d_blk_hardlimit >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
- fdq->d_fieldmask ^= FS_DQ_BHARD;
+ if ((fdq->d_fieldmask & QC_SPC_HARD) &&
+ ((fdq->d_spc_hardlimit >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_limit)))
+ fdq->d_fieldmask ^= QC_SPC_HARD;
- if ((fdq->d_fieldmask & FS_DQ_BCOUNT) &&
- ((fdq->d_bcount >> sdp->sd_fsb2bb_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
- fdq->d_fieldmask ^= FS_DQ_BCOUNT;
+ if ((fdq->d_fieldmask & QC_SPACE) &&
+ ((fdq->d_space >> sdp->sd_sb.sb_bsize_shift) == be64_to_cpu(qd->qd_qb.qb_value)))
+ fdq->d_fieldmask ^= QC_SPACE;
if (fdq->d_fieldmask == 0)
goto out_i;
diff --git a/fs/isofs/rock.c b/fs/isofs/rock.c
index bb63254ed848..735d7522a3a9 100644
--- a/fs/isofs/rock.c
+++ b/fs/isofs/rock.c
@@ -362,6 +362,9 @@ repeat:
rs.cont_size = isonum_733(rr->u.CE.size);
break;
case SIG('E', 'R'):
+ /* Invalid length of ER tag id? */
+ if (rr->u.ER.len_id + offsetof(struct rock_ridge, u.ER.data) > rr->len)
+ goto out;
ISOFS_SB(inode->i_sb)->s_rock = 1;
printk(KERN_DEBUG "ISO 9660 Extensions: ");
{
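Note on the isofs hunk above: the new check refuses an ER record whose declared identifier length, added to the fixed part of the record, would run past the record's own length field, so the subsequent print/parse cannot read out of bounds. A minimal userspace sketch of the same pattern, using a hypothetical record layout rather than the real Rock Ridge structures:

    #include <stddef.h>

    /* Hypothetical record layout, standing in for struct rock_ridge. */
    struct record {
            unsigned char len;      /* total record length, header included */
            unsigned char id_len;   /* length of the variable identifier */
            unsigned char id[];     /* id_len bytes follow the header */
    };

    /* Reject a record whose variable part would overrun its declared size. */
    static int record_fits(const struct record *r)
    {
            return (size_t)r->id_len + offsetof(struct record, id) <= r->len;
    }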
diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
index 37989f02a226..2d881b381d2b 100644
--- a/fs/kernfs/dir.c
+++ b/fs/kernfs/dir.c
@@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns)
static int kernfs_name_compare(unsigned int hash, const char *name,
const void *ns, const struct kernfs_node *kn)
{
- if (hash != kn->hash)
- return hash - kn->hash;
- if (ns != kn->ns)
- return ns - kn->ns;
+ if (hash < kn->hash)
+ return -1;
+ if (hash > kn->hash)
+ return 1;
+ if (ns < kn->ns)
+ return -1;
+ if (ns > kn->ns)
+ return 1;
return strcmp(name, kn->name);
}
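Note on the kernfs hunk above: the old comparator returned the raw difference of two unsigned 32-bit hashes (and of two pointers), and truncating that difference to int can flip its sign, so rbtree lookups could see an inconsistent ordering. The replacement returns an explicit -1/0/1. A small standalone sketch of the failure mode:

    #include <stdio.h>

    static int bad_cmp(unsigned int a, unsigned int b)
    {
            return a - b;                   /* wraps mod 2^32, then truncates */
    }

    static int good_cmp(unsigned int a, unsigned int b)
    {
            if (a < b)
                    return -1;
            if (a > b)
                    return 1;
            return 0;
    }

    int main(void)
    {
            unsigned int a = 0x00000001, b = 0x80000002;    /* a < b */

            printf("bad:  %d\n", bad_cmp(a, b));    /* positive: wrong order */
            printf("good: %d\n", good_cmp(a, b));   /* -1: correct order */
            return 0;
    }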
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index e94c887da2d7..55505cbe11af 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -138,10 +138,6 @@ lockd(void *vrqstp)
dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n");
- if (!nlm_timeout)
- nlm_timeout = LOCKD_DFLT_TIMEO;
- nlmsvc_timeout = nlm_timeout * HZ;
-
/*
* The main request loop. We don't terminate until the last
* NFS mount or NFS daemon has gone away.
@@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void)
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
+ if (!nlm_timeout)
+ nlm_timeout = LOCKD_DFLT_TIMEO;
+ nlmsvc_timeout = nlm_timeout * HZ;
+
serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup);
if (!serv) {
printk(KERN_WARNING "lockd_up: create service failed\n");
diff --git a/fs/locks.c b/fs/locks.c
index 735b8d3fa78c..59e2f905e4ff 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp)
break;
}
trace_generic_delete_lease(inode, fl);
- if (fl)
+ if (fl && IS_LEASE(fl))
error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose);
spin_unlock(&inode->i_lock);
locks_dispose_list(&dispose);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 10bf07280f4a..294692ff83b1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -212,6 +212,12 @@ static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
*/
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, struct iov_iter *iter, loff_t pos)
{
+ struct inode *inode = iocb->ki_filp->f_mapping->host;
+
+ /* we only support swap file calling nfs_direct_IO */
+ if (!IS_SWAPFILE(inode))
+ return 0;
+
#ifndef CONFIG_NFS_SWAP
dprintk("NFS: nfs_direct_IO (%pD) off/no(%Ld/%lu) EINVAL\n",
iocb->ki_filp, (long long) pos, iter->nr_segs);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 4bffe637ea32..2211f6ba8736 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -352,8 +352,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
nfs_attr_check_mountpoint(sb, fattr);
- if (((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0) &&
- !nfs_attr_use_mounted_on_fileid(fattr))
+ if (nfs_attr_use_mounted_on_fileid(fattr))
+ fattr->fileid = fattr->mounted_on_fileid;
+ else if ((fattr->valid & NFS_ATTR_FATTR_FILEID) == 0)
goto out_no_inode;
if ((fattr->valid & NFS_ATTR_FATTR_TYPE) == 0)
goto out_no_inode;
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index efaa31c70fbe..b6f34bfa6fe8 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -31,8 +31,6 @@ static inline int nfs_attr_use_mounted_on_fileid(struct nfs_fattr *fattr)
(((fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT) == 0) &&
((fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) == 0)))
return 0;
-
- fattr->fileid = fattr->mounted_on_fileid;
return 1;
}
diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c
index 03311259b0c4..706ad10b8186 100644
--- a/fs/nfs/nfs4client.c
+++ b/fs/nfs/nfs4client.c
@@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
kfree(clp->cl_serverowner);
kfree(clp->cl_serverscope);
kfree(clp->cl_implid);
+ kfree(clp->cl_owner_id);
}
void nfs4_free_client(struct nfs_client *clp)
@@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep,
spin_unlock(&nn->nfs_client_lock);
}
+static bool nfs4_match_client_owner_id(const struct nfs_client *clp1,
+ const struct nfs_client *clp2)
+{
+ if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL)
+ return true;
+ return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0;
+}
+
/**
* nfs40_walk_client_list - Find server that recognizes a client ID
*
@@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new,
if (pos->rpc_ops != new->rpc_ops)
continue;
- if (pos->cl_proto != new->cl_proto)
- continue;
-
if (pos->cl_minorversion != new->cl_minorversion)
continue;
@@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new,
if (pos->cl_clientid != new->cl_clientid)
continue;
+ if (!nfs4_match_client_owner_id(pos, new))
+ continue;
+
atomic_inc(&pos->cl_count);
spin_unlock(&nn->nfs_client_lock);
@@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b)
}
/*
- * Returns true if the server owners match
+ * Returns true if the server major ids match
*/
static bool
-nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b)
+nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b)
{
struct nfs41_server_owner *o1 = a->cl_serverowner;
struct nfs41_server_owner *o2 = b->cl_serverowner;
- if (o1->minor_id != o2->minor_id) {
- dprintk("NFS: --> %s server owner minor IDs do not match\n",
- __func__);
- return false;
- }
-
if (o1->major_id_sz != o2->major_id_sz)
goto out_major_mismatch;
if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0)
@@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new,
if (pos->rpc_ops != new->rpc_ops)
continue;
- if (pos->cl_proto != new->cl_proto)
- continue;
-
if (pos->cl_minorversion != new->cl_minorversion)
continue;
@@ -639,7 +639,7 @@ int nfs41_walk_client_list(struct nfs_client *new,
prev = pos;
status = nfs_wait_client_init_complete(pos);
- if (status == 0) {
+ if (pos->cl_cons_state == NFS_CS_SESSION_INITING) {
nfs4_schedule_lease_recovery(pos);
status = nfs4_wait_clnt_recover(pos);
}
@@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new,
if (!nfs4_match_clientids(pos, new))
continue;
- if (!nfs4_match_serverowners(pos, new))
+ /*
+ * Note that session trunking is just a special subcase of
+ * client id trunking. In either case, we want to fall back
+ * to using the existing nfs_client.
+ */
+ if (!nfs4_check_clientid_trunking(pos, new))
+ continue;
+
+ /* Unlike NFSv4.0, we know that NFSv4.1 always uses the
+ * uniform string, however someone might switch the
+ * uniquifier string on us.
+ */
+ if (!nfs4_match_client_owner_id(pos, new))
continue;
atomic_inc(&pos->cl_count);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index e7f8d5ff2581..c347705b0161 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode)
return 0;
if ((delegation->type & fmode) != fmode)
return 0;
- if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags))
- return 0;
if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
return 0;
nfs_mark_delegation_referenced(delegation);
@@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp,
}
static unsigned int
-nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
+nfs4_init_nonuniform_client_string(struct nfs_client *clp,
char *buf, size_t len)
{
unsigned int result;
+ if (clp->cl_owner_id != NULL)
+ return strlcpy(buf, clp->cl_owner_id, len);
+
rcu_read_lock();
result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s",
clp->cl_ipaddr,
@@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp,
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO));
rcu_read_unlock();
+ clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
return result;
}
static unsigned int
-nfs4_init_uniform_client_string(const struct nfs_client *clp,
+nfs4_init_uniform_client_string(struct nfs_client *clp,
char *buf, size_t len)
{
const char *nodename = clp->cl_rpcclient->cl_nodename;
+ unsigned int result;
+
+ if (clp->cl_owner_id != NULL)
+ return strlcpy(buf, clp->cl_owner_id, len);
if (nfs4_client_id_uniquifier[0] != '\0')
- return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
+ result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s",
clp->rpc_ops->version,
clp->cl_minorversion,
nfs4_client_id_uniquifier,
nodename);
- return scnprintf(buf, len, "Linux NFSv%u.%u %s",
+ else
+ result = scnprintf(buf, len, "Linux NFSv%u.%u %s",
clp->rpc_ops->version, clp->cl_minorversion,
nodename);
+ clp->cl_owner_id = kstrdup(buf, GFP_KERNEL);
+ return result;
}
/*
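Note on the nfs4proc.c hunk above: both client-string builders now cache the generated identifier in cl_owner_id on first use, so later calls (and the owner-id trunking checks added in nfs4client.c) compare against a stable string. A rough sketch of that compute-once pattern; the struct and field names here are illustrative, not the kernel's:

    /* Sketch only; 'struct client' and its fields are hypothetical. */
    static unsigned int client_string(struct client *clp, char *buf, size_t len)
    {
            unsigned int n;

            if (clp->owner_id)                      /* reuse the cached copy */
                    return strlcpy(buf, clp->owner_id, len);

            n = scnprintf(buf, len, "Linux NFSv4 %s", clp->ipaddr);
            clp->owner_id = kstrdup(buf, GFP_KERNEL); /* NULL means "unknown" */
            return n;
    }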
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 3550a9c87616..c06a1ba80d73 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
status = nfs4_setlease(dp);
goto out;
}
- atomic_inc(&fp->fi_delegees);
if (fp->fi_had_conflict) {
status = -EAGAIN;
goto out_unlock;
}
+ atomic_inc(&fp->fi_delegees);
hash_delegation_locked(dp, fp);
status = 0;
out_unlock:
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index c991616acca9..bff8567aa42d 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
struct fsnotify_event *kevent;
char __user *start;
int ret;
- DEFINE_WAIT(wait);
+ DEFINE_WAIT_FUNC(wait, woken_wake_function);
start = buf;
group = file->private_data;
pr_debug("%s: group=%p\n", __func__, group);
+ add_wait_queue(&group->notification_waitq, &wait);
while (1) {
- prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE);
-
mutex_lock(&group->notification_mutex);
kevent = get_one_event(group, count);
mutex_unlock(&group->notification_mutex);
@@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
if (start != buf)
break;
- schedule();
+
+ wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
continue;
}
@@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
buf += ret;
count -= ret;
}
+ remove_wait_queue(&group->notification_waitq, &wait);
- finish_wait(&group->notification_waitq, &wait);
if (start != buf && ret != -EFAULT)
ret = buf - start;
return ret;
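Note on the fanotify hunk above: the read loop switches from prepare_to_wait()/schedule() to a wait entry registered once with woken_wake_function and slept on with wait_woken(), which closes the window in which a wakeup arriving between the event check and the sleep could be lost. The general shape of that loop, assuming the standard wait_woken API (event_available(), waitq and ret are placeholders), is:

    DEFINE_WAIT_FUNC(wait, woken_wake_function);

    add_wait_queue(&waitq, &wait);
    while (1) {
            if (event_available())          /* re-check the condition */
                    break;
            if (signal_pending(current)) {
                    ret = -ERESTARTSYS;
                    break;
            }
            /* Sleeps unless a wakeup already set WQ_FLAG_WOKEN on 'wait'. */
            wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
    }
    remove_wait_queue(&waitq, &wait);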
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 79b5af5e6a7b..cecd875653e4 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -2023,11 +2023,8 @@ leave:
dlm_lockres_drop_inflight_ref(dlm, res);
spin_unlock(&res->spinlock);
- if (ret < 0) {
+ if (ret < 0)
mlog_errno(ret);
- if (newlock)
- dlm_lock_put(newlock);
- }
return ret;
}
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index b931e04e3388..914c121ec890 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb,
struct inode *inode,
const char *symname);
+static int ocfs2_double_lock(struct ocfs2_super *osb,
+ struct buffer_head **bh1,
+ struct inode *inode1,
+ struct buffer_head **bh2,
+ struct inode *inode2,
+ int rename);
+
+static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2);
/* An orphan dir name is an 8 byte value, printed as a hex string */
#define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64)))
@@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry,
{
handle_t *handle;
struct inode *inode = old_dentry->d_inode;
+ struct inode *old_dir = old_dentry->d_parent->d_inode;
int err;
struct buffer_head *fe_bh = NULL;
+ struct buffer_head *old_dir_bh = NULL;
struct buffer_head *parent_fe_bh = NULL;
struct ocfs2_dinode *fe = NULL;
struct ocfs2_super *osb = OCFS2_SB(dir->i_sb);
@@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry,
dquot_initialize(dir);
- err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT);
+ err = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
+ &parent_fe_bh, dir, 0);
if (err < 0) {
if (err != -ENOENT)
mlog_errno(err);
return err;
}
+ /* make sure both dirs have bhs
+ * get an extra ref on old_dir_bh if old==new */
+ if (!parent_fe_bh) {
+ if (old_dir_bh) {
+ parent_fe_bh = old_dir_bh;
+ get_bh(parent_fe_bh);
+ } else {
+ mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str);
+ err = -EIO;
+ goto out;
+ }
+ }
+
if (!dir->i_nlink) {
err = -ENOENT;
goto out;
}
- err = ocfs2_lookup_ino_from_name(dir, old_dentry->d_name.name,
+ err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name,
old_dentry->d_name.len, &old_de_ino);
if (err) {
err = -ENOENT;
@@ -801,10 +825,11 @@ out_unlock_inode:
ocfs2_inode_unlock(inode, 1);
out:
- ocfs2_inode_unlock(dir, 1);
+ ocfs2_double_unlock(old_dir, dir);
brelse(fe_bh);
brelse(parent_fe_bh);
+ brelse(old_dir_bh);
ocfs2_free_dir_lookup_result(&lookup);
@@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
}
/*
- * The only place this should be used is rename!
+ * The only place this should be used is rename and link!
* if they have the same id, then the 1st one is the only one locked.
*/
static int ocfs2_double_lock(struct ocfs2_super *osb,
struct buffer_head **bh1,
struct inode *inode1,
struct buffer_head **bh2,
- struct inode *inode2)
+ struct inode *inode2,
+ int rename)
{
int status;
int inode1_is_ancestor, inode2_is_ancestor;
@@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
/* lock id2 */
status = ocfs2_inode_lock_nested(inode2, bh2, 1,
- OI_LS_RENAME1);
+ rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT);
if (status < 0) {
if (status != -ENOENT)
mlog_errno(status);
@@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
}
/* lock id1 */
- status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2);
+ status = ocfs2_inode_lock_nested(inode1, bh1, 1,
+ rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT);
if (status < 0) {
/*
* An error return must mean that no cluster locks
@@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir,
/* if old and new are the same, this'll just do one lock. */
status = ocfs2_double_lock(osb, &old_dir_bh, old_dir,
- &new_dir_bh, new_dir);
+ &new_dir_bh, new_dir, 1);
if (status < 0) {
mlog_errno(status);
goto bail;
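Note on the ocfs2/namei.c hunk above: ocfs2_link() now cluster-locks both the source directory and the target directory through ocfs2_double_lock(), using the same two-inode path as rename (with PARENT rather than RENAME lock classes), so concurrent link and rename on the same directory pair take their locks consistently. The underlying idea is the usual "lock two objects in one fixed global order" pattern; a generic userspace sketch, not the ocfs2 code (which orders by inode and handles ancestry):

    #include <pthread.h>
    #include <stdint.h>

    /* Lock two mutexes in a stable address order; lock once if identical. */
    static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
    {
            if (a == b) {
                    pthread_mutex_lock(a);
                    return;
            }
            if ((uintptr_t)a > (uintptr_t)b) {      /* one global ordering */
                    pthread_mutex_t *tmp = a;
                    a = b;
                    b = tmp;
            }
            pthread_mutex_lock(a);
            pthread_mutex_lock(b);
    }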
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8f0acef3d184..69df5b239844 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -2396,30 +2396,25 @@ static inline qsize_t stoqb(qsize_t space)
}
/* Generic routine for getting common part of quota structure */
-static void do_get_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static void do_get_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
memset(di, 0, sizeof(*di));
- di->d_version = FS_DQUOT_VERSION;
- di->d_flags = dquot->dq_id.type == USRQUOTA ?
- FS_USER_QUOTA : FS_GROUP_QUOTA;
- di->d_id = from_kqid_munged(current_user_ns(), dquot->dq_id);
-
spin_lock(&dq_data_lock);
- di->d_blk_hardlimit = stoqb(dm->dqb_bhardlimit);
- di->d_blk_softlimit = stoqb(dm->dqb_bsoftlimit);
+ di->d_spc_hardlimit = dm->dqb_bhardlimit;
+ di->d_spc_softlimit = dm->dqb_bsoftlimit;
di->d_ino_hardlimit = dm->dqb_ihardlimit;
di->d_ino_softlimit = dm->dqb_isoftlimit;
- di->d_bcount = dm->dqb_curspace + dm->dqb_rsvspace;
- di->d_icount = dm->dqb_curinodes;
- di->d_btimer = dm->dqb_btime;
- di->d_itimer = dm->dqb_itime;
+ di->d_space = dm->dqb_curspace + dm->dqb_rsvspace;
+ di->d_ino_count = dm->dqb_curinodes;
+ di->d_spc_timer = dm->dqb_btime;
+ di->d_ino_timer = dm->dqb_itime;
spin_unlock(&dq_data_lock);
}
int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *di)
+ struct qc_dqblk *di)
{
struct dquot *dquot;
@@ -2433,70 +2428,70 @@ int dquot_get_dqblk(struct super_block *sb, struct kqid qid,
}
EXPORT_SYMBOL(dquot_get_dqblk);
-#define VFS_FS_DQ_MASK \
- (FS_DQ_BCOUNT | FS_DQ_BSOFT | FS_DQ_BHARD | \
- FS_DQ_ICOUNT | FS_DQ_ISOFT | FS_DQ_IHARD | \
- FS_DQ_BTIMER | FS_DQ_ITIMER)
+#define VFS_QC_MASK \
+ (QC_SPACE | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_INO_COUNT | QC_INO_SOFT | QC_INO_HARD | \
+ QC_SPC_TIMER | QC_INO_TIMER)
/* Generic routine for setting common part of quota structure */
-static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
+static int do_set_dqblk(struct dquot *dquot, struct qc_dqblk *di)
{
struct mem_dqblk *dm = &dquot->dq_dqb;
int check_blim = 0, check_ilim = 0;
struct mem_dqinfo *dqi = &sb_dqopt(dquot->dq_sb)->info[dquot->dq_id.type];
- if (di->d_fieldmask & ~VFS_FS_DQ_MASK)
+ if (di->d_fieldmask & ~VFS_QC_MASK)
return -EINVAL;
- if (((di->d_fieldmask & FS_DQ_BSOFT) &&
- (di->d_blk_softlimit > dqi->dqi_maxblimit)) ||
- ((di->d_fieldmask & FS_DQ_BHARD) &&
- (di->d_blk_hardlimit > dqi->dqi_maxblimit)) ||
- ((di->d_fieldmask & FS_DQ_ISOFT) &&
+ if (((di->d_fieldmask & QC_SPC_SOFT) &&
+ stoqb(di->d_spc_softlimit) > dqi->dqi_maxblimit) ||
+ ((di->d_fieldmask & QC_SPC_HARD) &&
+ stoqb(di->d_spc_hardlimit) > dqi->dqi_maxblimit) ||
+ ((di->d_fieldmask & QC_INO_SOFT) &&
(di->d_ino_softlimit > dqi->dqi_maxilimit)) ||
- ((di->d_fieldmask & FS_DQ_IHARD) &&
+ ((di->d_fieldmask & QC_INO_HARD) &&
(di->d_ino_hardlimit > dqi->dqi_maxilimit)))
return -ERANGE;
spin_lock(&dq_data_lock);
- if (di->d_fieldmask & FS_DQ_BCOUNT) {
- dm->dqb_curspace = di->d_bcount - dm->dqb_rsvspace;
+ if (di->d_fieldmask & QC_SPACE) {
+ dm->dqb_curspace = di->d_space - dm->dqb_rsvspace;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_BSOFT)
- dm->dqb_bsoftlimit = qbtos(di->d_blk_softlimit);
- if (di->d_fieldmask & FS_DQ_BHARD)
- dm->dqb_bhardlimit = qbtos(di->d_blk_hardlimit);
- if (di->d_fieldmask & (FS_DQ_BSOFT | FS_DQ_BHARD)) {
+ if (di->d_fieldmask & QC_SPC_SOFT)
+ dm->dqb_bsoftlimit = di->d_spc_softlimit;
+ if (di->d_fieldmask & QC_SPC_HARD)
+ dm->dqb_bhardlimit = di->d_spc_hardlimit;
+ if (di->d_fieldmask & (QC_SPC_SOFT | QC_SPC_HARD)) {
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BLIMITS_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ICOUNT) {
- dm->dqb_curinodes = di->d_icount;
+ if (di->d_fieldmask & QC_INO_COUNT) {
+ dm->dqb_curinodes = di->d_ino_count;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_INODES_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ISOFT)
+ if (di->d_fieldmask & QC_INO_SOFT)
dm->dqb_isoftlimit = di->d_ino_softlimit;
- if (di->d_fieldmask & FS_DQ_IHARD)
+ if (di->d_fieldmask & QC_INO_HARD)
dm->dqb_ihardlimit = di->d_ino_hardlimit;
- if (di->d_fieldmask & (FS_DQ_ISOFT | FS_DQ_IHARD)) {
+ if (di->d_fieldmask & (QC_INO_SOFT | QC_INO_HARD)) {
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ILIMITS_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_BTIMER) {
- dm->dqb_btime = di->d_btimer;
+ if (di->d_fieldmask & QC_SPC_TIMER) {
+ dm->dqb_btime = di->d_spc_timer;
check_blim = 1;
set_bit(DQ_LASTSET_B + QIF_BTIME_B, &dquot->dq_flags);
}
- if (di->d_fieldmask & FS_DQ_ITIMER) {
- dm->dqb_itime = di->d_itimer;
+ if (di->d_fieldmask & QC_INO_TIMER) {
+ dm->dqb_itime = di->d_ino_timer;
check_ilim = 1;
set_bit(DQ_LASTSET_B + QIF_ITIME_B, &dquot->dq_flags);
}
@@ -2506,7 +2501,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
dm->dqb_curspace < dm->dqb_bsoftlimit) {
dm->dqb_btime = 0;
clear_bit(DQ_BLKS_B, &dquot->dq_flags);
- } else if (!(di->d_fieldmask & FS_DQ_BTIMER))
+ } else if (!(di->d_fieldmask & QC_SPC_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_btime = get_seconds() + dqi->dqi_bgrace;
}
@@ -2515,7 +2510,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
dm->dqb_curinodes < dm->dqb_isoftlimit) {
dm->dqb_itime = 0;
clear_bit(DQ_INODES_B, &dquot->dq_flags);
- } else if (!(di->d_fieldmask & FS_DQ_ITIMER))
+ } else if (!(di->d_fieldmask & QC_INO_TIMER))
/* Set grace only if user hasn't provided his own... */
dm->dqb_itime = get_seconds() + dqi->dqi_igrace;
}
@@ -2531,7 +2526,7 @@ static int do_set_dqblk(struct dquot *dquot, struct fs_disk_quota *di)
}
int dquot_set_dqblk(struct super_block *sb, struct kqid qid,
- struct fs_disk_quota *di)
+ struct qc_dqblk *di)
{
struct dquot *dquot;
int rc;
diff --git a/fs/quota/quota.c b/fs/quota/quota.c
index 2aa4151f99d2..6f3856328eea 100644
--- a/fs/quota/quota.c
+++ b/fs/quota/quota.c
@@ -118,17 +118,27 @@ static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
return sb->s_qcop->set_info(sb, type, &info);
}
-static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
+static inline qsize_t qbtos(qsize_t blocks)
+{
+ return blocks << QIF_DQBLKSIZE_BITS;
+}
+
+static inline qsize_t stoqb(qsize_t space)
+{
+ return (space + QIF_DQBLKSIZE - 1) >> QIF_DQBLKSIZE_BITS;
+}
+
+static void copy_to_if_dqblk(struct if_dqblk *dst, struct qc_dqblk *src)
{
memset(dst, 0, sizeof(*dst));
- dst->dqb_bhardlimit = src->d_blk_hardlimit;
- dst->dqb_bsoftlimit = src->d_blk_softlimit;
- dst->dqb_curspace = src->d_bcount;
+ dst->dqb_bhardlimit = stoqb(src->d_spc_hardlimit);
+ dst->dqb_bsoftlimit = stoqb(src->d_spc_softlimit);
+ dst->dqb_curspace = src->d_space;
dst->dqb_ihardlimit = src->d_ino_hardlimit;
dst->dqb_isoftlimit = src->d_ino_softlimit;
- dst->dqb_curinodes = src->d_icount;
- dst->dqb_btime = src->d_btimer;
- dst->dqb_itime = src->d_itimer;
+ dst->dqb_curinodes = src->d_ino_count;
+ dst->dqb_btime = src->d_spc_timer;
+ dst->dqb_itime = src->d_ino_timer;
dst->dqb_valid = QIF_ALL;
}
@@ -136,7 +146,7 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct kqid qid;
- struct fs_disk_quota fdq;
+ struct qc_dqblk fdq;
struct if_dqblk idq;
int ret;
@@ -154,36 +164,36 @@ static int quota_getquota(struct super_block *sb, int type, qid_t id,
return 0;
}
-static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
+static void copy_from_if_dqblk(struct qc_dqblk *dst, struct if_dqblk *src)
{
- dst->d_blk_hardlimit = src->dqb_bhardlimit;
- dst->d_blk_softlimit = src->dqb_bsoftlimit;
- dst->d_bcount = src->dqb_curspace;
+ dst->d_spc_hardlimit = qbtos(src->dqb_bhardlimit);
+ dst->d_spc_softlimit = qbtos(src->dqb_bsoftlimit);
+ dst->d_space = src->dqb_curspace;
dst->d_ino_hardlimit = src->dqb_ihardlimit;
dst->d_ino_softlimit = src->dqb_isoftlimit;
- dst->d_icount = src->dqb_curinodes;
- dst->d_btimer = src->dqb_btime;
- dst->d_itimer = src->dqb_itime;
+ dst->d_ino_count = src->dqb_curinodes;
+ dst->d_spc_timer = src->dqb_btime;
+ dst->d_ino_timer = src->dqb_itime;
dst->d_fieldmask = 0;
if (src->dqb_valid & QIF_BLIMITS)
- dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
+ dst->d_fieldmask |= QC_SPC_SOFT | QC_SPC_HARD;
if (src->dqb_valid & QIF_SPACE)
- dst->d_fieldmask |= FS_DQ_BCOUNT;
+ dst->d_fieldmask |= QC_SPACE;
if (src->dqb_valid & QIF_ILIMITS)
- dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
+ dst->d_fieldmask |= QC_INO_SOFT | QC_INO_HARD;
if (src->dqb_valid & QIF_INODES)
- dst->d_fieldmask |= FS_DQ_ICOUNT;
+ dst->d_fieldmask |= QC_INO_COUNT;
if (src->dqb_valid & QIF_BTIME)
- dst->d_fieldmask |= FS_DQ_BTIMER;
+ dst->d_fieldmask |= QC_SPC_TIMER;
if (src->dqb_valid & QIF_ITIME)
- dst->d_fieldmask |= FS_DQ_ITIMER;
+ dst->d_fieldmask |= QC_INO_TIMER;
}
static int quota_setquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
- struct fs_disk_quota fdq;
+ struct qc_dqblk fdq;
struct if_dqblk idq;
struct kqid qid;
@@ -247,10 +257,78 @@ static int quota_getxstatev(struct super_block *sb, void __user *addr)
return ret;
}
+/*
+ * XFS defines BBTOB and BTOBB macros inside fs/xfs/ and we cannot move them
+ * out of there as xfsprogs rely on definitions being in that header file. So
+ * just define same functions here for quota purposes.
+ */
+#define XFS_BB_SHIFT 9
+
+static inline u64 quota_bbtob(u64 blocks)
+{
+ return blocks << XFS_BB_SHIFT;
+}
+
+static inline u64 quota_btobb(u64 bytes)
+{
+ return (bytes + (1 << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
+}
+
+static void copy_from_xfs_dqblk(struct qc_dqblk *dst, struct fs_disk_quota *src)
+{
+ dst->d_spc_hardlimit = quota_bbtob(src->d_blk_hardlimit);
+ dst->d_spc_softlimit = quota_bbtob(src->d_blk_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_space = quota_bbtob(src->d_bcount);
+ dst->d_ino_count = src->d_icount;
+ dst->d_ino_timer = src->d_itimer;
+ dst->d_spc_timer = src->d_btimer;
+ dst->d_ino_warns = src->d_iwarns;
+ dst->d_spc_warns = src->d_bwarns;
+ dst->d_rt_spc_hardlimit = quota_bbtob(src->d_rtb_hardlimit);
+ dst->d_rt_spc_softlimit = quota_bbtob(src->d_rtb_softlimit);
+ dst->d_rt_space = quota_bbtob(src->d_rtbcount);
+ dst->d_rt_spc_timer = src->d_rtbtimer;
+ dst->d_rt_spc_warns = src->d_rtbwarns;
+ dst->d_fieldmask = 0;
+ if (src->d_fieldmask & FS_DQ_ISOFT)
+ dst->d_fieldmask |= QC_INO_SOFT;
+ if (src->d_fieldmask & FS_DQ_IHARD)
+ dst->d_fieldmask |= QC_INO_HARD;
+ if (src->d_fieldmask & FS_DQ_BSOFT)
+ dst->d_fieldmask |= QC_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_BHARD)
+ dst->d_fieldmask |= QC_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_RTBSOFT)
+ dst->d_fieldmask |= QC_RT_SPC_SOFT;
+ if (src->d_fieldmask & FS_DQ_RTBHARD)
+ dst->d_fieldmask |= QC_RT_SPC_HARD;
+ if (src->d_fieldmask & FS_DQ_BTIMER)
+ dst->d_fieldmask |= QC_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_ITIMER)
+ dst->d_fieldmask |= QC_INO_TIMER;
+ if (src->d_fieldmask & FS_DQ_RTBTIMER)
+ dst->d_fieldmask |= QC_RT_SPC_TIMER;
+ if (src->d_fieldmask & FS_DQ_BWARNS)
+ dst->d_fieldmask |= QC_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_IWARNS)
+ dst->d_fieldmask |= QC_INO_WARNS;
+ if (src->d_fieldmask & FS_DQ_RTBWARNS)
+ dst->d_fieldmask |= QC_RT_SPC_WARNS;
+ if (src->d_fieldmask & FS_DQ_BCOUNT)
+ dst->d_fieldmask |= QC_SPACE;
+ if (src->d_fieldmask & FS_DQ_ICOUNT)
+ dst->d_fieldmask |= QC_INO_COUNT;
+ if (src->d_fieldmask & FS_DQ_RTBCOUNT)
+ dst->d_fieldmask |= QC_RT_SPACE;
+}
+
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
struct kqid qid;
if (copy_from_user(&fdq, addr, sizeof(fdq)))
@@ -260,13 +338,44 @@ static int quota_setxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
- return sb->s_qcop->set_dqblk(sb, qid, &fdq);
+ copy_from_xfs_dqblk(&qdq, &fdq);
+ return sb->s_qcop->set_dqblk(sb, qid, &qdq);
+}
+
+static void copy_to_xfs_dqblk(struct fs_disk_quota *dst, struct qc_dqblk *src,
+ int type, qid_t id)
+{
+ memset(dst, 0, sizeof(*dst));
+ dst->d_version = FS_DQUOT_VERSION;
+ dst->d_id = id;
+ if (type == USRQUOTA)
+ dst->d_flags = FS_USER_QUOTA;
+ else if (type == PRJQUOTA)
+ dst->d_flags = FS_PROJ_QUOTA;
+ else
+ dst->d_flags = FS_GROUP_QUOTA;
+ dst->d_blk_hardlimit = quota_btobb(src->d_spc_hardlimit);
+ dst->d_blk_softlimit = quota_btobb(src->d_spc_softlimit);
+ dst->d_ino_hardlimit = src->d_ino_hardlimit;
+ dst->d_ino_softlimit = src->d_ino_softlimit;
+ dst->d_bcount = quota_btobb(src->d_space);
+ dst->d_icount = src->d_ino_count;
+ dst->d_itimer = src->d_ino_timer;
+ dst->d_btimer = src->d_spc_timer;
+ dst->d_iwarns = src->d_ino_warns;
+ dst->d_bwarns = src->d_spc_warns;
+ dst->d_rtb_hardlimit = quota_btobb(src->d_rt_spc_hardlimit);
+ dst->d_rtb_softlimit = quota_btobb(src->d_rt_spc_softlimit);
+ dst->d_rtbcount = quota_btobb(src->d_rt_space);
+ dst->d_rtbtimer = src->d_rt_spc_timer;
+ dst->d_rtbwarns = src->d_rt_spc_warns;
}
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
void __user *addr)
{
struct fs_disk_quota fdq;
+ struct qc_dqblk qdq;
struct kqid qid;
int ret;
@@ -275,8 +384,11 @@ static int quota_getxquota(struct super_block *sb, int type, qid_t id,
qid = make_kqid(current_user_ns(), type, id);
if (!qid_valid(qid))
return -EINVAL;
- ret = sb->s_qcop->get_dqblk(sb, qid, &fdq);
- if (!ret && copy_to_user(addr, &fdq, sizeof(fdq)))
+ ret = sb->s_qcop->get_dqblk(sb, qid, &qdq);
+ if (ret)
+ return ret;
+ copy_to_xfs_dqblk(&fdq, &qdq, type, id);
+ if (copy_to_user(addr, &fdq, sizeof(fdq)))
return -EFAULT;
return ret;
}
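Note on the quota hunks above: the conversion layer now juggles three space units. struct qc_dqblk carries limits and usage in bytes, struct if_dqblk in 1 KiB quota blocks, and struct fs_disk_quota in 512-byte basic blocks, with qbtos/stoqb and quota_bbtob/quota_btobb translating between them. A compact summary of those conversions, assuming the usual QIF_DQBLKSIZE_BITS value of 10 and the XFS_BB_SHIFT of 9 defined in the hunk:

    #include <stdint.h>

    #define QIF_DQBLKSIZE_BITS 10   /* if_dqblk: 1 KiB quota blocks */
    #define XFS_BB_SHIFT        9   /* fs_disk_quota: 512-byte basic blocks */

    static inline uint64_t qb_to_bytes(uint64_t qb)  { return qb << QIF_DQBLKSIZE_BITS; }
    static inline uint64_t bytes_to_qb(uint64_t b)
    {
            return (b + (1ULL << QIF_DQBLKSIZE_BITS) - 1) >> QIF_DQBLKSIZE_BITS;
    }
    static inline uint64_t bb_to_bytes(uint64_t bb)  { return bb << XFS_BB_SHIFT; }
    static inline uint64_t bytes_to_bb(uint64_t b)
    {
            return (b + (1ULL << XFS_BB_SHIFT) - 1) >> XFS_BB_SHIFT;
    }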
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index a012c51caffd..05e90edd1992 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -57,6 +57,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
sector_t offset;
int i, num, ret = 0;
struct extent_position epos = { NULL, 0, {0, 0} };
+ struct super_block *sb = dir->i_sb;
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
@@ -76,16 +77,16 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
if (nf_pos == 0)
nf_pos = udf_ext0_offset(dir);
- fibh.soffset = fibh.eoffset = nf_pos & (dir->i_sb->s_blocksize - 1);
+ fibh.soffset = fibh.eoffset = nf_pos & (sb->s_blocksize - 1);
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- if (inode_bmap(dir, nf_pos >> dir->i_sb->s_blocksize_bits,
+ if (inode_bmap(dir, nf_pos >> sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset)
!= (EXT_RECORDED_ALLOCATED >> 30)) {
ret = -ENOENT;
goto out;
}
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+ block = udf_get_lb_pblock(sb, &eloc, offset);
+ if ((++offset << sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type ==
@@ -95,18 +96,18 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
offset = 0;
}
- if (!(fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block))) {
+ if (!(fibh.sbh = fibh.ebh = udf_tread(sb, block))) {
ret = -EIO;
goto out;
}
- if (!(offset & ((16 >> (dir->i_sb->s_blocksize_bits - 9)) - 1))) {
- i = 16 >> (dir->i_sb->s_blocksize_bits - 9);
- if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
- i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
+ if (!(offset & ((16 >> (sb->s_blocksize_bits - 9)) - 1))) {
+ i = 16 >> (sb->s_blocksize_bits - 9);
+ if (i + offset > (elen >> sb->s_blocksize_bits))
+ i = (elen >> sb->s_blocksize_bits) - offset;
for (num = 0; i > 0; i--) {
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
- tmp = udf_tgetblk(dir->i_sb, block);
+ block = udf_get_lb_pblock(sb, &eloc, offset + i);
+ tmp = udf_tgetblk(sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
else
@@ -152,12 +153,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi.fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
@@ -167,12 +168,12 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
continue;
}
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
if (!flen)
continue;
tloc = lelb_to_cpu(cfi.icb.extLocation);
- iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
+ iblock = udf_get_lb_pblock(sb, &tloc, 0);
if (!dir_emit(ctx, fname, flen, iblock, DT_UNKNOWN))
goto out;
} /* end while */
diff --git a/fs/udf/file.c b/fs/udf/file.c
index bb15771b92ae..08f3555fbeac 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -224,7 +224,7 @@ out:
static int udf_release_file(struct inode *inode, struct file *filp)
{
if (filp->f_mode & FMODE_WRITE &&
- atomic_read(&inode->i_writecount) > 1) {
+ atomic_read(&inode->i_writecount) == 1) {
/*
* Grab i_mutex to avoid races with writes changing i_size
* while we are running.
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index c9b4df5810d5..5bc71d9a674a 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1489,6 +1489,20 @@ reread:
}
inode->i_generation = iinfo->i_unique;
+ /* Sanity checks for files in ICB so that we don't get confused later */
+ if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
+ /*
+ * For file in ICB data is stored in allocation descriptor
+ * so sizes should match
+ */
+ if (iinfo->i_lenAlloc != inode->i_size)
+ goto out;
+ /* File in ICB has to fit in there... */
+ if (inode->i_size > inode->i_sb->s_blocksize -
+ udf_file_entry_alloc_offset(inode))
+ goto out;
+ }
+
switch (fe->icbTag.fileType) {
case ICBTAG_FILE_TYPE_DIRECTORY:
inode->i_op = &udf_dir_inode_operations;
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c12e260fd6c4..33b246b82c98 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -159,18 +159,19 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
struct udf_inode_info *dinfo = UDF_I(dir);
int isdotdot = child->len == 2 &&
child->name[0] == '.' && child->name[1] == '.';
+ struct super_block *sb = dir->i_sb;
size = udf_ext0_offset(dir) + dir->i_size;
f_pos = udf_ext0_offset(dir);
fibh->sbh = fibh->ebh = NULL;
- fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1);
+ fibh->soffset = fibh->eoffset = f_pos & (sb->s_blocksize - 1);
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
+ if (inode_bmap(dir, f_pos >> sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
goto out_err;
- block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
- if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
+ block = udf_get_lb_pblock(sb, &eloc, offset);
+ if ((++offset << sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
@@ -178,7 +179,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
} else
offset = 0;
- fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block);
+ fibh->sbh = fibh->ebh = udf_tread(sb, block);
if (!fibh->sbh)
goto out_err;
}
@@ -217,12 +218,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
continue;
}
if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) {
- if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE))
+ if (!UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
continue;
}
@@ -233,7 +234,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
if (!lfi)
continue;
- flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
+ flen = udf_get_filename(sb, nameptr, lfi, fname, UDF_NAME_LEN);
if (flen && udf_match(flen, fname, child->len, child->name))
goto out_ok;
}
diff --git a/fs/udf/symlink.c b/fs/udf/symlink.c
index 6fb7945c1e6e..ac10ca939f26 100644
--- a/fs/udf/symlink.c
+++ b/fs/udf/symlink.c
@@ -30,49 +30,73 @@
#include <linux/buffer_head.h>
#include "udf_i.h"
-static void udf_pc_to_char(struct super_block *sb, unsigned char *from,
- int fromlen, unsigned char *to)
+static int udf_pc_to_char(struct super_block *sb, unsigned char *from,
+ int fromlen, unsigned char *to, int tolen)
{
struct pathComponent *pc;
int elen = 0;
+ int comp_len;
unsigned char *p = to;
+ /* Reserve one byte for terminating \0 */
+ tolen--;
while (elen < fromlen) {
pc = (struct pathComponent *)(from + elen);
+ elen += sizeof(struct pathComponent);
switch (pc->componentType) {
case 1:
/*
* Symlink points to some place which should be agreed
* upon between originator and receiver of the media. Ignore.
*/
- if (pc->lengthComponentIdent > 0)
+ if (pc->lengthComponentIdent > 0) {
+ elen += pc->lengthComponentIdent;
break;
+ }
/* Fall through */
case 2:
+ if (tolen == 0)
+ return -ENAMETOOLONG;
p = to;
*p++ = '/';
+ tolen--;
break;
case 3:
+ if (tolen < 3)
+ return -ENAMETOOLONG;
memcpy(p, "../", 3);
p += 3;
+ tolen -= 3;
break;
case 4:
+ if (tolen < 2)
+ return -ENAMETOOLONG;
memcpy(p, "./", 2);
p += 2;
+ tolen -= 2;
/* that would be . - just ignore */
break;
case 5:
- p += udf_get_filename(sb, pc->componentIdent, p,
- pc->lengthComponentIdent);
+ elen += pc->lengthComponentIdent;
+ if (elen > fromlen)
+ return -EIO;
+ comp_len = udf_get_filename(sb, pc->componentIdent,
+ pc->lengthComponentIdent,
+ p, tolen);
+ p += comp_len;
+ tolen -= comp_len;
+ if (tolen == 0)
+ return -ENAMETOOLONG;
*p++ = '/';
+ tolen--;
break;
}
- elen += sizeof(struct pathComponent) + pc->lengthComponentIdent;
}
if (p > to + 1)
p[-1] = '\0';
else
p[0] = '\0';
+ return 0;
}
static int udf_symlink_filler(struct file *file, struct page *page)
@@ -80,11 +104,17 @@ static int udf_symlink_filler(struct file *file, struct page *page)
struct inode *inode = page->mapping->host;
struct buffer_head *bh = NULL;
unsigned char *symlink;
- int err = -EIO;
+ int err;
unsigned char *p = kmap(page);
struct udf_inode_info *iinfo;
uint32_t pos;
+ /* We don't support symlinks longer than one block */
+ if (inode->i_size > inode->i_sb->s_blocksize) {
+ err = -ENAMETOOLONG;
+ goto out_unmap;
+ }
+
iinfo = UDF_I(inode);
pos = udf_block_map(inode, 0);
@@ -94,14 +124,18 @@ static int udf_symlink_filler(struct file *file, struct page *page)
} else {
bh = sb_bread(inode->i_sb, pos);
- if (!bh)
- goto out;
+ if (!bh) {
+ err = -EIO;
+ goto out_unlock_inode;
+ }
symlink = bh->b_data;
}
- udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p);
+ err = udf_pc_to_char(inode->i_sb, symlink, inode->i_size, p, PAGE_SIZE);
brelse(bh);
+ if (err)
+ goto out_unlock_inode;
up_read(&iinfo->i_data_sem);
SetPageUptodate(page);
@@ -109,9 +143,10 @@ static int udf_symlink_filler(struct file *file, struct page *page)
unlock_page(page);
return 0;
-out:
+out_unlock_inode:
up_read(&iinfo->i_data_sem);
SetPageError(page);
+out_unmap:
kunmap(page);
unlock_page(page);
return err;
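Note on the udf/symlink.c hunk above: udf_pc_to_char() now tracks the remaining room in the destination page, reserves one byte up front for the terminating NUL, and returns -ENAMETOOLONG instead of writing past the buffer. Each per-component check reduces to the pattern below (hypothetical helper, not the kernel function):

    #include <string.h>
    #include <errno.h>

    /* Append 'len' bytes to *pp only if they fit in the remaining space;
     * on success advance the cursor and shrink the budget. */
    static int append_bounded(char **pp, int *remaining, const char *s, int len)
    {
            if (len > *remaining)
                    return -ENAMETOOLONG;
            memcpy(*pp, s, len);
            *pp += len;
            *remaining -= len;
            return 0;
    }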
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 1cc3c993ebd0..47bb3f5ca360 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -211,7 +211,8 @@ udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
}
/* unicode.c */
-extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
+extern int udf_get_filename(struct super_block *, uint8_t *, int, uint8_t *,
+ int);
extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
int);
extern int udf_build_ustr(struct ustr *, dstring *, int);
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index afd470e588ff..b84fee372734 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -28,7 +28,8 @@
#include "udf_sb.h"
-static int udf_translate_to_linux(uint8_t *, uint8_t *, int, uint8_t *, int);
+static int udf_translate_to_linux(uint8_t *, int, uint8_t *, int, uint8_t *,
+ int);
static int udf_char_to_ustr(struct ustr *dest, const uint8_t *src, int strlen)
{
@@ -333,8 +334,8 @@ try_again:
return u_len + 1;
}
-int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
- int flen)
+int udf_get_filename(struct super_block *sb, uint8_t *sname, int slen,
+ uint8_t *dname, int dlen)
{
struct ustr *filename, *unifilename;
int len = 0;
@@ -347,7 +348,7 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
if (!unifilename)
goto out1;
- if (udf_build_ustr_exact(unifilename, sname, flen))
+ if (udf_build_ustr_exact(unifilename, sname, slen))
goto out2;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
@@ -366,7 +367,8 @@ int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
} else
goto out2;
- len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
+ len = udf_translate_to_linux(dname, dlen,
+ filename->u_name, filename->u_len,
unifilename->u_name, unifilename->u_len);
out2:
kfree(unifilename);
@@ -403,10 +405,12 @@ int udf_put_filename(struct super_block *sb, const uint8_t *sname,
#define EXT_MARK '.'
#define CRC_MARK '#'
#define EXT_SIZE 5
+/* Number of chars we need to store generated CRC to make filename unique */
+#define CRC_LEN 5
-static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
- int udfLen, uint8_t *fidName,
- int fidNameLen)
+static int udf_translate_to_linux(uint8_t *newName, int newLen,
+ uint8_t *udfName, int udfLen,
+ uint8_t *fidName, int fidNameLen)
{
int index, newIndex = 0, needsCRC = 0;
int extIndex = 0, newExtIndex = 0, hasExt = 0;
@@ -439,7 +443,7 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
newExtIndex = newIndex;
}
}
- if (newIndex < 256)
+ if (newIndex < newLen)
newName[newIndex++] = curr;
else
needsCRC = 1;
@@ -467,13 +471,13 @@ static int udf_translate_to_linux(uint8_t *newName, uint8_t *udfName,
}
ext[localExtIndex++] = curr;
}
- maxFilenameLen = 250 - localExtIndex;
+ maxFilenameLen = newLen - CRC_LEN - localExtIndex;
if (newIndex > maxFilenameLen)
newIndex = maxFilenameLen;
else
newIndex = newExtIndex;
- } else if (newIndex > 250)
- newIndex = 250;
+ } else if (newIndex > newLen - CRC_LEN)
+ newIndex = newLen - CRC_LEN;
newName[newIndex++] = CRC_MARK;
valueCRC = crc_itu_t(0, fidName, fidNameLen);
newName[newIndex++] = hex_asc_upper_hi(valueCRC >> 8);
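Note on the unicode.c hunk above: udf_translate_to_linux() now takes the destination length and always keeps CRC_LEN (five) characters free, enough for the uniquifying suffix of CRC_MARK plus four upper-case hex digits of the CRC over the on-disk name. A sketch of what that suffix costs, assuming a 16-bit CRC as in the hunk:

    #include <stdio.h>
    #include <stdint.h>

    /* Append the '#XXXX' uniquifier; returns the new length (idx + 5). */
    static int append_crc_suffix(char *name, int idx, uint16_t crc)
    {
            return idx + sprintf(name + idx, "#%04X", crc);
    }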
diff --git a/fs/xfs/xfs_qm.h b/fs/xfs/xfs_qm.h
index 3a07a937e232..41f6c0b9d51c 100644
--- a/fs/xfs/xfs_qm.h
+++ b/fs/xfs/xfs_qm.h
@@ -166,9 +166,9 @@ extern void xfs_qm_dqrele_all_inodes(struct xfs_mount *, uint);
/* quota ops */
extern int xfs_qm_scall_trunc_qfiles(struct xfs_mount *, uint);
extern int xfs_qm_scall_getquota(struct xfs_mount *, xfs_dqid_t,
- uint, struct fs_disk_quota *);
+ uint, struct qc_dqblk *);
extern int xfs_qm_scall_setqlim(struct xfs_mount *, xfs_dqid_t, uint,
- struct fs_disk_quota *);
+ struct qc_dqblk *);
extern int xfs_qm_scall_getqstat(struct xfs_mount *,
struct fs_quota_stat *);
extern int xfs_qm_scall_getqstatv(struct xfs_mount *,
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 74fca68e43b6..cb6168ec92c9 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -39,7 +39,6 @@ STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint);
STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *,
uint);
STATIC uint xfs_qm_export_flags(uint);
-STATIC uint xfs_qm_export_qtype_flags(uint);
/*
* Turn off quota accounting and/or enforcement for all udquots and/or
@@ -573,8 +572,8 @@ xfs_qm_scall_getqstatv(
return 0;
}
-#define XFS_DQ_MASK \
- (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK)
+#define XFS_QC_MASK \
+ (QC_LIMIT_MASK | QC_TIMER_MASK | QC_WARNS_MASK)
/*
* Adjust quota limits, and start/stop timers accordingly.
@@ -584,7 +583,7 @@ xfs_qm_scall_setqlim(
struct xfs_mount *mp,
xfs_dqid_t id,
uint type,
- fs_disk_quota_t *newlim)
+ struct qc_dqblk *newlim)
{
struct xfs_quotainfo *q = mp->m_quotainfo;
struct xfs_disk_dquot *ddq;
@@ -593,9 +592,9 @@ xfs_qm_scall_setqlim(
int error;
xfs_qcnt_t hard, soft;
- if (newlim->d_fieldmask & ~XFS_DQ_MASK)
+ if (newlim->d_fieldmask & ~XFS_QC_MASK)
return -EINVAL;
- if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0)
+ if ((newlim->d_fieldmask & XFS_QC_MASK) == 0)
return 0;
/*
@@ -633,11 +632,11 @@ xfs_qm_scall_setqlim(
/*
* Make sure that hardlimits are >= soft limits before changing.
*/
- hard = (newlim->d_fieldmask & FS_DQ_BHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) :
+ hard = (newlim->d_fieldmask & QC_SPC_HARD) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_hardlimit) :
be64_to_cpu(ddq->d_blk_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) :
+ soft = (newlim->d_fieldmask & QC_SPC_SOFT) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_spc_softlimit) :
be64_to_cpu(ddq->d_blk_softlimit);
if (hard == 0 || hard >= soft) {
ddq->d_blk_hardlimit = cpu_to_be64(hard);
@@ -650,11 +649,11 @@ xfs_qm_scall_setqlim(
} else {
xfs_debug(mp, "blkhard %Ld < blksoft %Ld", hard, soft);
}
- hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) :
+ hard = (newlim->d_fieldmask & QC_RT_SPC_HARD) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_hardlimit) :
be64_to_cpu(ddq->d_rtb_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ?
- (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) :
+ soft = (newlim->d_fieldmask & QC_RT_SPC_SOFT) ?
+ (xfs_qcnt_t) XFS_B_TO_FSB(mp, newlim->d_rt_spc_softlimit) :
be64_to_cpu(ddq->d_rtb_softlimit);
if (hard == 0 || hard >= soft) {
ddq->d_rtb_hardlimit = cpu_to_be64(hard);
@@ -667,10 +666,10 @@ xfs_qm_scall_setqlim(
xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld", hard, soft);
}
- hard = (newlim->d_fieldmask & FS_DQ_IHARD) ?
+ hard = (newlim->d_fieldmask & QC_INO_HARD) ?
(xfs_qcnt_t) newlim->d_ino_hardlimit :
be64_to_cpu(ddq->d_ino_hardlimit);
- soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ?
+ soft = (newlim->d_fieldmask & QC_INO_SOFT) ?
(xfs_qcnt_t) newlim->d_ino_softlimit :
be64_to_cpu(ddq->d_ino_softlimit);
if (hard == 0 || hard >= soft) {
@@ -687,12 +686,12 @@ xfs_qm_scall_setqlim(
/*
* Update warnings counter(s) if requested
*/
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns);
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns);
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns);
+ if (newlim->d_fieldmask & QC_SPC_WARNS)
+ ddq->d_bwarns = cpu_to_be16(newlim->d_spc_warns);
+ if (newlim->d_fieldmask & QC_INO_WARNS)
+ ddq->d_iwarns = cpu_to_be16(newlim->d_ino_warns);
+ if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+ ddq->d_rtbwarns = cpu_to_be16(newlim->d_rt_spc_warns);
if (id == 0) {
/*
@@ -702,24 +701,24 @@ xfs_qm_scall_setqlim(
* soft and hard limit values (already done, above), and
* for warnings.
*/
- if (newlim->d_fieldmask & FS_DQ_BTIMER) {
- q->qi_btimelimit = newlim->d_btimer;
- ddq->d_btimer = cpu_to_be32(newlim->d_btimer);
+ if (newlim->d_fieldmask & QC_SPC_TIMER) {
+ q->qi_btimelimit = newlim->d_spc_timer;
+ ddq->d_btimer = cpu_to_be32(newlim->d_spc_timer);
}
- if (newlim->d_fieldmask & FS_DQ_ITIMER) {
- q->qi_itimelimit = newlim->d_itimer;
- ddq->d_itimer = cpu_to_be32(newlim->d_itimer);
+ if (newlim->d_fieldmask & QC_INO_TIMER) {
+ q->qi_itimelimit = newlim->d_ino_timer;
+ ddq->d_itimer = cpu_to_be32(newlim->d_ino_timer);
}
- if (newlim->d_fieldmask & FS_DQ_RTBTIMER) {
- q->qi_rtbtimelimit = newlim->d_rtbtimer;
- ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer);
+ if (newlim->d_fieldmask & QC_RT_SPC_TIMER) {
+ q->qi_rtbtimelimit = newlim->d_rt_spc_timer;
+ ddq->d_rtbtimer = cpu_to_be32(newlim->d_rt_spc_timer);
}
- if (newlim->d_fieldmask & FS_DQ_BWARNS)
- q->qi_bwarnlimit = newlim->d_bwarns;
- if (newlim->d_fieldmask & FS_DQ_IWARNS)
- q->qi_iwarnlimit = newlim->d_iwarns;
- if (newlim->d_fieldmask & FS_DQ_RTBWARNS)
- q->qi_rtbwarnlimit = newlim->d_rtbwarns;
+ if (newlim->d_fieldmask & QC_SPC_WARNS)
+ q->qi_bwarnlimit = newlim->d_spc_warns;
+ if (newlim->d_fieldmask & QC_INO_WARNS)
+ q->qi_iwarnlimit = newlim->d_ino_warns;
+ if (newlim->d_fieldmask & QC_RT_SPC_WARNS)
+ q->qi_rtbwarnlimit = newlim->d_rt_spc_warns;
} else {
/*
* If the user is now over quota, start the timelimit.
@@ -824,7 +823,7 @@ xfs_qm_scall_getquota(
struct xfs_mount *mp,
xfs_dqid_t id,
uint type,
- struct fs_disk_quota *dst)
+ struct qc_dqblk *dst)
{
struct xfs_dquot *dqp;
int error;
@@ -848,28 +847,25 @@ xfs_qm_scall_getquota(
}
memset(dst, 0, sizeof(*dst));
- dst->d_version = FS_DQUOT_VERSION;
- dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags);
- dst->d_id = be32_to_cpu(dqp->q_core.d_id);
- dst->d_blk_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
- dst->d_blk_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
+ dst->d_spc_hardlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit));
+ dst->d_spc_softlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit));
dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit);
dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit);
- dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount);
- dst->d_icount = dqp->q_res_icount;
- dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer);
- dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer);
- dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns);
- dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns);
- dst->d_rtb_hardlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
- dst->d_rtb_softlimit =
- XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
- dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount);
- dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer);
- dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns);
+ dst->d_space = XFS_FSB_TO_B(mp, dqp->q_res_bcount);
+ dst->d_ino_count = dqp->q_res_icount;
+ dst->d_spc_timer = be32_to_cpu(dqp->q_core.d_btimer);
+ dst->d_ino_timer = be32_to_cpu(dqp->q_core.d_itimer);
+ dst->d_ino_warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ dst->d_spc_warns = be16_to_cpu(dqp->q_core.d_bwarns);
+ dst->d_rt_spc_hardlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit));
+ dst->d_rt_spc_softlimit =
+ XFS_FSB_TO_B(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit));
+ dst->d_rt_space = XFS_FSB_TO_B(mp, dqp->q_res_rtbcount);
+ dst->d_rt_spc_timer = be32_to_cpu(dqp->q_core.d_rtbtimer);
+ dst->d_rt_spc_warns = be16_to_cpu(dqp->q_core.d_rtbwarns);
/*
* Internally, we don't reset all the timers when quota enforcement
@@ -882,23 +878,23 @@ xfs_qm_scall_getquota(
dqp->q_core.d_flags == XFS_DQ_GROUP) ||
(!XFS_IS_PQUOTA_ENFORCED(mp) &&
dqp->q_core.d_flags == XFS_DQ_PROJ)) {
- dst->d_btimer = 0;
- dst->d_itimer = 0;
- dst->d_rtbtimer = 0;
+ dst->d_spc_timer = 0;
+ dst->d_ino_timer = 0;
+ dst->d_rt_spc_timer = 0;
}
#ifdef DEBUG
- if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) ||
- (XFS_IS_GQUOTA_ENFORCED(mp) && dst->d_flags == FS_GROUP_QUOTA) ||
- (XFS_IS_PQUOTA_ENFORCED(mp) && dst->d_flags == FS_PROJ_QUOTA)) &&
- dst->d_id != 0) {
- if ((dst->d_bcount > dst->d_blk_softlimit) &&
- (dst->d_blk_softlimit > 0)) {
- ASSERT(dst->d_btimer != 0);
+ if (((XFS_IS_UQUOTA_ENFORCED(mp) && type == XFS_DQ_USER) ||
+ (XFS_IS_GQUOTA_ENFORCED(mp) && type == XFS_DQ_GROUP) ||
+ (XFS_IS_PQUOTA_ENFORCED(mp) && type == XFS_DQ_PROJ)) &&
+ id != 0) {
+ if ((dst->d_space > dst->d_spc_softlimit) &&
+ (dst->d_spc_softlimit > 0)) {
+ ASSERT(dst->d_spc_timer != 0);
}
- if ((dst->d_icount > dst->d_ino_softlimit) &&
+ if ((dst->d_ino_count > dst->d_ino_softlimit) &&
(dst->d_ino_softlimit > 0)) {
- ASSERT(dst->d_itimer != 0);
+ ASSERT(dst->d_ino_timer != 0);
}
}
#endif
@@ -908,26 +904,6 @@ out_put:
}
STATIC uint
-xfs_qm_export_qtype_flags(
- uint flags)
-{
- /*
- * Can't be more than one, or none.
- */
- ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) !=
- (FS_PROJ_QUOTA | FS_USER_QUOTA));
- ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) !=
- (FS_PROJ_QUOTA | FS_GROUP_QUOTA));
- ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) !=
- (FS_USER_QUOTA | FS_GROUP_QUOTA));
- ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0);
-
- return (flags & XFS_DQ_USER) ?
- FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ?
- FS_PROJ_QUOTA : FS_GROUP_QUOTA;
-}
-
-STATIC uint
xfs_qm_export_flags(
uint flags)
{
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index 7542bbeca6a1..801a84c1cdc3 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -131,7 +131,7 @@ STATIC int
xfs_fs_get_dqblk(
struct super_block *sb,
struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *qdq)
{
struct xfs_mount *mp = XFS_M(sb);
@@ -141,14 +141,14 @@ xfs_fs_get_dqblk(
return -ESRCH;
return xfs_qm_scall_getquota(mp, from_kqid(&init_user_ns, qid),
- xfs_quota_type(qid.type), fdq);
+ xfs_quota_type(qid.type), qdq);
}
STATIC int
xfs_fs_set_dqblk(
struct super_block *sb,
struct kqid qid,
- struct fs_disk_quota *fdq)
+ struct qc_dqblk *qdq)
{
struct xfs_mount *mp = XFS_M(sb);
@@ -160,7 +160,7 @@ xfs_fs_set_dqblk(
return -ESRCH;
return xfs_qm_scall_setqlim(mp, from_kqid(&init_user_ns, qid),
- xfs_quota_type(qid.type), fdq);
+ xfs_quota_type(qid.type), qdq);
}
const struct quotactl_ops xfs_quotactl_operations = {
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 3ca9b751f122..b95dc32a6e6b 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -196,8 +196,8 @@ struct acpi_processor_flags {
struct acpi_processor {
acpi_handle handle;
u32 acpi_id;
- u32 apic_id;
- u32 id;
+ u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */
+ u32 id; /* CPU logical ID allocated by OS */
u32 pblk;
int performance_platform_limit;
int throttling_platform_limit;
@@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
#endif /* CONFIG_CPU_FREQ */
/* in processor_core.c */
-int acpi_get_apicid(acpi_handle, int type, u32 acpi_id);
-int acpi_map_cpuid(int apic_id, u32 acpi_id);
+int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id);
+int acpi_map_cpuid(int phys_id, u32 acpi_id);
int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id);
/* in processor_pdc.c */
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 08848050922e..db284bff29dc 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb,
static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
- tlb->start = TASK_SIZE;
- tlb->end = 0;
+ if (tlb->fullmm) {
+ tlb->start = tlb->end = ~0;
+ } else {
+ tlb->start = TASK_SIZE;
+ tlb->end = 0;
+ }
}
/*
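Note on the asm-generic/tlb.h hunk above: when the gather covers a full mm teardown, resetting its range now sets start and end to ~0 instead of an empty range, so the gather still looks like it has something to flush; the normal path keeps resetting to an empty range that later unmaps widen. A tiny sketch of the two reset states, with an assumed, simplified structure rather than the kernel's struct mmu_gather:

    /* Assumed, simplified gather; not the kernel's struct mmu_gather. */
    struct gather {
            unsigned long start, end;
            unsigned int fullmm : 1;
    };

    static void reset_range(struct gather *tlb)
    {
            if (tlb->fullmm) {
                    tlb->start = tlb->end = ~0UL;   /* keep "flush all" */
            } else {
                    tlb->start = ~0UL;              /* empty: start > end */
                    tlb->end = 0;
            }
    }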
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 8ba35c622e22..e1b2e8b98af7 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -901,11 +901,15 @@ extern int drm_vblank_init(struct drm_device *dev, int num_crtcs);
extern int drm_wait_vblank(struct drm_device *dev, void *data,
struct drm_file *filp);
extern u32 drm_vblank_count(struct drm_device *dev, int crtc);
+extern u32 drm_crtc_vblank_count(struct drm_crtc *crtc);
extern u32 drm_vblank_count_and_time(struct drm_device *dev, int crtc,
struct timeval *vblanktime);
extern void drm_send_vblank_event(struct drm_device *dev, int crtc,
struct drm_pending_vblank_event *e);
+extern void drm_crtc_send_vblank_event(struct drm_crtc *crtc,
+ struct drm_pending_vblank_event *e);
extern bool drm_handle_vblank(struct drm_device *dev, int crtc);
+extern bool drm_crtc_handle_vblank(struct drm_crtc *crtc);
extern int drm_vblank_get(struct drm_device *dev, int crtc);
extern void drm_vblank_put(struct drm_device *dev, int crtc);
extern int drm_crtc_vblank_get(struct drm_crtc *crtc);
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 780511a459c0..1e6ae1458f7a 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -119,13 +119,6 @@ struct drm_gem_object {
* simply leave it as NULL.
*/
struct dma_buf_attachment *import_attach;
-
- /**
- * dumb - created as dumb buffer
- * Whether the gem object was created using the dumb buffer interface
- * as such it may not be used for GPU rendering.
- */
- bool dumb;
};
void drm_gem_object_release(struct drm_gem_object *obj);
diff --git a/include/dt-bindings/interrupt-controller/arm-gic.h b/include/dt-bindings/interrupt-controller/arm-gic.h
index 1ea1b702fec2..d4110d5caa3e 100644
--- a/include/dt-bindings/interrupt-controller/arm-gic.h
+++ b/include/dt-bindings/interrupt-controller/arm-gic.h
@@ -7,14 +7,14 @@
#include <dt-bindings/interrupt-controller/irq.h>
-/* interrupt specific cell 0 */
+/* interrupt specifier cell 0 */
#define GIC_SPI 0
#define GIC_PPI 1
/*
* Interrupt specifier cell 2.
- * The flaggs in irq.h are valid, plus those below.
+ * The flags in irq.h are valid, plus those below.
*/
#define GIC_CPU_MASK_RAW(x) ((x) << 8)
#define GIC_CPU_MASK_SIMPLE(num) GIC_CPU_MASK_RAW((1 << (num)) - 1)
diff --git a/include/dt-bindings/thermal/thermal.h b/include/dt-bindings/thermal/thermal.h
index 59822a995858..b5e6b0069ac7 100644
--- a/include/dt-bindings/thermal/thermal.h
+++ b/include/dt-bindings/thermal/thermal.h
@@ -11,7 +11,7 @@
#define _DT_BINDINGS_THERMAL_THERMAL_H
/* On cooling devices upper and lower limits */
-#define THERMAL_NO_LIMIT (-1UL)
+#define THERMAL_NO_LIMIT (~0)
#endif
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 856d381b1d5b..d459cd17b477 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void);
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
-int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu);
-int acpi_unmap_lsapic(int cpu);
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu);
+int acpi_unmap_cpu(int cpu);
#endif /* CONFIG_ACPI_HOTPLUG_CPU */
int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base);
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 0c04917c2f12..af84234e1f6e 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -47,6 +47,7 @@ struct sk_buff;
struct audit_krule {
int vers_ops;
+ u32 pflags;
u32 flags;
u32 listnr;
u32 action;
@@ -64,6 +65,9 @@ struct audit_krule {
u64 prio;
};
+/* Flag to indicate legacy AUDIT_LOGINUID unset usage */
+#define AUDIT_LOGINUID_LEGACY 0x1
+
struct audit_field {
u32 type;
union {
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 8aded9ab2e4e..5735e7130d63 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -34,7 +34,6 @@ struct blk_mq_hw_ctx {
unsigned long flags; /* BLK_MQ_F_* flags */
struct request_queue *queue;
- unsigned int queue_num;
struct blk_flush_queue *fq;
void *driver_data;
@@ -54,7 +53,7 @@ struct blk_mq_hw_ctx {
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
unsigned int numa_node;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int queue_num;
atomic_t nr_active;
@@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+int blk_mq_request_started(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
void __blk_mq_end_request(struct request *rq, int error);
void blk_mq_requeue_request(struct request *rq);
void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_cancel_requeue_work(struct request_queue *q);
void blk_mq_kick_requeue_list(struct request_queue *q);
+void blk_mq_abort_requeue_list(struct request_queue *q);
void blk_mq_complete_request(struct request *rq);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn,
void *priv);
+void blk_mq_unfreeze_queue(struct request_queue *q);
+void blk_mq_freeze_queue_start(struct request_queue *q);
/*
* Driver command data is immediately after the request. So subtract request
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 445d59231bc4..c294e3e25e37 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
__REQ_PM, /* runtime pm request */
__REQ_HASHED, /* on IO scheduler merge hash */
__REQ_MQ_INFLIGHT, /* track inflight for MQ */
+ __REQ_NO_TIMEOUT, /* requests may never expire */
__REQ_NR_BITS, /* stops here */
};
@@ -243,5 +244,6 @@ enum rq_flag_bits {
#define REQ_PM (1ULL << __REQ_PM)
#define REQ_HASHED (1ULL << __REQ_HASHED)
#define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT)
+#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT)
#endif /* __LINUX_BLK_TYPES_H */
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 5d86416d35f2..61b19c46bdb3 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -87,8 +87,8 @@ struct ceph_osd_req_op {
struct ceph_osd_data osd_data;
} extent;
struct {
- __le32 name_len;
- __le32 value_len;
+ u32 name_len;
+ u32 value_len;
__u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */
__u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */
struct ceph_osd_data osd_data;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index a1c81f80978e..33063f872ee3 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si
}
}
-static __always_inline void __assign_once_size(volatile void *p, void *res, int size)
+static __always_inline void __write_once_size(volatile void *p, void *res, int size)
{
switch (size) {
case 1: *(volatile __u8 *)p = *(__u8 *)res; break;
@@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
/*
* Prevent the compiler from merging or refetching reads or writes. The
* compiler is also forbidden from reordering successive instances of
- * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the
+ * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
* compiler is aware of some particular ordering. One way to make the
* compiler aware of ordering is to put the two invocations of READ_ONCE,
- * ASSIGN_ONCE or ACCESS_ONCE() in different C statements.
+ * WRITE_ONCE or ACCESS_ONCE() in different C statements.
*
* In contrast to ACCESS_ONCE these two macros will also work on aggregate
* data types like structs or unions. If the size of the accessed data
* type exceeds the word size of the machine (e.g., 32 bits or 64 bits)
- * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a
+ * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
* compile-time warning.
*
* Their two major use cases are: (1) Mediating communication between
@@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int
#define READ_ONCE(x) \
({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; })
-#define ASSIGN_ONCE(val, x) \
- ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; })
+#define WRITE_ONCE(x, val) \
+ ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; })
#endif /* __KERNEL__ */
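As a reader aid, a minimal lockless producer/consumer pair using the renamed macros might look like the hypothetical sketch below; the only point is that the write side now reads WRITE_ONCE(x, val) with the destination first, where it previously read ASSIGN_ONCE(val, x).

/* Hypothetical example of the new naming and argument order */
static int shared_flag;

static void producer(void)
{
	WRITE_ONCE(shared_flag, 1);	/* was: ASSIGN_ONCE(1, shared_flag) */
}

static int consumer(void)
{
	return READ_ONCE(shared_flag);	/* unchanged by this patch */
}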
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index c303d383def1..bd955270d5aa 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -50,7 +50,7 @@ static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
#endif
@@ -65,13 +65,13 @@ unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
static inline struct thermal_cooling_device *
cpufreq_cooling_register(const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct device_node *np,
const struct cpumask *clip_cpus)
{
- return NULL;
+ return ERR_PTR(-ENOSYS);
}
static inline
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
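Because the stubs now return ERR_PTR(-ENOSYS) instead of NULL, callers are expected to test the result with IS_ERR() rather than a NULL check. A sketch of the caller side, where np and dev stand in for a thermal driver's probe-time device node and device (both assumed, not taken from this patch):

	struct thermal_cooling_device *cdev;

	cdev = of_cpufreq_cooling_register(np, cpu_present_mask);
	if (IS_ERR(cdev)) {
		dev_warn(dev, "cpufreq cooling unavailable: %ld\n",
			 PTR_ERR(cdev));
		cdev = NULL;	/* carry on without a cooling device */
	}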
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index a07e087f54b2..ab70f3bc44ad 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -53,7 +53,6 @@ struct cpuidle_state {
};
/* Idle State Flags */
-#define CPUIDLE_FLAG_TIME_INVALID (0x01) /* is residency time measurable? */
#define CPUIDLE_FLAG_COUPLED (0x02) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP (0x04) /* timer is stopped on this state */
@@ -89,8 +88,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
/**
* cpuidle_get_last_residency - retrieves the last state's residency time
* @dev: the target CPU
- *
- * NOTE: this value is invalid if CPUIDLE_FLAG_TIME_INVALID is set
*/
static inline int cpuidle_get_last_residency(struct cpuidle_device *dev)
{
diff --git a/include/linux/fs.h b/include/linux/fs.h
index f90c0282c114..42efe13077b6 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
/* File was opened by fanotify and shouldn't generate fanotify events */
-#define FMODE_NONOTIFY ((__force fmode_t)0x1000000)
+#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 55b685719d52..09460d6d6682 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -11,6 +11,10 @@ extern void genl_unlock(void);
extern int lockdep_genl_is_held(void);
#endif
+/* for synchronisation between af_netlink and genetlink */
+extern atomic_t genl_sk_destructing_cnt;
+extern wait_queue_head_t genl_sk_destructing_waitq;
+
/**
* rcu_dereference_genl - rcu_dereference with debug checking
* @p: The pointer to read, prior to dereferencing
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index e3a1721c8354..7c7695940ddd 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -228,7 +228,9 @@ struct i2c_client {
struct device dev; /* the device structure */
int irq; /* irq issued by device */
struct list_head detected;
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
i2c_slave_cb_t slave_cb; /* callback for slave mode */
+#endif
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
@@ -253,6 +255,7 @@ static inline void i2c_set_clientdata(struct i2c_client *dev, void *data)
/* I2C slave support */
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
enum i2c_slave_event {
I2C_SLAVE_REQ_READ_START,
I2C_SLAVE_REQ_READ_END,
@@ -269,6 +272,7 @@ static inline int i2c_slave_event(struct i2c_client *client,
{
return client->slave_cb(client, event, val);
}
+#endif
/**
* struct i2c_board_info - template for device creation
@@ -404,8 +408,10 @@ struct i2c_algorithm {
/* To determine what the adapter supports */
u32 (*functionality) (struct i2c_adapter *);
+#if IS_ENABLED(CONFIG_I2C_SLAVE)
int (*reg_slave)(struct i2c_client *client);
int (*unreg_slave)(struct i2c_client *client);
+#endif
};
/**
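To make the new slave hooks concrete, a minimal backend callback could look like the sketch below. The function name is invented, only the two event values visible in this hunk are used, and the helper that installs the callback into client->slave_cb is assumed to exist elsewhere in this series rather than shown here.

static int my_slave_cb(struct i2c_client *client,
		       enum i2c_slave_event event, u8 *val)
{
	switch (event) {
	case I2C_SLAVE_REQ_READ_START:
		*val = 0x42;		/* first byte the master will read */
		break;
	case I2C_SLAVE_REQ_READ_END:
		break;			/* byte was shifted out, nothing to do */
	default:
		break;
	}
	return 0;
}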
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 290db1269c4c..75ae2e2631fc 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -13,11 +13,54 @@
* Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com>
*/
+/* Shifted versions of the command enable bits are used if the command
+ * has no arguments (see kdb_check_flags). This allows commands, such as
+ * go, to have different permissions depending upon whether it is called
+ * with an argument.
+ */
+#define KDB_ENABLE_NO_ARGS_SHIFT 10
+
typedef enum {
- KDB_REPEAT_NONE = 0, /* Do not repeat this command */
- KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */
- KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */
-} kdb_repeat_t;
+ KDB_ENABLE_ALL = (1 << 0), /* Enable everything */
+ KDB_ENABLE_MEM_READ = (1 << 1),
+ KDB_ENABLE_MEM_WRITE = (1 << 2),
+ KDB_ENABLE_REG_READ = (1 << 3),
+ KDB_ENABLE_REG_WRITE = (1 << 4),
+ KDB_ENABLE_INSPECT = (1 << 5),
+ KDB_ENABLE_FLOW_CTRL = (1 << 6),
+ KDB_ENABLE_SIGNAL = (1 << 7),
+ KDB_ENABLE_REBOOT = (1 << 8),
+ /* User exposed values stop here, all remaining flags are
+ * exclusively used to describe a command's behaviour.
+ */
+
+ KDB_ENABLE_ALWAYS_SAFE = (1 << 9),
+ KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1,
+
+ KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE
+ << KDB_ENABLE_NO_ARGS_SHIFT,
+ KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT,
+
+ KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */
+ KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */
+} kdb_cmdflags_t;
typedef int (*kdb_func_t)(int, const char **);
@@ -62,6 +105,7 @@ extern atomic_t kdb_event;
#define KDB_BADLENGTH (-19)
#define KDB_NOBP (-20)
#define KDB_BADADDR (-21)
+#define KDB_NOPERM (-22)
/*
* kdb_diemsg
@@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos)
/* Dynamic kdb shell command registration */
extern int kdb_register(char *, kdb_func_t, char *, char *, short);
-extern int kdb_register_repeat(char *, kdb_func_t, char *, char *,
- short, kdb_repeat_t);
+extern int kdb_register_flags(char *, kdb_func_t, char *, char *,
+ short, kdb_cmdflags_t);
extern int kdb_unregister(char *);
#else /* ! CONFIG_KGDB_KDB */
static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage,
- char *help, short minlen,
- kdb_repeat_t repeat) { return 0; }
+static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage,
+ char *help, short minlen,
+ kdb_cmdflags_t flags) { return 0; }
static inline int kdb_unregister(char *cmd) { return 0; }
#endif /* CONFIG_KGDB_KDB */
enum {
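Since the flags replace the old kdb_repeat_t argument, a converted call site passes a permission class (optionally its *_NO_ARGS variant) where it used to pass a repeat mode. A hedged example with a made-up command:

/* Hypothetical command registration under the new flag scheme */
static int kdb_hello(int argc, const char **argv)
{
	kdb_printf("hello from kdb\n");
	return 0;
}

static int __init kdb_hello_init(void)
{
	return kdb_register_flags("hello", kdb_hello, "",
				  "print a greeting", 0,
				  KDB_ENABLE_ALWAYS_SAFE);
}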
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 5449d2f4a1ef..64ce58bee6f5 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -176,7 +176,7 @@ extern int _cond_resched(void);
*/
# define might_sleep() \
do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0)
-# define sched_annotate_sleep() __set_current_state(TASK_RUNNING)
+# define sched_annotate_sleep() (current->task_state_change = 0)
#else
static inline void ___might_sleep(const char *file, int line,
int preempt_offset) { }
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 2d182413b1db..91f705de2c0b 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -231,6 +231,7 @@ enum {
ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity
* led */
ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */
+ ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */
/* bits 24:31 of ap->flags are reserved for LLD specific flags */
@@ -422,6 +423,7 @@ enum {
ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */
ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
+ ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/mfd/samsung/s2mps13.h b/include/linux/mfd/samsung/s2mps13.h
index ce5dda8958fe..b1fd675fa36f 100644
--- a/include/linux/mfd/samsung/s2mps13.h
+++ b/include/linux/mfd/samsung/s2mps13.h
@@ -59,6 +59,7 @@ enum s2mps13_reg {
S2MPS13_REG_B6CTRL,
S2MPS13_REG_B6OUT,
S2MPS13_REG_B7CTRL,
+ S2MPS13_REG_B7SW,
S2MPS13_REG_B7OUT,
S2MPS13_REG_B8CTRL,
S2MPS13_REG_B8OUT,
@@ -102,6 +103,7 @@ enum s2mps13_reg {
S2MPS13_REG_L26CTRL,
S2MPS13_REG_L27CTRL,
S2MPS13_REG_L28CTRL,
+ S2MPS13_REG_L29CTRL,
S2MPS13_REG_L30CTRL,
S2MPS13_REG_L31CTRL,
S2MPS13_REG_L32CTRL,
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 575a86c7fcbd..f742b6717d52 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -50,6 +50,8 @@ enum {
STMPE_IDX_GPEDR_MSB,
STMPE_IDX_GPRER_LSB,
STMPE_IDX_GPFER_LSB,
+ STMPE_IDX_GPPUR_LSB,
+ STMPE_IDX_GPPDR_LSB,
STMPE_IDX_GPAFR_U_MSB,
STMPE_IDX_IEGPIOR_LSB,
STMPE_IDX_ISGPIOR_LSB,
@@ -113,24 +115,6 @@ extern int stmpe_set_altfunc(struct stmpe *stmpe, u32 pins,
extern int stmpe_enable(struct stmpe *stmpe, unsigned int blocks);
extern int stmpe_disable(struct stmpe *stmpe, unsigned int blocks);
-struct matrix_keymap_data;
-
-/**
- * struct stmpe_keypad_platform_data - STMPE keypad platform data
- * @keymap_data: key map table and size
- * @debounce_ms: debounce interval, in ms. Maximum is
- * %STMPE_KEYPAD_MAX_DEBOUNCE.
- * @scan_count: number of key scanning cycles to confirm key data.
- * Maximum is %STMPE_KEYPAD_MAX_SCAN_COUNT.
- * @no_autorepeat: disable key autorepeat
- */
-struct stmpe_keypad_platform_data {
- const struct matrix_keymap_data *keymap_data;
- unsigned int debounce_ms;
- unsigned int scan_count;
- bool no_autorepeat;
-};
-
#define STMPE_GPIO_NOREQ_811_TOUCH (0xf0)
/**
@@ -199,7 +183,6 @@ struct stmpe_ts_platform_data {
* @irq_gpio: gpio number over which irq will be requested (significant only if
* irq_over_gpio is true)
* @gpio: GPIO-specific platform data
- * @keypad: keypad-specific platform data
* @ts: touchscreen-specific platform data
*/
struct stmpe_platform_data {
@@ -212,7 +195,6 @@ struct stmpe_platform_data {
int autosleep_timeout;
struct stmpe_gpio_platform_data *gpio;
- struct stmpe_keypad_platform_data *keypad;
struct stmpe_ts_platform_data *ts;
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f80d0194c9bc..dd5ea3016fc4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1070,6 +1070,7 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */
#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */
#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. Index encoded in upper bits */
+#define VM_FAULT_SIGSEGV 0x0040
#define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */
#define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */
@@ -1078,8 +1079,9 @@ static inline int page_mapped(struct page *page)
#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
-#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
- VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
+#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | \
+ VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE | \
+ VM_FAULT_FALLBACK)
/* Encode hstate index for a hwpoisoned large page */
#define VM_FAULT_SET_HINDEX(x) ((x) << 12)
@@ -1952,7 +1954,7 @@ extern int expand_downwards(struct vm_area_struct *vma,
#if VM_GROWSUP
extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#else
- #define expand_upwards(vma, address) do { } while (0)
+ #define expand_upwards(vma, address) (0)
#endif
/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
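The new bit is folded into VM_FAULT_ERROR, and arch page-fault handlers are expected to map it to a segfault rather than a bus error. Roughly the following shape, shown only as a sketch; the goto targets belong to the surrounding arch handler and are not part of this patch:

	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGSEGV)
			goto bad_area;		/* deliver SIGSEGV */
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}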
diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h
index 375af80bde7d..f767a0de611f 100644
--- a/include/linux/mmc/sdhci.h
+++ b/include/linux/mmc/sdhci.h
@@ -137,6 +137,7 @@ struct sdhci_host {
#define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */
#define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */
#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */
+#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */
unsigned int version; /* SDHCI spec. version */
diff --git a/include/linux/module.h b/include/linux/module.h
index ebfb0e153c6a..b653d7c0a05a 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -444,7 +444,7 @@ extern void __module_put_and_exit(struct module *mod, long code)
#define module_put_and_exit(code) __module_put_and_exit(THIS_MODULE, code)
#ifdef CONFIG_MODULE_UNLOAD
-unsigned long module_refcount(struct module *mod);
+int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
void symbol_put_addr(void *addr);
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 7eeb9bbfb816..f7556261fe3c 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -26,7 +26,7 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
void *module_alloc(unsigned long size);
/* Free memory returned from module_alloc. */
-void module_free(struct module *mod, void *module_region);
+void module_memfree(void *module_region);
/*
* Apply the given relocation to the (simplified) ELF. Return -error
@@ -82,4 +82,6 @@ int module_finalize(const Elf_Ehdr *hdr,
/* Any cleanup needed when module leaves. */
void module_arch_cleanup(struct module *mod);
+/* Any cleanup before freeing mod->module_init */
+void module_arch_freeing_init(struct module *mod);
#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c31f74d76ebd..52fd8e8694cf 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* 3. Update dev->stats asynchronously and atomically, and define
* neither operation.
*
- * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid);
+ * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device support VLAN filtering this function is called when a
* VLAN id is registered.
*
- * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid);
+ * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid);
* If device support VLAN filtering this function is called when a
* VLAN id is unregistered.
*
@@ -1012,12 +1012,15 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* Callback to use for xmit over the accelerated station. This
* is used in place of ndo_start_xmit on accelerated net
* devices.
- * bool (*ndo_gso_check) (struct sk_buff *skb,
- * struct net_device *dev);
+ * netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ * struct net_device *dev
+ * netdev_features_t features);
* Called by core transmit path to determine if device is capable of
- * performing GSO on a packet. The device returns true if it is
- * able to GSO the packet, false otherwise. If the return value is
- * false the stack will do software GSO.
+ * performing offload operations on a given packet. This is to give
+ * the device an opportunity to implement any restrictions that cannot
+ * be otherwise expressed by feature flags. The check is called with
+ * the set of features that the stack has calculated and it returns
+ * those the driver believes to be appropriate.
*
* int (*ndo_switch_parent_id_get)(struct net_device *dev,
* struct netdev_phys_item_id *psid);
@@ -1178,8 +1181,9 @@ struct net_device_ops {
struct net_device *dev,
void *priv);
int (*ndo_get_lock_subclass)(struct net_device *dev);
- bool (*ndo_gso_check) (struct sk_buff *skb,
- struct net_device *dev);
+ netdev_features_t (*ndo_features_check) (struct sk_buff *skb,
+ struct net_device *dev,
+ netdev_features_t features);
#ifdef CONFIG_NET_SWITCHDEV
int (*ndo_switch_parent_id_get)(struct net_device *dev,
struct netdev_phys_item_id *psid);
@@ -2081,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
for_each_netdev_rcu(&init_net, slave) \
- if (netdev_master_upper_dev_get_rcu(slave) == bond)
+ if (netdev_master_upper_dev_get_rcu(slave) == (bond))
#define net_device_entry(lh) list_entry(lh, struct net_device, dev_list)
static inline struct net_device *next_net_device(struct net_device *dev)
@@ -3611,8 +3615,6 @@ static inline bool netif_needs_gso(struct net_device *dev, struct sk_buff *skb,
netdev_features_t features)
{
return skb_is_gso(skb) && (!skb_gso_ok(skb, features) ||
- (dev->netdev_ops->ndo_gso_check &&
- !dev->netdev_ops->ndo_gso_check(skb, dev)) ||
unlikely((skb->ip_summed != CHECKSUM_PARTIAL) &&
(skb->ip_summed != CHECKSUM_UNNECESSARY)));
}
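For a driver author, the shape of the new callback is roughly the sketch below; the driver name and the particular restriction are invented for illustration. Whatever mask the callback returns is what the stack will use for this skb.

static netdev_features_t foo_features_check(struct sk_buff *skb,
					     struct net_device *dev,
					     netdev_features_t features)
{
	/* Hypothetical hardware limit: no offloads for encapsulated frames */
	if (skb->encapsulation)
		features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
	return features;
}

static const struct net_device_ops foo_netdev_ops = {
	.ndo_features_check	= foo_features_check,
};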
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 9e572daa15d5..02fc86d2348e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -46,8 +46,8 @@ struct netlink_kernel_cfg {
unsigned int flags;
void (*input)(struct sk_buff *skb);
struct mutex *cb_mutex;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sk);
};
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 1e37fbb78f7a..ddea982355f3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -74,6 +74,9 @@ struct nfs_client {
/* idmapper */
struct idmap * cl_idmap;
+ /* Client owner identifier */
+ const char * cl_owner_id;
+
/* Our own IP address, as a null-terminated string.
* This is used to generate the mv0 callback address.
*/
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 853698c721f7..76200984d1e2 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -85,11 +85,6 @@ static inline void oom_killer_enable(void)
oom_killer_disabled = false;
}
-static inline bool oom_gfp_allowed(gfp_t gfp_mask)
-{
- return (gfp_mask & __GFP_FS) && !(gfp_mask & __GFP_NORETRY);
-}
-
extern struct task_struct *find_lock_task_mm(struct task_struct *p);
static inline bool task_will_free_mem(struct task_struct *task)
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 7ea069cd3257..4b3736f7065c 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -251,7 +251,7 @@ pgoff_t page_cache_prev_hole(struct address_space *mapping,
#define FGP_NOWAIT 0x00000020
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask);
+ int fgp_flags, gfp_t cache_gfp_mask);
/**
* find_get_page - find and get a page reference
@@ -266,13 +266,13 @@ struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
static inline struct page *find_get_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, 0, 0, 0);
+ return pagecache_get_page(mapping, offset, 0, 0);
}
static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags)
{
- return pagecache_get_page(mapping, offset, fgp_flags, 0, 0);
+ return pagecache_get_page(mapping, offset, fgp_flags, 0);
}
/**
@@ -292,7 +292,7 @@ static inline struct page *find_get_page_flags(struct address_space *mapping,
static inline struct page *find_lock_page(struct address_space *mapping,
pgoff_t offset)
{
- return pagecache_get_page(mapping, offset, FGP_LOCK, 0, 0);
+ return pagecache_get_page(mapping, offset, FGP_LOCK, 0);
}
/**
@@ -319,7 +319,7 @@ static inline struct page *find_or_create_page(struct address_space *mapping,
{
return pagecache_get_page(mapping, offset,
FGP_LOCK|FGP_ACCESSED|FGP_CREAT,
- gfp_mask, gfp_mask & GFP_RECLAIM_MASK);
+ gfp_mask);
}
/**
@@ -340,8 +340,7 @@ static inline struct page *grab_cache_page_nowait(struct address_space *mapping,
{
return pagecache_get_page(mapping, index,
FGP_LOCK|FGP_CREAT|FGP_NOFS|FGP_NOWAIT,
- mapping_gfp_mask(mapping),
- GFP_NOFS);
+ mapping_gfp_mask(mapping));
}
struct page *find_get_entry(struct address_space *mapping, pgoff_t offset);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 360a966a97a5..9603094ed59b 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -175,6 +175,8 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4),
/* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */
PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5),
+ /* Do not use bus resets for device */
+ PCI_DEV_FLAGS_NO_BUS_RESET = (__force pci_dev_flags_t) (1 << 6),
};
enum pci_irq_reroute_variant {
@@ -1065,6 +1067,7 @@ resource_size_t pcibios_retrieve_fw_addr(struct pci_dev *dev, int idx);
void pci_bus_assign_resources(const struct pci_bus *bus);
void pci_bus_size_bridges(struct pci_bus *bus);
int pci_claim_resource(struct pci_dev *, int);
+int pci_claim_bridge_resource(struct pci_dev *bridge, int i);
void pci_assign_unassigned_resources(void);
void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge);
void pci_assign_unassigned_bus_resources(struct pci_bus *bus);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 486e84ccb1f9..664de5a4ec46 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -79,11 +79,6 @@ struct perf_branch_stack {
struct perf_branch_entry entries[0];
};
-struct perf_regs {
- __u64 abi;
- struct pt_regs *regs;
-};
-
struct task_struct;
/*
@@ -455,11 +450,6 @@ struct perf_event {
#endif /* CONFIG_PERF_EVENTS */
};
-enum perf_event_context_type {
- task_context,
- cpu_context,
-};
-
/**
* struct perf_event_context - event context structure
*
@@ -467,7 +457,6 @@ enum perf_event_context_type {
*/
struct perf_event_context {
struct pmu *pmu;
- enum perf_event_context_type type;
/*
* Protect the states of the events in the list,
* nr_active, and the list:
@@ -610,7 +599,14 @@ struct perf_sample_data {
u32 reserved;
} cpu_entry;
struct perf_callchain_entry *callchain;
+
+ /*
+ * regs_user may point to task_pt_regs or to regs_user_copy, depending
+ * on arch details.
+ */
struct perf_regs regs_user;
+ struct pt_regs regs_user_copy;
+
struct perf_regs regs_intr;
u64 stack_user_size;
} ____cacheline_aligned;
diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h
index 3c73d5fe18be..a5f98d53d732 100644
--- a/include/linux/perf_regs.h
+++ b/include/linux/perf_regs.h
@@ -1,11 +1,19 @@
#ifndef _LINUX_PERF_REGS_H
#define _LINUX_PERF_REGS_H
+struct perf_regs {
+ __u64 abi;
+ struct pt_regs *regs;
+};
+
#ifdef CONFIG_HAVE_PERF_REGS
#include <asm/perf_regs.h>
u64 perf_reg_value(struct pt_regs *regs, int idx);
int perf_reg_validate(u64 mask);
u64 perf_reg_abi(struct task_struct *task);
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy);
#else
static inline u64 perf_reg_value(struct pt_regs *regs, int idx)
{
@@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task)
{
return PERF_SAMPLE_REGS_ABI_NONE;
}
+
+static inline void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
#endif /* CONFIG_HAVE_PERF_REGS */
#endif /* _LINUX_PERF_REGS_H */
diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h
index e9e6cfbfbb58..eb7d4a135a9e 100644
--- a/include/linux/phy/omap_control_phy.h
+++ b/include/linux/phy/omap_control_phy.h
@@ -66,7 +66,7 @@ enum omap_control_usb_mode {
#define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0
#define OMAP_CTRL_PCIE_PCS_MASK 0xff
-#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8
+#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16
#define OMAP_CTRL_USB2_PHY_PD BIT(28)
@@ -79,7 +79,7 @@ enum omap_control_usb_mode {
void omap_control_phy_power(struct device *dev, int on);
void omap_control_usb_set_mode(struct device *dev,
enum omap_control_usb_mode mode);
-void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay);
+void omap_control_pcie_pcs(struct device *dev, u8 delay);
#else
static inline void omap_control_phy_power(struct device *dev, int on)
@@ -91,7 +91,7 @@ static inline void omap_control_usb_set_mode(struct device *dev,
{
}
-static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay)
+static inline void omap_control_pcie_pcs(struct device *dev, u8 delay)
{
}
#endif
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 6cd20d5e651b..a9edab2c787a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -271,6 +271,8 @@ typedef struct generic_pm_domain *(*genpd_xlate_t)(struct of_phandle_args *args,
int __of_genpd_add_provider(struct device_node *np, genpd_xlate_t xlate,
void *data);
void of_genpd_del_provider(struct device_node *np);
+struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec);
struct generic_pm_domain *__of_genpd_xlate_simple(
struct of_phandle_args *genpdspec,
@@ -288,6 +290,12 @@ static inline int __of_genpd_add_provider(struct device_node *np,
}
static inline void of_genpd_del_provider(struct device_node *np) {}
+static inline struct generic_pm_domain *of_genpd_get_from_provider(
+ struct of_phandle_args *genpdspec)
+{
+ return NULL;
+}
+
#define __of_genpd_xlate_simple NULL
#define __of_genpd_xlate_onecell NULL
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c8f170324e64..4d5bf5726578 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -10,9 +10,6 @@
extern const char linux_banner[];
extern const char linux_proc_banner[];
-extern char *log_buf_addr_get(void);
-extern u32 log_buf_len_get(void);
-
static inline int printk_get_level(const char *buffer)
{
if (buffer[0] == KERN_SOH_ASCII && buffer[1]) {
@@ -163,6 +160,8 @@ extern int kptr_restrict;
extern void wake_up_klogd(void);
+char *log_buf_addr_get(void);
+u32 log_buf_len_get(void);
void log_buf_kexec_setup(void);
void __init setup_log_buf(int early);
void dump_stack_set_arch_desc(const char *fmt, ...);
@@ -198,6 +197,16 @@ static inline void wake_up_klogd(void)
{
}
+static inline char *log_buf_addr_get(void)
+{
+ return NULL;
+}
+
+static inline u32 log_buf_len_get(void)
+{
+ return 0;
+}
+
static inline void log_buf_kexec_setup(void)
{
}
diff --git a/include/linux/quota.h b/include/linux/quota.h
index 50978b781a19..097d7eb2441e 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -321,6 +321,49 @@ struct dquot_operations {
struct path;
+/* Structure for communicating via ->get_dqblk() & ->set_dqblk() */
+struct qc_dqblk {
+ int d_fieldmask; /* mask of fields to change in ->set_dqblk() */
+ u64 d_spc_hardlimit; /* absolute limit on used space */
+ u64 d_spc_softlimit; /* preferred limit on used space */
+ u64 d_ino_hardlimit; /* maximum # allocated inodes */
+ u64 d_ino_softlimit; /* preferred inode limit */
+ u64 d_space; /* Space owned by the user */
+ u64 d_ino_count; /* # inodes owned by the user */
+ s64 d_ino_timer; /* zero if within inode limits */
+ /* if not, we refuse service */
+ s64 d_spc_timer; /* similar to above; for space */
+ int d_ino_warns; /* # warnings issued wrt num inodes */
+ int d_spc_warns; /* # warnings issued wrt used space */
+ u64 d_rt_spc_hardlimit; /* absolute limit on realtime space */
+ u64 d_rt_spc_softlimit; /* preferred limit on RT space */
+ u64 d_rt_space; /* realtime space owned */
+ s64 d_rt_spc_timer; /* similar to above; for RT space */
+ int d_rt_spc_warns; /* # warnings issued wrt RT space */
+};
+
+/* Field specifiers for ->set_dqblk() in struct qc_dqblk */
+#define QC_INO_SOFT (1<<0)
+#define QC_INO_HARD (1<<1)
+#define QC_SPC_SOFT (1<<2)
+#define QC_SPC_HARD (1<<3)
+#define QC_RT_SPC_SOFT (1<<4)
+#define QC_RT_SPC_HARD (1<<5)
+#define QC_LIMIT_MASK (QC_INO_SOFT | QC_INO_HARD | QC_SPC_SOFT | QC_SPC_HARD | \
+ QC_RT_SPC_SOFT | QC_RT_SPC_HARD)
+#define QC_SPC_TIMER (1<<6)
+#define QC_INO_TIMER (1<<7)
+#define QC_RT_SPC_TIMER (1<<8)
+#define QC_TIMER_MASK (QC_SPC_TIMER | QC_INO_TIMER | QC_RT_SPC_TIMER)
+#define QC_SPC_WARNS (1<<9)
+#define QC_INO_WARNS (1<<10)
+#define QC_RT_SPC_WARNS (1<<11)
+#define QC_WARNS_MASK (QC_SPC_WARNS | QC_INO_WARNS | QC_RT_SPC_WARNS)
+#define QC_SPACE (1<<12)
+#define QC_INO_COUNT (1<<13)
+#define QC_RT_SPACE (1<<14)
+#define QC_ACCT_MASK (QC_SPACE | QC_INO_COUNT | QC_RT_SPACE)
+
/* Operations handling requests from userspace */
struct quotactl_ops {
int (*quota_on)(struct super_block *, int, int, struct path *);
@@ -329,8 +372,8 @@ struct quotactl_ops {
int (*quota_sync)(struct super_block *, int);
int (*get_info)(struct super_block *, int, struct if_dqinfo *);
int (*set_info)(struct super_block *, int, struct if_dqinfo *);
- int (*get_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
- int (*set_dqblk)(struct super_block *, struct kqid, struct fs_disk_quota *);
+ int (*get_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
+ int (*set_dqblk)(struct super_block *, struct kqid, struct qc_dqblk *);
int (*get_xstate)(struct super_block *, struct fs_quota_stat *);
int (*set_xstate)(struct super_block *, unsigned int, int);
int (*get_xstatev)(struct super_block *, struct fs_quota_statv *);
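Filesystems implementing ->get_dqblk() now fill the VFS-neutral qc_dqblk directly instead of fs_disk_quota; note that the d_spc_* and d_space fields carry byte counts. A stripped-down sketch with an invented filesystem and placeholder values:

static int foofs_get_dqblk(struct super_block *sb, struct kqid qid,
			   struct qc_dqblk *dq)
{
	memset(dq, 0, sizeof(*dq));
	dq->d_spc_hardlimit = 1024 * 1024;	/* bytes, not blocks */
	dq->d_spc_softlimit = 512 * 1024;
	dq->d_ino_hardlimit = 1000;
	dq->d_space	    = 4096;
	dq->d_ino_count	    = 3;
	return 0;
}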
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index f23538a6e411..29e3455f7d41 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -98,9 +98,9 @@ int dquot_quota_sync(struct super_block *sb, int type);
int dquot_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int dquot_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii);
int dquot_get_dqblk(struct super_block *sb, struct kqid id,
- struct fs_disk_quota *di);
+ struct qc_dqblk *di);
int dquot_set_dqblk(struct super_block *sb, struct kqid id,
- struct fs_disk_quota *di);
+ struct qc_dqblk *di);
int __dquot_transfer(struct inode *inode, struct dquot **transfer_to);
int dquot_transfer(struct inode *inode, struct iattr *iattr);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index c0c2bce6b0b7..d9d7e7e56352 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -37,6 +37,16 @@ struct anon_vma {
atomic_t refcount;
/*
+ * Count of child anon_vmas and VMAs which points to this anon_vma.
+ *
+ * This counter is used for making decision about reusing anon_vma
+ * instead of forking new one. See comments in function anon_vma_clone.
+ */
+ unsigned degree;
+
+ struct anon_vma *parent; /* Parent of this anon_vma */
+
+ /*
* NOTE: the LSB of the rb_root.rb_node is set by
* mm_take_all_locks() _after_ taking the above lock. So the
* rb_root must only be read/written after taking the above lock
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c611a02fbc51..fc52e307efab 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -38,7 +38,7 @@
#define THERMAL_CSTATE_INVALID -1UL
/* No upper/lower limit requirement */
-#define THERMAL_NO_LIMIT THERMAL_CSTATE_INVALID
+#define THERMAL_NO_LIMIT ((u32)~0)
/* Unit conversion macros */
#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
diff --git a/include/linux/time.h b/include/linux/time.h
index 203c2ad40d71..beebe3a02d43 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -110,6 +110,19 @@ static inline bool timespec_valid_strict(const struct timespec *ts)
return true;
}
+static inline bool timeval_valid(const struct timeval *tv)
+{
+ /* Dates before 1970 are bogus */
+ if (tv->tv_sec < 0)
+ return false;
+
+ /* Can't have more microseconds than a second */
+ if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
+ return false;
+
+ return true;
+}
+
extern struct timespec timespec_trunc(struct timespec t, unsigned gran);
#define CURRENT_TIME (current_kernel_time())
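Usage mirrors timespec_valid(); for instance a caller validating user-supplied values before using them (a hedged illustration, not from this patch):

static int check_user_timeval(const struct timeval *tv)
{
	if (!timeval_valid(tv))
		return -EINVAL;	/* e.g. tv_usec >= USEC_PER_SEC is rejected */
	return 0;
}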
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a219be961c0a..00048339c23e 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping,
struct writeback_control *wbc, writepage_t writepage,
void *data);
int do_writepages(struct address_space *mapping, struct writeback_control *wbc);
-void set_page_dirty_balance(struct page *page);
void writeback_set_ratelimit(void);
void tag_pages_for_writeback(struct address_space *mapping,
pgoff_t start, pgoff_t end);
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index af10c2cf8a1d..6c92415311ca 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -27,10 +27,18 @@ struct genl_info;
* @maxattr: maximum number of attributes supported
* @netnsok: set to true if the family can handle network
* namespaces and should be presented in all of them
+ * @parallel_ops: operations can be called in parallel and aren't
+ * synchronized by the core genetlink code
* @pre_doit: called before an operation's doit callback, it may
* do additional, common, filtering and return an error
* @post_doit: called after an operation's doit callback, it may
* undo operations done by pre_doit, for example release locks
+ * @mcast_bind: a socket bound to the given multicast group (which
+ * is given as the offset into the groups array)
+ * @mcast_unbind: a socket was unbound from the given multicast group.
+ * Note that unbind() will not be called symmetrically if the
+ * generic netlink family is removed while there are still open
+ * sockets.
* @attrbuf: buffer to store parsed attributes
* @family_list: family list
* @mcgrps: multicast groups used by this family (private)
@@ -53,6 +61,8 @@ struct genl_family {
void (*post_doit)(const struct genl_ops *ops,
struct sk_buff *skb,
struct genl_info *info);
+ int (*mcast_bind)(struct net *net, int group);
+ void (*mcast_unbind)(struct net *net, int group);
struct nlattr ** attrbuf; /* private */
const struct genl_ops * ops; /* private */
const struct genl_multicast_group *mcgrps; /* private */
@@ -395,11 +405,11 @@ static inline int genl_set_err(struct genl_family *family, struct net *net,
}
static inline int genl_has_listeners(struct genl_family *family,
- struct sock *sk, unsigned int group)
+ struct net *net, unsigned int group)
{
if (WARN_ON_ONCE(group >= family->n_mcgrps))
return -EINVAL;
group = family->mcgrp_offset + group;
- return netlink_has_listeners(sk, group);
+ return netlink_has_listeners(net->genl_sock, group);
}
#endif /* __NET_GENERIC_NETLINK_H */
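A family that wants to restrict who may join one of its multicast groups can now do so per network namespace from mcast_bind(); the group argument is the offset into the family's mcgrps array. A sketch with invented names:

static int foo_mcast_bind(struct net *net, int group)
{
	/* Hypothetical policy: only privileged listeners on group 0 */
	if (group == 0 && !ns_capable(net->user_ns, CAP_NET_ADMIN))
		return -EPERM;
	return 0;
}

static struct genl_family foo_family = {
	/* ... id, name, ops ... */
	.mcast_bind	= foo_mcast_bind,
};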
diff --git a/include/net/ip.h b/include/net/ip.h
index 0bb620702929..f7cbd703d15d 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -39,11 +39,12 @@ struct inet_skb_parm {
struct ip_options opt; /* Compiled IP options */
unsigned char flags;
-#define IPSKB_FORWARDED 1
-#define IPSKB_XFRM_TUNNEL_SIZE 2
-#define IPSKB_XFRM_TRANSFORMED 4
-#define IPSKB_FRAG_COMPLETE 8
-#define IPSKB_REROUTED 16
+#define IPSKB_FORWARDED BIT(0)
+#define IPSKB_XFRM_TUNNEL_SIZE BIT(1)
+#define IPSKB_XFRM_TRANSFORMED BIT(2)
+#define IPSKB_FRAG_COMPLETE BIT(3)
+#define IPSKB_REROUTED BIT(4)
+#define IPSKB_DOREDIRECT BIT(5)
u16 frag_max_size;
};
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 58d719ddaa60..29c7be8808d5 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
*
* @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the
* driver to indicate that it requires IV generation for this
- * particular key. Setting this flag does not necessarily mean that SKBs
- * will have sufficient tailroom for ICV or MIC.
+ * particular key.
* @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by
* the driver for a TKIP key if it requires Michael MIC
* generation in software.
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev);
* @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver
* if space should be prepared for the IV, but the IV
* itself should not be generated. Do not set together with
- * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does
- * not necessarily mean that SKBs will have sufficient tailroom for ICV or
- * MIC.
+ * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key.
* @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received
* management frames. The flag can help drivers that have a hardware
* crypto implementation that doesn't deal with management frames
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index eb070b3674a1..76f708486aae 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -190,7 +190,6 @@ struct neigh_hash_table {
struct neigh_table {
- struct neigh_table *next;
int family;
int entry_size;
int key_len;
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index 57cccd0052e5..903461aa5644 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -1,6 +1,9 @@
#ifndef __NET_VXLAN_H
#define __NET_VXLAN_H 1
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/if_vlan.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/udp.h>
@@ -51,16 +54,33 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
__be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
__be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
-static inline bool vxlan_gso_check(struct sk_buff *skb)
+static inline netdev_features_t vxlan_features_check(struct sk_buff *skb,
+ netdev_features_t features)
{
- if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
+ u8 l4_hdr = 0;
+
+ if (!skb->encapsulation)
+ return features;
+
+ switch (vlan_get_protocol(skb)) {
+ case htons(ETH_P_IP):
+ l4_hdr = ip_hdr(skb)->protocol;
+ break;
+ case htons(ETH_P_IPV6):
+ l4_hdr = ipv6_hdr(skb)->nexthdr;
+ break;
+ default:
+ return features;
+ }
+
+ if ((l4_hdr == IPPROTO_UDP) &&
(skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
skb->inner_protocol != htons(ETH_P_TEB) ||
(skb_inner_mac_header(skb) - skb_transport_header(skb) !=
sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
- return false;
+ return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK);
- return true;
+ return features;
}
/* IP header + UDP + VXLAN + Ethernet header */
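The helper is meant to be called from a NIC driver's ndo_features_check (see the netdevice.h hunk above), so that VXLAN-encapsulated frames the hardware cannot parse lose checksum and GSO offload instead of being mis-offloaded. A hedged caller sketch, driver name invented:

static netdev_features_t nic_features_check(struct sk_buff *skb,
					    struct net_device *dev,
					    netdev_features_t features)
{
	return vxlan_features_check(skb, features);
}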
diff --git a/include/sound/ak4113.h b/include/sound/ak4113.h
index 2609048c1d44..3a34f6edc2d1 100644
--- a/include/sound/ak4113.h
+++ b/include/sound/ak4113.h
@@ -286,7 +286,7 @@ struct ak4113 {
ak4113_write_t *write;
ak4113_read_t *read;
void *private_data;
- unsigned int init:1;
+ atomic_t wq_processing;
spinlock_t lock;
unsigned char regmap[AK4113_WRITABLE_REGS];
struct snd_kcontrol *kctls[AK4113_CONTROLS];
diff --git a/include/sound/ak4114.h b/include/sound/ak4114.h
index 52f02a60dba7..069299a88915 100644
--- a/include/sound/ak4114.h
+++ b/include/sound/ak4114.h
@@ -168,7 +168,7 @@ struct ak4114 {
ak4114_write_t * write;
ak4114_read_t * read;
void * private_data;
- unsigned int init: 1;
+ atomic_t wq_processing;
spinlock_t lock;
unsigned char regmap[6];
unsigned char txcsb[5];
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index 1e7f74acc2ec..b429b73e875e 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -857,7 +857,7 @@ static inline unsigned int params_channels(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the sample rate from the hw params
+ * params_rate - Get the sample rate from the hw params
* @p: hw params
*/
static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
@@ -866,7 +866,7 @@ static inline unsigned int params_rate(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the period size (in frames) from the hw params
+ * params_period_size - Get the period size (in frames) from the hw params
* @p: hw params
*/
static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
@@ -875,7 +875,7 @@ static inline unsigned int params_period_size(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the number of periods from the hw params
+ * params_periods - Get the number of periods from the hw params
* @p: hw params
*/
static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
@@ -884,7 +884,7 @@ static inline unsigned int params_periods(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the buffer size (in frames) from the hw params
+ * params_buffer_size - Get the buffer size (in frames) from the hw params
* @p: hw params
*/
static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
@@ -893,7 +893,7 @@ static inline unsigned int params_buffer_size(const struct snd_pcm_hw_params *p)
}
/**
- * params_channels - Get the buffer size (in bytes) from the hw params
+ * params_buffer_bytes - Get the buffer size (in bytes) from the hw params
* @p: hw params
*/
static inline unsigned int params_buffer_bytes(const struct snd_pcm_hw_params *p)
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 430cfaf92285..db81c65b8f48 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int);
int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
int se_dev_set_queue_depth(struct se_device *, u32);
int se_dev_set_max_sectors(struct se_device *, u32);
-int se_dev_set_fabric_max_sectors(struct se_device *, u32);
int se_dev_set_optimal_sectors(struct se_device *, u32);
int se_dev_set_block_size(struct se_device *, u32);
diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h
index 3247d7530107..186f7a923570 100644
--- a/include/target/target_core_backend_configfs.h
+++ b/include/target/target_core_backend_configfs.h
@@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name
TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \
TB_DEV_ATTR_RO(_backend, hw_max_sectors); \
- DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \
- TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \
TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \
DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 397fb635766a..4a8795a87b9e 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -77,8 +77,6 @@
#define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0
/* Default max_write_same_len, disabled by default */
#define DA_MAX_WRITE_SAME_LEN 0
-/* Default max transfer length */
-#define DA_FABRIC_MAX_SECTORS 8192
/* Use a model alias based on the configfs backend device name */
#define DA_EMULATE_MODEL_ALIAS 0
/* Emulation for Direct Page Out */
@@ -694,7 +692,6 @@ struct se_dev_attrib {
u32 hw_block_size;
u32 block_size;
u32 hw_max_sectors;
- u32 fabric_max_sectors;
u32 optimal_sectors;
u32 hw_queue_depth;
u32 queue_depth;
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index 6edf1f2028cd..86b399c66c3d 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -146,6 +146,14 @@ TRACE_EVENT(kvm_msi_set_irq,
#if defined(CONFIG_HAVE_KVM_IRQFD)
+#ifdef kvm_irqchips
+#define kvm_ack_irq_string "irqchip %s pin %u"
+#define kvm_ack_irq_parm __print_symbolic(__entry->irqchip, kvm_irqchips), __entry->pin
+#else
+#define kvm_ack_irq_string "irqchip %d pin %u"
+#define kvm_ack_irq_parm __entry->irqchip, __entry->pin
+#endif
+
TRACE_EVENT(kvm_ack_irq,
TP_PROTO(unsigned int irqchip, unsigned int pin),
TP_ARGS(irqchip, pin),
@@ -160,13 +168,7 @@ TRACE_EVENT(kvm_ack_irq,
__entry->pin = pin;
),
-#ifdef kvm_irqchips
- TP_printk("irqchip %s pin %u",
- __print_symbolic(__entry->irqchip, kvm_irqchips),
- __entry->pin)
-#else
- TP_printk("irqchip %d pin %u", __entry->irqchip, __entry->pin)
-#endif
+ TP_printk(kvm_ack_irq_string, kvm_ack_irq_parm)
);
#endif /* defined(CONFIG_HAVE_KVM_IRQFD) */
diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h
index 7543b3e51331..e063effe0cc1 100644
--- a/include/uapi/asm-generic/fcntl.h
+++ b/include/uapi/asm-generic/fcntl.h
@@ -5,7 +5,7 @@
/*
* FMODE_EXEC is 0x20
- * FMODE_NONOTIFY is 0x1000000
+ * FMODE_NONOTIFY is 0x4000000
* These cannot be used by userspace O_* until internal and external open
* flags are split.
* -Eric Paris
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 3e4323a3918d..94ffe0c83ce7 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -98,6 +98,7 @@ struct can_ctrlmode {
#define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */
#define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */
#define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */
+#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */
/*
* CAN device statistics
diff --git a/include/uapi/linux/in6.h b/include/uapi/linux/in6.h
index 74a2a1773494..79b12b004ade 100644
--- a/include/uapi/linux/in6.h
+++ b/include/uapi/linux/in6.h
@@ -149,7 +149,7 @@ struct in6_flowlabel_req {
/*
* IPV6 socket options
*/
-
+#if __UAPI_DEF_IPV6_OPTIONS
#define IPV6_ADDRFORM 1
#define IPV6_2292PKTINFO 2
#define IPV6_2292HOPOPTS 3
@@ -196,6 +196,7 @@ struct in6_flowlabel_req {
#define IPV6_IPSEC_POLICY 34
#define IPV6_XFRM_POLICY 35
+#endif
/*
* Multicast:
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index 7acef41fc209..af94f31e33ac 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args {
uint32_t pad;
};
-#define KFD_IOC_MAGIC 'K'
+#define AMDKFD_IOCTL_BASE 'K'
+#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr)
+#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type)
+#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type)
-#define KFD_IOC_GET_VERSION \
- _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args)
+#define AMDKFD_IOC_GET_VERSION \
+ AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args)
-#define KFD_IOC_CREATE_QUEUE \
- _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args)
+#define AMDKFD_IOC_CREATE_QUEUE \
+ AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args)
-#define KFD_IOC_DESTROY_QUEUE \
- _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args)
+#define AMDKFD_IOC_DESTROY_QUEUE \
+ AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args)
-#define KFD_IOC_SET_MEMORY_POLICY \
- _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args)
+#define AMDKFD_IOC_SET_MEMORY_POLICY \
+ AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args)
-#define KFD_IOC_GET_CLOCK_COUNTERS \
- _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args)
+#define AMDKFD_IOC_GET_CLOCK_COUNTERS \
+ AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args)
-#define KFD_IOC_GET_PROCESS_APERTURES \
- _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args)
+#define AMDKFD_IOC_GET_PROCESS_APERTURES \
+ AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args)
-#define KFD_IOC_UPDATE_QUEUE \
- _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args)
+#define AMDKFD_IOC_UPDATE_QUEUE \
+ AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args)
+
+#define AMDKFD_COMMAND_START 0x01
+#define AMDKFD_COMMAND_END 0x08
#endif
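From userspace the renamed requests are issued the same way as before, only the macro names change. A minimal sketch querying the interface version through /dev/kfd; the major_version/minor_version field names of the args struct are assumed from the same header and error handling is abbreviated.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "kfd_ioctl.h"

int kfd_query_version(void)
{
	struct kfd_ioctl_get_version_args args = {0};
	int fd = open("/dev/kfd", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, AMDKFD_IOC_GET_VERSION, &args) == 0)
		;	/* args.major_version / args.minor_version now valid */
	close(fd);
	return 0;
}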
diff --git a/include/uapi/linux/libc-compat.h b/include/uapi/linux/libc-compat.h
index c140620dad92..e28807ad17fa 100644
--- a/include/uapi/linux/libc-compat.h
+++ b/include/uapi/linux/libc-compat.h
@@ -69,6 +69,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 0
#define __UAPI_DEF_IPV6_MREQ 0
#define __UAPI_DEF_IPPROTO_V6 0
+#define __UAPI_DEF_IPV6_OPTIONS 0
#else
@@ -82,6 +83,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
#endif /* _NETINET_IN_H */
@@ -103,6 +105,7 @@
#define __UAPI_DEF_SOCKADDR_IN6 1
#define __UAPI_DEF_IPV6_MREQ 1
#define __UAPI_DEF_IPPROTO_V6 1
+#define __UAPI_DEF_IPV6_OPTIONS 1
/* Definitions for xattr.h */
#define __UAPI_DEF_XATTR 1
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 3a6dcaa359b7..f714e8633352 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -174,6 +174,10 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */
OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_*
attributes. */
+ OVS_PACKET_ATTR_UNUSED1,
+ OVS_PACKET_ATTR_UNUSED2,
+ OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
+ error logging should be suppressed. */
__OVS_PACKET_ATTR_MAX
};
diff --git a/include/uapi/linux/uinput.h b/include/uapi/linux/uinput.h
index baeab83deb64..013c9d8db372 100644
--- a/include/uapi/linux/uinput.h
+++ b/include/uapi/linux/uinput.h
@@ -82,7 +82,7 @@ struct uinput_ff_erase {
* The complete sysfs path is then /sys/devices/virtual/input/--NAME--
* Usually, it is in the form "inputN"
*/
-#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 300, len)
+#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 44, len)
/**
* UI_GET_VERSION - Return version of uinput protocol
@@ -91,7 +91,7 @@ struct uinput_ff_erase {
* the integer pointed to by the ioctl argument. The protocol version
* is hard-coded in the kernel and is independent of the uinput device.
*/
-#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 301, unsigned int)
+#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 45, unsigned int)
/*
* To write a force-feedback-capable driver, the upload_effect
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 61c818a7fe70..a3318f31e8e7 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -101,6 +101,13 @@ struct vring {
struct vring_used *used;
};
+/* Alignment requirements for vring elements.
+ * When using pre-virtio 1.0 layout, these fall out naturally.
+ */
+#define VRING_AVAIL_ALIGN_SIZE 2
+#define VRING_USED_ALIGN_SIZE 4
+#define VRING_DESC_ALIGN_SIZE 16
+
/* The standard layout for the ring is a continuous chunk of memory which looks
* like this. We assume num is a power of 2.
*
diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h
new file mode 100644
index 000000000000..b47d9d06fade
--- /dev/null
+++ b/include/xen/interface/nmi.h
@@ -0,0 +1,51 @@
+/******************************************************************************
+ * nmi.h
+ *
+ * NMI callback registration and reason codes.
+ *
+ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
+ */
+
+#ifndef __XEN_PUBLIC_NMI_H__
+#define __XEN_PUBLIC_NMI_H__
+
+#include <xen/interface/xen.h>
+
+/*
+ * NMI reason codes:
+ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
+ */
+ /* I/O-check error reported via ISA port 0x61, bit 6. */
+#define _XEN_NMIREASON_io_error 0
+#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
+ /* PCI SERR reported via ISA port 0x61, bit 7. */
+#define _XEN_NMIREASON_pci_serr 1
+#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr)
+ /* Unknown hardware-generated NMI. */
+#define _XEN_NMIREASON_unknown 2
+#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
+
+/*
+ * long nmi_op(unsigned int cmd, void *arg)
+ * NB. All ops return zero on success, else a negative error code.
+ */
+
+/*
+ * Register NMI callback for this (calling) VCPU. Currently this only makes
+ * sense for domain 0, vcpu 0. All other callers will be returned EINVAL.
+ * arg == pointer to xennmi_callback structure.
+ */
+#define XENNMI_register_callback 0
+struct xennmi_callback {
+ unsigned long handler_address;
+ unsigned long pad;
+};
+DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback);
+
+/*
+ * Deregister NMI callback for this (calling) VCPU.
+ * arg == NULL.
+ */
+#define XENNMI_unregister_callback 1
+
+#endif /* __XEN_PUBLIC_NMI_H__ */
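
A minimal usage sketch for this new header (not part of the patch): it assumes the x86 HYPERVISOR_nmi_op() hypercall wrapper and an already-written handler, and simply packs the callback address into the structure defined above.

	#include <xen/interface/nmi.h>
	#include <asm/xen/hypercall.h>

	/* Register 'handler' as this VCPU's NMI callback.  As noted above,
	 * only dom0 vcpu0 may do this; every other caller gets -EINVAL. */
	static int example_register_nmi_callback(void (*handler)(void))
	{
		struct xennmi_callback cb = {
			.handler_address = (unsigned long)handler,
			.pad = 0,
		};

		return HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
	}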
diff --git a/kernel/audit.c b/kernel/audit.c
index f8f203e8018c..72ab759a0b43 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -429,7 +429,7 @@ static void kauditd_send_skb(struct sk_buff *skb)
* This function doesn't consume an skb as might be expected since it has to
* copy it anyways.
*/
-static void kauditd_send_multicast_skb(struct sk_buff *skb)
+static void kauditd_send_multicast_skb(struct sk_buff *skb, gfp_t gfp_mask)
{
struct sk_buff *copy;
struct audit_net *aunet = net_generic(&init_net, audit_net_id);
@@ -448,11 +448,11 @@ static void kauditd_send_multicast_skb(struct sk_buff *skb)
* no reason for new multicast clients to continue with this
* non-compliance.
*/
- copy = skb_copy(skb, GFP_KERNEL);
+ copy = skb_copy(skb, gfp_mask);
if (!copy)
return;
- nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, GFP_KERNEL);
+ nlmsg_multicast(sock, copy, 0, AUDIT_NLGRP_READLOG, gfp_mask);
}
/*
@@ -1100,7 +1100,7 @@ static void audit_receive(struct sk_buff *skb)
}
/* Run custom bind function on netlink socket group connect or bind requests. */
-static int audit_bind(int group)
+static int audit_bind(struct net *net, int group)
{
if (!capable(CAP_AUDIT_READ))
return -EPERM;
@@ -1940,7 +1940,7 @@ void audit_log_end(struct audit_buffer *ab)
struct nlmsghdr *nlh = nlmsg_hdr(ab->skb);
nlh->nlmsg_len = ab->skb->len;
- kauditd_send_multicast_skb(ab->skb);
+ kauditd_send_multicast_skb(ab->skb, ab->gfp_mask);
/*
* The original kaudit unicast socket sends up messages with
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 3598e13f2a65..4f68a326d92e 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -442,19 +442,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
f->val = 0;
- }
-
- if ((f->type == AUDIT_PID) || (f->type == AUDIT_PPID)) {
- struct pid *pid;
- rcu_read_lock();
- pid = find_vpid(f->val);
- if (!pid) {
- rcu_read_unlock();
- err = -ESRCH;
- goto exit_free;
- }
- f->val = pid_nr(pid);
- rcu_read_unlock();
+ entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
}
err = audit_field_valid(entry, f);
@@ -630,6 +618,13 @@ static struct audit_rule_data *audit_krule_to_data(struct audit_krule *krule)
data->buflen += data->values[i] =
audit_pack_string(&bufp, krule->filterkey);
break;
+ case AUDIT_LOGINUID_SET:
+ if (krule->pflags & AUDIT_LOGINUID_LEGACY && !f->val) {
+ data->fields[i] = AUDIT_LOGINUID;
+ data->values[i] = AUDIT_UID_UNSET;
+ break;
+ }
+ /* fallthrough if set */
default:
data->values[i] = f->val;
}
@@ -646,6 +641,7 @@ static int audit_compare_rule(struct audit_krule *a, struct audit_krule *b)
int i;
if (a->flags != b->flags ||
+ a->pflags != b->pflags ||
a->listnr != b->listnr ||
a->action != b->action ||
a->field_count != b->field_count)
@@ -764,6 +760,7 @@ struct audit_entry *audit_dupe_rule(struct audit_krule *old)
new = &entry->rule;
new->vers_ops = old->vers_ops;
new->flags = old->flags;
+ new->pflags = old->pflags;
new->listnr = old->listnr;
new->action = old->action;
for (i = 0; i < AUDIT_BITMASK_SIZE; i++)
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index c75522a83678..072566dd0caf 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -72,6 +72,8 @@
#include <linux/fs_struct.h>
#include <linux/compat.h>
#include <linux/ctype.h>
+#include <linux/string.h>
+#include <uapi/linux/limits.h>
#include "audit.h"
@@ -1861,8 +1863,7 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
}
list_for_each_entry_reverse(n, &context->names_list, list) {
- /* does the name pointer match? */
- if (!n->name || n->name->name != name->name)
+ if (!n->name || strcmp(n->name->name, name->name))
continue;
/* match the correct record type */
@@ -1877,12 +1878,48 @@ void __audit_inode(struct filename *name, const struct dentry *dentry,
}
out_alloc:
- /* unable to find the name from a previous getname(). Allocate a new
- * anonymous entry.
- */
- n = audit_alloc_name(context, AUDIT_TYPE_NORMAL);
+ /* unable to find an entry with both a matching name and type */
+ n = audit_alloc_name(context, AUDIT_TYPE_UNKNOWN);
if (!n)
return;
+ /* unfortunately, while we may have a path name to record with the
+ * inode, we can't always rely on the string lasting until the end of
+ * the syscall, so we need to create our own copy; the copy may fail
+ * due to memory allocation issues, but we do our best */
+ if (name) {
+ /* we can't use getname_kernel() due to size limits */
+ size_t len = strlen(name->name) + 1;
+ struct filename *new = __getname();
+
+ if (unlikely(!new))
+ goto out;
+
+ if (len <= (PATH_MAX - sizeof(*new))) {
+ new->name = (char *)(new) + sizeof(*new);
+ new->separate = false;
+ } else if (len <= PATH_MAX) {
+ /* this looks odd, but is due to final_putname() */
+ struct filename *new2;
+
+ new2 = kmalloc(sizeof(*new2), GFP_KERNEL);
+ if (unlikely(!new2)) {
+ __putname(new);
+ goto out;
+ }
+ new2->name = (char *)new;
+ new2->separate = true;
+ new = new2;
+ } else {
+ /* we should never get here, but let's be safe */
+ __putname(new);
+ goto out;
+ }
+ strlcpy((char *)new->name, name->name, len);
+ new->uptr = NULL;
+ new->aname = n;
+ n->name = new;
+ n->name_put = true;
+ }
out:
if (parent) {
n->name_len = n->name ? parent_len(n->name->name) : AUDIT_NAME_FULL;
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index d6594e457a25..a64e7a207d2b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -163,7 +163,7 @@ bpf_jit_binary_alloc(unsigned int proglen, u8 **image_ptr,
void bpf_jit_binary_free(struct bpf_binary_header *hdr)
{
- module_free(NULL, hdr);
+ module_memfree(hdr);
}
#endif /* CONFIG_BPF_JIT */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 088ac0b1b106..536edc2be307 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -150,7 +150,7 @@ static int map_lookup_elem(union bpf_attr *attr)
int ufd = attr->map_fd;
struct fd f = fdget(ufd);
struct bpf_map *map;
- void *key, *value;
+ void *key, *value, *ptr;
int err;
if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
@@ -169,20 +169,29 @@ static int map_lookup_elem(union bpf_attr *attr)
if (copy_from_user(key, ukey, map->key_size) != 0)
goto free_key;
- err = -ENOENT;
- rcu_read_lock();
- value = map->ops->map_lookup_elem(map, key);
+ err = -ENOMEM;
+ value = kmalloc(map->value_size, GFP_USER);
if (!value)
- goto err_unlock;
+ goto free_key;
+
+ rcu_read_lock();
+ ptr = map->ops->map_lookup_elem(map, key);
+ if (ptr)
+ memcpy(value, ptr, map->value_size);
+ rcu_read_unlock();
+
+ err = -ENOENT;
+ if (!ptr)
+ goto free_value;
err = -EFAULT;
if (copy_to_user(uvalue, value, map->value_size) != 0)
- goto err_unlock;
+ goto free_value;
err = 0;
-err_unlock:
- rcu_read_unlock();
+free_value:
+ kfree(value);
free_key:
kfree(key);
err_put:
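
The map_lookup_elem() rework above follows a common kernel pattern: copy the RCU-protected value into a private kernel buffer while inside the RCU read-side section, then perform the possibly-sleeping copy_to_user() after rcu_read_unlock(). A stand-alone sketch of that pattern (illustrative names, not from the patch):

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/uaccess.h>
	#include <linux/rcupdate.h>

	/* Copy an RCU-protected object of 'size' bytes to user space without
	 * holding the RCU read lock across copy_to_user(), which may fault. */
	static int copy_rcu_object_to_user(void __user *uvalue, size_t size,
					   void *(*lookup)(void *key), void *key)
	{
		void *ptr, *value;
		int err;

		value = kmalloc(size, GFP_USER);
		if (!value)
			return -ENOMEM;

		rcu_read_lock();
		ptr = lookup(key);
		if (ptr)
			memcpy(value, ptr, size);
		rcu_read_unlock();

		if (!ptr)
			err = -ENOENT;
		else if (copy_to_user(uvalue, value, size))
			err = -EFAULT;
		else
			err = 0;

		kfree(value);
		return err;
	}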
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index bb263d0caab3..04cfe8ace520 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1909,7 +1909,7 @@ static void cgroup_kill_sb(struct super_block *sb)
*
* And don't kill the default root.
*/
- if (css_has_online_children(&root->cgrp.self) ||
+ if (!list_empty(&root->cgrp.self.children) ||
root == &cgrp_dfl_root)
cgroup_put(&root->cgrp);
else
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 1adf62b39b96..07ce18ca71e0 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -27,6 +27,9 @@
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
+
+#define pr_fmt(fmt) "KGDB: " fmt
+
#include <linux/pid_namespace.h>
#include <linux/clocksource.h>
#include <linux/serial_core.h>
@@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr)
return err;
err = kgdb_arch_remove_breakpoint(&tmp);
if (err)
- printk(KERN_ERR "KGDB: Critical breakpoint error, kernel "
- "memory destroyed at: %lx", addr);
+ pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n",
+ addr);
return err;
}
@@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void)
error = kgdb_arch_set_breakpoint(&kgdb_break[i]);
if (error) {
ret = error;
- printk(KERN_INFO "KGDB: BP install failed: %lx",
- kgdb_break[i].bpt_addr);
+ pr_info("BP install failed: %lx\n",
+ kgdb_break[i].bpt_addr);
continue;
}
@@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void)
continue;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error) {
- printk(KERN_INFO "KGDB: BP remove failed: %lx\n",
- kgdb_break[i].bpt_addr);
+ pr_info("BP remove failed: %lx\n",
+ kgdb_break[i].bpt_addr);
ret = error;
}
@@ -367,7 +370,7 @@ int dbg_remove_all_break(void)
goto setundefined;
error = kgdb_arch_remove_breakpoint(&kgdb_break[i]);
if (error)
- printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n",
+ pr_err("breakpoint remove failed: %lx\n",
kgdb_break[i].bpt_addr);
setundefined:
kgdb_break[i].state = BP_UNDEFINED;
@@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait)
if (print_wait) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
- printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n");
+ pr_crit("waiting... or $3#33 for KDB\n");
#else
- printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
+ pr_crit("Waiting for remote debugger\n");
#endif
}
return 1;
@@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
exception_level = 0;
kgdb_skipexception(ks->ex_vector, ks->linux_regs);
dbg_activate_sw_breakpoints();
- printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n",
- addr);
+ pr_crit("re-enter error: breakpoint removed %lx\n", addr);
WARN_ON_ONCE(1);
return 1;
@@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
panic("Recursive entry to debugger");
}
- printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n");
+ pr_crit("re-enter exception: ALL breakpoints killed\n");
#ifdef CONFIG_KGDB_KDB
/* Allow kdb to debug itself one level */
return 0;
@@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs,
int cpu;
int trace_on = 0;
int online_cpus = num_online_cpus();
+ u64 time_left;
kgdb_info[ks->cpu].enter_kgdb++;
kgdb_info[ks->cpu].exception_state |= exception_state;
@@ -595,9 +598,13 @@ return_normal:
/*
* Wait for the other CPUs to be notified and be waiting for us:
*/
- while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) +
- atomic_read(&slaves_in_kgdb)) != online_cpus)
+ time_left = loops_per_jiffy * HZ;
+ while (kgdb_do_roundup && --time_left &&
+ (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) !=
+ online_cpus)
cpu_relax();
+ if (!time_left)
+ pr_crit("KGDB: Timed out waiting for secondary CPUs.\n");
/*
* At this point the primary processor is completely
@@ -795,15 +802,15 @@ static struct console kgdbcons = {
static void sysrq_handle_dbg(int key)
{
if (!dbg_io_ops) {
- printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
+ pr_crit("ERROR: No KGDB I/O module available\n");
return;
}
if (!kgdb_connected) {
#ifdef CONFIG_KGDB_KDB
if (!dbg_kdb_mode)
- printk(KERN_CRIT "KGDB or $3#33 for KDB\n");
+ pr_crit("KGDB or $3#33 for KDB\n");
#else
- printk(KERN_CRIT "Entering KGDB\n");
+ pr_crit("Entering KGDB\n");
#endif
}
@@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void)
{
kgdb_break_asap = 0;
- printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n");
+ pr_crit("Waiting for connection from remote gdb...\n");
kgdb_breakpoint();
}
@@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
if (dbg_io_ops) {
spin_unlock(&kgdb_registration_lock);
- printk(KERN_ERR "kgdb: Another I/O driver is already "
- "registered with KGDB.\n");
+ pr_err("Another I/O driver is already registered with KGDB\n");
return -EBUSY;
}
@@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
- printk(KERN_INFO "kgdb: Registered I/O driver %s.\n",
- new_dbg_io_ops->name);
+ pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name);
/* Arm KGDB now. */
kgdb_register_callbacks();
@@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops)
spin_unlock(&kgdb_registration_lock);
- printk(KERN_INFO
- "kgdb: Unregistered I/O driver %s, debugger disabled.\n",
+ pr_info("Unregistered I/O driver %s, debugger disabled\n",
old_dbg_io_ops->name);
}
EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index b20d544f20c2..e1dbf4a2c69e 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -531,22 +531,29 @@ void __init kdb_initbptab(void)
for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++)
bp->bp_free = 1;
- kdb_register_repeat("bp", kdb_bp, "[<vaddr>]",
- "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("bl", kdb_bp, "[<vaddr>]",
- "Display breakpoints", 0, KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bp", kdb_bp, "[<vaddr>]",
+ "Set/Display breakpoints", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bl", kdb_bp, "[<vaddr>]",
+ "Display breakpoints", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT)
- kdb_register_repeat("bph", kdb_bp, "[<vaddr>]",
- "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("bc", kdb_bc, "<bpnum>",
- "Clear Breakpoint", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("be", kdb_bc, "<bpnum>",
- "Enable Breakpoint", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bd", kdb_bc, "<bpnum>",
- "Disable Breakpoint", 0, KDB_REPEAT_NONE);
-
- kdb_register_repeat("ss", kdb_ss, "",
- "Single Step", 1, KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bph", kdb_bp, "[<vaddr>]",
+ "[datar [length]|dataw [length]] Set hw brk", 0,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("bc", kdb_bc, "<bpnum>",
+ "Clear Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+ kdb_register_flags("be", kdb_bc, "<bpnum>",
+ "Enable Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+ kdb_register_flags("bd", kdb_bc, "<bpnum>",
+ "Disable Breakpoint", 0,
+ KDB_ENABLE_FLOW_CTRL);
+
+ kdb_register_flags("ss", kdb_ss, "",
+ "Single Step", 1,
+ KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS);
/*
* Architecture dependent initialization.
*/
diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c
index 8859ca34dcfe..15e1a7af5dd0 100644
--- a/kernel/debug/kdb/kdb_debugger.c
+++ b/kernel/debug/kdb/kdb_debugger.c
@@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks)
ks->pass_exception = 1;
KDB_FLAG_SET(CATASTROPHIC);
}
+ /* set CATASTROPHIC if the system contains unresponsive processors */
+ for_each_online_cpu(i)
+ if (!kgdb_info[i].enter_kgdb)
+ KDB_FLAG_SET(CATASTROPHIC);
if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) {
KDB_STATE_CLEAR(SSBPT);
KDB_STATE_CLEAR(DOING_SS);
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 379650b984f8..7b40c5f07dce 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -12,6 +12,7 @@
*/
#include <linux/ctype.h>
+#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/kmsg_dump.h>
@@ -23,6 +24,7 @@
#include <linux/vmalloc.h>
#include <linux/atomic.h>
#include <linux/module.h>
+#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
@@ -42,6 +44,12 @@
#include <linux/slab.h>
#include "kdb_private.h"
+#undef MODULE_PARAM_PREFIX
+#define MODULE_PARAM_PREFIX "kdb."
+
+static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE;
+module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600);
+
#define GREP_LEN 256
char kdb_grep_string[GREP_LEN];
int kdb_grepping_flag;
@@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = {
KDBMSG(BADLENGTH, "Invalid length field"),
KDBMSG(NOBP, "No Breakpoint exists"),
KDBMSG(BADADDR, "Invalid address"),
+ KDBMSG(NOPERM, "Permission denied"),
};
#undef KDBMSG
@@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu)
}
/*
+ * Check whether the flags of the current command, combined with the
+ * permissions of the kdb console, allow a command to be run.
+ */
+static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions,
+ bool no_args)
+{
+ /* permissions comes from userspace so needs massaging slightly */
+ permissions &= KDB_ENABLE_MASK;
+ permissions |= KDB_ENABLE_ALWAYS_SAFE;
+
+ /* some commands change group when launched with no arguments */
+ if (no_args)
+ permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT;
+
+ flags |= KDB_ENABLE_ALL;
+
+ return permissions & flags;
+}
+
+/*
* kdbgetenv - This function will return the character string value of
* an environment variable.
* Parameters:
@@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg,
kdb_symtab_t symtab;
/*
+ * If the enable flags prohibit both arbitrary memory access
+ * and flow control then there are no reasonable grounds to
+ * provide symbol lookup.
+ */
+ if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL,
+ kdb_cmd_enabled, false))
+ return KDB_NOPERM;
+
+ /*
* Process arguments which follow the following syntax:
*
* symbol | numeric-address [+/- numeric-offset]
@@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0)
if (!s->count)
s->usable = 0;
if (s->usable)
- kdb_register(s->name, kdb_exec_defcmd,
- s->usage, s->help, 0);
+ /* macros are always safe because when executed each
+ * internal command re-enters kdb_parse() and is
+ * safety checked individually.
+ */
+ kdb_register_flags(s->name, kdb_exec_defcmd, s->usage,
+ s->help, 0,
+ KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
if (!s->usable)
@@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr)
if (i < kdb_max_commands) {
int result;
+
+ if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1))
+ return KDB_NOPERM;
+
KDB_STATE_SET(CMD);
result = (*tp->cmd_func)(argc-1, (const char **)argv);
if (result && ignore_errors && result > KDB_CMD_GO)
result = 0;
KDB_STATE_CLEAR(CMD);
- switch (tp->cmd_repeat) {
- case KDB_REPEAT_NONE:
- argc = 0;
- if (argv[0])
- *(argv[0]) = '\0';
- break;
- case KDB_REPEAT_NO_ARGS:
- argc = 1;
- if (argv[1])
- *(argv[1]) = '\0';
- break;
- case KDB_REPEAT_WITH_ARGS:
- break;
- }
+
+ if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS)
+ return result;
+
+ argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0;
+ if (argv[argc])
+ *(argv[argc]) = '\0';
return result;
}
@@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv)
*/
static int kdb_sr(int argc, const char **argv)
{
+ bool check_mask =
+ !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false);
+
if (argc != 1)
return KDB_ARGCOUNT;
+
kdb_trap_printk++;
- __handle_sysrq(*argv[1], false);
+ __handle_sysrq(*argv[1], check_mask);
kdb_trap_printk--;
return 0;
@@ -1979,7 +2023,7 @@ static int kdb_lsmod(int argc, const char **argv)
kdb_printf("%-20s%8u 0x%p ", mod->name,
mod->core_size, (void *)mod);
#ifdef CONFIG_MODULE_UNLOAD
- kdb_printf("%4ld ", module_refcount(mod));
+ kdb_printf("%4d ", module_refcount(mod));
#endif
if (mod->state == MODULE_STATE_GOING)
kdb_printf(" (Unloading)");
@@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void)
for (start_cpu = -1, i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) {
state = 'F'; /* cpu is offline */
+ } else if (!kgdb_info[i].enter_kgdb) {
+ state = 'D'; /* cpu is online but unresponsive */
} else {
state = ' '; /* cpu is responding to kdb */
if (kdb_task_state_char(KDB_TSK(i)) == 'I')
@@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv)
/*
* Validate cpunum
*/
- if ((cpunum > NR_CPUS) || !cpu_online(cpunum))
+ if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb)
return KDB_BADCPUNUM;
dbg_switch_cpu = cpunum;
@@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv)
return 0;
if (!kt->cmd_name)
continue;
+ if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true))
+ continue;
if (strlen(kt->cmd_usage) > 20)
space = "\n ";
kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name,
@@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv)
}
/*
- * kdb_register_repeat - This function is used to register a kernel
+ * kdb_register_flags - This function is used to register a kernel
* debugger command.
* Inputs:
* cmd Command name
@@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv)
* zero for success, one if a duplicate command.
*/
#define kdb_command_extend 50 /* arbitrary */
-int kdb_register_repeat(char *cmd,
- kdb_func_t func,
- char *usage,
- char *help,
- short minlen,
- kdb_repeat_t repeat)
+int kdb_register_flags(char *cmd,
+ kdb_func_t func,
+ char *usage,
+ char *help,
+ short minlen,
+ kdb_cmdflags_t flags)
{
int i;
kdbtab_t *kp;
@@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd,
kp->cmd_func = func;
kp->cmd_usage = usage;
kp->cmd_help = help;
- kp->cmd_flags = 0;
kp->cmd_minlen = minlen;
- kp->cmd_repeat = repeat;
+ kp->cmd_flags = flags;
return 0;
}
-EXPORT_SYMBOL_GPL(kdb_register_repeat);
+EXPORT_SYMBOL_GPL(kdb_register_flags);
/*
* kdb_register - Compatibility register function for commands that do
* not need to specify a repeat state. Equivalent to
- * kdb_register_repeat with KDB_REPEAT_NONE.
+ * kdb_register_flags with flags set to 0.
* Inputs:
* cmd Command name
* func Function to execute the command
@@ -2721,8 +2768,7 @@ int kdb_register(char *cmd,
char *help,
short minlen)
{
- return kdb_register_repeat(cmd, func, usage, help, minlen,
- KDB_REPEAT_NONE);
+ return kdb_register_flags(cmd, func, usage, help, minlen, 0);
}
EXPORT_SYMBOL_GPL(kdb_register);
@@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void)
for_each_kdbcmd(kp, i)
kp->cmd_name = NULL;
- kdb_register_repeat("md", kdb_md, "<vaddr>",
+ kdb_register_flags("md", kdb_md, "<vaddr>",
"Display Memory Contents, also mdWcN, e.g. md8c1", 1,
- KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>",
- "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>",
- "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mds", kdb_md, "<vaddr>",
- "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>",
- "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS);
- kdb_register_repeat("go", kdb_go, "[<vaddr>]",
- "Continue Execution", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("rd", kdb_rd, "",
- "Display Registers", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("rm", kdb_rm, "<reg> <contents>",
- "Modify Registers", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("ef", kdb_ef, "<vaddr>",
- "Display exception frame", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bt", kdb_bt, "[<vaddr>]",
- "Stack traceback", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("btp", kdb_bt, "<pid>",
- "Display stack for process <pid>", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
- "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("btc", kdb_bt, "",
- "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("btt", kdb_bt, "<vaddr>",
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>",
+ "Display Raw Memory", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>",
+ "Display Physical Memory", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mds", kdb_md, "<vaddr>",
+ "Display Memory Symbolically", 0,
+ KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>",
+ "Modify Memory Contents", 0,
+ KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS);
+ kdb_register_flags("go", kdb_go, "[<vaddr>]",
+ "Continue Execution", 1,
+ KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+ kdb_register_flags("rd", kdb_rd, "",
+ "Display Registers", 0,
+ KDB_ENABLE_REG_READ);
+ kdb_register_flags("rm", kdb_rm, "<reg> <contents>",
+ "Modify Registers", 0,
+ KDB_ENABLE_REG_WRITE);
+ kdb_register_flags("ef", kdb_ef, "<vaddr>",
+ "Display exception frame", 0,
+ KDB_ENABLE_MEM_READ);
+ kdb_register_flags("bt", kdb_bt, "[<vaddr>]",
+ "Stack traceback", 1,
+ KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+ kdb_register_flags("btp", kdb_bt, "<pid>",
+ "Display stack for process <pid>", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]",
+ "Backtrace all processes matching state flag", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("btc", kdb_bt, "",
+ "Backtrace current process on each cpu", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("btt", kdb_bt, "<vaddr>",
"Backtrace process given its struct task address", 0,
- KDB_REPEAT_NONE);
- kdb_register_repeat("env", kdb_env, "",
- "Show environment variables", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("set", kdb_set, "",
- "Set environment variables", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("help", kdb_help, "",
- "Display Help Message", 1, KDB_REPEAT_NONE);
- kdb_register_repeat("?", kdb_help, "",
- "Display Help Message", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("cpu", kdb_cpu, "<cpunum>",
- "Switch to new cpu", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("kgdb", kdb_kgdb, "",
- "Enter kgdb mode", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("ps", kdb_ps, "[<flags>|A]",
- "Display active task list", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("pid", kdb_pid, "<pidnum>",
- "Switch to another task", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("reboot", kdb_reboot, "",
- "Reboot the machine immediately", 0, KDB_REPEAT_NONE);
+ KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS);
+ kdb_register_flags("env", kdb_env, "",
+ "Show environment variables", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("set", kdb_set, "",
+ "Set environment variables", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("help", kdb_help, "",
+ "Display Help Message", 1,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("?", kdb_help, "",
+ "Display Help Message", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("cpu", kdb_cpu, "<cpunum>",
+ "Switch to new cpu", 0,
+ KDB_ENABLE_ALWAYS_SAFE_NO_ARGS);
+ kdb_register_flags("kgdb", kdb_kgdb, "",
+ "Enter kgdb mode", 0, 0);
+ kdb_register_flags("ps", kdb_ps, "[<flags>|A]",
+ "Display active task list", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("pid", kdb_pid, "<pidnum>",
+ "Switch to another task", 0,
+ KDB_ENABLE_INSPECT);
+ kdb_register_flags("reboot", kdb_reboot, "",
+ "Reboot the machine immediately", 0,
+ KDB_ENABLE_REBOOT);
#if defined(CONFIG_MODULES)
- kdb_register_repeat("lsmod", kdb_lsmod, "",
- "List loaded kernel modules", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("lsmod", kdb_lsmod, "",
+ "List loaded kernel modules", 0,
+ KDB_ENABLE_INSPECT);
#endif
#if defined(CONFIG_MAGIC_SYSRQ)
- kdb_register_repeat("sr", kdb_sr, "<key>",
- "Magic SysRq key", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("sr", kdb_sr, "<key>",
+ "Magic SysRq key", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
#endif
#if defined(CONFIG_PRINTK)
- kdb_register_repeat("dmesg", kdb_dmesg, "[lines]",
- "Display syslog buffer", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("dmesg", kdb_dmesg, "[lines]",
+ "Display syslog buffer", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
#endif
if (arch_kgdb_ops.enable_nmi) {
- kdb_register_repeat("disable_nmi", kdb_disable_nmi, "",
- "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE);
- }
- kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
- "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>",
- "Send a signal to a process", 0, KDB_REPEAT_NONE);
- kdb_register_repeat("summary", kdb_summary, "",
- "Summarize the system", 4, KDB_REPEAT_NONE);
- kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
- "Display per_cpu variables", 3, KDB_REPEAT_NONE);
- kdb_register_repeat("grephelp", kdb_grep_help, "",
- "Display help on | grep", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("disable_nmi", kdb_disable_nmi, "",
+ "Disable NMI entry to KDB", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ }
+ kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"",
+ "Define a set of commands, down to endefcmd", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("kill", kdb_kill, "<-signal> <pid>",
+ "Send a signal to a process", 0,
+ KDB_ENABLE_SIGNAL);
+ kdb_register_flags("summary", kdb_summary, "",
+ "Summarize the system", 4,
+ KDB_ENABLE_ALWAYS_SAFE);
+ kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]",
+ "Display per_cpu variables", 3,
+ KDB_ENABLE_MEM_READ);
+ kdb_register_flags("grephelp", kdb_grep_help, "",
+ "Display help on | grep", 0,
+ KDB_ENABLE_ALWAYS_SAFE);
}
/* Execute any commands defined in kdb_cmds. */
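
The permission check added in this file reduces to a mask intersection between a command's registration flags and the console's enable mask. Illustrative only — the KDB_ENABLE_* values live in the reworked include/linux/kdb.h, which is not part of this hunk:

	/* kdb.cmd_enable = 0x22: memory reads (0x02) + passive inspection (0x20). */
	int enable_mask = 0x22;

	/* "md" is registered with KDB_ENABLE_MEM_READ, so it is allowed. */
	bool md_ok = kdb_check_flags(KDB_ENABLE_MEM_READ, enable_mask, false);   /* true */

	/* "mm" is registered with KDB_ENABLE_MEM_WRITE, so kdb_parse()
	 * rejects it with KDB_NOPERM. */
	bool mm_ok = kdb_check_flags(KDB_ENABLE_MEM_WRITE, enable_mask, false);  /* false */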
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index 7afd3c8c41d5..eaacd1693954 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -172,10 +172,9 @@ typedef struct _kdbtab {
kdb_func_t cmd_func; /* Function to execute command */
char *cmd_usage; /* Usage String for this command */
char *cmd_help; /* Help message for this command */
- short cmd_flags; /* Parsing flags */
short cmd_minlen; /* Minimum legal # command
* chars required */
- kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */
+ kdb_cmdflags_t cmd_flags; /* Command behaviour flags */
} kdbtab_t;
extern int kdb_bt(int, const char **); /* KDB display back trace */
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 4c1ee7f2bebc..19efcf13375a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle,
}
static void perf_sample_regs_user(struct perf_regs *regs_user,
- struct pt_regs *regs)
+ struct pt_regs *regs,
+ struct pt_regs *regs_user_copy)
{
- if (!user_mode(regs)) {
- if (current->mm)
- regs = task_pt_regs(current);
- else
- regs = NULL;
- }
-
- if (regs) {
- regs_user->abi = perf_reg_abi(current);
+ if (user_mode(regs)) {
+ regs_user->abi = perf_reg_abi(current);
regs_user->regs = regs;
+ } else if (current->mm) {
+ perf_get_regs_user(regs_user, regs, regs_user_copy);
} else {
regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE;
regs_user->regs = NULL;
@@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header,
}
if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER))
- perf_sample_regs_user(&data->regs_user, regs);
+ perf_sample_regs_user(&data->regs_user, regs,
+ &data->regs_user_copy);
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
@@ -6779,7 +6776,6 @@ skip_type:
__perf_event_init_context(&cpuctx->ctx);
lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
- cpuctx->ctx.type = cpu_context;
cpuctx->ctx.pmu = pmu;
__perf_cpu_hrtimer_init(cpuctx, cpu);
@@ -7423,7 +7419,19 @@ SYSCALL_DEFINE5(perf_event_open,
* task or CPU context:
*/
if (move_group) {
- if (group_leader->ctx->type != ctx->type)
+ /*
+ * Make sure we're both on the same task, or both
+ * per-cpu events.
+ */
+ if (group_leader->ctx->task != ctx->task)
+ goto err_context;
+
+ /*
+ * Make sure we're both events for the same CPU;
+ * grouping events for different CPUs is broken; since
+ * you can never concurrently schedule them anyhow.
+ */
+ if (group_leader->cpu != event->cpu)
goto err_context;
} else {
if (group_leader->ctx != ctx)
diff --git a/kernel/exit.c b/kernel/exit.c
index 1ea4369890a3..6806c55475ee 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
static int wait_consider_task(struct wait_opts *wo, int ptrace,
struct task_struct *p)
{
+ /*
+ * We can race with wait_task_zombie() from another thread.
+ * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition
+ * can't confuse the checks below.
+ */
+ int exit_state = ACCESS_ONCE(p->exit_state);
int ret;
- if (unlikely(p->exit_state == EXIT_DEAD))
+ if (unlikely(exit_state == EXIT_DEAD))
return 0;
ret = eligible_child(wo, p);
@@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
return 0;
}
- if (unlikely(p->exit_state == EXIT_TRACE)) {
+ if (unlikely(exit_state == EXIT_TRACE)) {
/*
* ptrace == 0 means we are the natural parent. In this case
* we should clear notask_error, debugger will notify us.
@@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
}
/* slay zombie? */
- if (p->exit_state == EXIT_ZOMBIE) {
+ if (exit_state == EXIT_ZOMBIE) {
/* we don't reap group leaders with subthreads */
if (!delay_group_leader(p)) {
/*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 06f58309fed2..ee619929cf90 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -127,7 +127,7 @@ static void *alloc_insn_page(void)
static void free_insn_page(void *page)
{
- module_free(NULL, page);
+ module_memfree(page);
}
struct kprobe_insn_cache kprobe_insn_slots = {
diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c
index 5cf6731b98e9..3ef3736002d8 100644
--- a/kernel/locking/mutex-debug.c
+++ b/kernel/locking/mutex-debug.c
@@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock)
DEBUG_LOCKS_WARN_ON(lock->owner != current);
DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
- mutex_clear_owner(lock);
}
/*
* __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug
* mutexes so that we can do it here after we've verified state.
*/
+ mutex_clear_owner(lock);
atomic_set(&lock->count, 1);
}
diff --git a/kernel/module.c b/kernel/module.c
index 3965511ae133..d856e96a3cce 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -772,9 +772,18 @@ static int try_stop_module(struct module *mod, int flags, int *forced)
return 0;
}
-unsigned long module_refcount(struct module *mod)
+/**
+ * module_refcount - return the refcount or -1 if unloading
+ *
+ * @mod: the module we're checking
+ *
+ * Returns:
+ * -1 if the module is in the process of unloading
+ * otherwise the number of references in the kernel to the module
+ */
+int module_refcount(struct module *mod)
{
- return (unsigned long)atomic_read(&mod->refcnt) - MODULE_REF_BASE;
+ return atomic_read(&mod->refcnt) - MODULE_REF_BASE;
}
EXPORT_SYMBOL(module_refcount);
@@ -856,7 +865,7 @@ static inline void print_unload_info(struct seq_file *m, struct module *mod)
struct module_use *use;
int printed_something = 0;
- seq_printf(m, " %lu ", module_refcount(mod));
+ seq_printf(m, " %i ", module_refcount(mod));
/*
* Always include a trailing , so userspace can differentiate
@@ -908,7 +917,7 @@ EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
struct module_kobject *mk, char *buffer)
{
- return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
+ return sprintf(buffer, "%i\n", module_refcount(mk->mod));
}
static struct module_attribute modinfo_refcnt =
@@ -1795,7 +1804,7 @@ static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif
-void __weak module_free(struct module *mod, void *module_region)
+void __weak module_memfree(void *module_region)
{
vfree(module_region);
}
@@ -1804,6 +1813,10 @@ void __weak module_arch_cleanup(struct module *mod)
{
}
+void __weak module_arch_freeing_init(struct module *mod)
+{
+}
+
/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
@@ -1841,7 +1854,8 @@ static void free_module(struct module *mod)
/* This may be NULL, but that's OK */
unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
+ module_arch_freeing_init(mod);
+ module_memfree(mod->module_init);
kfree(mod->args);
percpu_modfree(mod);
@@ -1850,7 +1864,7 @@ static void free_module(struct module *mod)
/* Finally, free the core (containing the module structure) */
unset_module_core_ro_nx(mod);
- module_free(mod, mod->module_core);
+ module_memfree(mod->module_core);
#ifdef CONFIG_MPU
update_protections(current->mm);
@@ -2785,7 +2799,7 @@ static int move_module(struct module *mod, struct load_info *info)
*/
kmemleak_ignore(ptr);
if (!ptr) {
- module_free(mod, mod->module_core);
+ module_memfree(mod->module_core);
return -ENOMEM;
}
memset(ptr, 0, mod->init_size);
@@ -2930,8 +2944,9 @@ static struct module *layout_and_allocate(struct load_info *info, int flags)
static void module_deallocate(struct module *mod, struct load_info *info)
{
percpu_modfree(mod);
- module_free(mod, mod->module_init);
- module_free(mod, mod->module_core);
+ module_arch_freeing_init(mod);
+ module_memfree(mod->module_init);
+ module_memfree(mod->module_core);
}
int __weak module_finalize(const Elf_Ehdr *hdr,
@@ -2983,10 +2998,31 @@ static void do_mod_ctors(struct module *mod)
#endif
}
+/* For freeing module_init on success, in case kallsyms is still traversing it */
+struct mod_initfree {
+ struct rcu_head rcu;
+ void *module_init;
+};
+
+static void do_free_init(struct rcu_head *head)
+{
+ struct mod_initfree *m = container_of(head, struct mod_initfree, rcu);
+ module_memfree(m->module_init);
+ kfree(m);
+}
+
/* This is where the real work happens */
static int do_init_module(struct module *mod)
{
int ret = 0;
+ struct mod_initfree *freeinit;
+
+ freeinit = kmalloc(sizeof(*freeinit), GFP_KERNEL);
+ if (!freeinit) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ freeinit->module_init = mod->module_init;
/*
* We want to find out whether @mod uses async during init. Clear
@@ -2999,18 +3035,7 @@ static int do_init_module(struct module *mod)
if (mod->init != NULL)
ret = do_one_initcall(mod->init);
if (ret < 0) {
- /*
- * Init routine failed: abort. Try to protect us from
- * buggy refcounters.
- */
- mod->state = MODULE_STATE_GOING;
- synchronize_sched();
- module_put(mod);
- blocking_notifier_call_chain(&module_notify_list,
- MODULE_STATE_GOING, mod);
- free_module(mod);
- wake_up_all(&module_wq);
- return ret;
+ goto fail_free_freeinit;
}
if (ret > 0) {
pr_warn("%s: '%s'->init suspiciously returned %d, it should "
@@ -3055,15 +3080,35 @@ static int do_init_module(struct module *mod)
mod->strtab = mod->core_strtab;
#endif
unset_module_init_ro_nx(mod);
- module_free(mod, mod->module_init);
+ module_arch_freeing_init(mod);
mod->module_init = NULL;
mod->init_size = 0;
mod->init_ro_size = 0;
mod->init_text_size = 0;
+ /*
+ * We want to free module_init, but be aware that kallsyms may be
+ * walking this with preempt disabled. In all the failure paths,
+ * we call synchronize_rcu/synchronize_sched, but we don't want
+ * to slow down the success path, so use actual RCU here.
+ */
+ call_rcu(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
return 0;
+
+fail_free_freeinit:
+ kfree(freeinit);
+fail:
+ /* Try to protect us from buggy refcounters. */
+ mod->state = MODULE_STATE_GOING;
+ synchronize_sched();
+ module_put(mod);
+ blocking_notifier_call_chain(&module_notify_list,
+ MODULE_STATE_GOING, mod);
+ free_module(mod);
+ wake_up_all(&module_wq);
+ return ret;
}
static int may_init_module(void)
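
The do_init_module() change above defers freeing of the init section through call_rcu(), so a kallsyms walker running with preemption disabled cannot trip over memory that has just been freed on the success path. The same deferred-free pattern in a generic, self-contained form (illustrative names):

	#include <linux/slab.h>
	#include <linux/vmalloc.h>
	#include <linux/rcupdate.h>

	struct deferred_buf {
		struct rcu_head rcu;
		void *mem;
	};

	static void deferred_buf_free(struct rcu_head *head)
	{
		struct deferred_buf *d = container_of(head, struct deferred_buf, rcu);

		vfree(d->mem);
		kfree(d);
	}

	/* Queue 'mem' for freeing after a grace period instead of blocking
	 * the fast path in synchronize_rcu(). */
	static int free_after_grace_period(void *mem)
	{
		struct deferred_buf *d = kmalloc(sizeof(*d), GFP_KERNEL);

		if (!d)
			return -ENOMEM;
		d->mem = mem;
		call_rcu(&d->rcu, deferred_buf_free);
		return 0;
	}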
diff --git a/kernel/params.c b/kernel/params.c
index 0af9b2c4e56c..728e05b167de 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -642,12 +642,15 @@ static __modinit int add_sysfs_param(struct module_kobject *mk,
mk->mp->grp.attrs = new_attrs;
/* Tack new one on the end. */
+ memset(&mk->mp->attrs[mk->mp->num], 0, sizeof(mk->mp->attrs[0]));
sysfs_attr_init(&mk->mp->attrs[mk->mp->num].mattr.attr);
mk->mp->attrs[mk->mp->num].param = kp;
mk->mp->attrs[mk->mp->num].mattr.show = param_attr_show;
/* Do not allow runtime DAC changes to make param writable. */
if ((kp->perm & (S_IWUSR | S_IWGRP | S_IWOTH)) != 0)
mk->mp->attrs[mk->mp->num].mattr.store = param_attr_store;
+ else
+ mk->mp->attrs[mk->mp->num].mattr.store = NULL;
mk->mp->attrs[mk->mp->num].mattr.attr.name = (char *)name;
mk->mp->attrs[mk->mp->num].mattr.attr.mode = kp->perm;
mk->mp->num++;
diff --git a/kernel/range.c b/kernel/range.c
index 322ea8e93e4b..82cfc285b046 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2)
{
const struct range *r1 = x1;
const struct range *r2 = x2;
- s64 start1, start2;
- start1 = r1->start;
- start2 = r2->start;
-
- return start1 - start2;
+ if (r1->start < r2->start)
+ return -1;
+ if (r1->start > r2->start)
+ return 1;
+ return 0;
}
int clean_sort_range(struct range *range, int az)
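
The cmp_range() rewrite avoids the classic subtraction comparator bug: the s64 difference is silently truncated to the comparator's int return type, so ranges that differ only in the upper 32 bits of their start compare as equal (or in the wrong order). A stand-alone user-space sketch of the failure mode:

	#include <stdint.h>
	#include <stdio.h>

	/* Buggy: the 64-bit difference is truncated to int on return. */
	static int cmp_buggy(int64_t a, int64_t b)
	{
		return a - b;
	}

	static int cmp_fixed(int64_t a, int64_t b)
	{
		if (a < b)
			return -1;
		if (a > b)
			return 1;
		return 0;
	}

	int main(void)
	{
		int64_t a = 0x200000000LL, b = 0x100000000LL;	/* a > b */

		/* Prints "buggy=0 fixed=1": the buggy comparator reports a tie. */
		printf("buggy=%d fixed=%d\n", cmp_buggy(a, b), cmp_fixed(a, b));
		return 0;
	}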
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index b5797b78add6..e628cb11b560 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7113,9 +7113,6 @@ void __init sched_init(void)
#ifdef CONFIG_RT_GROUP_SCHED
alloc_size += 2 * nr_cpu_ids * sizeof(void **);
#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- alloc_size += num_possible_cpus() * cpumask_size();
-#endif
if (alloc_size) {
ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
@@ -7135,13 +7132,13 @@ void __init sched_init(void)
ptr += nr_cpu_ids * sizeof(void **);
#endif /* CONFIG_RT_GROUP_SCHED */
+ }
#ifdef CONFIG_CPUMASK_OFFSTACK
- for_each_possible_cpu(i) {
- per_cpu(load_balance_mask, i) = (void *)ptr;
- ptr += cpumask_size();
- }
-#endif /* CONFIG_CPUMASK_OFFSTACK */
+ for_each_possible_cpu(i) {
+ per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node(
+ cpumask_size(), GFP_KERNEL, cpu_to_node(i));
}
+#endif /* CONFIG_CPUMASK_OFFSTACK */
init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
@@ -7295,13 +7292,12 @@ void __might_sleep(const char *file, int line, int preempt_offset)
* since we will exit with TASK_RUNNING make sure we enter with it,
* otherwise we will destroy state.
*/
- if (WARN_ONCE(current->state != TASK_RUNNING,
+ WARN_ONCE(current->state != TASK_RUNNING && current->task_state_change,
"do not call blocking ops when !TASK_RUNNING; "
"state=%lx set at [<%p>] %pS\n",
current->state,
(void *)current->task_state_change,
- (void *)current->task_state_change))
- __set_current_state(TASK_RUNNING);
+ (void *)current->task_state_change);
___might_sleep(file, line, preempt_offset);
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index e5db8c6feebd..b52092f2636d 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
static
int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
{
- int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq));
- int rorun = dl_se->runtime <= 0;
-
- if (!rorun && !dmiss)
- return 0;
-
- /*
- * If we are beyond our current deadline and we are still
- * executing, then we have already used some of the runtime of
- * the next instance. Thus, if we do not account that, we are
- * stealing bandwidth from the system at each deadline miss!
- */
- if (dmiss) {
- dl_se->runtime = rorun ? dl_se->runtime : 0;
- dl_se->runtime -= rq_clock(rq) - dl_se->deadline;
- }
-
- return 1;
+ return (dl_se->runtime <= 0);
}
extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq);
@@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
* parameters of the task might need updating. Otherwise,
* we want a replenishment of its runtime.
*/
- if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH)
- replenish_dl_entity(dl_se, pi_se);
- else
+ if (dl_se->dl_new || flags & ENQUEUE_WAKEUP)
update_dl_entity(dl_se, pi_se);
+ else if (flags & ENQUEUE_REPLENISH)
+ replenish_dl_entity(dl_se, pi_se);
__enqueue_dl_entity(dl_se);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df2cdf77f899..40667cbf371b 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
{
+ /* init_cfs_bandwidth() was not called */
+ if (!cfs_b->throttled_cfs_rq.next)
+ return;
+
hrtimer_cancel(&cfs_b->period_timer);
hrtimer_cancel(&cfs_b->slack_timer);
}
@@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg)
* wl = S * s'_i; see (2)
*/
if (W > 0 && w < W)
- wl = (w * tg->shares) / W;
+ wl = (w * (long)tg->shares) / W;
else
wl = tg->shares;
diff --git a/kernel/sys.c b/kernel/sys.c
index a8c9f5a7dda6..ea9c88109894 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -2210,9 +2210,13 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
up_write(&me->mm->mmap_sem);
break;
case PR_MPX_ENABLE_MANAGEMENT:
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
error = MPX_ENABLE_MANAGEMENT(me);
break;
case PR_MPX_DISABLE_MANAGEMENT:
+ if (arg2 || arg3 || arg4 || arg5)
+ return -EINVAL;
error = MPX_DISABLE_MANAGEMENT(me);
break;
default:
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 87a346fd6d61..28bf91c60a0b 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -633,6 +633,13 @@ int ntp_validate_timex(struct timex *txc)
if ((txc->modes & ADJ_SETOFFSET) && (!capable(CAP_SYS_TIME)))
return -EPERM;
+ if (txc->modes & ADJ_FREQUENCY) {
+ if (LONG_MIN / PPM_SCALE > txc->freq)
+ return -EINVAL;
+ if (LONG_MAX / PPM_SCALE < txc->freq)
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6390517e77d4..2c85b7724af4 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -196,6 +196,10 @@ SYSCALL_DEFINE2(settimeofday, struct timeval __user *, tv,
if (tv) {
if (copy_from_user(&user_tv, tv, sizeof(*tv)))
return -EFAULT;
+
+ if (!timeval_valid(&user_tv))
+ return -EINVAL;
+
new_ts.tv_sec = user_tv.tv_sec;
new_ts.tv_nsec = user_tv.tv_usec * NSEC_PER_USEC;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 929a733d302e..224e768bdc73 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command)
}
static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
- struct ftrace_hash *old_hash)
+ struct ftrace_ops_hash *old_hash)
{
ops->flags |= FTRACE_OPS_FL_MODIFYING;
- ops->old_hash.filter_hash = old_hash;
+ ops->old_hash.filter_hash = old_hash->filter_hash;
+ ops->old_hash.notrace_hash = old_hash->notrace_hash;
ftrace_run_update_code(command);
ops->old_hash.filter_hash = NULL;
+ ops->old_hash.notrace_hash = NULL;
ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
}
@@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
static int ftrace_probe_registered;
-static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
+static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash)
{
int ret;
int i;
@@ -3637,6 +3639,7 @@ int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data)
{
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_func_probe *entry;
struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
struct ftrace_hash *old_hash = *orig_hash;
@@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&trace_probe_ops.func_hash->regex_lock);
+ old_hash_ops.filter_hash = old_hash;
+ /* Probes only have filters */
+ old_hash_ops.notrace_hash = NULL;
+
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
if (!hash) {
count = -ENOMEM;
@@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
- __enable_ftrace_function_probe(old_hash);
+ __enable_ftrace_function_probe(&old_hash_ops);
if (!ret)
free_ftrace_hash_rcu(old_hash);
@@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
}
static void ftrace_ops_update_code(struct ftrace_ops *ops,
- struct ftrace_hash *old_hash)
+ struct ftrace_ops_hash *old_hash)
{
- if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
+ struct ftrace_ops *op;
+
+ if (!ftrace_enabled)
+ return;
+
+ if (ops->flags & FTRACE_OPS_FL_ENABLED) {
ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
+ return;
+ }
+
+ /*
+ * If this is the shared global_ops filter, then we need to
+ * check if there is another ops that shares it, is enabled.
+ * If so, we still need to run the modify code.
+ */
+ if (ops->func_hash != &global_ops.local_hash)
+ return;
+
+ do_for_each_ftrace_op(op, ftrace_ops_list) {
+ if (op->func_hash == &global_ops.local_hash &&
+ op->flags & FTRACE_OPS_FL_ENABLED) {
+ ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash);
+ /* Only need to do this once */
+ return;
+ }
+ } while_for_each_ftrace_op(op);
}
static int
@@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
{
struct ftrace_hash **orig_hash;
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_hash *old_hash;
struct ftrace_hash *hash;
int ret;
@@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+ old_hash_ops.filter_hash = ops->func_hash->filter_hash;
+ old_hash_ops.notrace_hash = ops->func_hash->notrace_hash;
ret = ftrace_hash_move(ops, enable, orig_hash, hash);
if (!ret) {
- ftrace_ops_update_code(ops, old_hash);
+ ftrace_ops_update_code(ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
@@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void)
int ftrace_regex_release(struct inode *inode, struct file *file)
{
struct seq_file *m = (struct seq_file *)file->private_data;
+ struct ftrace_ops_hash old_hash_ops;
struct ftrace_iterator *iter;
struct ftrace_hash **orig_hash;
struct ftrace_hash *old_hash;
@@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
mutex_lock(&ftrace_lock);
old_hash = *orig_hash;
+ old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash;
+ old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash;
ret = ftrace_hash_move(iter->ops, filter_hash,
orig_hash, iter->hash);
if (!ret) {
- ftrace_ops_update_code(iter->ops, old_hash);
+ ftrace_ops_update_code(iter->ops, &old_hash_ops);
free_ftrace_hash_rcu(old_hash);
}
mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 2e767972e99c..4a9079b9f082 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -6918,7 +6918,6 @@ void __init trace_init(void)
tracepoint_printk = 0;
}
tracer_alloc_buffers();
- init_ftrace_syscalls();
trace_event_init();
}
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 366a78a3e61e..b03a0ea77b99 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void)
return 0;
}
+static __init void
+early_enable_events(struct trace_array *tr, bool disable_first)
+{
+ char *buf = bootup_event_buf;
+ char *token;
+ int ret;
+
+ while (true) {
+ token = strsep(&buf, ",");
+
+ if (!token)
+ break;
+ if (!*token)
+ continue;
+
+ /* Restarting syscalls requires that we stop them first */
+ if (disable_first)
+ ftrace_set_clr_event(tr, token, 0);
+
+ ret = ftrace_set_clr_event(tr, token, 1);
+ if (ret)
+ pr_warn("Failed to enable trace event: %s\n", token);
+
+ /* Put back the comma to allow this to be called again */
+ if (buf)
+ *(buf - 1) = ',';
+ }
+}
+
static __init int event_trace_enable(void)
{
struct trace_array *tr = top_trace_array();
struct ftrace_event_call **iter, *call;
- char *buf = bootup_event_buf;
- char *token;
int ret;
if (!tr)
@@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void)
*/
__trace_early_add_events(tr);
- while (true) {
- token = strsep(&buf, ",");
-
- if (!token)
- break;
- if (!*token)
- continue;
-
- ret = ftrace_set_clr_event(tr, token, 1);
- if (ret)
- pr_warn("Failed to enable trace event: %s\n", token);
- }
+ early_enable_events(tr, false);
trace_printk_start_comm();
@@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void)
return 0;
}
+/*
+ * event_trace_enable() is called from trace_event_init() first to
+ * initialize events and perhaps start any events that are on the
+ * command line. Unfortunately, there are some events that will not
+ * start this early, like the system call tracepoints that need
+ * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
+ * is called before pid 1 starts, so the flag is never set, the
+ * syscall tracepoint is never reached, and the event is enabled
+ * but does nothing.
+ */
+static __init int event_trace_enable_again(void)
+{
+ struct trace_array *tr;
+
+ tr = top_trace_array();
+ if (!tr)
+ return -ENODEV;
+
+ early_enable_events(tr, true);
+
+ return 0;
+}
+
+early_initcall(event_trace_enable_again);
+
static __init int event_trace_init(void)
{
struct trace_array *tr;
diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c
index b0b1c44e923a..3ccf5c2c1320 100644
--- a/kernel/trace/trace_kdb.c
+++ b/kernel/trace/trace_kdb.c
@@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv)
static __init int kdb_ftrace_register(void)
{
- kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
- "Dump ftrace log", 0, KDB_REPEAT_NONE);
+ kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]",
+ "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE);
return 0;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6202b08f1933..beeeac9e0e3e 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool)
* spin_lock_irq(pool->lock) which may be released and regrabbed
* multiple times. Does GFP_KERNEL allocations. Called only from
* manager.
- *
- * Return:
- * %false if no action was taken and pool->lock stayed locked, %true
- * otherwise.
*/
-static bool maybe_create_worker(struct worker_pool *pool)
+static void maybe_create_worker(struct worker_pool *pool)
__releases(&pool->lock)
__acquires(&pool->lock)
{
- if (!need_to_create_worker(pool))
- return false;
restart:
spin_unlock_irq(&pool->lock);
@@ -1877,7 +1871,6 @@ restart:
*/
if (need_to_create_worker(pool))
goto restart;
- return true;
}
/**
@@ -1897,16 +1890,14 @@ restart:
* multiple times. Does GFP_KERNEL allocations.
*
* Return:
- * %false if the pool don't need management and the caller can safely start
- * processing works, %true indicates that the function released pool->lock
- * and reacquired it to perform some management function and that the
- * conditions that the caller verified while holding the lock before
- * calling the function might no longer be true.
+ * %false if the pool doesn't need management and the caller can safely
+ * start processing works, %true if the management function was performed and
+ * the conditions that the caller verified before calling the function may
+ * no longer be true.
*/
static bool manage_workers(struct worker *worker)
{
struct worker_pool *pool = worker->pool;
- bool ret = false;
/*
* Anyone who successfully grabs manager_arb wins the arbitration
@@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker)
* actual management, the pool may stall indefinitely.
*/
if (!mutex_trylock(&pool->manager_arb))
- return ret;
+ return false;
- ret |= maybe_create_worker(pool);
+ maybe_create_worker(pool);
mutex_unlock(&pool->manager_arb);
- return ret;
+ return true;
}
/**
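For context, the boolean return still matters to the one caller, worker_thread(): after a management pass it re-checks its conditions rather than assuming pool->lock stayed held throughout. An abbreviated sketch of that caller follows; it is not part of this patch and is shown only to illustrate the contract described in the Return section above.

recheck:
	/* no more worker necessary? */
	if (!need_more_worker(pool))
		goto sleep;

	/* manage_workers() returning true means a management function ran
	 * and the conditions checked above must be re-evaluated.
	 */
	if (unlikely(!may_start_working(pool)) && manage_workers(worker))
		goto recheck;
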
diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb
index 358eb81fa28d..c635a107a7de 100644
--- a/lib/Kconfig.kgdb
+++ b/lib/Kconfig.kgdb
@@ -73,6 +73,31 @@ config KGDB_KDB
help
KDB frontend for kernel
+config KDB_DEFAULT_ENABLE
+ hex "KDB: Select kdb command functions to be enabled by default"
+ depends on KGDB_KDB
+ default 0x1
+ help
+ Specifies which kdb commands are enabled by default. This may
+ be set to 1 or 0 to enable all commands or disable almost all
+ commands.
+
+ Alternatively the following bitmask applies:
+
+ 0x0002 - allow arbitrary reads from memory and symbol lookup
+ 0x0004 - allow arbitrary writes to memory
+ 0x0008 - allow current register state to be inspected
+ 0x0010 - allow current register state to be modified
+ 0x0020 - allow passive inspection (backtrace, process list, lsmod)
+ 0x0040 - allow flow control management (breakpoint, single step)
+ 0x0080 - enable signalling of processes
+ 0x0100 - allow machine to be rebooted
+
+ The config option merely sets the default at boot time. Either
+ issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or
+ setting kdb.cmd_enable=X on the kernel command line will
+ override the default setting.
+
config KDB_KEYBOARD
bool "KGDB_KDB: keyboard as input device"
depends on VT && KGDB_KDB
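As a quick illustration of how the new KDB_DEFAULT_ENABLE bits compose: they are ordinary OR'd flags, and only the resulting hex value is consumed (via the Kconfig default, the sysfs node, or kdb.cmd_enable=). The macro names in this small userspace sketch are invented for the example and are not part of the patch.

#include <stdio.h>

/* Hypothetical names for the bits documented in the help text above. */
#define KDB_BIT_MEM_READ   0x0002	/* arbitrary reads and symbol lookup */
#define KDB_BIT_INSPECT    0x0020	/* backtrace, process list, lsmod */
#define KDB_BIT_FLOW_CTRL  0x0040	/* breakpoints, single step */

int main(void)
{
	unsigned int mask = KDB_BIT_MEM_READ | KDB_BIT_INSPECT | KDB_BIT_FLOW_CTRL;

	/* Prints "kdb.cmd_enable=0x62", a read-only debugging setup. */
	printf("kdb.cmd_enable=0x%x\n", mask);
	return 0;
}
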
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 2404d03e251a..03dd576e6773 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -11,6 +11,7 @@
* 2 of the Licence, or (at your option) any later version.
*/
//#define DEBUG
+#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/assoc_array_priv.h>
diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug
index 56badfc4810a..957d3da53ddd 100644
--- a/mm/Kconfig.debug
+++ b/mm/Kconfig.debug
@@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC
depends on !KMEMCHECK
select PAGE_EXTENSION
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
- select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
---help---
Unmap pages from the kernel linear mapping after free_pages().
This results in a large slowdown, but helps to find certain types
@@ -27,13 +26,5 @@ config DEBUG_PAGEALLOC
that would result in incorrect warnings of memory corruption after
a resume because free pages are not saved to the suspend image.
-config WANT_PAGE_DEBUG_FLAGS
- bool
-
config PAGE_POISONING
bool
- select WANT_PAGE_DEBUG_FLAGS
-
-config PAGE_GUARD
- bool
- select WANT_PAGE_DEBUG_FLAGS
diff --git a/mm/filemap.c b/mm/filemap.c
index bd8543c6508f..673e4581a2e5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1046,8 +1046,7 @@ EXPORT_SYMBOL(find_lock_entry);
* @mapping: the address_space to search
* @offset: the page index
* @fgp_flags: PCG flags
- * @cache_gfp_mask: gfp mask to use for the page cache data page allocation
- * @radix_gfp_mask: gfp mask to use for radix tree node allocation
+ * @gfp_mask: gfp mask to use for the page cache data page allocation
*
* Looks up the page cache slot at @mapping & @offset.
*
@@ -1056,11 +1055,9 @@ EXPORT_SYMBOL(find_lock_entry);
* FGP_ACCESSED: the page will be marked accessed
* FGP_LOCK: Page is return locked
* FGP_CREAT: If page is not present then a new page is allocated using
- * @cache_gfp_mask and added to the page cache and the VM's LRU
- * list. If radix tree nodes are allocated during page cache
- * insertion then @radix_gfp_mask is used. The page is returned
- * locked and with an increased refcount. Otherwise, %NULL is
- * returned.
+ * @gfp_mask and added to the page cache and the VM's LRU
+ * list. The page is returned locked and with an increased
+ * refcount. Otherwise, %NULL is returned.
*
* If FGP_LOCK or FGP_CREAT are specified then the function may sleep even
* if the GFP flags specified for FGP_CREAT are atomic.
@@ -1068,7 +1065,7 @@ EXPORT_SYMBOL(find_lock_entry);
* If there is a page cache page, it is returned with an increased refcount.
*/
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t offset,
- int fgp_flags, gfp_t cache_gfp_mask, gfp_t radix_gfp_mask)
+ int fgp_flags, gfp_t gfp_mask)
{
struct page *page;
@@ -1105,13 +1102,11 @@ no_page:
if (!page && (fgp_flags & FGP_CREAT)) {
int err;
if ((fgp_flags & FGP_WRITE) && mapping_cap_account_dirty(mapping))
- cache_gfp_mask |= __GFP_WRITE;
- if (fgp_flags & FGP_NOFS) {
- cache_gfp_mask &= ~__GFP_FS;
- radix_gfp_mask &= ~__GFP_FS;
- }
+ gfp_mask |= __GFP_WRITE;
+ if (fgp_flags & FGP_NOFS)
+ gfp_mask &= ~__GFP_FS;
- page = __page_cache_alloc(cache_gfp_mask);
+ page = __page_cache_alloc(gfp_mask);
if (!page)
return NULL;
@@ -1122,7 +1117,8 @@ no_page:
if (fgp_flags & FGP_ACCESSED)
__SetPageReferenced(page);
- err = add_to_page_cache_lru(page, mapping, offset, radix_gfp_mask);
+ err = add_to_page_cache_lru(page, mapping, offset,
+ gfp_mask & GFP_RECLAIM_MASK);
if (unlikely(err)) {
page_cache_release(page);
page = NULL;
@@ -2443,8 +2439,7 @@ struct page *grab_cache_page_write_begin(struct address_space *mapping,
fgp_flags |= FGP_NOFS;
page = pagecache_get_page(mapping, index, fgp_flags,
- mapping_gfp_mask(mapping),
- GFP_KERNEL);
+ mapping_gfp_mask(mapping));
if (page)
wait_for_stable_page(page);
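With the two gfp arguments collapsed into one, a caller now passes a single mask that covers both the data page allocation and (filtered through GFP_RECLAIM_MASK internally) the radix tree insertion. A minimal sketch of such a caller, modelled on grab_cache_page_write_begin() above; the wrapper name is made up for illustration.

#include <linux/pagemap.h>

/* Sketch only: grab a locked page for writing, optionally without __GFP_FS. */
static struct page *example_grab_page(struct address_space *mapping,
				      pgoff_t index, bool nofs)
{
	int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT;

	if (nofs)
		fgp_flags |= FGP_NOFS;	/* pagecache_get_page() clears __GFP_FS */

	/* One gfp mask now serves both the page and the radix tree nodes. */
	return pagecache_get_page(mapping, index, fgp_flags,
				  mapping_gfp_mask(mapping));
}
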
diff --git a/mm/gup.c b/mm/gup.c
index a900759cc807..8dd50ce6326f 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -296,7 +296,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return *flags & FOLL_HWPOISON ? -EHWPOISON : -EFAULT;
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
BUG();
}
@@ -571,7 +571,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
return -ENOMEM;
if (ret & (VM_FAULT_HWPOISON | VM_FAULT_HWPOISON_LARGE))
return -EHWPOISON;
- if (ret & VM_FAULT_SIGBUS)
+ if (ret & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV))
return -EFAULT;
BUG();
}
diff --git a/mm/ksm.c b/mm/ksm.c
index d247efab5073..15647fb0394f 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -376,7 +376,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
else
ret = VM_FAULT_WRITE;
put_page(page);
- } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
+ } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV | VM_FAULT_OOM)));
/*
* We must loop because handle_mm_fault() may back out if there's
* any difficulty e.g. if pte accessed bit gets updated concurrently.
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ef91e856c7e4..683b4782019b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1477,9 +1477,9 @@ void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
pr_info("Task in ");
pr_cont_cgroup_path(task_cgroup(p, memory_cgrp_id));
- pr_info(" killed as a result of limit of ");
+ pr_cont(" killed as a result of limit of ");
pr_cont_cgroup_path(memcg->css.cgroup);
- pr_info("\n");
+ pr_cont("\n");
rcu_read_unlock();
@@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) {
mem_cgroup_swap_statistics(from, false);
mem_cgroup_swap_statistics(to, true);
- /*
- * This function is only called from task migration context now.
- * It postpones page_counter and refcount handling till the end
- * of task migration(mem_cgroup_clear_mc()) for performance
- * improvement. But we cannot postpone css_get(to) because if
- * the process that has been moved to @to does swap-in, the
- * refcount of @to might be decreased to 0.
- *
- * We are in attach() phase, so the cgroup is guaranteed to be
- * alive, so we can just call css_get().
- */
- css_get(&to->css);
return 0;
}
return -EINVAL;
@@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (parent_css == NULL) {
root_mem_cgroup = memcg;
page_counter_init(&memcg->memory, NULL);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
}
@@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
if (parent->use_hierarchy) {
page_counter_init(&memcg->memory, &parent->memory);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, &parent->memsw);
page_counter_init(&memcg->kmem, &parent->kmem);
@@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css)
*/
} else {
page_counter_init(&memcg->memory, NULL);
+ memcg->soft_limit = PAGE_COUNTER_MAX;
page_counter_init(&memcg->memsw, NULL);
page_counter_init(&memcg->kmem, NULL);
/*
@@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css)
mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX);
mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX);
memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX);
- memcg->soft_limit = 0;
+ memcg->soft_limit = PAGE_COUNTER_MAX;
}
#ifdef CONFIG_MMU
diff --git a/mm/memory.c b/mm/memory.c
index 649e7d440bd7..2c3536cc6c63 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long
static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
+ if (!tlb->end)
+ return;
+
tlb_flush(tlb);
mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
#ifdef CONFIG_HAVE_RCU_TABLE_FREE
@@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
{
struct mmu_gather_batch *batch;
- for (batch = &tlb->local; batch; batch = batch->next) {
+ for (batch = &tlb->local; batch && batch->nr; batch = batch->next) {
free_pages_and_swap_cache(batch->pages, batch->nr);
batch->nr = 0;
}
@@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb)
void tlb_flush_mmu(struct mmu_gather *tlb)
{
- if (!tlb->end)
- return;
-
tlb_flush_mmu_tlbonly(tlb);
tlb_flush_mmu_free(tlb);
}
@@ -2137,17 +2137,24 @@ reuse:
if (!dirty_page)
return ret;
- /*
- * Yes, Virginia, this is actually required to prevent a race
- * with clear_page_dirty_for_io() from clearing the page dirty
- * bit after it clear all dirty ptes, but before a racing
- * do_wp_page installs a dirty pte.
- *
- * do_shared_fault is protected similarly.
- */
if (!page_mkwrite) {
- wait_on_page_locked(dirty_page);
- set_page_dirty_balance(dirty_page);
+ struct address_space *mapping;
+ int dirtied;
+
+ lock_page(dirty_page);
+ dirtied = set_page_dirty(dirty_page);
+ VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page);
+ mapping = dirty_page->mapping;
+ unlock_page(dirty_page);
+
+ if (dirtied && mapping) {
+ /*
+ * Some device drivers do not set page.mapping
+ * but still dirty their pages
+ */
+ balance_dirty_pages_ratelimited(mapping);
+ }
+
/* file_update_time outside page_lock */
if (vma->vm_file)
file_update_time(vma->vm_file);
@@ -2378,12 +2385,12 @@ void unmap_mapping_range(struct address_space *mapping,
details.last_index = ULONG_MAX;
- i_mmap_lock_read(mapping);
+ i_mmap_lock_write(mapping);
if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
unmap_mapping_range_tree(&mapping->i_mmap, &details);
if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
- i_mmap_unlock_read(mapping);
+ i_mmap_unlock_write(mapping);
}
EXPORT_SYMBOL(unmap_mapping_range);
@@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
if (prev && prev->vm_end == address)
return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
- expand_downwards(vma, address - PAGE_SIZE);
+ return expand_downwards(vma, address - PAGE_SIZE);
}
if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
struct vm_area_struct *next = vma->vm_next;
@@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo
if (next && next->vm_start == address + PAGE_SIZE)
return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
- expand_upwards(vma, address + PAGE_SIZE);
+ return expand_upwards(vma, address + PAGE_SIZE);
}
return 0;
}
@@ -2625,7 +2632,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* Check if we need to add a guard page to the stack */
if (check_stack_guard_page(vma, address) < 0)
- return VM_FAULT_SIGBUS;
+ return VM_FAULT_SIGSEGV;
/* Use the zero-page for reads */
if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
diff --git a/mm/mmap.c b/mm/mmap.c
index 7b36aa7cc89a..7f684d5a8087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -778,10 +778,12 @@ again: remove_next = 1 + (end > next->vm_end);
if (exporter && exporter->anon_vma && !importer->anon_vma) {
int error;
+ importer->anon_vma = exporter->anon_vma;
error = anon_vma_clone(importer, exporter);
- if (error)
+ if (error) {
+ importer->anon_vma = NULL;
return error;
- importer->anon_vma = exporter->anon_vma;
+ }
}
}
@@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
{
struct mm_struct *mm = vma->vm_mm;
struct rlimit *rlim = current->signal->rlim;
- unsigned long new_start;
+ unsigned long new_start, actual_size;
/* address space limit tests */
if (!may_expand_vm(mm, grow))
return -ENOMEM;
/* Stack limit test */
- if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+ actual_size = size;
+ if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN)))
+ actual_size -= PAGE_SIZE;
+ if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
return -ENOMEM;
/* mlock limit tests */
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index d5d81f5384d1..6f4335238e33 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1541,16 +1541,6 @@ pause:
bdi_start_background_writeback(bdi);
}
-void set_page_dirty_balance(struct page *page)
-{
- if (set_page_dirty(page)) {
- struct address_space *mapping = page_mapping(page);
-
- if (mapping)
- balance_dirty_pages_ratelimited(mapping);
- }
-}
-
static DEFINE_PER_CPU(int, bdp_ratelimits);
/*
@@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied);
* page dirty in that case, but not all the buffers. This is a "bottom-up"
* dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying.
*
- * Most callers have locked the page, which pins the address_space in memory.
- * But zap_pte_range() does not lock the page, however in that case the
- * mapping is pinned by the vma's ->vm_file reference.
- *
- * We take care to handle the case where the page was truncated from the
- * mapping by re-checking page_mapping() inside tree_lock.
+ * The caller must ensure this doesn't race with truncation. Most will simply
+ * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and
+ * the pte lock held, which also locks out truncation.
*/
int __set_page_dirty_nobuffers(struct page *page)
{
if (!TestSetPageDirty(page)) {
struct address_space *mapping = page_mapping(page);
- struct address_space *mapping2;
unsigned long flags;
if (!mapping)
return 1;
spin_lock_irqsave(&mapping->tree_lock, flags);
- mapping2 = page_mapping(page);
- if (mapping2) { /* Race with truncate? */
- BUG_ON(mapping2 != mapping);
- WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
- account_page_dirtied(page, mapping);
- radix_tree_tag_set(&mapping->page_tree,
- page_index(page), PAGECACHE_TAG_DIRTY);
- }
+ BUG_ON(page_mapping(page) != mapping);
+ WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page));
+ account_page_dirtied(page, mapping);
+ radix_tree_tag_set(&mapping->page_tree, page_index(page),
+ PAGECACHE_TAG_DIRTY);
spin_unlock_irqrestore(&mapping->tree_lock, flags);
if (mapping->host) {
/* !PageAnon && !swapper_space */
@@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page)
/*
* We carefully synchronise fault handlers against
* installing a dirty pte and marking the page dirty
- * at this point. We do this by having them hold the
- * page lock at some point after installing their
- * pte, but before marking the page dirty.
- * Pages are always locked coming in here, so we get
- * the desired exclusion. See mm/memory.c:do_wp_page()
- * for more comments.
+ * at this point. We do this by having them hold the
+ * page lock while dirtying the page, and pages are
+ * always locked coming in here, so we get the desired
+ * exclusion.
*/
if (TestClearPageDirty(page)) {
dec_zone_page_state(page, NR_FILE_DIRTY);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7633c503a116..8e20f9c2fa5a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2332,12 +2332,21 @@ static inline struct page *
__alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, struct zone *preferred_zone,
- int classzone_idx, int migratetype)
+ int classzone_idx, int migratetype, unsigned long *did_some_progress)
{
struct page *page;
- /* Acquire the per-zone oom lock for each zone */
+ *did_some_progress = 0;
+
+ if (oom_killer_disabled)
+ return NULL;
+
+ /*
+ * Acquire the per-zone oom lock for each zone. If that
+ * fails, somebody else is making progress for us.
+ */
if (!oom_zonelist_trylock(zonelist, gfp_mask)) {
+ *did_some_progress = 1;
schedule_timeout_uninterruptible(1);
return NULL;
}
@@ -2363,12 +2372,18 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
goto out;
if (!(gfp_mask & __GFP_NOFAIL)) {
+ /* Coredumps can quickly deplete all memory reserves */
+ if (current->flags & PF_DUMPCORE)
+ goto out;
/* The OOM killer will not help higher order allocs */
if (order > PAGE_ALLOC_COSTLY_ORDER)
goto out;
/* The OOM killer does not needlessly kill tasks for lowmem */
if (high_zoneidx < ZONE_NORMAL)
goto out;
+ /* The OOM killer does not compensate for light reclaim */
+ if (!(gfp_mask & __GFP_FS))
+ goto out;
/*
* GFP_THISNODE contains __GFP_NORETRY and we never hit this.
* Sanity check for bare calls of __GFP_THISNODE, not real OOM.
@@ -2381,7 +2396,7 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
}
/* Exhausted what can be done so it's blamo time */
out_of_memory(zonelist, gfp_mask, order, nodemask, false);
-
+ *did_some_progress = 1;
out:
oom_zonelist_unlock(zonelist, gfp_mask);
return page;
@@ -2658,7 +2673,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(gfp_mask & GFP_THISNODE) == GFP_THISNODE)
goto nopage;
-restart:
+retry:
if (!(gfp_mask & __GFP_NO_KSWAPD))
wake_all_kswapds(order, zonelist, high_zoneidx,
preferred_zone, nodemask);
@@ -2681,7 +2696,6 @@ restart:
classzone_idx = zonelist_zone_idx(preferred_zoneref);
}
-rebalance:
/* This is the last chance, in general, before the goto nopage. */
page = get_page_from_freelist(gfp_mask, nodemask, order, zonelist,
high_zoneidx, alloc_flags & ~ALLOC_NO_WATERMARKS,
@@ -2788,54 +2802,28 @@ rebalance:
if (page)
goto got_pg;
- /*
- * If we failed to make any progress reclaiming, then we are
- * running out of options and have to consider going OOM
- */
- if (!did_some_progress) {
- if (oom_gfp_allowed(gfp_mask)) {
- if (oom_killer_disabled)
- goto nopage;
- /* Coredumps can quickly deplete all memory reserves */
- if ((current->flags & PF_DUMPCORE) &&
- !(gfp_mask & __GFP_NOFAIL))
- goto nopage;
- page = __alloc_pages_may_oom(gfp_mask, order,
- zonelist, high_zoneidx,
- nodemask, preferred_zone,
- classzone_idx, migratetype);
- if (page)
- goto got_pg;
-
- if (!(gfp_mask & __GFP_NOFAIL)) {
- /*
- * The oom killer is not called for high-order
- * allocations that may fail, so if no progress
- * is being made, there are no other options and
- * retrying is unlikely to help.
- */
- if (order > PAGE_ALLOC_COSTLY_ORDER)
- goto nopage;
- /*
- * The oom killer is not called for lowmem
- * allocations to prevent needlessly killing
- * innocent tasks.
- */
- if (high_zoneidx < ZONE_NORMAL)
- goto nopage;
- }
-
- goto restart;
- }
- }
-
/* Check if we should retry the allocation */
pages_reclaimed += did_some_progress;
if (should_alloc_retry(gfp_mask, order, did_some_progress,
pages_reclaimed)) {
+ /*
+ * If we fail to make progress by freeing individual
+ * pages, but the allocation wants us to keep going,
+ * start OOM killing tasks.
+ */
+ if (!did_some_progress) {
+ page = __alloc_pages_may_oom(gfp_mask, order, zonelist,
+ high_zoneidx, nodemask,
+ preferred_zone, classzone_idx,
+ migratetype, &did_some_progress);
+ if (page)
+ goto got_pg;
+ if (!did_some_progress)
+ goto nopage;
+ }
/* Wait for some write requests to complete then retry */
wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/50);
- goto rebalance;
+ goto retry;
} else {
/*
* High-order allocations do not necessarily loop after
diff --git a/mm/rmap.c b/mm/rmap.c
index c5bc241127b2..71cd5bd0c17d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void)
anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
if (anon_vma) {
atomic_set(&anon_vma->refcount, 1);
+ anon_vma->degree = 1; /* Reference for first vma */
+ anon_vma->parent = anon_vma;
/*
* Initialise the anon_vma root to point to itself. If called
* from fork, the root will be reset to the parents anon_vma.
@@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma)
if (likely(!vma->anon_vma)) {
vma->anon_vma = anon_vma;
anon_vma_chain_link(vma, avc, anon_vma);
+ /* vma reference or self-parent link for new root */
+ anon_vma->degree++;
allocated = NULL;
avc = NULL;
}
@@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
/*
* Attach the anon_vmas from src to dst.
* Returns 0 on success, -ENOMEM on failure.
+ *
+ * If dst->anon_vma is NULL this function tries to find and reuse an existing
+ * anon_vma which has no vmas and only one child anon_vma. This prevents the
+ * anon_vma hierarchy from degenerating into an endless linear chain when a
+ * task forks constantly. On the other hand, an anon_vma with more than one
+ * child is not reused even if it no longer has a live vma, so the rmap
+ * walker has a good chance of avoiding a scan of the whole hierarchy when it
+ * searches for where a page is mapped.
*/
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
@@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
anon_vma = pavc->anon_vma;
root = lock_anon_vma_root(root, anon_vma);
anon_vma_chain_link(dst, avc, anon_vma);
+
+ /*
+ * Reuse an existing anon_vma if its degree is lower than two,
+ * which means it has no vma and only one anon_vma child.
+ *
+ * Do not choose the parent anon_vma, otherwise the first child
+ * will always reuse it. The root anon_vma is never reused:
+ * it has a self-parent reference and at least one child.
+ */
+ if (!dst->anon_vma && anon_vma != src->anon_vma &&
+ anon_vma->degree < 2)
+ dst->anon_vma = anon_vma;
}
+ if (dst->anon_vma)
+ dst->anon_vma->degree++;
unlock_anon_vma_root(root);
return 0;
@@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
if (!pvma->anon_vma)
return 0;
+ /* Drop inherited anon_vma, we'll reuse existing or allocate new. */
+ vma->anon_vma = NULL;
+
/*
* First, attach the new VMA to the parent VMA's anon_vmas,
* so rmap can find non-COWed pages in child processes.
@@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
if (error)
return error;
+ /* An existing anon_vma has been reused, all done then. */
+ if (vma->anon_vma)
+ return 0;
+
/* Then add our own anon_vma. */
anon_vma = anon_vma_alloc();
if (!anon_vma)
@@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
* lock any of the anon_vmas in this anon_vma tree.
*/
anon_vma->root = pvma->anon_vma->root;
+ anon_vma->parent = pvma->anon_vma;
/*
* With refcounts, an anon_vma can stay around longer than the
* process it belongs to. The root anon_vma needs to be pinned until
@@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
vma->anon_vma = anon_vma;
anon_vma_lock_write(anon_vma);
anon_vma_chain_link(vma, avc, anon_vma);
+ anon_vma->parent->degree++;
anon_vma_unlock_write(anon_vma);
return 0;
@@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
* Leave empty anon_vmas on the list - we'll need
* to free them outside the lock.
*/
- if (RB_EMPTY_ROOT(&anon_vma->rb_root))
+ if (RB_EMPTY_ROOT(&anon_vma->rb_root)) {
+ anon_vma->parent->degree--;
continue;
+ }
list_del(&avc->same_vma);
anon_vma_chain_free(avc);
}
+ if (vma->anon_vma)
+ vma->anon_vma->degree--;
unlock_anon_vma_root(root);
/*
@@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma)
list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
struct anon_vma *anon_vma = avc->anon_vma;
+ BUG_ON(anon_vma->degree);
put_anon_vma(anon_vma);
list_del(&avc->same_vma);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index bd9a72bc4a1b..dcd90c891d8e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2656,7 +2656,7 @@ static bool throttle_direct_reclaim(gfp_t gfp_mask, struct zonelist *zonelist,
* should make reasonable progress.
*/
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- gfp_mask, nodemask) {
+ gfp_zone(gfp_mask), nodemask) {
if (zone_idx(zone) > ZONE_NORMAL)
continue;
@@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
return false;
/*
- * There is a potential race between when kswapd checks its watermarks
- * and a process gets throttled. There is also a potential race if
- * processes get throttled, kswapd wakes, a large process exits therby
- * balancing the zones that causes kswapd to miss a wakeup. If kswapd
- * is going to sleep, no process should be sleeping on pfmemalloc_wait
- * so wake them now if necessary. If necessary, processes will wake
- * kswapd and get throttled again
+ * The throttled processes are normally woken up in balance_pgdat() as
+ * soon as pfmemalloc_watermark_ok() is true. But there is a potential
+ * race between when kswapd checks the watermarks and a process gets
+ * throttled. There is also a potential race if processes get
+ * throttled, kswapd wakes, a large process exits thereby balancing the
+ * zones, which causes kswapd to exit balance_pgdat() before reaching
+ * the wake up checks. If kswapd is going to sleep, no process should
+ * be sleeping on pfmemalloc_wait, so wake them now if necessary. If
+ * the wake up is premature, processes will wake kswapd and get
+ * throttled again. The difference from wake ups in balance_pgdat() is
+ * that here we are under prepare_to_wait().
*/
- if (waitqueue_active(&pgdat->pfmemalloc_wait)) {
- wake_up(&pgdat->pfmemalloc_wait);
- return false;
- }
+ if (waitqueue_active(&pgdat->pfmemalloc_wait))
+ wake_up_all(&pgdat->pfmemalloc_wait);
return pgdat_balanced(pgdat, order, classzone_idx);
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index fc1835c6bb40..00f9e144cc97 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -251,7 +251,7 @@ batadv_frag_merge_packets(struct hlist_head *chain, struct sk_buff *skb)
kfree(entry);
/* Make room for the rest of the fragments. */
- if (pskb_expand_head(skb_out, 0, size - skb->len, GFP_ATOMIC) < 0) {
+ if (pskb_expand_head(skb_out, 0, size - skb_out->len, GFP_ATOMIC) < 0) {
kfree_skb(skb_out);
skb_out = NULL;
goto free;
@@ -434,7 +434,7 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
* fragments larger than BATADV_FRAG_MAX_FRAG_SIZE
*/
mtu = min_t(unsigned, mtu, BATADV_FRAG_MAX_FRAG_SIZE);
- max_fragment_size = (mtu - header_size - ETH_HLEN);
+ max_fragment_size = mtu - header_size;
max_packet_size = max_fragment_size * BATADV_FRAG_MAX_FRAGMENTS;
/* Don't even try to fragment, if we need more than 16 fragments */
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 90cff585b37d..e0bcf9e84273 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -810,7 +810,7 @@ bool batadv_gw_out_of_range(struct batadv_priv *bat_priv,
goto out;
gw_node = batadv_gw_node_get(bat_priv, orig_dst_node);
- if (!gw_node->bandwidth_down == 0)
+ if (!gw_node)
goto out;
switch (atomic_read(&bat_priv->gw_mode)) {
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index ab6bb2af1d45..b24e4bb64fb5 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv,
if (orig_initialized)
atomic_dec(&bat_priv->mcast.num_disabled);
orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST;
- /* If mcast support is being switched off increase the disabled
- * mcast node counter.
+ /* If mcast support is being switched off or if this is an initial
+ * OGM without mcast support then increase the disabled mcast
+ * node counter.
*/
} else if (!orig_mcast_enabled &&
- orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) {
+ (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST ||
+ !orig_initialized)) {
atomic_inc(&bat_priv->mcast.num_disabled);
orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST;
}
@@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig)
{
struct batadv_priv *bat_priv = orig->bat_priv;
- if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST))
+ if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) &&
+ orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST)
atomic_dec(&bat_priv->mcast.num_disabled);
batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS);
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 8d04d174669e..fab47f1f3ef9 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv)
if (!bat_priv->nc.decoding_hash)
goto err;
- batadv_hash_set_lock_class(bat_priv->nc.coding_hash,
+ batadv_hash_set_lock_class(bat_priv->nc.decoding_hash,
&batadv_nc_decoding_hash_lock_class_key);
INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker);
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 6a484514cd3e..bea8198d0198 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
batadv_frag_purge_orig(orig_node, NULL);
- batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1,
- "originator timed out");
-
if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
@@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
atomic_set(&orig_node->last_ttvn, 0);
orig_node->tt_buff = NULL;
orig_node->tt_buff_len = 0;
+ orig_node->last_seen = jiffies;
reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
orig_node->bcast_seqno_reset = reset_time;
#ifdef CONFIG_BATMAN_ADV_MCAST
@@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv)
if (batadv_purge_orig_node(bat_priv, orig_node)) {
batadv_gw_node_delete(bat_priv, orig_node);
hlist_del_rcu(&orig_node->hash_entry);
+ batadv_tt_global_del_orig(orig_node->bat_priv,
+ orig_node, -1,
+ "originator timed out");
batadv_orig_node_free_ref(orig_node);
continue;
}
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 35f76f2f7824..6648f321864d 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -443,11 +443,13 @@ batadv_find_router(struct batadv_priv *bat_priv,
router = batadv_orig_router_get(orig_node, recv_if);
+ if (!router)
+ return router;
+
/* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop)
* and if activated.
*/
- if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) ||
- !router)
+ if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding)))
return router;
/* bonding: loop through the list of possible routers found
diff --git a/net/bluetooth/6lowpan.c b/net/bluetooth/6lowpan.c
index 76617be1e797..c989253737f0 100644
--- a/net/bluetooth/6lowpan.c
+++ b/net/bluetooth/6lowpan.c
@@ -390,7 +390,6 @@ static int recv_pkt(struct sk_buff *skb, struct net_device *dev,
drop:
dev->stats.rx_dropped++;
- kfree_skb(skb);
return NET_RX_DROP;
}
diff --git a/net/bluetooth/bnep/core.c b/net/bluetooth/bnep/core.c
index 85bcc21e84d2..ce82722d049b 100644
--- a/net/bluetooth/bnep/core.c
+++ b/net/bluetooth/bnep/core.c
@@ -533,6 +533,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
BT_DBG("");
+ if (!l2cap_is_socket(sock))
+ return -EBADFD;
+
baswap((void *) dst, &l2cap_pi(sock->sk)->chan->dst);
baswap((void *) src, &l2cap_pi(sock->sk)->chan->src);
diff --git a/net/bluetooth/cmtp/core.c b/net/bluetooth/cmtp/core.c
index 67fe5e84e68f..278a194e6af4 100644
--- a/net/bluetooth/cmtp/core.c
+++ b/net/bluetooth/cmtp/core.c
@@ -334,6 +334,9 @@ int cmtp_add_connection(struct cmtp_connadd_req *req, struct socket *sock)
BT_DBG("");
+ if (!l2cap_is_socket(sock))
+ return -EBADFD;
+
session = kzalloc(sizeof(struct cmtp_session), GFP_KERNEL);
if (!session)
return -ENOMEM;
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 39a5c8a01726..3f2e8b830cbd 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -242,7 +242,8 @@ static void hci_cc_read_local_name(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags))
memcpy(hdev->dev_name, rp->name, HCI_MAX_NAME_LENGTH);
}
@@ -509,7 +510,8 @@ static void hci_cc_read_local_version(struct hci_dev *hdev, struct sk_buff *skb)
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags)) {
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags)) {
hdev->hci_ver = rp->hci_ver;
hdev->hci_rev = __le16_to_cpu(rp->hci_rev);
hdev->lmp_ver = rp->lmp_ver;
@@ -528,7 +530,8 @@ static void hci_cc_read_local_commands(struct hci_dev *hdev,
if (rp->status)
return;
- if (test_bit(HCI_SETUP, &hdev->dev_flags))
+ if (test_bit(HCI_SETUP, &hdev->dev_flags) ||
+ test_bit(HCI_CONFIG, &hdev->dev_flags))
memcpy(hdev->commands, rp->commands, sizeof(hdev->commands));
}
@@ -2194,7 +2197,12 @@ static void hci_conn_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
return;
}
- if (!test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
+ /* Require HCI_CONNECTABLE or a whitelist entry to accept the
+ * connection. These features are only touched through mgmt so
+ * only do the checks if HCI_MGMT is set.
+ */
+ if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
+ !test_bit(HCI_CONNECTABLE, &hdev->dev_flags) &&
!hci_bdaddr_list_lookup(&hdev->whitelist, &ev->bdaddr,
BDADDR_BREDR)) {
hci_reject_conn(hdev, &ev->bdaddr);
diff --git a/net/bluetooth/hidp/core.c b/net/bluetooth/hidp/core.c
index cc25d0b74b36..07348e142f16 100644
--- a/net/bluetooth/hidp/core.c
+++ b/net/bluetooth/hidp/core.c
@@ -1314,13 +1314,14 @@ int hidp_connection_add(struct hidp_connadd_req *req,
{
struct hidp_session *session;
struct l2cap_conn *conn;
- struct l2cap_chan *chan = l2cap_pi(ctrl_sock->sk)->chan;
+ struct l2cap_chan *chan;
int ret;
ret = hidp_verify_sockets(ctrl_sock, intr_sock);
if (ret)
return ret;
+ chan = l2cap_pi(ctrl_sock->sk)->chan;
conn = NULL;
l2cap_chan_lock(chan);
if (chan->conn)
diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c
index 1f1de715197c..e2aa7be3a847 100644
--- a/net/bridge/br_input.c
+++ b/net/bridge/br_input.c
@@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb)
dst = NULL;
if (is_broadcast_ether_addr(dest)) {
- if (p->flags & BR_PROXYARP &&
+ if (IS_ENABLED(CONFIG_INET) &&
+ p->flags & BR_PROXYARP &&
skb->protocol == htons(ETH_P_ARP))
br_do_proxy_arp(skb, br, vid);
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 15845814a0f2..ba6eb17226da 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au,
int ret;
char tmp_enc[40];
__le32 tmp[5] = {
- 16u, msg->hdr.crc, msg->footer.front_crc,
+ cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc,
msg->footer.middle_crc, msg->footer.data_crc,
};
ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp),
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index a83062ceeec9..f2148e22b148 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len,
if (src_len != sizeof(u32) + dst_len)
return -EINVAL;
- buf_len = le32_to_cpu(*(u32 *)src);
+ buf_len = le32_to_cpu(*(__le32 *)src);
if (buf_len != dst_len)
return -EINVAL;
diff --git a/net/core/dev.c b/net/core/dev.c
index f411c28d0a66..171420e75b03 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1694,6 +1694,7 @@ int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
skb_scrub_packet(skb, true);
skb->protocol = eth_type_trans(skb, dev);
+ skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
return 0;
}
@@ -2522,7 +2523,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
/* If MPLS offload request, verify we are testing hardware MPLS features
* instead of standard features for the netdev.
*/
-#ifdef CONFIG_NET_MPLS_GSO
+#if IS_ENABLED(CONFIG_NET_MPLS_GSO)
static netdev_features_t net_mpls_features(struct sk_buff *skb,
netdev_features_t features,
__be16 type)
@@ -2562,7 +2563,7 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
netdev_features_t netif_skb_features(struct sk_buff *skb)
{
- const struct net_device *dev = skb->dev;
+ struct net_device *dev = skb->dev;
netdev_features_t features = dev->features;
u16 gso_segs = skb_shinfo(skb)->gso_segs;
__be16 protocol = skb->protocol;
@@ -2570,11 +2571,21 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
features &= ~NETIF_F_GSO_MASK;
- if (protocol == htons(ETH_P_8021Q) || protocol == htons(ETH_P_8021AD)) {
- struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
- protocol = veh->h_vlan_encapsulated_proto;
- } else if (!vlan_tx_tag_present(skb)) {
- return harmonize_features(skb, features);
+ /* If encapsulation offload request, verify we are testing
+ * hardware encapsulation features instead of standard
+ * features for the netdev
+ */
+ if (skb->encapsulation)
+ features &= dev->hw_enc_features;
+
+ if (!vlan_tx_tag_present(skb)) {
+ if (unlikely(protocol == htons(ETH_P_8021Q) ||
+ protocol == htons(ETH_P_8021AD))) {
+ struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+ protocol = veh->h_vlan_encapsulated_proto;
+ } else {
+ goto finalize;
+ }
}
features = netdev_intersect_features(features,
@@ -2591,6 +2602,11 @@ netdev_features_t netif_skb_features(struct sk_buff *skb)
NETIF_F_HW_VLAN_CTAG_TX |
NETIF_F_HW_VLAN_STAG_TX);
+finalize:
+ if (dev->netdev_ops->ndo_features_check)
+ features &= dev->netdev_ops->ndo_features_check(skb, dev,
+ features);
+
return harmonize_features(skb, features);
}
EXPORT_SYMBOL(netif_skb_features);
@@ -2661,19 +2677,12 @@ static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device
if (unlikely(!skb))
goto out_null;
- /* If encapsulation offload request, verify we are testing
- * hardware encapsulation features instead of standard
- * features for the netdev
- */
- if (skb->encapsulation)
- features &= dev->hw_enc_features;
-
if (netif_needs_gso(dev, skb, features)) {
struct sk_buff *segs;
segs = skb_gso_segment(skb, features);
if (IS_ERR(segs)) {
- segs = NULL;
+ goto out_kfree_skb;
} else if (segs) {
consume_skb(skb);
skb = segs;
@@ -4557,6 +4566,68 @@ void netif_napi_del(struct napi_struct *napi)
}
EXPORT_SYMBOL(netif_napi_del);
+static int napi_poll(struct napi_struct *n, struct list_head *repoll)
+{
+ void *have;
+ int work, weight;
+
+ list_del_init(&n->poll_list);
+
+ have = netpoll_poll_lock(n);
+
+ weight = n->weight;
+
+ /* This NAPI_STATE_SCHED test is for avoiding a race
+ * with netpoll's poll_napi(). Only the entity which
+ * obtains the lock and sees NAPI_STATE_SCHED set will
+ * actually make the ->poll() call. Therefore we avoid
+ * accidentally calling ->poll() when NAPI is not scheduled.
+ */
+ work = 0;
+ if (test_bit(NAPI_STATE_SCHED, &n->state)) {
+ work = n->poll(n, weight);
+ trace_napi_poll(n);
+ }
+
+ WARN_ON_ONCE(work > weight);
+
+ if (likely(work < weight))
+ goto out_unlock;
+
+ /* Drivers must not modify the NAPI state if they
+ * consume the entire weight. In such cases this code
+ * still "owns" the NAPI instance and therefore can
+ * move the instance around on the list at-will.
+ */
+ if (unlikely(napi_disable_pending(n))) {
+ napi_complete(n);
+ goto out_unlock;
+ }
+
+ if (n->gro_list) {
+ /* flush too old packets
+ * If HZ < 1000, flush all packets.
+ */
+ napi_gro_flush(n, HZ >= 1000);
+ }
+
+ /* Some drivers may have called napi_schedule
+ * prior to exhausting their budget.
+ */
+ if (unlikely(!list_empty(&n->poll_list))) {
+ pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
+ n->dev ? n->dev->name : "backlog");
+ goto out_unlock;
+ }
+
+ list_add_tail(&n->poll_list, repoll);
+
+out_unlock:
+ netpoll_poll_unlock(have);
+
+ return work;
+}
+
static void net_rx_action(struct softirq_action *h)
{
struct softnet_data *sd = this_cpu_ptr(&softnet_data);
@@ -4564,74 +4635,34 @@ static void net_rx_action(struct softirq_action *h)
int budget = netdev_budget;
LIST_HEAD(list);
LIST_HEAD(repoll);
- void *have;
local_irq_disable();
list_splice_init(&sd->poll_list, &list);
local_irq_enable();
- while (!list_empty(&list)) {
+ for (;;) {
struct napi_struct *n;
- int work, weight;
-
- /* If softirq window is exhausted then punt.
- * Allow this to run for 2 jiffies since which will allow
- * an average latency of 1.5/HZ.
- */
- if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))
- goto softnet_break;
-
- n = list_first_entry(&list, struct napi_struct, poll_list);
- list_del_init(&n->poll_list);
-
- have = netpoll_poll_lock(n);
-
- weight = n->weight;
-
- /* This NAPI_STATE_SCHED test is for avoiding a race
- * with netpoll's poll_napi(). Only the entity which
- * obtains the lock and sees NAPI_STATE_SCHED set will
- * actually make the ->poll() call. Therefore we avoid
- * accidentally calling ->poll() when NAPI is not scheduled.
- */
- work = 0;
- if (test_bit(NAPI_STATE_SCHED, &n->state)) {
- work = n->poll(n, weight);
- trace_napi_poll(n);
+ if (list_empty(&list)) {
+ if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
+ return;
+ break;
}
- WARN_ON_ONCE(work > weight);
-
- budget -= work;
+ n = list_first_entry(&list, struct napi_struct, poll_list);
+ budget -= napi_poll(n, &repoll);
- /* Drivers must not modify the NAPI state if they
- * consume the entire weight. In such cases this code
- * still "owns" the NAPI instance and therefore can
- * move the instance around on the list at-will.
+ /* If the softirq window is exhausted then punt.
+ * Allow this to run for 2 jiffies, which allows
+ * an average latency of 1.5/HZ.
*/
- if (unlikely(work == weight)) {
- if (unlikely(napi_disable_pending(n))) {
- napi_complete(n);
- } else {
- if (n->gro_list) {
- /* flush too old packets
- * If HZ < 1000, flush all packets.
- */
- napi_gro_flush(n, HZ >= 1000);
- }
- list_add_tail(&n->poll_list, &repoll);
- }
+ if (unlikely(budget <= 0 ||
+ time_after_eq(jiffies, time_limit))) {
+ sd->time_squeeze++;
+ break;
}
-
- netpoll_poll_unlock(have);
}
- if (!sd_has_rps_ipi_waiting(sd) &&
- list_empty(&list) &&
- list_empty(&repoll))
- return;
-out:
local_irq_disable();
list_splice_tail_init(&sd->poll_list, &list);
@@ -4641,12 +4672,6 @@ out:
__raise_softirq_irqoff(NET_RX_SOFTIRQ);
net_rps_action_and_irq_enable(sd);
-
- return;
-
-softnet_break:
- sd->time_squeeze++;
- goto out;
}
struct netdev_adjacent {
@@ -7047,10 +7072,20 @@ static int dev_cpu_callback(struct notifier_block *nfb,
oldsd->output_queue = NULL;
oldsd->output_queue_tailp = &oldsd->output_queue;
}
- /* Append NAPI poll list from offline CPU. */
- if (!list_empty(&oldsd->poll_list)) {
- list_splice_init(&oldsd->poll_list, &sd->poll_list);
- raise_softirq_irqoff(NET_RX_SOFTIRQ);
+ /* Append NAPI poll list from offline CPU, with one exception:
+ * process_backlog() must be called by the cpu owning the percpu backlog.
+ * We properly handle process_queue & input_pkt_queue later.
+ */
+ while (!list_empty(&oldsd->poll_list)) {
+ struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
+ struct napi_struct,
+ poll_list);
+
+ list_del_init(&napi->poll_list);
+ if (napi->poll == process_backlog)
+ napi->state = 0;
+ else
+ ____napi_schedule(sd, napi);
}
raise_softirq_irqoff(NET_TX_SOFTIRQ);
@@ -7061,7 +7096,7 @@ static int dev_cpu_callback(struct notifier_block *nfb,
netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
- while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) {
+ while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
netif_rx_internal(skb);
input_queue_head_incr(oldsd);
}
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 8e38f17288d3..8d614c93f86a 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh)
case NDTPA_BASE_REACHABLE_TIME:
NEIGH_VAR_SET(p, BASE_REACHABLE_TIME,
nla_get_msecs(tbp[i]));
+ /* update reachable_time as well, otherwise, the change will
+ * only be effective after the next time neigh_periodic_work
+ * decides to recompute it (can be multiple minutes)
+ */
+ p->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
break;
case NDTPA_GC_STALETIME:
NEIGH_VAR_SET(p, GC_STALETIME,
@@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write,
return ret;
}
+static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write,
+ void __user *buffer,
+ size_t *lenp, loff_t *ppos)
+{
+ struct neigh_parms *p = ctl->extra2;
+ int ret;
+
+ if (strcmp(ctl->procname, "base_reachable_time") == 0)
+ ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos);
+ else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0)
+ ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos);
+ else
+ ret = -1;
+
+ if (write && ret == 0) {
+ /* update reachable_time as well, otherwise, the change will
+ * only be effective after the next time neigh_periodic_work
+ * decides to recompute it
+ */
+ p->reachable_time =
+ neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME));
+ }
+ return ret;
+}
+
#define NEIGH_PARMS_DATA_OFFSET(index) \
(&((struct neigh_parms *) 0)->data[index])
@@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler;
/* ReachableTime (in milliseconds) */
t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler;
+ } else {
+ /* Those handlers will update p->reachable_time after
+ * base_reachable_time(_ms) is set to ensure the new timer starts being
+ * applied after the next neighbour update instead of waiting for
+ * neigh_periodic_work to update its value (can be multiple minutes).
+ * Any handler that replaces them should do this as well.
+ */
+ /* ReachableTime */
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler =
+ neigh_proc_base_reachable_time;
+ /* ReachableTime (in milliseconds) */
+ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler =
+ neigh_proc_base_reachable_time;
}
/* Don't export sysctls to unprivileged users */
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index ae13ef6b3ea7..395c15b82087 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4148,6 +4148,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->ignore_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
+ skb_init_secmark(skb);
secpath_reset(skb);
nf_reset(skb);
nf_reset_trace(skb);
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 515569ffde8a..589aafd01fc5 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -46,6 +46,7 @@ void dsa_slave_mii_bus_init(struct dsa_switch *ds)
snprintf(ds->slave_mii_bus->id, MII_BUS_ID_SIZE, "dsa-%d:%.2x",
ds->index, ds->pd->sw_addr);
ds->slave_mii_bus->parent = ds->master_dev;
+ ds->slave_mii_bus->phy_mask = ~ds->phys_mii_mask;
}
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 95e47c97585e..394a200f93c1 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -122,14 +122,18 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
int err;
skb = udp_tunnel_handle_offloads(skb, !gs->sock->sk->sk_no_check_tx);
+ if (IS_ERR(skb))
+ return PTR_ERR(skb);
min_headroom = LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len
+ GENEVE_BASE_HLEN + opt_len + sizeof(struct iphdr)
+ (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
err = skb_cow_head(skb, min_headroom);
- if (unlikely(err))
+ if (unlikely(err)) {
+ kfree_skb(skb);
return err;
+ }
skb = vlan_hwaccel_push_inside(skb);
if (unlikely(!skb))
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 3a83ce5efa80..787b3c294ce6 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -129,7 +129,8 @@ int ip_forward(struct sk_buff *skb)
* We now generate an ICMP HOST REDIRECT giving the route
* we calculated.
*/
- if (rt->rt_flags&RTCF_DOREDIRECT && !opt->srr && !skb_sec_path(skb))
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT && !opt->srr &&
+ !skb_sec_path(skb))
ip_rt_send_redirect(skb);
skb->priority = rt_tos2priority(iph->tos);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 8a89c738b7a3..6b85adb05003 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -461,17 +461,13 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin_family = AF_UNSPEC;
+ memset(sin, 0, sizeof(*sin));
if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP ||
ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) {
- struct inet_sock *inet = inet_sk(sk);
-
sin->sin_family = AF_INET;
sin->sin_addr.s_addr = ip_hdr(skb)->saddr;
- sin->sin_port = 0;
- memset(&sin->sin_zero, 0, sizeof(sin->sin_zero));
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c
index ff2d23d8c87a..6ecfce63201a 100644
--- a/net/ipv4/netfilter/nft_redir_ipv4.c
+++ b/net/ipv4/netfilter/nft_redir_ipv4.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr,
memset(&mr, 0, sizeof(mr));
if (priv->sreg_proto_min) {
- mr.range[0].min.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- mr.range[0].max.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ mr.range[0].min.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ mr.range[0].max.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c0d82f78d364..2a3720fb5a5f 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -966,8 +966,11 @@ bool ping_rcv(struct sk_buff *skb)
sk = ping_lookup(net, skb, ntohs(icmph->un.echo.id));
if (sk != NULL) {
+ struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
+
pr_debug("rcv on socket %p\n", sk);
- ping_queue_rcv_skb(sk, skb_get(skb));
+ if (skb2)
+ ping_queue_rcv_skb(sk, skb2);
sock_put(sk);
return true;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 6a2155b02602..d58dd0ec3e53 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1554,11 +1554,10 @@ static int __mkroute_input(struct sk_buff *skb,
do_cache = res->fi && !itag;
if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
+ skb->protocol == htons(ETH_P_IP) &&
(IN_DEV_SHARED_MEDIA(out_dev) ||
- inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res)))) {
- flags |= RTCF_DOREDIRECT;
- do_cache = false;
- }
+ inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
+ IPCB(skb)->flags |= IPSKB_DOREDIRECT;
if (skb->protocol != htons(ETH_P_IP)) {
/* Not IP (i.e. ARP). Do not create route, if it is
@@ -2303,6 +2302,8 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
if (rt->rt_flags & RTCF_NOTIFY)
r->rtm_flags |= RTM_F_NOTIFY;
+ if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
+ r->rtm_flags |= RTCF_DOREDIRECT;
if (nla_put_be32(skb, RTA_DST, dst))
goto nla_put_failure;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 7f18262e2326..65caf8b95e17 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now)))
break;
- if (tso_segs == 1) {
+ if (tso_segs == 1 || !max_segs) {
if (unlikely(!tcp_nagle_test(tp, skb, mss_now,
(tcp_skb_is_last(sk, skb) ?
nonagle : TCP_NAGLE_PUSH))))
@@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
}
limit = mss_now;
- if (tso_segs > 1 && !tcp_urg_mode(tp))
+ if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp))
limit = tcp_mss_split_point(sk, skb, mss_now,
min_t(unsigned int,
cwnd_quota,
diff --git a/net/ipv4/udp_diag.c b/net/ipv4/udp_diag.c
index 7927db0a9279..4a000f1dd757 100644
--- a/net/ipv4/udp_diag.c
+++ b/net/ipv4/udp_diag.c
@@ -99,11 +99,13 @@ static void udp_dump(struct udp_table *table, struct sk_buff *skb, struct netlin
s_slot = cb->args[0];
num = s_num = cb->args[1];
- for (slot = s_slot; slot <= table->mask; num = s_num = 0, slot++) {
+ for (slot = s_slot; slot <= table->mask; s_num = 0, slot++) {
struct sock *sk;
struct hlist_nulls_node *node;
struct udp_hslot *hslot = &table->hash[slot];
+ num = 0;
+
if (hlist_nulls_empty(&hslot->head))
continue;
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 100c589a2a6c..49f5e73db122 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -393,11 +393,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err));
sin = &errhdr.offender;
- sin->sin6_family = AF_UNSPEC;
+ memset(sin, 0, sizeof(*sin));
+
if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) {
sin->sin6_family = AF_INET6;
- sin->sin6_flowinfo = 0;
- sin->sin6_port = 0;
if (np->rxopt.all) {
if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP &&
serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6)
@@ -412,12 +411,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
ipv6_iface_scope_id(&sin->sin6_addr,
IP6CB(skb)->iif);
} else {
- struct inet_sock *inet = inet_sk(sk);
-
ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr,
&sin->sin6_addr);
- sin->sin6_scope_id = 0;
- if (inet->cmsg_flags)
+ if (inet_sk(sk)->cmsg_flags)
ip_cmsg_recv(msg, skb);
}
}
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index b2d1838897c9..f1c6d5e98322 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -659,6 +659,29 @@ static int fib6_commit_metrics(struct dst_entry *dst,
return 0;
}
+static void fib6_purge_rt(struct rt6_info *rt, struct fib6_node *fn,
+ struct net *net)
+{
+ if (atomic_read(&rt->rt6i_ref) != 1) {
+ /* This route is used as dummy address holder in some split
+ * nodes. It is not leaked, but it still holds other resources,
+ * which must be released in time. So, scan ascendant nodes
+ * and replace dummy references to this route with references
+ * to still alive ones.
+ */
+ while (fn) {
+ if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
+ fn->leaf = fib6_find_prefix(net, fn);
+ atomic_inc(&fn->leaf->rt6i_ref);
+ rt6_release(rt);
+ }
+ fn = fn->parent;
+ }
+ /* No more references are possible at this point. */
+ BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
+ }
+}
+
/*
* Insert routing information in a node.
*/
@@ -807,11 +830,12 @@ add:
rt->dst.rt6_next = iter->dst.rt6_next;
atomic_inc(&rt->rt6i_ref);
inet6_rt_notify(RTM_NEWROUTE, rt, info);
- rt6_release(iter);
if (!(fn->fn_flags & RTN_RTINFO)) {
info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
fn->fn_flags |= RTN_RTINFO;
}
+ fib6_purge_rt(iter, fn, info->nl_net);
+ rt6_release(iter);
}
return 0;
@@ -1322,24 +1346,7 @@ static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
fn = fib6_repair_tree(net, fn);
}
- if (atomic_read(&rt->rt6i_ref) != 1) {
- /* This route is used as dummy address holder in some split
- * nodes. It is not leaked, but it still holds other resources,
- * which must be released in time. So, scan ascendant nodes
- * and replace dummy references to this route with references
- * to still alive ones.
- */
- while (fn) {
- if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
- fn->leaf = fib6_find_prefix(net, fn);
- atomic_inc(&fn->leaf->rt6i_ref);
- rt6_release(rt);
- }
- fn = fn->parent;
- }
- /* No more references are possible at this point. */
- BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
- }
+ fib6_purge_rt(rt, fn, net);
inet6_rt_notify(RTM_DELROUTE, rt, info);
rt6_release(rt);
diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c
index 2433a6bfb191..11820b6b3613 100644
--- a/net/ipv6/netfilter/nft_redir_ipv6.c
+++ b/net/ipv6/netfilter/nft_redir_ipv6.c
@@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr,
memset(&range, 0, sizeof(range));
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- range.max_proto.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ range.min_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ range.max_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c91083156edb..495965358d22 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct net *net = dev_net(dst->dev);
rt6->rt6i_flags |= RTF_MODIFIED;
- if (mtu < IPV6_MIN_MTU) {
- u32 features = dst_metric(dst, RTAX_FEATURES);
+ if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- features |= RTAX_FEATURE_ALLFRAG;
- dst_metric_set(dst, RTAX_FEATURES, features);
- }
+
dst_metric_set(dst, RTAX_MTU, mtu);
rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires);
}
@@ -1245,12 +1242,16 @@ restart:
rt = net->ipv6.ip6_null_entry;
else if (rt->dst.error) {
rt = net->ipv6.ip6_null_entry;
- } else if (rt == net->ipv6.ip6_null_entry) {
+ goto out;
+ }
+
+ if (rt == net->ipv6.ip6_null_entry) {
fn = fib6_backtrack(fn, &fl6->saddr);
if (fn)
goto restart;
}
+out:
dst_hold(&rt->dst);
read_unlock_bh(&table->tb6_lock);
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5ff87805258e..9c0b54e87b47 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1387,6 +1387,28 @@ ipv6_pktoptions:
return 0;
}
+static void tcp_v6_fill_cb(struct sk_buff *skb, const struct ipv6hdr *hdr,
+ const struct tcphdr *th)
+{
+ /* This is tricky: we move IP6CB at its correct location into
+ * TCP_SKB_CB(). It must be done after xfrm6_policy_check(), because
+ * _decode_session6() uses IP6CB().
+ * barrier() makes sure compiler won't play aliasing games.
+ */
+ memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
+ sizeof(struct inet6_skb_parm));
+ barrier();
+
+ TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+ skb->len - th->doff*4);
+ TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
+ TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
+ TCP_SKB_CB(skb)->tcp_tw_isn = 0;
+ TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
+ TCP_SKB_CB(skb)->sacked = 0;
+}
+
static int tcp_v6_rcv(struct sk_buff *skb)
{
const struct tcphdr *th;
@@ -1418,24 +1440,9 @@ static int tcp_v6_rcv(struct sk_buff *skb)
th = tcp_hdr(skb);
hdr = ipv6_hdr(skb);
- /* This is tricky : We move IPCB at its correct location into TCP_SKB_CB()
- * barrier() makes sure compiler wont play fool^Waliasing games.
- */
- memmove(&TCP_SKB_CB(skb)->header.h6, IP6CB(skb),
- sizeof(struct inet6_skb_parm));
- barrier();
-
- TCP_SKB_CB(skb)->seq = ntohl(th->seq);
- TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
- skb->len - th->doff*4);
- TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
- TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
- TCP_SKB_CB(skb)->tcp_tw_isn = 0;
- TCP_SKB_CB(skb)->ip_dsfield = ipv6_get_dsfield(hdr);
- TCP_SKB_CB(skb)->sacked = 0;
sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest,
- tcp_v6_iif(skb));
+ inet6_iif(skb));
if (!sk)
goto no_tcp_socket;
@@ -1451,6 +1458,8 @@ process:
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
goto discard_and_relse;
+ tcp_v6_fill_cb(skb, hdr, th);
+
#ifdef CONFIG_TCP_MD5SIG
if (tcp_v6_inbound_md5_hash(sk, skb))
goto discard_and_relse;
@@ -1482,6 +1491,8 @@ no_tcp_socket:
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard_it;
+ tcp_v6_fill_cb(skb, hdr, th);
+
if (skb->len < (th->doff<<2) || tcp_checksum_complete(skb)) {
csum_error:
TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
@@ -1505,6 +1516,8 @@ do_time_wait:
goto discard_it;
}
+ tcp_v6_fill_cb(skb, hdr, th);
+
if (skb->len < (th->doff<<2)) {
inet_twsk_put(inet_twsk(sk));
goto bad_packet;
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index 5f983644373a..48bf5a06847b 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -130,12 +130,18 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
{
struct flowi6 *fl6 = &fl->u.ip6;
int onlyproto = 0;
- u16 offset = skb_network_header_len(skb);
const struct ipv6hdr *hdr = ipv6_hdr(skb);
+ u16 offset = sizeof(*hdr);
struct ipv6_opt_hdr *exthdr;
const unsigned char *nh = skb_network_header(skb);
- u8 nexthdr = nh[IP6CB(skb)->nhoff];
+ u16 nhoff = IP6CB(skb)->nhoff;
int oif = 0;
+ u8 nexthdr;
+
+ if (!nhoff)
+ nhoff = offsetof(struct ipv6hdr, nexthdr);
+
+ nexthdr = nh[nhoff];
if (skb_dst(skb))
oif = skb_dst(skb)->dev->ifindex;
diff --git a/net/llc/sysctl_net_llc.c b/net/llc/sysctl_net_llc.c
index 612a5ddaf93b..799bafc2af39 100644
--- a/net/llc/sysctl_net_llc.c
+++ b/net/llc/sysctl_net_llc.c
@@ -18,28 +18,28 @@ static struct ctl_table llc2_timeout_table[] = {
{
.procname = "ack",
.data = &sysctl_llc2_ack_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_ack_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "busy",
.data = &sysctl_llc2_busy_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_busy_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "p",
.data = &sysctl_llc2_p_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_p_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
{
.procname = "rej",
.data = &sysctl_llc2_rej_timeout,
- .maxlen = sizeof(long),
+ .maxlen = sizeof(sysctl_llc2_rej_timeout),
.mode = 0644,
.proc_handler = proc_dointvec_jiffies,
},
diff --git a/net/mac80211/key.c b/net/mac80211/key.c
index 0bb7038121ac..bd4e46ec32bd 100644
--- a/net/mac80211/key.c
+++ b/net/mac80211/key.c
@@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
if (!ret) {
key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
sdata->crypto_tx_tailroom_needed_cnt--;
WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
@@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key)
sta = key->sta;
sdata = key->sdata;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(sdata);
ret = drv_set_key(key->local, DISABLE_KEY, sdata,
@@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf)
if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) {
key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE;
- if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC))
+ if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) ||
+ (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)))
increment_tailroom_need_count(key->sdata);
}
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2c36c4765f47..837a406a9dd6 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1643,7 +1643,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
- bool ret;
+ bool ret = false;
int ac;
if (local->hw.queues < IEEE80211_NUM_ACS)
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 4c5192e0d66c..4a95fe3cffbc 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -86,20 +86,6 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
}
}
- /* tear down aggregation sessions and remove STAs */
- mutex_lock(&local->sta_mtx);
- list_for_each_entry(sta, &local->sta_list, list) {
- if (sta->uploaded) {
- enum ieee80211_sta_state state;
-
- state = sta->sta_state;
- for (; state > IEEE80211_STA_NOTEXIST; state--)
- WARN_ON(drv_sta_state(local, sta->sdata, sta,
- state, state - 1));
- }
- }
- mutex_unlock(&local->sta_mtx);
-
/* remove all interfaces that were created in the driver */
list_for_each_entry(sdata, &local->interfaces, list) {
if (!ieee80211_sdata_running(sdata))
@@ -111,6 +97,21 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
case NL80211_IFTYPE_STATION:
ieee80211_mgd_quiesce(sdata);
break;
+ case NL80211_IFTYPE_WDS:
+ /* tear down aggregation sessions and remove STAs */
+ mutex_lock(&local->sta_mtx);
+ sta = sdata->u.wds.sta;
+ if (sta && sta->uploaded) {
+ enum ieee80211_sta_state state;
+
+ state = sta->sta_state;
+ for (; state > IEEE80211_STA_NOTEXIST; state--)
+ WARN_ON(drv_sta_state(local, sta->sdata,
+ sta, state,
+ state - 1));
+ }
+ mutex_unlock(&local->sta_mtx);
+ break;
default:
break;
}
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 683b10f46505..d69ca513848e 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -272,7 +272,7 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
else if (rate)
- channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
+ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
else
channel_flags |= IEEE80211_CHAN_2GHZ;
put_unaligned_le16(channel_flags, pos);
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index ca27837974fe..349295d21946 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -31,10 +31,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
SKB_GSO_TCPV6 |
SKB_GSO_UDP |
SKB_GSO_DODGY |
- SKB_GSO_TCP_ECN |
- SKB_GSO_GRE |
- SKB_GSO_GRE_CSUM |
- SKB_GSO_IPIP)))
+ SKB_GSO_TCP_ECN)))
goto out;
/* Setup inner SKB. */
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c
index 1d5341f3761d..5d3daae98bf0 100644
--- a/net/netfilter/ipvs/ip_vs_ftp.c
+++ b/net/netfilter/ipvs/ip_vs_ftp.c
@@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct nf_conn *ct;
struct net *net;
+ *diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets
@@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1;
#endif
- *diff = 0;
-
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
@@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
struct ip_vs_conn *n_cp;
struct net *net;
+ /* no diff required for incoming packets */
+ *diff = 0;
+
#ifdef CONFIG_IP_VS_IPV6
/* This application helper doesn't work with IPv6 yet,
* so turn this into a no-op for IPv6 packets
@@ -330,9 +333,6 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp,
return 1;
#endif
- /* no diff required for incoming packets */
- *diff = 0;
-
/* Only useful for established sessions */
if (cp->state != IP_VS_TCP_S_ESTABLISHED)
return 1;
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index a11674806707..46d1b26a468e 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb)
*/
NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
pr_debug("Confirming conntrack %p\n", ct);
- /* We have to check the DYING flag inside the lock to prevent
- a race against nf_ct_get_next_corpse() possibly called from
- user context, else we insert an already 'dead' hash, blocking
- further use of that particular connection -JM */
+ /* We have to check the DYING flag after unlink to prevent
+ * a race against nf_ct_get_next_corpse() possibly called from
+ * user context, else we insert an already 'dead' hash, blocking
+ * further use of that particular connection -JM.
+ */
+ nf_ct_del_from_dying_or_unconfirmed_list(ct);
- if (unlikely(nf_ct_is_dying(ct))) {
- nf_conntrack_double_unlock(hash, reply_hash);
- local_bh_enable();
- return NF_ACCEPT;
- }
+ if (unlikely(nf_ct_is_dying(ct)))
+ goto out;
/* See if there's one in the list already, including reverse:
NAT could have grabbed it without realizing, since we're
@@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h)))
goto out;
- nf_ct_del_from_dying_or_unconfirmed_list(ct);
-
/* Timer relative to confirmation time, not original
setting time, otherwise we'd get timer wrap in
weird delay cases. */
@@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT;
out:
+ nf_ct_add_to_dying_list(ct);
nf_conntrack_double_unlock(hash, reply_hash);
NF_CT_STAT_INC(net, insert_failed);
local_bh_enable();
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 129a8daa4abf..3b3ddb4fb9ee 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx)
struct nft_chain *chain, *nc;
struct nft_set *set, *ns;
- list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ list_for_each_entry(chain, &ctx->table->chains, list) {
ctx->chain = chain;
err = nft_delrule_by_chain(ctx);
if (err < 0)
goto out;
-
- err = nft_delchain(ctx);
- if (err < 0)
- goto out;
}
list_for_each_entry_safe(set, ns, &ctx->table->sets, list) {
@@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx)
goto out;
}
+ list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) {
+ ctx->chain = chain;
+
+ err = nft_delchain(ctx);
+ if (err < 0)
+ goto out;
+ }
+
err = nft_deltable(ctx);
out:
return err;
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 13c2e17bbe27..c421d94c4652 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -321,7 +321,8 @@ replay:
nlh = nlmsg_hdr(skb);
err = 0;
- if (nlh->nlmsg_len < NLMSG_HDRLEN) {
+ if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
+ skb->len < nlh->nlmsg_len) {
err = -EINVAL;
goto ack;
}
@@ -463,13 +464,13 @@ static void nfnetlink_rcv(struct sk_buff *skb)
}
#ifdef CONFIG_MODULES
-static int nfnetlink_bind(int group)
+static int nfnetlink_bind(struct net *net, int group)
{
const struct nfnetlink_subsystem *ss;
int type;
if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
- return -EINVAL;
+ return 0;
type = nfnl_group2type[group];
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index afe2b0b45ec4..aff54fb1c8a0 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr,
}
if (priv->sreg_proto_min) {
- range.min_proto.all = (__force __be16)
- data[priv->sreg_proto_min].data[0];
- range.max_proto.all = (__force __be16)
- data[priv->sreg_proto_max].data[0];
+ range.min_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_min].data[0];
+ range.max_proto.all =
+ *(__be16 *)&data[priv->sreg_proto_max].data[0];
range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED;
}
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 074cf3e91c6f..02fdde28dada 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -61,6 +61,7 @@
#include <linux/rhashtable.h>
#include <asm/cacheflush.h>
#include <linux/hash.h>
+#include <linux/genetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -1091,8 +1092,12 @@ static void netlink_remove(struct sock *sk)
mutex_unlock(&nl_sk_hash_lock);
netlink_table_grab();
- if (nlk_sk(sk)->subscriptions)
+ if (nlk_sk(sk)->subscriptions) {
__sk_del_bind_node(sk);
+ netlink_update_listeners(sk);
+ }
+ if (sk->sk_protocol == NETLINK_GENERIC)
+ atomic_inc(&genl_sk_destructing_cnt);
netlink_table_ungrab();
}
@@ -1139,8 +1144,8 @@ static int netlink_create(struct net *net, struct socket *sock, int protocol,
struct module *module = NULL;
struct mutex *cb_mutex;
struct netlink_sock *nlk;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
int err = 0;
sock->state = SS_UNCONNECTED;
@@ -1209,6 +1214,20 @@ static int netlink_release(struct socket *sock)
* will be purged.
*/
+ /* must not acquire netlink_table_lock in any way again before unbind
+ * and notifying genetlink is done as otherwise it might deadlock
+ */
+ if (nlk->netlink_unbind) {
+ int i;
+
+ for (i = 0; i < nlk->ngroups; i++)
+ if (test_bit(i, nlk->groups))
+ nlk->netlink_unbind(sock_net(sk), i + 1);
+ }
+ if (sk->sk_protocol == NETLINK_GENERIC &&
+ atomic_dec_return(&genl_sk_destructing_cnt) == 0)
+ wake_up(&genl_sk_destructing_waitq);
+
sock->sk = NULL;
wake_up_interruptible_all(&nlk->wait);
@@ -1226,8 +1245,8 @@ static int netlink_release(struct socket *sock)
module_put(nlk->module);
- netlink_table_grab();
if (netlink_is_kernel(sk)) {
+ netlink_table_grab();
BUG_ON(nl_table[sk->sk_protocol].registered == 0);
if (--nl_table[sk->sk_protocol].registered == 0) {
struct listeners *old;
@@ -1241,10 +1260,8 @@ static int netlink_release(struct socket *sock)
nl_table[sk->sk_protocol].flags = 0;
nl_table[sk->sk_protocol].registered = 0;
}
- } else if (nlk->subscriptions) {
- netlink_update_listeners(sk);
+ netlink_table_ungrab();
}
- netlink_table_ungrab();
kfree(nlk->groups);
nlk->groups = NULL;
@@ -1410,9 +1427,10 @@ static int netlink_realloc_groups(struct sock *sk)
return err;
}
-static void netlink_unbind(int group, long unsigned int groups,
- struct netlink_sock *nlk)
+static void netlink_undo_bind(int group, long unsigned int groups,
+ struct sock *sk)
{
+ struct netlink_sock *nlk = nlk_sk(sk);
int undo;
if (!nlk->netlink_unbind)
@@ -1420,7 +1438,7 @@ static void netlink_unbind(int group, long unsigned int groups,
for (undo = 0; undo < group; undo++)
if (test_bit(undo, &groups))
- nlk->netlink_unbind(undo);
+ nlk->netlink_unbind(sock_net(sk), undo);
}
static int netlink_bind(struct socket *sock, struct sockaddr *addr,
@@ -1458,10 +1476,10 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
for (group = 0; group < nlk->ngroups; group++) {
if (!test_bit(group, &groups))
continue;
- err = nlk->netlink_bind(group);
+ err = nlk->netlink_bind(net, group);
if (!err)
continue;
- netlink_unbind(group, groups, nlk);
+ netlink_undo_bind(group, groups, sk);
return err;
}
}
@@ -1471,7 +1489,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
netlink_insert(sk, net, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
- netlink_unbind(nlk->ngroups, groups, nlk);
+ netlink_undo_bind(nlk->ngroups, groups, sk);
return err;
}
}
@@ -2122,7 +2140,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
if (!val || val - 1 >= nlk->ngroups)
return -EINVAL;
if (optname == NETLINK_ADD_MEMBERSHIP && nlk->netlink_bind) {
- err = nlk->netlink_bind(val);
+ err = nlk->netlink_bind(sock_net(sk), val);
if (err)
return err;
}
@@ -2131,7 +2149,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
optname == NETLINK_ADD_MEMBERSHIP);
netlink_table_ungrab();
if (optname == NETLINK_DROP_MEMBERSHIP && nlk->netlink_unbind)
- nlk->netlink_unbind(val);
+ nlk->netlink_unbind(sock_net(sk), val);
err = 0;
break;
diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h
index b20a1731759b..f1c31b39aa3e 100644
--- a/net/netlink/af_netlink.h
+++ b/net/netlink/af_netlink.h
@@ -2,6 +2,7 @@
#define _AF_NETLINK_H
#include <linux/rhashtable.h>
+#include <linux/atomic.h>
#include <net/sock.h>
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -39,8 +40,8 @@ struct netlink_sock {
struct mutex *cb_mutex;
struct mutex cb_def_mutex;
void (*netlink_rcv)(struct sk_buff *skb);
- int (*netlink_bind)(int group);
- void (*netlink_unbind)(int group);
+ int (*netlink_bind)(struct net *net, int group);
+ void (*netlink_unbind)(struct net *net, int group);
struct module *module;
#ifdef CONFIG_NETLINK_MMAP
struct mutex pg_vec_lock;
@@ -65,8 +66,8 @@ struct netlink_table {
unsigned int groups;
struct mutex *cb_mutex;
struct module *module;
- int (*bind)(int group);
- void (*unbind)(int group);
+ int (*bind)(struct net *net, int group);
+ void (*unbind)(struct net *net, int group);
bool (*compare)(struct net *net, struct sock *sock);
int registered;
};
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 76393f2f4b22..ee57459fc258 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -23,6 +23,9 @@
static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */
static DECLARE_RWSEM(cb_lock);
+atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0);
+DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq);
+
void genl_lock(void)
{
mutex_lock(&genl_mutex);
@@ -435,15 +438,18 @@ int genl_unregister_family(struct genl_family *family)
genl_lock_all();
- genl_unregister_mc_groups(family);
-
list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
if (family->id != rc->id || strcmp(rc->name, family->name))
continue;
+ genl_unregister_mc_groups(family);
+
list_del(&rc->family_list);
family->n_ops = 0;
- genl_unlock_all();
+ up_write(&cb_lock);
+ wait_event(genl_sk_destructing_waitq,
+ atomic_read(&genl_sk_destructing_cnt) == 0);
+ genl_unlock();
kfree(family->attrbuf);
genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0);
@@ -983,11 +989,63 @@ static struct genl_multicast_group genl_ctrl_groups[] = {
{ .name = "notify", },
};
+static int genl_bind(struct net *net, int group)
+{
+ int i, err = -ENOENT;
+
+ down_read(&cb_lock);
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(i), family_list) {
+ if (group >= f->mcgrp_offset &&
+ group < f->mcgrp_offset + f->n_mcgrps) {
+ int fam_grp = group - f->mcgrp_offset;
+
+ if (!f->netnsok && net != &init_net)
+ err = -ENOENT;
+ else if (f->mcast_bind)
+ err = f->mcast_bind(net, fam_grp);
+ else
+ err = 0;
+ break;
+ }
+ }
+ }
+ up_read(&cb_lock);
+
+ return err;
+}
+
+static void genl_unbind(struct net *net, int group)
+{
+ int i;
+
+ down_read(&cb_lock);
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(i), family_list) {
+ if (group >= f->mcgrp_offset &&
+ group < f->mcgrp_offset + f->n_mcgrps) {
+ int fam_grp = group - f->mcgrp_offset;
+
+ if (f->mcast_unbind)
+ f->mcast_unbind(net, fam_grp);
+ break;
+ }
+ }
+ }
+ up_read(&cb_lock);
+}
+
static int __net_init genl_pernet_init(struct net *net)
{
struct netlink_kernel_cfg cfg = {
.input = genl_rcv,
.flags = NL_CFG_F_NONROOT_RECV,
+ .bind = genl_bind,
+ .unbind = genl_unbind,
};
/* we'll bump the group number right afterwards */
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 764fdc39c63b..770064c83711 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -147,7 +147,8 @@ static int push_mpls(struct sk_buff *skb, struct sw_flow_key *key,
hdr = eth_hdr(skb);
hdr->h_proto = mpls->mpls_ethertype;
- skb_set_inner_protocol(skb, skb->protocol);
+ if (!skb->inner_protocol)
+ skb_set_inner_protocol(skb, skb->protocol);
skb->protocol = mpls->mpls_ethertype;
invalidate_flow_key(key);
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 332b5a031739..b07349e82d78 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -83,8 +83,7 @@ static bool ovs_must_notify(struct genl_family *family, struct genl_info *info,
unsigned int group)
{
return info->nlhdr->nlmsg_flags & NLM_F_ECHO ||
- genl_has_listeners(family, genl_info_net(info)->genl_sock,
- group);
+ genl_has_listeners(family, genl_info_net(info), group);
}
static void ovs_notify(struct genl_family *family,
@@ -525,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info)
struct vport *input_vport;
int len;
int err;
- bool log = !a[OVS_FLOW_ATTR_PROBE];
+ bool log = !a[OVS_PACKET_ATTR_PROBE];
err = -EINVAL;
if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] ||
@@ -611,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = {
[OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN },
[OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED },
[OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED },
+ [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG },
};
static const struct genl_ops dp_packet_genl_ops[] = {
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 70bef2ab7f2b..da2fae0873a5 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
{
struct flow_stats *stats;
int node = numa_node_id();
+ int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
stats = rcu_dereference(flow->stats[node]);
@@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
if (likely(new_stats)) {
new_stats->used = jiffies;
new_stats->packet_count = 1;
- new_stats->byte_count = skb->len;
+ new_stats->byte_count = len;
new_stats->tcp_flags = tcp_flags;
spin_lock_init(&new_stats->lock);
@@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
stats->used = jiffies;
stats->packet_count++;
- stats->byte_count += skb->len;
+ stats->byte_count += len;
stats->tcp_flags |= tcp_flags;
unlock:
spin_unlock(&stats->lock);
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 9645a21d9eaa..d1eecf707613 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -1753,7 +1753,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
__be16 eth_type, __be16 vlan_tci, bool log)
{
const struct nlattr *a;
- bool out_tnl_port = false;
int rem, err;
if (depth >= SAMPLE_ACTION_DEPTH)
@@ -1796,8 +1795,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_OUTPUT:
if (nla_get_u32(a) >= DP_MAX_PORTS)
return -EINVAL;
- out_tnl_port = false;
-
break;
case OVS_ACTION_ATTR_HASH: {
@@ -1832,12 +1829,6 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_PUSH_MPLS: {
const struct ovs_action_push_mpls *mpls = nla_data(a);
- /* Networking stack do not allow simultaneous Tunnel
- * and MPLS GSO.
- */
- if (out_tnl_port)
- return -EINVAL;
-
if (!eth_p_mpls(mpls->mpls_ethertype))
return -EINVAL;
/* Prohibit push MPLS other than to a white list
@@ -1873,11 +1864,9 @@ static int __ovs_nla_copy_actions(const struct nlattr *attr,
case OVS_ACTION_ATTR_SET:
err = validate_set(a, key, sfa,
- &out_tnl_port, eth_type, log);
+ &skip_copy, eth_type, log);
if (err)
return err;
-
- skip_copy = out_tnl_port;
break;
case OVS_ACTION_ATTR_SAMPLE:
diff --git a/net/openvswitch/vport-geneve.c b/net/openvswitch/vport-geneve.c
index 347fa2325b22..484864dd0e68 100644
--- a/net/openvswitch/vport-geneve.c
+++ b/net/openvswitch/vport-geneve.c
@@ -219,7 +219,10 @@ static int geneve_tnl_send(struct vport *vport, struct sk_buff *skb)
false);
if (err < 0)
ip_rt_put(rt);
+ return err;
+
error:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 6b69df545b1d..d4168c442db5 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -73,7 +73,7 @@ static struct sk_buff *__build_header(struct sk_buff *skb,
skb = gre_handle_offloads(skb, !!(tun_key->tun_flags & TUNNEL_CSUM));
if (IS_ERR(skb))
- return NULL;
+ return skb;
tpi.flags = filter_tnl_flags(tun_key->tun_flags);
tpi.proto = htons(ETH_P_TEB);
@@ -144,7 +144,7 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
if (unlikely(!OVS_CB(skb)->egress_tun_info)) {
err = -EINVAL;
- goto error;
+ goto err_free_skb;
}
tun_key = &OVS_CB(skb)->egress_tun_info->tunnel;
@@ -157,8 +157,10 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
fl.flowi4_proto = IPPROTO_GRE;
rt = ip_route_output_key(net, &fl);
- if (IS_ERR(rt))
- return PTR_ERR(rt);
+ if (IS_ERR(rt)) {
+ err = PTR_ERR(rt);
+ goto err_free_skb;
+ }
tunnel_hlen = ip_gre_calc_hlen(tun_key->tun_flags);
@@ -183,8 +185,9 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
/* Push Tunnel header. */
skb = __build_header(skb, tunnel_hlen);
- if (unlikely(!skb)) {
- err = 0;
+ if (IS_ERR(skb)) {
+ err = PTR_ERR(skb);
+ skb = NULL;
goto err_free_rt;
}
@@ -198,7 +201,8 @@ static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
tun_key->ipv4_tos, tun_key->ipv4_ttl, df, false);
err_free_rt:
ip_rt_put(rt);
-error:
+err_free_skb:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 38f95a52241b..d7c46b301024 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -187,7 +187,9 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
false);
if (err < 0)
ip_rt_put(rt);
+ return err;
error:
+ kfree_skb(skb);
return err;
}
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 9584526c0778..2034c6d9cb5a 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb,
stats = this_cpu_ptr(vport->percpu_stats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
- stats->rx_bytes += skb->len;
+ stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0);
u64_stats_update_end(&stats->syncp);
OVS_CB(skb)->input_vport = vport;
@@ -519,10 +519,9 @@ int ovs_vport_send(struct vport *vport, struct sk_buff *skb)
u64_stats_update_end(&stats->syncp);
} else if (sent < 0) {
ovs_vport_record_error(vport, VPORT_E_TX_ERROR);
- kfree_skb(skb);
- } else
+ } else {
ovs_vport_record_error(vport, VPORT_E_TX_DROPPED);
-
+ }
return sent;
}
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index e52a44785681..9cfe2e1dd8b5 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -785,6 +785,7 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
struct tpacket3_hdr *last_pkt;
struct tpacket_hdr_v1 *h1 = &pbd1->hdr.bh1;
+ struct sock *sk = &po->sk;
if (po->stats.stats3.tp_drops)
status |= TP_STATUS_LOSING;
@@ -809,6 +810,8 @@ static void prb_close_block(struct tpacket_kbdq_core *pkc1,
/* Flush the block */
prb_flush_block(pkc1, pbd1, status);
+ sk->sk_data_ready(sk);
+
pkc1->kactive_blk_num = GET_NEXT_PRB_BLK_NUM(pkc1);
}
@@ -2052,12 +2055,12 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
smp_wmb();
#endif
- if (po->tp_version <= TPACKET_V2)
+ if (po->tp_version <= TPACKET_V2) {
__packet_set_status(po, h.raw, status);
- else
+ sk->sk_data_ready(sk);
+ } else {
prb_clear_blk_fill_status(&po->rx_ring);
-
- sk->sk_data_ready(sk);
+ }
drop_n_restore:
if (skb_head != skb->data && skb_shared(skb)) {
@@ -2514,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len)
err = -EINVAL;
if (sock->type == SOCK_DGRAM) {
offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len);
- if (unlikely(offset) < 0)
+ if (unlikely(offset < 0))
goto out_free;
} else {
if (ll_header_truncated(dev, len))
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index 84c8219c3e1c..f59adf8a4cd7 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -180,6 +180,11 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
}
bpf_size = bpf_len * sizeof(*bpf_ops);
+ if (bpf_size != nla_len(tb[TCA_BPF_OPS])) {
+ ret = -EINVAL;
+ goto errout;
+ }
+
bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
if (bpf_ops == NULL) {
ret = -ENOMEM;
@@ -215,15 +220,21 @@ static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
struct cls_bpf_head *head)
{
unsigned int i = 0x80000000;
+ u32 handle;
do {
if (++head->hgen == 0x7FFFFFFF)
head->hgen = 1;
} while (--i > 0 && cls_bpf_get(tp, head->hgen));
- if (i == 0)
+
+ if (unlikely(i == 0)) {
pr_err("Insufficient number of handles\n");
+ handle = 0;
+ } else {
+ handle = head->hgen;
+ }
- return i;
+ return handle;
}
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index f791edd64d6c..26d06dbcc1c8 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -1182,7 +1182,6 @@ void sctp_assoc_update(struct sctp_association *asoc,
asoc->peer.peer_hmacs = new->peer.peer_hmacs;
new->peer.peer_hmacs = NULL;
- sctp_auth_key_put(asoc->asoc_shared_key);
sctp_auth_asoc_init_active_key(asoc, GFP_ATOMIC);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 2625eccb77d5..aafe94bf292e 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1603,7 +1603,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_assoc_t associd = 0;
sctp_cmsgs_t cmsgs = { NULL };
sctp_scope_t scope;
- bool fill_sinfo_ttl = false;
+ bool fill_sinfo_ttl = false, wait_connect = false;
struct sctp_datamsg *datamsg;
int msg_flags = msg->msg_flags;
__u16 sinfo_flags = 0;
@@ -1943,6 +1943,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
if (err < 0)
goto out_free;
+ wait_connect = true;
pr_debug("%s: we associated primitively\n", __func__);
}
@@ -1980,6 +1981,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
sctp_datamsg_put(datamsg);
err = msg_len;
+ if (unlikely(wait_connect)) {
+ timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT);
+ sctp_wait_for_connect(asoc, &timeo);
+ }
+
/* If we are already past ASSOCIATE, the lower
* layers are responsible for association cleanup.
*/
diff --git a/net/socket.c b/net/socket.c
index a2c33a4dc7ba..418795caa897 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -869,9 +869,6 @@ static ssize_t sock_splice_read(struct file *file, loff_t *ppos,
static struct sock_iocb *alloc_sock_iocb(struct kiocb *iocb,
struct sock_iocb *siocb)
{
- if (!is_sync_kiocb(iocb))
- BUG();
-
siocb->kiocb = iocb;
iocb->private = siocb;
return siocb;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 1cb61242e55e..4439ac4c1b53 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
struct kvec *head = buf->head;
struct kvec *tail = buf->tail;
int fraglen;
- int new, old;
+ int new;
if (len > buf->len) {
WARN_ON_ONCE(1);
@@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len)
buf->len -= fraglen;
new = buf->page_base + buf->page_len;
- old = new + fraglen;
- xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT);
+
+ xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT);
if (buf->page_len) {
xdr->p = page_address(*xdr->page_ptr);
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 96ceefeb9daf..a9e174fc0f91 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to)
struct sk_buff *skb;
skb_queue_walk(&bcl->outqueue, skb) {
- if (more(buf_seqno(skb), after))
+ if (more(buf_seqno(skb), after)) {
+ tipc_link_retransmit(bcl, skb, mod(to - after));
break;
+ }
}
- tipc_link_retransmit(bcl, skb, mod(to - after));
}
/**
diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig
index 22ba971741e5..29c8675f9a11 100644
--- a/net/wireless/Kconfig
+++ b/net/wireless/Kconfig
@@ -175,7 +175,7 @@ config CFG80211_INTERNAL_REGDB
Most distributions have a CRDA package. So if unsure, say N.
config CFG80211_WEXT
- bool
+ bool "cfg80211 wireless extensions compatibility"
depends on CFG80211
select WEXT_CORE
help
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 7ca4b5133123..8887c6e5fca8 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2854,6 +2854,9 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
if (!rdev->ops->get_key)
return -EOPNOTSUPP;
+ if (!pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
+ return -ENOENT;
+
msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return -ENOMEM;
@@ -2873,10 +2876,6 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr))
goto nla_put_failure;
- if (pairwise && mac_addr &&
- !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
- return -ENOENT;
-
err = rdev_get_key(rdev, dev, key_idx, pairwise, mac_addr, &cookie,
get_key_callback);
@@ -3047,7 +3046,7 @@ static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
wdev_lock(dev->ieee80211_ptr);
err = nl80211_key_allowed(dev->ieee80211_ptr);
- if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
+ if (key.type == NL80211_KEYTYPE_GROUP && mac_addr &&
!(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
err = -ENOENT;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 7b8309840d4e..d39d1cbc86b1 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -1530,45 +1530,40 @@ static void reg_call_notifier(struct wiphy *wiphy,
static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
{
- struct ieee80211_channel *ch;
struct cfg80211_chan_def chandef;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy);
- bool ret = true;
+ enum nl80211_iftype iftype;
wdev_lock(wdev);
+ iftype = wdev->iftype;
+ /* make sure the interface is active */
if (!wdev->netdev || !netif_running(wdev->netdev))
- goto out;
+ goto wdev_inactive_unlock;
- switch (wdev->iftype) {
+ switch (iftype) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_P2P_GO:
if (!wdev->beacon_interval)
- goto out;
-
- ret = cfg80211_reg_can_beacon(wiphy,
- &wdev->chandef, wdev->iftype);
+ goto wdev_inactive_unlock;
+ chandef = wdev->chandef;
break;
case NL80211_IFTYPE_ADHOC:
if (!wdev->ssid_len)
- goto out;
-
- ret = cfg80211_reg_can_beacon(wiphy,
- &wdev->chandef, wdev->iftype);
+ goto wdev_inactive_unlock;
+ chandef = wdev->chandef;
break;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_CLIENT:
if (!wdev->current_bss ||
!wdev->current_bss->pub.channel)
- goto out;
+ goto wdev_inactive_unlock;
- ch = wdev->current_bss->pub.channel;
- if (rdev->ops->get_channel &&
- !rdev_get_channel(rdev, wdev, &chandef))
- ret = cfg80211_chandef_usable(wiphy, &chandef,
- IEEE80211_CHAN_DISABLED);
- else
- ret = !(ch->flags & IEEE80211_CHAN_DISABLED);
+ if (!rdev->ops->get_channel ||
+ rdev_get_channel(rdev, wdev, &chandef))
+ cfg80211_chandef_create(&chandef,
+ wdev->current_bss->pub.channel,
+ NL80211_CHAN_NO_HT);
break;
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_AP_VLAN:
@@ -1581,9 +1576,26 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
break;
}
-out:
wdev_unlock(wdev);
- return ret;
+
+ switch (iftype) {
+ case NL80211_IFTYPE_AP:
+ case NL80211_IFTYPE_P2P_GO:
+ case NL80211_IFTYPE_ADHOC:
+ return cfg80211_reg_can_beacon(wiphy, &chandef, iftype);
+ case NL80211_IFTYPE_STATION:
+ case NL80211_IFTYPE_P2P_CLIENT:
+ return cfg80211_chandef_usable(wiphy, &chandef,
+ IEEE80211_CHAN_DISABLED);
+ default:
+ break;
+ }
+
+ return true;
+
+wdev_inactive_unlock:
+ wdev_unlock(wdev);
+ return true;
}
static void reg_leave_invalid_chans(struct wiphy *wiphy)
diff --git a/net/wireless/util.c b/net/wireless/util.c
index d0ac795445b7..5488c3662f7d 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -308,6 +308,12 @@ unsigned int __attribute_const__ ieee80211_hdrlen(__le16 fc)
goto out;
}
+ if (ieee80211_is_mgmt(fc)) {
+ if (ieee80211_has_order(fc))
+ hdrlen += IEEE80211_HT_CTL_LEN;
+ goto out;
+ }
+
if (ieee80211_is_ctl(fc)) {
/*
* ACK and CTS are 10 bytes, all others 16. To see how
diff --git a/samples/bpf/test_maps.c b/samples/bpf/test_maps.c
index e286b42307f3..6299ee95cd11 100644
--- a/samples/bpf/test_maps.c
+++ b/samples/bpf/test_maps.c
@@ -69,9 +69,9 @@ static void test_hashmap_sanity(int i, void *data)
/* iterate over two elements */
assert(bpf_get_next_key(map_fd, &key, &next_key) == 0 &&
- next_key == 2);
+ (next_key == 1 || next_key == 2));
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == 0 &&
- next_key == 1);
+ (next_key == 1 || next_key == 2));
assert(bpf_get_next_key(map_fd, &next_key, &next_key) == -1 &&
errno == ENOENT);
diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean
index 1bca180db8ad..627f8cbbedb8 100644
--- a/scripts/Makefile.clean
+++ b/scripts/Makefile.clean
@@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \
__clean-files := $(filter-out $(no-clean-files), $(__clean-files))
-# as clean-files is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# clean-files is given relative to the current directory, unless it
+# starts with $(objtree)/ (which means "./", so do not add "./" unless
+# you want to delete a file from the toplevel object directory).
__clean-files := $(wildcard \
- $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \
- $(filter /%, $(__clean-files)))
+ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \
+ $(filter $(objtree)/%, $(__clean-files)))
-# as clean-dirs is given relative to the current directory, this adds
-# a $(obj) prefix, except for absolute paths
+# same as clean-files
__clean-dirs := $(wildcard \
- $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \
- $(filter /%, $(clean-dirs)))
+ $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \
+ $(filter $(objtree)/%, $(clean-dirs)))
# ==========================================================================
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index 56ea99a12ab7..537c38ca2e1c 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -255,7 +255,6 @@ if ($arch eq "x86_64") {
# force flags for this arch
$ld .= " -m shlelf_linux";
$objcopy .= " -O elf32-sh-linux";
- $cc .= " -m32";
} elsif ($arch eq "powerpc") {
$local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)";
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 9609a7f0faea..c7952375ac53 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys)
if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
atomic_dec(&key->user->nikeys);
- key_user_put(key->user);
-
/* now throw away the key memory */
if (key->type->destroy)
key->type->destroy(key);
+ key_user_put(key->user);
+
kfree(key->description);
#ifdef KEY_DEBUGGING
diff --git a/sound/core/seq/seq_dummy.c b/sound/core/seq/seq_dummy.c
index ec667f158f19..5d905d90d504 100644
--- a/sound/core/seq/seq_dummy.c
+++ b/sound/core/seq/seq_dummy.c
@@ -82,36 +82,6 @@ struct snd_seq_dummy_port {
static int my_client = -1;
/*
- * unuse callback - send ALL_SOUNDS_OFF and RESET_CONTROLLERS events
- * to subscribers.
- * Note: this callback is called only after all subscribers are removed.
- */
-static int
-dummy_unuse(void *private_data, struct snd_seq_port_subscribe *info)
-{
- struct snd_seq_dummy_port *p;
- int i;
- struct snd_seq_event ev;
-
- p = private_data;
- memset(&ev, 0, sizeof(ev));
- if (p->duplex)
- ev.source.port = p->connect;
- else
- ev.source.port = p->port;
- ev.dest.client = SNDRV_SEQ_ADDRESS_SUBSCRIBERS;
- ev.type = SNDRV_SEQ_EVENT_CONTROLLER;
- for (i = 0; i < 16; i++) {
- ev.data.control.channel = i;
- ev.data.control.param = MIDI_CTL_ALL_SOUNDS_OFF;
- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
- ev.data.control.param = MIDI_CTL_RESET_CONTROLLERS;
- snd_seq_kernel_client_dispatch(p->client, &ev, 0, 0);
- }
- return 0;
-}
-
-/*
* event input callback - just redirect events to subscribers
*/
static int
@@ -175,7 +145,6 @@ create_port(int idx, int type)
| SNDRV_SEQ_PORT_TYPE_PORT;
memset(&pcb, 0, sizeof(pcb));
pcb.owner = THIS_MODULE;
- pcb.unuse = dummy_unuse;
pcb.event_input = dummy_input;
pcb.private_free = dummy_free;
pcb.private_data = rec;
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 3badc70124ab..0d580186ef1a 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -21,7 +21,19 @@
#define CYCLES_PER_SECOND 8000
#define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND)
-#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */
+/*
+ * Nominally 3125 bytes/second, but the MIDI port's clock might be
+ * 1% too slow, and the bus clock 100 ppm too fast.
+ */
+#define MIDI_BYTES_PER_SECOND 3093
+
+/*
+ * Several devices look only at the first eight data blocks.
+ * In any case, this is more than enough for the MIDI data rate.
+ */
+#define MAX_MIDI_RX_BLOCKS 8
+
+#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */
/* isochronous header parameters */
#define ISO_DATA_LENGTH_SHIFT 16
@@ -78,8 +90,6 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit,
s->callbacked = false;
s->sync_slave = NULL;
- s->rx_blocks_for_midi = UINT_MAX;
-
return 0;
}
EXPORT_SYMBOL(amdtp_stream_init);
@@ -222,6 +232,14 @@ sfc_found:
for (i = 0; i < pcm_channels; i++)
s->pcm_positions[i] = i;
s->midi_position = s->pcm_channels;
+
+ /*
+ * We do not know the actual MIDI FIFO size of most devices. Just
+ * assume two bytes, i.e., one byte can be received over the bus while
+ * the previous one is transmitted over MIDI.
+ * (The value here is adjusted for midi_ratelimit_per_packet().)
+ */
+ s->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1;
}
EXPORT_SYMBOL(amdtp_stream_set_parameters);
@@ -463,6 +481,36 @@ static void amdtp_fill_pcm_silence(struct amdtp_stream *s,
}
}
+/*
+ * To avoid sending MIDI bytes at too high a rate, assume that the receiving
+ * device has a FIFO, and track how much it is filled. This value increases
+ * by one whenever we send one byte in a packet, but the FIFO empties at
+ * a constant rate independent of our packet rate. One packet has syt_interval
+ * samples, so the number of bytes that empty out of the FIFO, per packet(!),
+ * is MIDI_BYTES_PER_SECOND * syt_interval / sample_rate. To avoid storing
+ * fractional values, the values in midi_fifo_used[] are measured in bytes
+ * multiplied by the sample rate.
+ */
+static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port)
+{
+ int used;
+
+ used = s->midi_fifo_used[port];
+ if (used == 0) /* common shortcut */
+ return true;
+
+ used -= MIDI_BYTES_PER_SECOND * s->syt_interval;
+ used = max(used, 0);
+ s->midi_fifo_used[port] = used;
+
+ return used < s->midi_fifo_limit;
+}
+
+static void midi_rate_use_one_byte(struct amdtp_stream *s, unsigned int port)
+{
+ s->midi_fifo_used[port] += amdtp_rate_table[s->sfc];
+}
+
static void amdtp_fill_midi(struct amdtp_stream *s,
__be32 *buffer, unsigned int frames)
{
@@ -470,16 +518,21 @@ static void amdtp_fill_midi(struct amdtp_stream *s,
u8 *b;
for (f = 0; f < frames; f++) {
- buffer[s->midi_position] = 0;
b = (u8 *)&buffer[s->midi_position];
port = (s->data_block_counter + f) % 8;
- if ((f >= s->rx_blocks_for_midi) ||
- (s->midi[port] == NULL) ||
- (snd_rawmidi_transmit(s->midi[port], b + 1, 1) <= 0))
- b[0] = 0x80;
- else
+ if (f < MAX_MIDI_RX_BLOCKS &&
+ midi_ratelimit_per_packet(s, port) &&
+ s->midi[port] != NULL &&
+ snd_rawmidi_transmit(s->midi[port], &b[1], 1) == 1) {
+ midi_rate_use_one_byte(s, port);
b[0] = 0x81;
+ } else {
+ b[0] = 0x80;
+ b[1] = 0;
+ }
+ b[2] = 0;
+ b[3] = 0;
buffer += s->data_block_quadlets;
}
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index e6e8926275b0..8a03a91e728b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -148,13 +148,12 @@ struct amdtp_stream {
bool double_pcm_frames;
struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
+ int midi_fifo_limit;
+ int midi_fifo_used[AMDTP_MAX_CHANNELS_FOR_MIDI * 8];
/* quirk: fixed interval of dbc between previous/current packets. */
unsigned int tx_dbc_interval;
- /* quirk: the first count of data blocks in an rx packet for MIDI */
- unsigned int rx_blocks_for_midi;
-
bool callbacked;
wait_queue_head_t callback_wait;
struct amdtp_stream *sync_slave;
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index 1aab0a32870c..0ebcabfdc7ce 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -484,13 +484,6 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob)
amdtp_stream_destroy(&bebob->rx_stream);
destroy_both_connections(bebob);
}
- /*
- * The firmware for these devices ignore MIDI messages in more than
- * first 8 data blocks of an received AMDTP packet.
- */
- if (bebob->spec == &maudio_fw410_spec ||
- bebob->spec == &maudio_special_spec)
- bebob->rx_stream.rx_blocks_for_midi = 8;
end:
return err;
}
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index b985fc5ebdc6..4f440e163667 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -179,11 +179,6 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
destroy_stream(efw, &efw->tx_stream);
goto end;
}
- /*
- * Fireworks ignores MIDI messages in more than first 8 data
- * blocks of an received AMDTP packet.
- */
- efw->rx_stream.rx_blocks_for_midi = 8;
/* set IEC61883 compliant mode (actually not fully compliant...) */
err = snd_efw_command_set_tx_mode(efw, SND_EFW_TRANSPORT_MODE_IEC61883);
diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c
index 255dabc6fc33..2a85e4209f0b 100644
--- a/sound/firewire/fireworks/fireworks_transaction.c
+++ b/sound/firewire/fireworks/fireworks_transaction.c
@@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode)
spin_lock_irq(&efw->lock);
t = (struct snd_efw_transaction *)data;
- length = min_t(size_t, t->length * sizeof(t->length), length);
+ length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length);
if (efw->push_ptr < efw->pull_ptr)
capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr);
diff --git a/sound/i2c/other/ak4113.c b/sound/i2c/other/ak4113.c
index 1a3a6fa27158..c6bba99a90b2 100644
--- a/sound/i2c/other/ak4113.c
+++ b/sound/i2c/other/ak4113.c
@@ -56,8 +56,7 @@ static inline unsigned char reg_read(struct ak4113 *ak4113, unsigned char reg)
static void snd_ak4113_free(struct ak4113 *chip)
{
- chip->init = 1; /* don't schedule new work */
- mb();
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
cancel_delayed_work_sync(&chip->work);
kfree(chip);
}
@@ -89,6 +88,7 @@ int snd_ak4113_create(struct snd_card *card, ak4113_read_t *read,
chip->write = write;
chip->private_data = private_data;
INIT_DELAYED_WORK(&chip->work, ak4113_stats);
+ atomic_set(&chip->wq_processing, 0);
for (reg = 0; reg < AK4113_WRITABLE_REGS ; reg++)
chip->regmap[reg] = pgm[reg];
@@ -139,13 +139,11 @@ static void ak4113_init_regs(struct ak4113 *chip)
void snd_ak4113_reinit(struct ak4113 *chip)
{
- chip->init = 1;
- mb();
- flush_delayed_work(&chip->work);
+ if (atomic_inc_return(&chip->wq_processing) == 1)
+ cancel_delayed_work_sync(&chip->work);
ak4113_init_regs(chip);
/* bring up statistics / event queueing */
- chip->init = 0;
- if (chip->kctls[0])
+ if (atomic_dec_and_test(&chip->wq_processing))
schedule_delayed_work(&chip->work, HZ / 10);
}
EXPORT_SYMBOL_GPL(snd_ak4113_reinit);
@@ -632,8 +630,9 @@ static void ak4113_stats(struct work_struct *work)
{
struct ak4113 *chip = container_of(work, struct ak4113, work.work);
- if (!chip->init)
+ if (atomic_inc_return(&chip->wq_processing) == 1)
snd_ak4113_check_rate_and_errors(chip, chip->check_flags);
- schedule_delayed_work(&chip->work, HZ / 10);
+ if (atomic_dec_and_test(&chip->wq_processing))
+ schedule_delayed_work(&chip->work, HZ / 10);
}
diff --git a/sound/i2c/other/ak4114.c b/sound/i2c/other/ak4114.c
index c7f56339415d..b70e6eccbd03 100644
--- a/sound/i2c/other/ak4114.c
+++ b/sound/i2c/other/ak4114.c
@@ -66,8 +66,7 @@ static void reg_dump(struct ak4114 *ak4114)
static void snd_ak4114_free(struct ak4114 *chip)
{
- chip->init = 1; /* don't schedule new work */
- mb();
+ atomic_inc(&chip->wq_processing); /* don't schedule new work */
cancel_delayed_work_sync(&chip->work);
kfree(chip);
}
@@ -100,6 +99,7 @@ int snd_ak4114_create(struct snd_card *card,
chip->write = write;
chip->private_data = private_data;
INIT_DELAYED_WORK(&chip->work, ak4114_stats);
+ atomic_set(&chip->wq_processing, 0);
for (reg = 0; reg < 6; reg++)
chip->regmap[reg] = pgm[reg];
@@ -152,13 +152,11 @@ static void ak4114_init_regs(struct ak4114 *chip)
void snd_ak4114_reinit(struct ak4114 *chip)
{
- chip->init = 1;
- mb();
- flush_delayed_work(&chip->work);
+ if (atomic_inc_return(&chip->wq_processing) == 1)
+ cancel_delayed_work_sync(&chip->work);
ak4114_init_regs(chip);
/* bring up statistics / event queueing */
- chip->init = 0;
- if (chip->kctls[0])
+ if (atomic_dec_and_test(&chip->wq_processing))
schedule_delayed_work(&chip->work, HZ / 10);
}
@@ -612,10 +610,10 @@ static void ak4114_stats(struct work_struct *work)
{
struct ak4114 *chip = container_of(work, struct ak4114, work.work);
- if (!chip->init)
+ if (atomic_inc_return(&chip->wq_processing) == 1)
snd_ak4114_check_rate_and_errors(chip, chip->check_flags);
-
- schedule_delayed_work(&chip->work, HZ / 10);
+ if (atomic_dec_and_test(&chip->wq_processing))
+ schedule_delayed_work(&chip->work, HZ / 10);
}
EXPORT_SYMBOL(snd_ak4114_create);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 8276a743e22e..0cfc9c8c4b4e 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -1922,10 +1922,18 @@ int azx_mixer_create(struct azx *chip)
EXPORT_SYMBOL_GPL(azx_mixer_create);
+static bool is_input_stream(struct azx *chip, unsigned char index)
+{
+ return (index >= chip->capture_index_offset &&
+ index < chip->capture_index_offset + chip->capture_streams);
+}
+
/* initialize SD streams */
int azx_init_stream(struct azx *chip)
{
int i;
+ int in_stream_tag = 0;
+ int out_stream_tag = 0;
/* initialize each stream (aka device)
* assign the starting bdl address to each stream (device)
@@ -1938,9 +1946,21 @@ int azx_init_stream(struct azx *chip)
azx_dev->sd_addr = chip->remap_addr + (0x20 * i + 0x80);
/* int mask: SDI0=0x01, SDI1=0x02, ... SDO3=0x80 */
azx_dev->sd_int_sta_mask = 1 << i;
- /* stream tag: must be non-zero and unique */
azx_dev->index = i;
- azx_dev->stream_tag = i + 1;
+
+ /* stream tag must be unique throughout
+ * the stream direction group,
+ * valid values 1...15
+ * use separate stream tag if the flag
+ * AZX_DCAPS_SEPARATE_STREAM_TAG is used
+ */
+ if (chip->driver_caps & AZX_DCAPS_SEPARATE_STREAM_TAG)
+ azx_dev->stream_tag =
+ is_input_stream(chip, i) ?
+ ++in_stream_tag :
+ ++out_stream_tag;
+ else
+ azx_dev->stream_tag = i + 1;
}
return 0;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2bf0b568e3de..d426a0bd6a5f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -299,6 +299,9 @@ enum {
AZX_DCAPS_PM_RUNTIME | AZX_DCAPS_I915_POWERWELL |\
AZX_DCAPS_SNOOP_TYPE(SCH))
+#define AZX_DCAPS_INTEL_SKYLAKE \
+ (AZX_DCAPS_INTEL_PCH | AZX_DCAPS_SEPARATE_STREAM_TAG)
+
/* quirks for ATI SB / AMD Hudson */
#define AZX_DCAPS_PRESET_ATI_SB \
(AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB |\
@@ -2027,7 +2030,7 @@ static const struct pci_device_id azx_ids[] = {
.driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
/* Sunrise Point-LP */
{ PCI_DEVICE(0x8086, 0x9d70),
- .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
+ .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_SKYLAKE },
/* Haswell */
{ PCI_DEVICE(0x8086, 0x0a0c),
.driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index aa484fdf4338..166e3e84b963 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
+#define AZX_DCAPS_SEPARATE_STREAM_TAG (1 << 30) /* capture and playback use separate stream tag */
enum {
AZX_SNOOP_TYPE_NONE ,
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 5f13d2d18079..b422e406a9cb 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
+{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060");
MODULE_ALIAS("snd-hda-codec-id:10de0067");
MODULE_ALIAS("snd-hda-codec-id:10de0070");
MODULE_ALIAS("snd-hda-codec-id:10de0071");
+MODULE_ALIAS("snd-hda-codec-id:10de0072");
MODULE_ALIAS("snd-hda-codec-id:10de8001");
MODULE_ALIAS("snd-hda-codec-id:11069f80");
MODULE_ALIAS("snd-hda-codec-id:11069f81");
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 4f6413e01c13..605d14003d25 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec)
spec->gpio_mask;
}
if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir))
- spec->gpio_mask &= spec->gpio_mask;
- if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
spec->gpio_dir &= spec->gpio_mask;
+ if (get_int_hint(codec, "gpio_data", &spec->gpio_data))
+ spec->gpio_data &= spec->gpio_mask;
if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask))
spec->eapd_mask &= spec->gpio_mask;
if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute))
diff --git a/sound/soc/adi/axi-i2s.c b/sound/soc/adi/axi-i2s.c
index 7752860f7230..4c23381727a1 100644
--- a/sound/soc/adi/axi-i2s.c
+++ b/sound/soc/adi/axi-i2s.c
@@ -240,6 +240,8 @@ static int axi_i2s_probe(struct platform_device *pdev)
if (ret)
goto err_clk_disable;
+ return 0;
+
err_clk_disable:
clk_disable_unprepare(i2s->clk);
return ret;
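
[Editor's note] The axi-i2s fix adds the `return 0;` that was missing before the error label, so the success path no longer falls through into the cleanup code. A generic sketch of the goto-cleanup pattern, with purely hypothetical resource helpers:

    #include <stdio.h>

    /* Stand-ins for resource setup/teardown; illustrative only. */
    static int enable_clock(void) { puts("clock enabled"); return 0; }
    static void disable_clock(void) { puts("clock disabled"); }
    static int register_device(void) { puts("device registered"); return 0; }

    static int probe(void)
    {
            int ret;

            ret = enable_clock();
            if (ret)
                    return ret;

            ret = register_device();
            if (ret)
                    goto err_clk_disable;

            return 0;   /* without this, success would fall into the cleanup below */

    err_clk_disable:
            disable_clock();
            return ret;
    }

    int main(void)
    {
            return probe();
    }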
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 99ff35e2a25d..35e44e463cfe 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -348,7 +348,6 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
struct atmel_pcm_dma_params *dma_params;
int dir, channels, bits;
u32 tfmr, rfmr, tcmr, rcmr;
- int start_event;
int ret;
int fslen, fslen_ext;
@@ -457,19 +456,10 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
* The SSC transmit clock is obtained from the BCLK signal on
* on the TK line, and the SSC receive clock is
* generated from the transmit clock.
- *
- * For single channel data, one sample is transferred
- * on the falling edge of the LRC clock.
- * For two channel data, one sample is
- * transferred on both edges of the LRC clock.
*/
- start_event = ((channels == 1)
- ? SSC_START_FALLING_RF
- : SSC_START_EDGE_RF);
-
rcmr = SSC_BF(RCMR_PERIOD, 0)
| SSC_BF(RCMR_STTDLY, START_DELAY)
- | SSC_BF(RCMR_START, start_event)
+ | SSC_BF(RCMR_START, SSC_START_FALLING_RF)
| SSC_BF(RCMR_CKI, SSC_CKI_RISING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
@@ -478,14 +468,14 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rfmr = SSC_BF(RFMR_FSEDGE, SSC_FSEDGE_POSITIVE)
| SSC_BF(RFMR_FSOS, SSC_FSOS_NONE)
| SSC_BF(RFMR_FSLEN, 0)
- | SSC_BF(RFMR_DATNB, 0)
+ | SSC_BF(RFMR_DATNB, (channels - 1))
| SSC_BIT(RFMR_MSBF)
| SSC_BF(RFMR_LOOP, 0)
| SSC_BF(RFMR_DATLEN, (bits - 1));
tcmr = SSC_BF(TCMR_PERIOD, 0)
| SSC_BF(TCMR_STTDLY, START_DELAY)
- | SSC_BF(TCMR_START, start_event)
+ | SSC_BF(TCMR_START, SSC_START_FALLING_RF)
| SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_NONE)
| SSC_BF(TCMR_CKS, ssc->clk_from_rk_pin ?
@@ -495,7 +485,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
| SSC_BF(TFMR_FSDEN, 0)
| SSC_BF(TFMR_FSOS, SSC_FSOS_NONE)
| SSC_BF(TFMR_FSLEN, 0)
- | SSC_BF(TFMR_DATNB, 0)
+ | SSC_BF(TFMR_DATNB, (channels - 1))
| SSC_BIT(TFMR_MSBF)
| SSC_BF(TFMR_DATDEF, 0)
| SSC_BF(TFMR_DATLEN, (bits - 1));
@@ -512,7 +502,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rcmr = SSC_BF(RCMR_PERIOD, ssc_p->rcmr_period)
| SSC_BF(RCMR_STTDLY, 1)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
- | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, SSC_CKS_DIV);
@@ -527,7 +517,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
tcmr = SSC_BF(TCMR_PERIOD, ssc_p->tcmr_period)
| SSC_BF(TCMR_STTDLY, 1)
| SSC_BF(TCMR_START, SSC_START_RISING_RF)
- | SSC_BF(TCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(TCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(TCMR_CKO, SSC_CKO_CONTINUOUS)
| SSC_BF(TCMR_CKS, SSC_CKS_DIV);
@@ -556,7 +546,7 @@ static int atmel_ssc_hw_params(struct snd_pcm_substream *substream,
rcmr = SSC_BF(RCMR_PERIOD, 0)
| SSC_BF(RCMR_STTDLY, START_DELAY)
| SSC_BF(RCMR_START, SSC_START_RISING_RF)
- | SSC_BF(RCMR_CKI, SSC_CKI_RISING)
+ | SSC_BF(RCMR_CKI, SSC_CKI_FALLING)
| SSC_BF(RCMR_CKO, SSC_CKO_NONE)
| SSC_BF(RCMR_CKS, ssc->clk_from_rk_pin ?
SSC_CKS_PIN : SSC_CKS_CLOCK);
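
[Editor's note] In the atmel_ssc_dai hunks above, DATNB is now programmed as (channels - 1): the SSC "data number per frame" field counts words beyond the first, so stereo needs 1, not 0. A toy sketch of building such a field with a BF-style macro; the shift and width values below are made up, not the real SSC register layout.

    #include <stdio.h>

    /* Simplified bit-field helper in the spirit of SSC_BF(); offsets are invented. */
    #define FIELD(shift, width, val)  (((val) & ((1u << (width)) - 1)) << (shift))
    #define DATNB_SHIFT  8
    #define DATNB_WIDTH  4

    int main(void)
    {
            for (int channels = 1; channels <= 2; channels++) {
                    unsigned int rfmr = FIELD(DATNB_SHIFT, DATNB_WIDTH, channels - 1);
                    printf("channels=%d -> DATNB=%d (reg bits 0x%x)\n",
                           channels, channels - 1, rfmr);
            }
            return 0;
    }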
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index e5f2fb884bf3..30c673cdc12e 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -188,8 +188,8 @@ static const DECLARE_TLV_DB_SCALE(boost_tlv, 0, 80, 0);
static const char * const pcm512x_dsp_program_texts[] = {
"FIR interpolation with de-emphasis",
"Low latency IIR with de-emphasis",
- "Fixed process flow",
"High attenuation with de-emphasis",
+ "Fixed process flow",
"Ringing-less low latency FIR",
};
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index 2cd4fe463102..1d1c7f8a9af2 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -861,10 +861,8 @@ static int rt286_hw_params(struct snd_pcm_substream *substream,
RT286_I2S_CTRL1, 0x0018, d_len_code << 3);
dev_dbg(codec->dev, "format val = 0x%x\n", val);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
- snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
- else
- snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
+ snd_soc_update_bits(codec, RT286_DAC_FORMAT, 0x407f, val);
+ snd_soc_update_bits(codec, RT286_ADC_FORMAT, 0x407f, val);
return 0;
}
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index c3f2decd643c..1ff726c29249 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -2124,6 +2124,7 @@ MODULE_DEVICE_TABLE(of, rt5640_of_match);
static struct acpi_device_id rt5640_acpi_match[] = {
{ "INT33CA", 0 },
{ "10EC5640", 0 },
+ { "10EC5642", 0 },
{ },
};
MODULE_DEVICE_TABLE(acpi, rt5640_acpi_match);
diff --git a/sound/soc/codecs/rt5677.c b/sound/soc/codecs/rt5677.c
index 81fe1464d268..918ada9738b0 100644
--- a/sound/soc/codecs/rt5677.c
+++ b/sound/soc/codecs/rt5677.c
@@ -784,8 +784,8 @@ static unsigned int bst_tlv[] = {
static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
ucontrol->value.integer.value[0] = rt5677->dsp_vad_en;
@@ -795,8 +795,9 @@ static int rt5677_dsp_vad_get(struct snd_kcontrol *kcontrol,
static int rt5677_dsp_vad_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
- struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
+ struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
+ struct rt5677_priv *rt5677 = snd_soc_component_get_drvdata(component);
+ struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
rt5677->dsp_vad_en = !!ucontrol->value.integer.value[0];
@@ -2082,10 +2083,14 @@ static int rt5677_set_pll1_event(struct snd_soc_dapm_widget *w,
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
- case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_PRE_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x2);
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL1_CTRL2, 0x2, 0x0);
break;
+
default:
return 0;
}
@@ -2100,10 +2105,14 @@ static int rt5677_set_pll2_event(struct snd_soc_dapm_widget *w,
struct rt5677_priv *rt5677 = snd_soc_codec_get_drvdata(codec);
switch (event) {
- case SND_SOC_DAPM_POST_PMU:
+ case SND_SOC_DAPM_PRE_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x2);
+ break;
+
+ case SND_SOC_DAPM_POST_PMU:
regmap_update_bits(rt5677->regmap, RT5677_PLL2_CTRL2, 0x2, 0x0);
break;
+
default:
return 0;
}
@@ -2211,9 +2220,11 @@ static int rt5677_vref_event(struct snd_soc_dapm_widget *w,
static const struct snd_soc_dapm_widget rt5677_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("PLL1", RT5677_PWR_ANLG2, RT5677_PWR_PLL1_BIT,
- 0, rt5677_set_pll1_event, SND_SOC_DAPM_POST_PMU),
+ 0, rt5677_set_pll1_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("PLL2", RT5677_PWR_ANLG2, RT5677_PWR_PLL2_BIT,
- 0, rt5677_set_pll2_event, SND_SOC_DAPM_POST_PMU),
+ 0, rt5677_set_pll2_event, SND_SOC_DAPM_PRE_PMU |
+ SND_SOC_DAPM_POST_PMU),
/* Input Side */
/* micbias */
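
[Editor's note] The rt5677 change splits the PLL reset pulse across two DAPM events: the reset bit is asserted before the supply powers up (PRE_PMU) and cleared after it is up (POST_PMU). Below is a stripped-down sketch of one callback handling both phases; the event codes and register helper are placeholders, not the real ASoC API.

    #include <stdio.h>

    /* Simplified event codes mirroring SND_SOC_DAPM_PRE_PMU / POST_PMU. */
    enum { EV_PRE_PMU, EV_POST_PMU, EV_OTHER };

    /* Stand-in for regmap_update_bits(map, reg, mask, val). */
    static void update_bits(const char *reg, unsigned int mask, unsigned int val)
    {
            printf("%s: mask=0x%x val=0x%x\n", reg, mask, val);
    }

    static int pll_event(int event)
    {
            switch (event) {
            case EV_PRE_PMU:
                    update_bits("PLL_CTRL2", 0x2, 0x2); /* assert reset before power-up */
                    break;
            case EV_POST_PMU:
                    update_bits("PLL_CTRL2", 0x2, 0x0); /* release reset after power-up */
                    break;
            default:
                    return 0;
            }
            return 0;
    }

    int main(void)
    {
            pll_event(EV_PRE_PMU);
            pll_event(EV_POST_PMU);
            return 0;
    }

The widget declarations are updated accordingly so the callback is invoked for both event flags.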
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 29cf7ce610f4..aa98be32bb60 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -483,21 +483,21 @@ static int sgtl5000_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt)
/* setting i2s data format */
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_A:
- i2sctl |= SGTL5000_I2S_MODE_PCM;
+ i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
break;
case SND_SOC_DAIFMT_DSP_B:
- i2sctl |= SGTL5000_I2S_MODE_PCM;
+ i2sctl |= SGTL5000_I2S_MODE_PCM << SGTL5000_I2S_MODE_SHIFT;
i2sctl |= SGTL5000_I2S_LRALIGN;
break;
case SND_SOC_DAIFMT_I2S:
- i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
break;
case SND_SOC_DAIFMT_RIGHT_J:
- i2sctl |= SGTL5000_I2S_MODE_RJ;
+ i2sctl |= SGTL5000_I2S_MODE_RJ << SGTL5000_I2S_MODE_SHIFT;
i2sctl |= SGTL5000_I2S_LRPOL;
break;
case SND_SOC_DAIFMT_LEFT_J:
- i2sctl |= SGTL5000_I2S_MODE_I2S_LJ;
+ i2sctl |= SGTL5000_I2S_MODE_I2S_LJ << SGTL5000_I2S_MODE_SHIFT;
i2sctl |= SGTL5000_I2S_LRALIGN;
break;
default:
@@ -1462,6 +1462,9 @@ static int sgtl5000_i2c_probe(struct i2c_client *client,
if (ret)
return ret;
+ /* Need 8 clocks before I2C accesses */
+ udelay(1);
+
/* read chip information */
ret = regmap_read(sgtl5000->regmap, SGTL5000_CHIP_ID, &reg);
if (ret)
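
[Editor's note] The sgtl5000 hunk fixes a classic "field value used without its shift" bug: the mode constants are raw field values and must be shifted into the I2S mode bits before being ORed into the register. A small demonstration with a hypothetical register layout (the shift and values below are not the real SGTL5000 bits):

    #include <stdio.h>

    /* Hypothetical layout: mode occupies bits [3:2]. */
    #define I2S_MODE_SHIFT  2
    #define I2S_MODE_MASK   (0x3u << I2S_MODE_SHIFT)
    #define I2S_MODE_PCM    0x2u    /* raw field value, not yet shifted */

    int main(void)
    {
            unsigned int wrong = 0, right = 0;

            wrong |= I2S_MODE_PCM;                     /* lands in bits [1:0] - wrong field */
            right |= I2S_MODE_PCM << I2S_MODE_SHIFT;   /* lands in bits [3:2] as intended */

            printf("wrong=0x%x right=0x%x mask=0x%x\n", wrong, right, I2S_MODE_MASK);
            return 0;
    }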
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index b7ebce054b4e..dd222b10ce13 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -1046,7 +1046,7 @@ static int aic3x_prepare(struct snd_pcm_substream *substream,
delay += aic3x->tdm_delay;
/* Configure data delay */
- snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, aic3x->tdm_delay);
+ snd_soc_write(codec, AIC3X_ASD_INTF_CTRLC, delay);
return 0;
}
diff --git a/sound/soc/codecs/ts3a227e.c b/sound/soc/codecs/ts3a227e.c
index 1d1205702d23..9f2dced046de 100644
--- a/sound/soc/codecs/ts3a227e.c
+++ b/sound/soc/codecs/ts3a227e.c
@@ -254,6 +254,7 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
struct ts3a227e *ts3a227e;
struct device *dev = &i2c->dev;
int ret;
+ unsigned int acc_reg;
ts3a227e = devm_kzalloc(&i2c->dev, sizeof(*ts3a227e), GFP_KERNEL);
if (ts3a227e == NULL)
@@ -283,6 +284,11 @@ static int ts3a227e_i2c_probe(struct i2c_client *i2c,
INTB_DISABLE | ADC_COMPLETE_INT_DISABLE,
ADC_COMPLETE_INT_DISABLE);
+ /* Read jack status because chip might not trigger interrupt at boot. */
+ regmap_read(ts3a227e->regmap, TS3A227E_REG_ACCESSORY_STATUS, &acc_reg);
+ ts3a227e_new_jack_state(ts3a227e, acc_reg);
+ ts3a227e_jack_report(ts3a227e);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index b9211b42f6e9..b115ed815db9 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -717,6 +717,8 @@ static int wm8731_i2c_probe(struct i2c_client *i2c,
if (wm8731 == NULL)
return -ENOMEM;
+ mutex_init(&wm8731->lock);
+
wm8731->regmap = devm_regmap_init_i2c(i2c, &wm8731_regmap);
if (IS_ERR(wm8731->regmap)) {
ret = PTR_ERR(wm8731->regmap);
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 4d2d2b1380d5..75b87c5c0f04 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1076,10 +1076,13 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
{ "Right Capture PGA", NULL, "Right Capture Mux" },
{ "Right Capture PGA", NULL, "Right Capture Inverting Mux" },
- { "AIFOUTL", "Left", "ADCL" },
- { "AIFOUTL", "Right", "ADCR" },
- { "AIFOUTR", "Left", "ADCL" },
- { "AIFOUTR", "Right", "ADCR" },
+ { "AIFOUTL Mux", "Left", "ADCL" },
+ { "AIFOUTL Mux", "Right", "ADCR" },
+ { "AIFOUTR Mux", "Left", "ADCL" },
+ { "AIFOUTR Mux", "Right", "ADCR" },
+
+ { "AIFOUTL", NULL, "AIFOUTL Mux" },
+ { "AIFOUTR", NULL, "AIFOUTR Mux" },
{ "ADCL", NULL, "CLK_DSP" },
{ "ADCL", NULL, "Left Capture PGA" },
@@ -1089,12 +1092,16 @@ static const struct snd_soc_dapm_route adc_intercon[] = {
};
static const struct snd_soc_dapm_route dac_intercon[] = {
- { "DACL", "Right", "AIFINR" },
- { "DACL", "Left", "AIFINL" },
+ { "DACL Mux", "Left", "AIFINL" },
+ { "DACL Mux", "Right", "AIFINR" },
+
+ { "DACR Mux", "Left", "AIFINL" },
+ { "DACR Mux", "Right", "AIFINR" },
+
+ { "DACL", NULL, "DACL Mux" },
{ "DACL", NULL, "CLK_DSP" },
- { "DACR", "Right", "AIFINR" },
- { "DACR", "Left", "AIFINL" },
+ { "DACR", NULL, "DACR Mux" },
{ "DACR", NULL, "CLK_DSP" },
{ "Charge pump", NULL, "SYSCLK" },
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 031a1ae71d94..a96eb497a379 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -556,7 +556,7 @@ static struct {
{ 22050, 2 },
{ 24000, 2 },
{ 16000, 3 },
- { 11250, 4 },
+ { 11025, 4 },
{ 12000, 4 },
{ 8000, 5 },
};
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index b93168d4f648..8d18bbda661b 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -209,16 +209,9 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
switch (config->chan_nr) {
case EIGHT_CHANNEL_SUPPORT:
- ch_reg = 3;
- break;
case SIX_CHANNEL_SUPPORT:
- ch_reg = 2;
- break;
case FOUR_CHANNEL_SUPPORT:
- ch_reg = 1;
- break;
case TWO_CHANNEL_SUPPORT:
- ch_reg = 0;
break;
default:
dev_err(dev->dev, "channel not supported\n");
@@ -227,18 +220,22 @@ static int dw_i2s_hw_params(struct snd_pcm_substream *substream,
i2s_disable_channels(dev, substream->stream);
- if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- i2s_write_reg(dev->i2s_base, TCR(ch_reg), xfer_resolution);
- i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
- i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
- } else {
- i2s_write_reg(dev->i2s_base, RCR(ch_reg), xfer_resolution);
- i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
- irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
- i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
- i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ for (ch_reg = 0; ch_reg < (config->chan_nr / 2); ch_reg++) {
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
+ i2s_write_reg(dev->i2s_base, TCR(ch_reg),
+ xfer_resolution);
+ i2s_write_reg(dev->i2s_base, TFCR(ch_reg), 0x02);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x30);
+ i2s_write_reg(dev->i2s_base, TER(ch_reg), 1);
+ } else {
+ i2s_write_reg(dev->i2s_base, RCR(ch_reg),
+ xfer_resolution);
+ i2s_write_reg(dev->i2s_base, RFCR(ch_reg), 0x07);
+ irq = i2s_read_reg(dev->i2s_base, IMR(ch_reg));
+ i2s_write_reg(dev->i2s_base, IMR(ch_reg), irq & ~0x03);
+ i2s_write_reg(dev->i2s_base, RER(ch_reg), 1);
+ }
}
i2s_write_reg(dev->i2s_base, CCR, ccr);
@@ -263,6 +260,19 @@ static void dw_i2s_shutdown(struct snd_pcm_substream *substream,
snd_soc_dai_set_dma_data(dai, substream, NULL);
}
+static int dw_i2s_prepare(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *dai)
+{
+ struct dw_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ i2s_write_reg(dev->i2s_base, TXFFR, 1);
+ else
+ i2s_write_reg(dev->i2s_base, RXFFR, 1);
+
+ return 0;
+}
+
static int dw_i2s_trigger(struct snd_pcm_substream *substream,
int cmd, struct snd_soc_dai *dai)
{
@@ -294,6 +304,7 @@ static struct snd_soc_dai_ops dw_i2s_dai_ops = {
.startup = dw_i2s_startup,
.shutdown = dw_i2s_shutdown,
.hw_params = dw_i2s_hw_params,
+ .prepare = dw_i2s_prepare,
.trigger = dw_i2s_trigger,
};
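
[Editor's note] The designware_i2s change replaces the single per-mode register write with a loop over one TX/RX register bank per stereo pair, and adds a prepare hook that flushes the FIFO before each start. A compact sketch of the loop logic with stubbed register writes; the register names mirror the driver but the helper is illustrative only.

    #include <stdio.h>

    /* Stand-in for i2s_write_reg(base, reg, val); prints instead of touching hardware. */
    static void write_reg(const char *reg, int bank, unsigned int val)
    {
            printf("%s(%d) <= 0x%x\n", reg, bank, val);
    }

    static void configure_playback(int channels, unsigned int resolution)
    {
            /* one register bank (TCR/TFCR/TER) per stereo channel pair */
            for (int bank = 0; bank < channels / 2; bank++) {
                    write_reg("TCR", bank, resolution);
                    write_reg("TFCR", bank, 0x02);
                    write_reg("TER", bank, 1);
            }
    }

    int main(void)
    {
            configure_playback(8, 0x5);     /* 8 channels -> banks 0..3 */
            return 0;
    }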
diff --git a/sound/soc/fsl/fsl_esai.h b/sound/soc/fsl/fsl_esai.h
index 91a550f4a10d..5e793bbb6b02 100644
--- a/sound/soc/fsl/fsl_esai.h
+++ b/sound/soc/fsl/fsl_esai.h
@@ -302,7 +302,7 @@
#define ESAI_xCCR_xFP_MASK (((1 << ESAI_xCCR_xFP_WIDTH) - 1) << ESAI_xCCR_xFP_SHIFT)
#define ESAI_xCCR_xFP(v) ((((v) - 1) << ESAI_xCCR_xFP_SHIFT) & ESAI_xCCR_xFP_MASK)
#define ESAI_xCCR_xDC_SHIFT 9
-#define ESAI_xCCR_xDC_WIDTH 4
+#define ESAI_xCCR_xDC_WIDTH 5
#define ESAI_xCCR_xDC_MASK (((1 << ESAI_xCCR_xDC_WIDTH) - 1) << ESAI_xCCR_xDC_SHIFT)
#define ESAI_xCCR_xDC(v) ((((v) - 1) << ESAI_xCCR_xDC_SHIFT) & ESAI_xCCR_xDC_MASK)
#define ESAI_xCCR_xPSR_SHIFT 8
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index a65f17d57ffb..059496ed9ad7 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -1362,9 +1362,9 @@ static int fsl_ssi_probe(struct platform_device *pdev)
}
ssi_private->irq = platform_get_irq(pdev, 0);
- if (!ssi_private->irq) {
+ if (ssi_private->irq < 0) {
dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
- return -ENXIO;
+ return ssi_private->irq;
}
/* Are the RX and the TX clocks locked? */
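
[Editor's note] The fsl_ssi fix reflects the platform_get_irq() contract: on failure it returns a negative errno, and 0 is not the error value, so the check must be `< 0` and the returned code should be propagated. A generic sketch of the pattern with a mocked getter:

    #include <errno.h>
    #include <stdio.h>

    /* Mocked platform_get_irq(): the real one returns the IRQ number on success
     * and a negative errno on failure; 0 is not used to signal an error. */
    static int mock_get_irq(int present)
    {
            return present ? 37 : -ENXIO;
    }

    static int probe(int irq_present)
    {
            int irq = mock_get_irq(irq_present);

            if (irq < 0) {          /* "!irq" would miss every real failure */
                    fprintf(stderr, "no irq (%d)\n", irq);
                    return irq;     /* propagate the errno instead of inventing one */
            }

            printf("using irq %d\n", irq);
            return 0;
    }

    int main(void)
    {
            probe(1);
            probe(0);
            return 0;
    }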
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 4caacb05a623..cd146d4fa805 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -257,6 +257,7 @@ static int imx_wm8962_probe(struct platform_device *pdev)
if (ret)
goto clk_fail;
data->card.num_links = 1;
+ data->card.owner = THIS_MODULE;
data->card.dai_link = &data->dai;
data->card.dapm_widgets = imx_wm8962_dapm_widgets;
data->card.num_dapm_widgets = ARRAY_SIZE(imx_wm8962_dapm_widgets);
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index fb9240fdc9b7..7fe3009b1c43 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -452,9 +452,8 @@ static int asoc_simple_card_parse_of(struct device_node *node,
}
/* Decrease the reference count of the device nodes */
-static int asoc_simple_card_unref(struct platform_device *pdev)
+static int asoc_simple_card_unref(struct snd_soc_card *card)
{
- struct snd_soc_card *card = platform_get_drvdata(pdev);
struct snd_soc_dai_link *dai_link;
int num_links;
@@ -556,7 +555,7 @@ static int asoc_simple_card_probe(struct platform_device *pdev)
return ret;
err:
- asoc_simple_card_unref(pdev);
+ asoc_simple_card_unref(&priv->snd_card);
return ret;
}
@@ -572,7 +571,7 @@ static int asoc_simple_card_remove(struct platform_device *pdev)
snd_soc_jack_free_gpios(&simple_card_mic_jack, 1,
&simple_card_mic_jack_gpio);
- return asoc_simple_card_unref(pdev);
+ return asoc_simple_card_unref(card);
}
static const struct of_device_id asoc_simple_of_match[] = {
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index e989ecf046c9..f86de1211b96 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -89,7 +89,7 @@ config SND_SOC_INTEL_BROADWELL_MACH
config SND_SOC_INTEL_BYTCR_RT5640_MACH
tristate "ASoC Audio DSP Support for MID BYT Platform"
- depends on X86
+ depends on X86 && I2C
select SND_SOC_RT5640
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
@@ -101,7 +101,7 @@ config SND_SOC_INTEL_BYTCR_RT5640_MACH
config SND_SOC_INTEL_CHT_BSW_RT5672_MACH
tristate "ASoC Audio driver for Intel Cherrytrail & Braswell with RT5672 codec"
- depends on X86_INTEL_LPSS
+ depends on X86_INTEL_LPSS && I2C
select SND_SOC_RT5670
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
diff --git a/sound/soc/intel/bytcr_dpcm_rt5640.c b/sound/soc/intel/bytcr_dpcm_rt5640.c
index f5d0fc1ab10c..eef0c56ec32e 100644
--- a/sound/soc/intel/bytcr_dpcm_rt5640.c
+++ b/sound/soc/intel/bytcr_dpcm_rt5640.c
@@ -227,4 +227,4 @@ module_platform_driver(snd_byt_mc_driver);
MODULE_DESCRIPTION("ASoC Intel(R) Baytrail CR Machine driver");
MODULE_AUTHOR("Subhransu S. Prusty <subhransu.s.prusty@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("platform:bytrt5640-audio");
+MODULE_ALIAS("platform:bytt100_rt5640");
diff --git a/sound/soc/intel/sst-firmware.c b/sound/soc/intel/sst-firmware.c
index 4a5bde9c686b..b3f9489794a6 100644
--- a/sound/soc/intel/sst-firmware.c
+++ b/sound/soc/intel/sst-firmware.c
@@ -706,6 +706,7 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
struct list_head *block_list)
{
struct sst_mem_block *block, *tmp;
+ struct sst_block_allocator ba_tmp = *ba;
u32 end = ba->offset + ba->size, block_end;
int err;
@@ -730,9 +731,9 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
if (ba->offset >= block->offset && ba->offset < block_end) {
/* align ba to block boundary */
- ba->size -= block_end - ba->offset;
- ba->offset = block_end;
- err = block_alloc_contiguous(dsp, ba, block_list);
+ ba_tmp.size -= block_end - ba->offset;
+ ba_tmp.offset = block_end;
+ err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
if (err < 0)
return -ENOMEM;
@@ -763,10 +764,14 @@ static int block_alloc_fixed(struct sst_dsp *dsp, struct sst_block_allocator *ba
/* does block span more than 1 section */
if (ba->offset >= block->offset && ba->offset < block_end) {
+ /* add block */
+ list_move(&block->list, &dsp->used_block_list);
+ list_add(&block->module_list, block_list);
/* align ba to block boundary */
- ba->offset = block->offset;
+ ba_tmp.size -= block_end - ba->offset;
+ ba_tmp.offset = block_end;
- err = block_alloc_contiguous(dsp, ba, block_list);
+ err = block_alloc_contiguous(dsp, &ba_tmp, block_list);
if (err < 0)
return -ENOMEM;
diff --git a/sound/soc/intel/sst-haswell-ipc.c b/sound/soc/intel/sst-haswell-ipc.c
index 3f8c48231364..8156cc1accb7 100644
--- a/sound/soc/intel/sst-haswell-ipc.c
+++ b/sound/soc/intel/sst-haswell-ipc.c
@@ -651,11 +651,11 @@ static void hsw_notification_work(struct work_struct *work)
}
/* tell DSP that notification has been handled */
- sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IPCD,
+ sst_dsp_shim_update_bits(hsw->dsp, SST_IPCD,
SST_IPCD_BUSY | SST_IPCD_DONE, SST_IPCD_DONE);
/* unmask busy interrupt */
- sst_dsp_shim_update_bits_unlocked(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
+ sst_dsp_shim_update_bits(hsw->dsp, SST_IMRX, SST_IMRX_BUSY, 0);
}
static struct ipc_message *reply_find_msg(struct sst_hsw *hsw, u32 header)
@@ -1228,6 +1228,11 @@ int sst_hsw_stream_free(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
struct sst_dsp *sst = hsw->dsp;
unsigned long flags;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to free, ignore it.\n");
+ return 0;
+ }
+
/* dont free DSP streams that are not commited */
if (!stream->commited)
goto out;
@@ -1415,6 +1420,16 @@ int sst_hsw_stream_commit(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
u32 header;
int ret;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to commit, ignore it.\n");
+ return 0;
+ }
+
+ if (stream->commited) {
+ dev_warn(hsw->dev, "warning: stream is already committed, ignore it.\n");
+ return 0;
+ }
+
trace_ipc_request("stream alloc", stream->host_id);
header = IPC_GLB_TYPE(IPC_GLB_ALLOCATE_STREAM);
@@ -1519,6 +1534,11 @@ int sst_hsw_stream_pause(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
{
int ret;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to pause, ignore it.\n");
+ return 0;
+ }
+
trace_ipc_request("stream pause", stream->reply.stream_hw_id);
ret = sst_hsw_stream_operations(hsw, IPC_STR_PAUSE,
@@ -1535,6 +1555,11 @@ int sst_hsw_stream_resume(struct sst_hsw *hsw, struct sst_hsw_stream *stream,
{
int ret;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to resume, ignore it.\n");
+ return 0;
+ }
+
trace_ipc_request("stream resume", stream->reply.stream_hw_id);
ret = sst_hsw_stream_operations(hsw, IPC_STR_RESUME,
@@ -1550,6 +1575,11 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
{
int ret, tries = 10;
+ if (!stream) {
+ dev_warn(hsw->dev, "warning: stream is NULL, no stream to reset, ignore it.\n");
+ return 0;
+ }
+
/* dont reset streams that are not commited */
if (!stream->commited)
return 0;
diff --git a/sound/soc/intel/sst/sst_acpi.c b/sound/soc/intel/sst/sst_acpi.c
index 3abc29e8a928..b3360139c41a 100644
--- a/sound/soc/intel/sst/sst_acpi.c
+++ b/sound/soc/intel/sst/sst_acpi.c
@@ -343,14 +343,14 @@ int sst_acpi_remove(struct platform_device *pdev)
}
static struct sst_machines sst_acpi_bytcr[] = {
- {"10EC5640", "T100", "bytt100_rt5640", NULL, "fw_sst_0f28.bin",
+ {"10EC5640", "T100", "bytt100_rt5640", NULL, "intel/fw_sst_0f28.bin",
&byt_rvp_platform_data },
{},
};
/* Cherryview-based platforms: CherryTrail and Braswell */
static struct sst_machines sst_acpi_chv[] = {
- {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "fw_sst_22a8.bin",
+ {"10EC5670", "cht-bsw", "cht-bsw-rt5672", NULL, "intel/fw_sst_22a8.bin",
&chv_platform_data },
{},
};
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 8b79cafab1e2..c7eb9dd67f60 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -434,7 +434,7 @@ static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
case SND_SOC_DAIFMT_CBM_CFS:
/* McBSP slave. FS clock as output */
regs->srgr2 |= FSGM;
- regs->pcr0 |= FSXM;
+ regs->pcr0 |= FSXM | FSRM;
break;
case SND_SOC_DAIFMT_CBM_CFM:
/* McBSP slave */
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index 26ec5117b35c..dcc26eda0539 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -335,6 +335,7 @@ static struct snd_soc_dai_driver rockchip_i2s_dai = {
SNDRV_PCM_FMTBIT_S24_LE),
},
.ops = &rockchip_i2s_dai_ops,
+ .symmetric_rates = 1,
};
static const struct snd_soc_component_driver rockchip_i2s_component = {
@@ -454,11 +455,11 @@ static int rockchip_i2s_probe(struct platform_device *pdev)
i2s->playback_dma_data.addr = res->start + I2S_TXDR;
i2s->playback_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->playback_dma_data.maxburst = 16;
+ i2s->playback_dma_data.maxburst = 4;
i2s->capture_dma_data.addr = res->start + I2S_RXDR;
i2s->capture_dma_data.addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
- i2s->capture_dma_data.maxburst = 16;
+ i2s->capture_dma_data.maxburst = 4;
i2s->dev = &pdev->dev;
dev_set_drvdata(&pdev->dev, i2s);
diff --git a/sound/soc/rockchip/rockchip_i2s.h b/sound/soc/rockchip/rockchip_i2s.h
index 89a5d8bc6ee7..93f456f518a9 100644
--- a/sound/soc/rockchip/rockchip_i2s.h
+++ b/sound/soc/rockchip/rockchip_i2s.h
@@ -127,7 +127,7 @@
#define I2S_DMACR_TDE_DISABLE (0 << I2S_DMACR_TDE_SHIFT)
#define I2S_DMACR_TDE_ENABLE (1 << I2S_DMACR_TDE_SHIFT)
#define I2S_DMACR_TDL_SHIFT 0
-#define I2S_DMACR_TDL(x) ((x - 1) << I2S_DMACR_TDL_SHIFT)
+#define I2S_DMACR_TDL(x) ((x) << I2S_DMACR_TDL_SHIFT)
#define I2S_DMACR_TDL_MASK (0x1f << I2S_DMACR_TDL_SHIFT)
/*
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 590a82f01d0b..025c38fbe3c0 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -659,7 +659,8 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
rtd->dai_link->stream_name);
ret = snd_pcm_new_internal(rtd->card->snd_card, new_name, num,
- 1, 0, &be_pcm);
+ rtd->dai_link->dpcm_playback,
+ rtd->dai_link->dpcm_capture, &be_pcm);
if (ret < 0) {
dev_err(rtd->card->dev, "ASoC: can't create compressed for %s\n",
rtd->dai_link->name);
@@ -668,8 +669,10 @@ int soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
rtd->pcm = be_pcm;
rtd->fe_compr = 1;
- be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
- be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
+ if (rtd->dai_link->dpcm_playback)
+ be_pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->private_data = rtd;
+ else if (rtd->dai_link->dpcm_capture)
+ be_pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->private_data = rtd;
memcpy(compr->ops, &soc_compr_dyn_ops, sizeof(soc_compr_dyn_ops));
} else
memcpy(compr->ops, &soc_compr_ops, sizeof(soc_compr_ops));
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 985052b3fbed..2c62620abca6 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -3230,7 +3230,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
const char *propname)
{
struct device_node *np = card->dev->of_node;
- int num_routes, old_routes;
+ int num_routes;
struct snd_soc_dapm_route *routes;
int i, ret;
@@ -3248,9 +3248,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
- old_routes = card->num_dapm_routes;
- routes = devm_kzalloc(card->dev,
- (old_routes + num_routes) * sizeof(*routes),
+ routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes),
GFP_KERNEL);
if (!routes) {
dev_err(card->dev,
@@ -3258,11 +3256,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
- memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
-
for (i = 0; i < num_routes; i++) {
ret = of_property_read_string_index(np, propname,
- 2 * i, &routes[old_routes + i].sink);
+ 2 * i, &routes[i].sink);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3270,7 +3266,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
return -EINVAL;
}
ret = of_property_read_string_index(np, propname,
- (2 * i) + 1, &routes[old_routes + i].source);
+ (2 * i) + 1, &routes[i].source);
if (ret) {
dev_err(card->dev,
"ASoC: Property '%s' index %d could not be read: %d\n",
@@ -3279,7 +3275,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
}
}
- card->num_dapm_routes += num_routes;
+ card->num_dapm_routes = num_routes;
card->dapm_routes = routes;
return 0;
diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c
index 272844746135..327f8642ca80 100644
--- a/sound/usb/caiaq/audio.c
+++ b/sound/usb/caiaq/audio.c
@@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev)
return -EINVAL;
}
- if (cdev->n_streams < 2) {
+ if (cdev->n_streams < 1) {
dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams);
return -EINVAL;
}
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 41650d5b93b7..3e2ef61c627b 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -913,6 +913,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
case USB_ID(0x046d, 0x0808):
case USB_ID(0x046d, 0x0809):
+ case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */
case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
diff --git a/tools/include/asm-generic/bitops.h b/tools/include/asm-generic/bitops.h
index 6eedba1f7732..653d1bad77de 100644
--- a/tools/include/asm-generic/bitops.h
+++ b/tools/include/asm-generic/bitops.h
@@ -22,6 +22,8 @@
#error only <linux/bitops.h> can be included directly
#endif
+#include <asm-generic/bitops/hweight.h>
+
#include <asm-generic/bitops/atomic.h>
#endif /* __TOOLS_ASM_GENERIC_BITOPS_H */
diff --git a/tools/include/asm-generic/bitops/arch_hweight.h b/tools/include/asm-generic/bitops/arch_hweight.h
new file mode 100644
index 000000000000..318bb2b202b0
--- /dev/null
+++ b/tools/include/asm-generic/bitops/arch_hweight.h
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/arch_hweight.h"
diff --git a/tools/include/asm-generic/bitops/const_hweight.h b/tools/include/asm-generic/bitops/const_hweight.h
new file mode 100644
index 000000000000..0afd644aff83
--- /dev/null
+++ b/tools/include/asm-generic/bitops/const_hweight.h
@@ -0,0 +1 @@
+#include "../../../../include/asm-generic/bitops/const_hweight.h"
diff --git a/tools/include/asm-generic/bitops/hweight.h b/tools/include/asm-generic/bitops/hweight.h
new file mode 100644
index 000000000000..290120c01a8e
--- /dev/null
+++ b/tools/include/asm-generic/bitops/hweight.h
@@ -0,0 +1,7 @@
+#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_
+
+#include <asm-generic/bitops/arch_hweight.h>
+#include <asm-generic/bitops/const_hweight.h>
+
+#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */
diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h
index 26005a15e7e2..5ad9ee1dd7f6 100644
--- a/tools/include/linux/bitops.h
+++ b/tools/include/linux/bitops.h
@@ -1,9 +1,9 @@
#ifndef _TOOLS_LINUX_BITOPS_H_
#define _TOOLS_LINUX_BITOPS_H_
+#include <asm/types.h>
#include <linux/kernel.h>
#include <linux/compiler.h>
-#include <asm/hweight.h>
#ifndef __WORDSIZE
#define __WORDSIZE (__SIZEOF_LONG__ * 8)
@@ -19,6 +19,11 @@
#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE)
+extern unsigned int __sw_hweight8(unsigned int w);
+extern unsigned int __sw_hweight16(unsigned int w);
+extern unsigned int __sw_hweight32(unsigned int w);
+extern unsigned long __sw_hweight64(__u64 w);
+
/*
* Include this here because some architectures need generic_ffs/fls in
* scope
diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c
index a74fba6d7743..86ea2d7b8845 100644
--- a/tools/lib/api/fs/debugfs.c
+++ b/tools/lib/api/fs/debugfs.c
@@ -67,7 +67,7 @@ int debugfs_valid_mountpoint(const char *debugfs)
if (statfs(debugfs, &st_fs) < 0)
return -ENOENT;
- else if (st_fs.f_type != (long) DEBUGFS_MAGIC)
+ else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC)
return -ENOENT;
return 0;
diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c
index 65d9be3f9887..128ef6332a6b 100644
--- a/tools/lib/api/fs/fs.c
+++ b/tools/lib/api/fs/fs.c
@@ -79,7 +79,7 @@ static int fs__valid_mount(const char *fs, long magic)
if (statfs(fs, &st_fs) < 0)
return -ENOENT;
- else if (st_fs.f_type != magic)
+ else if ((long)st_fs.f_type != magic)
return -ENOENT;
return 0;
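
[Editor's note] Both tools/lib hunks above compare statfs() f_type against a filesystem magic constant; on some ABIs f_type's type and signedness differ from `long`, so casting both sides to the same type keeps the comparison well defined. A minimal sketch, assuming a Linux host with debugfs mounted at the usual path:

    #include <stdio.h>
    #include <sys/vfs.h>

    #define DEBUGFS_MAGIC 0x64626720    /* value from the kernel headers */

    /* Returns 1 when the given mount point is a debugfs instance. */
    static int is_debugfs(const char *path)
    {
            struct statfs st;

            if (statfs(path, &st) < 0)
                    return 0;

            /* compare with matching signedness, as in the fix above */
            return (long)st.f_type == (long)DEBUGFS_MAGIC;
    }

    int main(void)
    {
            printf("/sys/kernel/debug debugfs? %d\n", is_debugfs("/sys/kernel/debug"));
            return 0;
    }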
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 6f803609e498..0b0112c80f22 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex)
*
* TODO: Hook into free() and add that check there as well.
*/
- debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex));
+ debug_check_no_locks_freed(mutex, sizeof(*mutex));
__del_lock(__get_lock(mutex));
return ll_pthread_mutex_destroy(mutex);
}
@@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
try_init_preload();
- debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock));
+ debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
__del_lock(__get_lock(rwlock));
return ll_pthread_rwlock_destroy(rwlock);
}
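
[Editor's note] The preload.c fix is about pointer arithmetic: `mutex + sizeof(*mutex)` advances by that many *elements*, not bytes, so the old call passed a wildly oversized range; the helper expects a length in bytes. A tiny standalone demonstration of the difference:

    #include <stdio.h>
    #include <pthread.h>

    int main(void)
    {
            pthread_mutex_t m;
            pthread_mutex_t *p = &m;

            /* p + sizeof(*p) steps sizeof(*p) whole mutexes forward, not bytes */
            size_t wrong_span = (size_t)((char *)(p + sizeof(*p)) - (char *)p);
            size_t right_len  = sizeof(*p);

            printf("element-stepped span: %zu bytes, intended length: %zu bytes\n",
                   wrong_span, right_len);
            return 0;
    }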
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 83e2887f91a3..fbbfdc39271d 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -6,12 +6,15 @@ tools/lib/symbol/kallsyms.c
tools/lib/symbol/kallsyms.h
tools/lib/util/find_next_bit.c
tools/include/asm/bug.h
+tools/include/asm-generic/bitops/arch_hweight.h
tools/include/asm-generic/bitops/atomic.h
+tools/include/asm-generic/bitops/const_hweight.h
tools/include/asm-generic/bitops/__ffs.h
tools/include/asm-generic/bitops/__fls.h
tools/include/asm-generic/bitops/find.h
tools/include/asm-generic/bitops/fls64.h
tools/include/asm-generic/bitops/fls.h
+tools/include/asm-generic/bitops/hweight.h
tools/include/asm-generic/bitops.h
tools/include/linux/bitops.h
tools/include/linux/compiler.h
@@ -19,6 +22,8 @@ tools/include/linux/export.h
tools/include/linux/hash.h
tools/include/linux/log2.h
tools/include/linux/types.h
+include/asm-generic/bitops/arch_hweight.h
+include/asm-generic/bitops/const_hweight.h
include/asm-generic/bitops/fls64.h
include/asm-generic/bitops/__fls.h
include/asm-generic/bitops/fls.h
@@ -29,6 +34,7 @@ include/linux/list.h
include/linux/hash.h
include/linux/stringify.h
lib/find_next_bit.c
+lib/hweight.c
lib/rbtree.c
include/linux/swab.h
arch/*/include/asm/unistd*.h
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 67a03a825b3c..aa6a50447c32 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -232,12 +232,15 @@ LIB_H += ../include/linux/hash.h
LIB_H += ../../include/linux/stringify.h
LIB_H += util/include/linux/bitmap.h
LIB_H += ../include/linux/bitops.h
+LIB_H += ../include/asm-generic/bitops/arch_hweight.h
LIB_H += ../include/asm-generic/bitops/atomic.h
+LIB_H += ../include/asm-generic/bitops/const_hweight.h
LIB_H += ../include/asm-generic/bitops/find.h
LIB_H += ../include/asm-generic/bitops/fls64.h
LIB_H += ../include/asm-generic/bitops/fls.h
LIB_H += ../include/asm-generic/bitops/__ffs.h
LIB_H += ../include/asm-generic/bitops/__fls.h
+LIB_H += ../include/asm-generic/bitops/hweight.h
LIB_H += ../include/asm-generic/bitops.h
LIB_H += ../include/linux/compiler.h
LIB_H += ../include/linux/log2.h
@@ -255,7 +258,6 @@ LIB_H += util/include/linux/linkage.h
LIB_H += util/include/asm/asm-offsets.h
LIB_H += ../include/asm/bug.h
LIB_H += util/include/asm/byteorder.h
-LIB_H += util/include/asm/hweight.h
LIB_H += util/include/asm/swab.h
LIB_H += util/include/asm/system.h
LIB_H += util/include/asm/uaccess.h
@@ -462,10 +464,12 @@ BUILTIN_OBJS += $(OUTPUT)builtin-bench.o
# Benchmark modules
BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o
BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o
-ifeq ($(RAW_ARCH),x86_64)
+ifeq ($(ARCH), x86)
+ifeq ($(IS_64_BIT), 1)
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o
BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o
endif
+endif
BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o
BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o
@@ -743,6 +747,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS
$(OUTPUT)util/rbtree.o: ../../lib/rbtree.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS
+ $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
+
$(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS
$(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $<
diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
index 3bb50eac5542..0c370f81e002 100644
--- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c
+++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c
@@ -103,7 +103,7 @@ static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc)
return NULL;
}
- result = dwarf_cfi_addrframe(cfi, pc, &frame);
+ result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
if (result) {
pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
return NULL;
@@ -128,7 +128,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
return NULL;
}
- result = dwarf_cfi_addrframe(cfi, pc, &frame);
+ result = dwarf_cfi_addrframe(cfi, pc-bias, &frame);
if (result) {
pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1));
return NULL;
@@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc)
* yet used)
* -1 in case of errors
*/
-static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
+static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc)
{
int rc = -1;
Dwfl *dwfl;
@@ -155,6 +155,7 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
Dwarf_Addr start = pc;
Dwarf_Addr end = pc;
bool signalp;
+ const char *exec_file = dso->long_name;
dwfl = dso->dwfl;
@@ -165,8 +166,10 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc)
return -1;
}
- if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) {
- pr_debug("dwfl_report_offline() failed %s\n",
+ mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1,
+ map_start, false);
+ if (!mod) {
+ pr_debug("dwfl_report_elf() failed %s\n",
dwarf_errmsg(-1));
/*
* We normally cache the DWARF debug info and never
@@ -256,10 +259,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain)
return skip_slot;
}
- rc = check_return_addr(dso, ip);
+ rc = check_return_addr(dso, al.map->start, ip);
- pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n",
- dso->long_name, chain->nr, ip, rc);
+ pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n",
+ dso->long_name, al.sym->name, ip, rc);
if (rc == 0) {
/*
diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c
index 07a8d7646a15..005cc283790c 100644
--- a/tools/perf/bench/sched-pipe.c
+++ b/tools/perf/bench/sched-pipe.c
@@ -19,12 +19,12 @@
#include <stdlib.h>
#include <signal.h>
#include <sys/wait.h>
-#include <linux/unistd.h>
#include <string.h>
#include <errno.h>
#include <assert.h>
#include <sys/time.h>
#include <sys/types.h>
+#include <sys/syscall.h>
#include <pthread.h>
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index e7417fe97a97..747f86103599 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann)
if (nr_samples > 0) {
total_nr_samples += nr_samples;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (symbol_conf.event_group &&
!perf_evsel__is_group_leader(pos))
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 1ce425d101a9..1fd96c13f199 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right,
return __hist_entry__cmp_compute(p_left, p_right, c);
}
+static int64_t
+hist_entry__cmp_nop(struct hist_entry *left __maybe_unused,
+ struct hist_entry *right __maybe_unused)
+{
+ return 0;
+}
+
+static int64_t
+hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right)
+{
+ if (sort_compute)
+ return 0;
+
+ if (left->stat.period == right->stat.period)
+ return 0;
+ return left->stat.period > right->stat.period ? 1 : -1;
+}
+
+static int64_t
+hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_DELTA);
+}
+
+static int64_t
+hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_RATIO);
+}
+
+static int64_t
+hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right)
+{
+ return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF);
+}
+
static void insert_hist_entry_by_compute(struct rb_root *root,
struct hist_entry *he,
int c)
@@ -605,7 +641,7 @@ static void hists__process(struct hists *hists)
hists__precompute(hists);
hists__compute_resort(hists);
} else {
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
}
hists__fprintf(hists, true, 0, 0, 0, stdout);
@@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx)
fmt->header = hpp__header;
fmt->width = hpp__width;
fmt->entry = hpp__entry_global;
+ fmt->cmp = hist_entry__cmp_nop;
+ fmt->collapse = hist_entry__cmp_nop;
/* TODO more colors */
switch (idx) {
case PERF_HPP_DIFF__BASELINE:
fmt->color = hpp__color_baseline;
+ fmt->sort = hist_entry__cmp_baseline;
break;
case PERF_HPP_DIFF__DELTA:
fmt->color = hpp__color_delta;
+ fmt->sort = hist_entry__cmp_delta;
break;
case PERF_HPP_DIFF__RATIO:
fmt->color = hpp__color_ratio;
+ fmt->sort = hist_entry__cmp_ratio;
break;
case PERF_HPP_DIFF__WEIGHTED_DIFF:
fmt->color = hpp__color_wdiff;
+ fmt->sort = hist_entry__cmp_wdiff;
break;
default:
+ fmt->sort = hist_entry__cmp_nop;
break;
}
init_header(d, dfmt);
perf_hpp__column_register(fmt);
+ perf_hpp__register_sort_field(fmt);
}
static void ui_init(void)
diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c
index 011195e38f21..198f3c3aff95 100644
--- a/tools/perf/builtin-list.c
+++ b/tools/perf/builtin-list.c
@@ -19,7 +19,9 @@
int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
{
int i;
- const struct option list_options[] = {
+ bool raw_dump = false;
+ struct option list_options[] = {
+ OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"),
OPT_END()
};
const char * const list_usage[] = {
@@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
NULL
};
+ set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN);
+
argc = parse_options(argc, argv, list_options, list_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
setup_pager();
+ if (raw_dump) {
+ print_events(NULL, true);
+ return 0;
+ }
+
if (argc == 0) {
print_events(NULL, false);
return 0;
@@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused)
print_hwcache_events(NULL, false);
else if (strcmp(argv[i], "pmu") == 0)
print_pmu_events(NULL, false);
- else if (strcmp(argv[i], "--raw-dump") == 0)
- print_events(NULL, true);
else {
char *sep = strchr(argv[i], ':'), *s;
int sep_idx;
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 39367609c707..072ae8ad67fc 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep)
ui_progress__finish();
}
+static void report__output_resort(struct report *rep)
+{
+ struct ui_progress prog;
+ struct perf_evsel *pos;
+
+ ui_progress__init(&prog, rep->nr_entries, "Sorting events for output...");
+
+ evlist__for_each(rep->session->evlist, pos)
+ hists__output_resort(evsel__hists(pos), &prog);
+
+ ui_progress__finish();
+}
+
static int __cmd_report(struct report *rep)
{
int ret;
@@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep)
if (session_done())
return 0;
+ /*
+ * recalculate number of entries after collapsing since it
+ * might be changed during the collapse phase.
+ */
+ rep->nr_entries = 0;
+ evlist__for_each(session->evlist, pos)
+ rep->nr_entries += evsel__hists(pos)->nr_entries;
+
if (rep->nr_entries == 0) {
ui__error("The %s file has no samples!\n", file->path);
return 0;
}
- evlist__for_each(session->evlist, pos)
- hists__output_resort(evsel__hists(pos));
+ report__output_resort(rep);
return report__browse_hists(rep);
}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 0aa7747ff139..616f0fcb4701 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -66,7 +66,6 @@
#include <sys/utsname.h>
#include <sys/mman.h>
-#include <linux/unistd.h>
#include <linux/types.h>
static volatile int done;
@@ -285,7 +284,7 @@ static void perf_top__print_sym_table(struct perf_top *top)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
hists__output_recalc_col_len(hists, top->print_entries - printed);
putchar('\n');
@@ -554,7 +553,7 @@ static void perf_top__sort_new_samples(void *arg)
}
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
}
static void *display_thread_tui(void *arg)
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 5d4b039fe1ed..648e31ff4021 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -20,7 +20,7 @@ NO_PERF_REGS := 1
# Additional ARCH settings for x86
ifeq ($(ARCH),x86)
- ifeq (${IS_X86_64}, 1)
+ ifeq (${IS_64_BIT}, 1)
CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT
ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S
LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch
index 851cd0172a76..ff95a68741d1 100644
--- a/tools/perf/config/Makefile.arch
+++ b/tools/perf/config/Makefile.arch
@@ -1,7 +1,7 @@
uname_M := $(shell uname -m 2>/dev/null || echo not)
-ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
+RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/arm.*/arm/ -e s/sa110/arm/ \
-e s/s390x/s390/ -e s/parisc64/parisc/ \
-e s/ppc.*/powerpc/ -e s/mips.*/mips/ \
@@ -9,23 +9,23 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \
-e s/tile.*/tile/ )
# Additional ARCH settings for x86
-ifeq ($(ARCH),i386)
- override ARCH := x86
+ifeq ($(RAW_ARCH),i386)
+ ARCH ?= x86
endif
-ifeq ($(ARCH),x86_64)
- override ARCH := x86
- IS_X86_64 := 0
- ifeq (, $(findstring m32,$(CFLAGS)))
- IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1)
- RAW_ARCH := x86_64
+ifeq ($(RAW_ARCH),x86_64)
+ ARCH ?= x86
+
+ ifneq (, $(findstring m32,$(CFLAGS)))
+ RAW_ARCH := x86_32
endif
endif
-ifeq (${IS_X86_64}, 1)
+ARCH ?= $(RAW_ARCH)
+
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
IS_64_BIT := 1
-else ifeq ($(ARCH),x86)
- IS_64_BIT := 0
else
- IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ IS_64_BIT := 0
endif
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index a3b13d7dc1d4..6ef68165c9db 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -6,7 +6,6 @@
#include <sys/syscall.h>
#include <linux/types.h>
#include <linux/perf_event.h>
-#include <asm/unistd.h>
#if defined(__i386__)
#define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
diff --git a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
index 790ceba6ad3f..28431d1bbcf5 100644
--- a/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
+++ b/tools/perf/scripts/perl/Perf-Trace-Util/Context.c
@@ -5,7 +5,10 @@
* ANY CHANGES MADE HERE WILL BE LOST!
*
*/
-
+#include <stdbool.h>
+#ifndef HAS_BOOL
+# define HAS_BOOL 1
+#endif
#line 1 "Context.xs"
/*
* Context.xs. XS interfaces for perf script.
diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c
index ab28cca2cb97..0bf06bec68c7 100644
--- a/tools/perf/tests/dwarf-unwind.c
+++ b/tools/perf/tests/dwarf-unwind.c
@@ -11,6 +11,9 @@
#include "thread.h"
#include "callchain.h"
+/* For bsearch. We try to unwind functions in shared object. */
+#include <stdlib.h>
+
static int mmap_handler(struct perf_tool *tool __maybe_unused,
union perf_event *event,
struct perf_sample *sample __maybe_unused,
@@ -28,7 +31,7 @@ static int init_live_machine(struct machine *machine)
mmap_handler, machine, true);
}
-#define MAX_STACK 6
+#define MAX_STACK 8
static int unwind_entry(struct unwind_entry *entry, void *arg)
{
@@ -37,6 +40,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg)
static const char *funcs[MAX_STACK] = {
"test__arch_unwind_sample",
"unwind_thread",
+ "compare",
+ "bsearch",
"krava_3",
"krava_2",
"krava_1",
@@ -88,10 +93,37 @@ static int unwind_thread(struct thread *thread)
return err;
}
+static int global_unwind_retval = -INT_MAX;
+
+__attribute__ ((noinline))
+static int compare(void *p1, void *p2)
+{
+ /* Any possible value should be 'thread' */
+ struct thread *thread = *(struct thread **)p1;
+
+ if (global_unwind_retval == -INT_MAX)
+ global_unwind_retval = unwind_thread(thread);
+
+ return p1 - p2;
+}
+
__attribute__ ((noinline))
static int krava_3(struct thread *thread)
{
- return unwind_thread(thread);
+ struct thread *array[2] = {thread, thread};
+ void *fp = &bsearch;
+ /*
+ * make _bsearch a volatile function pointer to
+ * prevent potential optimization, which may expand
+ * bsearch and call compare directly from this function,
+ * instead of libc shared object.
+ */
+ void *(*volatile _bsearch)(void *, void *, size_t,
+ size_t, int (*)(void *, void *));
+
+ _bsearch = fp;
+ _bsearch(array, &thread, 2, sizeof(struct thread **), compare);
+ return global_unwind_retval;
}
__attribute__ ((noinline))
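
[Editor's note] The dwarf-unwind test calls bsearch() through a volatile function pointer so the compiler cannot expand the libc call and invoke the comparator directly, which would drop the library frames the test wants to unwind through. A standalone sketch of the same trick with ordinary bsearch usage, nothing perf-specific:

    #include <stdio.h>
    #include <stdlib.h>

    static int cmp_int(const void *a, const void *b)
    {
            return *(const int *)a - *(const int *)b;
    }

    int main(void)
    {
            int keys[] = { 1, 3, 5, 7, 9 };
            int wanted = 7;

            /* volatile function pointer: forces a real indirect call into libc,
             * preventing the compiler from inlining or expanding bsearch here */
            void *(*volatile search)(const void *, const void *, size_t, size_t,
                                     int (*)(const void *, const void *)) = bsearch;

            int *hit = search(&wanted, keys, sizeof(keys) / sizeof(keys[0]),
                              sizeof(keys[0]), cmp_int);

            printf("found: %s\n", hit ? "yes" : "no");
            return 0;
    }

The test also bumps MAX_STACK and adds "compare" and "bsearch" to the expected frame list, matching the deeper call chain this produces.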
diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c
index 614d5c4978ab..8d110dec393e 100644
--- a/tools/perf/tests/hists_cumulate.c
+++ b/tools/perf/tests/hists_cumulate.c
@@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec
* function since TEST_ASSERT_VAL() returns in case of failure.
*/
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("use callchain: %d, cumulate callchain: %d\n",
@@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
* 30.00% 10.00% perf perf [.] cmd_record
* 20.00% 0.00% bash libc [.] malloc
* 10.00% 10.00% bash [kernel] [k] page_fault
- * 10.00% 10.00% perf [kernel] [k] schedule
- * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
+ * 10.00% 10.00% bash bash [.] xmalloc
* 10.00% 10.00% perf [kernel] [k] page_fault
- * 10.00% 10.00% perf libc [.] free
* 10.00% 10.00% perf libc [.] malloc
- * 10.00% 10.00% bash bash [.] xmalloc
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% perf libc [.] free
+ * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
@@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
- { 1000, 1000, "perf", "[kernel]", "schedule" },
- { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
- { 1000, 1000, "bash", "bash", "xmalloc" },
+ { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
};
symbol_conf.use_callchain = false;
@@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* malloc
* main
*
- * 10.00% 10.00% perf [kernel] [k] schedule
+ * 10.00% 10.00% bash bash [.] xmalloc
* |
- * --- schedule
- * run_command
+ * --- xmalloc
+ * malloc
+ * xmalloc <--- NOTE: there's a cycle
+ * malloc
+ * xmalloc
* main
*
* 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
@@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* run_command
* main
*
+ * 10.00% 10.00% perf [kernel] [k] schedule
+ * |
+ * --- schedule
+ * run_command
+ * main
+ *
* 10.00% 10.00% perf libc [.] free
* |
* --- free
@@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
* run_command
* main
*
- * 10.00% 10.00% bash bash [.] xmalloc
- * |
- * --- xmalloc
- * malloc
- * xmalloc <--- NOTE: there's a cycle
- * malloc
- * xmalloc
- * main
- *
*/
struct result expected[] = {
{ 7000, 2000, "perf", "perf", "main" },
@@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ 3000, 1000, "perf", "perf", "cmd_record" },
{ 2000, 0, "bash", "libc", "malloc" },
{ 1000, 1000, "bash", "[kernel]", "page_fault" },
- { 1000, 1000, "perf", "[kernel]", "schedule" },
+ { 1000, 1000, "bash", "bash", "xmalloc" },
{ 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
{ 1000, 1000, "perf", "[kernel]", "page_fault" },
+ { 1000, 1000, "perf", "[kernel]", "schedule" },
{ 1000, 1000, "perf", "libc", "free" },
{ 1000, 1000, "perf", "libc", "malloc" },
- { 1000, 1000, "bash", "bash", "xmalloc" },
};
struct callchain_result expected_callchain[] = {
{
@@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "bash", "main" }, },
},
{
- 3, { { "[kernel]", "schedule" },
- { "perf", "run_command" },
- { "perf", "main" }, },
+ 6, { { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "libc", "malloc" },
+ { "bash", "xmalloc" },
+ { "bash", "main" }, },
},
{
3, { { "[kernel]", "sys_perf_event_open" },
@@ -638,6 +641,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "perf", "main" }, },
},
{
+ 3, { { "[kernel]", "schedule" },
+ { "perf", "run_command" },
+ { "perf", "main" }, },
+ },
+ {
4, { { "libc", "free" },
{ "perf", "cmd_record" },
{ "perf", "run_command" },
@@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
{ "perf", "run_command" },
{ "perf", "main" }, },
},
- {
- 6, { { "bash", "xmalloc" },
- { "libc", "malloc" },
- { "bash", "xmalloc" },
- { "libc", "malloc" },
- { "bash", "xmalloc" },
- { "bash", "main" }, },
- },
};
symbol_conf.use_callchain = true;
diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c
index 74f257a81265..59e53db7914c 100644
--- a/tools/perf/tests/hists_filter.c
+++ b/tools/perf/tests/hists_filter.c
@@ -138,7 +138,7 @@ int test__hists_filter(void)
struct hists *hists = evsel__hists(evsel);
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("Normal histogram\n");
diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c
index a748f2be1222..f5547610da02 100644
--- a/tools/perf/tests/hists_output.c
+++ b/tools/perf/tests/hists_output.c
@@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
@@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine)
goto out;
hists__collapse_resort(hists, NULL);
- hists__output_resort(hists);
+ hists__output_resort(hists, NULL);
if (verbose > 2) {
pr_info("[fields = %s, sort = %s]\n", field_order, sort_order);
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index e6bb04b5b09b..788506eef567 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser,
bool need_percent;
node = rb_first(root);
- need_percent = !!rb_next(node);
+ need_percent = node && rb_next(node);
while (node) {
struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node);
diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c
index dc0d095f318c..482adae3cc44 100644
--- a/tools/perf/ui/hist.c
+++ b/tools/perf/ui/hist.c
@@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
if (ret)
return ret;
+ if (a->thread != b->thread || !symbol_conf.use_callchain)
+ return 0;
+
ret = b->callchain->max_depth - a->callchain->max_depth;
}
return ret;
diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c
index 2f612562978c..3c38f25b1695 100644
--- a/tools/perf/ui/tui/setup.c
+++ b/tools/perf/ui/tui/setup.c
@@ -1,5 +1,8 @@
#include <signal.h>
#include <stdbool.h>
+#ifdef HAVE_BACKTRACE_SUPPORT
+#include <execinfo.h>
+#endif
#include "../../util/cache.h"
#include "../../util/debug.h"
@@ -88,6 +91,25 @@ int ui__getch(int delay_secs)
return SLkp_getkey();
}
+#ifdef HAVE_BACKTRACE_SUPPORT
+static void ui__signal_backtrace(int sig)
+{
+ void *stackdump[32];
+ size_t size;
+
+ ui__exit(false);
+ psignal(sig, "perf");
+
+ printf("-------- backtrace --------\n");
+ size = backtrace(stackdump, ARRAY_SIZE(stackdump));
+ backtrace_symbols_fd(stackdump, size, STDOUT_FILENO);
+
+ exit(0);
+}
+#else
+# define ui__signal_backtrace ui__signal
+#endif
+
static void ui__signal(int sig)
{
ui__exit(false);
@@ -122,8 +144,8 @@ int ui__init(void)
ui_browser__init();
tui_progress__init();
- signal(SIGSEGV, ui__signal);
- signal(SIGFPE, ui__signal);
+ signal(SIGSEGV, ui__signal_backtrace);
+ signal(SIGFPE, ui__signal_backtrace);
signal(SIGINT, ui__signal);
signal(SIGQUIT, ui__signal);
signal(SIGTERM, ui__signal);
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index 79999ceaf2be..01bc4e23a2cf 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -177,14 +177,17 @@ static int lock__parse(struct ins_operands *ops)
goto out_free_ops;
ops->locked.ins = ins__find(name);
+ free(name);
+
if (ops->locked.ins == NULL)
goto out_free_ops;
if (!ops->locked.ins->ops)
return 0;
- if (ops->locked.ins->ops->parse)
- ops->locked.ins->ops->parse(ops->locked.ops);
+ if (ops->locked.ins->ops->parse &&
+ ops->locked.ins->ops->parse(ops->locked.ops) < 0)
+ goto out_free_ops;
return 0;
@@ -208,6 +211,13 @@ static int lock__scnprintf(struct ins *ins, char *bf, size_t size,
static void lock__delete(struct ins_operands *ops)
{
+ struct ins *ins = ops->locked.ins;
+
+ if (ins && ins->ops->free)
+ ins->ops->free(ops->locked.ops);
+ else
+ ins__delete(ops->locked.ops);
+
zfree(&ops->locked.ops);
zfree(&ops->target.raw);
zfree(&ops->target.name);
@@ -531,8 +541,8 @@ static void disasm_line__init_ins(struct disasm_line *dl)
if (!dl->ins->ops)
return;
- if (dl->ins->ops->parse)
- dl->ins->ops->parse(&dl->ops);
+ if (dl->ins->ops->parse && dl->ins->ops->parse(&dl->ops) < 0)
+ dl->ins = NULL;
}
static int disasm_line__parse(char *line, char **namep, char **rawp)
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 0784a9420528..cadbdc90a5cb 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -116,11 +116,6 @@ struct annotation {
struct annotated_source *src;
};
-struct sannotation {
- struct annotation annotation;
- struct symbol symbol;
-};
-
static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx)
{
return (((void *)&notes->src->histograms) +
@@ -129,8 +124,7 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i
static inline struct annotation *symbol__annotation(struct symbol *sym)
{
- struct sannotation *a = container_of(sym, struct sannotation, symbol);
- return &a->annotation;
+ return (void *)sym - symbol_conf.priv_size;
}
int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx);
diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h
index 5cf9e1b5989d..d04d770d90f6 100644
--- a/tools/perf/util/cache.h
+++ b/tools/perf/util/cache.h
@@ -71,7 +71,9 @@ extern char *perf_path(const char *fmt, ...) __attribute__((format (printf, 1, 2
extern char *perf_pathdup(const char *fmt, ...)
__attribute__((format (printf, 1, 2)));
+#ifndef __UCLIBC__
/* Matches the libc/libbsd function attribute so we declare this unless the libc already provides it: */
extern size_t strlcpy(char *dest, const char *src, size_t size);
+#endif
#endif /* __PERF_CACHE_H */
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 64b377e591e4..14e7a123d43b 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl,
return bf;
}
+
+static void free_callchain_node(struct callchain_node *node)
+{
+ struct callchain_list *list, *tmp;
+ struct callchain_node *child;
+ struct rb_node *n;
+
+ list_for_each_entry_safe(list, tmp, &node->val, list) {
+ list_del(&list->list);
+ free(list);
+ }
+
+ n = rb_first(&node->rb_root_in);
+ while (n) {
+ child = container_of(n, struct callchain_node, rb_node_in);
+ n = rb_next(n);
+ rb_erase(&child->rb_node_in, &node->rb_root_in);
+
+ free_callchain_node(child);
+ free(child);
+ }
+}
+
+void free_callchain(struct callchain_root *root)
+{
+ if (!symbol_conf.use_callchain)
+ return;
+
+ free_callchain_node(&root->node);
+}
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index dbc08cf5f970..c0ec1acc38e4 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused,
char *callchain_list__sym_name(struct callchain_list *cl,
char *bf, size_t bfsize, bool show_dso);
+void free_callchain(struct callchain_root *root);
+
#endif /* __PERF_CALLCHAIN_H */
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index cbab1fb77b1d..2e507b5025a3 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1445,7 +1445,7 @@ int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
case ENOENT:
scnprintf(buf, size, "%s",
"Error:\tUnable to find debugfs\n"
- "Hint:\tWas your kernel was compiled with debugfs support?\n"
+ "Hint:\tWas your kernel compiled with debugfs support?\n"
"Hint:\tIs the debugfs filesystem mounted?\n"
"Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
break;
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 6e88b9e395df..182395546ddc 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -6,6 +6,7 @@
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
+#include "ui/progress.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
@@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
size_t callchain_size = 0;
struct hist_entry *he;
- if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
+ if (symbol_conf.use_callchain)
callchain_size = sizeof(struct callchain_root);
he = zalloc(sizeof(*he) + callchain_size);
@@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- callchain_append(he->callchain, &callchain_cursor, sample->period);
+ hist_entry__append_callchain(he, sample);
/*
* We need to re-initialize the cursor since callchain_append()
@@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
iter->he = he;
he_cache[iter->curr++] = he;
- callchain_append(he->callchain, &cursor, sample->period);
+ if (symbol_conf.use_callchain)
+ callchain_append(he->callchain, &cursor, sample->period);
return 0;
}
@@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he)
zfree(&he->mem_info);
zfree(&he->stat_acc);
free_srcline(he->srcline);
+ free_callchain(he->callchain);
free(he);
}
@@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
else
p = &(*p)->rb_right;
}
+ hists->nr_entries++;
rb_link_node(&he->rb_node_in, parent, p);
rb_insert_color(&he->rb_node_in, root);
@@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
if (!sort__need_collapse)
return;
+ hists->nr_entries = 0;
+
root = hists__get_rotate_entries_in(hists);
+
next = rb_first(root);
while (next) {
@@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries,
rb_insert_color(&he->rb_node, entries);
}
-void hists__output_resort(struct hists *hists)
+void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
struct rb_root *root;
struct rb_node *next;
@@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists)
if (!n->filtered)
hists__calc_col_len(hists, n);
+
+ if (prog)
+ ui_progress__update(prog, 1);
}
}
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index d0ef9a19a744..46bd50344f85 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size,
struct hists *hists);
void hist_entry__free(struct hist_entry *);
-void hists__output_resort(struct hists *hists);
+void hists__output_resort(struct hists *hists, struct ui_progress *prog);
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog);
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel);
diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c
deleted file mode 100644
index 5c1d0d099f0d..000000000000
--- a/tools/perf/util/hweight.c
+++ /dev/null
@@ -1,31 +0,0 @@
-#include <linux/bitops.h>
-
-/**
- * hweightN - returns the hamming weight of a N-bit word
- * @x: the word to weigh
- *
- * The Hamming Weight of a number is the total number of bits set in it.
- */
-
-unsigned int hweight32(unsigned int w)
-{
- unsigned int res = w - ((w >> 1) & 0x55555555);
- res = (res & 0x33333333) + ((res >> 2) & 0x33333333);
- res = (res + (res >> 4)) & 0x0F0F0F0F;
- res = res + (res >> 8);
- return (res + (res >> 16)) & 0x000000FF;
-}
-
-unsigned long hweight64(__u64 w)
-{
-#if BITS_PER_LONG == 32
- return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w);
-#elif BITS_PER_LONG == 64
- __u64 res = w - ((w >> 1) & 0x5555555555555555ul);
- res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul);
- res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful;
- res = res + (res >> 8);
- res = res + (res >> 16);
- return (res + (res >> 32)) & 0x00000000000000FFul;
-#endif
-}
diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h
deleted file mode 100644
index 36cf26d434a5..000000000000
--- a/tools/perf/util/include/asm/hweight.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#ifndef PERF_HWEIGHT_H
-#define PERF_HWEIGHT_H
-
-#include <linux/types.h>
-unsigned int hweight32(unsigned int w);
-unsigned long hweight64(__u64 w);
-
-#endif /* PERF_HWEIGHT_H */
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 94de3e48b490..1bca3a9f2b16 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -389,7 +389,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
if (th != NULL) {
rb_link_node(&th->rb_node, parent, p);
rb_insert_color(&th->rb_node, &machine->threads);
- machine->last_match = th;
/*
* We have to initialize map_groups separately
@@ -400,9 +399,12 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
* leader and that would screw up the rb tree.
*/
if (thread__init_map_groups(th, machine)) {
+ rb_erase(&th->rb_node, &machine->threads);
thread__delete(th);
return NULL;
}
+
+ machine->last_match = th;
}
return th;
diff --git a/tools/perf/util/map.h b/tools/perf/util/map.h
index 6951a9d42339..0e42438b1e59 100644
--- a/tools/perf/util/map.h
+++ b/tools/perf/util/map.h
@@ -116,6 +116,22 @@ struct thread;
#define map__for_each_symbol(map, pos, n) \
dso__for_each_symbol(map->dso, pos, n, map->type)
+/* map__for_each_symbol_by_name - iterate over the symbols in the given map
+ * that have the given name
+ *
+ * @map: the 'struct map *' in which symbols are iterated
+ * @sym_name: the symbol name
+ * @pos: the 'struct symbol *' to use as a loop cursor
+ * @filter: to use when loading the DSO
+ */
+#define __map__for_each_symbol_by_name(map, sym_name, pos, filter) \
+ for (pos = map__find_symbol_by_name(map, sym_name, filter); \
+ pos && strcmp(pos->name, sym_name) == 0; \
+ pos = symbol__next_by_name(pos))
+
+#define map__for_each_symbol_by_name(map, sym_name, pos) \
+ __map__for_each_symbol_by_name(map, sym_name, (pos), NULL)
+
typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
void map__init(struct map *map, enum map_type type,
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 28eb1417cb2a..919937eb0be2 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -446,7 +446,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
}
for (i = 0; i < ntevs; i++) {
- if (tevs[i].point.address) {
+ if (tevs[i].point.address && !tevs[i].point.retprobe) {
tmp = strdup(reloc_sym->name);
if (!tmp)
return -ENOMEM;
@@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
if (ntevs == 0) { /* No error but failed to find probe point. */
- pr_warning("Probe point '%s' not found.\n",
+ pr_warning("Probe point '%s' not found in debuginfo.\n",
synthesize_perf_probe_point(&pev->point));
- return -ENOENT;
+ if (need_dwarf)
+ return -ENOENT;
+ return 0;
}
/* Error path : ntevs < 0 */
pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
@@ -2050,9 +2052,11 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev)
pr_debug("Writing event: %s\n", buf);
if (!probe_event_dry_run) {
ret = write(fd, buf, strlen(buf));
- if (ret <= 0)
+ if (ret <= 0) {
+ ret = -errno;
pr_warning("Failed to write event: %s\n",
strerror_r(errno, sbuf, sizeof(sbuf)));
+ }
}
free(buf);
return ret;
@@ -2189,18 +2193,17 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
return ret;
}
-static char *looking_function_name;
-static int num_matched_functions;
-
-static int probe_function_filter(struct map *map __maybe_unused,
- struct symbol *sym)
+static int find_probe_functions(struct map *map, char *name)
{
- if ((sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL) &&
- strcmp(looking_function_name, sym->name) == 0) {
- num_matched_functions++;
- return 0;
+ int found = 0;
+ struct symbol *sym;
+
+ map__for_each_symbol_by_name(map, name, sym) {
+ if (sym->binding == STB_GLOBAL || sym->binding == STB_LOCAL)
+ found++;
}
- return 1;
+
+ return found;
}
#define strdup_or_goto(str, label) \
@@ -2218,10 +2221,10 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
struct kmap *kmap = NULL;
struct ref_reloc_sym *reloc_sym = NULL;
struct symbol *sym;
- struct rb_node *nd;
struct probe_trace_event *tev;
struct perf_probe_point *pp = &pev->point;
struct probe_trace_point *tp;
+ int num_matched_functions;
int ret, i;
/* Init maps of given executable or kernel */
@@ -2238,10 +2241,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
* Load matched symbols: Since the different local symbols may have
* same name but different addresses, this lists all the symbols.
*/
- num_matched_functions = 0;
- looking_function_name = pp->function;
- ret = map__load(map, probe_function_filter);
- if (ret || num_matched_functions == 0) {
+ num_matched_functions = find_probe_functions(map, pp->function);
+ if (num_matched_functions == 0) {
pr_err("Failed to find symbol %s in %s\n", pp->function,
target ? : "kernel");
ret = -ENOENT;
@@ -2253,7 +2254,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
goto out;
}
- if (!pev->uprobes) {
+ if (!pev->uprobes && !pp->retprobe) {
kmap = map__kmap(map);
reloc_sym = kmap->ref_reloc_sym;
if (!reloc_sym) {
@@ -2271,7 +2272,8 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
ret = 0;
- map__for_each_symbol(map, sym, nd) {
+
+ map__for_each_symbol_by_name(map, pp->function, sym) {
tev = (*tevs) + ret;
tp = &tev->point;
if (ret == num_matched_functions) {
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index c7918f83b300..b5247d777f0e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
int ret = 0;
#if _ELFUTILS_PREREQ(0, 142)
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+
/* Get the call frame information from this dwarf */
- pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg));
+ elf = dwarf_getelf(dbg->dbg);
+ if (elf == NULL)
+ return -EINVAL;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ return -EINVAL;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+ shdr.sh_type == SHT_PROGBITS) {
+ pf->cfi = dwarf_getcfi_elf(elf);
+ } else {
+ pf->cfi = dwarf_getcfi(dbg->dbg);
+ }
#endif
off = 0;
diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources
index 16a475a7d492..6c6a6953fa93 100644
--- a/tools/perf/util/python-ext-sources
+++ b/tools/perf/util/python-ext-sources
@@ -10,7 +10,7 @@ util/ctype.c
util/evlist.c
util/evsel.c
util/cpumap.c
-util/hweight.c
+../../lib/hweight.c
util/thread_map.c
util/util.c
util/xyarray.c
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index c24c5b83156c..a194702a0a2f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -396,6 +396,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
const char *name)
{
struct rb_node *n;
+ struct symbol_name_rb_node *s;
if (symbols == NULL)
return NULL;
@@ -403,7 +404,6 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
n = symbols->rb_node;
while (n) {
- struct symbol_name_rb_node *s;
int cmp;
s = rb_entry(n, struct symbol_name_rb_node, rb_node);
@@ -414,10 +414,24 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
else if (cmp > 0)
n = n->rb_right;
else
- return &s->sym;
+ break;
}
- return NULL;
+ if (n == NULL)
+ return NULL;
+
+ /* return the first symbol that has the same name (if any) */
+ for (n = rb_prev(n); n; n = rb_prev(n)) {
+ struct symbol_name_rb_node *tmp;
+
+ tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
+ if (strcmp(tmp->sym.name, s->sym.name))
+ break;
+
+ s = tmp;
+ }
+
+ return &s->sym;
}
struct symbol *dso__find_symbol(struct dso *dso,
@@ -436,6 +450,17 @@ struct symbol *dso__next_symbol(struct symbol *sym)
return symbols__next(sym);
}
+struct symbol *symbol__next_by_name(struct symbol *sym)
+{
+ struct symbol_name_rb_node *s = container_of(sym, struct symbol_name_rb_node, sym);
+ struct rb_node *n = rb_next(&s->rb_node);
+
+ return n ? &rb_entry(n, struct symbol_name_rb_node, rb_node)->sym : NULL;
+}
+
+ /*
+ * Returns the first symbol that matches @name.
+ */
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name)
{
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 9d602e9c6f59..1650dcb3a67b 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -231,6 +231,7 @@ struct symbol *dso__find_symbol(struct dso *dso, enum map_type type,
u64 addr);
struct symbol *dso__find_symbol_by_name(struct dso *dso, enum map_type type,
const char *name);
+struct symbol *symbol__next_by_name(struct symbol *sym);
struct symbol *dso__first_symbol(struct dso *dso, enum map_type type);
struct symbol *dso__next_symbol(struct symbol *sym);
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index 371219a6daf1..6edf535f65c2 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -185,6 +185,28 @@ static u64 elf_section_offset(int fd, const char *name)
return offset;
}
+#ifndef NO_LIBUNWIND_DEBUG_FRAME
+static int elf_is_exec(int fd, const char *name)
+{
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ int retval = 0;
+
+ elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL);
+ if (elf == NULL)
+ return 0;
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ goto out;
+
+ retval = (ehdr.e_type == ET_EXEC);
+
+out:
+ elf_end(elf);
+ pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval);
+ return retval;
+}
+#endif
+
struct table_entry {
u32 start_ip_offset;
u32 fde_offset;
@@ -322,8 +344,12 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
#ifndef NO_LIBUNWIND_DEBUG_FRAME
/* Check the .debug_frame section for unwinding info */
if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
+ int fd = dso__data_fd(map->dso, ui->machine);
+ int is_exec = elf_is_exec(fd, map->dso->name);
+ unw_word_t base = is_exec ? 0 : map->start;
+
memset(&di, 0, sizeof(di));
- if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name,
+ if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
map->start, map->end))
return dwarf_search_unwind_table(as, ip, &di, pi,
need_unwind_info, arg);
diff --git a/tools/power/cpupower/utils/cpupower.c b/tools/power/cpupower/utils/cpupower.c
index 7cdcf88659c7..9ea914378985 100644
--- a/tools/power/cpupower/utils/cpupower.c
+++ b/tools/power/cpupower/utils/cpupower.c
@@ -199,7 +199,7 @@ int main(int argc, const char *argv[])
}
get_cpu_info(0, &cpupower_cpu_info);
- run_as_root = !getuid();
+ run_as_root = !geteuid();
if (run_as_root) {
ret = uname(&uts);
if (!ret && !strcmp(uts.machine, "x86_64") &&
diff --git a/tools/power/cpupower/utils/helpers/sysfs.c b/tools/power/cpupower/utils/helpers/sysfs.c
index 09afe5d87f2b..4e8fe2c7b054 100644
--- a/tools/power/cpupower/utils/helpers/sysfs.c
+++ b/tools/power/cpupower/utils/helpers/sysfs.c
@@ -361,7 +361,7 @@ unsigned int sysfs_get_idlestate_count(unsigned int cpu)
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpuidle");
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
- return -ENODEV;
+ return 0;
snprintf(file, SYSFS_PATH_MAX, PATH_TO_CPU "cpu%u/cpuidle/state0", cpu);
if (stat(file, &statbuf) != 0 || !S_ISDIR(statbuf.st_mode))
diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c
index 33a5c06d95ca..e238c9559caf 100644
--- a/tools/testing/selftests/exec/execveat.c
+++ b/tools/testing/selftests/exec/execveat.c
@@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags,
}
static int check_execveat_invoked_rc(int fd, const char *path, int flags,
- int expected_rc)
+ int expected_rc, int expected_rc2)
{
int status;
int rc;
@@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
child, status);
return 1;
}
- if (WEXITSTATUS(status) != expected_rc) {
- printf("[FAIL] (child %d exited with %d not %d)\n",
- child, WEXITSTATUS(status), expected_rc);
+ if ((WEXITSTATUS(status) != expected_rc) &&
+ (WEXITSTATUS(status) != expected_rc2)) {
+ printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
+ child, WEXITSTATUS(status), expected_rc, expected_rc2);
return 1;
}
printf("[OK]\n");
@@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags,
static int check_execveat(int fd, const char *path, int flags)
{
- return check_execveat_invoked_rc(fd, path, flags, 99);
+ return check_execveat_invoked_rc(fd, path, flags, 99, 99);
}
static char *concat(const char *left, const char *right)
@@ -179,11 +180,11 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
*/
fd = open(longpath, O_RDONLY);
if (fd > 0) {
- printf("Invoke copy of '%s' via filename of length %lu:\n",
+ printf("Invoke copy of '%s' via filename of length %zu:\n",
src, strlen(longpath));
fail += check_execveat(fd, "", AT_EMPTY_PATH);
} else {
- printf("Failed to open length %lu filename, errno=%d (%s)\n",
+ printf("Failed to open length %zu filename, errno=%d (%s)\n",
strlen(longpath), errno, strerror(errno));
fail++;
}
@@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script)
* Execute as a long pathname relative to ".". If this is a script,
* the interpreter will launch but fail to open the script because its
* name ("/dev/fd/5/xxx....") is bigger than PATH_MAX.
+ *
+ * The failure code is usually 127 (POSIX: "If a command is not found,
+ * the exit status shall be 127."), but some systems give 126 (POSIX:
+ * "If the command name is found, but it is not an executable utility,
+ * the exit status shall be 126."), so allow either.
*/
if (is_script)
- fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127);
+ fail += check_execveat_invoked_rc(dot_dfd, longpath, 0,
+ 127, 126);
else
fail += check_execveat(dot_dfd, longpath, 0);
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
index 94dae65eea41..8519e9ee97e3 100644
--- a/tools/testing/selftests/mqueue/mq_perf_tests.c
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -536,10 +536,9 @@ int main(int argc, char *argv[])
{
struct mq_attr attr;
char *option, *next_option;
- int i, cpu;
+ int i, cpu, rc;
struct sigaction sa;
poptContext popt_context;
- char rc;
void *retval;
main_thread = pthread_self();
diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile
index 4c4b1f631ecf..077828c889f1 100644
--- a/tools/testing/selftests/vm/Makefile
+++ b/tools/testing/selftests/vm/Makefile
@@ -7,7 +7,7 @@ BINARIES += transhuge-stress
all: $(BINARIES)
%: %.c
- $(CC) $(CFLAGS) -o $@ $^
+ $(CC) $(CFLAGS) -o $@ $^ -lrt
run_tests: all
@/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index f5283438ee05..1cc6e2e19982 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -671,6 +671,7 @@ static void update_memslots(struct kvm_memslots *slots,
WARN_ON(mslots[i].id != id);
if (!new->npages) {
+ WARN_ON(!mslots[i].npages);
new->base_gfn = 0;
if (mslots[i].npages)
slots->used_slots--;
@@ -687,12 +688,25 @@ static void update_memslots(struct kvm_memslots *slots,
slots->id_to_index[mslots[i].id] = i;
i++;
}
- while (i > 0 &&
- new->base_gfn > mslots[i - 1].base_gfn) {
- mslots[i] = mslots[i - 1];
- slots->id_to_index[mslots[i].id] = i;
- i--;
- }
+
+ /*
+ * The ">=" is needed when creating a slot with base_gfn == 0,
+ * so that it moves before all those with base_gfn == npages == 0.
+ *
+ * On the other hand, if new->npages is zero, the above loop has
+ * already left i pointing to the beginning of the empty part of
+ * mslots, and the ">=" would move the hole backwards in this
+ * case---which is wrong. So skip the loop when deleting a slot.
+ */
+ if (new->npages) {
+ while (i > 0 &&
+ new->base_gfn >= mslots[i - 1].base_gfn) {
+ mslots[i] = mslots[i - 1];
+ slots->id_to_index[mslots[i].id] = i;
+ i--;
+ }
+ } else
+ WARN_ON_ONCE(i != slots->used_slots);
mslots[i] = *new;
slots->id_to_index[mslots[i].id] = i;