-rw-r--r--  .mailmap | 4
-rw-r--r--  Documentation/devicetree/bindings/arc/archs-pct.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arc/pct.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c-rk3x.txt | 4
-rw-r--r--  Documentation/sysctl/vm.txt | 19
-rw-r--r--  MAINTAINERS | 15
-rw-r--r--  Makefile | 4
-rw-r--r--  arch/arc/Kconfig | 2
-rw-r--r--  arch/arc/include/asm/irqflags-arcv2.h | 36
-rw-r--r--  arch/arc/kernel/entry-arcv2.S | 10
-rw-r--r--  arch/arc/kernel/entry-compact.S | 3
-rw-r--r--  arch/arc/mm/init.c | 4
-rw-r--r--  arch/arm/boot/dts/am33xx.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am4372.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/am57xx-beagle-x15.dts | 17
-rw-r--r--  arch/arm/boot/dts/dm814x-clocks.dtsi | 243
-rw-r--r--  arch/arm/boot/dts/dra62x-clocks.dtsi | 26
-rw-r--r--  arch/arm/boot/dts/dra7xx-clocks.dtsi | 18
-rw-r--r--  arch/arm/boot/dts/qcom-msm8974.dtsi | 14
-rw-r--r--  arch/arm/boot/dts/r8a7791-koelsch.dts | 1
-rw-r--r--  arch/arm/boot/dts/r8a7791-porter.dts | 14
-rw-r--r--  arch/arm/boot/dts/r8a7791.dtsi | 5
-rw-r--r--  arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c | 5
-rw-r--r--  arch/arm/mach-omap2/clockdomains7xx_data.c | 2
-rw-r--r--  arch/arm/mach-omap2/io.c | 3
-rw-r--r--  arch/arm/mach-omap2/omap-wakeupgen.c | 7
-rw-r--r--  arch/arm/mach-omap2/pm34xx.c | 23
-rw-r--r--  arch/arm/mach-shmobile/timer.c | 28
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts | 1
-rw-r--r--  arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi | 20
-rw-r--r--  arch/nios2/lib/memset.c | 2
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 2
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 2
-rw-r--r--  arch/s390/include/asm/mmu.h | 2
-rw-r--r--  arch/s390/include/asm/mmu_context.h | 28
-rw-r--r--  arch/s390/include/asm/pgalloc.h | 4
-rw-r--r--  arch/s390/include/asm/processor.h | 2
-rw-r--r--  arch/s390/include/asm/tlbflush.h | 9
-rw-r--r--  arch/s390/mm/init.c | 3
-rw-r--r--  arch/s390/mm/mmap.c | 6
-rw-r--r--  arch/s390/mm/pgalloc.c | 85
-rw-r--r--  arch/s390/pci/pci_dma.c | 16
-rw-r--r--  arch/x86/events/amd/core.c | 2
-rw-r--r--  arch/x86/events/intel/core.c | 1
-rw-r--r--  arch/x86/events/intel/lbr.c | 6
-rw-r--r--  arch/x86/events/intel/pt.c | 75
-rw-r--r--  arch/x86/events/intel/pt.h | 3
-rw-r--r--  arch/x86/events/intel/rapl.c | 1
-rw-r--r--  arch/x86/include/asm/perf_event.h | 4
-rw-r--r--  arch/x86/kernel/apic/vector.c | 3
-rw-r--r--  arch/x86/kernel/head_32.S | 6
-rw-r--r--  arch/x86/kvm/vmx.c | 4
-rw-r--r--  arch/x86/mm/setup_nx.c | 5
-rw-r--r--  arch/x86/xen/spinlock.c | 6
-rw-r--r--  drivers/block/rbd.c | 52
-rw-r--r--  drivers/cpufreq/cpufreq_governor.c | 8
-rw-r--r--  drivers/cpufreq/intel_pstate.c | 5
-rw-r--r--  drivers/crypto/talitos.c | 87
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/edac/sb_edac.c | 2
-rw-r--r--  drivers/firmware/efi/vars.c | 37
-rw-r--r--  drivers/firmware/psci.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 20
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 31
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 154
-rw-r--r--  drivers/gpu/drm/radeon/evergreen_reg.h | 46
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 17
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 12
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 10
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_fb.c | 6
-rw-r--r--  drivers/i2c/busses/Kconfig | 4
-rw-r--r--  drivers/i2c/busses/i2c-cpm.c | 4
-rw-r--r--  drivers/i2c/busses/i2c-exynos5.c | 24
-rw-r--r--  drivers/i2c/busses/i2c-ismt.c | 2
-rw-r--r--  drivers/i2c/busses/i2c-rk3x.c | 1
-rw-r--r--  drivers/infiniband/core/cache.c | 3
-rw-r--r--  drivers/infiniband/core/ucm.c | 4
-rw-r--r--  drivers/infiniband/core/ucma.c | 3
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 5
-rw-r--r--  drivers/infiniband/core/verbs.c | 3
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c | 2
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c | 24
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c | 6
-rw-r--r--  drivers/infiniband/hw/nes/nes_nic.c | 3
-rw-r--r--  drivers/infiniband/hw/qib/qib_file_ops.c | 5
-rw-r--r--  drivers/infiniband/sw/rdmavt/qp.c | 4
-rw-r--r--  drivers/media/usb/usbvision/usbvision-video.c | 7
-rw-r--r--  drivers/media/v4l2-core/videobuf2-core.c | 20
-rw-r--r--  drivers/media/v4l2-core/videobuf2-memops.c | 2
-rw-r--r--  drivers/media/v4l2-core/videobuf2-v4l2.c | 20
-rw-r--r--  drivers/misc/cxl/context.c | 7
-rw-r--r--  drivers/misc/cxl/cxl.h | 2
-rw-r--r--  drivers/misc/cxl/irq.c | 1
-rw-r--r--  drivers/misc/cxl/native.c | 31
-rw-r--r--  drivers/mmc/host/Kconfig | 1
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c | 81
-rw-r--r--  drivers/mmc/host/sunxi-mmc.c | 5
-rw-r--r--  drivers/net/Kconfig | 5
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c | 6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c | 8
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c | 72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c | 48
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c | 25
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c | 10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c | 40
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic.h | 4
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c | 50
-rw-r--r--  drivers/net/macsec.c | 65
-rw-r--r--  drivers/platform/x86/toshiba_acpi.c | 2
-rw-r--r--  drivers/rapidio/devices/rio_mport_cdev.c | 4
-rw-r--r--  drivers/s390/char/sclp_ctl.c | 12
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c | 1
-rw-r--r--  drivers/soc/mediatek/mtk-scpsys.c | 11
-rw-r--r--  drivers/staging/media/davinci_vpfe/vpfe_video.c | 54
-rw-r--r--  drivers/staging/rdma/hfi1/TODO | 2
-rw-r--r--  drivers/staging/rdma/hfi1/file_ops.c | 91
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.c | 40
-rw-r--r--  drivers/staging/rdma/hfi1/mmu_rb.h | 3
-rw-r--r--  drivers/staging/rdma/hfi1/qp.c | 2
-rw-r--r--  drivers/staging/rdma/hfi1/user_exp_rcv.c | 11
-rw-r--r--  drivers/staging/rdma/hfi1/user_sdma.c | 33
-rw-r--r--  drivers/thermal/hisi_thermal.c | 4
-rw-r--r--  drivers/thermal/thermal_core.c | 2
-rw-r--r--  drivers/tty/pty.c | 18
-rw-r--r--  drivers/tty/tty_io.c | 6
-rw-r--r--  drivers/usb/serial/option.c | 155
-rw-r--r--  fs/ceph/mds_client.c | 6
-rw-r--r--  fs/devpts/inode.c | 53
-rw-r--r--  fs/ocfs2/dlm/dlmmaster.c | 2
-rw-r--r--  fs/proc/task_mmu.c | 33
-rw-r--r--  include/linux/ceph/auth.h | 10
-rw-r--r--  include/linux/ceph/osd_client.h | 1
-rw-r--r--  include/linux/cgroup-defs.h | 1
-rw-r--r--  include/linux/cpuset.h | 6
-rw-r--r--  include/linux/devpts_fs.h | 6
-rw-r--r--  include/linux/huge_mm.h | 5
-rw-r--r--  include/linux/lockdep.h | 8
-rw-r--r--  include/linux/mlx5/device.h | 11
-rw-r--r--  include/linux/mlx5/driver.h | 7
-rw-r--r--  include/linux/mlx5/port.h | 6
-rw-r--r--  include/linux/mlx5/vport.h | 2
-rw-r--r--  include/linux/mm.h | 4
-rw-r--r--  include/linux/tty_driver.h | 4
-rw-r--r--  include/media/videobuf2-core.h | 8
-rw-r--r--  include/net/switchdev.h | 4
-rw-r--r--  include/rdma/ib.h | 16
-rw-r--r--  include/sound/hda_i915.h | 5
-rw-r--r--  include/uapi/linux/if_macsec.h | 4
-rw-r--r--  include/uapi/linux/v4l2-dv-timings.h | 30
-rw-r--r--  kernel/bpf/verifier.c | 1
-rw-r--r--  kernel/cgroup.c | 7
-rw-r--r--  kernel/cpuset.c | 4
-rw-r--r--  kernel/events/core.c | 55
-rw-r--r--  kernel/kcov.c | 3
-rw-r--r--  kernel/kexec_core.c | 7
-rw-r--r--  kernel/locking/lockdep.c | 37
-rw-r--r--  kernel/locking/lockdep_proc.c | 2
-rw-r--r--  kernel/workqueue.c | 29
-rw-r--r--  lib/stackdepot.c | 4
-rw-r--r--  mm/huge_memory.c | 12
-rw-r--r--  mm/memcontrol.c | 37
-rw-r--r--  mm/memory-failure.c | 10
-rw-r--r--  mm/memory.c | 40
-rw-r--r--  mm/migrate.c | 8
-rw-r--r--  mm/page_io.c | 6
-rw-r--r--  mm/swap.c | 5
-rw-r--r--  mm/vmscan.c | 30
-rw-r--r--  net/bridge/br_mdb.c | 124
-rw-r--r--  net/bridge/br_multicast.c | 8
-rw-r--r--  net/bridge/br_private.h | 4
-rw-r--r--  net/ceph/auth.c | 8
-rw-r--r--  net/ceph/auth_none.c | 71
-rw-r--r--  net/ceph/auth_none.h | 3
-rw-r--r--  net/ceph/auth_x.c | 21
-rw-r--r--  net/ceph/auth_x.h | 1
-rw-r--r--  net/ceph/osd_client.c | 6
-rw-r--r--  net/ipv4/fib_frontend.c | 6
-rw-r--r--  net/ipv6/addrconf.c | 48
-rw-r--r--  net/switchdev/switchdev.c | 6
-rw-r--r--  sound/hda/ext/hdac_ext_stream.c | 5
-rw-r--r--  sound/hda/hdac_i915.c | 62
-rw-r--r--  sound/pci/hda/hda_intel.c | 56
-rw-r--r--  sound/pci/hda/patch_hdmi.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/Kconfig | 1
-rw-r--r--  sound/soc/codecs/arizona.c | 12
-rw-r--r--  sound/soc/codecs/arizona.h | 2
-rw-r--r--  sound/soc/codecs/cs35l32.c | 17
-rw-r--r--  sound/soc/codecs/cs47l24.c | 3
-rw-r--r--  sound/soc/codecs/hdac_hdmi.c | 94
-rw-r--r--  sound/soc/codecs/nau8825.c | 126
-rw-r--r--  sound/soc/codecs/rt5640.c | 2
-rw-r--r--  sound/soc/codecs/rt5640.h | 36
-rw-r--r--  sound/soc/codecs/wm5102.c | 5
-rw-r--r--  sound/soc/codecs/wm5110.c | 2
-rw-r--r--  sound/soc/codecs/wm8962.c | 2
-rw-r--r--  sound/soc/codecs/wm8997.c | 2
-rw-r--r--  sound/soc/codecs/wm8998.c | 2
-rw-r--r--  sound/soc/intel/Kconfig | 1
-rw-r--r--  sound/soc/intel/haswell/sst-haswell-ipc.c | 2
-rw-r--r--  sound/soc/intel/skylake/skl-sst-dsp.c | 5
-rw-r--r--  sound/soc/intel/skylake/skl-topology.c | 42
-rw-r--r--  sound/soc/intel/skylake/skl-topology.h | 8
-rw-r--r--  sound/soc/intel/skylake/skl.c | 32
-rw-r--r--  sound/soc/soc-dapm.c | 7
214 files changed, 2540 insertions, 1223 deletions
diff --git a/.mailmap b/.mailmap
index 90c0aefc276d..c156a8b4d845 100644
--- a/.mailmap
+++ b/.mailmap
@@ -48,6 +48,9 @@ Felix Kuhling <fxkuehl@gmx.de>
Felix Moeller <felix@derklecks.de>
Filipe Lautert <filipe@icewall.org>
Franck Bui-Huu <vagabon.xyz@gmail.com>
+Frank Rowand <frowand.list@gmail.com> <frowand@mvista.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@am.sony.com>
+Frank Rowand <frowand.list@gmail.com> <frank.rowand@sonymobile.com>
Frank Zago <fzago@systemfabricworks.com>
Greg Kroah-Hartman <greg@echidna.(none)>
Greg Kroah-Hartman <gregkh@suse.de>
@@ -79,6 +82,7 @@ Kay Sievers <kay.sievers@vrfy.org>
Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
+Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Linas Vepstas <linas@austin.ibm.com>
diff --git a/Documentation/devicetree/bindings/arc/archs-pct.txt b/Documentation/devicetree/bindings/arc/archs-pct.txt
index 1ae98b87c640..e4b9dcee6d41 100644
--- a/Documentation/devicetree/bindings/arc/archs-pct.txt
+++ b/Documentation/devicetree/bindings/arc/archs-pct.txt
@@ -2,7 +2,7 @@
The ARC HS can be configured with a pipeline performance monitor for counting
CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters.
+are 100+ hardware conditions dynamically mapped to up to 32 counters.
It also supports overflow interrupts.
Required properties:
diff --git a/Documentation/devicetree/bindings/arc/pct.txt b/Documentation/devicetree/bindings/arc/pct.txt
index 7b9588444f20..4e874d9a38a6 100644
--- a/Documentation/devicetree/bindings/arc/pct.txt
+++ b/Documentation/devicetree/bindings/arc/pct.txt
@@ -2,7 +2,7 @@
The ARC700 can be configured with a pipeline performance monitor for counting
CPU and cache events like cache misses and hits. Like conventional PCT there
-are 100+ hardware conditions dynamically mapped to upto 32 counters
+are 100+ hardware conditions dynamically mapped to up to 32 counters
Note that:
* The ARC 700 PCT does not support interrupts; although HW events may be
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index ccc62f145306..3f0cbbb8395f 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -192,7 +192,6 @@ nodes to be present and contain the properties described below.
can be one of:
"allwinner,sun6i-a31"
"allwinner,sun8i-a23"
- "arm,psci"
"arm,realview-smp"
"brcm,bcm-nsp-smp"
"brcm,brahma-b15"
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
index f0d71bc52e64..0b4a85fe2d86 100644
--- a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
@@ -6,8 +6,8 @@ RK3xxx SoCs.
Required properties :
- reg : Offset and length of the register set for the device
- - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
- "rockchip,rk3288-i2c".
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c",
+ "rockchip,rk3228-i2c" or "rockchip,rk3288-i2c".
- interrupts : interrupt number
- clocks : parent clock
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index cb0368459da3..34a5fece3121 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -581,15 +581,16 @@ Specify "[Nn]ode" for node order
"Zone Order" orders the zonelists by zone type, then by node within each
zone. Specify "[Zz]one" for zone order.
-Specify "[Dd]efault" to request automatic configuration. Autoconfiguration
-will select "node" order in following case.
-(1) if the DMA zone does not exist or
-(2) if the DMA zone comprises greater than 50% of the available memory or
-(3) if any node's DMA zone comprises greater than 70% of its local memory and
- the amount of local memory is big enough.
-
-Otherwise, "zone" order will be selected. Default order is recommended unless
-this is causing problems for your system/application.
+Specify "[Dd]efault" to request automatic configuration.
+
+On 32-bit, the Normal zone needs to be preserved for allocations accessible
+by the kernel, so "zone" order will be selected.
+
+On 64-bit, devices that require DMA32/DMA are relatively rare, so "node"
+order will be selected.
+
+Default order is recommended unless this is causing problems for your
+system/application.
==============================================================
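For reference, the ordering the kernel actually selected can be read back at run time. A minimal C sketch, assuming procfs is mounted and the 4.6-era /proc/sys/vm/numa_zonelist_order file is present:

#include <stdio.h>

/* Print the zonelist ordering currently in effect. */
int main(void)
{
	char buf[32];
	FILE *f = fopen("/proc/sys/vm/numa_zonelist_order", "r");

	if (!f) {
		perror("numa_zonelist_order");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("numa_zonelist_order: %s", buf);
	fclose(f);
	return 0;
}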
diff --git a/MAINTAINERS b/MAINTAINERS
index 1d5b4becab6f..42e65d128d01 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6027,7 +6027,7 @@ F: include/scsi/*iscsi*
ISCSI EXTENSIONS FOR RDMA (ISER) INITIATOR
M: Or Gerlitz <ogerlitz@mellanox.com>
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
M: Roi Dayan <roid@mellanox.com>
L: linux-rdma@vger.kernel.org
S: Supported
@@ -6037,7 +6037,7 @@ Q: http://patchwork.kernel.org/project/linux-rdma/list/
F: drivers/infiniband/ulp/iser/
ISCSI EXTENSIONS FOR RDMA (ISER) TARGET
-M: Sagi Grimberg <sagig@mellanox.com>
+M: Sagi Grimberg <sagi@grimberg.me>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending.git master
L: linux-rdma@vger.kernel.org
L: target-devel@vger.kernel.org
@@ -6400,7 +6400,7 @@ F: mm/kmemleak.c
F: mm/kmemleak-test.c
KPROBES
-M: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
+M: Ananth N Mavinakayanahalli <ananth@linux.vnet.ibm.com>
M: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
M: "David S. Miller" <davem@davemloft.net>
M: Masami Hiramatsu <mhiramat@kernel.org>
@@ -11071,6 +11071,15 @@ S: Maintained
F: drivers/clk/ti/
F: include/linux/clk/ti.h
+TI ETHERNET SWITCH DRIVER (CPSW)
+M: Mugunthan V N <mugunthanvnm@ti.com>
+R: Grygorii Strashko <grygorii.strashko@ti.com>
+L: linux-omap@vger.kernel.org
+L: netdev@vger.kernel.org
+S: Maintained
+F: drivers/net/ethernet/ti/cpsw*
+F: drivers/net/ethernet/ti/davinci*
+
TI FLASH MEDIA INTERFACE DRIVER
M: Alex Dubov <oakad@yahoo.com>
S: Maintained
diff --git a/Makefile b/Makefile
index 9496df81b9a8..7466de60ddc7 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc5
-NAME = Blurry Fish Butt
+EXTRAVERSION = -rc6
+NAME = Charred Weasel
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 12d0284a46e5..ec4791ea6911 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -35,8 +35,10 @@ config ARC
select NO_BOOTMEM
select OF
select OF_EARLY_FLATTREE
+ select OF_RESERVED_MEM
select PERF_USE_VMALLOC
select HAVE_DEBUG_STACKOVERFLOW
+ select HAVE_GENERIC_DMA_COHERENT
config MIGHT_HAVE_PCI
bool
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 37c2f751eebf..d1ec7f6b31e0 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -18,6 +18,12 @@
#define STATUS_AD_MASK (1<<STATUS_AD_BIT)
#define STATUS_IE_MASK (1<<STATUS_IE_BIT)
+/* status32 Bits as encoded/expected by CLRI/SETI */
+#define CLRI_STATUS_IE_BIT 4
+
+#define CLRI_STATUS_E_MASK 0xF
+#define CLRI_STATUS_IE_MASK (1 << CLRI_STATUS_IE_BIT)
+
#define AUX_USER_SP 0x00D
#define AUX_IRQ_CTRL 0x00E
#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
@@ -100,6 +106,13 @@ static inline long arch_local_save_flags(void)
:
: "memory");
+ /* To be compatible with irq_save()/irq_restore()
+ * encode the irq bits as expected by CLRI/SETI
+ * (this was needed to make CONFIG_TRACE_IRQFLAGS work)
+ */
+ temp = (1 << 5) |
+ ((!!(temp & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
+ (temp & CLRI_STATUS_E_MASK);
return temp;
}
@@ -108,7 +121,7 @@ static inline long arch_local_save_flags(void)
*/
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- return !(flags & (STATUS_IE_MASK));
+ return !(flags & CLRI_STATUS_IE_MASK);
}
static inline int arch_irqs_disabled(void)
@@ -128,11 +141,32 @@ static inline void arc_softirq_clear(int irq)
#else
+#ifdef CONFIG_TRACE_IRQFLAGS
+
+.macro TRACE_ASM_IRQ_DISABLE
+ bl trace_hardirqs_off
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+ bl trace_hardirqs_on
+.endm
+
+#else
+
+.macro TRACE_ASM_IRQ_DISABLE
+.endm
+
+.macro TRACE_ASM_IRQ_ENABLE
+.endm
+
+#endif
.macro IRQ_DISABLE scratch
clri
+ TRACE_ASM_IRQ_DISABLE
.endm
.macro IRQ_ENABLE scratch
+ TRACE_ASM_IRQ_ENABLE
seti
.endm
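As a standalone illustration of the re-encoding now done in arch_local_save_flags(), here is a user-space sketch. The CLRI_* constants are copied from the hunk above; the STATUS32.IE bit position (31) is an assumption for this sketch, not taken from this diff:

#include <stdio.h>

#define STATUS_IE_BIT		31			/* assumed STATUS32.IE position */
#define STATUS_IE_MASK		(1UL << STATUS_IE_BIT)
#define CLRI_STATUS_IE_BIT	4			/* from the hunk above */
#define CLRI_STATUS_E_MASK	0xFUL

/* Re-encode a raw STATUS32 snapshot into the CLRI/SETI flag layout. */
static unsigned long to_clri_format(unsigned long status32)
{
	return (1UL << 5) |
	       ((!!(status32 & STATUS_IE_MASK)) << CLRI_STATUS_IE_BIT) |
	       (status32 & CLRI_STATUS_E_MASK);
}

int main(void)
{
	/* IE set plus E-field value 3 -> 0x33 in the CLRI encoding */
	printf("0x%lx\n", to_clri_format(STATUS_IE_MASK | 0x3));
	return 0;
}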
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index c1264607bbff..7a1c124ff021 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -69,8 +69,11 @@ ENTRY(handle_interrupt)
clri ; To make status32.IE agree with CPU internal state
- lr r0, [ICAUSE]
+#ifdef CONFIG_TRACE_IRQFLAGS
+ TRACE_ASM_IRQ_DISABLE
+#endif
+ lr r0, [ICAUSE]
mov blink, ret_from_exception
b.d arch_do_IRQ
@@ -169,6 +172,11 @@ END(EV_TLBProtV)
.Lrestore_regs:
+ # Interrpts are actually disabled from this point on, but will get
+ # reenabled after we return from interrupt/exception.
+ # But irq tracer needs to be told now...
+ TRACE_ASM_IRQ_ENABLE
+
ld r0, [sp, PT_status32] ; U/K mode at time of entry
lr r10, [AUX_IRQ_ACT]
diff --git a/arch/arc/kernel/entry-compact.S b/arch/arc/kernel/entry-compact.S
index 431433929189..0cb0abaa0479 100644
--- a/arch/arc/kernel/entry-compact.S
+++ b/arch/arc/kernel/entry-compact.S
@@ -341,6 +341,9 @@ END(call_do_page_fault)
.Lrestore_regs:
+ # Interrpts are actually disabled from this point on, but will get
+ # reenabled after we return from interrupt/exception.
+ # But irq tracer needs to be told now...
TRACE_ASM_IRQ_ENABLE
lr r10, [status32]
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 7d2c4fbf4f22..5487d0b97400 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -13,6 +13,7 @@
#ifdef CONFIG_BLK_DEV_INITRD
#include <linux/initrd.h>
#endif
+#include <linux/of_fdt.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/highmem.h>
@@ -136,6 +137,9 @@ void __init setup_arch_memory(void)
memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
#endif
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
memblock_dump_all();
/*----------------- node/zones setup --------------------------*/
diff --git a/arch/arm/boot/dts/am33xx.dtsi b/arch/arm/boot/dts/am33xx.dtsi
index 55ca9c7dcf6a..0467846b4cc3 100644
--- a/arch/arm/boot/dts/am33xx.dtsi
+++ b/arch/arm/boot/dts/am33xx.dtsi
@@ -860,7 +860,7 @@
ti,no-idle-on-init;
reg = <0x50000000 0x2000>;
interrupts = <100>;
- dmas = <&edma 52>;
+ dmas = <&edma 52 0>;
dma-names = "rxtx";
gpmc,num-cs = <7>;
gpmc,num-waitpins = <2>;
diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi
index 344b861a55a5..ba580a9da390 100644
--- a/arch/arm/boot/dts/am4372.dtsi
+++ b/arch/arm/boot/dts/am4372.dtsi
@@ -884,7 +884,7 @@
gpmc: gpmc@50000000 {
compatible = "ti,am3352-gpmc";
ti,hwmods = "gpmc";
- dmas = <&edma 52>;
+ dmas = <&edma 52 0>;
dma-names = "rxtx";
clocks = <&l3s_gclk>;
clock-names = "fck";
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 0a5fc5d02ce2..4168eb9dd369 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -99,13 +99,6 @@
#cooling-cells = <2>;
};
- extcon_usb1: extcon_usb1 {
- compatible = "linux,extcon-usb-gpio";
- id-gpio = <&gpio7 25 GPIO_ACTIVE_HIGH>;
- pinctrl-names = "default";
- pinctrl-0 = <&extcon_usb1_pins>;
- };
-
hdmi0: connector {
compatible = "hdmi-connector";
label = "hdmi";
@@ -349,12 +342,6 @@
>;
};
- extcon_usb1_pins: extcon_usb1_pins {
- pinctrl-single,pins = <
- DRA7XX_CORE_IOPAD(0x37ec, PIN_INPUT_PULLUP | MUX_MODE14) /* uart1_rtsn.gpio7_25 */
- >;
- };
-
tpd12s015_pins: pinmux_tpd12s015_pins {
pinctrl-single,pins = <
DRA7XX_CORE_IOPAD(0x37b0, PIN_OUTPUT | MUX_MODE14) /* gpio7_10 CT_CP_HPD */
@@ -706,10 +693,6 @@
pinctrl-0 = <&usb1_pins>;
};
-&omap_dwc3_1 {
- extcon = <&extcon_usb1>;
-};
-
&omap_dwc3_2 {
extcon = <&extcon_usb2>;
};
diff --git a/arch/arm/boot/dts/dm814x-clocks.dtsi b/arch/arm/boot/dts/dm814x-clocks.dtsi
index e0ea6a93a22e..792a64ee0df7 100644
--- a/arch/arm/boot/dts/dm814x-clocks.dtsi
+++ b/arch/arm/boot/dts/dm814x-clocks.dtsi
@@ -4,6 +4,157 @@
* published by the Free Software Foundation.
*/
+&pllss {
+ /*
+	 * See TRM "2.6.10 Connected Outputs of DPLLS" and
+ * "2.6.11 Connected Outputs of DPLLJ". Only clkout is
+ * connected except for hdmi and usb.
+ */
+ adpll_mpu_ck: adpll@40 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-s-clock";
+ reg = <0x40 0x40>;
+ clocks = <&devosc_ck &devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow", "clkinphif";
+ clock-output-names = "481c5040.adpll.dcoclkldo",
+ "481c5040.adpll.clkout",
+ "481c5040.adpll.clkoutx2",
+ "481c5040.adpll.clkouthif";
+ };
+
+ adpll_dsp_ck: adpll@80 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x80 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5080.adpll.dcoclkldo",
+ "481c5080.adpll.clkout",
+ "481c5080.adpll.clkoutldo";
+ };
+
+ adpll_sgx_ck: adpll@b0 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0xb0 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c50b0.adpll.dcoclkldo",
+ "481c50b0.adpll.clkout",
+ "481c50b0.adpll.clkoutldo";
+ };
+
+ adpll_hdvic_ck: adpll@e0 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0xe0 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c50e0.adpll.dcoclkldo",
+ "481c50e0.adpll.clkout",
+ "481c50e0.adpll.clkoutldo";
+ };
+
+ adpll_l3_ck: adpll@110 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x110 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5110.adpll.dcoclkldo",
+ "481c5110.adpll.clkout",
+ "481c5110.adpll.clkoutldo";
+ };
+
+ adpll_isp_ck: adpll@140 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x140 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5140.adpll.dcoclkldo",
+ "481c5140.adpll.clkout",
+ "481c5140.adpll.clkoutldo";
+ };
+
+ adpll_dss_ck: adpll@170 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x170 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5170.adpll.dcoclkldo",
+ "481c5170.adpll.clkout",
+ "481c5170.adpll.clkoutldo";
+ };
+
+ adpll_video0_ck: adpll@1a0 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x1a0 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c51a0.adpll.dcoclkldo",
+ "481c51a0.adpll.clkout",
+ "481c51a0.adpll.clkoutldo";
+ };
+
+ adpll_video1_ck: adpll@1d0 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x1d0 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c51d0.adpll.dcoclkldo",
+ "481c51d0.adpll.clkout",
+ "481c51d0.adpll.clkoutldo";
+ };
+
+ adpll_hdmi_ck: adpll@200 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x200 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5200.adpll.dcoclkldo",
+ "481c5200.adpll.clkout",
+ "481c5200.adpll.clkoutldo";
+ };
+
+ adpll_audio_ck: adpll@230 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x230 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5230.adpll.dcoclkldo",
+ "481c5230.adpll.clkout",
+ "481c5230.adpll.clkoutldo";
+ };
+
+ adpll_usb_ck: adpll@260 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x260 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5260.adpll.dcoclkldo",
+ "481c5260.adpll.clkout",
+ "481c5260.adpll.clkoutldo";
+ };
+
+ adpll_ddr_ck: adpll@290 {
+ #clock-cells = <1>;
+ compatible = "ti,dm814-adpll-lj-clock";
+ reg = <0x290 0x30>;
+ clocks = <&devosc_ck &devosc_ck>;
+ clock-names = "clkinp", "clkinpulow";
+ clock-output-names = "481c5290.adpll.dcoclkldo",
+ "481c5290.adpll.clkout",
+ "481c5290.adpll.clkoutldo";
+ };
+};
+
&pllss_clocks {
timer1_fck: timer1_fck {
#clock-cells = <0>;
@@ -23,6 +174,24 @@
reg = <0x2e0>;
};
+ /* CPTS_RFT_CLK in RMII_REFCLK_SRC, usually sourced from auiod */
+ cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&adpll_video0_ck 1
+ &adpll_video1_ck 1
+ &adpll_audio_ck 1>;
+ ti,bit-shift = <1>;
+ reg = <0x2e8>;
+ };
+
+ /* REVISIT: Set up with a proper mux using RMII_REFCLK_SRC */
+ cpsw_125mhz_gclk: cpsw_125mhz_gclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <125000000>;
+ };
+
sysclk18_ck: sysclk18_ck {
#clock-cells = <0>;
compatible = "ti,mux-clock";
@@ -79,37 +248,6 @@
compatible = "fixed-clock";
clock-frequency = <1000000000>;
};
-
- sysclk4_ck: sysclk4_ck {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <222000000>;
- };
-
- sysclk6_ck: sysclk6_ck {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <100000000>;
- };
-
- sysclk10_ck: sysclk10_ck {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <48000000>;
- };
-
- cpsw_125mhz_gclk: cpsw_125mhz_gclk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <125000000>;
- };
-
- cpsw_cpts_rft_clk: cpsw_cpts_rft_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <250000000>;
- };
-
};
&prcm_clocks {
@@ -138,6 +276,49 @@
clock-div = <78125>;
};
+ /* L4_HS 220 MHz*/
+ sysclk4_ck: sysclk4_ck {
+ #clock-cells = <0>;
+ compatible = "ti,fixed-factor-clock";
+ clocks = <&adpll_l3_ck 1>;
+ ti,clock-mult = <1>;
+ ti,clock-div = <1>;
+ };
+
+ /* L4_FWCFG */
+ sysclk5_ck: sysclk5_ck {
+ #clock-cells = <0>;
+ compatible = "ti,fixed-factor-clock";
+ clocks = <&adpll_l3_ck 1>;
+ ti,clock-mult = <1>;
+ ti,clock-div = <2>;
+ };
+
+ /* L4_LS 110 MHz */
+ sysclk6_ck: sysclk6_ck {
+ #clock-cells = <0>;
+ compatible = "ti,fixed-factor-clock";
+ clocks = <&adpll_l3_ck 1>;
+ ti,clock-mult = <1>;
+ ti,clock-div = <2>;
+ };
+
+ sysclk8_ck: sysclk8_ck {
+ #clock-cells = <0>;
+ compatible = "ti,fixed-factor-clock";
+ clocks = <&adpll_usb_ck 1>;
+ ti,clock-mult = <1>;
+ ti,clock-div = <1>;
+ };
+
+ sysclk10_ck: sysclk10_ck {
+ compatible = "ti,divider-clock";
+ reg = <0x324>;
+ ti,max-div = <7>;
+ #clock-cells = <0>;
+ clocks = <&adpll_usb_ck 1>;
+ };
+
aud_clkin0_ck: aud_clkin0_ck {
#clock-cells = <0>;
compatible = "fixed-clock";
diff --git a/arch/arm/boot/dts/dra62x-clocks.dtsi b/arch/arm/boot/dts/dra62x-clocks.dtsi
index 6f98dc8df9dd..0e49741747ef 100644
--- a/arch/arm/boot/dts/dra62x-clocks.dtsi
+++ b/arch/arm/boot/dts/dra62x-clocks.dtsi
@@ -6,6 +6,32 @@
#include "dm814x-clocks.dtsi"
+/* Compared to dm814x, dra62x does not have hdic, l3 or dss PLLs */
+&adpll_hdvic_ck {
+ status = "disabled";
+};
+
+&adpll_l3_ck {
+ status = "disabled";
+};
+
+&adpll_dss_ck {
+ status = "disabled";
+};
+
+/* Compared to dm814x, dra62x has interconnect clocks on isp PLL */
+&sysclk4_ck {
+ clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk5_ck {
+ clocks = <&adpll_isp_ck 1>;
+};
+
+&sysclk6_ck {
+ clocks = <&adpll_isp_ck 1>;
+};
+
/*
* Compared to dm814x, dra62x has different shifts and more mux options.
* Please add the extra options for ysclk_14 and 16 if really needed.
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index d0bae06b7eb7..ef2164a99d0f 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -98,12 +98,20 @@
clock-frequency = <32768>;
};
- sys_32k_ck: sys_32k_ck {
+ sys_clk32_crystal_ck: sys_clk32_crystal_ck {
#clock-cells = <0>;
compatible = "fixed-clock";
clock-frequency = <32768>;
};
+ sys_clk32_pseudo_ck: sys_clk32_pseudo_ck {
+ #clock-cells = <0>;
+ compatible = "fixed-factor-clock";
+ clocks = <&sys_clkin1>;
+ clock-mult = <1>;
+ clock-div = <610>;
+ };
+
virt_12000000_ck: virt_12000000_ck {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -2170,4 +2178,12 @@
ti,bit-shift = <22>;
reg = <0x0558>;
};
+
+ sys_32k_ck: sys_32k_ck {
+ #clock-cells = <0>;
+ compatible = "ti,mux-clock";
+ clocks = <&sys_clk32_crystal_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>, <&sys_clk32_pseudo_ck>;
+ ti,bit-shift = <8>;
+ reg = <0x6c4>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom-msm8974.dtsi b/arch/arm/boot/dts/qcom-msm8974.dtsi
index ef5330578431..8193139d0d87 100644
--- a/arch/arm/boot/dts/qcom-msm8974.dtsi
+++ b/arch/arm/boot/dts/qcom-msm8974.dtsi
@@ -1,6 +1,6 @@
/dts-v1/;
-#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/interrupt-controller/irq.h>
#include <dt-bindings/clock/qcom,gcc-msm8974.h>
#include "skeleton.dtsi"
@@ -460,8 +460,6 @@
clock-names = "core", "iface";
#address-cells = <1>;
#size-cells = <0>;
- dmas = <&blsp2_dma 20>, <&blsp2_dma 21>;
- dma-names = "tx", "rx";
};
spmi_bus: spmi@fc4cf000 {
@@ -479,16 +477,6 @@
interrupt-controller;
#interrupt-cells = <4>;
};
-
- blsp2_dma: dma-controller@f9944000 {
- compatible = "qcom,bam-v1.4.0";
- reg = <0xf9944000 0x19000>;
- interrupts = <GIC_SPI 239 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&gcc GCC_BLSP2_AHB_CLK>;
- clock-names = "bam_clk";
- #dma-cells = <1>;
- qcom,ee = <0>;
- };
};
smd {
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 0ad71b81d3a2..cc6e28f81fe4 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -661,6 +661,7 @@
};
&pcie_bus_clk {
+ clock-frequency = <100000000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7791-porter.dts b/arch/arm/boot/dts/r8a7791-porter.dts
index 6c08314427d6..a9285d9a57cd 100644
--- a/arch/arm/boot/dts/r8a7791-porter.dts
+++ b/arch/arm/boot/dts/r8a7791-porter.dts
@@ -143,19 +143,11 @@
};
&pfc {
- pinctrl-0 = <&scif_clk_pins>;
- pinctrl-names = "default";
-
scif0_pins: serial0 {
renesas,groups = "scif0_data_d";
renesas,function = "scif0";
};
- scif_clk_pins: scif_clk {
- renesas,groups = "scif_clk";
- renesas,function = "scif_clk";
- };
-
ether_pins: ether {
renesas,groups = "eth_link", "eth_mdio", "eth_rmii";
renesas,function = "eth";
@@ -229,11 +221,6 @@
status = "okay";
};
-&scif_clk {
- clock-frequency = <14745600>;
- status = "okay";
-};
-
&ether {
pinctrl-0 = <&ether_pins &phy1_pins>;
pinctrl-names = "default";
@@ -414,6 +401,7 @@
};
&pcie_bus_clk {
+ clock-frequency = <100000000>;
status = "okay";
};
diff --git a/arch/arm/boot/dts/r8a7791.dtsi b/arch/arm/boot/dts/r8a7791.dtsi
index 6439f0569fe2..1cd1b6a3a72a 100644
--- a/arch/arm/boot/dts/r8a7791.dtsi
+++ b/arch/arm/boot/dts/r8a7791.dtsi
@@ -1083,9 +1083,8 @@
pcie_bus_clk: pcie_bus_clk {
compatible = "fixed-clock";
#clock-cells = <0>;
- clock-frequency = <100000000>;
+ clock-frequency = <0>;
clock-output-names = "pcie_bus";
- status = "disabled";
};
/* External SCIF clock */
@@ -1094,7 +1093,6 @@
#clock-cells = <0>;
/* This value must be overridden by the board. */
clock-frequency = <0>;
- status = "disabled";
};
/* External USB clock - can be overridden by the board */
@@ -1112,7 +1110,6 @@
/* This value must be overridden by the board. */
clock-frequency = <0>;
clock-output-names = "can_clk";
- status = "disabled";
};
/* Special CPG clocks */
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index a5edd7d60266..3d039ef021e0 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -71,6 +71,7 @@ struct platform_device *__init imx_add_sdhci_esdhc_imx(
if (!pdata)
pdata = &default_esdhc_pdata;
- return imx_add_platform_device(data->devid, data->id, res,
- ARRAY_SIZE(res), pdata, sizeof(*pdata));
+ return imx_add_platform_device_dmamask(data->devid, data->id, res,
+ ARRAY_SIZE(res), pdata, sizeof(*pdata),
+ DMA_BIT_MASK(32));
}
diff --git a/arch/arm/mach-omap2/clockdomains7xx_data.c b/arch/arm/mach-omap2/clockdomains7xx_data.c
index 7581e036bda6..ef9ed36e8a61 100644
--- a/arch/arm/mach-omap2/clockdomains7xx_data.c
+++ b/arch/arm/mach-omap2/clockdomains7xx_data.c
@@ -461,7 +461,7 @@ static struct clockdomain ipu_7xx_clkdm = {
.cm_inst = DRA7XX_CM_CORE_AON_IPU_INST,
.clkdm_offs = DRA7XX_CM_CORE_AON_IPU_IPU_CDOFFS,
.dep_bit = DRA7XX_IPU_STATDEP_SHIFT,
- .flags = CLKDM_CAN_HWSUP_SWSUP,
+ .flags = CLKDM_CAN_SWSUP,
};
static struct clockdomain mpu1_7xx_clkdm = {
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 9821be6dfd5e..49de4dd227be 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -737,7 +737,8 @@ void __init omap5_init_late(void)
#ifdef CONFIG_SOC_DRA7XX
void __init dra7xx_init_early(void)
{
- omap2_set_globals_tap(-1, OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
+ omap2_set_globals_tap(DRA7XX_CLASS,
+ OMAP2_L4_IO_ADDRESS(DRA7XX_TAP_BASE));
omap2_set_globals_prcm_mpu(OMAP2_L4_IO_ADDRESS(OMAP54XX_PRCM_MPU_BASE));
omap2_control_base_init();
omap4_pm_init_early();
diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
index f397bd6bd6e3..2c04f2741476 100644
--- a/arch/arm/mach-omap2/omap-wakeupgen.c
+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
@@ -274,6 +274,10 @@ static inline void omap5_irq_save_context(void)
*/
static void irq_save_context(void)
{
+ /* DRA7 has no SAR to save */
+ if (soc_is_dra7xx())
+ return;
+
if (!sar_base)
sar_base = omap4_get_sar_ram_base();
@@ -290,6 +294,9 @@ static void irq_sar_clear(void)
{
u32 val;
u32 offset = SAR_BACKUP_STATUS_OFFSET;
+ /* DRA7 has no SAR to save */
+ if (soc_is_dra7xx())
+ return;
if (soc_is_omap54xx())
offset = OMAP5_SAR_BACKUP_STATUS_OFFSET;
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 2dbd3785ee6f..d44e0e2f1106 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -198,7 +198,6 @@ void omap_sram_idle(void)
int per_next_state = PWRDM_POWER_ON;
int core_next_state = PWRDM_POWER_ON;
int per_going_off;
- int core_prev_state;
u32 sdrc_pwr = 0;
mpu_next_state = pwrdm_read_next_pwrst(mpu_pwrdm);
@@ -278,16 +277,20 @@ void omap_sram_idle(void)
sdrc_write_reg(sdrc_pwr, SDRC_POWER);
/* CORE */
- if (core_next_state < PWRDM_POWER_ON) {
- core_prev_state = pwrdm_read_prev_pwrst(core_pwrdm);
- if (core_prev_state == PWRDM_POWER_OFF) {
- omap3_core_restore_context();
- omap3_cm_restore_context();
- omap3_sram_restore_context();
- omap2_sms_restore_context();
- }
+ if (core_next_state < PWRDM_POWER_ON &&
+ pwrdm_read_prev_pwrst(core_pwrdm) == PWRDM_POWER_OFF) {
+ omap3_core_restore_context();
+ omap3_cm_restore_context();
+ omap3_sram_restore_context();
+ omap2_sms_restore_context();
+ } else {
+ /*
+ * In off-mode resume path above, omap3_core_restore_context
+ * also handles the INTC autoidle restore done here so limit
+ * this to non-off mode resume paths so we don't do it twice.
+ */
+ omap3_intc_resume_idle();
}
- omap3_intc_resume_idle();
pwrdm_post_transition(NULL);
diff --git a/arch/arm/mach-shmobile/timer.c b/arch/arm/mach-shmobile/timer.c
index ad008e4b0c49..67d79f9c6bad 100644
--- a/arch/arm/mach-shmobile/timer.c
+++ b/arch/arm/mach-shmobile/timer.c
@@ -40,8 +40,7 @@ static void __init shmobile_setup_delay_hz(unsigned int max_cpu_core_hz,
void __init shmobile_init_delay(void)
{
struct device_node *np, *cpus;
- bool is_a7_a8_a9 = false;
- bool is_a15 = false;
+ unsigned int div = 0;
bool has_arch_timer = false;
u32 max_freq = 0;
@@ -55,27 +54,22 @@ void __init shmobile_init_delay(void)
if (!of_property_read_u32(np, "clock-frequency", &freq))
max_freq = max(max_freq, freq);
- if (of_device_is_compatible(np, "arm,cortex-a8") ||
- of_device_is_compatible(np, "arm,cortex-a9")) {
- is_a7_a8_a9 = true;
- } else if (of_device_is_compatible(np, "arm,cortex-a7")) {
- is_a7_a8_a9 = true;
- has_arch_timer = true;
- } else if (of_device_is_compatible(np, "arm,cortex-a15")) {
- is_a15 = true;
+ if (of_device_is_compatible(np, "arm,cortex-a8")) {
+ div = 2;
+ } else if (of_device_is_compatible(np, "arm,cortex-a9")) {
+ div = 1;
+ } else if (of_device_is_compatible(np, "arm,cortex-a7") ||
+ of_device_is_compatible(np, "arm,cortex-a15")) {
+ div = 1;
has_arch_timer = true;
}
}
of_node_put(cpus);
- if (!max_freq)
+ if (!max_freq || !div)
return;
- if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER)) {
- if (is_a7_a8_a9)
- shmobile_setup_delay_hz(max_freq, 1, 3);
- else if (is_a15)
- shmobile_setup_delay_hz(max_freq, 2, 4);
- }
+ if (!has_arch_timer || !IS_ENABLED(CONFIG_ARM_ARCH_TIMER))
+ shmobile_setup_delay_hz(max_freq, 1, div);
}
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
index 727ae5f8c4e7..b0ed44313a5b 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20-ref.dts
@@ -70,7 +70,6 @@
i2c3 = &i2c3;
i2c4 = &i2c4;
i2c5 = &i2c5;
- i2c6 = &i2c6;
};
};
diff --git a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
index e682a3f52791..651c9d9d2d54 100644
--- a/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
+++ b/arch/arm64/boot/dts/socionext/uniphier-ph1-ld20.dtsi
@@ -201,15 +201,12 @@
i2c2: i2c@58782000 {
compatible = "socionext,uniphier-fi2c";
- status = "disabled";
reg = <0x58782000 0x80>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 43 4>;
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_i2c2>;
clocks = <&i2c_clk>;
- clock-frequency = <100000>;
+ clock-frequency = <400000>;
};
i2c3: i2c@58783000 {
@@ -227,12 +224,15 @@
i2c4: i2c@58784000 {
compatible = "socionext,uniphier-fi2c";
+ status = "disabled";
reg = <0x58784000 0x80>;
#address-cells = <1>;
#size-cells = <0>;
interrupts = <0 45 4>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_i2c4>;
clocks = <&i2c_clk>;
- clock-frequency = <400000>;
+ clock-frequency = <100000>;
};
i2c5: i2c@58785000 {
@@ -245,16 +245,6 @@
clock-frequency = <400000>;
};
- i2c6: i2c@58786000 {
- compatible = "socionext,uniphier-fi2c";
- reg = <0x58786000 0x80>;
- #address-cells = <1>;
- #size-cells = <0>;
- interrupts = <0 26 4>;
- clocks = <&i2c_clk>;
- clock-frequency = <400000>;
- };
-
system_bus: system-bus@58c00000 {
compatible = "socionext,uniphier-system-bus";
status = "disabled";
diff --git a/arch/nios2/lib/memset.c b/arch/nios2/lib/memset.c
index c2cfcb121e34..2fcefe720283 100644
--- a/arch/nios2/lib/memset.c
+++ b/arch/nios2/lib/memset.c
@@ -68,7 +68,7 @@ void *memset(void *s, int c, size_t count)
"=r" (charcnt), /* %1 Output */
"=r" (dwordcnt), /* %2 Output */
"=r" (fill8reg), /* %3 Output */
- "=r" (wrkrega) /* %4 Output */
+ "=&r" (wrkrega) /* %4 Output only */
: "r" (c), /* %5 Input */
"0" (s), /* %0 Input/Output */
"1" (count) /* %1 Input/Output */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 3fa9df70aa20..2fc5d4db503c 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -384,3 +384,5 @@ SYSCALL(ni_syscall)
SYSCALL(ni_syscall)
SYSCALL(mlock2)
SYSCALL(copy_file_range)
+COMPAT_SYS_SPU(preadv2)
+COMPAT_SYS_SPU(pwritev2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 1f2594d45605..cf12c580f6b2 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 380
+#define NR_syscalls 382
#define __NR__exit __NR_exit
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 940290d45b08..e9f5f41aa55a 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -390,5 +390,7 @@
#define __NR_membarrier 365
#define __NR_mlock2 378
#define __NR_copy_file_range 379
+#define __NR_preadv2 380
+#define __NR_pwritev2 381
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
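With the table entries and numbers above in place, the new calls are reachable through syscall(2) even before libc gains wrappers. A minimal sketch; the fallback define, the /proc/version path, and the zero flags are illustrative assumptions, and the low/high offset split follows the kernel's preadv2 prototype:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <sys/uio.h>
#include <unistd.h>

#ifndef __NR_preadv2
#define __NR_preadv2 380	/* number assigned above for powerpc */
#endif

int main(void)
{
	char buf[128];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };
	int fd = open("/proc/version", O_RDONLY);
	long n;

	if (fd < 0)
		return 1;
	/* arguments: fd, iovec array, count, pos_low, pos_high, flags */
	n = syscall(__NR_preadv2, fd, &iov, 1, 0L, 0L, 0L);
	if (n > 0) {
		buf[n] = '\0';
		printf("%s", buf);
	}
	close(fd);
	return 0;
}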
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index d29ad9545b41..081b2ad99d73 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -11,7 +11,7 @@ typedef struct {
spinlock_t list_lock;
struct list_head pgtable_list;
struct list_head gmap_list;
- unsigned long asce_bits;
+ unsigned long asce;
unsigned long asce_limit;
unsigned long vdso_base;
/* The mmu context allocates 4K page tables. */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d321469eeda7..c837b79b455d 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -26,12 +26,28 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
#endif
- if (mm->context.asce_limit == 0) {
+ switch (mm->context.asce_limit) {
+ case 1UL << 42:
+ /*
+ * forked 3-level task, fall through to set new asce with new
+ * mm->pgd
+ */
+ case 0:
/* context created by exec, set asce limit to 4TB */
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
mm->context.asce_limit = STACK_TOP_MAX;
- } else if (mm->context.asce_limit == (1UL << 31)) {
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION3;
+ break;
+ case 1UL << 53:
+ /* forked 4-level task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ break;
+ case 1UL << 31:
+ /* forked 2-level compat task, set new asce with new mm->pgd */
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ /* pgd_alloc() did not increase mm->nr_pmds */
mm_inc_nr_pmds(mm);
}
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
@@ -42,7 +58,7 @@ static inline int init_new_context(struct task_struct *tsk,
static inline void set_user_asce(struct mm_struct *mm)
{
- S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+ S390_lowcore.user_asce = mm->context.asce;
if (current->thread.mm_segment.ar4)
__ctl_load(S390_lowcore.user_asce, 7, 7);
set_cpu_flag(CIF_ASCE);
@@ -71,7 +87,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
int cpu = smp_processor_id();
- S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
+ S390_lowcore.user_asce = next->context.asce;
if (prev == next)
return;
if (MACHINE_HAS_TLB_LC)
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 9b3d9b6099f2..da34cb6b1f3b 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -52,8 +52,8 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
return _REGION2_ENTRY_EMPTY;
}
-int crst_table_upgrade(struct mm_struct *, unsigned long limit);
-void crst_table_downgrade(struct mm_struct *, unsigned long limit);
+int crst_table_upgrade(struct mm_struct *);
+void crst_table_downgrade(struct mm_struct *);
static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d6fd22ea270d..18cdede1aeda 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -175,7 +175,7 @@ extern __vector128 init_task_fpu_regs[__NUM_VXRS];
regs->psw.mask = PSW_USER_BITS | PSW_MASK_BA; \
regs->psw.addr = new_psw; \
regs->gprs[15] = new_stackp; \
- crst_table_downgrade(current->mm, 1UL << 31); \
+ crst_table_downgrade(current->mm); \
execve_tail(); \
} while (0)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index ca148f7c3eaa..a2e6ef32e054 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -110,8 +110,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_IDTE)
- __tlb_flush_idte((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte(init_mm.context.asce);
else
__tlb_flush_global();
}
@@ -133,8 +132,7 @@ static inline void __tlb_flush_asce(struct mm_struct *mm, unsigned long asce)
static inline void __tlb_flush_kernel(void)
{
if (MACHINE_HAS_TLB_LC)
- __tlb_flush_idte_local((unsigned long) init_mm.pgd |
- init_mm.context.asce_bits);
+ __tlb_flush_idte_local(init_mm.context.asce);
else
__tlb_flush_local();
}
@@ -148,8 +146,7 @@ static inline void __tlb_flush_mm(struct mm_struct * mm)
* only ran on the local cpu.
*/
if (MACHINE_HAS_IDTE && list_empty(&mm->context.gmap_list))
- __tlb_flush_asce(mm, (unsigned long) mm->pgd |
- mm->context.asce_bits);
+ __tlb_flush_asce(mm, mm->context.asce);
else
__tlb_flush_full(mm);
}
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c7b0451397d6..2489b2e917c8 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -89,7 +89,8 @@ void __init paging_init(void)
asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
pgd_type = _REGION3_ENTRY_EMPTY;
}
- S390_lowcore.kernel_asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
+ S390_lowcore.kernel_asce = init_mm.context.asce;
clear_table((unsigned long *) init_mm.pgd, pgd_type,
sizeof(unsigned long)*2048);
vmem_map_init();
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 45c4daa49930..89cf09e5f168 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -174,7 +174,7 @@ int s390_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
if (!(flags & MAP_FIXED))
addr = 0;
if ((addr + len) >= TASK_SIZE)
- return crst_table_upgrade(current->mm, TASK_MAX_SIZE);
+ return crst_table_upgrade(current->mm);
return 0;
}
@@ -191,7 +191,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
@@ -213,7 +213,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
return area;
if (area == -ENOMEM && !is_compat_task() && TASK_SIZE < TASK_MAX_SIZE) {
/* Upgrade the page table to 4 levels and retry. */
- rc = crst_table_upgrade(mm, TASK_MAX_SIZE);
+ rc = crst_table_upgrade(mm);
if (rc)
return (unsigned long) rc;
area = arch_get_unmapped_area_topdown(filp, addr, len,
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index f6c3de26cda8..e8b5962ac12a 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -76,81 +76,52 @@ static void __crst_table_upgrade(void *arg)
__tlb_flush_local();
}
-int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
+int crst_table_upgrade(struct mm_struct *mm)
{
unsigned long *table, *pgd;
- unsigned long entry;
- int flush;
- BUG_ON(limit > TASK_MAX_SIZE);
- flush = 0;
-repeat:
+ /* upgrade should only happen from 3 to 4 levels */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
table = crst_table_alloc(mm);
if (!table)
return -ENOMEM;
+
spin_lock_bh(&mm->page_table_lock);
- if (mm->context.asce_limit < limit) {
- pgd = (unsigned long *) mm->pgd;
- if (mm->context.asce_limit <= (1UL << 31)) {
- entry = _REGION3_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- } else {
- entry = _REGION2_ENTRY_EMPTY;
- mm->context.asce_limit = 1UL << 53;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION2;
- }
- crst_table_init(table, entry);
- pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
- mm->pgd = (pgd_t *) table;
- mm->task_size = mm->context.asce_limit;
- table = NULL;
- flush = 1;
- }
+ pgd = (unsigned long *) mm->pgd;
+ crst_table_init(table, _REGION2_ENTRY_EMPTY);
+ pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
+ mm->pgd = (pgd_t *) table;
+ mm->context.asce_limit = 1UL << 53;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_REGION2;
+ mm->task_size = mm->context.asce_limit;
spin_unlock_bh(&mm->page_table_lock);
- if (table)
- crst_table_free(mm, table);
- if (mm->context.asce_limit < limit)
- goto repeat;
- if (flush)
- on_each_cpu(__crst_table_upgrade, mm, 0);
+
+ on_each_cpu(__crst_table_upgrade, mm, 0);
return 0;
}
-void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+void crst_table_downgrade(struct mm_struct *mm)
{
pgd_t *pgd;
+ /* downgrade should only happen from 3 to 2 levels (compat only) */
+ BUG_ON(mm->context.asce_limit != (1UL << 42));
+
if (current->active_mm == mm) {
clear_user_asce();
__tlb_flush_mm(mm);
}
- while (mm->context.asce_limit > limit) {
- pgd = mm->pgd;
- switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
- case _REGION_ENTRY_TYPE_R2:
- mm->context.asce_limit = 1UL << 42;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_REGION3;
- break;
- case _REGION_ENTRY_TYPE_R3:
- mm->context.asce_limit = 1UL << 31;
- mm->context.asce_bits = _ASCE_TABLE_LENGTH |
- _ASCE_USER_BITS |
- _ASCE_TYPE_SEGMENT;
- break;
- default:
- BUG();
- }
- mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
- mm->task_size = mm->context.asce_limit;
- crst_table_free(mm, (unsigned long *) pgd);
- }
+
+ pgd = mm->pgd;
+ mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
+ mm->context.asce_limit = 1UL << 31;
+ mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
+ _ASCE_USER_BITS | _ASCE_TYPE_SEGMENT;
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+
if (current->active_mm == mm)
set_user_asce(mm);
}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index e595e89eac65..1ea8c07eab84 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -457,7 +457,7 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->dma_table = dma_alloc_cpu_table();
if (!zdev->dma_table) {
rc = -ENOMEM;
- goto out_clean;
+ goto out;
}
/*
@@ -477,18 +477,22 @@ int zpci_dma_init_device(struct zpci_dev *zdev)
zdev->iommu_bitmap = vzalloc(zdev->iommu_pages / 8);
if (!zdev->iommu_bitmap) {
rc = -ENOMEM;
- goto out_reg;
+ goto free_dma_table;
}
rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
(u64) zdev->dma_table);
if (rc)
- goto out_reg;
- return 0;
+ goto free_bitmap;
-out_reg:
+ return 0;
+free_bitmap:
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+free_dma_table:
dma_free_cpu_table(zdev->dma_table);
-out_clean:
+ zdev->dma_table = NULL;
+out:
return rc;
}
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 86a9bec18dab..bd3e8421b57c 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -115,7 +115,7 @@ static __initconst const u64 amd_hw_cache_event_ids
/*
* AMD Performance Monitor K7 and later.
*/
-static const u64 amd_perfmon_event_map[] =
+static const u64 amd_perfmon_event_map[PERF_COUNT_HW_MAX] =
{
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
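The change here is only the explicit PERF_COUNT_HW_MAX bound: with designated initializers, an array of that size has a defined slot for every generic event index, and entries left out read back as 0. A small self-contained sketch of that C property (the enum names are stand-ins, not the real perf constants):

#include <stdio.h>

enum { HW_CPU_CYCLES, HW_INSTRUCTIONS, HW_CACHE_MISSES, HW_MAX };

/* Sizing the array with HW_MAX means every generic index exists;
 * entries without an initializer read back as 0. */
static const unsigned long event_map[HW_MAX] = {
	[HW_CPU_CYCLES]   = 0x0076,
	[HW_INSTRUCTIONS] = 0x00c0,
};

int main(void)
{
	printf("%#lx\n", event_map[HW_CACHE_MISSES]);	/* prints 0 */
	return 0;
}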
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 68fa55b4d42e..aff79884e17d 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3639,6 +3639,7 @@ __init int intel_pmu_init(void)
case 78: /* 14nm Skylake Mobile */
case 94: /* 14nm Skylake Desktop */
+ case 85: /* 14nm Skylake Server */
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 6c3b7c1780c9..1ca5d1e7d4f2 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -63,7 +63,7 @@ static enum {
#define LBR_PLM (LBR_KERNEL | LBR_USER)
-#define LBR_SEL_MASK 0x1ff /* valid bits in LBR_SELECT */
+#define LBR_SEL_MASK 0x3ff /* valid bits in LBR_SELECT */
#define LBR_NOT_SUPP -1 /* LBR filter not supported */
#define LBR_IGN 0 /* ignored */
@@ -610,8 +610,10 @@ static int intel_pmu_setup_hw_lbr_filter(struct perf_event *event)
* The first 9 bits (LBR_SEL_MASK) in LBR_SELECT operate
* in suppress mode. So LBR_SELECT should be set to
* (~mask & LBR_SEL_MASK) | (mask & ~LBR_SEL_MASK)
+ * But the 10th bit LBR_CALL_STACK does not operate
+ * in suppress mode.
*/
- reg->config = mask ^ x86_pmu.lbr_sel_mask;
+ reg->config = mask ^ (x86_pmu.lbr_sel_mask & ~LBR_CALL_STACK);
if ((br_type & PERF_SAMPLE_BRANCH_NO_CYCLES) &&
(br_type & PERF_SAMPLE_BRANCH_NO_FLAGS) &&
diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c
index 6af7cf71d6b2..09a77dbc73c9 100644
--- a/arch/x86/events/intel/pt.c
+++ b/arch/x86/events/intel/pt.c
@@ -136,9 +136,21 @@ static int __init pt_pmu_hw_init(void)
struct dev_ext_attribute *de_attrs;
struct attribute **attrs;
size_t size;
+ u64 reg;
int ret;
long i;
+ if (boot_cpu_has(X86_FEATURE_VMX)) {
+ /*
+ * Intel SDM, 36.5 "Tracing post-VMXON" says that
+ * "IA32_VMX_MISC[bit 14]" being 1 means PT can trace
+ * post-VMXON.
+ */
+ rdmsrl(MSR_IA32_VMX_MISC, reg);
+ if (reg & BIT(14))
+ pt_pmu.vmx = true;
+ }
+
attrs = NULL;
for (i = 0; i < PT_CPUID_LEAVES; i++) {
@@ -269,20 +281,23 @@ static void pt_config(struct perf_event *event)
reg |= (event->attr.config & PT_CONFIG_MASK);
+ event->hw.config = reg;
wrmsrl(MSR_IA32_RTIT_CTL, reg);
}
-static void pt_config_start(bool start)
+static void pt_config_stop(struct perf_event *event)
{
- u64 ctl;
+ u64 ctl = READ_ONCE(event->hw.config);
+
+ /* may be already stopped by a PMI */
+ if (!(ctl & RTIT_CTL_TRACEEN))
+ return;
- rdmsrl(MSR_IA32_RTIT_CTL, ctl);
- if (start)
- ctl |= RTIT_CTL_TRACEEN;
- else
- ctl &= ~RTIT_CTL_TRACEEN;
+ ctl &= ~RTIT_CTL_TRACEEN;
wrmsrl(MSR_IA32_RTIT_CTL, ctl);
+ WRITE_ONCE(event->hw.config, ctl);
+
/*
* A wrmsr that disables trace generation serializes other PT
* registers and causes all data packets to be written to memory,
@@ -291,8 +306,7 @@ static void pt_config_start(bool start)
* The below WMB, separating data store and aux_head store matches
* the consumer's RMB that separates aux_head load and data load.
*/
- if (!start)
- wmb();
+ wmb();
}
static void pt_config_buffer(void *buf, unsigned int topa_idx,
@@ -942,11 +956,17 @@ void intel_pt_interrupt(void)
if (!ACCESS_ONCE(pt->handle_nmi))
return;
- pt_config_start(false);
+ /*
+ * If VMX is on and PT does not support it, don't touch anything.
+ */
+ if (READ_ONCE(pt->vmx_on))
+ return;
if (!event)
return;
+ pt_config_stop(event);
+
buf = perf_get_aux(&pt->handle);
if (!buf)
return;
@@ -983,6 +1003,35 @@ void intel_pt_interrupt(void)
}
}
+void intel_pt_handle_vmx(int on)
+{
+ struct pt *pt = this_cpu_ptr(&pt_ctx);
+ struct perf_event *event;
+ unsigned long flags;
+
+ /* PT plays nice with VMX, do nothing */
+ if (pt_pmu.vmx)
+ return;
+
+ /*
+ * VMXON will clear RTIT_CTL.TraceEn; we need to make
+ * sure to not try to set it while VMX is on. Disable
+ * interrupts to avoid racing with pmu callbacks;
+ * concurrent PMI should be handled fine.
+ */
+ local_irq_save(flags);
+ WRITE_ONCE(pt->vmx_on, on);
+
+ if (on) {
+ /* prevent pt_config_stop() from writing RTIT_CTL */
+ event = pt->handle.event;
+ if (event)
+ event->hw.config = 0;
+ }
+ local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(intel_pt_handle_vmx);
+
/*
* PMU callbacks
*/
@@ -992,6 +1041,9 @@ static void pt_event_start(struct perf_event *event, int mode)
struct pt *pt = this_cpu_ptr(&pt_ctx);
struct pt_buffer *buf = perf_get_aux(&pt->handle);
+ if (READ_ONCE(pt->vmx_on))
+ return;
+
if (!buf || pt_buffer_is_full(buf, pt)) {
event->hw.state = PERF_HES_STOPPED;
return;
@@ -1014,7 +1066,8 @@ static void pt_event_stop(struct perf_event *event, int mode)
* see comment in intel_pt_interrupt().
*/
ACCESS_ONCE(pt->handle_nmi) = 0;
- pt_config_start(false);
+
+ pt_config_stop(event);
if (event->hw.state == PERF_HES_STOPPED)
return;
diff --git a/arch/x86/events/intel/pt.h b/arch/x86/events/intel/pt.h
index 336878a5d205..3abb5f5cccc8 100644
--- a/arch/x86/events/intel/pt.h
+++ b/arch/x86/events/intel/pt.h
@@ -65,6 +65,7 @@ enum pt_capabilities {
struct pt_pmu {
struct pmu pmu;
u32 caps[PT_CPUID_REGS_NUM * PT_CPUID_LEAVES];
+ bool vmx;
};
/**
@@ -107,10 +108,12 @@ struct pt_buffer {
* struct pt - per-cpu pt context
* @handle: perf output handle
* @handle_nmi: do handle PT PMI on this cpu, there's an active event
+ * @vmx_on: 1 if VMX is ON on this cpu
*/
struct pt {
struct perf_output_handle handle;
int handle_nmi;
+ int vmx_on;
};
#endif /* __INTEL_PT_H__ */
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index 70c93f9b03ac..1705c9d75e44 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -718,6 +718,7 @@ static int __init rapl_pmu_init(void)
break;
case 60: /* Haswell */
case 69: /* Haswell-Celeron */
+ case 70: /* Haswell GT3e */
case 61: /* Broadwell */
case 71: /* Broadwell-H */
rapl_cntr_mask = RAPL_IDX_HSW;
diff --git a/arch/x86/include/asm/perf_event.h b/arch/x86/include/asm/perf_event.h
index 5a2ed3ed2f26..f353061bba1d 100644
--- a/arch/x86/include/asm/perf_event.h
+++ b/arch/x86/include/asm/perf_event.h
@@ -285,6 +285,10 @@ static inline void perf_events_lapic_init(void) { }
static inline void perf_check_microcode(void) { }
#endif
+#ifdef CONFIG_CPU_SUP_INTEL
+ extern void intel_pt_handle_vmx(int on);
+#endif
+
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)
extern void amd_pmu_enable_virt(void);
extern void amd_pmu_disable_virt(void);
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index ad59d70bcb1a..ef495511f019 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -256,7 +256,8 @@ static void clear_irq_vector(int irq, struct apic_chip_data *data)
struct irq_desc *desc;
int cpu, vector;
- BUG_ON(!data->cfg.vector);
+ if (!data->cfg.vector)
+ return;
vector = data->cfg.vector;
for_each_cpu_and(cpu, data->domain, cpu_online_mask)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 54cdbd2003fe..af1112980dd4 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -389,12 +389,6 @@ default_entry:
/* Make changes effective */
wrmsr
- /*
- * And make sure that all the mappings we set up have NX set from
- * the beginning.
- */
- orl $(1 << (_PAGE_BIT_NX - 32)), pa(__supported_pte_mask + 4)
-
enable_paging:
/*
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ee1c8a93871c..133679d520af 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3103,6 +3103,8 @@ static __init int vmx_disabled_by_bios(void)
static void kvm_cpu_vmxon(u64 addr)
{
+ intel_pt_handle_vmx(1);
+
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&addr), "m"(addr)
: "memory", "cc");
@@ -3172,6 +3174,8 @@ static void vmclear_local_loaded_vmcss(void)
static void kvm_cpu_vmxoff(void)
{
asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
+
+ intel_pt_handle_vmx(0);
}
static void hardware_disable(void)
diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
index 8bea84724a7d..f65a33f505b6 100644
--- a/arch/x86/mm/setup_nx.c
+++ b/arch/x86/mm/setup_nx.c
@@ -32,8 +32,9 @@ early_param("noexec", noexec_setup);
void x86_configure_nx(void)
{
- /* If disable_nx is set, clear NX on all new mappings going forward. */
- if (disable_nx)
+ if (boot_cpu_has(X86_FEATURE_NX) && !disable_nx)
+ __supported_pte_mask |= _PAGE_NX;
+ else
__supported_pte_mask &= ~_PAGE_NX;
}
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 9e2ba5c6e1dd..f42e78de1e10 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -27,6 +27,12 @@ static bool xen_pvspin = true;
static void xen_qlock_kick(int cpu)
{
+ int irq = per_cpu(lock_kicker_irq, cpu);
+
+ /* Don't kick if the target's kicker interrupt is not initialized. */
+ if (irq == -1)
+ return;
+
xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
}
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 94a1843b0426..0ede6d7e2568 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -538,7 +538,6 @@ static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
u64 *snap_features);
-static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
@@ -3127,9 +3126,6 @@ static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
struct rbd_device *rbd_dev = (struct rbd_device *)data;
int ret;
- if (!rbd_dev)
- return;
-
dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
rbd_dev->header_name, (unsigned long long)notify_id,
(unsigned int)opcode);
@@ -3263,6 +3259,9 @@ static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
ceph_osdc_cancel_event(rbd_dev->watch_event);
rbd_dev->watch_event = NULL;
+
+ dout("%s flushing notifies\n", __func__);
+ ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
}
/*
@@ -3642,21 +3641,14 @@ static void rbd_exists_validate(struct rbd_device *rbd_dev)
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
sector_t size;
- bool removing;
/*
- * Don't hold the lock while doing disk operations,
- * or lock ordering will conflict with the bdev mutex via:
- * rbd_add() -> blkdev_get() -> rbd_open()
+ * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
+ * try to update its size. If REMOVING is set, updating size
+ * is just useless work since the device can't be opened.
*/
- spin_lock_irq(&rbd_dev->lock);
- removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
- spin_unlock_irq(&rbd_dev->lock);
- /*
- * If the device is being removed, rbd_dev->disk has
- * been destroyed, so don't try to update its size
- */
- if (!removing) {
+ if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
+ !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
dout("setting size to %llu sectors", (unsigned long long)size);
set_capacity(rbd_dev->disk, size);
@@ -4191,7 +4183,7 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
__le64 features;
__le64 incompat;
} __attribute__ ((packed)) features_buf = { 0 };
- u64 incompat;
+ u64 unsup;
int ret;
ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
@@ -4204,9 +4196,12 @@ static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
if (ret < sizeof (features_buf))
return -ERANGE;
- incompat = le64_to_cpu(features_buf.incompat);
- if (incompat & ~RBD_FEATURES_SUPPORTED)
+ unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
+ if (unsup) {
+ rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
+ unsup);
return -ENXIO;
+ }
*snap_features = le64_to_cpu(features_buf.features);
@@ -5187,6 +5182,10 @@ out_err:
return ret;
}
+/*
+ * rbd_dev->header_rwsem must be locked for write and will be unlocked
+ * upon return.
+ */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
int ret;
@@ -5195,7 +5194,7 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
ret = rbd_dev_id_get(rbd_dev);
if (ret)
- return ret;
+ goto err_out_unlock;
BUILD_BUG_ON(DEV_NAME_LEN
< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
@@ -5236,8 +5235,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
/* Everything's ready. Announce the disk to the world. */
set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
- add_disk(rbd_dev->disk);
+ up_write(&rbd_dev->header_rwsem);
+ add_disk(rbd_dev->disk);
pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
(unsigned long long) rbd_dev->mapping.size);
@@ -5252,6 +5252,8 @@ err_out_blkdev:
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
rbd_dev_id_put(rbd_dev);
+err_out_unlock:
+ up_write(&rbd_dev->header_rwsem);
return ret;
}
@@ -5442,6 +5444,7 @@ static ssize_t do_rbd_add(struct bus_type *bus,
spec = NULL; /* rbd_dev now owns this */
rbd_opts = NULL; /* rbd_dev now owns this */
+ down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0)
goto err_out_rbd_dev;
@@ -5471,6 +5474,7 @@ out:
return rc;
err_out_rbd_dev:
+ up_write(&rbd_dev->header_rwsem);
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
@@ -5577,12 +5581,6 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
return ret;
rbd_dev_header_unwatch_sync(rbd_dev);
- /*
- * flush remaining watch callbacks - these must be complete
- * before the osd_client is shutdown
- */
- dout("%s: flushing notifies", __func__);
- ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
* Don't free anything from rbd_dev->disk until after all
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 10a5cfeae8c5..5f1147fa9239 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -193,12 +193,8 @@ unsigned int dbs_update(struct cpufreq_policy *policy)
wall_time = cur_wall_time - j_cdbs->prev_cpu_wall;
j_cdbs->prev_cpu_wall = cur_wall_time;
- if (cur_idle_time <= j_cdbs->prev_cpu_idle) {
- idle_time = 0;
- } else {
- idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
- j_cdbs->prev_cpu_idle = cur_idle_time;
- }
+ idle_time = cur_idle_time - j_cdbs->prev_cpu_idle;
+ j_cdbs->prev_cpu_idle = cur_idle_time;
if (ignore_nice) {
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 30fe323c4551..f502d5b90c25 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -813,6 +813,11 @@ static int core_get_max_pstate(void)
if (err)
goto skip_tar;
+ /* For level 1 and 2, bits[23:16] contain the ratio */
+ if (tdp_ctrl)
+ tdp_ratio >>= 16;
+
+ tdp_ratio &= 0xff; /* ratios are only 8 bits long */
if (tdp_ratio - 1 == tar) {
max_pstate = tar;
pr_debug("max_pstate=TAC %x\n", max_pstate);
diff --git a/drivers/crypto/talitos.c b/drivers/crypto/talitos.c
index a0d4a08313ae..aae05547b924 100644
--- a/drivers/crypto/talitos.c
+++ b/drivers/crypto/talitos.c
@@ -63,6 +63,14 @@ static void to_talitos_ptr(struct talitos_ptr *ptr, dma_addr_t dma_addr,
ptr->eptr = upper_32_bits(dma_addr);
}
+static void copy_talitos_ptr(struct talitos_ptr *dst_ptr,
+ struct talitos_ptr *src_ptr, bool is_sec1)
+{
+ dst_ptr->ptr = src_ptr->ptr;
+ if (!is_sec1)
+ dst_ptr->eptr = src_ptr->eptr;
+}
+
static void to_talitos_ptr_len(struct talitos_ptr *ptr, unsigned int len,
bool is_sec1)
{
@@ -1083,21 +1091,20 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
sg_count = dma_map_sg(dev, areq->src, edesc->src_nents ?: 1,
(areq->src == areq->dst) ? DMA_BIDIRECTIONAL
: DMA_TO_DEVICE);
-
/* hmac data */
desc->ptr[1].len = cpu_to_be16(areq->assoclen);
if (sg_count > 1 &&
(ret = sg_to_link_tbl_offset(areq->src, sg_count, 0,
areq->assoclen,
&edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
-
to_talitos_ptr(&desc->ptr[1], edesc->dma_link_tbl + tbl_off *
sizeof(struct talitos_ptr), 0);
desc->ptr[1].j_extent = DESC_PTR_LNKTBL_JUMP;
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len, DMA_BIDIRECTIONAL);
+
+ tbl_off += ret;
} else {
to_talitos_ptr(&desc->ptr[1], sg_dma_address(areq->src), 0);
desc->ptr[1].j_extent = 0;
@@ -1126,11 +1133,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
if (edesc->desc.hdr & DESC_HDR_MODE1_MDEU_CICV)
sg_link_tbl_len += authsize;
- if (sg_count > 1 &&
- (ret = sg_to_link_tbl_offset(areq->src, sg_count, areq->assoclen,
- sg_link_tbl_len,
- &edesc->link_tbl[tbl_off])) > 1) {
- tbl_off += ret;
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src) +
+ areq->assoclen, 0);
+ } else if ((ret = sg_to_link_tbl_offset(areq->src, sg_count,
+ areq->assoclen, sg_link_tbl_len,
+ &edesc->link_tbl[tbl_off])) >
+ 1) {
desc->ptr[4].j_extent |= DESC_PTR_LNKTBL_JUMP;
to_talitos_ptr(&desc->ptr[4], edesc->dma_link_tbl +
tbl_off *
@@ -1138,8 +1147,10 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
dma_sync_single_for_device(dev, edesc->dma_link_tbl,
edesc->dma_len,
DMA_BIDIRECTIONAL);
- } else
- to_talitos_ptr(&desc->ptr[4], sg_dma_address(areq->src), 0);
+ tbl_off += ret;
+ } else {
+ copy_talitos_ptr(&desc->ptr[4], &edesc->link_tbl[tbl_off], 0);
+ }
/* cipher out */
desc->ptr[5].len = cpu_to_be16(cryptlen);
@@ -1151,11 +1162,13 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->icv_ool = false;
- if (sg_count > 1 &&
- (sg_count = sg_to_link_tbl_offset(areq->dst, sg_count,
+ if (sg_count == 1) {
+ to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst) +
+ areq->assoclen, 0);
+ } else if ((sg_count =
+ sg_to_link_tbl_offset(areq->dst, sg_count,
areq->assoclen, cryptlen,
- &edesc->link_tbl[tbl_off])) >
- 1) {
+ &edesc->link_tbl[tbl_off])) > 1) {
struct talitos_ptr *tbl_ptr = &edesc->link_tbl[tbl_off];
to_talitos_ptr(&desc->ptr[5], edesc->dma_link_tbl +
@@ -1178,8 +1191,9 @@ static int ipsec_esp(struct talitos_edesc *edesc, struct aead_request *areq,
edesc->dma_len, DMA_BIDIRECTIONAL);
edesc->icv_ool = true;
- } else
- to_talitos_ptr(&desc->ptr[5], sg_dma_address(areq->dst), 0);
+ } else {
+ copy_talitos_ptr(&desc->ptr[5], &edesc->link_tbl[tbl_off], 0);
+ }
/* iv out */
map_single_talitos_ptr(dev, &desc->ptr[6], ivsize, ctx->iv,
@@ -2629,21 +2643,11 @@ struct talitos_crypto_alg {
struct talitos_alg_template algt;
};
-static int talitos_cra_init(struct crypto_tfm *tfm)
+static int talitos_init_common(struct talitos_ctx *ctx,
+ struct talitos_crypto_alg *talitos_alg)
{
- struct crypto_alg *alg = tfm->__crt_alg;
- struct talitos_crypto_alg *talitos_alg;
- struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
struct talitos_private *priv;
- if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
- talitos_alg = container_of(__crypto_ahash_alg(alg),
- struct talitos_crypto_alg,
- algt.alg.hash);
- else
- talitos_alg = container_of(alg, struct talitos_crypto_alg,
- algt.alg.crypto);
-
/* update context with ptr to dev */
ctx->dev = talitos_alg->dev;
@@ -2661,10 +2665,33 @@ static int talitos_cra_init(struct crypto_tfm *tfm)
return 0;
}
+static int talitos_cra_init(struct crypto_tfm *tfm)
+{
+ struct crypto_alg *alg = tfm->__crt_alg;
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_tfm_ctx(tfm);
+
+ if ((alg->cra_flags & CRYPTO_ALG_TYPE_MASK) == CRYPTO_ALG_TYPE_AHASH)
+ talitos_alg = container_of(__crypto_ahash_alg(alg),
+ struct talitos_crypto_alg,
+ algt.alg.hash);
+ else
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.crypto);
+
+ return talitos_init_common(ctx, talitos_alg);
+}
+
static int talitos_cra_init_aead(struct crypto_aead *tfm)
{
- talitos_cra_init(crypto_aead_tfm(tfm));
- return 0;
+ struct aead_alg *alg = crypto_aead_alg(tfm);
+ struct talitos_crypto_alg *talitos_alg;
+ struct talitos_ctx *ctx = crypto_aead_ctx(tfm);
+
+ talitos_alg = container_of(alg, struct talitos_crypto_alg,
+ algt.alg.aead);
+
+ return talitos_init_common(ctx, talitos_alg);
}
static int talitos_cra_init_ahash(struct crypto_tfm *tfm)
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 01087a38da22..792bdae2b91d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1866,7 +1866,7 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
i7_dev = get_i7core_dev(mce->socketid);
if (!i7_dev)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
mci = i7_dev->mci;
pvt = mci->pvt_info;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 468447aff8eb..8bf745d2da7e 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -3168,7 +3168,7 @@ static int sbridge_mce_check_error(struct notifier_block *nb, unsigned long val,
mci = get_mci_for_node_id(mce->socketid);
if (!mci)
- return NOTIFY_BAD;
+ return NOTIFY_DONE;
pvt = mci->pvt_info;
/*
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 0ac594c0a234..34b741940494 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -202,29 +202,44 @@ static const struct variable_validate variable_validate[] = {
{ NULL_GUID, "", NULL },
};
+/*
+ * Check if @var_name matches the pattern given in @match_name.
+ *
+ * @var_name: an array of @len non-NUL characters.
+ * @match_name: a NUL-terminated pattern string, optionally ending in "*". A
+ * final "*" character matches any trailing characters @var_name,
+ * including the case when there are none left in @var_name.
+ * @match: on output, the number of non-wildcard characters in @match_name
+ * that @var_name matches, regardless of the return value.
+ * @return: whether @var_name fully matches @match_name.
+ */
static bool
variable_matches(const char *var_name, size_t len, const char *match_name,
int *match)
{
for (*match = 0; ; (*match)++) {
char c = match_name[*match];
- char u = var_name[*match];
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
+ switch (c) {
+ case '*':
+ /* Wildcard in @match_name means we've matched. */
return true;
- /* Case sensitive match */
- if (!c && *match == len)
- return true;
+ case '\0':
+ /* @match_name has ended. Has @var_name too? */
+ return (*match == len);
- if (c != u)
+ default:
+ /*
+ * We've reached a non-wildcard char in @match_name.
+ * Continue only if there's an identical character in
+ * @var_name.
+ */
+ if (*match < len && c == var_name[*match])
+ continue;
return false;
-
- if (!c)
- return true;
+ }
}
- return true;
}
bool
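A minimal userspace sketch (not part of the patch) of the matching behaviour the new comment documents, with a couple of example patterns; the "Boot*" pattern and the helper name are illustrative assumptions only.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* @var_name is not NUL-terminated; @match_name may end in a "*" wildcard. */
static bool matches(const char *var_name, size_t len,
                    const char *match_name, int *match)
{
        for (*match = 0; ; (*match)++) {
                char c = match_name[*match];

                switch (c) {
                case '*':
                        /* wildcard swallows any remaining characters */
                        return true;
                case '\0':
                        /* pattern ended; did the variable name end too? */
                        return *match == (int)len;
                default:
                        if ((size_t)*match < len && c == var_name[*match])
                                continue;
                        return false;
                }
        }
}

int main(void)
{
        int m;

        printf("%d\n", matches("Boot0001", 8, "Boot*", &m));  /* 1, m == 4 */
        printf("%d\n", matches("Boot", 4, "Boot*", &m));       /* 1, m == 4 */
        printf("%d\n", matches("BootOrder", 9, "Boot", &m));   /* 0, m == 4 */
        return 0;
}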
diff --git a/drivers/firmware/psci.c b/drivers/firmware/psci.c
index 11bfee8b79a9..b5d05807e6ec 100644
--- a/drivers/firmware/psci.c
+++ b/drivers/firmware/psci.c
@@ -360,7 +360,7 @@ static struct cpuidle_ops psci_cpuidle_ops __initdata = {
.init = psci_dt_cpu_init_idle,
};
-CPUIDLE_METHOD_OF_DECLARE(psci, "arm,psci", &psci_cpuidle_ops);
+CPUIDLE_METHOD_OF_DECLARE(psci, "psci", &psci_cpuidle_ops);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index 0020a0ea43ff..35a1248aaa77 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -63,10 +63,6 @@ bool amdgpu_has_atpx(void) {
return amdgpu_atpx_priv.atpx_detected;
}
-bool amdgpu_has_atpx_dgpu_power_cntl(void) {
- return amdgpu_atpx_priv.atpx.functions.power_cntl;
-}
-
/**
* amdgpu_atpx_call - call an ATPX method
*
@@ -146,6 +142,13 @@ static void amdgpu_atpx_parse_functions(struct amdgpu_atpx_functions *f, u32 mas
*/
static int amdgpu_atpx_validate(struct amdgpu_atpx *atpx)
{
+ /* make sure required functions are enabled */
+ /* dGPU power control is required */
+ if (atpx->functions.power_cntl == false) {
+ printk("ATPX dGPU power cntl not present, forcing\n");
+ atpx->functions.power_cntl = true;
+ }
+
if (atpx->functions.px_params) {
union acpi_object *info;
struct atpx_px_params output;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 612117478b57..2139da773da6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -62,12 +62,6 @@ static const char *amdgpu_asic_name[] = {
"LAST",
};
-#if defined(CONFIG_VGA_SWITCHEROO)
-bool amdgpu_has_atpx_dgpu_power_cntl(void);
-#else
-static inline bool amdgpu_has_atpx_dgpu_power_cntl(void) { return false; }
-#endif
-
bool amdgpu_device_is_px(struct drm_device *dev)
{
struct amdgpu_device *adev = dev->dev_private;
@@ -1485,7 +1479,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
if (amdgpu_runtime_pm == 1)
runtime = true;
- if (amdgpu_device_is_px(ddev) && amdgpu_has_atpx_dgpu_power_cntl())
+ if (amdgpu_device_is_px(ddev))
runtime = true;
vga_switcheroo_register_client(adev->pdev, &amdgpu_switcheroo_ops, runtime);
if (runtime)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 05b0353d3880..a4a2e6cc61bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -910,7 +910,10 @@ static int gmc_v7_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v7_0_sw_init(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 02deb3229405..7a9db2c72c89 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -870,7 +870,10 @@ static int gmc_v8_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
#define mmMC_SEQ_MISC0_FIJI 0xA71
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index e17fbdaf874b..71ea0521ea96 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1796,6 +1796,11 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
req_payload.start_slot = cur_slots;
if (mgr->proposed_vcpis[i]) {
port = container_of(mgr->proposed_vcpis[i], struct drm_dp_mst_port, vcpi);
+ port = drm_dp_get_validated_port_ref(mgr, port);
+ if (!port) {
+ mutex_unlock(&mgr->payload_lock);
+ return -EINVAL;
+ }
req_payload.num_slots = mgr->proposed_vcpis[i]->num_slots;
req_payload.vcpi = mgr->proposed_vcpis[i]->vcpi;
} else {
@@ -1823,6 +1828,9 @@ int drm_dp_update_payload_part1(struct drm_dp_mst_topology_mgr *mgr)
mgr->payloads[i].payload_state = req_payload.payload_state;
}
cur_slots += req_payload.num_slots;
+
+ if (port)
+ drm_dp_put_port(port);
}
for (i = 0; i < mgr->max_payloads; i++) {
@@ -2128,6 +2136,8 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
if (mgr->mst_primary) {
int sret;
+ u8 guid[16];
+
sret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, mgr->dpcd, DP_RECEIVER_CAP_SIZE);
if (sret != DP_RECEIVER_CAP_SIZE) {
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
@@ -2142,6 +2152,16 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr)
ret = -1;
goto out_unlock;
}
+
+ /* Some hubs forget their guids after they resume */
+ sret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+ if (sret != 16) {
+ DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
+ ret = -1;
+ goto out_unlock;
+ }
+ drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
ret = 0;
} else
ret = -1;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 09198d0b5814..306dde18a94a 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -572,6 +572,24 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
goto fail;
}
+ /*
+ * Set the GPU linear window to be at the end of the DMA window, where
+ * the CMA area is likely to reside. This ensures that we are able to
+ * map the command buffers while having the linear window overlap as
+ * much RAM as possible, so we can optimize mappings for other buffers.
+ *
+ * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
+ * to different views of the memory on the individual engines.
+ */
+ if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
+ (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
+ u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
+ if (dma_mask < PHYS_OFFSET + SZ_2G)
+ gpu->memory_base = PHYS_OFFSET;
+ else
+ gpu->memory_base = dma_mask - SZ_2G + 1;
+ }
+
ret = etnaviv_hw_reset(gpu);
if (ret)
goto fail;
@@ -1566,7 +1584,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct etnaviv_gpu *gpu;
- u32 dma_mask;
int err = 0;
gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
@@ -1576,18 +1593,6 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- /*
- * Set the GPU linear window to be at the end of the DMA window, where
- * the CMA area is likely to reside. This ensures that we are able to
- * map the command buffers while having the linear window overlap as
- * much RAM as possible, so we can optimize mappings for other buffers.
- */
- dma_mask = (u32)dma_get_required_mask(dev);
- if (dma_mask < PHYS_OFFSET + SZ_2G)
- gpu->memory_base = PHYS_OFFSET;
- else
- gpu->memory_base = dma_mask - SZ_2G + 1;
-
/* Map registers: */
gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
if (IS_ERR(gpu->mmio))
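A standalone sketch (not part of the patch) working through the linear-window placement that the relocated comment describes; PHYS_OFFSET and the DMA mask below are assumed example values.

#include <stdint.h>
#include <stdio.h>

#define SZ_2G       0x80000000u
#define PHYS_OFFSET 0x40000000u        /* assumed start of RAM */

int main(void)
{
        uint32_t dma_mask = 0xffffffffu;  /* assumed 32-bit DMA capability */
        uint32_t memory_base;

        /* Place the linear window at the end of the DMA window unless that
         * window is too small, in which case start it at the base of RAM. */
        if (dma_mask < PHYS_OFFSET + SZ_2G)
                memory_base = PHYS_OFFSET;
        else
                memory_base = dma_mask - SZ_2G + 1;

        printf("memory_base = 0x%08x\n", memory_base);  /* prints 0x80000000 */
        return 0;
}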
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 76c4bdf21b20..34f7a29d9366 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2608,10 +2608,152 @@ static void evergreen_agp_enable(struct radeon_device *rdev)
WREG32(VM_CONTEXT1_CNTL, 0);
}
+static const unsigned ni_dig_offsets[] =
+{
+ NI_DIG0_REGISTER_OFFSET,
+ NI_DIG1_REGISTER_OFFSET,
+ NI_DIG2_REGISTER_OFFSET,
+ NI_DIG3_REGISTER_OFFSET,
+ NI_DIG4_REGISTER_OFFSET,
+ NI_DIG5_REGISTER_OFFSET
+};
+
+static const unsigned ni_tx_offsets[] =
+{
+ NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1,
+ NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1
+};
+
+static const unsigned evergreen_dp_offsets[] =
+{
+ EVERGREEN_DP0_REGISTER_OFFSET,
+ EVERGREEN_DP1_REGISTER_OFFSET,
+ EVERGREEN_DP2_REGISTER_OFFSET,
+ EVERGREEN_DP3_REGISTER_OFFSET,
+ EVERGREEN_DP4_REGISTER_OFFSET,
+ EVERGREEN_DP5_REGISTER_OFFSET
+};
+
+
+/*
+ * Assumption is that EVERGREEN_CRTC_MASTER_EN is enabled for the requested crtc.
+ * We go from crtc to connector, which is not reliable since it is the
+ * opposite of the usual direction. If the crtc is enabled, find the
+ * dig_fe which selects this crtc and ensure that it is enabled. If such
+ * a dig_fe is found, find the dig_be which selects that dig_fe and
+ * ensure that it is enabled and in DP_SST mode.
+ * If UNIPHY_PLL_CONTROL1 is enabled, we should disconnect the timing
+ * from the dp symbol clocks.
+ */
+static bool evergreen_is_dp_sst_stream_enabled(struct radeon_device *rdev,
+ unsigned crtc_id, unsigned *ret_dig_fe)
+{
+ unsigned i;
+ unsigned dig_fe;
+ unsigned dig_be;
+ unsigned dig_en_be;
+ unsigned uniphy_pll;
+ unsigned digs_fe_selected;
+ unsigned dig_be_mode;
+ unsigned dig_fe_mask;
+ bool is_enabled = false;
+ bool found_crtc = false;
+
+ /* loop through all running dig_fe to find selected crtc */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_fe = RREG32(NI_DIG_FE_CNTL + ni_dig_offsets[i]);
+ if (dig_fe & NI_DIG_FE_CNTL_SYMCLK_FE_ON &&
+ crtc_id == NI_DIG_FE_CNTL_SOURCE_SELECT(dig_fe)) {
+ /* found running pipe */
+ found_crtc = true;
+ dig_fe_mask = 1 << i;
+ dig_fe = i;
+ break;
+ }
+ }
+
+ if (found_crtc) {
+ /* loop through all running dig_be to find selected dig_fe */
+ for (i = 0; i < ARRAY_SIZE(ni_dig_offsets); i++) {
+ dig_be = RREG32(NI_DIG_BE_CNTL + ni_dig_offsets[i]);
+ /* is our dig_fe selected by this dig_be? */
+ digs_fe_selected = NI_DIG_BE_CNTL_FE_SOURCE_SELECT(dig_be);
+ dig_be_mode = NI_DIG_FE_CNTL_MODE(dig_be);
+ if (dig_fe_mask & digs_fe_selected &&
+ /* is dig_be in SST mode? */
+ dig_be_mode == NI_DIG_BE_DPSST) {
+ dig_en_be = RREG32(NI_DIG_BE_EN_CNTL +
+ ni_dig_offsets[i]);
+ uniphy_pll = RREG32(NI_DCIO_UNIPHY0_PLL_CONTROL1 +
+ ni_tx_offsets[i]);
+ /* dig_be is enabled and tx is running */
+ if (dig_en_be & NI_DIG_BE_EN_CNTL_ENABLE &&
+ dig_en_be & NI_DIG_BE_EN_CNTL_SYMBCLK_ON &&
+ uniphy_pll & NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE) {
+ is_enabled = true;
+ *ret_dig_fe = dig_fe;
+ break;
+ }
+ }
+ }
+ }
+
+ return is_enabled;
+}
+
+/*
+ * Blank the dig when in DP SST mode;
+ * the dig ignores crtc timing
+ */
+static void evergreen_blank_dp_output(struct radeon_device *rdev,
+ unsigned dig_fe)
+{
+ unsigned stream_ctrl;
+ unsigned fifo_ctrl;
+ unsigned counter = 0;
+
+ if (dig_fe >= ARRAY_SIZE(evergreen_dp_offsets)) {
+ DRM_ERROR("invalid dig_fe %d\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ if (!(stream_ctrl & EVERGREEN_DP_VID_STREAM_CNTL_ENABLE)) {
+ DRM_ERROR("dig %d , should be enable\n", dig_fe);
+ return;
+ }
+
+ stream_ctrl &= ~EVERGREEN_DP_VID_STREAM_CNTL_ENABLE;
+ WREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe], stream_ctrl);
+
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ while (counter < 32 && stream_ctrl & EVERGREEN_DP_VID_STREAM_STATUS) {
+ msleep(1);
+ counter++;
+ stream_ctrl = RREG32(EVERGREEN_DP_VID_STREAM_CNTL +
+ evergreen_dp_offsets[dig_fe]);
+ }
+ if (counter >= 32)
+ DRM_ERROR("counter exceeds %d\n", counter);
+
+ fifo_ctrl = RREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe]);
+ fifo_ctrl |= EVERGREEN_DP_STEER_FIFO_RESET;
+ WREG32(EVERGREEN_DP_STEER_FIFO + evergreen_dp_offsets[dig_fe], fifo_ctrl);
+
+}
+
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
u32 crtc_enabled, tmp, frame_count, blackout;
int i, j;
+ unsigned dig_fe;
if (!ASIC_IS_NODCE(rdev)) {
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
@@ -2651,7 +2793,17 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
break;
udelay(1);
}
-
+ /*
+ * We should disable the dig if it drives a DP SST stream, but we are
+ * in radeon_device_init and the topology is unknown; it only becomes
+ * available after radeon_modeset_init. radeon_atom_encoder_dpms_dig
+ * would do the job if we could initialize it properly, so for now we
+ * do it manually here.
+ */
+ if (ASIC_IS_DCE5(rdev) &&
+ evergreen_is_dp_sst_stream_enabled(rdev, i ,&dig_fe))
+ evergreen_blank_dp_output(rdev, dig_fe);
+ /* we could remove the 6 lines below */
/* XXX this is a hack to avoid strange behavior with EFI on certain systems */
WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index aa939dfed3a3..b436badf9efa 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -250,8 +250,43 @@
/* HDMI blocks at 0x7030, 0x7c30, 0x10830, 0x11430, 0x12030, 0x12c30 */
#define EVERGREEN_HDMI_BASE 0x7030
+/* DIG block */
+#define NI_DIG0_REGISTER_OFFSET (0x7000 - 0x7000)
+#define NI_DIG1_REGISTER_OFFSET (0x7C00 - 0x7000)
+#define NI_DIG2_REGISTER_OFFSET (0x10800 - 0x7000)
+#define NI_DIG3_REGISTER_OFFSET (0x11400 - 0x7000)
+#define NI_DIG4_REGISTER_OFFSET (0x12000 - 0x7000)
+#define NI_DIG5_REGISTER_OFFSET (0x12C00 - 0x7000)
+
+
+#define NI_DIG_FE_CNTL 0x7000
+# define NI_DIG_FE_CNTL_SOURCE_SELECT(x) ((x) & 0x3)
+# define NI_DIG_FE_CNTL_SYMCLK_FE_ON (1 << 24)
+
+
+#define NI_DIG_BE_CNTL 0x7140
+# define NI_DIG_BE_CNTL_FE_SOURCE_SELECT(x) (((x) >> 8) & 0x3F)
+# define NI_DIG_FE_CNTL_MODE(x) (((x) >> 16) & 0x7)
+
+#define NI_DIG_BE_EN_CNTL 0x7144
+# define NI_DIG_BE_EN_CNTL_ENABLE (1 << 0)
+# define NI_DIG_BE_EN_CNTL_SYMBCLK_ON (1 << 8)
+# define NI_DIG_BE_DPSST 0
/* Display Port block */
+#define EVERGREEN_DP0_REGISTER_OFFSET (0x730C - 0x730C)
+#define EVERGREEN_DP1_REGISTER_OFFSET (0x7F0C - 0x730C)
+#define EVERGREEN_DP2_REGISTER_OFFSET (0x10B0C - 0x730C)
+#define EVERGREEN_DP3_REGISTER_OFFSET (0x1170C - 0x730C)
+#define EVERGREEN_DP4_REGISTER_OFFSET (0x1230C - 0x730C)
+#define EVERGREEN_DP5_REGISTER_OFFSET (0x12F0C - 0x730C)
+
+
+#define EVERGREEN_DP_VID_STREAM_CNTL 0x730C
+# define EVERGREEN_DP_VID_STREAM_CNTL_ENABLE (1 << 0)
+# define EVERGREEN_DP_VID_STREAM_STATUS (1 << 16)
+#define EVERGREEN_DP_STEER_FIFO 0x7310
+# define EVERGREEN_DP_STEER_FIFO_RESET (1 << 0)
#define EVERGREEN_DP_SEC_CNTL 0x7280
# define EVERGREEN_DP_SEC_STREAM_ENABLE (1 << 0)
# define EVERGREEN_DP_SEC_ASP_ENABLE (1 << 4)
@@ -266,4 +301,15 @@
# define EVERGREEN_DP_SEC_N_BASE_MULTIPLE(x) (((x) & 0xf) << 24)
# define EVERGREEN_DP_SEC_SS_EN (1 << 28)
+/* DCIO_UNIPHY block */
+#define NI_DCIO_UNIPHY0_UNIPHY_TX_CONTROL1 (0x6600 - 0x6600)
+#define NI_DCIO_UNIPHY1_UNIPHY_TX_CONTROL1 (0x6640 - 0x6600)
+#define NI_DCIO_UNIPHY2_UNIPHY_TX_CONTROL1 (0x6680 - 0x6600)
+#define NI_DCIO_UNIPHY3_UNIPHY_TX_CONTROL1 (0x66C0 - 0x6600)
+#define NI_DCIO_UNIPHY4_UNIPHY_TX_CONTROL1 (0x6700 - 0x6600)
+#define NI_DCIO_UNIPHY5_UNIPHY_TX_CONTROL1 (0x6740 - 0x6600)
+
+#define NI_DCIO_UNIPHY0_PLL_CONTROL1 0x6618
+# define NI_DCIO_UNIPHY0_PLL_CONTROL1_ENABLE (1 << 0)
+
#endif
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 4cbf26555093..e3daafa1be13 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -230,22 +230,13 @@ EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
void ttm_bo_move_to_lru_tail(struct ttm_buffer_object *bo)
{
- struct ttm_bo_device *bdev = bo->bdev;
- struct ttm_mem_type_manager *man;
+ int put_count = 0;
lockdep_assert_held(&bo->resv->lock.base);
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- list_del_init(&bo->swap);
- list_del_init(&bo->lru);
-
- } else {
- if (bo->ttm && !(bo->ttm->page_flags & TTM_PAGE_FLAG_SG))
- list_move_tail(&bo->swap, &bo->glob->swap_lru);
-
- man = &bdev->man[bo->mem.mem_type];
- list_move_tail(&bo->lru, &man->lru);
- }
+ put_count = ttm_bo_del_from_lru(bo);
+ ttm_bo_list_ref_sub(bo, put_count, true);
+ ttm_bo_add_to_lru(bo);
}
EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 4854dac87e24..5fd1fd06effc 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -267,11 +267,23 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
return 0;
}
+static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ if (crtc->state->event)
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+}
+
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
.enable = virtio_gpu_crtc_enable,
.disable = virtio_gpu_crtc_disable,
.mode_set_nofb = virtio_gpu_crtc_mode_set_nofb,
.atomic_check = virtio_gpu_crtc_atomic_check,
+ .atomic_flush = virtio_gpu_crtc_atomic_flush,
};
static void virtio_gpu_enc_mode_set(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 723ba16c6084..1a1a87cbf109 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -3293,19 +3293,19 @@ static const struct vmw_cmd_entry vmw_cmd_entries[SVGA_3D_CMD_MAX] = {
&vmw_cmd_dx_cid_check, true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_DEFINE_QUERY, &vmw_cmd_dx_define_query,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_DESTROY_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_BIND_QUERY, &vmw_cmd_dx_bind_query,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_QUERY_OFFSET,
- &vmw_cmd_ok, true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_ok,
+ &vmw_cmd_dx_cid_check, true, false, true),
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_BEGIN_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_ok,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_END_QUERY, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_READBACK_QUERY, &vmw_cmd_invalid,
true, false, true),
- VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_invalid,
+ VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_PREDICATION, &vmw_cmd_dx_cid_check,
true, false, true),
VMW_CMD_DEF(SVGA_3D_CMD_DX_SET_VIEWPORTS, &vmw_cmd_dx_cid_check,
true, false, true),
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index 3b1faf7862a5..679a4cb98ee3 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -573,9 +573,9 @@ static int vmw_fb_set_par(struct fb_info *info)
mode = old_mode;
old_mode = NULL;
} else if (!vmw_kms_validate_mode_vram(vmw_priv,
- mode->hdisplay *
- (var->bits_per_pixel + 7) / 8,
- mode->vdisplay)) {
+ mode->hdisplay *
+ DIV_ROUND_UP(var->bits_per_pixel, 8),
+ mode->vdisplay)) {
drm_mode_destroy(vmw_priv->dev, mode);
return -EINVAL;
}
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index faa8e6821fea..0967e1a5b3a2 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -975,10 +975,10 @@ config I2C_XLR
config I2C_XLP9XX
tristate "XLP9XX I2C support"
- depends on CPU_XLP || COMPILE_TEST
+ depends on CPU_XLP || ARCH_VULCAN || COMPILE_TEST
help
This driver enables support for the on-chip I2C interface of
- the Broadcom XLP9xx/XLP5xx MIPS processors.
+ the Broadcom XLP9xx/XLP5xx MIPS and Vulcan ARM64 processors.
This driver can also be built as a module. If so, the module will
be called i2c-xlp9xx.
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index 714bdc837769..b167ab25310a 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -116,8 +116,8 @@ struct cpm_i2c {
cbd_t __iomem *rbase;
u_char *txbuf[CPM_MAXBD];
u_char *rxbuf[CPM_MAXBD];
- u32 txdma[CPM_MAXBD];
- u32 rxdma[CPM_MAXBD];
+ dma_addr_t txdma[CPM_MAXBD];
+ dma_addr_t rxdma[CPM_MAXBD];
};
static irqreturn_t cpm_i2c_interrupt(int irq, void *dev_id)
diff --git a/drivers/i2c/busses/i2c-exynos5.c b/drivers/i2c/busses/i2c-exynos5.c
index b29c7500461a..f54ece8fce78 100644
--- a/drivers/i2c/busses/i2c-exynos5.c
+++ b/drivers/i2c/busses/i2c-exynos5.c
@@ -671,7 +671,9 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
return -EIO;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_enable(i2c->clk);
+ if (ret)
+ return ret;
for (i = 0; i < num; i++, msgs++) {
stop = (i == num - 1);
@@ -695,7 +697,7 @@ static int exynos5_i2c_xfer(struct i2c_adapter *adap,
}
out:
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
return ret;
}
@@ -747,7 +749,9 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
return -ENOENT;
}
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
@@ -799,6 +803,10 @@ static int exynos5_i2c_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, i2c);
+ clk_disable(i2c->clk);
+
+ return 0;
+
err_clk:
clk_disable_unprepare(i2c->clk);
return ret;
@@ -810,6 +818,8 @@ static int exynos5_i2c_remove(struct platform_device *pdev)
i2c_del_adapter(&i2c->adap);
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -821,6 +831,8 @@ static int exynos5_i2c_suspend_noirq(struct device *dev)
i2c->suspended = 1;
+ clk_unprepare(i2c->clk);
+
return 0;
}
@@ -830,7 +842,9 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
struct exynos5_i2c *i2c = platform_get_drvdata(pdev);
int ret = 0;
- clk_prepare_enable(i2c->clk);
+ ret = clk_prepare_enable(i2c->clk);
+ if (ret)
+ return ret;
ret = exynos5_hsi2c_clock_setup(i2c);
if (ret) {
@@ -839,7 +853,7 @@ static int exynos5_i2c_resume_noirq(struct device *dev)
}
exynos5_i2c_init(i2c);
- clk_disable_unprepare(i2c->clk);
+ clk_disable(i2c->clk);
i2c->suspended = 0;
return 0;
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 7ba795b24e75..1c8707710098 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -75,6 +75,7 @@
/* PCI DIDs for the Intel SMBus Message Transport (SMT) Devices */
#define PCI_DEVICE_ID_INTEL_S1200_SMT0 0x0c59
#define PCI_DEVICE_ID_INTEL_S1200_SMT1 0x0c5a
+#define PCI_DEVICE_ID_INTEL_DNV_SMT 0x19ac
#define PCI_DEVICE_ID_INTEL_AVOTON_SMT 0x1f15
#define ISMT_DESC_ENTRIES 2 /* number of descriptor entries */
@@ -180,6 +181,7 @@ struct ismt_priv {
static const struct pci_device_id ismt_ids[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT0) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_S1200_SMT1) },
+ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMT) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_AVOTON_SMT) },
{ 0, }
};
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
index 9096d17beb5b..3dcc5f3f26cb 100644
--- a/drivers/i2c/busses/i2c-rk3x.c
+++ b/drivers/i2c/busses/i2c-rk3x.c
@@ -855,6 +855,7 @@ static struct rk3x_i2c_soc_data soc_data[3] = {
static const struct of_device_id rk3x_i2c_match[] = {
{ .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
{ .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+ { .compatible = "rockchip,rk3228-i2c", .data = (void *)&soc_data[2] },
{ .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
{},
};
diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
index cb00d59da456..c2e257d97eff 100644
--- a/drivers/infiniband/core/cache.c
+++ b/drivers/infiniband/core/cache.c
@@ -691,7 +691,8 @@ void ib_cache_gid_set_default_gid(struct ib_device *ib_dev, u8 port,
NULL);
/* Coudn't find default GID location */
- WARN_ON(ix < 0);
+ if (WARN_ON(ix < 0))
+ goto release;
zattr_type.gid_type = gid_type;
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 4a9aa0433b07..7713ef089c3c 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -48,6 +48,7 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
#include <rdma/ib_cm.h>
#include <rdma/ib_user_cm.h>
#include <rdma/ib_marshall.h>
@@ -1103,6 +1104,9 @@ static ssize_t ib_ucm_write(struct file *filp, const char __user *buf,
struct ib_ucm_cmd_hdr hdr;
ssize_t result;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index dd3bcceadfde..c0f3826abb30 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -1574,6 +1574,9 @@ static ssize_t ucma_write(struct file *filp, const char __user *buf,
struct rdma_ucm_cmd_hdr hdr;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (len < sizeof(hdr))
return -EINVAL;
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 28ba2cc81535..31f422a70623 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -48,6 +48,8 @@
#include <asm/uaccess.h>
+#include <rdma/ib.h>
+
#include "uverbs.h"
MODULE_AUTHOR("Roland Dreier");
@@ -709,6 +711,9 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
int srcu_key;
ssize_t ret;
+ if (WARN_ON_ONCE(!ib_safe_file_access(filp)))
+ return -EACCES;
+
if (count < sizeof hdr)
return -EINVAL;
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 15b8adbf39c0..b65b3541e732 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1860,6 +1860,7 @@ EXPORT_SYMBOL(ib_drain_rq);
void ib_drain_qp(struct ib_qp *qp)
{
ib_drain_sq(qp);
- ib_drain_rq(qp);
+ if (!qp->srq)
+ ib_drain_rq(qp);
}
EXPORT_SYMBOL(ib_drain_qp);
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 42a7b8952d13..3234a8be16f6 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1390,6 +1390,8 @@ int iwch_register_device(struct iwch_dev *dev)
dev->ibdev.iwcm->add_ref = iwch_qp_add_ref;
dev->ibdev.iwcm->rem_ref = iwch_qp_rem_ref;
dev->ibdev.iwcm->get_qp = iwch_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.t3cdev_p->lldev->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index b4eeb783573c..b0b955724458 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -162,7 +162,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->bar2_va = c4iw_bar2_addrs(rdev, cq->cqid, T4_BAR2_QTYPE_INGRESS,
&cq->bar2_qid,
user ? &cq->bar2_pa : NULL);
- if (user && !cq->bar2_va) {
+ if (user && !cq->bar2_pa) {
pr_warn(MOD "%s: cqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), cq->cqid);
ret = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 124682dc5709..7574f394fdac 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -580,6 +580,8 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.iwcm->add_ref = c4iw_qp_add_ref;
dev->ibdev.iwcm->rem_ref = c4iw_qp_rem_ref;
dev->ibdev.iwcm->get_qp = c4iw_get_qp;
+ memcpy(dev->ibdev.iwcm->ifname, dev->rdev.lldi.ports[0]->name,
+ sizeof(dev->ibdev.iwcm->ifname));
ret = ib_register_device(&dev->ibdev, NULL);
if (ret)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index e17fb5d5e033..e8993e49b8b3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -185,6 +185,10 @@ void __iomem *c4iw_bar2_addrs(struct c4iw_rdev *rdev, unsigned int qid,
if (pbar2_pa)
*pbar2_pa = (rdev->bar2_pa + bar2_qoffset) & PAGE_MASK;
+
+ if (is_t4(rdev->lldi.adapter_type))
+ return NULL;
+
return rdev->bar2_kva + bar2_qoffset;
}
@@ -270,7 +274,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
/*
* User mode must have bar2 access.
*/
- if (user && (!wq->sq.bar2_va || !wq->rq.bar2_va)) {
+ if (user && (!wq->sq.bar2_pa || !wq->rq.bar2_pa)) {
pr_warn(MOD "%s: sqid %u or rqid %u not in BAR2 range.\n",
pci_name(rdev->lldi.pdev), wq->sq.qid, wq->rq.qid);
goto free_dma;
@@ -1895,13 +1899,27 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
void c4iw_drain_sq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
- wait_for_completion(&qp->sq_drained);
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_sq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
+
+ if (need_to_wait)
+ wait_for_completion(&qp->sq_drained);
}
void c4iw_drain_rq(struct ib_qp *ibqp)
{
struct c4iw_qp *qp = to_c4iw_qp(ibqp);
+ unsigned long flag;
+ bool need_to_wait;
+
+ spin_lock_irqsave(&qp->lock, flag);
+ need_to_wait = !t4_rq_empty(&qp->wq);
+ spin_unlock_irqrestore(&qp->lock, flag);
- wait_for_completion(&qp->rq_drained);
+ if (need_to_wait)
+ wait_for_completion(&qp->rq_drained);
}
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index 5acf346e048e..6ad0489cb3c5 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -530,7 +530,7 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
sizeof(struct mlx5_wqe_ctrl_seg)) /
sizeof(struct mlx5_wqe_data_seg);
props->max_sge = min(max_rq_sg, max_sq_sg);
- props->max_sge_rd = props->max_sge;
+ props->max_sge_rd = MLX5_MAX_SGE_RD;
props->max_cq = 1 << MLX5_CAP_GEN(mdev, log_max_cq);
props->max_cqe = (1 << MLX5_CAP_GEN(mdev, log_max_cq_sz)) - 1;
props->max_mr = 1 << MLX5_CAP_GEN(mdev, log_max_mkey);
@@ -671,8 +671,8 @@ static int mlx5_query_hca_port(struct ib_device *ibdev, u8 port,
struct mlx5_ib_dev *dev = to_mdev(ibdev);
struct mlx5_core_dev *mdev = dev->mdev;
struct mlx5_hca_vport_context *rep;
- int max_mtu;
- int oper_mtu;
+ u16 max_mtu;
+ u16 oper_mtu;
int err;
u8 ib_link_width_oper;
u8 vl_hw_cap;
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 3ea9e055fdd3..92914539edc7 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -500,9 +500,6 @@ static int nes_netdev_start_xmit(struct sk_buff *skb, struct net_device *netdev)
* skb_shinfo(skb)->nr_frags, skb_is_gso(skb));
*/
- if (!netif_carrier_ok(netdev))
- return NETDEV_TX_OK;
-
if (netif_queue_stopped(netdev))
return NETDEV_TX_BUSY;
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index e449e394963f..24f4a782e0f4 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -45,6 +45,8 @@
#include <linux/export.h>
#include <linux/uio.h>
+#include <rdma/ib.h>
+
#include "qib.h"
#include "qib_common.h"
#include "qib_user_sdma.h"
@@ -2067,6 +2069,9 @@ static ssize_t qib_write(struct file *fp, const char __user *data,
ssize_t ret = 0;
void *dest;
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd.type)) {
ret = -EINVAL;
goto bail;
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index bd82a6948dc8..a9e3bcc522c4 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -1637,9 +1637,9 @@ bail:
spin_unlock_irqrestore(&qp->s_hlock, flags);
if (nreq) {
if (call_send)
- rdi->driver_f.schedule_send_no_lock(qp);
- else
rdi->driver_f.do_send(qp);
+ else
+ rdi->driver_f.schedule_send_no_lock(qp);
}
return err;
}
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 12f5ebbd0436..ad2f3d27b266 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -1452,13 +1452,6 @@ static int usbvision_probe(struct usb_interface *intf,
printk(KERN_INFO "%s: %s found\n", __func__,
usbvision_device_data[model].model_string);
- /*
- * this is a security check.
- * an exploit using an incorrect bInterfaceNumber is known
- */
- if (ifnum >= USB_MAXINTERFACES || !dev->actconfig->interface[ifnum])
- return -ENODEV;
-
if (usbvision_device_data[model].interface >= 0)
interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0];
else if (ifnum < dev->actconfig->desc.bNumInterfaces)
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index 5d016f496e0e..9fbcb67a9ee6 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -1645,7 +1645,7 @@ static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
* Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
- int nonblocking)
+ void *pb, int nonblocking)
{
unsigned long flags;
int ret;
@@ -1666,10 +1666,10 @@ static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
/*
* Only remove the buffer from done_list if v4l2_buffer can handle all
* the planes.
- * Verifying planes is NOT necessary since it already has been checked
- * before the buffer is queued/prepared. So it can never fail.
*/
- list_del(&(*vb)->done_entry);
+ ret = call_bufop(q, verify_planes_array, *vb, pb);
+ if (!ret)
+ list_del(&(*vb)->done_entry);
spin_unlock_irqrestore(&q->done_lock, flags);
return ret;
@@ -1748,7 +1748,7 @@ int vb2_core_dqbuf(struct vb2_queue *q, unsigned int *pindex, void *pb,
struct vb2_buffer *vb = NULL;
int ret;
- ret = __vb2_get_done_vb(q, &vb, nonblocking);
+ ret = __vb2_get_done_vb(q, &vb, pb, nonblocking);
if (ret < 0)
return ret;
@@ -2298,6 +2298,16 @@ unsigned int vb2_core_poll(struct vb2_queue *q, struct file *file,
return POLLERR;
/*
+ * If this quirk is set and QBUF hasn't been called yet then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ * This quirk is set by V4L2 for backwards compatibility reasons.
+ */
+ if (q->quirk_poll_must_check_waiting_for_buffers &&
+ q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
+ return POLLERR;
+
+ /*
* For output streams you can call write() as long as there are fewer
* buffers queued than there are buffers available.
*/
diff --git a/drivers/media/v4l2-core/videobuf2-memops.c b/drivers/media/v4l2-core/videobuf2-memops.c
index dbec5923fcf0..3c3b517f1d1c 100644
--- a/drivers/media/v4l2-core/videobuf2-memops.c
+++ b/drivers/media/v4l2-core/videobuf2-memops.c
@@ -49,7 +49,7 @@ struct frame_vector *vb2_create_framevec(unsigned long start,
vec = frame_vector_create(nr);
if (!vec)
return ERR_PTR(-ENOMEM);
- ret = get_vaddr_frames(start, nr, write, 1, vec);
+ ret = get_vaddr_frames(start & PAGE_MASK, nr, write, true, vec);
if (ret < 0)
goto out_destroy;
/* We accept only complete set of PFNs */
diff --git a/drivers/media/v4l2-core/videobuf2-v4l2.c b/drivers/media/v4l2-core/videobuf2-v4l2.c
index 91f552124050..7f366f1b0377 100644
--- a/drivers/media/v4l2-core/videobuf2-v4l2.c
+++ b/drivers/media/v4l2-core/videobuf2-v4l2.c
@@ -74,6 +74,11 @@ static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer
return 0;
}
+static int __verify_planes_array_core(struct vb2_buffer *vb, const void *pb)
+{
+ return __verify_planes_array(vb, pb);
+}
+
/**
* __verify_length() - Verify that the bytesused value for each plane fits in
* the plane length and that the data offset doesn't exceed the bytesused value.
@@ -437,6 +442,7 @@ static int __fill_vb2_buffer(struct vb2_buffer *vb,
}
static const struct vb2_buf_ops v4l2_buf_ops = {
+ .verify_planes_array = __verify_planes_array_core,
.fill_user_buffer = __fill_v4l2_buffer,
.fill_vb2_buffer = __fill_vb2_buffer,
.copy_timestamp = __copy_timestamp,
@@ -765,6 +771,12 @@ int vb2_queue_init(struct vb2_queue *q)
q->is_output = V4L2_TYPE_IS_OUTPUT(q->type);
q->copy_timestamp = (q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK)
== V4L2_BUF_FLAG_TIMESTAMP_COPY;
+ /*
+ * For compatibility with vb1: if QBUF hasn't been called yet, then
+ * return POLLERR as well. This only affects capture queues, output
+ * queues will always initialize waiting_for_buffers to false.
+ */
+ q->quirk_poll_must_check_waiting_for_buffers = true;
return vb2_core_queue_init(q);
}
@@ -818,14 +830,6 @@ unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
poll_wait(file, &fh->wait, wait);
}
- /*
- * For compatibility with vb1: if QBUF hasn't been called yet, then
- * return POLLERR as well. This only affects capture queues, output
- * queues will always initialize waiting_for_buffers to false.
- */
- if (q->waiting_for_buffers && (req_events & (POLLIN | POLLRDNORM)))
- return POLLERR;
-
return res | vb2_core_poll(q, file, wait);
}
EXPORT_SYMBOL_GPL(vb2_poll);
diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c
index 10370f280500..7edea9c19199 100644
--- a/drivers/misc/cxl/context.c
+++ b/drivers/misc/cxl/context.c
@@ -223,6 +223,13 @@ int __detach_context(struct cxl_context *ctx)
cxl_ops->link_ok(ctx->afu->adapter, ctx->afu));
flush_work(&ctx->fault_work); /* Only needed for dedicated process */
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ if (cxl_ops->irq_wait)
+ cxl_ops->irq_wait(ctx);
+
/* release the reference to the group leader and mm handling pid */
put_pid(ctx->pid);
put_pid(ctx->glpid);
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 38e21cf7806e..73dc2a33da74 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -274,6 +274,7 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0};
#define CXL_PSL_DSISR_An_PE (1ull << (63-4)) /* PSL Error (implementation specific) */
#define CXL_PSL_DSISR_An_AE (1ull << (63-5)) /* AFU Error */
#define CXL_PSL_DSISR_An_OC (1ull << (63-6)) /* OS Context Warning */
+#define CXL_PSL_DSISR_PENDING (CXL_PSL_DSISR_TRANS | CXL_PSL_DSISR_An_PE | CXL_PSL_DSISR_An_AE | CXL_PSL_DSISR_An_OC)
/* NOTE: Bits 32:63 are undefined if DSISR[DS] = 1 */
#define CXL_PSL_DSISR_An_M DSISR_NOHPTE /* PTE not found */
#define CXL_PSL_DSISR_An_P DSISR_PROTFAULT /* Storage protection violation */
@@ -855,6 +856,7 @@ struct cxl_backend_ops {
u64 dsisr, u64 errstat);
irqreturn_t (*psl_interrupt)(int irq, void *data);
int (*ack_irq)(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
+ void (*irq_wait)(struct cxl_context *ctx);
int (*attach_process)(struct cxl_context *ctx, bool kernel,
u64 wed, u64 amr);
int (*detach_process)(struct cxl_context *ctx);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index be646dc41a2c..8def4553acba 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -203,7 +203,6 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
void cxl_unmap_irq(unsigned int virq, void *cookie)
{
free_irq(virq, cookie);
- irq_dispose_mapping(virq);
}
int cxl_register_one_irq(struct cxl *adapter,
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 387fcbdf9793..ecf7557cd657 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -14,6 +14,7 @@
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
+#include <linux/delay.h>
#include <asm/synch.h>
#include <misc/cxl-base.h>
@@ -797,6 +798,35 @@ static irqreturn_t native_irq_multiplexed(int irq, void *data)
return fail_psl_irq(afu, &irq_info);
}
+void native_irq_wait(struct cxl_context *ctx)
+{
+ u64 dsisr;
+ int timeout = 1000;
+ int ph;
+
+ /*
+ * Wait until no further interrupts are presented by the PSL
+ * for this context.
+ */
+ while (timeout--) {
+ ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
+ if (ph != ctx->pe)
+ return;
+ dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
+ if ((dsisr & CXL_PSL_DSISR_PENDING) == 0)
+ return;
+ /*
+ * We are waiting for the workqueue to process our
+ * irq, so we need to let that run here.
+ */
+ msleep(1);
+ }
+
+ dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
+ " DSISR %016llx!\n", ph, dsisr);
+ return;
+}
+
static irqreturn_t native_slice_irq_err(int irq, void *data)
{
struct cxl_afu *afu = data;
@@ -1076,6 +1106,7 @@ const struct cxl_backend_ops cxl_native_ops = {
.handle_psl_slice_error = native_handle_psl_slice_error,
.psl_interrupt = NULL,
.ack_irq = native_ack_irq,
+ .irq_wait = native_irq_wait,
.attach_process = native_attach_process,
.detach_process = native_detach_process,
.support_attributes = native_support_attributes,
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 04feea8354cb..e657af0e95fa 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -97,6 +97,7 @@ config MMC_RICOH_MMC
config MMC_SDHCI_ACPI
tristate "SDHCI support for ACPI enumerated SDHCI controllers"
depends on MMC_SDHCI && ACPI
+ select IOSF_MBI if X86
help
This selects support for ACPI enumerated SDHCI controllers,
identified by ACPI Compatibility ID PNP0D40 or specific
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 6839e41c6d58..bed6a494f52c 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -41,6 +41,11 @@
#include <linux/mmc/pm.h>
#include <linux/mmc/slot-gpio.h>
+#ifdef CONFIG_X86
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+#endif
+
#include "sdhci.h"
enum {
@@ -116,6 +121,75 @@ static const struct sdhci_acpi_chip sdhci_acpi_chip_int = {
.ops = &sdhci_acpi_ops_int,
};
+#ifdef CONFIG_X86
+
+static bool sdhci_acpi_byt(void)
+{
+ static const struct x86_cpu_id byt[] = {
+ { X86_VENDOR_INTEL, 6, 0x37 },
+ {}
+ };
+
+ return x86_match_cpu(byt);
+}
+
+#define BYT_IOSF_SCCEP 0x63
+#define BYT_IOSF_OCP_NETCTRL0 0x1078
+#define BYT_IOSF_OCP_TIMEOUT_BASE GENMASK(10, 8)
+
+static void sdhci_acpi_byt_setting(struct device *dev)
+{
+ u32 val = 0;
+
+ if (!sdhci_acpi_byt())
+ return;
+
+ if (iosf_mbi_read(BYT_IOSF_SCCEP, MBI_CR_READ, BYT_IOSF_OCP_NETCTRL0,
+ &val)) {
+ dev_err(dev, "%s read error\n", __func__);
+ return;
+ }
+
+ if (!(val & BYT_IOSF_OCP_TIMEOUT_BASE))
+ return;
+
+ val &= ~BYT_IOSF_OCP_TIMEOUT_BASE;
+
+ if (iosf_mbi_write(BYT_IOSF_SCCEP, MBI_CR_WRITE, BYT_IOSF_OCP_NETCTRL0,
+ val)) {
+ dev_err(dev, "%s write error\n", __func__);
+ return;
+ }
+
+ dev_dbg(dev, "%s completed\n", __func__);
+}
+
+static bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ if (!sdhci_acpi_byt())
+ return false;
+
+ if (!iosf_mbi_available())
+ return true;
+
+ sdhci_acpi_byt_setting(dev);
+
+ return false;
+}
+
+#else
+
+static inline void sdhci_acpi_byt_setting(struct device *dev)
+{
+}
+
+static inline bool sdhci_acpi_byt_defer(struct device *dev)
+{
+ return false;
+}
+
+#endif
+
static int bxt_get_cd(struct mmc_host *mmc)
{
int gpio_cd = mmc_gpio_get_cd(mmc);
@@ -322,6 +396,9 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
+ if (sdhci_acpi_byt_defer(dev))
+ return -EPROBE_DEFER;
+
hid = acpi_device_hid(device);
uid = device->pnp.unique_id;
@@ -447,6 +524,8 @@ static int sdhci_acpi_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_resume_host(c->host);
}
@@ -470,6 +549,8 @@ static int sdhci_acpi_runtime_resume(struct device *dev)
{
struct sdhci_acpi_host *c = dev_get_drvdata(dev);
+ sdhci_acpi_byt_setting(&c->pdev->dev);
+
return sdhci_runtime_resume_host(c->host);
}
diff --git a/drivers/mmc/host/sunxi-mmc.c b/drivers/mmc/host/sunxi-mmc.c
index 8372a413848c..7fc8b7aa83f0 100644
--- a/drivers/mmc/host/sunxi-mmc.c
+++ b/drivers/mmc/host/sunxi-mmc.c
@@ -1129,6 +1129,11 @@ static int sunxi_mmc_probe(struct platform_device *pdev)
MMC_CAP_1_8V_DDR |
MMC_CAP_ERASE | MMC_CAP_SDIO_IRQ;
+ /* TODO MMC DDR is not working on A80 */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "allwinner,sun9i-a80-mmc"))
+ mmc->caps &= ~MMC_CAP_1_8V_DDR;
+
ret = mmc_of_parse(mmc);
if (ret)
goto error_free_dma;
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index a24c18eee598..befd67df08e1 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -62,9 +62,8 @@ config DUMMY
this device is consigned into oblivion) with a configurable IP
address. It is most commonly used in order to make your currently
inactive SLIP address seem like a real address for local programs.
- If you use SLIP or PPP, you might want to say Y here. Since this
- thing often comes in handy, the default is Y. It won't enlarge your
- kernel either. What a deal. Read about it in the Network
+ If you use SLIP or PPP, you might want to say Y here. It won't
+ enlarge your kernel. What a deal. Read about it in the Network
Administrator's Guide, available from
<http://www.tldp.org/docs.html#guide>.
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index c0d7b7296236..a386f047c1af 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -405,7 +405,6 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
u32 packets = 0;
u32 bytes = 0;
int factor = priv->cqe_factor;
- u64 timestamp = 0;
int done = 0;
int budget = priv->tx_work_limit;
u32 last_nr_txbb;
@@ -445,9 +444,12 @@ static bool mlx4_en_process_tx_cq(struct net_device *dev,
new_index = be16_to_cpu(cqe->wqe_index) & size_mask;
do {
+ u64 timestamp = 0;
+
txbbs_skipped += last_nr_txbb;
ring_index = (ring_index + last_nr_txbb) & size_mask;
- if (ring->tx_info[ring_index].ts_requested)
+
+ if (unlikely(ring->tx_info[ring_index].ts_requested))
timestamp = mlx4_en_get_cqe_ts(cqe);
/* free next descriptor */
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index 879e6276c473..e80ce94b5dcf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -609,7 +609,7 @@ enum mlx5e_link_mode {
MLX5E_100GBASE_KR4 = 22,
MLX5E_100GBASE_LR4 = 23,
MLX5E_100BASE_TX = 24,
- MLX5E_100BASE_T = 25,
+ MLX5E_1000BASE_T = 25,
MLX5E_10GBASE_T = 26,
MLX5E_25GBASE_CR = 27,
MLX5E_25GBASE_KR = 28,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 68834b715f6c..3476ab844634 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -138,10 +138,10 @@ static const struct {
[MLX5E_100BASE_TX] = {
.speed = 100,
},
- [MLX5E_100BASE_T] = {
- .supported = SUPPORTED_100baseT_Full,
- .advertised = ADVERTISED_100baseT_Full,
- .speed = 100,
+ [MLX5E_1000BASE_T] = {
+ .supported = SUPPORTED_1000baseT_Full,
+ .advertised = ADVERTISED_1000baseT_Full,
+ .speed = 1000,
},
[MLX5E_10GBASE_T] = {
.supported = SUPPORTED_10000baseT_Full,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index e0adb604f461..67d548b70e14 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1404,24 +1404,50 @@ static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
return 0;
}
-static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
{
- struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- int hw_mtu;
+ u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
int err;
- err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
+ err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
if (err)
return err;
- mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+ /* Update vport context MTU */
+ mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
+ return 0;
+}
- if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
- netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
- __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
+static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
+{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ u16 hw_mtu = 0;
+ int err;
- netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
+ err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
+ if (err || !hw_mtu) /* fallback to port oper mtu */
+ mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
+
+ *mtu = MLX5E_HW2SW_MTU(hw_mtu);
+}
+
+static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
+{
+ struct mlx5e_priv *priv = netdev_priv(netdev);
+ u16 mtu;
+ int err;
+
+ err = mlx5e_set_mtu(priv, netdev->mtu);
+ if (err)
+ return err;
+
+ mlx5e_query_mtu(priv, &mtu);
+ if (mtu != netdev->mtu)
+ netdev_warn(netdev, "%s: VPort MTU %d is different than netdev mtu %d\n",
+ __func__, mtu, netdev->mtu);
+
+ netdev->mtu = mtu;
return 0;
}
@@ -1999,22 +2025,27 @@ static int mlx5e_set_features(struct net_device *netdev,
return err;
}
+#define MXL5_HW_MIN_MTU 64
+#define MXL5E_MIN_MTU (MXL5_HW_MIN_MTU + ETH_FCS_LEN)
+
static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
bool was_opened;
- int max_mtu;
+ u16 max_mtu;
+ u16 min_mtu;
int err = 0;
mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+ min_mtu = MLX5E_HW2SW_MTU(MXL5E_MIN_MTU);
- if (new_mtu > max_mtu) {
+ if (new_mtu > max_mtu || new_mtu < min_mtu) {
netdev_err(netdev,
- "%s: Bad MTU (%d) > (%d) Max\n",
- __func__, new_mtu, max_mtu);
+ "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
+ __func__, new_mtu, min_mtu, max_mtu);
return -EINVAL;
}
@@ -2602,7 +2633,16 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
schedule_work(&priv->set_rx_mode_work);
mlx5e_disable_async_events(priv);
flush_scheduled_work();
- unregister_netdev(netdev);
+ if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
+ netif_device_detach(netdev);
+ mutex_lock(&priv->state_lock);
+ if (test_bit(MLX5E_STATE_OPENED, &priv->state))
+ mlx5e_close_locked(netdev);
+ mutex_unlock(&priv->state_lock);
+ } else {
+ unregister_netdev(netdev);
+ }
+
mlx5e_tc_cleanup(priv);
mlx5e_vxlan_cleanup(priv);
mlx5e_destroy_flow_tables(priv);
@@ -2615,7 +2655,9 @@ static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
- free_netdev(netdev);
+
+ if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
+ free_netdev(netdev);
}
static void *mlx5e_get_netdev(void *vpriv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 5121be4675d1..89cce97d46c6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1065,33 +1065,6 @@ unlock_fg:
return rule;
}
-static struct mlx5_flow_rule *add_rule_to_auto_fg(struct mlx5_flow_table *ft,
- u8 match_criteria_enable,
- u32 *match_criteria,
- u32 *match_value,
- u8 action,
- u32 flow_tag,
- struct mlx5_flow_destination *dest)
-{
- struct mlx5_flow_rule *rule;
- struct mlx5_flow_group *g;
-
- g = create_autogroup(ft, match_criteria_enable, match_criteria);
- if (IS_ERR(g))
- return (void *)g;
-
- rule = add_rule_fg(g, match_value,
- action, flow_tag, dest);
- if (IS_ERR(rule)) {
- /* Remove assumes refcount > 0 and autogroup creates a group
- * with a refcount = 0.
- */
- tree_get_node(&g->node);
- tree_remove_node(&g->node);
- }
- return rule;
-}
-
static struct mlx5_flow_rule *
_mlx5_add_flow_rule(struct mlx5_flow_table *ft,
u8 match_criteria_enable,
@@ -1119,8 +1092,23 @@ _mlx5_add_flow_rule(struct mlx5_flow_table *ft,
goto unlock;
}
- rule = add_rule_to_auto_fg(ft, match_criteria_enable, match_criteria,
- match_value, action, flow_tag, dest);
+ g = create_autogroup(ft, match_criteria_enable, match_criteria);
+ if (IS_ERR(g)) {
+ rule = (void *)g;
+ goto unlock;
+ }
+
+ rule = add_rule_fg(g, match_value,
+ action, flow_tag, dest);
+ if (IS_ERR(rule)) {
+ /* Remove assumes refcount > 0 and autogroup creates a group
+ * with a refcount = 0.
+ */
+ unlock_ref_node(&ft->node);
+ tree_get_node(&g->node);
+ tree_remove_node(&g->node);
+ return rule;
+ }
unlock:
unlock_ref_node(&ft->node);
return rule;
@@ -1288,7 +1276,7 @@ struct mlx5_flow_namespace *mlx5_get_flow_namespace(struct mlx5_core_dev *dev,
{
struct mlx5_flow_root_namespace *root_ns = dev->priv.root_ns;
int prio;
- static struct fs_prio *fs_prio;
+ struct fs_prio *fs_prio;
struct mlx5_flow_namespace *ns;
if (!root_ns)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3f3b2fae4991..6892746fd10d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -966,7 +966,7 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
int err;
mutex_lock(&dev->intf_state_mutex);
- if (dev->interface_state == MLX5_INTERFACE_STATE_UP) {
+ if (test_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is up, NOP\n",
__func__);
goto out;
@@ -1133,7 +1133,8 @@ static int mlx5_load_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
if (err)
pr_info("failed request module on %s\n", MLX5_IB_MOD);
- dev->interface_state = MLX5_INTERFACE_STATE_UP;
+ clear_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
+ set_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
out:
mutex_unlock(&dev->intf_state_mutex);
@@ -1207,7 +1208,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
}
mutex_lock(&dev->intf_state_mutex);
- if (dev->interface_state == MLX5_INTERFACE_STATE_DOWN) {
+ if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
dev_warn(&dev->pdev->dev, "%s: interface is down, NOP\n",
__func__);
goto out;
@@ -1241,7 +1242,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
mlx5_cmd_cleanup(dev);
out:
- dev->interface_state = MLX5_INTERFACE_STATE_DOWN;
+ clear_bit(MLX5_INTERFACE_STATE_UP, &dev->intf_state);
+ set_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state);
mutex_unlock(&dev->intf_state_mutex);
return err;
}
@@ -1452,6 +1454,18 @@ static const struct pci_error_handlers mlx5_err_handler = {
.resume = mlx5_pci_resume
};
+static void shutdown(struct pci_dev *pdev)
+{
+ struct mlx5_core_dev *dev = pci_get_drvdata(pdev);
+ struct mlx5_priv *priv = &dev->priv;
+
+ dev_info(&pdev->dev, "Shutdown was called\n");
+ /* Notify mlx5 clients that the kernel is being shut down */
+ set_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &dev->intf_state);
+ mlx5_unload_one(dev, priv);
+ mlx5_pci_disable_device(dev);
+}
+
static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1011) }, /* Connect-IB */
{ PCI_VDEVICE(MELLANOX, 0x1012), MLX5_PCI_DEV_IS_VF}, /* Connect-IB VF */
@@ -1459,6 +1473,8 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
{ PCI_VDEVICE(MELLANOX, 0x1014), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4 VF */
{ PCI_VDEVICE(MELLANOX, 0x1015) }, /* ConnectX-4LX */
{ PCI_VDEVICE(MELLANOX, 0x1016), MLX5_PCI_DEV_IS_VF}, /* ConnectX-4LX VF */
+ { PCI_VDEVICE(MELLANOX, 0x1017) }, /* ConnectX-5 */
+ { PCI_VDEVICE(MELLANOX, 0x1018), MLX5_PCI_DEV_IS_VF}, /* ConnectX-5 VF */
{ 0, }
};
@@ -1469,6 +1485,7 @@ static struct pci_driver mlx5_core_driver = {
.id_table = mlx5_core_pci_table,
.probe = init_one,
.remove = remove_one,
+ .shutdown = shutdown,
.err_handler = &mlx5_err_handler,
.sriov_configure = mlx5_core_sriov_configure,
};
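
The interface state handling above switches from a single state value to independent bits, so the new shutdown handler can record MLX5_INTERFACE_STATE_SHUTDOWN alongside the up/down tracking, and mlx5e_destroy_netdev() can later test that bit to skip unregister_netdev()/free_netdev(). A stand-alone sketch of the idea with plain bit flags (names and values here are illustrative, not the mlx5 definitions):

#include <stdio.h>

enum {
	STATE_UP       = 1 << 0,
	STATE_DOWN     = 1 << 1,
	STATE_SHUTDOWN = 1 << 2,	/* orthogonal to up/down */
};

int main(void)
{
	unsigned long intf_state = STATE_DOWN;

	/* load: down -> up */
	intf_state &= ~STATE_DOWN;
	intf_state |= STATE_UP;

	/* shutdown: set the extra flag, then unload (up -> down) */
	intf_state |= STATE_SHUTDOWN;
	intf_state &= ~STATE_UP;
	intf_state |= STATE_DOWN;

	/* teardown can still see that the kernel is shutting down */
	if (intf_state & STATE_SHUTDOWN)
		printf("shutdown path: detach netdev, skip unregister/free\n");
	else
		printf("normal remove: unregister and free netdev\n");
	return 0;
}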
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index ae378c575deb..53cc1e2c693b 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -247,8 +247,8 @@ int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
}
EXPORT_SYMBOL_GPL(mlx5_query_port_admin_status);
-static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
- int *max_mtu, int *oper_mtu, u8 port)
+static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, u16 *admin_mtu,
+ u16 *max_mtu, u16 *oper_mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -268,7 +268,7 @@ static void mlx5_query_port_mtu(struct mlx5_core_dev *dev, int *admin_mtu,
*admin_mtu = MLX5_GET(pmtu_reg, out, admin_mtu);
}
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port)
{
u32 in[MLX5_ST_SZ_DW(pmtu_reg)];
u32 out[MLX5_ST_SZ_DW(pmtu_reg)];
@@ -283,14 +283,14 @@ int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port)
}
EXPORT_SYMBOL_GPL(mlx5_set_port_mtu);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu,
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, max_mtu, NULL, port);
}
EXPORT_SYMBOL_GPL(mlx5_query_port_max_mtu);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port)
{
mlx5_query_port_mtu(dev, NULL, NULL, oper_mtu, port);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index bd518405859e..b69dadcfb897 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -196,6 +196,46 @@ int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mac_address);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu)
+{
+ int outlen = MLX5_ST_SZ_BYTES(query_nic_vport_context_out);
+ u32 *out;
+ int err;
+
+ out = mlx5_vzalloc(outlen);
+ if (!out)
+ return -ENOMEM;
+
+ err = mlx5_query_nic_vport_context(mdev, 0, out, outlen);
+ if (!err)
+ *mtu = MLX5_GET(query_nic_vport_context_out, out,
+ nic_vport_context.mtu);
+
+ kvfree(out);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_mtu);
+
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu)
+{
+ int inlen = MLX5_ST_SZ_BYTES(modify_nic_vport_context_in);
+ void *in;
+ int err;
+
+ in = mlx5_vzalloc(inlen);
+ if (!in)
+ return -ENOMEM;
+
+ MLX5_SET(modify_nic_vport_context_in, in, field_select.mtu, 1);
+ MLX5_SET(modify_nic_vport_context_in, in, nic_vport_context.mtu, mtu);
+
+ err = mlx5_modify_nic_vport_context(mdev, in, inlen);
+
+ kvfree(in);
+ return err;
+}
+EXPORT_SYMBOL_GPL(mlx5_modify_nic_vport_mtu);
+
int mlx5_query_nic_vport_mac_list(struct mlx5_core_dev *dev,
u32 vport,
enum mlx5_list_type list_type,
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 55007f1e6bbc..caf6ddb7ea76 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -37,8 +37,8 @@
#define _QLCNIC_LINUX_MAJOR 5
#define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 63
-#define QLCNIC_LINUX_VERSIONID "5.3.63"
+#define _QLCNIC_LINUX_SUBVERSION 64
+#define QLCNIC_LINUX_VERSIONID "5.3.64"
#define QLCNIC_DRV_IDC_VER 0x01
#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
(_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
index 44022b1845ce..afb90d129cb6 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
@@ -49,7 +49,6 @@ struct socfpga_dwmac {
u32 reg_shift;
struct device *dev;
struct regmap *sys_mgr_base_addr;
- struct reset_control *stmmac_rst;
void __iomem *splitter_base;
bool f2h_ptp_ref_clk;
};
@@ -92,15 +91,6 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
struct device_node *np_splitter;
struct resource res_splitter;
- dwmac->stmmac_rst = devm_reset_control_get(dev,
- STMMAC_RESOURCE_NAME);
- if (IS_ERR(dwmac->stmmac_rst)) {
- dev_info(dev, "Could not get reset control!\n");
- if (PTR_ERR(dwmac->stmmac_rst) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- dwmac->stmmac_rst = NULL;
- }
-
dwmac->interface = of_get_phy_mode(np);
sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -194,30 +184,23 @@ static int socfpga_dwmac_setup(struct socfpga_dwmac *dwmac)
return 0;
}
-static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
-{
- struct socfpga_dwmac *dwmac = priv;
-
- /* On socfpga platform exit, assert and hold reset to the
- * enet controller - the default state after a hard reset.
- */
- if (dwmac->stmmac_rst)
- reset_control_assert(dwmac->stmmac_rst);
-}
-
static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
{
- struct socfpga_dwmac *dwmac = priv;
+ struct socfpga_dwmac *dwmac = priv;
struct net_device *ndev = platform_get_drvdata(pdev);
struct stmmac_priv *stpriv = NULL;
int ret = 0;
- if (ndev)
- stpriv = netdev_priv(ndev);
+ if (!ndev)
+ return -EINVAL;
+
+ stpriv = netdev_priv(ndev);
+ if (!stpriv)
+ return -EINVAL;
/* Assert reset to the enet controller before changing the phy mode */
- if (dwmac->stmmac_rst)
- reset_control_assert(dwmac->stmmac_rst);
+ if (stpriv->stmmac_rst)
+ reset_control_assert(stpriv->stmmac_rst);
/* Setup the phy mode in the system manager registers according to
* devicetree configuration
@@ -227,8 +210,8 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
/* Deassert reset for the phy configuration to be sampled by
* the enet controller, and operation to start in requested mode
*/
- if (dwmac->stmmac_rst)
- reset_control_deassert(dwmac->stmmac_rst);
+ if (stpriv->stmmac_rst)
+ reset_control_deassert(stpriv->stmmac_rst);
/* Before the enet controller is suspended, the phy is suspended.
* This causes the phy clock to be gated. The enet controller is
@@ -245,7 +228,7 @@ static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
* control register 0, and can be modified by the phy driver
* framework.
*/
- if (stpriv && stpriv->phydev)
+ if (stpriv->phydev)
phy_resume(stpriv->phydev);
return ret;
@@ -285,14 +268,13 @@ static int socfpga_dwmac_probe(struct platform_device *pdev)
plat_dat->bsp_priv = dwmac;
plat_dat->init = socfpga_dwmac_init;
- plat_dat->exit = socfpga_dwmac_exit;
plat_dat->fix_mac_speed = socfpga_dwmac_fix_mac_speed;
- ret = socfpga_dwmac_init(pdev, plat_dat->bsp_priv);
- if (ret)
- return ret;
+ ret = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ if (!ret)
+ ret = socfpga_dwmac_init(pdev, dwmac);
- return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
+ return ret;
}
static const struct of_device_id socfpga_dwmac_match[] = {
diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c
index 84d3e5ca8817..c6385617bfb2 100644
--- a/drivers/net/macsec.c
+++ b/drivers/net/macsec.c
@@ -880,12 +880,12 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
macsec_skb_cb(skb)->valid = false;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
- return NULL;
+ return ERR_PTR(-ENOMEM);
req = aead_request_alloc(rx_sa->key.tfm, GFP_ATOMIC);
if (!req) {
kfree_skb(skb);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
hdr = (struct macsec_eth_header *)skb->data;
@@ -905,7 +905,7 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
skb = skb_unshare(skb, GFP_ATOMIC);
if (!skb) {
aead_request_free(req);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
} else {
/* integrity only: all headers + data authenticated */
@@ -921,14 +921,14 @@ static struct sk_buff *macsec_decrypt(struct sk_buff *skb,
dev_hold(dev);
ret = crypto_aead_decrypt(req);
if (ret == -EINPROGRESS) {
- return NULL;
+ return ERR_PTR(ret);
} else if (ret != 0) {
/* decryption/authentication failed
* 10.6 if validateFrames is disabled, deliver anyway
*/
if (ret != -EBADMSG) {
kfree_skb(skb);
- skb = NULL;
+ skb = ERR_PTR(ret);
}
} else {
macsec_skb_cb(skb)->valid = true;
@@ -1146,8 +1146,10 @@ static rx_handler_result_t macsec_handle_frame(struct sk_buff **pskb)
secy->validate_frames != MACSEC_VALIDATE_DISABLED)
skb = macsec_decrypt(skb, dev, rx_sa, sci, secy);
- if (!skb) {
- macsec_rxsa_put(rx_sa);
+ if (IS_ERR(skb)) {
+ /* the decrypt callback needs the reference */
+ if (PTR_ERR(skb) != -EINPROGRESS)
+ macsec_rxsa_put(rx_sa);
rcu_read_unlock();
*pskb = NULL;
return RX_HANDLER_CONSUMED;
@@ -1161,7 +1163,8 @@ deliver:
macsec_extra_len(macsec_skb_cb(skb)->has_sci));
macsec_reset_skb(skb, secy->netdev);
- macsec_rxsa_put(rx_sa);
+ if (rx_sa)
+ macsec_rxsa_put(rx_sa);
count_rx(dev, skb->len);
rcu_read_unlock();
@@ -1622,8 +1625,9 @@ static int macsec_add_rxsa(struct sk_buff *skb, struct genl_info *info)
}
rx_sa = kmalloc(sizeof(*rx_sa), GFP_KERNEL);
- if (init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]), secy->key_len,
- secy->icv_len)) {
+ if (!rx_sa || init_rx_sa(rx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
+ secy->key_len, secy->icv_len)) {
+ kfree(rx_sa);
rtnl_unlock();
return -ENOMEM;
}
@@ -1768,6 +1772,7 @@ static int macsec_add_txsa(struct sk_buff *skb, struct genl_info *info)
tx_sa = kmalloc(sizeof(*tx_sa), GFP_KERNEL);
if (!tx_sa || init_tx_sa(tx_sa, nla_data(tb_sa[MACSEC_SA_ATTR_KEY]),
secy->key_len, secy->icv_len)) {
+ kfree(tx_sa);
rtnl_unlock();
return -ENOMEM;
}
@@ -2227,7 +2232,8 @@ static int nla_put_secy(struct macsec_secy *secy, struct sk_buff *skb)
return 1;
if (nla_put_sci(skb, MACSEC_SECY_ATTR_SCI, secy->sci) ||
- nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+ nla_put_u64(skb, MACSEC_SECY_ATTR_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_ICV_LEN, secy->icv_len) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_OPER, secy->operational) ||
nla_put_u8(skb, MACSEC_SECY_ATTR_PROTECT, secy->protect_frames) ||
@@ -2268,7 +2274,7 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
if (!hdr)
return -EMSGSIZE;
- rtnl_lock();
+ genl_dump_check_consistent(cb, hdr, &macsec_fam);
if (nla_put_u32(skb, MACSEC_ATTR_IFINDEX, dev->ifindex))
goto nla_put_failure;
@@ -2429,18 +2435,17 @@ static int dump_secy(struct macsec_secy *secy, struct net_device *dev,
nla_nest_end(skb, rxsc_list);
- rtnl_unlock();
-
genlmsg_end(skb, hdr);
return 0;
nla_put_failure:
- rtnl_unlock();
genlmsg_cancel(skb, hdr);
return -EMSGSIZE;
}
+static int macsec_generation = 1; /* protected by RTNL */
+
static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
@@ -2450,6 +2455,10 @@ static int macsec_dump_txsc(struct sk_buff *skb, struct netlink_callback *cb)
dev_idx = cb->args[0];
d = 0;
+ rtnl_lock();
+
+ cb->seq = macsec_generation;
+
for_each_netdev(net, dev) {
struct macsec_secy *secy;
@@ -2467,6 +2476,7 @@ next:
}
done:
+ rtnl_unlock();
cb->args[0] = d;
return skb->len;
}
@@ -2920,10 +2930,14 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head)
struct net_device *real_dev = macsec->real_dev;
struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev);
+ macsec_generation++;
+
unregister_netdevice_queue(dev, head);
list_del_rcu(&macsec->secys);
- if (list_empty(&rxd->secys))
+ if (list_empty(&rxd->secys)) {
netdev_rx_handler_unregister(real_dev);
+ kfree(rxd);
+ }
macsec_del_dev(macsec);
}
@@ -2945,8 +2959,10 @@ static int register_macsec_dev(struct net_device *real_dev,
err = netdev_rx_handler_register(real_dev, macsec_handle_frame,
rxd);
- if (err < 0)
+ if (err < 0) {
+ kfree(rxd);
return err;
+ }
}
list_add_tail_rcu(&macsec->secys, &rxd->secys);
@@ -3066,6 +3082,8 @@ static int macsec_newlink(struct net *net, struct net_device *dev,
if (err < 0)
goto del_dev;
+ macsec_generation++;
+
dev_hold(real_dev);
return 0;
@@ -3079,7 +3097,7 @@ unregister:
static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
{
- u64 csid = DEFAULT_CIPHER_ID;
+ u64 csid = MACSEC_DEFAULT_CIPHER_ID;
u8 icv_len = DEFAULT_ICV_LEN;
int flag;
bool es, scb, sci;
@@ -3094,8 +3112,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]);
switch (csid) {
- case DEFAULT_CIPHER_ID:
- case DEFAULT_CIPHER_ALT:
+ case MACSEC_DEFAULT_CIPHER_ID:
+ case MACSEC_DEFAULT_CIPHER_ALT:
if (icv_len < MACSEC_MIN_ICV_LEN ||
icv_len > MACSEC_MAX_ICV_LEN)
return -EINVAL;
@@ -3129,8 +3147,8 @@ static int macsec_validate_attr(struct nlattr *tb[], struct nlattr *data[])
nla_get_u8(data[IFLA_MACSEC_VALIDATION]) > MACSEC_VALIDATE_MAX)
return -EINVAL;
- if ((data[IFLA_MACSEC_PROTECT] &&
- nla_get_u8(data[IFLA_MACSEC_PROTECT])) &&
+ if ((data[IFLA_MACSEC_REPLAY_PROTECT] &&
+ nla_get_u8(data[IFLA_MACSEC_REPLAY_PROTECT])) &&
!data[IFLA_MACSEC_WINDOW])
return -EINVAL;
@@ -3168,7 +3186,8 @@ static int macsec_fill_info(struct sk_buff *skb,
if (nla_put_sci(skb, IFLA_MACSEC_SCI, secy->sci) ||
nla_put_u8(skb, IFLA_MACSEC_ICV_LEN, secy->icv_len) ||
- nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE, DEFAULT_CIPHER_ID) ||
+ nla_put_u64(skb, IFLA_MACSEC_CIPHER_SUITE,
+ MACSEC_DEFAULT_CIPHER_ID) ||
nla_put_u8(skb, IFLA_MACSEC_ENCODING_SA, tx_sc->encoding_sa) ||
nla_put_u8(skb, IFLA_MACSEC_ENCRYPT, tx_sc->encrypt) ||
nla_put_u8(skb, IFLA_MACSEC_PROTECT, secy->protect_frames) ||
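
Earlier in this macsec.c diff, macsec_decrypt() is changed to encode the failure reason in the returned pointer instead of returning NULL, which lets macsec_handle_frame() keep the rx_sa reference for -EINPROGRESS (the async crypto callback still needs it) while dropping it for real errors. Below is a stand-alone user-space sketch of the ERR_PTR/IS_ERR/PTR_ERR idiom; the macros are simplified stand-ins, not the kernel's <linux/err.h> definitions:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins for the kernel error-pointer helpers. */
#define MAX_ERRNO 4095
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* A decrypt-like helper: returns a valid buffer, or an encoded errno. */
static void *decrypt_sketch(int simulate)
{
	if (simulate == 1)
		return ERR_PTR(-ENOMEM);	/* allocation failed */
	if (simulate == 2)
		return ERR_PTR(-EINPROGRESS);	/* async, callback finishes later */
	return malloc(16);			/* success */
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		void *buf = decrypt_sketch(i);

		if (IS_ERR(buf)) {
			/* only drop the reference for real failures */
			if (PTR_ERR(buf) == -EINPROGRESS)
				printf("case %d: in progress, keep reference\n", i);
			else
				printf("case %d: error %ld, put reference\n",
				       i, PTR_ERR(buf));
			continue;
		}
		printf("case %d: decrypted ok\n", i);
		free(buf);
	}
	return 0;
}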
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index df1f1a76a862..01e12d221a8b 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -135,7 +135,7 @@ MODULE_LICENSE("GPL");
/* Field definitions */
#define HCI_ACCEL_MASK 0x7fff
#define HCI_HOTKEY_DISABLE 0x0b
-#define HCI_HOTKEY_ENABLE 0x01
+#define HCI_HOTKEY_ENABLE 0x09
#define HCI_HOTKEY_SPECIAL_FUNCTIONS 0x10
#define HCI_LCD_BRIGHTNESS_BITS 3
#define HCI_LCD_BRIGHTNESS_SHIFT (16-HCI_LCD_BRIGHTNESS_BITS)
diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
index 5d4d91846357..96168b819044 100644
--- a/drivers/rapidio/devices/rio_mport_cdev.c
+++ b/drivers/rapidio/devices/rio_mport_cdev.c
@@ -2669,9 +2669,9 @@ static int __init mport_init(void)
/* Create device class needed by udev */
dev_class = class_create(THIS_MODULE, DRV_NAME);
- if (!dev_class) {
+ if (IS_ERR(dev_class)) {
rmcd_error("Unable to create " DRV_NAME " class");
- return -EINVAL;
+ return PTR_ERR(dev_class);
}
ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
diff --git a/drivers/s390/char/sclp_ctl.c b/drivers/s390/char/sclp_ctl.c
index 648cb86afd42..ea607a4a1bdd 100644
--- a/drivers/s390/char/sclp_ctl.c
+++ b/drivers/s390/char/sclp_ctl.c
@@ -56,6 +56,7 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
{
struct sclp_ctl_sccb ctl_sccb;
struct sccb_header *sccb;
+ unsigned long copied;
int rc;
if (copy_from_user(&ctl_sccb, user_area, sizeof(ctl_sccb)))
@@ -65,14 +66,15 @@ static int sclp_ctl_ioctl_sccb(void __user *user_area)
sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sizeof(*sccb))) {
+ copied = PAGE_SIZE -
+ copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), PAGE_SIZE);
+ if (offsetof(struct sccb_header, length) +
+ sizeof(sccb->length) > copied || sccb->length > copied) {
rc = -EFAULT;
goto out_free;
}
- if (sccb->length > PAGE_SIZE || sccb->length < 8)
- return -EINVAL;
- if (copy_from_user(sccb, u64_to_uptr(ctl_sccb.sccb), sccb->length)) {
- rc = -EFAULT;
+ if (sccb->length < 8) {
+ rc = -EINVAL;
goto out_free;
}
rc = sclp_sync_request(ctl_sccb.cmdw, sccb);
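
The rework above replaces two copy_from_user() calls (header first, then sccb->length bytes) with a single copy of up to one page, and then validates the user-supplied length field against the number of bytes actually copied; with the old two-step fetch, user space could in principle change the length between the copies, and the removed length check also returned -EINVAL without freeing the page. A stand-alone sketch of the validate-against-copied pattern, with simplified stand-ins for copy_from_user() and the SCCB header:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE_SKETCH 4096

struct sccb_header_sketch {
	uint16_t length;	/* user-supplied length of the whole SCCB */
};

/* Stand-in for copy_from_user(): returns the number of bytes NOT copied. */
static size_t copy_from_user_sketch(void *dst, const void *src, size_t n,
				    size_t user_region_size)
{
	size_t ok = n < user_region_size ? n : user_region_size;

	memcpy(dst, src, ok);
	return n - ok;
}

static int check_sccb(const void *user_buf, size_t user_len)
{
	unsigned char page[PAGE_SIZE_SKETCH];
	const struct sccb_header_sketch *h = (const void *)page;
	size_t copied;

	/* One fetch of up to a page, then validate against what arrived. */
	copied = PAGE_SIZE_SKETCH -
		 copy_from_user_sketch(page, user_buf, PAGE_SIZE_SKETCH, user_len);
	if (offsetof(struct sccb_header_sketch, length) + sizeof(h->length) > copied ||
	    h->length > copied)
		return -1;	/* -EFAULT in the driver */
	if (h->length < 8)
		return -2;	/* -EINVAL in the driver */
	return 0;
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct sccb_header_sketch hdr = { .length = 32 };

	memcpy(buf, &hdr, sizeof(hdr));
	printf("sane length      -> %d\n", check_sccb(buf, sizeof(buf)));
	hdr.length = 600;	/* claims more than the user region holds */
	memcpy(buf, &hdr, sizeof(hdr));
	printf("oversized length -> %d\n", check_sccb(buf, sizeof(buf)));
	return 0;
}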
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index f3bb7af4e984..ead83a24bcd1 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -688,6 +688,7 @@ static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
{
struct flowi6 fl;
+ memset(&fl, 0, sizeof(fl));
if (saddr)
memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
if (daddr)
diff --git a/drivers/soc/mediatek/mtk-scpsys.c b/drivers/soc/mediatek/mtk-scpsys.c
index 57e781c71e67..837effe19907 100644
--- a/drivers/soc/mediatek/mtk-scpsys.c
+++ b/drivers/soc/mediatek/mtk-scpsys.c
@@ -491,13 +491,14 @@ static int scpsys_probe(struct platform_device *pdev)
genpd->dev_ops.active_wakeup = scpsys_active_wakeup;
/*
- * With CONFIG_PM disabled turn on all domains to make the
- * hardware usable.
+ * Initially turn on all domains to make the domains usable
+ * with !CONFIG_PM and to get the hardware in sync with the
+ * software. The unused domains will be switched off during
+ * late_init time.
*/
- if (!IS_ENABLED(CONFIG_PM))
- genpd->power_on(genpd);
+ genpd->power_on(genpd);
- pm_genpd_init(genpd, NULL, true);
+ pm_genpd_init(genpd, NULL, false);
}
/*
diff --git a/drivers/staging/media/davinci_vpfe/vpfe_video.c b/drivers/staging/media/davinci_vpfe/vpfe_video.c
index b793c04028a3..be72a8e5f221 100644
--- a/drivers/staging/media/davinci_vpfe/vpfe_video.c
+++ b/drivers/staging/media/davinci_vpfe/vpfe_video.c
@@ -172,9 +172,11 @@ static int vpfe_prepare_pipeline(struct vpfe_video_device *video)
static int vpfe_update_pipe_state(struct vpfe_video_device *video)
{
struct vpfe_pipeline *pipe = &video->pipe;
+ int ret;
- if (vpfe_prepare_pipeline(video))
- return vpfe_prepare_pipeline(video);
+ ret = vpfe_prepare_pipeline(video);
+ if (ret)
+ return ret;
/*
* Find out if there is any input video
@@ -182,9 +184,10 @@ static int vpfe_update_pipe_state(struct vpfe_video_device *video)
*/
if (pipe->input_num == 0) {
pipe->state = VPFE_PIPELINE_STREAM_CONTINUOUS;
- if (vpfe_update_current_ext_subdev(video)) {
+ ret = vpfe_update_current_ext_subdev(video);
+ if (ret) {
pr_err("Invalid external subdev\n");
- return vpfe_update_current_ext_subdev(video);
+ return ret;
}
} else {
pipe->state = VPFE_PIPELINE_STREAM_SINGLESHOT;
@@ -667,6 +670,7 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
struct v4l2_subdev *subdev;
struct v4l2_format format;
struct media_pad *remote;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_enum_fmt\n");
@@ -695,10 +699,11 @@ static int vpfe_enum_fmt(struct file *file, void *priv,
sd_fmt.pad = remote->index;
sd_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
/* get output format of remote subdev */
- if (v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt)) {
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev,
"invalid remote subdev for video node\n");
- return v4l2_subdev_call(subdev, pad, get_fmt, NULL, &sd_fmt);
+ return ret;
}
/* convert to pix format */
mbus.code = sd_fmt.format.code;
@@ -725,6 +730,7 @@ static int vpfe_s_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_fmt\n");
/* If streaming is started, return error */
@@ -733,8 +739,9 @@ static int vpfe_s_fmt(struct file *file, void *priv,
return -EBUSY;
}
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
video->fmt = *fmt;
return 0;
@@ -757,11 +764,13 @@ static int vpfe_try_fmt(struct file *file, void *priv,
struct vpfe_video_device *video = video_drvdata(file);
struct vpfe_device *vpfe_dev = video->vpfe_dev;
struct v4l2_format format;
+ int ret;
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_try_fmt\n");
/* get adjacent subdev's output pad format */
- if (__vpfe_video_get_format(video, &format))
- return __vpfe_video_get_format(video, &format);
+ ret = __vpfe_video_get_format(video, &format);
+ if (ret)
+ return ret;
*fmt = format;
return 0;
@@ -838,8 +847,9 @@ static int vpfe_s_input(struct file *file, void *priv, unsigned int index)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_input\n");
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
/*
* If streaming is started return device busy
* error
@@ -940,8 +950,9 @@ static int vpfe_s_std(struct file *file, void *priv, v4l2_std_id std_id)
v4l2_dbg(1, debug, &vpfe_dev->v4l2_dev, "vpfe_s_std\n");
/* Call decoder driver function to set the standard */
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
sdinfo = video->current_ext_subdev;
/* If streaming is started, return device busy error */
if (video->started) {
@@ -1327,8 +1338,9 @@ static int vpfe_reqbufs(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
if (video->io_usrs != 0) {
v4l2_err(&vpfe_dev->v4l2_dev, "Only one IO user allowed\n");
@@ -1354,10 +1366,11 @@ static int vpfe_reqbufs(struct file *file, void *priv,
q->buf_struct_size = sizeof(struct vpfe_cap_buffer);
q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
- if (vb2_queue_init(q)) {
+ ret = vb2_queue_init(q);
+ if (ret) {
v4l2_err(&vpfe_dev->v4l2_dev, "vb2_queue_init() failed\n");
vb2_dma_contig_cleanup_ctx(vpfe_dev->pdev);
- return vb2_queue_init(q);
+ return ret;
}
fh->io_allowed = 1;
@@ -1533,8 +1546,9 @@ static int vpfe_streamoff(struct file *file, void *priv,
return -EINVAL;
}
- if (mutex_lock_interruptible(&video->lock))
- return mutex_lock_interruptible(&video->lock);
+ ret = mutex_lock_interruptible(&video->lock);
+ if (ret)
+ return ret;
vpfe_stop_capture(video);
ret = vb2_streamoff(&video->buffer_queue, buf_type);
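
Every hunk above replaces the "if (f(...)) return f(...);" pattern with a single call whose result is kept in ret. Calling the function a second time repeats its side effects and may return a different value than the one that was tested; for mutex_lock_interruptible() the retry can even succeed, returning 0 while leaving the mutex locked. A stand-alone sketch of the hazard and of the fixed form (flaky_op() is a made-up stand-in):

#include <stdio.h>

static int attempts;

/* Fails on the first call, succeeds afterwards: think of an interruptible
 * lock that was interrupted once, or a setup step with side effects. */
static int flaky_op(void)
{
	return attempts++ == 0 ? -1 : 0;
}

int main(void)
{
	int ret;

	/* Anti-pattern removed above: the error path re-runs the operation,
	 * so the value returned is not the value that was tested. */
	attempts = 0;
	if (flaky_op())
		ret = flaky_op();	/* second call succeeds; the error is lost */
	else
		ret = 0;
	printf("double call reports %d (failure swallowed)\n", ret);

	/* Fixed form used by the hunks above: call once, test and return it. */
	attempts = 0;
	ret = flaky_op();
	printf("single call reports %d\n", ret);
	return 0;
}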
diff --git a/drivers/staging/rdma/hfi1/TODO b/drivers/staging/rdma/hfi1/TODO
index 05de0dad8762..4c6f1d7d2eaf 100644
--- a/drivers/staging/rdma/hfi1/TODO
+++ b/drivers/staging/rdma/hfi1/TODO
@@ -3,4 +3,4 @@ July, 2015
- Remove unneeded file entries in sysfs
- Remove software processing of IB protocol and place in library for use
by qib, ipath (if still present), hfi1, and eventually soft-roce
-
+- Replace incorrect uAPI
diff --git a/drivers/staging/rdma/hfi1/file_ops.c b/drivers/staging/rdma/hfi1/file_ops.c
index 8396dc5fb6c1..c1c5bf82addb 100644
--- a/drivers/staging/rdma/hfi1/file_ops.c
+++ b/drivers/staging/rdma/hfi1/file_ops.c
@@ -49,6 +49,8 @@
#include <linux/vmalloc.h>
#include <linux/io.h>
+#include <rdma/ib.h>
+
#include "hfi.h"
#include "pio.h"
#include "device.h"
@@ -190,6 +192,10 @@ static ssize_t hfi1_file_write(struct file *fp, const char __user *data,
int uctxt_required = 1;
int must_be_root = 0;
+ /* FIXME: This interface cannot continue out of staging */
+ if (WARN_ON_ONCE(!ib_safe_file_access(fp)))
+ return -EACCES;
+
if (count < sizeof(cmd)) {
ret = -EINVAL;
goto bail;
@@ -791,15 +797,16 @@ static int hfi1_file_close(struct inode *inode, struct file *fp)
spin_unlock_irqrestore(&dd->uctxt_lock, flags);
dd->rcd[uctxt->ctxt] = NULL;
+
+ hfi1_user_exp_rcv_free(fdata);
+ hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
+
uctxt->rcvwait_to = 0;
uctxt->piowait_to = 0;
uctxt->rcvnowait = 0;
uctxt->pionowait = 0;
uctxt->event_flags = 0;
- hfi1_user_exp_rcv_free(fdata);
- hfi1_clear_ctxt_pkey(dd, uctxt->ctxt);
-
hfi1_stats.sps_ctxts--;
if (++dd->freectxts == dd->num_user_contexts)
aspm_enable_all(dd);
@@ -1127,27 +1134,13 @@ bail:
static int user_init(struct file *fp)
{
- int ret;
unsigned int rcvctrl_ops = 0;
struct hfi1_filedata *fd = fp->private_data;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
/* make sure that the context has already been setup */
- if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags)) {
- ret = -EFAULT;
- goto done;
- }
-
- /*
- * Subctxts don't need to initialize anything since master
- * has done it.
- */
- if (fd->subctxt) {
- ret = wait_event_interruptible(uctxt->wait, !test_bit(
- HFI1_CTXT_MASTER_UNINIT,
- &uctxt->event_flags));
- goto expected;
- }
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return -EFAULT;
/* initialize poll variables... */
uctxt->urgent = 0;
@@ -1202,19 +1195,7 @@ static int user_init(struct file *fp)
wake_up(&uctxt->wait);
}
-expected:
- /*
- * Expected receive has to be setup for all processes (including
- * shared contexts). However, it has to be done after the master
- * context has been fully configured as it depends on the
- * eager/expected split of the RcvArray entries.
- * Setting it up here ensures that the subcontexts will be waiting
- * (due to the above wait_event_interruptible() until the master
- * is setup.
- */
- ret = hfi1_user_exp_rcv_init(fp);
-done:
- return ret;
+ return 0;
}
static int get_ctxt_info(struct file *fp, void __user *ubase, __u32 len)
@@ -1261,7 +1242,7 @@ static int setup_ctxt(struct file *fp)
int ret = 0;
/*
- * Context should be set up only once (including allocation and
+ * Context should be set up only once, including allocation and
* programming of eager buffers. This is done if context sharing
* is not requested or by the master process.
*/
@@ -1282,10 +1263,29 @@ static int setup_ctxt(struct file *fp)
if (ret)
goto done;
}
+ } else {
+ ret = wait_event_interruptible(uctxt->wait, !test_bit(
+ HFI1_CTXT_MASTER_UNINIT,
+ &uctxt->event_flags));
+ if (ret)
+ goto done;
}
+
ret = hfi1_user_sdma_alloc_queues(uctxt, fp);
if (ret)
goto done;
+ /*
+ * Expected receive has to be setup for all processes (including
+ * shared contexts). However, it has to be done after the master
+ * context has been fully configured as it depends on the
+ * eager/expected split of the RcvArray entries.
+ * Setting it up here ensures that the subcontexts will be waiting
+ * (due to the above wait_event_interruptible()) until the master
+ * is set up.
+ */
+ ret = hfi1_user_exp_rcv_init(fp);
+ if (ret)
+ goto done;
set_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags);
done:
@@ -1565,29 +1565,8 @@ static loff_t ui_lseek(struct file *filp, loff_t offset, int whence)
{
struct hfi1_devdata *dd = filp->private_data;
- switch (whence) {
- case SEEK_SET:
- break;
- case SEEK_CUR:
- offset += filp->f_pos;
- break;
- case SEEK_END:
- offset = ((dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE) -
- offset;
- break;
- default:
- return -EINVAL;
- }
-
- if (offset < 0)
- return -EINVAL;
-
- if (offset >= (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE)
- return -EINVAL;
-
- filp->f_pos = offset;
-
- return filp->f_pos;
+ return fixed_size_llseek(filp, offset, whence,
+ (dd->kregend - dd->kregbase) + DC8051_DATA_MEM_SIZE);
}
/* NOTE: assumes unsigned long is 8 bytes */
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.c b/drivers/staging/rdma/hfi1/mmu_rb.c
index c7ad0164ea9a..b3f0682a36c9 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.c
+++ b/drivers/staging/rdma/hfi1/mmu_rb.c
@@ -71,6 +71,7 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *,
struct mm_struct *,
unsigned long, unsigned long);
static void mmu_notifier_mem_invalidate(struct mmu_notifier *,
+ struct mm_struct *,
unsigned long, unsigned long);
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *,
unsigned long, unsigned long);
@@ -137,7 +138,7 @@ void hfi1_mmu_rb_unregister(struct rb_root *root)
rbnode = rb_entry(node, struct mmu_rb_node, node);
rb_erase(node, root);
if (handler->ops->remove)
- handler->ops->remove(root, rbnode, false);
+ handler->ops->remove(root, rbnode, NULL);
}
}
@@ -176,7 +177,7 @@ unlock:
return ret;
}
-/* Caller must host handler lock */
+/* Caller must hold handler lock */
static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
unsigned long addr,
unsigned long len)
@@ -200,15 +201,21 @@ static struct mmu_rb_node *__mmu_rb_search(struct mmu_rb_handler *handler,
return node;
}
+/* Caller must *not* hold handler lock. */
static void __mmu_rb_remove(struct mmu_rb_handler *handler,
- struct mmu_rb_node *node, bool arg)
+ struct mmu_rb_node *node, struct mm_struct *mm)
{
+ unsigned long flags;
+
/* Validity of handler and node pointers has been checked by caller. */
hfi1_cdbg(MMU, "Removing node addr 0x%llx, len %u", node->addr,
node->len);
+ spin_lock_irqsave(&handler->lock, flags);
__mmu_int_rb_remove(node, handler->root);
+ spin_unlock_irqrestore(&handler->lock, flags);
+
if (handler->ops->remove)
- handler->ops->remove(handler->root, node, arg);
+ handler->ops->remove(handler->root, node, mm);
}
struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
@@ -231,14 +238,11 @@ struct mmu_rb_node *hfi1_mmu_rb_search(struct rb_root *root, unsigned long addr,
void hfi1_mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node)
{
struct mmu_rb_handler *handler = find_mmu_handler(root);
- unsigned long flags;
if (!handler || !node)
return;
- spin_lock_irqsave(&handler->lock, flags);
- __mmu_rb_remove(handler, node, false);
- spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, NULL);
}
static struct mmu_rb_handler *find_mmu_handler(struct rb_root *root)
@@ -260,7 +264,7 @@ unlock:
static inline void mmu_notifier_page(struct mmu_notifier *mn,
struct mm_struct *mm, unsigned long addr)
{
- mmu_notifier_mem_invalidate(mn, addr, addr + PAGE_SIZE);
+ mmu_notifier_mem_invalidate(mn, mm, addr, addr + PAGE_SIZE);
}
static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
@@ -268,25 +272,31 @@ static inline void mmu_notifier_range_start(struct mmu_notifier *mn,
unsigned long start,
unsigned long end)
{
- mmu_notifier_mem_invalidate(mn, start, end);
+ mmu_notifier_mem_invalidate(mn, mm, start, end);
}
static void mmu_notifier_mem_invalidate(struct mmu_notifier *mn,
+ struct mm_struct *mm,
unsigned long start, unsigned long end)
{
struct mmu_rb_handler *handler =
container_of(mn, struct mmu_rb_handler, mn);
struct rb_root *root = handler->root;
- struct mmu_rb_node *node;
+ struct mmu_rb_node *node, *ptr = NULL;
unsigned long flags;
spin_lock_irqsave(&handler->lock, flags);
- for (node = __mmu_int_rb_iter_first(root, start, end - 1); node;
- node = __mmu_int_rb_iter_next(node, start, end - 1)) {
+ for (node = __mmu_int_rb_iter_first(root, start, end - 1);
+ node; node = ptr) {
+ /* Guard against node removal. */
+ ptr = __mmu_int_rb_iter_next(node, start, end - 1);
hfi1_cdbg(MMU, "Invalidating node addr 0x%llx, len %u",
node->addr, node->len);
- if (handler->ops->invalidate(root, node))
- __mmu_rb_remove(handler, node, true);
+ if (handler->ops->invalidate(root, node)) {
+ spin_unlock_irqrestore(&handler->lock, flags);
+ __mmu_rb_remove(handler, node, mm);
+ spin_lock_irqsave(&handler->lock, flags);
+ }
}
spin_unlock_irqrestore(&handler->lock, flags);
}
diff --git a/drivers/staging/rdma/hfi1/mmu_rb.h b/drivers/staging/rdma/hfi1/mmu_rb.h
index f8523fdb8a18..19a306e83c7d 100644
--- a/drivers/staging/rdma/hfi1/mmu_rb.h
+++ b/drivers/staging/rdma/hfi1/mmu_rb.h
@@ -59,7 +59,8 @@ struct mmu_rb_node {
struct mmu_rb_ops {
bool (*filter)(struct mmu_rb_node *, unsigned long, unsigned long);
int (*insert)(struct rb_root *, struct mmu_rb_node *);
- void (*remove)(struct rb_root *, struct mmu_rb_node *, bool);
+ void (*remove)(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
int (*invalidate)(struct rb_root *, struct mmu_rb_node *);
};
diff --git a/drivers/staging/rdma/hfi1/qp.c b/drivers/staging/rdma/hfi1/qp.c
index 29a5ad28019b..dc9119e1b458 100644
--- a/drivers/staging/rdma/hfi1/qp.c
+++ b/drivers/staging/rdma/hfi1/qp.c
@@ -519,10 +519,12 @@ static void iowait_sdma_drained(struct iowait *wait)
* do the flush work until that QP's
* sdma work has finished.
*/
+ spin_lock(&qp->s_lock);
if (qp->s_flags & RVT_S_WAIT_DMA) {
qp->s_flags &= ~RVT_S_WAIT_DMA;
hfi1_schedule_send(qp);
}
+ spin_unlock(&qp->s_lock);
}
/**
diff --git a/drivers/staging/rdma/hfi1/user_exp_rcv.c b/drivers/staging/rdma/hfi1/user_exp_rcv.c
index 0861e095df8d..8bd56d5c783d 100644
--- a/drivers/staging/rdma/hfi1/user_exp_rcv.c
+++ b/drivers/staging/rdma/hfi1/user_exp_rcv.c
@@ -87,7 +87,8 @@ static u32 find_phys_blocks(struct page **, unsigned, struct tid_pageset *);
static int set_rcvarray_entry(struct file *, unsigned long, u32,
struct tid_group *, struct page **, unsigned);
static int mmu_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void mmu_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int mmu_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static int program_rcvarray(struct file *, unsigned long, struct tid_group *,
struct tid_pageset *, unsigned, u16, struct page **,
@@ -254,6 +255,8 @@ int hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
struct hfi1_ctxtdata *uctxt = fd->uctxt;
struct tid_group *grp, *gptr;
+ if (!test_bit(HFI1_CTXT_SETUP_DONE, &uctxt->event_flags))
+ return 0;
/*
* The notifier would have been removed when the process'es mm
* was freed.
@@ -899,7 +902,7 @@ static int unprogram_rcvarray(struct file *fp, u32 tidinfo,
if (!node || node->rcventry != (uctxt->expected_base + rcventry))
return -EBADF;
if (HFI1_CAP_IS_USET(TID_UNMAP))
- mmu_rb_remove(&fd->tid_rb_root, &node->mmu, false);
+ mmu_rb_remove(&fd->tid_rb_root, &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root, &node->mmu);
@@ -965,7 +968,7 @@ static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
continue;
if (HFI1_CAP_IS_USET(TID_UNMAP))
mmu_rb_remove(&fd->tid_rb_root,
- &node->mmu, false);
+ &node->mmu, NULL);
else
hfi1_mmu_rb_remove(&fd->tid_rb_root,
&node->mmu);
@@ -1032,7 +1035,7 @@ static int mmu_rb_insert(struct rb_root *root, struct mmu_rb_node *node)
}
static void mmu_rb_remove(struct rb_root *root, struct mmu_rb_node *node,
- bool notifier)
+ struct mm_struct *mm)
{
struct hfi1_filedata *fdata =
container_of(root, struct hfi1_filedata, tid_rb_root);
diff --git a/drivers/staging/rdma/hfi1/user_sdma.c b/drivers/staging/rdma/hfi1/user_sdma.c
index ab6b6a42000f..d53a659548e0 100644
--- a/drivers/staging/rdma/hfi1/user_sdma.c
+++ b/drivers/staging/rdma/hfi1/user_sdma.c
@@ -278,7 +278,8 @@ static inline void pq_update(struct hfi1_user_sdma_pkt_q *);
static void user_sdma_free_request(struct user_sdma_request *, bool);
static int pin_vector_pages(struct user_sdma_request *,
struct user_sdma_iovec *);
-static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned);
+static void unpin_vector_pages(struct mm_struct *, struct page **, unsigned,
+ unsigned);
static int check_header_template(struct user_sdma_request *,
struct hfi1_pkt_header *, u32, u32);
static int set_txreq_header(struct user_sdma_request *,
@@ -299,7 +300,8 @@ static int defer_packet_queue(
static void activate_packet_queue(struct iowait *, int);
static bool sdma_rb_filter(struct mmu_rb_node *, unsigned long, unsigned long);
static int sdma_rb_insert(struct rb_root *, struct mmu_rb_node *);
-static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *, bool);
+static void sdma_rb_remove(struct rb_root *, struct mmu_rb_node *,
+ struct mm_struct *);
static int sdma_rb_invalidate(struct rb_root *, struct mmu_rb_node *);
static struct mmu_rb_ops sdma_rb_ops = {
@@ -1063,8 +1065,10 @@ static int pin_vector_pages(struct user_sdma_request *req,
rb_node = hfi1_mmu_rb_search(&pq->sdma_rb_root,
(unsigned long)iovec->iov.iov_base,
iovec->iov.iov_len);
- if (rb_node)
+ if (rb_node && !IS_ERR(rb_node))
node = container_of(rb_node, struct sdma_mmu_node, rb);
+ else
+ rb_node = NULL;
if (!node) {
node = kzalloc(sizeof(*node), GFP_KERNEL);
@@ -1107,7 +1111,8 @@ retry:
goto bail;
}
if (pinned != npages) {
- unpin_vector_pages(current->mm, pages, pinned);
+ unpin_vector_pages(current->mm, pages, node->npages,
+ pinned);
ret = -EFAULT;
goto bail;
}
@@ -1147,9 +1152,9 @@ bail:
}
static void unpin_vector_pages(struct mm_struct *mm, struct page **pages,
- unsigned npages)
+ unsigned start, unsigned npages)
{
- hfi1_release_user_pages(mm, pages, npages, 0);
+ hfi1_release_user_pages(mm, pages + start, npages, 0);
kfree(pages);
}
@@ -1502,7 +1507,7 @@ static void user_sdma_free_request(struct user_sdma_request *req, bool unpin)
&req->pq->sdma_rb_root,
(unsigned long)req->iovs[i].iov.iov_base,
req->iovs[i].iov.iov_len);
- if (!mnode)
+ if (!mnode || IS_ERR(mnode))
continue;
node = container_of(mnode, struct sdma_mmu_node, rb);
@@ -1547,7 +1552,7 @@ static int sdma_rb_insert(struct rb_root *root, struct mmu_rb_node *mnode)
}
static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
- bool notifier)
+ struct mm_struct *mm)
{
struct sdma_mmu_node *node =
container_of(mnode, struct sdma_mmu_node, rb);
@@ -1557,14 +1562,20 @@ static void sdma_rb_remove(struct rb_root *root, struct mmu_rb_node *mnode,
node->pq->n_locked -= node->npages;
spin_unlock(&node->pq->evict_lock);
- unpin_vector_pages(notifier ? NULL : current->mm, node->pages,
+ /*
+ * If mm is set, we are being called by the MMU notifier and we
+ * should not pass a mm_struct to unpin_vector_pages(). This is to
+ * prevent a deadlock when hfi1_release_user_pages() attempts to
+ * take the mmap_sem, which the MMU notifier has already taken.
+ */
+ unpin_vector_pages(mm ? NULL : current->mm, node->pages, 0,
node->npages);
/*
* If called by the MMU notifier, we have to adjust the pinned
* page count ourselves.
*/
- if (notifier)
- current->mm->pinned_vm -= node->npages;
+ if (mm)
+ mm->pinned_vm -= node->npages;
kfree(node);
}
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index 36d07295f8e3..5e820b541506 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -68,12 +68,12 @@ static inline int _step_to_temp(int step)
* Every step equals (1 * 200) / 255 celsius, and finally
* need convert to millicelsius.
*/
- return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
+ return (HISI_TEMP_BASE * 1000 + (step * 200000 / 255));
}
static inline long _temp_to_step(long temp)
{
- return ((temp / 1000 - HISI_TEMP_BASE) * 255 / 200);
+ return ((temp - HISI_TEMP_BASE * 1000) * 255) / 200000;
}
static long hisi_thermal_get_sensor_temp(struct hisi_thermal_data *data,
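
The old helpers divided in whole degrees Celsius (step * 200 / 255) and only then scaled to millicelsius, so every reported temperature was a multiple of 1000 mC; the reworked formulas keep the division in millicelsius. A stand-alone comparison, using -60 degrees C as an assumed value for HISI_TEMP_BASE purely for illustration (the real constant is defined elsewhere in the driver):

#include <stdio.h>

#define HISI_TEMP_BASE (-60)	/* illustrative value only; see the driver */

static int step_to_temp_old(int step)
{
	return (HISI_TEMP_BASE + (step * 200 / 255)) * 1000;
}

static int step_to_temp_new(int step)
{
	return HISI_TEMP_BASE * 1000 + (step * 200000 / 255);
}

int main(void)
{
	/* step 100: 100*200/255 = 78 (truncated), vs 100*200000/255 = 78431 */
	for (int step = 98; step <= 102; step++)
		printf("step %3d: old %6d mC, new %6d mC\n",
		       step, step_to_temp_old(step), step_to_temp_new(step));
	return 0;
}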
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index f1db49625555..5133cd1e10b7 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -959,7 +959,7 @@ static DEVICE_ATTR(sustainable_power, S_IWUSR | S_IRUGO, sustainable_power_show,
struct thermal_zone_device *tz = to_thermal_zone(dev); \
\
if (tz->tzp) \
- return sprintf(buf, "%u\n", tz->tzp->name); \
+ return sprintf(buf, "%d\n", tz->tzp->name); \
else \
return -EIO; \
} \
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 0058d9fbf931..cf0dc51a2690 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -626,7 +626,7 @@ static int pty_unix98_ioctl(struct tty_struct *tty,
*/
static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
- struct inode *ptm_inode, int idx)
+ struct file *file, int idx)
{
/* Master must be open via /dev/ptmx */
return ERR_PTR(-EIO);
@@ -642,12 +642,12 @@ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver,
*/
static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver,
- struct inode *pts_inode, int idx)
+ struct file *file, int idx)
{
struct tty_struct *tty;
mutex_lock(&devpts_mutex);
- tty = devpts_get_priv(pts_inode);
+ tty = devpts_get_priv(file->f_path.dentry);
mutex_unlock(&devpts_mutex);
/* Master must be open before slave */
if (!tty)
@@ -722,7 +722,7 @@ static int ptmx_open(struct inode *inode, struct file *filp)
{
struct pts_fs_info *fsi;
struct tty_struct *tty;
- struct inode *slave_inode;
+ struct dentry *dentry;
int retval;
int index;
@@ -769,14 +769,12 @@ static int ptmx_open(struct inode *inode, struct file *filp)
tty_add_file(tty, filp);
- slave_inode = devpts_pty_new(fsi,
- MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index,
- tty->link);
- if (IS_ERR(slave_inode)) {
- retval = PTR_ERR(slave_inode);
+ dentry = devpts_pty_new(fsi, index, tty->link);
+ if (IS_ERR(dentry)) {
+ retval = PTR_ERR(dentry);
goto err_release;
}
- tty->link->driver_data = slave_inode;
+ tty->link->driver_data = dentry;
retval = ptm_driver->ops->open(tty, filp);
if (retval)
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9b04d72e752e..24d5491ef0da 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1367,12 +1367,12 @@ static ssize_t tty_line_name(struct tty_driver *driver, int index, char *p)
* Locking: tty_mutex must be held. If the tty is found, bump the tty kref.
*/
static struct tty_struct *tty_driver_lookup_tty(struct tty_driver *driver,
- struct inode *inode, int idx)
+ struct file *file, int idx)
{
struct tty_struct *tty;
if (driver->ops->lookup)
- tty = driver->ops->lookup(driver, inode, idx);
+ tty = driver->ops->lookup(driver, file, idx);
else
tty = driver->ttys[idx];
@@ -2040,7 +2040,7 @@ static struct tty_struct *tty_open_by_driver(dev_t device, struct inode *inode,
}
/* check whether we're reopening an existing tty */
- tty = tty_driver_lookup_tty(driver, inode, index);
+ tty = tty_driver_lookup_tty(driver, filp, index);
if (IS_ERR(tty)) {
mutex_unlock(&tty_mutex);
goto out;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index c6f497f16526..d96d423d00e6 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -375,18 +375,22 @@ static void option_instat_callback(struct urb *urb);
#define HAIER_PRODUCT_CE81B 0x10f8
#define HAIER_PRODUCT_CE100 0x2009
-/* Cinterion (formerly Siemens) products */
-#define SIEMENS_VENDOR_ID 0x0681
-#define CINTERION_VENDOR_ID 0x1e2d
+/* Gemalto's Cinterion products (formerly Siemens) */
+#define SIEMENS_VENDOR_ID 0x0681
+#define CINTERION_VENDOR_ID 0x1e2d
+#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
#define CINTERION_PRODUCT_HC25_MDM 0x0047
-#define CINTERION_PRODUCT_HC25_MDMNET 0x0040
+#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
#define CINTERION_PRODUCT_HC28_MDM 0x004C
-#define CINTERION_PRODUCT_HC28_MDMNET 0x004A /* same for HC28J */
#define CINTERION_PRODUCT_EU3_E 0x0051
#define CINTERION_PRODUCT_EU3_P 0x0052
#define CINTERION_PRODUCT_PH8 0x0053
#define CINTERION_PRODUCT_AHXX 0x0055
#define CINTERION_PRODUCT_PLXX 0x0060
+#define CINTERION_PRODUCT_PH8_2RMNET 0x0082
+#define CINTERION_PRODUCT_PH8_AUDIO 0x0083
+#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
+#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -633,6 +637,10 @@ static const struct option_blacklist_info telit_le922_blacklist_usbcfg3 = {
.reserved = BIT(1) | BIT(2) | BIT(3),
};
+static const struct option_blacklist_info cinterion_rmnet2_blacklist = {
+ .reserved = BIT(4) | BIT(5),
+};
+
static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) },
{ USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) },
@@ -1602,7 +1610,79 @@ static const struct usb_device_id option_ids[] = {
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t)&net_intf3_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff42, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff43, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff44, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff45, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff46, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff47, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff48, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff49, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff4f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff50, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff51, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff52, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff53, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff54, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff55, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff56, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff57, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff58, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff59, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff5f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff60, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff61, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff62, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff63, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff64, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff65, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff66, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff67, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff68, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff69, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff6f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff70, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff71, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff72, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff73, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff74, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff75, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff76, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff77, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff78, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff79, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7a, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7b, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7c, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7d, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7e, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff7f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff80, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff81, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff82, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff83, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff84, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff85, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff86, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff87, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff88, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff89, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8a, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8b, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8c, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff8d, 0xff, 0xff, 0xff) },
@@ -1613,6 +1693,61 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff92, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff93, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff94, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff9f, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa6, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa7, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa8, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffa9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaa, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffab, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffac, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffae, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffaf, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb6, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb7, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb8, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffb9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffba, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbb, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbc, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbd, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbe, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffbf, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc6, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc7, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc8, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffc9, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffca, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcb, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcc, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcd, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffce, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffcf, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd0, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd1, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd2, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd3, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd4, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffd5, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffe9, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffec, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xffee, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xfff6, 0xff, 0xff, 0xff) },
@@ -1712,7 +1847,13 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX, 0xff) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX),
.driver_info = (kernel_ulong_t)&net_intf4_blacklist },
- { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_2RMNET, 0xff),
+ .driver_info = (kernel_ulong_t)&cinterion_rmnet2_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8_AUDIO, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_2RMNET, 0xff) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX_AUDIO, 0xff) },
+ { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) },
{ USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 541ead4d8965..85b8517f17a0 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -386,9 +386,7 @@ void ceph_put_mds_session(struct ceph_mds_session *s)
atomic_read(&s->s_ref), atomic_read(&s->s_ref)-1);
if (atomic_dec_and_test(&s->s_ref)) {
if (s->s_auth.authorizer)
- ceph_auth_destroy_authorizer(
- s->s_mdsc->fsc->client->monc.auth,
- s->s_auth.authorizer);
+ ceph_auth_destroy_authorizer(s->s_auth.authorizer);
kfree(s);
}
}
@@ -3900,7 +3898,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
struct ceph_auth_handshake *auth = &s->s_auth;
if (force_new && auth->authorizer) {
- ceph_auth_destroy_authorizer(ac, auth->authorizer);
+ ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer) {
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 0af8e7d70d27..0b2954d7172d 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -604,8 +604,7 @@ void devpts_put_ref(struct pts_fs_info *fsi)
*
* The created inode is returned. Remove it from /dev/pts/ by devpts_pty_kill.
*/
-struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index,
- void *priv)
+struct dentry *devpts_pty_new(struct pts_fs_info *fsi, int index, void *priv)
{
struct dentry *dentry;
struct super_block *sb;
@@ -629,25 +628,21 @@ struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index,
inode->i_uid = opts->setuid ? opts->uid : current_fsuid();
inode->i_gid = opts->setgid ? opts->gid : current_fsgid();
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
- init_special_inode(inode, S_IFCHR|opts->mode, device);
- inode->i_private = priv;
+ init_special_inode(inode, S_IFCHR|opts->mode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index));
sprintf(s, "%d", index);
- inode_lock(d_inode(root));
-
dentry = d_alloc_name(root, s);
if (dentry) {
+ dentry->d_fsdata = priv;
d_add(dentry, inode);
fsnotify_create(d_inode(root), dentry);
} else {
iput(inode);
- inode = ERR_PTR(-ENOMEM);
+ dentry = ERR_PTR(-ENOMEM);
}
- inode_unlock(d_inode(root));
-
- return inode;
+ return dentry;
}
/**
@@ -656,24 +651,10 @@ struct inode *devpts_pty_new(struct pts_fs_info *fsi, dev_t device, int index,
*
* Returns whatever was passed as priv in devpts_pty_new for a given inode.
*/
-void *devpts_get_priv(struct inode *pts_inode)
+void *devpts_get_priv(struct dentry *dentry)
{
- struct dentry *dentry;
- void *priv = NULL;
-
- BUG_ON(pts_inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
- /* Ensure dentry has not been deleted by devpts_pty_kill() */
- dentry = d_find_alias(pts_inode);
- if (!dentry)
- return NULL;
-
- if (pts_inode->i_sb->s_magic == DEVPTS_SUPER_MAGIC)
- priv = pts_inode->i_private;
-
- dput(dentry);
-
- return priv;
+ WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
+ return dentry->d_fsdata;
}
/**
@@ -682,24 +663,14 @@ void *devpts_get_priv(struct inode *pts_inode)
*
* This is an inverse operation of devpts_pty_new.
*/
-void devpts_pty_kill(struct inode *inode)
+void devpts_pty_kill(struct dentry *dentry)
{
- struct super_block *sb = pts_sb_from_inode(inode);
- struct dentry *root = sb->s_root;
- struct dentry *dentry;
+ WARN_ON_ONCE(dentry->d_sb->s_magic != DEVPTS_SUPER_MAGIC);
- BUG_ON(inode->i_rdev == MKDEV(TTYAUX_MAJOR, PTMX_MINOR));
-
- inode_lock(d_inode(root));
-
- dentry = d_find_alias(inode);
-
- drop_nlink(inode);
+ dentry->d_fsdata = NULL;
+ drop_nlink(dentry->d_inode);
d_delete(dentry);
dput(dentry); /* d_alloc_name() in devpts_pty_new() */
- dput(dentry); /* d_find_alias above */
-
- inode_unlock(d_inode(root));
}
static int __init init_devpts_fs(void)
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 9aed6e202201..13719d3f35f8 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -2455,6 +2455,8 @@ int dlm_deref_lockres_done_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&dlm->spinlock);
+ ret = 0;
+
done:
dlm_put(dlm);
return ret;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 229cb546bee0..541583510cfb 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1518,6 +1518,32 @@ static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
return page;
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static struct page *can_gather_numa_stats_pmd(pmd_t pmd,
+ struct vm_area_struct *vma,
+ unsigned long addr)
+{
+ struct page *page;
+ int nid;
+
+ if (!pmd_present(pmd))
+ return NULL;
+
+ page = vm_normal_page_pmd(vma, addr, pmd);
+ if (!page)
+ return NULL;
+
+ if (PageReserved(page))
+ return NULL;
+
+ nid = page_to_nid(page);
+ if (!node_isset(nid, node_states[N_MEMORY]))
+ return NULL;
+
+ return page;
+}
+#endif
+
static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
unsigned long end, struct mm_walk *walk)
{
@@ -1527,14 +1553,14 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
pte_t *orig_pte;
pte_t *pte;
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
- pte_t huge_pte = *(pte_t *)pmd;
struct page *page;
- page = can_gather_numa_stats(huge_pte, vma, addr);
+ page = can_gather_numa_stats_pmd(*pmd, vma, addr);
if (page)
- gather_stats(page, md, pte_dirty(huge_pte),
+ gather_stats(page, md, pmd_dirty(*pmd),
HPAGE_PMD_SIZE/PAGE_SIZE);
spin_unlock(ptl);
return 0;
@@ -1542,6 +1568,7 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
if (pmd_trans_unstable(pmd))
return 0;
+#endif
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
do {
struct page *page = can_gather_numa_stats(*pte, vma, addr);
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index 260d78b587c4..1563265d2097 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -12,9 +12,12 @@
*/
struct ceph_auth_client;
-struct ceph_authorizer;
struct ceph_msg;
+struct ceph_authorizer {
+ void (*destroy)(struct ceph_authorizer *);
+};
+
struct ceph_auth_handshake {
struct ceph_authorizer *authorizer;
void *authorizer_buf;
@@ -62,8 +65,6 @@ struct ceph_auth_client_ops {
struct ceph_auth_handshake *auth);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a, size_t len);
- void (*destroy_authorizer)(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
int peer_type);
@@ -112,8 +113,7 @@ extern int ceph_auth_is_authenticated(struct ceph_auth_client *ac);
extern int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *auth);
-extern void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a);
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
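With the destroy callback embedded in struct ceph_authorizer, the remaining exported helper no longer needs a ceph_auth_client. Its body lives in the libceph part of the series and is not shown in this diff; a minimal sketch of what the new prototype implies:

/* assumed shape of net/ceph/auth.c after this change */
void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
{
	a->destroy(a);	/* dispatch to whichever auth protocol allocated it */
}

Callers such as ceph_put_mds_session() above can then drop the ac argument entirely.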
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 4343df806710..cbf460927c42 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -16,7 +16,6 @@ struct ceph_msg;
struct ceph_snap_context;
struct ceph_osd_request;
struct ceph_osd_client;
-struct ceph_authorizer;
/*
* completion callback for async writepages
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 3e39ae5bc799..5b17de62c962 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -444,6 +444,7 @@ struct cgroup_subsys {
int (*can_attach)(struct cgroup_taskset *tset);
void (*cancel_attach)(struct cgroup_taskset *tset);
void (*attach)(struct cgroup_taskset *tset);
+ void (*post_attach)(void);
int (*can_fork)(struct task_struct *task);
void (*cancel_fork)(struct task_struct *task);
void (*fork)(struct task_struct *task);
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index fea160ee5803..85a868ccb493 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,8 +137,6 @@ static inline void set_mems_allowed(nodemask_t nodemask)
task_unlock(current);
}
-extern void cpuset_post_attach_flush(void);
-
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -245,10 +243,6 @@ static inline bool read_mems_allowed_retry(unsigned int seq)
return false;
}
-static inline void cpuset_post_attach_flush(void)
-{
-}
-
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 358a4db72a27..5871f292b596 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -27,11 +27,11 @@ int devpts_new_index(struct pts_fs_info *);
void devpts_kill_index(struct pts_fs_info *, int);
/* mknod in devpts */
-struct inode *devpts_pty_new(struct pts_fs_info *, dev_t, int, void *);
+struct dentry *devpts_pty_new(struct pts_fs_info *, int, void *);
/* get private structure */
-void *devpts_get_priv(struct inode *pts_inode);
+void *devpts_get_priv(struct dentry *);
/* unlink */
-void devpts_pty_kill(struct inode *inode);
+void devpts_pty_kill(struct dentry *);
#endif
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 7008623e24b1..d7b9e5346fba 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -152,6 +152,7 @@ static inline bool is_huge_zero_pmd(pmd_t pmd)
}
struct page *get_huge_zero_page(void);
+void put_huge_zero_page(void);
#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
@@ -208,6 +209,10 @@ static inline bool is_huge_zero_page(struct page *page)
return false;
}
+static inline void put_huge_zero_page(void)
+{
+ BUILD_BUG();
+}
static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
unsigned long addr, pmd_t *pmd, int flags)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index d026b190c530..d10ef06971b5 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -196,9 +196,11 @@ struct lock_list {
* We record lock dependency chains, so that we can cache them:
*/
struct lock_chain {
- u8 irq_context;
- u8 depth;
- u16 base;
+ /* see BUILD_BUG_ON()s in lookup_chain_cache() */
+ unsigned int irq_context : 2,
+ depth : 6,
+ base : 24;
+ /* 4 byte hole */
struct hlist_node entry;
u64 chain_key;
};
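The three lock_chain fields now share one 32-bit word instead of a u8/u8/u16 layout. A quick sketch of the bit budget; the array-size constraints are the ones enforced by the BUILD_BUG_ON()s added to lookup_chain_cache() later in this patch:

/* irq_context:2 + depth:6 + base:24 packs exactly into one unsigned int */
_Static_assert(2 + 6 + 24 == 32, "lock_chain bitfields fill 32 bits");
/* base:24        -> chain_hlocks[] may hold at most 1 << 24 entries     */
/* depth:6        -> at most 1 << 6 held locks can be recorded per chain */
/* irq_context:2  -> hardirq/softirq state, see task_irq_context() below */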
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 8156e3c9239c..b3575f392492 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -392,6 +392,17 @@ enum {
MLX5_CAP_OFF_CMDIF_CSUM = 46,
};
+enum {
+ /*
+ * Max wqe size for rdma read is 512 bytes, so this
+ * limits our max_sge_rd as the wqe needs to fit:
+ * - ctrl segment (16 bytes)
+ * - rdma segment (16 bytes)
+ * - scatter elements (16 bytes each)
+ */
+ MLX5_MAX_SGE_RD = (512 - 16 - 16) / 16
+};
+
struct mlx5_inbox_hdr {
__be16 opcode;
u8 rsvd[4];
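MLX5_MAX_SGE_RD simply encodes the budget spelled out in the comment: a 512-byte RDMA-read WQE minus one 16-byte ctrl segment and one 16-byte rdma segment leaves room for 30 scatter elements of 16 bytes each. A compile-time restatement of that arithmetic (numbers taken from the comment, not independently from the hardware spec):

_Static_assert((512 - 16 - 16) / 16 == 30,
	       "a 512-byte RDMA read WQE holds at most 30 scatter elements");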
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index dcd5ac8d3b14..369c837d40f5 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -519,8 +519,9 @@ enum mlx5_device_state {
};
enum mlx5_interface_state {
- MLX5_INTERFACE_STATE_DOWN,
- MLX5_INTERFACE_STATE_UP,
+ MLX5_INTERFACE_STATE_DOWN = BIT(0),
+ MLX5_INTERFACE_STATE_UP = BIT(1),
+ MLX5_INTERFACE_STATE_SHUTDOWN = BIT(2),
};
enum mlx5_pci_status {
@@ -544,7 +545,7 @@ struct mlx5_core_dev {
enum mlx5_device_state state;
/* sync interface state */
struct mutex intf_state_mutex;
- enum mlx5_interface_state interface_state;
+ unsigned long intf_state;
void (*event) (struct mlx5_core_dev *dev,
enum mlx5_dev_event event,
unsigned long param);
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index a1d145abd4eb..b30250ab7604 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -54,9 +54,9 @@ int mlx5_set_port_admin_status(struct mlx5_core_dev *dev,
int mlx5_query_port_admin_status(struct mlx5_core_dev *dev,
enum mlx5_port_status *status);
-int mlx5_set_port_mtu(struct mlx5_core_dev *dev, int mtu, u8 port);
-void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, int *max_mtu, u8 port);
-void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, int *oper_mtu,
+int mlx5_set_port_mtu(struct mlx5_core_dev *dev, u16 mtu, u8 port);
+void mlx5_query_port_max_mtu(struct mlx5_core_dev *dev, u16 *max_mtu, u8 port);
+void mlx5_query_port_oper_mtu(struct mlx5_core_dev *dev, u16 *oper_mtu,
u8 port);
int mlx5_query_port_vl_hw_cap(struct mlx5_core_dev *dev,
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index bd93e6323603..301da4a5e6bf 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -45,6 +45,8 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
u16 vport, u8 *addr);
+int mlx5_query_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 *mtu);
+int mlx5_modify_nic_vport_mtu(struct mlx5_core_dev *mdev, u16 mtu);
int mlx5_query_nic_vport_system_image_guid(struct mlx5_core_dev *mdev,
u64 *system_image_guid);
int mlx5_query_nic_vport_node_guid(struct mlx5_core_dev *mdev, u64 *node_guid);
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a55e5be0894f..864d7221de84 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1031,6 +1031,8 @@ static inline bool page_mapped(struct page *page)
page = compound_head(page);
if (atomic_read(compound_mapcount_ptr(page)) >= 0)
return true;
+ if (PageHuge(page))
+ return false;
for (i = 0; i < hpage_nr_pages(page); i++) {
if (atomic_read(&page[i]._mapcount) >= 0)
return true;
@@ -1138,6 +1140,8 @@ struct zap_details {
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd);
int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
unsigned long size);
diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
index 161052477f77..b742b5e47cc2 100644
--- a/include/linux/tty_driver.h
+++ b/include/linux/tty_driver.h
@@ -7,7 +7,7 @@
* defined; unless noted otherwise, they are optional, and can be
* filled in with a null pointer.
*
- * struct tty_struct * (*lookup)(struct tty_driver *self, int idx)
+ * struct tty_struct * (*lookup)(struct tty_driver *self, struct file *, int idx)
*
* Return the tty device corresponding to idx, NULL if there is not
* one currently in use and an ERR_PTR value on error. Called under
@@ -250,7 +250,7 @@ struct serial_icounter_struct;
struct tty_operations {
struct tty_struct * (*lookup)(struct tty_driver *driver,
- struct inode *inode, int idx);
+ struct file *filp, int idx);
int (*install)(struct tty_driver *driver, struct tty_struct *tty);
void (*remove)(struct tty_driver *driver, struct tty_struct *tty);
int (*open)(struct tty_struct * tty, struct file * filp);
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 8a0f55b6c2ba..88e3ab496e8f 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -375,6 +375,9 @@ struct vb2_ops {
/**
* struct vb2_ops - driver-specific callbacks
*
+ * @verify_planes_array: Verify that a given user space structure contains
+ * enough planes for the buffer. This is called
+ * for each dequeued buffer.
* @fill_user_buffer: given a vb2_buffer fill in the userspace structure.
* For V4L2 this is a struct v4l2_buffer.
* @fill_vb2_buffer: given a userspace structure, fill in the vb2_buffer.
@@ -384,6 +387,7 @@ struct vb2_ops {
* the vb2_buffer struct.
*/
struct vb2_buf_ops {
+ int (*verify_planes_array)(struct vb2_buffer *vb, const void *pb);
void (*fill_user_buffer)(struct vb2_buffer *vb, void *pb);
int (*fill_vb2_buffer)(struct vb2_buffer *vb, const void *pb,
struct vb2_plane *planes);
@@ -400,6 +404,9 @@ struct vb2_buf_ops {
* @fileio_read_once: report EOF after reading the first buffer
* @fileio_write_immediately: queue buffer after each write() call
* @allow_zero_bytesused: allow bytesused == 0 to be passed to the driver
+ * @quirk_poll_must_check_waiting_for_buffers: Return POLLERR at poll when QBUF
+ * has not been called. This is a vb1 idiom that has also
+ * been adopted by vb2.
* @lock: pointer to a mutex that protects the vb2_queue struct. The
* driver can set this to a mutex to let the v4l2 core serialize
* the queuing ioctls. If the driver wants to handle locking
@@ -463,6 +470,7 @@ struct vb2_queue {
unsigned fileio_read_once:1;
unsigned fileio_write_immediately:1;
unsigned allow_zero_bytesused:1;
+ unsigned quirk_poll_must_check_waiting_for_buffers:1;
struct mutex *lock;
void *owner;
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index d451122e8404..51d77b2ce2b2 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -54,6 +54,8 @@ struct switchdev_attr {
struct net_device *orig_dev;
enum switchdev_attr_id id;
u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
union {
struct netdev_phys_item_id ppid; /* PORT_PARENT_ID */
u8 stp_state; /* PORT_STP_STATE */
@@ -75,6 +77,8 @@ struct switchdev_obj {
struct net_device *orig_dev;
enum switchdev_obj_id id;
u32 flags;
+ void *complete_priv;
+ void (*complete)(struct net_device *dev, int err, void *priv);
};
/* SWITCHDEV_OBJ_ID_PORT_VLAN */
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index cf8f9e700e48..a6b93706b0fc 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -34,6 +34,7 @@
#define _RDMA_IB_H
#include <linux/types.h>
+#include <linux/sched.h>
struct ib_addr {
union {
@@ -86,4 +87,19 @@ struct sockaddr_ib {
__u64 sib_scope_id;
};
+/*
+ * The IB interfaces that use write() as bi-directional ioctl() are
+ * fundamentally unsafe, since there are lots of ways to trigger "write()"
+ * calls from various contexts with elevated privileges. That includes the
+ * traditional suid executable error message writes, but also various kernel
+ * interfaces that can write to file descriptors.
+ *
+ * This function provides protection for the legacy API by restricting the
+ * calling context.
+ */
+static inline bool ib_safe_file_access(struct file *filp)
+{
+ return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+}
+
#endif /* _RDMA_IB_H */
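ib_safe_file_access() is intended to be called at the top of the legacy write() entry points in the uverbs/ucm/ucma drivers, which are patched elsewhere in this series. A hedged sketch of such a call site; example_write() and its surrounding driver are hypothetical, only the helper itself comes from this patch:

static ssize_t example_write(struct file *filp, const char __user *buf,
			     size_t len, loff_t *ppos)
{
	/* reject writes whose credentials or address limit changed after open() */
	if (!ib_safe_file_access(filp)) {
		pr_err_once("example: unsafe write() caller rejected\n");
		return -EACCES;
	}
	/* ... parse the user command from buf as before ... */
	return len;
}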
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index fa341fcb5829..f5842bcd9c94 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -9,7 +9,7 @@
#ifdef CONFIG_SND_HDA_I915
int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
-int snd_hdac_get_display_clk(struct hdac_bus *bus);
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid, int rate);
int snd_hdac_acomp_get_eld(struct hdac_bus *bus, hda_nid_t nid,
bool *audio_enabled, char *buffer, int max_bytes);
@@ -25,9 +25,8 @@ static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
{
return 0;
}
-static inline int snd_hdac_get_display_clk(struct hdac_bus *bus)
+static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
- return 0;
}
static inline int snd_hdac_sync_audio_rate(struct hdac_bus *bus, hda_nid_t nid,
int rate)
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 26b0d1e3e3e7..4c58d9917aa4 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -19,8 +19,8 @@
#define MACSEC_MAX_KEY_LEN 128
-#define DEFAULT_CIPHER_ID 0x0080020001000001ULL
-#define DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ID 0x0080020001000001ULL
+#define MACSEC_DEFAULT_CIPHER_ALT 0x0080C20001000001ULL
#define MACSEC_MIN_ICV_LEN 8
#define MACSEC_MAX_ICV_LEN 32
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index c039f1d68a09..086168e18ca8 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -183,7 +183,8 @@
#define V4L2_DV_BT_CEA_3840X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1276, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -191,14 +192,16 @@
#define V4L2_DV_BT_CEA_3840X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -206,14 +209,16 @@
#define V4L2_DV_BT_CEA_3840X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 1056, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_3840X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(3840, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(3840, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 176, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -221,7 +226,8 @@
#define V4L2_DV_BT_CEA_4096X2160P24 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 1020, 88, 296, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -229,14 +235,16 @@
#define V4L2_DV_BT_CEA_4096X2160P25 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P30 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
297000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
@@ -244,14 +252,16 @@
#define V4L2_DV_BT_CEA_4096X2160P50 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 968, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, V4L2_DV_FL_IS_CE_VIDEO) \
}
#define V4L2_DV_BT_CEA_4096X2160P60 { \
.type = V4L2_DV_BT_656_1120, \
- V4L2_INIT_BT_TIMINGS(4096, 2160, 0, V4L2_DV_HSYNC_POS_POL, \
+ V4L2_INIT_BT_TIMINGS(4096, 2160, 0, \
+ V4L2_DV_HSYNC_POS_POL | V4L2_DV_VSYNC_POS_POL, \
594000000, 88, 88, 128, 8, 10, 72, 0, 0, 0, \
V4L2_DV_BT_STD_CEA861, \
V4L2_DV_FL_CAN_REDUCE_FPS | V4L2_DV_FL_IS_CE_VIDEO) \
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 618ef77c302a..db2574e7b8b0 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2030,7 +2030,6 @@ static int replace_map_fd_with_map_ptr(struct verifier_env *env)
if (IS_ERR(map)) {
verbose("fd %d is not pointing to valid bpf_map\n",
insn->imm);
- fdput(f);
return PTR_ERR(map);
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 671dc05c0b0f..909a7d31ffd3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2825,9 +2825,10 @@ static ssize_t __cgroup_procs_write(struct kernfs_open_file *of, char *buf,
size_t nbytes, loff_t off, bool threadgroup)
{
struct task_struct *tsk;
+ struct cgroup_subsys *ss;
struct cgroup *cgrp;
pid_t pid;
- int ret;
+ int ssid, ret;
if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
return -EINVAL;
@@ -2875,8 +2876,10 @@ out_unlock_rcu:
rcu_read_unlock();
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
+ for_each_subsys(ss, ssid)
+ if (ss->post_attach)
+ ss->post_attach();
cgroup_kn_unlock(of->kn);
- cpuset_post_attach_flush();
return ret ?: nbytes;
}
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 00ab5c2b7c5b..1902956baba1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -58,7 +58,6 @@
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <linux/mutex.h>
-#include <linux/workqueue.h>
#include <linux/cgroup.h>
#include <linux/wait.h>
@@ -1016,7 +1015,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
}
}
-void cpuset_post_attach_flush(void)
+static void cpuset_post_attach(void)
{
flush_workqueue(cpuset_migrate_mm_wq);
}
@@ -2087,6 +2086,7 @@ struct cgroup_subsys cpuset_cgrp_subsys = {
.can_attach = cpuset_can_attach,
.cancel_attach = cpuset_cancel_attach,
.attach = cpuset_attach,
+ .post_attach = cpuset_post_attach,
.bind = cpuset_bind,
.legacy_cftypes = files,
.early_init = true,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 52bedc5a5aaa..4e2ebf6f2f1f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -412,7 +412,8 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
if (ret || !write)
return ret;
- if (sysctl_perf_cpu_time_max_percent == 100) {
+ if (sysctl_perf_cpu_time_max_percent == 100 ||
+ sysctl_perf_cpu_time_max_percent == 0) {
printk(KERN_WARNING
"perf: Dynamic interrupt throttling disabled, can hang your system!\n");
WRITE_ONCE(perf_sample_allowed_ns, 0);
@@ -1105,6 +1106,7 @@ static void put_ctx(struct perf_event_context *ctx)
* function.
*
* Lock order:
+ * cred_guard_mutex
* task_struct::perf_event_mutex
* perf_event_context::mutex
* perf_event::child_mutex;
@@ -3420,7 +3422,6 @@ static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
struct task_struct *task;
- int err;
rcu_read_lock();
if (!vpid)
@@ -3434,16 +3435,7 @@ find_lively_task_by_vpid(pid_t vpid)
if (!task)
return ERR_PTR(-ESRCH);
- /* Reuse ptrace permission checks for now. */
- err = -EACCES;
- if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
- goto errout;
-
return task;
-errout:
- put_task_struct(task);
- return ERR_PTR(err);
-
}
/*
@@ -8413,6 +8405,24 @@ SYSCALL_DEFINE5(perf_event_open,
get_online_cpus();
+ if (task) {
+ err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
+ if (err)
+ goto err_cpus;
+
+ /*
+ * Reuse ptrace permission checks for now.
+ *
+ * We must hold cred_guard_mutex across this and any potential
+ * perf_install_in_context() call for this new event to
+ * serialize against exec() altering our credentials (and the
+ * perf_event_exit_task() that could imply).
+ */
+ err = -EACCES;
+ if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+ goto err_cred;
+ }
+
if (flags & PERF_FLAG_PID_CGROUP)
cgroup_fd = pid;
@@ -8420,7 +8430,7 @@ SYSCALL_DEFINE5(perf_event_open,
NULL, NULL, cgroup_fd);
if (IS_ERR(event)) {
err = PTR_ERR(event);
- goto err_cpus;
+ goto err_cred;
}
if (is_sampling_event(event)) {
@@ -8479,11 +8489,6 @@ SYSCALL_DEFINE5(perf_event_open,
goto err_context;
}
- if (task) {
- put_task_struct(task);
- task = NULL;
- }
-
/*
* Look up the group leader (we will attach this event to it):
*/
@@ -8581,6 +8586,11 @@ SYSCALL_DEFINE5(perf_event_open,
WARN_ON_ONCE(ctx->parent_ctx);
+ /*
+ * This is the point of no return; we cannot fail hereafter. This is
+ * where we start modifying current state.
+ */
+
if (move_group) {
/*
* See perf_event_ctx_lock() for comments on the details
@@ -8652,6 +8662,11 @@ SYSCALL_DEFINE5(perf_event_open,
mutex_unlock(&gctx->mutex);
mutex_unlock(&ctx->mutex);
+ if (task) {
+ mutex_unlock(&task->signal->cred_guard_mutex);
+ put_task_struct(task);
+ }
+
put_online_cpus();
mutex_lock(&current->perf_event_mutex);
@@ -8684,6 +8699,9 @@ err_alloc:
*/
if (!event_file)
free_event(event);
+err_cred:
+ if (task)
+ mutex_unlock(&task->signal->cred_guard_mutex);
err_cpus:
put_online_cpus();
err_task:
@@ -8968,6 +8986,9 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
/*
* When a child task exits, feed back event values to parent events.
+ *
+ * Can be called with cred_guard_mutex held when called from
+ * install_exec_creds().
*/
void perf_event_exit_task(struct task_struct *child)
{
diff --git a/kernel/kcov.c b/kernel/kcov.c
index 3efbee0834a8..a02f2dddd1d7 100644
--- a/kernel/kcov.c
+++ b/kernel/kcov.c
@@ -1,5 +1,6 @@
#define pr_fmt(fmt) "kcov: " fmt
+#define DISABLE_BRANCH_PROFILING
#include <linux/compiler.h>
#include <linux/types.h>
#include <linux/file.h>
@@ -43,7 +44,7 @@ struct kcov {
* Entry point from instrumented code.
* This is called once per basic-block/edge.
*/
-void __sanitizer_cov_trace_pc(void)
+void notrace __sanitizer_cov_trace_pc(void)
{
struct task_struct *t;
enum kcov_mode mode;
diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c
index 8d34308ea449..1391d3ee3b86 100644
--- a/kernel/kexec_core.c
+++ b/kernel/kexec_core.c
@@ -1415,6 +1415,9 @@ static int __init crash_save_vmcoreinfo_init(void)
VMCOREINFO_OFFSET(page, lru);
VMCOREINFO_OFFSET(page, _mapcount);
VMCOREINFO_OFFSET(page, private);
+ VMCOREINFO_OFFSET(page, compound_dtor);
+ VMCOREINFO_OFFSET(page, compound_order);
+ VMCOREINFO_OFFSET(page, compound_head);
VMCOREINFO_OFFSET(pglist_data, node_zones);
VMCOREINFO_OFFSET(pglist_data, nr_zones);
#ifdef CONFIG_FLAT_NODE_MEM_MAP
@@ -1447,8 +1450,8 @@ static int __init crash_save_vmcoreinfo_init(void)
#ifdef CONFIG_X86
VMCOREINFO_NUMBER(KERNEL_IMAGE_SIZE);
#endif
-#ifdef CONFIG_HUGETLBFS
- VMCOREINFO_SYMBOL(free_huge_page);
+#ifdef CONFIG_HUGETLB_PAGE
+ VMCOREINFO_NUMBER(HUGETLB_PAGE_DTOR);
#endif
arch_crash_save_vmcoreinfo();
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index ed9410936a22..78c1c0ee6dc1 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2176,15 +2176,37 @@ cache_hit:
chain->irq_context = hlock->irq_context;
i = get_first_held_lock(curr, hlock);
chain->depth = curr->lockdep_depth + 1 - i;
+
+ BUILD_BUG_ON((1UL << 24) <= ARRAY_SIZE(chain_hlocks));
+ BUILD_BUG_ON((1UL << 6) <= ARRAY_SIZE(curr->held_locks));
+ BUILD_BUG_ON((1UL << 8*sizeof(chain_hlocks[0])) <= ARRAY_SIZE(lock_classes));
+
if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
chain->base = nr_chain_hlocks;
- nr_chain_hlocks += chain->depth;
for (j = 0; j < chain->depth - 1; j++, i++) {
int lock_id = curr->held_locks[i].class_idx - 1;
chain_hlocks[chain->base + j] = lock_id;
}
chain_hlocks[chain->base + j] = class - lock_classes;
}
+
+ if (nr_chain_hlocks < MAX_LOCKDEP_CHAIN_HLOCKS)
+ nr_chain_hlocks += chain->depth;
+
+#ifdef CONFIG_DEBUG_LOCKDEP
+ /*
+ * Important for check_no_collision().
+ */
+ if (unlikely(nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)) {
+ if (debug_locks_off_graph_unlock())
+ return 0;
+
+ print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+ dump_stack();
+ return 0;
+ }
+#endif
+
hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
@@ -2932,6 +2954,11 @@ static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
return 1;
}
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 2 * !!task->hardirq_context + !!task->softirq_context;
+}
+
static int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
@@ -2940,8 +2967,6 @@ static int separate_irq_context(struct task_struct *curr,
/*
* Keep track of points where we cross into an interrupt context:
*/
- hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
- curr->softirq_context;
if (depth) {
struct held_lock *prev_hlock;
@@ -2973,6 +2998,11 @@ static inline int mark_irqflags(struct task_struct *curr,
return 1;
}
+static inline unsigned int task_irq_context(struct task_struct *task)
+{
+ return 0;
+}
+
static inline int separate_irq_context(struct task_struct *curr,
struct held_lock *hlock)
{
@@ -3241,6 +3271,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
hlock->acquire_ip = ip;
hlock->instance = lock;
hlock->nest_lock = nest_lock;
+ hlock->irq_context = task_irq_context(curr);
hlock->trylock = trylock;
hlock->read = read;
hlock->check = check;
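task_irq_context(), added above and now stored into hlock->irq_context at acquire time, packs the current hard/soft interrupt state into the two irq_context bits shared by held_lock and lock_chain. The encoding, derived directly from the expression 2 * !!hardirq_context + !!softirq_context:

/*
 * hardirq_context  softirq_context  ->  task_irq_context()
 *        0                0                      0   (process context)
 *        0               >0                      1   (softirq)
 *       >0                0                      2   (hardirq)
 *       >0               >0                      3   (hardirq nested in softirq)
 */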
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index dbb61a302548..a0f61effad25 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -141,6 +141,8 @@ static int lc_show(struct seq_file *m, void *v)
int i;
if (v == SEQ_START_TOKEN) {
+ if (nr_chain_hlocks > MAX_LOCKDEP_CHAIN_HLOCKS)
+ seq_printf(m, "(buggered) ");
seq_printf(m, "all lock chains:\n");
return 0;
}
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2232ae3e3ad6..3bfdff06eea7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -666,6 +666,35 @@ static void set_work_pool_and_clear_pending(struct work_struct *work,
*/
smp_wmb();
set_work_data(work, (unsigned long)pool_id << WORK_OFFQ_POOL_SHIFT, 0);
+ /*
+ * The following mb guarantees that previous clear of a PENDING bit
+ * will not be reordered with any speculative LOADS or STORES from
+ * work->current_func, which is executed afterwards. This possible
+ * reordering can lead to a missed execution on an attempt to queue
+ * the same @work. E.g. consider this case:
+ *
+ * CPU#0 CPU#1
+ * ---------------------------- --------------------------------
+ *
+ * 1 STORE event_indicated
+ * 2 queue_work_on() {
+ * 3 test_and_set_bit(PENDING)
+ * 4 } set_..._and_clear_pending() {
+ * 5 set_work_data() # clear bit
+ * 6 smp_mb()
+ * 7 work->current_func() {
+ * 8 LOAD event_indicated
+ * }
+ *
+ * Without an explicit full barrier speculative LOAD on line 8 can
+ * be executed before CPU#0 does STORE on line 1. If that happens,
+ * CPU#0 observes the PENDING bit is still set and new execution of
+ * a @work is not queued, in the hope that CPU#1 will eventually
+ * finish the queued @work. Meanwhile CPU#1 does not see
+ * event_indicated is set, because speculative LOAD was executed
+ * before actual STORE.
+ */
+ smp_mb();
}
static void clear_work_data(struct work_struct *work)
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index 654c9d87e83a..9e0b0315a724 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -210,10 +210,6 @@ depot_stack_handle_t depot_save_stack(struct stack_trace *trace,
goto fast_exit;
hash = hash_stack(trace->entries, trace->nr_entries);
- /* Bad luck, we won't store this stack. */
- if (hash == 0)
- goto exit;
-
bucket = &stack_table[hash & STACK_HASH_MASK];
/*
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 86f9f8b82f8e..df67b53ae3c5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -232,7 +232,7 @@ retry:
return READ_ONCE(huge_zero_page);
}
-static void put_huge_zero_page(void)
+void put_huge_zero_page(void)
{
/*
* Counter should never go to zero here. Only shrinker can put
@@ -1684,12 +1684,12 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
if (vma_is_dax(vma)) {
spin_unlock(ptl);
if (is_huge_zero_pmd(orig_pmd))
- put_huge_zero_page();
+ tlb_remove_page(tlb, pmd_page(orig_pmd));
} else if (is_huge_zero_pmd(orig_pmd)) {
pte_free(tlb->mm, pgtable_trans_huge_withdraw(tlb->mm, pmd));
atomic_long_dec(&tlb->mm->nr_ptes);
spin_unlock(ptl);
- put_huge_zero_page();
+ tlb_remove_page(tlb, pmd_page(orig_pmd));
} else {
struct page *page = pmd_page(orig_pmd);
page_remove_rmap(page, true);
@@ -1960,10 +1960,9 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
* page fault if needed.
*/
return 0;
- if (vma->vm_ops)
+ if (vma->vm_ops || (vm_flags & VM_NO_THP))
/* khugepaged not yet working on file or special mappings */
return 0;
- VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
@@ -2352,8 +2351,7 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
return false;
if (is_vma_temporary_stack(vma))
return false;
- VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma);
- return true;
+ return !(vma->vm_flags & VM_NO_THP);
}
static void collapse_huge_page(struct mm_struct *mm,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 36db05fa8acb..fe787f5c41bd 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -207,6 +207,7 @@ static void mem_cgroup_oom_notify(struct mem_cgroup *memcg);
/* "mc" and its members are protected by cgroup_mutex */
static struct move_charge_struct {
spinlock_t lock; /* for from, to */
+ struct mm_struct *mm;
struct mem_cgroup *from;
struct mem_cgroup *to;
unsigned long flags;
@@ -4667,6 +4668,8 @@ static void __mem_cgroup_clear_mc(void)
static void mem_cgroup_clear_mc(void)
{
+ struct mm_struct *mm = mc.mm;
+
/*
* we must clear moving_task before waking up waiters at the end of
* task migration.
@@ -4676,7 +4679,10 @@ static void mem_cgroup_clear_mc(void)
spin_lock(&mc.lock);
mc.from = NULL;
mc.to = NULL;
+ mc.mm = NULL;
spin_unlock(&mc.lock);
+
+ mmput(mm);
}
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4733,6 +4739,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
VM_BUG_ON(mc.moved_swap);
spin_lock(&mc.lock);
+ mc.mm = mm;
mc.from = from;
mc.to = memcg;
mc.flags = move_flags;
@@ -4742,8 +4749,9 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
ret = mem_cgroup_precharge_mc(mm);
if (ret)
mem_cgroup_clear_mc();
+ } else {
+ mmput(mm);
}
- mmput(mm);
return ret;
}
@@ -4852,11 +4860,11 @@ put: /* get_mctgt_type() gets the page */
return ret;
}
-static void mem_cgroup_move_charge(struct mm_struct *mm)
+static void mem_cgroup_move_charge(void)
{
struct mm_walk mem_cgroup_move_charge_walk = {
.pmd_entry = mem_cgroup_move_charge_pte_range,
- .mm = mm,
+ .mm = mc.mm,
};
lru_add_drain_all();
@@ -4868,7 +4876,7 @@ static void mem_cgroup_move_charge(struct mm_struct *mm)
atomic_inc(&mc.from->moving_account);
synchronize_rcu();
retry:
- if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
+ if (unlikely(!down_read_trylock(&mc.mm->mmap_sem))) {
/*
* Someone who is holding the mmap_sem might be waiting in
* waitq. So we cancel all extra charges, wake up all waiters,
@@ -4885,23 +4893,16 @@ retry:
* additional charge, the page walk just aborts.
*/
walk_page_range(0, ~0UL, &mem_cgroup_move_charge_walk);
- up_read(&mm->mmap_sem);
+ up_read(&mc.mm->mmap_sem);
atomic_dec(&mc.from->moving_account);
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
- struct cgroup_subsys_state *css;
- struct task_struct *p = cgroup_taskset_first(tset, &css);
- struct mm_struct *mm = get_task_mm(p);
-
- if (mm) {
- if (mc.to)
- mem_cgroup_move_charge(mm);
- mmput(mm);
- }
- if (mc.to)
+ if (mc.to) {
+ mem_cgroup_move_charge();
mem_cgroup_clear_mc();
+ }
}
#else /* !CONFIG_MMU */
static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
@@ -4911,7 +4912,7 @@ static int mem_cgroup_can_attach(struct cgroup_taskset *tset)
static void mem_cgroup_cancel_attach(struct cgroup_taskset *tset)
{
}
-static void mem_cgroup_move_task(struct cgroup_taskset *tset)
+static void mem_cgroup_move_task(void)
{
}
#endif
@@ -5195,7 +5196,7 @@ struct cgroup_subsys memory_cgrp_subsys = {
.css_reset = mem_cgroup_css_reset,
.can_attach = mem_cgroup_can_attach,
.cancel_attach = mem_cgroup_cancel_attach,
- .attach = mem_cgroup_move_task,
+ .post_attach = mem_cgroup_move_task,
.bind = mem_cgroup_bind,
.dfl_cftypes = memory_files,
.legacy_cftypes = mem_cgroup_legacy_files,
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 78f5f2641b91..ca5acee53b7a 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -888,7 +888,15 @@ int get_hwpoison_page(struct page *page)
}
}
- return get_page_unless_zero(head);
+ if (get_page_unless_zero(head)) {
+ if (head == compound_head(page))
+ return 1;
+
+ pr_info("MCE: %#lx cannot catch tail\n", page_to_pfn(page));
+ put_page(head);
+ }
+
+ return 0;
}
EXPORT_SYMBOL_GPL(get_hwpoison_page);
diff --git a/mm/memory.c b/mm/memory.c
index 93897f23cc11..305537fc8640 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -789,6 +789,46 @@ out:
return pfn_to_page(pfn);
}
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
+ pmd_t pmd)
+{
+ unsigned long pfn = pmd_pfn(pmd);
+
+ /*
+ * There is no pmd_special() but there may be special pmds, e.g.
+ * in a direct-access (dax) mapping, so let's just replicate the
+ * !HAVE_PTE_SPECIAL case from vm_normal_page() here.
+ */
+ if (unlikely(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP))) {
+ if (vma->vm_flags & VM_MIXEDMAP) {
+ if (!pfn_valid(pfn))
+ return NULL;
+ goto out;
+ } else {
+ unsigned long off;
+ off = (addr - vma->vm_start) >> PAGE_SHIFT;
+ if (pfn == vma->vm_pgoff + off)
+ return NULL;
+ if (!is_cow_mapping(vma->vm_flags))
+ return NULL;
+ }
+ }
+
+ if (is_zero_pfn(pfn))
+ return NULL;
+ if (unlikely(pfn > highest_memmap_pfn))
+ return NULL;
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+ * eg. VDSO mappings can cause them to exist.
+ */
+out:
+ return pfn_to_page(pfn);
+}
+#endif
+
/*
* copy one vm_area from one task to the other. Assumes the page tables
* already present in the new task to be cleared in the whole range
diff --git a/mm/migrate.c b/mm/migrate.c
index 6c822a7b27e0..f9dfb18a4eba 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -975,7 +975,13 @@ out:
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
/* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE) {
+ if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+ /*
+ * With this release, we free the successfully migrated page
+ * and intentionally set PG_HWPoison on the just-freed page.
+ * Although it's rather weird, this is how the HWPoison flag
+ * works at the moment.
+ */
put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
diff --git a/mm/page_io.c b/mm/page_io.c
index cd92e3d67a32..985f23cfa79b 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -353,7 +353,11 @@ int swap_readpage(struct page *page)
ret = bdev_read_page(sis->bdev, swap_page_sector(page), page);
if (!ret) {
- swap_slot_free_notify(page);
+ if (trylock_page(page)) {
+ swap_slot_free_notify(page);
+ unlock_page(page);
+ }
+
count_vm_event(PSWPIN);
return 0;
}
diff --git a/mm/swap.c b/mm/swap.c
index a0bc206b4ac6..03aacbcb013f 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -728,6 +728,11 @@ void release_pages(struct page **pages, int nr, bool cold)
zone = NULL;
}
+ if (is_huge_zero_page(page)) {
+ put_huge_zero_page();
+ continue;
+ }
+
page = compound_head(page);
if (!put_page_testzero(page))
continue;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b934223eaa45..142cb61f4822 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2553,7 +2553,7 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
sc->gfp_mask |= __GFP_HIGHMEM;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
- requested_highidx, sc->nodemask) {
+ gfp_zone(sc->gfp_mask), sc->nodemask) {
enum zone_type classzone_idx;
if (!populated_zone(zone))
@@ -3318,6 +3318,20 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
/* Try to sleep for a short interval */
if (prepare_kswapd_sleep(pgdat, order, remaining,
balanced_classzone_idx)) {
+ /*
+ * Compaction records what page blocks it recently failed to
+ * isolate pages from and skips them in the future scanning.
+ * When kswapd is going to sleep, it is reasonable to assume
+ * that pages and compaction may succeed so reset the cache.
+ */
+ reset_isolation_suitable(pgdat);
+
+ /*
+ * We have freed the memory, now we should compact it to make
+ * allocation of the requested order possible.
+ */
+ wakeup_kcompactd(pgdat, order, classzone_idx);
+
remaining = schedule_timeout(HZ/10);
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3341,20 +3355,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- /*
- * Compaction records what page blocks it recently failed to
- * isolate pages from and skips them in the future scanning.
- * When kswapd is going to sleep, it is reasonable to assume
- * that pages and compaction may succeed so reset the cache.
- */
- reset_isolation_suitable(pgdat);
-
- /*
- * We have freed the memory, now we should compact it to make
- * allocation of the requested order possible.
- */
- wakeup_kcompactd(pgdat, order, classzone_idx);
-
if (!kthread_should_stop())
schedule();
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 253bc77eda3b..7dbc80d01eb0 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -61,6 +61,19 @@ static void __mdb_entry_fill_flags(struct br_mdb_entry *e, unsigned char flags)
e->flags |= MDB_FLAGS_OFFLOAD;
}
+static void __mdb_entry_to_br_ip(struct br_mdb_entry *entry, struct br_ip *ip)
+{
+ memset(ip, 0, sizeof(struct br_ip));
+ ip->vid = entry->vid;
+ ip->proto = entry->addr.proto;
+ if (ip->proto == htons(ETH_P_IP))
+ ip->u.ip4 = entry->addr.u.ip4;
+#if IS_ENABLED(CONFIG_IPV6)
+ else
+ ip->u.ip6 = entry->addr.u.ip6;
+#endif
+}
+
static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,
struct net_device *dev)
{
@@ -243,9 +256,45 @@ static inline size_t rtnl_mdb_nlmsg_size(void)
+ nla_total_size(sizeof(struct br_mdb_entry));
}
-static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
- int type, struct net_bridge_port_group *pg)
+struct br_mdb_complete_info {
+ struct net_bridge_port *port;
+ struct br_ip ip;
+};
+
+static void br_mdb_complete(struct net_device *dev, int err, void *priv)
{
+ struct br_mdb_complete_info *data = priv;
+ struct net_bridge_port_group __rcu **pp;
+ struct net_bridge_port_group *p;
+ struct net_bridge_mdb_htable *mdb;
+ struct net_bridge_mdb_entry *mp;
+ struct net_bridge_port *port = data->port;
+ struct net_bridge *br = port->br;
+
+ if (err)
+ goto err;
+
+ spin_lock_bh(&br->multicast_lock);
+ mdb = mlock_dereference(br->mdb, br);
+ mp = br_mdb_ip_get(mdb, &data->ip);
+ if (!mp)
+ goto out;
+ for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL;
+ pp = &p->next) {
+ if (p->port != port)
+ continue;
+ p->flags |= MDB_PG_FLAGS_OFFLOAD;
+ }
+out:
+ spin_unlock_bh(&br->multicast_lock);
+err:
+ kfree(priv);
+}
+
+static void __br_mdb_notify(struct net_device *dev, struct net_bridge_port *p,
+ struct br_mdb_entry *entry, int type)
+{
+ struct br_mdb_complete_info *complete_info;
struct switchdev_obj_port_mdb mdb = {
.obj = {
.id = SWITCHDEV_OBJ_ID_PORT_MDB,
@@ -268,9 +317,14 @@ static void __br_mdb_notify(struct net_device *dev, struct br_mdb_entry *entry,
mdb.obj.orig_dev = port_dev;
if (port_dev && type == RTM_NEWMDB) {
- err = switchdev_port_obj_add(port_dev, &mdb.obj);
- if (!err && pg)
- pg->flags |= MDB_PG_FLAGS_OFFLOAD;
+ complete_info = kmalloc(sizeof(*complete_info), GFP_ATOMIC);
+ if (complete_info) {
+ complete_info->port = p;
+ __mdb_entry_to_br_ip(entry, &complete_info->ip);
+ mdb.obj.complete_priv = complete_info;
+ mdb.obj.complete = br_mdb_complete;
+ switchdev_port_obj_add(port_dev, &mdb.obj);
+ }
} else if (port_dev && type == RTM_DELMDB) {
switchdev_port_obj_del(port_dev, &mdb.obj);
}
@@ -291,21 +345,21 @@ errout:
rtnl_set_sk_err(net, RTNLGRP_MDB, err);
}
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
- int type)
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+ struct br_ip *group, int type, u8 flags)
{
struct br_mdb_entry entry;
memset(&entry, 0, sizeof(entry));
- entry.ifindex = pg->port->dev->ifindex;
- entry.addr.proto = pg->addr.proto;
- entry.addr.u.ip4 = pg->addr.u.ip4;
+ entry.ifindex = port->dev->ifindex;
+ entry.addr.proto = group->proto;
+ entry.addr.u.ip4 = group->u.ip4;
#if IS_ENABLED(CONFIG_IPV6)
- entry.addr.u.ip6 = pg->addr.u.ip6;
+ entry.addr.u.ip6 = group->u.ip6;
#endif
- entry.vid = pg->addr.vid;
- __mdb_entry_fill_flags(&entry, pg->flags);
- __br_mdb_notify(dev, &entry, type, pg);
+ entry.vid = group->vid;
+ __mdb_entry_fill_flags(&entry, flags);
+ __br_mdb_notify(dev, port, &entry, type);
}
static int nlmsg_populate_rtr_fill(struct sk_buff *skb,
@@ -450,8 +504,7 @@ static int br_mdb_parse(struct sk_buff *skb, struct nlmsghdr *nlh,
}
static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
- struct br_ip *group, unsigned char state,
- struct net_bridge_port_group **pg)
+ struct br_ip *group, unsigned char state)
{
struct net_bridge_mdb_entry *mp;
struct net_bridge_port_group *p;
@@ -482,7 +535,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
- *pg = p;
if (state == MDB_TEMPORARY)
mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -490,8 +542,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
}
static int __br_mdb_add(struct net *net, struct net_bridge *br,
- struct br_mdb_entry *entry,
- struct net_bridge_port_group **pg)
+ struct br_mdb_entry *entry)
{
struct br_ip ip;
struct net_device *dev;
@@ -509,18 +560,10 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
if (!p || p->br != br || p->state == BR_STATE_DISABLED)
return -EINVAL;
- memset(&ip, 0, sizeof(ip));
- ip.vid = entry->vid;
- ip.proto = entry->addr.proto;
- if (ip.proto == htons(ETH_P_IP))
- ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
- else
- ip.u.ip6 = entry->addr.u.ip6;
-#endif
+ __mdb_entry_to_br_ip(entry, &ip);
spin_lock_bh(&br->multicast_lock);
- ret = br_mdb_add_group(br, p, &ip, entry->state, pg);
+ ret = br_mdb_add_group(br, p, &ip, entry->state);
spin_unlock_bh(&br->multicast_lock);
return ret;
}
@@ -528,7 +571,6 @@ static int __br_mdb_add(struct net *net, struct net_bridge *br,
static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
{
struct net *net = sock_net(skb->sk);
- struct net_bridge_port_group *pg;
struct net_bridge_vlan_group *vg;
struct net_device *dev, *pdev;
struct br_mdb_entry *entry;
@@ -558,15 +600,15 @@ static int br_mdb_add(struct sk_buff *skb, struct nlmsghdr *nlh)
if (br_vlan_enabled(br) && vg && entry->vid == 0) {
list_for_each_entry(v, &vg->vlan_list, vlist) {
entry->vid = v->vid;
- err = __br_mdb_add(net, br, entry, &pg);
+ err = __br_mdb_add(net, br, entry);
if (err)
break;
- __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+ __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
}
} else {
- err = __br_mdb_add(net, br, entry, &pg);
+ err = __br_mdb_add(net, br, entry);
if (!err)
- __br_mdb_notify(dev, entry, RTM_NEWMDB, pg);
+ __br_mdb_notify(dev, p, entry, RTM_NEWMDB);
}
return err;
@@ -584,15 +626,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
if (!netif_running(br->dev) || br->multicast_disabled)
return -EINVAL;
- memset(&ip, 0, sizeof(ip));
- ip.vid = entry->vid;
- ip.proto = entry->addr.proto;
- if (ip.proto == htons(ETH_P_IP))
- ip.u.ip4 = entry->addr.u.ip4;
-#if IS_ENABLED(CONFIG_IPV6)
- else
- ip.u.ip6 = entry->addr.u.ip6;
-#endif
+ __mdb_entry_to_br_ip(entry, &ip);
spin_lock_bh(&br->multicast_lock);
mdb = mlock_dereference(br->mdb, br);
@@ -662,12 +696,12 @@ static int br_mdb_del(struct sk_buff *skb, struct nlmsghdr *nlh)
entry->vid = v->vid;
err = __br_mdb_del(br, entry);
if (!err)
- __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+ __br_mdb_notify(dev, p, entry, RTM_DELMDB);
}
} else {
err = __br_mdb_del(br, entry);
if (!err)
- __br_mdb_notify(dev, entry, RTM_DELMDB, NULL);
+ __br_mdb_notify(dev, p, entry, RTM_DELMDB);
}
return err;
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index a4c15df2b792..191ea66e4d92 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -283,7 +283,8 @@ static void br_multicast_del_pg(struct net_bridge *br,
rcu_assign_pointer(*pp, p->next);
hlist_del_init(&p->mglist);
del_timer(&p->timer);
- br_mdb_notify(br->dev, p, RTM_DELMDB);
+ br_mdb_notify(br->dev, p->port, &pg->addr, RTM_DELMDB,
+ p->flags);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
if (!mp->ports && !mp->mglist &&
@@ -705,7 +706,7 @@ static int br_multicast_add_group(struct net_bridge *br,
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
- br_mdb_notify(br->dev, p, RTM_NEWMDB);
+ br_mdb_notify(br->dev, port, group, RTM_NEWMDB, 0);
found:
mod_timer(&p->timer, now + br->multicast_membership_interval);
@@ -1461,7 +1462,8 @@ br_multicast_leave_group(struct net_bridge *br,
hlist_del_init(&p->mglist);
del_timer(&p->timer);
call_rcu_bh(&p->rcu, br_multicast_free_pg);
- br_mdb_notify(br->dev, p, RTM_DELMDB);
+ br_mdb_notify(br->dev, port, group, RTM_DELMDB,
+ p->flags);
if (!mp->ports && !mp->mglist &&
netif_running(br->dev))
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 1b5d145dfcbf..d9da857182ef 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -560,8 +560,8 @@ br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
unsigned char flags);
void br_mdb_init(void);
void br_mdb_uninit(void);
-void br_mdb_notify(struct net_device *dev, struct net_bridge_port_group *pg,
- int type);
+void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
+ struct br_ip *group, int type, u8 flags);
void br_rtr_notify(struct net_device *dev, struct net_bridge_port *port,
int type);
diff --git a/net/ceph/auth.c b/net/ceph/auth.c
index 6b923bcaa2a4..2bc5965fdd1e 100644
--- a/net/ceph/auth.c
+++ b/net/ceph/auth.c
@@ -293,13 +293,9 @@ int ceph_auth_create_authorizer(struct ceph_auth_client *ac,
}
EXPORT_SYMBOL(ceph_auth_create_authorizer);
-void ceph_auth_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
+void ceph_auth_destroy_authorizer(struct ceph_authorizer *a)
{
- mutex_lock(&ac->mutex);
- if (ac->ops && ac->ops->destroy_authorizer)
- ac->ops->destroy_authorizer(ac, a);
- mutex_unlock(&ac->mutex);
+ a->destroy(a);
}
EXPORT_SYMBOL(ceph_auth_destroy_authorizer);
diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c
index 8c93fa8d81bc..5f836f02ae36 100644
--- a/net/ceph/auth_none.c
+++ b/net/ceph/auth_none.c
@@ -16,7 +16,6 @@ static void reset(struct ceph_auth_client *ac)
struct ceph_auth_none_info *xi = ac->private;
xi->starting = true;
- xi->built_authorizer = false;
}
static void destroy(struct ceph_auth_client *ac)
@@ -39,6 +38,27 @@ static int should_authenticate(struct ceph_auth_client *ac)
return xi->starting;
}
+static int ceph_auth_none_build_authorizer(struct ceph_auth_client *ac,
+ struct ceph_none_authorizer *au)
+{
+ void *p = au->buf;
+ void *const end = p + sizeof(au->buf);
+ int ret;
+
+ ceph_encode_8_safe(&p, end, 1, e_range);
+ ret = ceph_entity_name_encode(ac->name, &p, end);
+ if (ret < 0)
+ return ret;
+
+ ceph_encode_64_safe(&p, end, ac->global_id, e_range);
+ au->buf_len = p - (void *)au->buf;
+ dout("%s built authorizer len %d\n", __func__, au->buf_len);
+ return 0;
+
+e_range:
+ return -ERANGE;
+}
+
static int build_request(struct ceph_auth_client *ac, void *buf, void *end)
{
return 0;
@@ -57,32 +77,32 @@ static int handle_reply(struct ceph_auth_client *ac, int result,
return result;
}
+static void ceph_auth_none_destroy_authorizer(struct ceph_authorizer *a)
+{
+ kfree(a);
+}
+
/*
- * build an 'authorizer' with our entity_name and global_id. we can
- * reuse a single static copy since it is identical for all services
- * we connect to.
+ * build an 'authorizer' with our entity_name and global_id. it is
+ * identical for all services we connect to.
*/
static int ceph_auth_none_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
{
- struct ceph_auth_none_info *ai = ac->private;
- struct ceph_none_authorizer *au = &ai->au;
- void *p, *end;
+ struct ceph_none_authorizer *au;
int ret;
- if (!ai->built_authorizer) {
- p = au->buf;
- end = p + sizeof(au->buf);
- ceph_encode_8(&p, 1);
- ret = ceph_entity_name_encode(ac->name, &p, end - 8);
- if (ret < 0)
- goto bad;
- ceph_decode_need(&p, end, sizeof(u64), bad2);
- ceph_encode_64(&p, ac->global_id);
- au->buf_len = p - (void *)au->buf;
- ai->built_authorizer = true;
- dout("built authorizer len %d\n", au->buf_len);
+ au = kmalloc(sizeof(*au), GFP_NOFS);
+ if (!au)
+ return -ENOMEM;
+
+ au->base.destroy = ceph_auth_none_destroy_authorizer;
+
+ ret = ceph_auth_none_build_authorizer(ac, au);
+ if (ret) {
+ kfree(au);
+ return ret;
}
auth->authorizer = (struct ceph_authorizer *) au;
@@ -92,17 +112,6 @@ static int ceph_auth_none_create_authorizer(
auth->authorizer_reply_buf_len = sizeof (au->reply_buf);
return 0;
-
-bad2:
- ret = -ERANGE;
-bad:
- return ret;
-}
-
-static void ceph_auth_none_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
-{
- /* nothing to do */
}
static const struct ceph_auth_client_ops ceph_auth_none_ops = {
@@ -114,7 +123,6 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = {
.build_request = build_request,
.handle_reply = handle_reply,
.create_authorizer = ceph_auth_none_create_authorizer,
- .destroy_authorizer = ceph_auth_none_destroy_authorizer,
};
int ceph_auth_none_init(struct ceph_auth_client *ac)
@@ -127,7 +135,6 @@ int ceph_auth_none_init(struct ceph_auth_client *ac)
return -ENOMEM;
xi->starting = true;
- xi->built_authorizer = false;
ac->protocol = CEPH_AUTH_NONE;
ac->private = xi;
diff --git a/net/ceph/auth_none.h b/net/ceph/auth_none.h
index 059a3ce4b53f..62021535ae4a 100644
--- a/net/ceph/auth_none.h
+++ b/net/ceph/auth_none.h
@@ -12,6 +12,7 @@
*/
struct ceph_none_authorizer {
+ struct ceph_authorizer base;
char buf[128];
int buf_len;
char reply_buf[0];
@@ -19,8 +20,6 @@ struct ceph_none_authorizer {
struct ceph_auth_none_info {
bool starting;
- bool built_authorizer;
- struct ceph_none_authorizer au; /* we only need one; it's static */
};
int ceph_auth_none_init(struct ceph_auth_client *ac);
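The authorizer rework above replaces the per-client ops->destroy_authorizer hook with a destroy callback embedded in each authorizer. A minimal sketch of that embedded-destroy pattern, with illustrative names only:

    #include <linux/slab.h>

    struct authorizer {
            void (*destroy)(struct authorizer *a);
    };

    struct none_authorizer {
            struct authorizer base;         /* kept first so the pointer cast is trivial */
            char buf[128];
    };

    static void none_destroy(struct authorizer *a)
    {
            kfree(a);                       /* base is the first member, same address */
    }

    /* the creator fills in au->base.destroy = none_destroy; callers then only
     * need a->destroy(a), with no reference back to the auth client or its ops.
     */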
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index 9e43a315e662..a0905f04bd13 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -565,6 +565,14 @@ static int ceph_x_handle_reply(struct ceph_auth_client *ac, int result,
return -EAGAIN;
}
+static void ceph_x_destroy_authorizer(struct ceph_authorizer *a)
+{
+ struct ceph_x_authorizer *au = (void *)a;
+
+ ceph_x_authorizer_cleanup(au);
+ kfree(au);
+}
+
static int ceph_x_create_authorizer(
struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth)
@@ -581,6 +589,8 @@ static int ceph_x_create_authorizer(
if (!au)
return -ENOMEM;
+ au->base.destroy = ceph_x_destroy_authorizer;
+
ret = ceph_x_build_authorizer(ac, th, au);
if (ret) {
kfree(au);
@@ -643,16 +653,6 @@ static int ceph_x_verify_authorizer_reply(struct ceph_auth_client *ac,
return ret;
}
-static void ceph_x_destroy_authorizer(struct ceph_auth_client *ac,
- struct ceph_authorizer *a)
-{
- struct ceph_x_authorizer *au = (void *)a;
-
- ceph_x_authorizer_cleanup(au);
- kfree(au);
-}
-
-
static void ceph_x_reset(struct ceph_auth_client *ac)
{
struct ceph_x_info *xi = ac->private;
@@ -770,7 +770,6 @@ static const struct ceph_auth_client_ops ceph_x_ops = {
.create_authorizer = ceph_x_create_authorizer,
.update_authorizer = ceph_x_update_authorizer,
.verify_authorizer_reply = ceph_x_verify_authorizer_reply,
- .destroy_authorizer = ceph_x_destroy_authorizer,
.invalidate_authorizer = ceph_x_invalidate_authorizer,
.reset = ceph_x_reset,
.destroy = ceph_x_destroy,
diff --git a/net/ceph/auth_x.h b/net/ceph/auth_x.h
index 40b1a3cf7397..21a5af904bae 100644
--- a/net/ceph/auth_x.h
+++ b/net/ceph/auth_x.h
@@ -26,6 +26,7 @@ struct ceph_x_ticket_handler {
struct ceph_x_authorizer {
+ struct ceph_authorizer base;
struct ceph_crypto_key session_key;
struct ceph_buffer *buf;
unsigned int service;
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 32355d9d0103..40a53a70efdf 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1087,10 +1087,8 @@ static void put_osd(struct ceph_osd *osd)
dout("put_osd %p %d -> %d\n", osd, atomic_read(&osd->o_ref),
atomic_read(&osd->o_ref) - 1);
if (atomic_dec_and_test(&osd->o_ref)) {
- struct ceph_auth_client *ac = osd->o_osdc->client->monc.auth;
-
if (osd->o_auth.authorizer)
- ceph_auth_destroy_authorizer(ac, osd->o_auth.authorizer);
+ ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
kfree(osd);
}
}
@@ -2984,7 +2982,7 @@ static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
struct ceph_auth_handshake *auth = &o->o_auth;
if (force_new && auth->authorizer) {
- ceph_auth_destroy_authorizer(ac, auth->authorizer);
+ ceph_auth_destroy_authorizer(auth->authorizer);
auth->authorizer = NULL;
}
if (!auth->authorizer) {
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 8a9246deccfe..63566ec54794 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -904,7 +904,11 @@ void fib_del_ifaddr(struct in_ifaddr *ifa, struct in_ifaddr *iprim)
if (ifa->ifa_flags & IFA_F_SECONDARY) {
prim = inet_ifa_byprefix(in_dev, any, ifa->ifa_mask);
if (!prim) {
- pr_warn("%s: bug: prim == NULL\n", __func__);
+ /* if the device has been deleted, we don't perform
+ * address promotion
+ */
+ if (!in_dev->dead)
+ pr_warn("%s: bug: prim == NULL\n", __func__);
return;
}
if (iprim && iprim != prim) {
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 23cec53b568a..8ec4b3089e20 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3176,35 +3176,9 @@ static void addrconf_gre_config(struct net_device *dev)
}
#endif
-#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
-/* If the host route is cached on the addr struct make sure it is associated
- * with the proper table. e.g., enslavement can change and if so the cached
- * host route needs to move to the new table.
- */
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
- struct inet6_ifaddr *ifp)
-{
- if (ifp->rt) {
- u32 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
-
- if (tb_id != ifp->rt->rt6i_table->tb6_id) {
- ip6_del_rt(ifp->rt);
- ifp->rt = NULL;
- }
- }
-}
-#else
-static void l3mdev_check_host_rt(struct inet6_dev *idev,
- struct inet6_ifaddr *ifp)
-{
-}
-#endif
-
static int fixup_permanent_addr(struct inet6_dev *idev,
struct inet6_ifaddr *ifp)
{
- l3mdev_check_host_rt(idev, ifp);
-
if (!ifp->rt) {
struct rt6_info *rt;
@@ -3304,6 +3278,9 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
break;
if (event == NETDEV_UP) {
+ /* restore routes for permanent addresses */
+ addrconf_permanent_addr(dev);
+
if (!addrconf_qdisc_ok(dev)) {
/* device is not ready yet. */
pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
@@ -3337,9 +3314,6 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
run_pending = 1;
}
- /* restore routes for permanent addresses */
- addrconf_permanent_addr(dev);
-
switch (dev->type) {
#if IS_ENABLED(CONFIG_IPV6_SIT)
case ARPHRD_SIT:
@@ -3556,6 +3530,8 @@ restart:
INIT_LIST_HEAD(&del_list);
list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
+ struct rt6_info *rt = NULL;
+
addrconf_del_dad_work(ifa);
write_unlock_bh(&idev->lock);
@@ -3568,6 +3544,9 @@ restart:
ifa->state = 0;
if (!(ifa->flags & IFA_F_NODAD))
ifa->flags |= IFA_F_TENTATIVE;
+
+ rt = ifa->rt;
+ ifa->rt = NULL;
} else {
state = ifa->state;
ifa->state = INET6_IFADDR_STATE_DEAD;
@@ -3578,6 +3557,9 @@ restart:
spin_unlock_bh(&ifa->lock);
+ if (rt)
+ ip6_del_rt(rt);
+
if (state != INET6_IFADDR_STATE_DEAD) {
__ipv6_ifa_notify(RTM_DELADDR, ifa);
inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
@@ -5343,10 +5325,10 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
if (rt)
ip6_del_rt(rt);
}
- dst_hold(&ifp->rt->dst);
-
- ip6_del_rt(ifp->rt);
-
+ if (ifp->rt) {
+ dst_hold(&ifp->rt->dst);
+ ip6_del_rt(ifp->rt);
+ }
rt_genid_bump_ipv6(net);
break;
}
diff --git a/net/switchdev/switchdev.c b/net/switchdev/switchdev.c
index 2b9b98f1c2ff..b7e01d88bdc5 100644
--- a/net/switchdev/switchdev.c
+++ b/net/switchdev/switchdev.c
@@ -305,6 +305,8 @@ static void switchdev_port_attr_set_deferred(struct net_device *dev,
if (err && err != -EOPNOTSUPP)
netdev_err(dev, "failed (err=%d) to set attribute (id=%d)\n",
err, attr->id);
+ if (attr->complete)
+ attr->complete(dev, err, attr->complete_priv);
}
static int switchdev_port_attr_set_defer(struct net_device *dev,
@@ -434,6 +436,8 @@ static void switchdev_port_obj_add_deferred(struct net_device *dev,
if (err && err != -EOPNOTSUPP)
netdev_err(dev, "failed (err=%d) to add object (id=%d)\n",
err, obj->id);
+ if (obj->complete)
+ obj->complete(dev, err, obj->complete_priv);
}
static int switchdev_port_obj_add_defer(struct net_device *dev,
@@ -502,6 +506,8 @@ static void switchdev_port_obj_del_deferred(struct net_device *dev,
if (err && err != -EOPNOTSUPP)
netdev_err(dev, "failed (err=%d) to del object (id=%d)\n",
err, obj->id);
+ if (obj->complete)
+ obj->complete(dev, err, obj->complete_priv);
}
static int switchdev_port_obj_del_defer(struct net_device *dev,
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index 023cc4cad5c1..626f3bb24c55 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -104,12 +104,11 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_stream_init_all);
*/
void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus)
{
- struct hdac_stream *s;
+ struct hdac_stream *s, *_s;
struct hdac_ext_stream *stream;
struct hdac_bus *bus = ebus_to_hbus(ebus);
- while (!list_empty(&bus->stream_list)) {
- s = list_first_entry(&bus->stream_list, struct hdac_stream, list);
+ list_for_each_entry_safe(s, _s, &bus->stream_list, list) {
stream = stream_to_hdac_ext_stream(s);
snd_hdac_ext_stream_decouple(ebus, stream, false);
list_del(&s->list);
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 54babe1c0b16..607bbeaebddf 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -20,6 +20,7 @@
#include <sound/core.h>
#include <sound/hdaudio.h>
#include <sound/hda_i915.h>
+#include <sound/hda_register.h>
static struct i915_audio_component *hdac_acomp;
@@ -97,26 +98,65 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
}
EXPORT_SYMBOL_GPL(snd_hdac_display_power);
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+ ((pci)->device == 0x0c0c) || \
+ ((pci)->device == 0x0d0c) || \
+ ((pci)->device == 0x160c))
+
/**
- * snd_hdac_get_display_clk - Get CDCLK in kHz
+ * snd_hdac_i915_set_bclk - Reprogram BCLK for HSW/BDW
* @bus: HDA core bus
*
- * This function is supposed to be used only by a HD-audio controller
- * driver that needs the interaction with i915 graphics.
+ * The Intel HSW/BDW display HDA controller is in the GPU; both its power and
+ * link BCLK depend on the GPU. Two Extended Mode registers, EM4 (M value) and
+ * EM5 (N value), convert CDCLK (Core Display Clock) to the 24 MHz BCLK:
+ * BCLK = CDCLK * M / N
+ * The values will be lost when the display power well is disabled and need to
+ * be restored to avoid abnormal playback speed.
*
- * This function queries CDCLK value in kHz from the graphics driver and
- * returns the value. A negative code is returned in error.
+ * Call this function when initializing or changing the power well, as well
+ * as from the ELD notifier on hotplug.
*/
-int snd_hdac_get_display_clk(struct hdac_bus *bus)
+void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
struct i915_audio_component *acomp = bus->audio_component;
+ struct pci_dev *pci = to_pci_dev(bus->dev);
+ int cdclk_freq;
+ unsigned int bclk_m, bclk_n;
+
+ if (!acomp || !acomp->ops || !acomp->ops->get_cdclk_freq)
+ return; /* only for i915 binding */
+ if (!CONTROLLER_IN_GPU(pci))
+ return; /* only HSW/BDW */
+
+ cdclk_freq = acomp->ops->get_cdclk_freq(acomp->dev);
+ switch (cdclk_freq) {
+ case 337500:
+ bclk_m = 16;
+ bclk_n = 225;
+ break;
+
+ case 450000:
+ default: /* default CDCLK 450MHz */
+ bclk_m = 4;
+ bclk_n = 75;
+ break;
+
+ case 540000:
+ bclk_m = 4;
+ bclk_n = 90;
+ break;
+
+ case 675000:
+ bclk_m = 8;
+ bclk_n = 225;
+ break;
+ }
- if (!acomp || !acomp->ops)
- return -ENODEV;
-
- return acomp->ops->get_cdclk_freq(acomp->dev);
+ snd_hdac_chip_writew(bus, HSW_EM4, bclk_m);
+ snd_hdac_chip_writew(bus, HSW_EM5, bclk_n);
}
-EXPORT_SYMBOL_GPL(snd_hdac_get_display_clk);
+EXPORT_SYMBOL_GPL(snd_hdac_i915_set_bclk);
/* There is a fixed mapping between audio pin node and display port
* on current Intel platforms:
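As a quick sanity check of the table above, each (M, N) pair plugged into BCLK = CDCLK * M / N (all values in kHz) lands back on the fixed 24 MHz link clock:

    337500 * 16 / 225 = 24000
    450000 *  4 /  75 = 24000
    540000 *  4 /  90 = 24000
    675000 *  8 / 225 = 24000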
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 637b8a0e2a91..9a0d1445ca5c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -857,50 +857,6 @@ static int param_set_xint(const char *val, const struct kernel_param *kp)
#define azx_del_card_list(chip) /* NOP */
#endif /* CONFIG_PM */
-/* Intel HSW/BDW display HDA controller is in GPU. Both its power and link BCLK
- * depends on GPU. Two Extended Mode registers EM4 (M value) and EM5 (N Value)
- * are used to convert CDClk (Core Display Clock) to 24MHz BCLK:
- * BCLK = CDCLK * M / N
- * The values will be lost when the display power well is disabled and need to
- * be restored to avoid abnormal playback speed.
- */
-static void haswell_set_bclk(struct hda_intel *hda)
-{
- struct azx *chip = &hda->chip;
- int cdclk_freq;
- unsigned int bclk_m, bclk_n;
-
- if (!hda->need_i915_power)
- return;
-
- cdclk_freq = snd_hdac_get_display_clk(azx_bus(chip));
- switch (cdclk_freq) {
- case 337500:
- bclk_m = 16;
- bclk_n = 225;
- break;
-
- case 450000:
- default: /* default CDCLK 450MHz */
- bclk_m = 4;
- bclk_n = 75;
- break;
-
- case 540000:
- bclk_m = 4;
- bclk_n = 90;
- break;
-
- case 675000:
- bclk_m = 8;
- bclk_n = 225;
- break;
- }
-
- azx_writew(chip, HSW_EM4, bclk_m);
- azx_writew(chip, HSW_EM5, bclk_n);
-}
-
#if defined(CONFIG_PM_SLEEP) || defined(SUPPORT_VGA_SWITCHEROO)
/*
* power management
@@ -958,7 +914,7 @@ static int azx_resume(struct device *dev)
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
&& hda->need_i915_power) {
snd_hdac_display_power(azx_bus(chip), true);
- haswell_set_bclk(hda);
+ snd_hdac_i915_set_bclk(azx_bus(chip));
}
if (chip->msi)
if (pci_enable_msi(pci) < 0)
@@ -1058,7 +1014,7 @@ static int azx_runtime_resume(struct device *dev)
bus = azx_bus(chip);
if (hda->need_i915_power) {
snd_hdac_display_power(bus, true);
- haswell_set_bclk(hda);
+ snd_hdac_i915_set_bclk(bus);
} else {
/* toggle codec wakeup bit for STATESTS read */
snd_hdac_set_codec_wakeup(bus, true);
@@ -1796,12 +1752,8 @@ static int azx_first_init(struct azx *chip)
/* initialize chip */
azx_init_pci(chip);
- if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
- struct hda_intel *hda;
-
- hda = container_of(chip, struct hda_intel, chip);
- haswell_set_bclk(hda);
- }
+ if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+ snd_hdac_i915_set_bclk(bus);
hda_intel_init_chip(chip, (probe_only[dev] & 2) == 0);
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 40933aa33afe..1483f85999ec 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2232,6 +2232,7 @@ static void intel_pin_eld_notify(void *audio_ptr, int port)
if (atomic_read(&(codec)->core.in_pm))
return;
+ snd_hdac_i915_set_bclk(&codec->bus->core);
check_presence_and_report(codec, pin_nid);
}
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 810bceee4fd2..ac4490a96863 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5584,6 +5584,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x504a, "ThinkPad X260", ALC292_FIXUP_TPT440_DOCK),
SND_PCI_QUIRK(0x17aa, 0x504b, "Thinkpad", ALC293_FIXUP_LENOVO_SPK_NOISE),
SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 649e92a252ae..7ef3a0c16478 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -629,6 +629,7 @@ config SND_SOC_RT5514
config SND_SOC_RT5616
tristate "Realtek RT5616 CODEC"
+ depends on I2C
config SND_SOC_RT5631
tristate "Realtek ALC5631/RT5631 CODEC"
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 92d22a018d68..83959312f7a0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -249,6 +249,18 @@ int arizona_init_spk(struct snd_soc_codec *codec)
}
EXPORT_SYMBOL_GPL(arizona_init_spk);
+int arizona_free_spk(struct snd_soc_codec *codec)
+{
+ struct arizona_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->arizona;
+
+ arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT_WARN, arizona);
+ arizona_free_irq(arizona, ARIZONA_IRQ_SPK_OVERHEAT, arizona);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(arizona_free_spk);
+
static const struct snd_soc_dapm_route arizona_mono_routes[] = {
{ "OUT1R", NULL, "OUT1L" },
{ "OUT2R", NULL, "OUT2L" },
diff --git a/sound/soc/codecs/arizona.h b/sound/soc/codecs/arizona.h
index 1ea8e4ecf8d4..ce0531b8c632 100644
--- a/sound/soc/codecs/arizona.h
+++ b/sound/soc/codecs/arizona.h
@@ -307,6 +307,8 @@ extern int arizona_init_spk(struct snd_soc_codec *codec);
extern int arizona_init_gpio(struct snd_soc_codec *codec);
extern int arizona_init_mono(struct snd_soc_codec *codec);
+extern int arizona_free_spk(struct snd_soc_codec *codec);
+
extern int arizona_init_dai(struct arizona_priv *priv, int dai);
int arizona_set_output_mode(struct snd_soc_codec *codec, int output,
diff --git a/sound/soc/codecs/cs35l32.c b/sound/soc/codecs/cs35l32.c
index 44c30fe3e315..287d13740be4 100644
--- a/sound/soc/codecs/cs35l32.c
+++ b/sound/soc/codecs/cs35l32.c
@@ -274,7 +274,9 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
if (of_property_read_u32(np, "cirrus,sdout-share", &val) >= 0)
pdata->sdout_share = val;
- of_property_read_u32(np, "cirrus,boost-manager", &val);
+ if (of_property_read_u32(np, "cirrus,boost-manager", &val))
+ val = -1u;
+
switch (val) {
case CS35L32_BOOST_MGR_AUTO:
case CS35L32_BOOST_MGR_AUTO_AUDIO:
@@ -282,13 +284,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_BOOST_MGR_FIXED:
pdata->boost_mng = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,boost-manager DT value %d\n", val);
pdata->boost_mng = CS35L32_BOOST_MGR_BYPASS;
}
- of_property_read_u32(np, "cirrus,sdout-datacfg", &val);
+ if (of_property_read_u32(np, "cirrus,sdout-datacfg", &val))
+ val = -1u;
switch (val) {
case CS35L32_DATA_CFG_LR_VP:
case CS35L32_DATA_CFG_LR_STAT:
@@ -296,13 +300,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_DATA_CFG_LR_VPSTAT:
pdata->sdout_datacfg = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,sdout-datacfg DT value %d\n", val);
pdata->sdout_datacfg = CS35L32_DATA_CFG_LR;
}
- of_property_read_u32(np, "cirrus,battery-threshold", &val);
+ if (of_property_read_u32(np, "cirrus,battery-threshold", &val))
+ val = -1u;
switch (val) {
case CS35L32_BATT_THRESH_3_1V:
case CS35L32_BATT_THRESH_3_2V:
@@ -310,13 +316,15 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_BATT_THRESH_3_4V:
pdata->batt_thresh = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,battery-threshold DT value %d\n", val);
pdata->batt_thresh = CS35L32_BATT_THRESH_3_3V;
}
- of_property_read_u32(np, "cirrus,battery-recovery", &val);
+ if (of_property_read_u32(np, "cirrus,battery-recovery", &val))
+ val = -1u;
switch (val) {
case CS35L32_BATT_RECOV_3_1V:
case CS35L32_BATT_RECOV_3_2V:
@@ -326,6 +334,7 @@ static int cs35l32_handle_of_data(struct i2c_client *i2c_client,
case CS35L32_BATT_RECOV_3_6V:
pdata->batt_recov = val;
break;
+ case -1u:
default:
dev_err(&i2c_client->dev,
"Wrong cirrus,battery-recovery DT value %d\n", val);
diff --git a/sound/soc/codecs/cs47l24.c b/sound/soc/codecs/cs47l24.c
index 576087bda330..00e9b6fc1b5c 100644
--- a/sound/soc/codecs/cs47l24.c
+++ b/sound/soc/codecs/cs47l24.c
@@ -1108,6 +1108,9 @@ static int cs47l24_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/hdac_hdmi.c b/sound/soc/codecs/hdac_hdmi.c
index 26f9459cb3bc..aaa038ffc8a5 100644
--- a/sound/soc/codecs/hdac_hdmi.c
+++ b/sound/soc/codecs/hdac_hdmi.c
@@ -1420,32 +1420,39 @@ static int hdmi_codec_remove(struct snd_soc_codec *codec)
}
#ifdef CONFIG_PM
-static int hdmi_codec_resume(struct snd_soc_codec *codec)
+static int hdmi_codec_prepare(struct device *dev)
{
- struct hdac_ext_device *edev = snd_soc_codec_get_drvdata(codec);
+ struct hdac_ext_device *edev = to_hda_ext_device(dev);
+ struct hdac_device *hdac = &edev->hdac;
+
+ pm_runtime_get_sync(&edev->hdac.dev);
+
+ /*
+ * Power down afg.
+ * codec_read is preferred over codec_write to set the power state.
+ * This way the verb to set the power state is sent and a response is
+ * received, so setting the power state is ensured without polling the
+ * state in a loop.
+ */
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D3);
+
+ return 0;
+}
+
+static void hdmi_codec_complete(struct device *dev)
+{
+ struct hdac_ext_device *edev = to_hda_ext_device(dev);
struct hdac_hdmi_priv *hdmi = edev->private_data;
struct hdac_hdmi_pin *pin;
struct hdac_device *hdac = &edev->hdac;
- struct hdac_bus *bus = hdac->bus;
- int err;
- unsigned long timeout;
-
- hdac_hdmi_skl_enable_all_pins(&edev->hdac);
- hdac_hdmi_skl_enable_dp12(&edev->hdac);
/* Power up afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)) {
-
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D0);
- /* Wait till power state is set to D0 */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0)
- && time_before(jiffies, timeout)) {
- msleep(50);
- }
- }
+ hdac_hdmi_skl_enable_all_pins(&edev->hdac);
+ hdac_hdmi_skl_enable_dp12(&edev->hdac);
/*
* As the ELD notify callback request is not entertained while the
@@ -1455,28 +1462,16 @@ static int hdmi_codec_resume(struct snd_soc_codec *codec)
list_for_each_entry(pin, &hdmi->pin_list, head)
hdac_hdmi_present_sense(pin, 1);
- /*
- * Codec power is turned ON during controller resume.
- * Turn it OFF here
- */
- err = snd_hdac_display_power(bus, false);
- if (err < 0) {
- dev_err(bus->dev,
- "Cannot turn OFF display power on i915, err: %d\n",
- err);
- return err;
- }
-
- return 0;
+ pm_runtime_put_sync(&edev->hdac.dev);
}
#else
-#define hdmi_codec_resume NULL
+#define hdmi_codec_prepare NULL
+#define hdmi_codec_complete NULL
#endif
static struct snd_soc_codec_driver hdmi_hda_codec = {
.probe = hdmi_codec_probe,
.remove = hdmi_codec_remove,
- .resume = hdmi_codec_resume,
.idle_bias_off = true,
};
@@ -1561,7 +1556,6 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
struct hdac_ext_device *edev = to_hda_ext_device(dev);
struct hdac_device *hdac = &edev->hdac;
struct hdac_bus *bus = hdac->bus;
- unsigned long timeout;
int err;
dev_dbg(dev, "Enter: %s\n", __func__);
@@ -1570,20 +1564,15 @@ static int hdac_hdmi_runtime_suspend(struct device *dev)
if (!bus)
return 0;
- /* Power down afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)) {
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D3);
-
- /* Wait till power state is set to D3 */
- timeout = jiffies + msecs_to_jiffies(1000);
- while (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D3)
- && time_before(jiffies, timeout)) {
-
- msleep(50);
- }
- }
-
+ /*
+ * Power down afg.
+ * codec_read is preferred over codec_write to set the power state.
+ * This way the verb to set the power state is sent and a response is
+ * received, so setting the power state is ensured without polling the
+ * state in a loop.
+ */
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D3);
err = snd_hdac_display_power(bus, false);
if (err < 0) {
dev_err(bus->dev, "Cannot turn on display power on i915\n");
@@ -1616,9 +1605,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
hdac_hdmi_skl_enable_dp12(&edev->hdac);
/* Power up afg */
- if (!snd_hdac_check_power_state(hdac, hdac->afg, AC_PWRST_D0))
- snd_hdac_codec_write(hdac, hdac->afg, 0,
- AC_VERB_SET_POWER_STATE, AC_PWRST_D0);
+ snd_hdac_codec_read(hdac, hdac->afg, 0, AC_VERB_SET_POWER_STATE,
+ AC_PWRST_D0);
return 0;
}
@@ -1629,6 +1617,8 @@ static int hdac_hdmi_runtime_resume(struct device *dev)
static const struct dev_pm_ops hdac_hdmi_pm = {
SET_RUNTIME_PM_OPS(hdac_hdmi_runtime_suspend, hdac_hdmi_runtime_resume, NULL)
+ .prepare = hdmi_codec_prepare,
+ .complete = hdmi_codec_complete,
};
static const struct hda_device_id hdmi_list[] = {
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index 1c8729984c2b..683769f0f246 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -343,9 +343,12 @@ static const struct snd_soc_dapm_widget nau8825_dapm_widgets[] = {
SND_SOC_DAPM_SUPPLY("ADC Power", NAU8825_REG_ANALOG_ADC_2, 6, 0, NULL,
0),
- /* ADC for button press detection */
- SND_SOC_DAPM_ADC("SAR", NULL, NAU8825_REG_SAR_CTRL,
- NAU8825_SAR_ADC_EN_SFT, 0),
+ /* ADC for button press detection. A dapm supply widget is used to
+ * prevent dapm_power_widgets from keeping the codec at SND_SOC_BIAS_ON
+ * during suspend.
+ */
+ SND_SOC_DAPM_SUPPLY("SAR", NAU8825_REG_SAR_CTRL,
+ NAU8825_SAR_ADC_EN_SFT, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("ADACL", 2, NAU8825_REG_RDAC, 12, 0, NULL, 0),
SND_SOC_DAPM_PGA_S("ADACR", 2, NAU8825_REG_RDAC, 13, 0, NULL, 0),
@@ -607,6 +610,16 @@ static bool nau8825_is_jack_inserted(struct regmap *regmap)
static void nau8825_restart_jack_detection(struct regmap *regmap)
{
+ /* Chip needs one FSCLK cycle in order to generate interrupts,
+ * as we cannot guarantee one will be provided by the system. Turning
+ * master mode on then off enables us to generate that FSCLK cycle
+ * with a minimum of contention on the clock bus.
+ */
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
+ regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
+ NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
+
/* this will restart the entire jack detection process including MIC/GND
* switching and create interrupts. We have to go from 0 to 1 and back
* to 0 to restart.
@@ -728,7 +741,10 @@ static irqreturn_t nau8825_interrupt(int irq, void *data)
struct regmap *regmap = nau8825->regmap;
int active_irq, clear_irq = 0, event = 0, event_mask = 0;
- regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq);
+ if (regmap_read(regmap, NAU8825_REG_IRQ_STATUS, &active_irq)) {
+ dev_err(nau8825->dev, "failed to read irq status\n");
+ return IRQ_NONE;
+ }
if ((active_irq & NAU8825_JACK_EJECTION_IRQ_MASK) ==
NAU8825_JACK_EJECTION_DETECTED) {
@@ -1141,33 +1157,74 @@ static int nau8825_set_bias_level(struct snd_soc_codec *codec,
return ret;
}
}
-
- ret = regcache_sync(nau8825->regmap);
- if (ret) {
- dev_err(codec->dev,
- "Failed to sync cache: %d\n", ret);
- return ret;
- }
}
-
break;
case SND_SOC_BIAS_OFF:
if (nau8825->mclk_freq)
clk_disable_unprepare(nau8825->mclk);
-
- regcache_mark_dirty(nau8825->regmap);
break;
}
return 0;
}
+#ifdef CONFIG_PM
+static int nau8825_suspend(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ disable_irq(nau8825->irq);
+ regcache_cache_only(nau8825->regmap, true);
+ regcache_mark_dirty(nau8825->regmap);
+
+ return 0;
+}
+
+static int nau8825_resume(struct snd_soc_codec *codec)
+{
+ struct nau8825 *nau8825 = snd_soc_codec_get_drvdata(codec);
+
+ /* The chip may lose power and reset in S3. regcache_sync restores
+ * register values including configurations for sysclk, irq, and
+ * jack/button detection.
+ */
+ regcache_cache_only(nau8825->regmap, false);
+ regcache_sync(nau8825->regmap);
+
+ /* Check the jack plug status directly. If the headset is unplugged
+ * during S3 when the chip has no power, there will be no jack
+ * detection irq even after the nau8825_restart_jack_detection below,
+ * because the chip just thinks no headset has ever been plugged in.
+ */
+ if (!nau8825_is_jack_inserted(nau8825->regmap)) {
+ nau8825_eject_jack(nau8825);
+ snd_soc_jack_report(nau8825->jack, 0, SND_JACK_HEADSET);
+ }
+
+ enable_irq(nau8825->irq);
+
+ /* Run jack detection to check the type (OMTP or CTIA) of the headset
+ * if there is one. This handles the case where a different type of
+ * headset is plugged in during S3. This triggers an IRQ iff a headset
+ * is already plugged in.
+ */
+ nau8825_restart_jack_detection(nau8825->regmap);
+
+ return 0;
+}
+#else
+#define nau8825_suspend NULL
+#define nau8825_resume NULL
+#endif
+
static struct snd_soc_codec_driver nau8825_codec_driver = {
.probe = nau8825_codec_probe,
.set_sysclk = nau8825_set_sysclk,
.set_pll = nau8825_set_pll,
.set_bias_level = nau8825_set_bias_level,
.suspend_bias_off = true,
+ .suspend = nau8825_suspend,
+ .resume = nau8825_resume,
.controls = nau8825_controls,
.num_controls = ARRAY_SIZE(nau8825_controls),
@@ -1277,16 +1334,6 @@ static int nau8825_setup_irq(struct nau8825 *nau8825)
regmap_update_bits(regmap, NAU8825_REG_ENA_CTRL,
NAU8825_ENABLE_DACR, NAU8825_ENABLE_DACR);
- /* Chip needs one FSCLK cycle in order to generate interrupts,
- * as we cannot guarantee one will be provided by the system. Turning
- * master mode on then off enables us to generate that FSCLK cycle
- * with a minimum of contention on the clock bus.
- */
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_MASTER);
- regmap_update_bits(regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK, NAU8825_I2S_MS_SLAVE);
-
ret = devm_request_threaded_irq(nau8825->dev, nau8825->irq, NULL,
nau8825_interrupt, IRQF_TRIGGER_LOW | IRQF_ONESHOT,
"nau8825", nau8825);
@@ -1354,36 +1401,6 @@ static int nau8825_i2c_remove(struct i2c_client *client)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
-static int nau8825_suspend(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
- disable_irq(client->irq);
- regcache_cache_only(nau8825->regmap, true);
- regcache_mark_dirty(nau8825->regmap);
-
- return 0;
-}
-
-static int nau8825_resume(struct device *dev)
-{
- struct i2c_client *client = to_i2c_client(dev);
- struct nau8825 *nau8825 = dev_get_drvdata(dev);
-
- regcache_cache_only(nau8825->regmap, false);
- regcache_sync(nau8825->regmap);
- enable_irq(client->irq);
-
- return 0;
-}
-#endif
-
-static const struct dev_pm_ops nau8825_pm = {
- SET_SYSTEM_SLEEP_PM_OPS(nau8825_suspend, nau8825_resume)
-};
-
static const struct i2c_device_id nau8825_i2c_ids[] = {
{ "nau8825", 0 },
{ }
@@ -1410,7 +1427,6 @@ static struct i2c_driver nau8825_driver = {
.name = "nau8825",
.of_match_table = of_match_ptr(nau8825_of_ids),
.acpi_match_table = ACPI_PTR(nau8825_acpi_match),
- .pm = &nau8825_pm,
},
.probe = nau8825_i2c_probe,
.remove = nau8825_i2c_remove,
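The suspend/resume pair that moved into the codec driver follows the usual regmap cache sequence; condensed to the ordering that matters, a sketch of the two paths:

    /* suspend: quiesce the IRQ first, then route register I/O to the cache
     * and mark everything dirty so resume knows to write it back.
     */
    disable_irq(nau8825->irq);
    regcache_cache_only(nau8825->regmap, true);
    regcache_mark_dirty(nau8825->regmap);

    /* resume: re-enable hardware I/O and replay the dirty registers before
     * the IRQ is re-enabled, presumably so jack-detect interrupts already
     * see a fully configured chip.
     */
    regcache_cache_only(nau8825->regmap, false);
    regcache_sync(nau8825->regmap);
    enable_irq(nau8825->irq);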
diff --git a/sound/soc/codecs/rt5640.c b/sound/soc/codecs/rt5640.c
index e8b5ba04417a..09e8988bbb2d 100644
--- a/sound/soc/codecs/rt5640.c
+++ b/sound/soc/codecs/rt5640.c
@@ -359,7 +359,7 @@ static const DECLARE_TLV_DB_RANGE(bst_tlv,
/* Interface data select */
static const char * const rt5640_data_select[] = {
- "Normal", "left copy to right", "right copy to left", "Swap"};
+ "Normal", "Swap", "left copy to right", "right copy to left"};
static SOC_ENUM_SINGLE_DECL(rt5640_if1_dac_enum, RT5640_DIG_INF_DATA,
RT5640_IF1_DAC_SEL_SFT, rt5640_data_select);
diff --git a/sound/soc/codecs/rt5640.h b/sound/soc/codecs/rt5640.h
index 1761c3a98b76..58b664b06c16 100644
--- a/sound/soc/codecs/rt5640.h
+++ b/sound/soc/codecs/rt5640.h
@@ -443,39 +443,39 @@
#define RT5640_IF1_DAC_SEL_MASK (0x3 << 14)
#define RT5640_IF1_DAC_SEL_SFT 14
#define RT5640_IF1_DAC_SEL_NOR (0x0 << 14)
-#define RT5640_IF1_DAC_SEL_L2R (0x1 << 14)
-#define RT5640_IF1_DAC_SEL_R2L (0x2 << 14)
-#define RT5640_IF1_DAC_SEL_SWAP (0x3 << 14)
+#define RT5640_IF1_DAC_SEL_SWAP (0x1 << 14)
+#define RT5640_IF1_DAC_SEL_L2R (0x2 << 14)
+#define RT5640_IF1_DAC_SEL_R2L (0x3 << 14)
#define RT5640_IF1_ADC_SEL_MASK (0x3 << 12)
#define RT5640_IF1_ADC_SEL_SFT 12
#define RT5640_IF1_ADC_SEL_NOR (0x0 << 12)
-#define RT5640_IF1_ADC_SEL_L2R (0x1 << 12)
-#define RT5640_IF1_ADC_SEL_R2L (0x2 << 12)
-#define RT5640_IF1_ADC_SEL_SWAP (0x3 << 12)
+#define RT5640_IF1_ADC_SEL_SWAP (0x1 << 12)
+#define RT5640_IF1_ADC_SEL_L2R (0x2 << 12)
+#define RT5640_IF1_ADC_SEL_R2L (0x3 << 12)
#define RT5640_IF2_DAC_SEL_MASK (0x3 << 10)
#define RT5640_IF2_DAC_SEL_SFT 10
#define RT5640_IF2_DAC_SEL_NOR (0x0 << 10)
-#define RT5640_IF2_DAC_SEL_L2R (0x1 << 10)
-#define RT5640_IF2_DAC_SEL_R2L (0x2 << 10)
-#define RT5640_IF2_DAC_SEL_SWAP (0x3 << 10)
+#define RT5640_IF2_DAC_SEL_SWAP (0x1 << 10)
+#define RT5640_IF2_DAC_SEL_L2R (0x2 << 10)
+#define RT5640_IF2_DAC_SEL_R2L (0x3 << 10)
#define RT5640_IF2_ADC_SEL_MASK (0x3 << 8)
#define RT5640_IF2_ADC_SEL_SFT 8
#define RT5640_IF2_ADC_SEL_NOR (0x0 << 8)
-#define RT5640_IF2_ADC_SEL_L2R (0x1 << 8)
-#define RT5640_IF2_ADC_SEL_R2L (0x2 << 8)
-#define RT5640_IF2_ADC_SEL_SWAP (0x3 << 8)
+#define RT5640_IF2_ADC_SEL_SWAP (0x1 << 8)
+#define RT5640_IF2_ADC_SEL_L2R (0x2 << 8)
+#define RT5640_IF2_ADC_SEL_R2L (0x3 << 8)
#define RT5640_IF3_DAC_SEL_MASK (0x3 << 6)
#define RT5640_IF3_DAC_SEL_SFT 6
#define RT5640_IF3_DAC_SEL_NOR (0x0 << 6)
-#define RT5640_IF3_DAC_SEL_L2R (0x1 << 6)
-#define RT5640_IF3_DAC_SEL_R2L (0x2 << 6)
-#define RT5640_IF3_DAC_SEL_SWAP (0x3 << 6)
+#define RT5640_IF3_DAC_SEL_SWAP (0x1 << 6)
+#define RT5640_IF3_DAC_SEL_L2R (0x2 << 6)
+#define RT5640_IF3_DAC_SEL_R2L (0x3 << 6)
#define RT5640_IF3_ADC_SEL_MASK (0x3 << 4)
#define RT5640_IF3_ADC_SEL_SFT 4
#define RT5640_IF3_ADC_SEL_NOR (0x0 << 4)
-#define RT5640_IF3_ADC_SEL_L2R (0x1 << 4)
-#define RT5640_IF3_ADC_SEL_R2L (0x2 << 4)
-#define RT5640_IF3_ADC_SEL_SWAP (0x3 << 4)
+#define RT5640_IF3_ADC_SEL_SWAP (0x1 << 4)
+#define RT5640_IF3_ADC_SEL_L2R (0x2 << 4)
+#define RT5640_IF3_ADC_SEL_R2L (0x3 << 4)
/* REC Left Mixer Control 1 (0x3b) */
#define RT5640_G_HP_L_RM_L_MASK (0x7 << 13)
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index a8b3e3f701f9..1bae17ee8817 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -1955,11 +1955,16 @@ err_adsp2_codec_probe:
static int wm5102_codec_remove(struct snd_soc_codec *codec)
{
struct wm5102_priv *priv = snd_soc_codec_get_drvdata(codec);
+ struct arizona *arizona = priv->core.arizona;
wm_adsp2_codec_remove(&priv->core.adsp[0], codec);
priv->core.arizona->dapm = NULL;
+ arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 83ba70fe16e6..2728ac545ffe 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2298,6 +2298,8 @@ static int wm5110_codec_remove(struct snd_soc_codec *codec)
arizona_free_irq(arizona, ARIZONA_IRQ_DSP_IRQ1, priv);
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 88223608a33f..720a14e0687d 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2471,7 +2471,7 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
break;
default:
dev_warn(codec->dev, "Unknown DSPCLK divisor read back\n");
- dspclk = wm8962->sysclk;
+ dspclk = wm8962->sysclk_rate;
}
dev_dbg(codec->dev, "DSPCLK is %dHz, BCLK %d\n", dspclk, wm8962->bclk);
diff --git a/sound/soc/codecs/wm8997.c b/sound/soc/codecs/wm8997.c
index 52d766efe14f..6b0785b5a5c5 100644
--- a/sound/soc/codecs/wm8997.c
+++ b/sound/soc/codecs/wm8997.c
@@ -1072,6 +1072,8 @@ static int wm8997_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/codecs/wm8998.c b/sound/soc/codecs/wm8998.c
index 012396074a8a..449f66636205 100644
--- a/sound/soc/codecs/wm8998.c
+++ b/sound/soc/codecs/wm8998.c
@@ -1324,6 +1324,8 @@ static int wm8998_codec_remove(struct snd_soc_codec *codec)
priv->core.arizona->dapm = NULL;
+ arizona_free_spk(codec);
+
return 0;
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index b3e6c2300457..1120f4f4d011 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -163,7 +163,6 @@ config SND_SOC_INTEL_SKYLAKE
tristate
select SND_HDA_EXT_CORE
select SND_SOC_TOPOLOGY
- select SND_HDA_I915
select SND_SOC_INTEL_SST
config SND_SOC_INTEL_SKL_RT286_MACH
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index ac60f1301e21..91565229d074 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -1345,7 +1345,7 @@ int sst_hsw_stream_reset(struct sst_hsw *hsw, struct sst_hsw_stream *stream)
return 0;
/* wait for pause to complete before we reset the stream */
- while (stream->running && tries--)
+ while (stream->running && --tries)
msleep(1);
if (!tries) {
dev_err(hsw->dev, "error: reset stream %d still running\n",
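The change from tries-- to --tries is what makes the timeout check after the loop work; a minimal standalone sketch of the difference in plain C:

    /* old */
    while (running && tries--)    /* exits with tries == -1 when the count runs out */
            msleep(1);

    /* new */
    while (running && --tries)    /* exits with tries == 0 when the count runs out */
            msleep(1);

    /* only the second form lets the later "if (!tries)" timeout check fire */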
diff --git a/sound/soc/intel/skylake/skl-sst-dsp.c b/sound/soc/intel/skylake/skl-sst-dsp.c
index a5267e8a96e0..2962ef22fc84 100644
--- a/sound/soc/intel/skylake/skl-sst-dsp.c
+++ b/sound/soc/intel/skylake/skl-sst-dsp.c
@@ -336,6 +336,11 @@ void skl_dsp_free(struct sst_dsp *dsp)
skl_ipc_int_disable(dsp);
free_irq(dsp->irq, dsp);
+ dsp->cl_dev.ops.cl_cleanup_controller(dsp);
+ skl_cldma_int_disable(dsp);
+ skl_ipc_op_int_disable(dsp);
+ skl_ipc_int_disable(dsp);
+
skl_dsp_disable_core(dsp);
}
EXPORT_SYMBOL_GPL(skl_dsp_free);
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 545b4e77b8aa..cdb78b7e5a14 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -239,6 +239,7 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
{
int multiplier = 1;
struct skl_module_fmt *in_fmt, *out_fmt;
+ int in_rate, out_rate;
/* Since fixups is applied to pin 0 only, ibs, obs needs
@@ -249,15 +250,24 @@ static void skl_tplg_update_buffer_size(struct skl_sst *ctx,
if (mcfg->m_type == SKL_MODULE_TYPE_SRCINT)
multiplier = 5;
- mcfg->ibs = (in_fmt->s_freq / 1000) *
- (mcfg->in_fmt->channels) *
- (mcfg->in_fmt->bit_depth >> 3) *
- multiplier;
-
- mcfg->obs = (mcfg->out_fmt->s_freq / 1000) *
- (mcfg->out_fmt->channels) *
- (mcfg->out_fmt->bit_depth >> 3) *
- multiplier;
+
+ if (in_fmt->s_freq % 1000)
+ in_rate = (in_fmt->s_freq / 1000) + 1;
+ else
+ in_rate = (in_fmt->s_freq / 1000);
+
+ mcfg->ibs = in_rate * (mcfg->in_fmt->channels) *
+ (mcfg->in_fmt->bit_depth >> 3) *
+ multiplier;
+
+ if (mcfg->out_fmt->s_freq % 1000)
+ out_rate = (mcfg->out_fmt->s_freq / 1000) + 1;
+ else
+ out_rate = (mcfg->out_fmt->s_freq / 1000);
+
+ mcfg->obs = out_rate * (mcfg->out_fmt->channels) *
+ (mcfg->out_fmt->bit_depth >> 3) *
+ multiplier;
}
static int skl_tplg_update_be_blob(struct snd_soc_dapm_widget *w,
@@ -485,11 +495,15 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
ret = ctx->dsp->fw_ops.load_mod(ctx->dsp,
mconfig->id.module_id, mconfig->guid);
if (ret < 0)
return ret;
+
+ mconfig->m_state = SKL_MODULE_LOADED;
}
/* update blob if blob is null for be with default value */
@@ -509,7 +523,6 @@ skl_tplg_init_pipe_modules(struct skl *skl, struct skl_pipe *pipe)
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
return ret;
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
}
return 0;
@@ -524,7 +537,8 @@ static int skl_tplg_unload_pipe_modules(struct skl_sst *ctx,
list_for_each_entry(w_module, &pipe->w_list, node) {
mconfig = w_module->w->priv;
- if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod)
+ if (mconfig->is_loadable && ctx->dsp->fw_ops.unload_mod &&
+ mconfig->m_state > SKL_MODULE_UNINIT)
return ctx->dsp->fw_ops.unload_mod(ctx->dsp,
mconfig->id.module_id);
}
@@ -558,6 +572,9 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
if (!skl_is_pipe_mem_avail(skl, mconfig))
return -ENOMEM;
+ skl_tplg_alloc_pipe_mem(skl, mconfig);
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
/*
* Create a list of modules for pipe.
* This list contains modules from source to sink
@@ -601,9 +618,6 @@ static int skl_tplg_mixer_dapm_pre_pmu_event(struct snd_soc_dapm_widget *w,
src_module = dst_module;
}
- skl_tplg_alloc_pipe_mem(skl, mconfig);
- skl_tplg_alloc_pipe_mcps(skl, mconfig);
-
return 0;
}
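The round-up of s_freq in skl_tplg_update_buffer_size() matters for rates that do not divide evenly by 1000. A worked example, assuming a hypothetical 44.1 kHz, 2-channel, 16-bit input on a module with multiplier 1:

    in_rate = 44100 / 1000 + 1 = 45             /* 44100 % 1000 != 0 */
    ibs     = 45 * 2 * (16 >> 3) * 1 = 180      /* bytes per millisecond */

    /* without the round-up the buffer would cover only 44 frames per
     * millisecond (176 bytes), one frame short of the worst-case window.
     */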
diff --git a/sound/soc/intel/skylake/skl-topology.h b/sound/soc/intel/skylake/skl-topology.h
index de3c401284d9..d2d923002d5c 100644
--- a/sound/soc/intel/skylake/skl-topology.h
+++ b/sound/soc/intel/skylake/skl-topology.h
@@ -274,10 +274,10 @@ struct skl_pipe {
enum skl_module_state {
SKL_MODULE_UNINIT = 0,
- SKL_MODULE_INIT_DONE = 1,
- SKL_MODULE_LOADED = 2,
- SKL_MODULE_UNLOADED = 3,
- SKL_MODULE_BIND_DONE = 4
+ SKL_MODULE_LOADED = 1,
+ SKL_MODULE_INIT_DONE = 2,
+ SKL_MODULE_BIND_DONE = 3,
+ SKL_MODULE_UNLOADED = 4,
};
struct skl_module_cfg {
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index ab5e25aaeee3..3982f5536f2d 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -222,6 +222,7 @@ static int skl_suspend(struct device *dev)
struct hdac_ext_bus *ebus = pci_get_drvdata(pci);
struct skl *skl = ebus_to_skl(ebus);
struct hdac_bus *bus = ebus_to_hbus(ebus);
+ int ret = 0;
/*
* Do not suspend if streams which are marked ignore suspend are
@@ -232,10 +233,20 @@ static int skl_suspend(struct device *dev)
enable_irq_wake(bus->irq);
pci_save_state(pci);
pci_disable_device(pci);
- return 0;
} else {
- return _skl_suspend(ebus);
+ ret = _skl_suspend(ebus);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI)) {
+ ret = snd_hdac_display_power(bus, false);
+ if (ret < 0)
+ dev_err(bus->dev,
+ "Cannot turn OFF display power on i915\n");
}
+
+ return ret;
}
static int skl_resume(struct device *dev)
@@ -316,17 +327,20 @@ static int skl_free(struct hdac_ext_bus *ebus)
if (bus->irq >= 0)
free_irq(bus->irq, (void *)bus);
- if (bus->remap_addr)
- iounmap(bus->remap_addr);
-
snd_hdac_bus_free_stream_pages(bus);
snd_hdac_stream_free_all(ebus);
snd_hdac_link_free_all(ebus);
+
+ if (bus->remap_addr)
+ iounmap(bus->remap_addr);
+
pci_release_regions(skl->pci);
pci_disable_device(skl->pci);
snd_hdac_ext_bus_exit(ebus);
+ if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
+ snd_hdac_i915_exit(&ebus->bus);
return 0;
}
@@ -719,12 +733,12 @@ static void skl_remove(struct pci_dev *pci)
if (skl->tplg)
release_firmware(skl->tplg);
- if (IS_ENABLED(CONFIG_SND_SOC_HDAC_HDMI))
- snd_hdac_i915_exit(&ebus->bus);
-
if (pci_dev_run_wake(pci))
pm_runtime_get_noresume(&pci->dev);
- pci_dev_put(pci);
+
+ /* codec removal, invoke bus_device_remove */
+ snd_hdac_ext_bus_device_remove(ebus);
+
skl_platform_unregister(&pci->dev);
skl_free_dsp(skl);
skl_machine_device_unregister(skl);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 801ae1a81dfd..c4464858bf01 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -2188,6 +2188,13 @@ static ssize_t dapm_widget_show_component(struct snd_soc_component *cmpnt,
int count = 0;
char *state = "not set";
+ /* card won't be set for the dummy component. As a spot fix we
+ * check for that case specifically here, but in future we will
+ * ensure that the dummy component looks like the others.
+ */
+ if (!cmpnt->card)
+ return 0;
+
list_for_each_entry(w, &cmpnt->card->widgets, list) {
if (w->dapm != dapm)
continue;