418 files changed, 3742 insertions, 3337 deletions
diff --git a/.gitignore b/.gitignore index 8fe465f251c0..70ec6037fa7a 100644 --- a/.gitignore +++ b/.gitignore @@ -78,6 +78,7 @@ modules.order # RPM spec file (make rpm-pkg) # /*.spec +/rpmbuild/ # # Debian directory (make deb-pkg) @@ -28,6 +28,7 @@ Alexander Lobakin <alobakin@pm.me> <bloodyreaper@yandex.ru> Alexander Mikhalitsyn <alexander@mihalicyn.com> <alexander.mikhalitsyn@virtuozzo.com> Alexander Mikhalitsyn <alexander@mihalicyn.com> <aleksandr.mikhalitsyn@canonical.com> Alexandre Belloni <alexandre.belloni@bootlin.com> <alexandre.belloni@free-electrons.com> +Alexandre Ghiti <alex@ghiti.fr> <alexandre.ghiti@canonical.com> Alexei Starovoitov <ast@kernel.org> <alexei.starovoitov@gmail.com> Alexei Starovoitov <ast@kernel.org> <ast@fb.com> Alexei Starovoitov <ast@kernel.org> <ast@plumgrid.com> @@ -121,7 +122,7 @@ Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@gmail.com> Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@imgtec.com> Dengcheng Zhu <dzhu@wavecomp.com> <dengcheng.zhu@mips.com> <dev.kurt@vandijck-laurijssen.be> <kurt.van.dijck@eia.be> -Dikshita Agarwal <dikshita@qti.qualcomm.com> <dikshita@codeaurora.org> +Dikshita Agarwal <quic_dikshita@quicinc.com> <dikshita@codeaurora.org> Dmitry Baryshkov <dbaryshkov@gmail.com> Dmitry Baryshkov <dbaryshkov@gmail.com> <[dbaryshkov@gmail.com]> Dmitry Baryshkov <dbaryshkov@gmail.com> <dmitry_baryshkov@mentor.com> @@ -194,6 +195,7 @@ Jan Glauber <jan.glauber@gmail.com> <jang@linux.vnet.ibm.com> Jan Glauber <jan.glauber@gmail.com> <jglauber@cavium.com> Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@linux.intel.com> Jarkko Sakkinen <jarkko@kernel.org> <jarkko@profian.com> +Jarkko Sakkinen <jarkko@kernel.org> <jarkko.sakkinen@tuni.fi> Jason Gunthorpe <jgg@ziepe.ca> <jgg@mellanox.com> Jason Gunthorpe <jgg@ziepe.ca> <jgg@nvidia.com> Jason Gunthorpe <jgg@ziepe.ca> <jgunthorpe@obsidianresearch.com> @@ -213,6 +215,9 @@ Jens Axboe <axboe@suse.de> Jens Osterkamp <Jens.Osterkamp@de.ibm.com> Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net> Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org> +Jiri Pirko <jiri@resnulli.us> <jiri@nvidia.com> +Jiri Pirko <jiri@resnulli.us> <jiri@mellanox.com> +Jiri Pirko <jiri@resnulli.us> <jpirko@redhat.com> Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com> Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com> Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com> diff --git a/Documentation/driver-api/vfio.rst b/Documentation/driver-api/vfio.rst index 50b690f7f663..68abc089d6dd 100644 --- a/Documentation/driver-api/vfio.rst +++ b/Documentation/driver-api/vfio.rst @@ -242,7 +242,7 @@ group and can access them as follows:: VFIO User API ------------------------------------------------------------------------------- -Please see include/linux/vfio.h for complete API documentation. +Please see include/uapi/linux/vfio.h for complete API documentation. VFIO bus driver API ------------------------------------------------------------------------------- diff --git a/Documentation/filesystems/vfs.rst b/Documentation/filesystems/vfs.rst index c53f30251a66..f3b344f0c0a4 100644 --- a/Documentation/filesystems/vfs.rst +++ b/Documentation/filesystems/vfs.rst @@ -1222,7 +1222,7 @@ defined: return -ECHILD and it will be called again in ref-walk mode. -``_weak_revalidate`` +``d_weak_revalidate`` called when the VFS needs to revalidate a "jumped" dentry. This is called when a path-walk ends at dentry that was not acquired by doing a lookup in the parent directory. 
This includes "/", diff --git a/Documentation/firmware-guide/acpi/enumeration.rst b/Documentation/firmware-guide/acpi/enumeration.rst index b9dc0c603f36..56d9913a3370 100644 --- a/Documentation/firmware-guide/acpi/enumeration.rst +++ b/Documentation/firmware-guide/acpi/enumeration.rst @@ -19,7 +19,7 @@ possible we decided to do following: platform devices. - Devices behind real busses where there is a connector resource - are represented as struct spi_device or struct i2c_device. Note + are represented as struct spi_device or struct i2c_client. Note that standard UARTs are not busses so there is no struct uart_device, although some of them may be represented by struct serdev_device. diff --git a/Documentation/maintainer/rebasing-and-merging.rst b/Documentation/maintainer/rebasing-and-merging.rst index 09f988e7fa71..85800ce95ae5 100644 --- a/Documentation/maintainer/rebasing-and-merging.rst +++ b/Documentation/maintainer/rebasing-and-merging.rst @@ -213,11 +213,7 @@ point rather than some random spot. If your upstream-bound branch has emptied entirely into the mainline during the merge window, you can pull it forward with a command like:: - git merge v5.2-rc1^0 - -The "^0" will cause Git to do a fast-forward merge (which should be -possible in this situation), thus avoiding the addition of a spurious merge -commit. + git merge --ff-only v5.2-rc1 The guidelines laid out above are just that: guidelines. There will always be situations that call out for a different solution, and these guidelines diff --git a/Documentation/mm/hugetlbfs_reserv.rst b/Documentation/mm/hugetlbfs_reserv.rst index 3d05d64de9b4..d9c2b0f01dcd 100644 --- a/Documentation/mm/hugetlbfs_reserv.rst +++ b/Documentation/mm/hugetlbfs_reserv.rst @@ -5,10 +5,10 @@ Hugetlbfs Reservation Overview ======== -Huge pages as described at Documentation/mm/hugetlbpage.rst are typically -preallocated for application use. These huge pages are instantiated in a -task's address space at page fault time if the VMA indicates huge pages are -to be used. If no huge page exists at page fault time, the task is sent +Huge pages as described at Documentation/admin-guide/mm/hugetlbpage.rst are +typically preallocated for application use. These huge pages are instantiated +in a task's address space at page fault time if the VMA indicates huge pages +are to be used. If no huge page exists at page fault time, the task is sent a SIGBUS and often dies an unhappy death. Shortly after huge page support was added, it was determined that it would be better to detect a shortage of huge pages at mmap() time. The idea is that if there were not enough diff --git a/Documentation/mm/physical_memory.rst b/Documentation/mm/physical_memory.rst index f9d7ea4b9dca..1bc888d36ea1 100644 --- a/Documentation/mm/physical_memory.rst +++ b/Documentation/mm/physical_memory.rst @@ -66,7 +66,7 @@ one of the types described below. also populated on boot using one of ``kernelcore``, ``movablecore`` and ``movable_node`` kernel command line parameters. See Documentation/mm/page_migration.rst and - Documentation/admin-guide/mm/memory_hotplug.rst for additional details. + Documentation/admin-guide/mm/memory-hotplug.rst for additional details. * ``ZONE_DEVICE`` represents memory residing on devices such as PMEM and GPU. 
It has different characteristics than RAM zone types and it exists to provide diff --git a/Documentation/netlink/genetlink-c.yaml b/Documentation/netlink/genetlink-c.yaml index f082a5ad7cf1..5c3642b3f802 100644 --- a/Documentation/netlink/genetlink-c.yaml +++ b/Documentation/netlink/genetlink-c.yaml @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) %YAML 1.2 --- $id: http://kernel.org/schemas/netlink/genetlink-c.yaml# diff --git a/Documentation/netlink/genetlink-legacy.yaml b/Documentation/netlink/genetlink-legacy.yaml index c6b8c77f7d12..5e98c6d2b9aa 100644 --- a/Documentation/netlink/genetlink-legacy.yaml +++ b/Documentation/netlink/genetlink-legacy.yaml @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) %YAML 1.2 --- $id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml# diff --git a/Documentation/netlink/genetlink.yaml b/Documentation/netlink/genetlink.yaml index b2d56ab9e615..d35dcd6f8d82 100644 --- a/Documentation/netlink/genetlink.yaml +++ b/Documentation/netlink/genetlink.yaml @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) %YAML 1.2 --- $id: http://kernel.org/schemas/netlink/genetlink-legacy.yaml# diff --git a/Documentation/netlink/specs/ethtool.yaml b/Documentation/netlink/specs/ethtool.yaml index 18ecb7d90cbe..4727c067e2ba 100644 --- a/Documentation/netlink/specs/ethtool.yaml +++ b/Documentation/netlink/specs/ethtool.yaml @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) name: ethtool diff --git a/Documentation/netlink/specs/fou.yaml b/Documentation/netlink/specs/fou.yaml index cff104288723..3e13826a3fdf 100644 --- a/Documentation/netlink/specs/fou.yaml +++ b/Documentation/netlink/specs/fou.yaml @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) name: fou diff --git a/Documentation/netlink/specs/netdev.yaml b/Documentation/netlink/specs/netdev.yaml index 24de747b5344..b99e7ffef7a1 100644 --- a/Documentation/netlink/specs/netdev.yaml +++ b/Documentation/netlink/specs/netdev.yaml @@ -1,4 +1,4 @@ -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) name: netdev @@ -9,6 +9,7 @@ definitions: - type: flags name: xdp-act + render-max: true entries: - name: basic diff --git a/Documentation/process/programming-language.rst b/Documentation/process/programming-language.rst index 5fc9160ca1fa..bc56dee6d0bc 100644 --- a/Documentation/process/programming-language.rst +++ b/Documentation/process/programming-language.rst @@ -12,10 +12,6 @@ under ``-std=gnu11`` [gcc-c-dialect-options]_: the GNU dialect of ISO C11. This dialect contains many extensions to the language [gnu-extensions]_, and many of them are used within the kernel as a matter of course. -There is some support for compiling the kernel with ``icc`` [icc]_ for several -of the architectures, although at the time of writing it is not completed, -requiring third-party patches. - Attributes ---------- @@ -35,12 +31,28 @@ in order to feature detect which ones can be used and/or to shorten the code. 
Please refer to ``include/linux/compiler_attributes.h`` for more information. +Rust +---- + +The kernel has experimental support for the Rust programming language +[rust-language]_ under ``CONFIG_RUST``. It is compiled with ``rustc`` [rustc]_ +under ``--edition=2021`` [rust-editions]_. Editions are a way to introduce +small changes to the language that are not backwards compatible. + +On top of that, some unstable features [rust-unstable-features]_ are used in +the kernel. Unstable features may change in the future, thus it is an important +goal to reach a point where only stable features are used. + +Please refer to Documentation/rust/index.rst for more information. + .. [c-language] http://www.open-std.org/jtc1/sc22/wg14/www/standards .. [gcc] https://gcc.gnu.org .. [clang] https://clang.llvm.org -.. [icc] https://software.intel.com/en-us/c-compilers .. [gcc-c-dialect-options] https://gcc.gnu.org/onlinedocs/gcc/C-Dialect-Options.html .. [gnu-extensions] https://gcc.gnu.org/onlinedocs/gcc/C-Extensions.html .. [gcc-attribute-syntax] https://gcc.gnu.org/onlinedocs/gcc/Attribute-Syntax.html .. [n2049] http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2049.pdf - +.. [rust-language] https://www.rust-lang.org +.. [rustc] https://doc.rust-lang.org/rustc/ +.. [rust-editions] https://doc.rust-lang.org/edition-guide/editions/ +.. [rust-unstable-features] https://github.com/Rust-for-Linux/linux/issues/2 diff --git a/Documentation/process/submitting-patches.rst b/Documentation/process/submitting-patches.rst index eac7167dce83..69ce64e03c70 100644 --- a/Documentation/process/submitting-patches.rst +++ b/Documentation/process/submitting-patches.rst @@ -320,7 +320,7 @@ for their time. Code review is a tiring and time-consuming process, and reviewers sometimes get grumpy. Even in that case, though, respond politely and address the problems they have pointed out. When sending a next version, add a ``patch changelog`` to the cover letter or to individual patches -explaining difference aganst previous submission (see +explaining difference against previous submission (see :ref:`the_canonical_patch_format`). See Documentation/process/email-clients.rst for recommendations on email diff --git a/Documentation/scheduler/sched-capacity.rst b/Documentation/scheduler/sched-capacity.rst index 8e2b8538bc2b..e2c1cf743158 100644 --- a/Documentation/scheduler/sched-capacity.rst +++ b/Documentation/scheduler/sched-capacity.rst @@ -258,7 +258,7 @@ Linux cannot currently figure out CPU capacity on its own, this information thus needs to be handed to it. Architectures must define arch_scale_cpu_capacity() for that purpose. -The arm and arm64 architectures directly map this to the arch_topology driver +The arm, arm64, and RISC-V architectures directly map this to the arch_topology driver CPU scaling data, which is derived from the capacity-dmips-mhz CPU binding; see Documentation/devicetree/bindings/cpu/cpu-capacity.txt. 
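Side note on the sched-capacity hunk above: arch_scale_cpu_capacity() on arm, arm64 and now RISC-V returns values that the arch_topology driver derives from the capacity-dmips-mhz binding, roughly by scaling each value by the CPU's maximum clock rate and normalizing so the fastest CPU reports SCHED_CAPACITY_SCALE (1024). The following is a rough, self-contained C sketch of that normalization under assumed device-tree values and clock rates; it is an illustration, not the kernel implementation.

/*
 * Simplified illustration of capacity-dmips-mhz normalization.
 * The DT values and clock rates below are assumptions for the example.
 */
#include <stdio.h>

#define SCHED_CAPACITY_SCALE 1024UL

struct cpu_info {
	unsigned long dmips_mhz;	/* capacity-dmips-mhz from the device tree */
	unsigned long max_mhz;		/* maximum clock rate of the CPU */
};

int main(void)
{
	struct cpu_info cpus[] = {
		{ .dmips_mhz = 1024, .max_mhz = 2400 },	/* "big" core */
		{ .dmips_mhz =  578, .max_mhz = 1800 },	/* "little" core */
	};
	unsigned long raw[2], max_raw = 0;
	int i;

	/* Scale each DT value by the CPU's maximum frequency ... */
	for (i = 0; i < 2; i++) {
		raw[i] = cpus[i].dmips_mhz * cpus[i].max_mhz;
		if (raw[i] > max_raw)
			max_raw = raw[i];
	}

	/* ... then normalize so the fastest CPU reads SCHED_CAPACITY_SCALE. */
	for (i = 0; i < 2; i++)
		printf("cpu%d capacity = %lu\n", i,
		       raw[i] * SCHED_CAPACITY_SCALE / max_raw);

	return 0;
}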
diff --git a/Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst b/Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst index c1fa35315d8b..b7a0544224ad 100644 --- a/Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst +++ b/Documentation/translations/zh_CN/mm/hugetlbfs_reserv.rst @@ -15,7 +15,8 @@ Hugetlbfs 预留 概述 ==== -Documentation/mm/hugetlbpage.rst 中描述的巨页通常是预先分配给应用程序使用的。如果VMA指 +Documentation/admin-guide/mm/hugetlbpage.rst +中描述的巨页通常是预先分配给应用程序使用的 。如果VMA指 示要使用巨页，这些巨页会在缺页异常时被实例化到任务的地址空间。如果在缺页异常 时没有巨页存在，任务就会被发送一个SIGBUS,并经常不高兴地死去。在加入巨页支 持后不久，人们决定，在mmap()时检测巨页的短缺情况会更好。这个想法是，如果 diff --git a/Documentation/translations/zh_CN/scheduler/sched-capacity.rst b/Documentation/translations/zh_CN/scheduler/sched-capacity.rst index e07ffdd391d3..8cba135dcd1a 100644 --- a/Documentation/translations/zh_CN/scheduler/sched-capacity.rst +++ b/Documentation/translations/zh_CN/scheduler/sched-capacity.rst @@ -231,7 +231,7 @@ CFS调度类基于实体负载跟踪机制（Per-Entity Load Tracking, PELT）经 当前，Linux无法凭自身算出CPU算力，因此必须要有把这个信息传递给Linux的方式。每个架构必须为此 定义arch_scale_cpu_capacity()函数。 -arm和arm64架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中（译注:参考 +arm、arm64和RISC-V架构直接把这个信息映射到arch_topology驱动的CPU scaling数据中（译注:参考 arch_topology.h的percpu变量cpu_scale),它是从capacity-dmips-mhz CPU binding中衍生计算 出来的。参见Documentation/devicetree/bindings/cpu/cpu-capacity.txt。 diff --git a/Documentation/userspace-api/netlink/specs.rst b/Documentation/userspace-api/netlink/specs.rst index 2122e0c4a399..a22442ba1d30 100644 --- a/Documentation/userspace-api/netlink/specs.rst +++ b/Documentation/userspace-api/netlink/specs.rst @@ -24,7 +24,8 @@ YAML specifications can be found under ``Documentation/netlink/specs/`` This document describes details of the schema. See :doc:`intro-specs` for a practical starting guide. -All specs must be licensed under ``GPL-2.0-only OR BSD-3-Clause`` +All specs must be licensed under +``((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)`` to allow for easy adoption in user space code.
Compatibility levels diff --git a/MAINTAINERS b/MAINTAINERS index d603402b0810..d8ebab595b2a 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -5971,7 +5971,7 @@ F: include/linux/dm-*.h F: include/uapi/linux/dm-*.h DEVLINK -M: Jiri Pirko <jiri@nvidia.com> +M: Jiri Pirko <jiri@resnulli.us> L: netdev@vger.kernel.org S: Supported F: Documentation/networking/devlink @@ -14872,12 +14872,12 @@ M: Sagi Grimberg <sagi@grimberg.me> L: linux-nvme@lists.infradead.org S: Supported W: http://git.infradead.org/nvme.git -T: git://git.infradead.org/nvme.git +T: git git://git.infradead.org/nvme.git F: Documentation/nvme/ -F: drivers/nvme/host/ F: drivers/nvme/common/ -F: include/linux/nvme.h +F: drivers/nvme/host/ F: include/linux/nvme-*.h +F: include/linux/nvme.h F: include/uapi/linux/nvme_ioctl.h NVM EXPRESS FABRICS AUTHENTICATION @@ -14912,7 +14912,7 @@ M: Chaitanya Kulkarni <kch@nvidia.com> L: linux-nvme@lists.infradead.org S: Supported W: http://git.infradead.org/nvme.git -T: git://git.infradead.org/nvme.git +T: git git://git.infradead.org/nvme.git F: drivers/nvme/target/ NVMEM FRAMEWORK @@ -15079,7 +15079,7 @@ F: Documentation/hwmon/nzxt-smart2.rst F: drivers/hwmon/nzxt-smart2.c OBJAGG -M: Jiri Pirko <jiri@nvidia.com> +M: Jiri Pirko <jiri@resnulli.us> L: netdev@vger.kernel.org S: Supported F: include/linux/objagg.h @@ -15853,7 +15853,7 @@ F: drivers/video/logo/logo_parisc* F: include/linux/hp_sdc.h PARMAN -M: Jiri Pirko <jiri@nvidia.com> +M: Jiri Pirko <jiri@resnulli.us> L: netdev@vger.kernel.org S: Supported F: include/linux/parman.h @@ -17990,7 +17990,7 @@ F: Documentation/devicetree/bindings/spi/microchip,mpfs-spi.yaml F: Documentation/devicetree/bindings/usb/microchip,mpfs-musb.yaml F: arch/riscv/boot/dts/microchip/ F: drivers/char/hw_random/mpfs-rng.c -F: drivers/clk/microchip/clk-mpfs.c +F: drivers/clk/microchip/clk-mpfs*.c F: drivers/i2c/busses/i2c-microchip-corei2c.c F: drivers/mailbox/mailbox-mpfs.c F: drivers/pci/controller/pcie-microchip-host.c @@ -274,8 +274,7 @@ no-dot-config-targets := $(clean-targets) \ cscope gtags TAGS tags help% %docs check% coccicheck \ $(version_h) headers headers_% archheaders archscripts \ %asm-generic kernelversion %src-pkg dt_binding_check \ - outputmakefile rustavailable rustfmt rustfmtcheck \ - scripts_package + outputmakefile rustavailable rustfmt rustfmtcheck # Installation targets should not require compiler. Unfortunately, vdso_install # is an exception where build artifacts may be updated. This must be fixed. no-compiler-targets := $(no-dot-config-targets) install dtbs_install \ @@ -1605,7 +1604,7 @@ MRPROPER_FILES += include/config include/generated \ certs/signing_key.pem \ certs/x509.genkey \ vmlinux-gdb.py \ - *.spec \ + *.spec rpmbuild \ rust/libmacros.so # clean - Delete most, but leave enough to build external modules @@ -1656,10 +1655,6 @@ distclean: mrproper %pkg: include/config/kernel.release FORCE $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.package $@ -PHONY += scripts_package -scripts_package: scripts_basic - $(Q)$(MAKE) $(build)=scripts scripts/list-gitignored - # Brief documentation of the typical targets used # --------------------------------------------------------------------------- @@ -1886,6 +1881,8 @@ endif else # KBUILD_EXTMOD +filechk_kernel.release = echo $(KERNELRELEASE) + ### # External module support. 
# When building external modules the kernel used as basis is considered diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index a1892a8f6032..bcd774d74f34 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -193,6 +193,9 @@ struct kvm_arch { /* Interrupt controller */ struct vgic_dist vgic; + /* Timers */ + struct arch_timer_vm_data timer_data; + /* Mandated version of PSCI */ u32 psci_version; diff --git a/arch/arm64/kvm/arch_timer.c b/arch/arm64/kvm/arch_timer.c index 00610477ec7b..e1af4301b913 100644 --- a/arch/arm64/kvm/arch_timer.c +++ b/arch/arm64/kvm/arch_timer.c @@ -84,14 +84,10 @@ u64 timer_get_cval(struct arch_timer_context *ctxt) static u64 timer_get_offset(struct arch_timer_context *ctxt) { - struct kvm_vcpu *vcpu = ctxt->vcpu; + if (ctxt->offset.vm_offset) + return *ctxt->offset.vm_offset; - switch(arch_timer_ctx_index(ctxt)) { - case TIMER_VTIMER: - return __vcpu_sys_reg(vcpu, CNTVOFF_EL2); - default: - return 0; - } + return 0; } static void timer_set_ctl(struct arch_timer_context *ctxt, u32 ctl) @@ -128,15 +124,12 @@ static void timer_set_cval(struct arch_timer_context *ctxt, u64 cval) static void timer_set_offset(struct arch_timer_context *ctxt, u64 offset) { - struct kvm_vcpu *vcpu = ctxt->vcpu; - - switch(arch_timer_ctx_index(ctxt)) { - case TIMER_VTIMER: - __vcpu_sys_reg(vcpu, CNTVOFF_EL2) = offset; - break; - default: + if (!ctxt->offset.vm_offset) { WARN(offset, "timer %ld\n", arch_timer_ctx_index(ctxt)); + return; } + + WRITE_ONCE(*ctxt->offset.vm_offset, offset); } u64 kvm_phys_timer_read(void) @@ -765,25 +758,6 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu) return 0; } -/* Make the updates of cntvoff for all vtimer contexts atomic */ -static void update_vtimer_cntvoff(struct kvm_vcpu *vcpu, u64 cntvoff) -{ - unsigned long i; - struct kvm *kvm = vcpu->kvm; - struct kvm_vcpu *tmp; - - mutex_lock(&kvm->lock); - kvm_for_each_vcpu(i, tmp, kvm) - timer_set_offset(vcpu_vtimer(tmp), cntvoff); - - /* - * When called from the vcpu create path, the CPU being created is not - * included in the loop above, so we just set it here as well. - */ - timer_set_offset(vcpu_vtimer(vcpu), cntvoff); - mutex_unlock(&kvm->lock); -} - void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) { struct arch_timer_cpu *timer = vcpu_timer(vcpu); @@ -791,10 +765,11 @@ void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu) struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); vtimer->vcpu = vcpu; + vtimer->offset.vm_offset = &vcpu->kvm->arch.timer_data.voffset; ptimer->vcpu = vcpu; /* Synchronize cntvoff across all vtimers of a VM. 
*/ - update_vtimer_cntvoff(vcpu, kvm_phys_timer_read()); + timer_set_offset(vtimer, kvm_phys_timer_read()); timer_set_offset(ptimer, 0); hrtimer_init(&timer->bg_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); @@ -840,7 +815,7 @@ int kvm_arm_timer_set_reg(struct kvm_vcpu *vcpu, u64 regid, u64 value) break; case KVM_REG_ARM_TIMER_CNT: timer = vcpu_vtimer(vcpu); - update_vtimer_cntvoff(vcpu, kvm_phys_timer_read() - value); + timer_set_offset(timer, kvm_phys_timer_read() - value); break; case KVM_REG_ARM_TIMER_CVAL: timer = vcpu_vtimer(vcpu); diff --git a/arch/arm64/kvm/hypercalls.c b/arch/arm64/kvm/hypercalls.c index 64c086c02c60..5da884e11337 100644 --- a/arch/arm64/kvm/hypercalls.c +++ b/arch/arm64/kvm/hypercalls.c @@ -44,7 +44,7 @@ static void kvm_ptp_get_time(struct kvm_vcpu *vcpu, u64 *val) feature = smccc_get_arg1(vcpu); switch (feature) { case KVM_PTP_VIRT_COUNTER: - cycles = systime_snapshot.cycles - vcpu_read_sys_reg(vcpu, CNTVOFF_EL2); + cycles = systime_snapshot.cycles - vcpu->kvm->arch.timer_data.voffset; break; case KVM_PTP_PHYS_COUNTER: cycles = systime_snapshot.cycles; diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 2bef19cc1b98..af46aa88422b 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c @@ -271,11 +271,16 @@ static bool access_error(bool is_write, bool is_exec, struct vm_area_struct *vma } /* - * Check for a read fault. This could be caused by a read on an - * inaccessible page (i.e. PROT_NONE), or a Radix MMU execute-only page. + * VM_READ, VM_WRITE and VM_EXEC all imply read permissions, as + * defined in protection_map[]. Read faults can only be caused by + * a PROT_NONE mapping, or with a PROT_EXEC-only mapping on Radix. */ - if (unlikely(!(vma->vm_flags & VM_READ))) + if (unlikely(!vma_is_accessible(vma))) return true; + + if (unlikely(radix_enabled() && ((vma->vm_flags & VM_ACCESS_FLAGS) == VM_EXEC))) + return true; + /* * We should ideally do the vma pkey access check here. But in the * fault path, handle_mm_fault() also does the same check. To avoid diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index b481c5c8bae1..21b22bf16ce6 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig @@ -7,6 +7,7 @@ config PPC_PSERIES select OF_DYNAMIC select FORCE_PCI select PCI_MSI + select GENERIC_ALLOCATOR select PPC_XICS select PPC_XIVE_SPAPR select PPC_ICP_NATIVE diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h index 5ff1f19fd45c..0099dc116168 100644 --- a/arch/riscv/include/asm/mmu.h +++ b/arch/riscv/include/asm/mmu.h @@ -19,8 +19,6 @@ typedef struct { #ifdef CONFIG_SMP /* A local icache flush is needed before user execution can resume. */ cpumask_t icache_stale_mask; - /* A local tlb flush is needed before user execution can resume. 
*/ - cpumask_t tlb_stale_mask; #endif } mm_context_t; diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 907b9efd39a8..801019381dea 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -22,24 +22,6 @@ static inline void local_flush_tlb_page(unsigned long addr) { ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); } - -static inline void local_flush_tlb_all_asid(unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma x0, %0" - : - : "r" (asid) - : "memory"); -} - -static inline void local_flush_tlb_page_asid(unsigned long addr, - unsigned long asid) -{ - __asm__ __volatile__ ("sfence.vma %0, %1" - : - : "r" (addr), "r" (asid) - : "memory"); -} - #else /* CONFIG_MMU */ #define local_flush_tlb_all() do { } while (0) #define local_flush_tlb_page(addr) do { } while (0) diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c index 80ce9caba8d2..0f784e3d307b 100644 --- a/arch/riscv/mm/context.c +++ b/arch/riscv/mm/context.c @@ -196,16 +196,6 @@ switch_mm_fast: if (need_flush_tlb) local_flush_tlb_all(); -#ifdef CONFIG_SMP - else { - cpumask_t *mask = &mm->context.tlb_stale_mask; - - if (cpumask_test_cpu(cpu, mask)) { - cpumask_clear_cpu(cpu, mask); - local_flush_tlb_all_asid(cntx & asid_mask); - } - } -#endif } static void set_mm_noasid(struct mm_struct *mm) @@ -215,12 +205,24 @@ static void set_mm_noasid(struct mm_struct *mm) local_flush_tlb_all(); } -static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +static inline void set_mm(struct mm_struct *prev, + struct mm_struct *next, unsigned int cpu) { - if (static_branch_unlikely(&use_asid_allocator)) - set_mm_asid(mm, cpu); - else - set_mm_noasid(mm); + /* + * The mm_cpumask indicates which harts' TLBs contain the virtual + * address mapping of the mm. Compared to noasid, using asid + * can't guarantee that stale TLB entries are invalidated because + * the asid mechanism wouldn't flush TLB for every switch_mm for + * performance. So when using asid, keep all CPUs footmarks in + * cpumask() until mm reset. 
+ */ + cpumask_set_cpu(cpu, mm_cpumask(next)); + if (static_branch_unlikely(&use_asid_allocator)) { + set_mm_asid(next, cpu); + } else { + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + set_mm_noasid(next); + } } static int __init asids_init(void) @@ -274,7 +276,8 @@ static int __init asids_init(void) } early_initcall(asids_init); #else -static inline void set_mm(struct mm_struct *mm, unsigned int cpu) +static inline void set_mm(struct mm_struct *prev, + struct mm_struct *next, unsigned int cpu) { /* Nothing to do here when there is no MMU */ } @@ -327,10 +330,7 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next, */ cpu = smp_processor_id(); - cpumask_clear_cpu(cpu, mm_cpumask(prev)); - cpumask_set_cpu(cpu, mm_cpumask(next)); - - set_mm(next, cpu); + set_mm(prev, next, cpu); flush_icache_deferred(next, cpu); } diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 460f785f6e09..d5f3e501dffb 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -143,6 +143,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a no_context(regs, addr); return; } + if (pud_leaf(*pud_k)) + goto flush_tlb; /* * Since the vmalloc area is global, it is unnecessary @@ -153,6 +155,8 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a no_context(regs, addr); return; } + if (pmd_leaf(*pmd_k)) + goto flush_tlb; /* * Make sure the actual PTE exists as well to @@ -172,6 +176,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a * ordering constraint, not a cache flush; it is * necessary even after writing invalid entries. */ +flush_tlb: local_flush_tlb_page(addr); } diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c index ce7dfc81bb3f..37ed760d007c 100644 --- a/arch/riscv/mm/tlbflush.c +++ b/arch/riscv/mm/tlbflush.c @@ -5,7 +5,23 @@ #include <linux/sched.h> #include <asm/sbi.h> #include <asm/mmu_context.h> -#include <asm/tlbflush.h> + +static inline void local_flush_tlb_all_asid(unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma x0, %0" + : + : "r" (asid) + : "memory"); +} + +static inline void local_flush_tlb_page_asid(unsigned long addr, + unsigned long asid) +{ + __asm__ __volatile__ ("sfence.vma %0, %1" + : + : "r" (addr), "r" (asid) + : "memory"); +} void flush_tlb_all(void) { @@ -15,7 +31,6 @@ void flush_tlb_all(void) static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, unsigned long size, unsigned long stride) { - struct cpumask *pmask = &mm->context.tlb_stale_mask; struct cpumask *cmask = mm_cpumask(mm); unsigned int cpuid; bool broadcast; @@ -29,15 +44,6 @@ static void __sbi_tlb_flush_range(struct mm_struct *mm, unsigned long start, if (static_branch_unlikely(&use_asid_allocator)) { unsigned long asid = atomic_long_read(&mm->context.id); - /* - * TLB will be immediately flushed on harts concurrently - * executing this MM context. TLB flush on other harts - * is deferred until this MM context migrates there. 
- */ - cpumask_setall(pmask); - cpumask_clear_cpu(cpuid, pmask); - cpumask_andnot(pmask, pmask, cmask); - if (broadcast) { sbi_remote_sfence_vma_asid(cmask, start, size, asid); } else if (size <= stride) { diff --git a/arch/s390/boot/ipl_report.c b/arch/s390/boot/ipl_report.c index 9b14045065b6..74b5cd264862 100644 --- a/arch/s390/boot/ipl_report.c +++ b/arch/s390/boot/ipl_report.c @@ -57,11 +57,19 @@ repeat: if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_data.start && initrd_data.size && intersects(initrd_data.start, initrd_data.size, safe_addr, size)) safe_addr = initrd_data.start + initrd_data.size; + if (intersects(safe_addr, size, (unsigned long)comps, comps->len)) { + safe_addr = (unsigned long)comps + comps->len; + goto repeat; + } for_each_rb_entry(comp, comps) if (intersects(safe_addr, size, comp->addr, comp->len)) { safe_addr = comp->addr + comp->len; goto repeat; } + if (intersects(safe_addr, size, (unsigned long)certs, certs->len)) { + safe_addr = (unsigned long)certs + certs->len; + goto repeat; + } for_each_rb_entry(cert, certs) if (intersects(safe_addr, size, cert->addr, cert->len)) { safe_addr = cert->addr + cert->len; diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig index 3c68fe49042c..4ccf66d29fc2 100644 --- a/arch/s390/configs/debug_defconfig +++ b/arch/s390/configs/debug_defconfig @@ -23,7 +23,6 @@ CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -90,7 +89,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y CONFIG_ZSMALLOC_STAT=y @@ -298,7 +296,6 @@ CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m @@ -340,7 +337,6 @@ CONFIG_BRIDGE_MRP=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -351,7 +347,6 @@ CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_DRR=m CONFIG_NET_SCH_MQPRIO=m @@ -363,14 +358,11 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m CONFIG_CLS_U32_PERF=y CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m @@ -584,7 +576,7 @@ CONFIG_DIAG288_WATCHDOG=m CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m @@ -828,6 +820,7 @@ CONFIG_PANIC_ON_OOPS=y CONFIG_DETECT_HUNG_TASK=y CONFIG_WQ_WATCHDOG=y CONFIG_TEST_LOCKUP=m +CONFIG_DEBUG_PREEMPT=y CONFIG_PROVE_LOCKING=y CONFIG_LOCK_STAT=y CONFIG_DEBUG_ATOMIC_SLEEP=y @@ -843,6 +836,7 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=300 # CONFIG_RCU_TRACE is not set CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y +CONFIG_FPROBE=y CONFIG_FUNCTION_PROFILER=y CONFIG_STACK_TRACER=y CONFIG_IRQSOFF_TRACER=y @@ -857,6 +851,7 @@ CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m 
CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m +CONFIG_SAMPLE_FTRACE_OPS=m CONFIG_DEBUG_ENTRY=y CONFIG_CIO_INJECT=y CONFIG_KUNIT=m diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig index 9ab91632f74c..693297a2e897 100644 --- a/arch/s390/configs/defconfig +++ b/arch/s390/configs/defconfig @@ -21,7 +21,6 @@ CONFIG_NUMA_BALANCING=y CONFIG_MEMCG=y CONFIG_BLK_CGROUP=y CONFIG_CFS_BANDWIDTH=y -CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_RDMA=y CONFIG_CGROUP_FREEZER=y @@ -85,7 +84,6 @@ CONFIG_MINIX_SUBPARTITION=y CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y CONFIG_IOSCHED_BFQ=y -CONFIG_BFQ_GROUP_IOSCHED=y CONFIG_BINFMT_MISC=m CONFIG_ZSWAP=y CONFIG_ZSMALLOC_STAT=y @@ -289,7 +287,6 @@ CONFIG_IP_NF_TARGET_REJECT=m CONFIG_IP_NF_NAT=m CONFIG_IP_NF_TARGET_MASQUERADE=m CONFIG_IP_NF_MANGLE=m -CONFIG_IP_NF_TARGET_CLUSTERIP=m CONFIG_IP_NF_TARGET_ECN=m CONFIG_IP_NF_TARGET_TTL=m CONFIG_IP_NF_RAW=m @@ -330,7 +327,6 @@ CONFIG_BRIDGE_MRP=y CONFIG_VLAN_8021Q=m CONFIG_VLAN_8021Q_GVRP=y CONFIG_NET_SCHED=y -CONFIG_NET_SCH_CBQ=m CONFIG_NET_SCH_HTB=m CONFIG_NET_SCH_HFSC=m CONFIG_NET_SCH_PRIO=m @@ -341,7 +337,6 @@ CONFIG_NET_SCH_SFQ=m CONFIG_NET_SCH_TEQL=m CONFIG_NET_SCH_TBF=m CONFIG_NET_SCH_GRED=m -CONFIG_NET_SCH_DSMARK=m CONFIG_NET_SCH_NETEM=m CONFIG_NET_SCH_DRR=m CONFIG_NET_SCH_MQPRIO=m @@ -353,14 +348,11 @@ CONFIG_NET_SCH_INGRESS=m CONFIG_NET_SCH_PLUG=m CONFIG_NET_SCH_ETS=m CONFIG_NET_CLS_BASIC=m -CONFIG_NET_CLS_TCINDEX=m CONFIG_NET_CLS_ROUTE4=m CONFIG_NET_CLS_FW=m CONFIG_NET_CLS_U32=m CONFIG_CLS_U32_PERF=y CONFIG_CLS_U32_MARK=y -CONFIG_NET_CLS_RSVP=m -CONFIG_NET_CLS_RSVP6=m CONFIG_NET_CLS_FLOW=m CONFIG_NET_CLS_CGROUP=y CONFIG_NET_CLS_BPF=m @@ -573,7 +565,7 @@ CONFIG_DIAG288_WATCHDOG=m CONFIG_FB=y CONFIG_FRAMEBUFFER_CONSOLE=y CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_USB_SUPPORT is not set CONFIG_INFINIBAND=m CONFIG_INFINIBAND_USER_ACCESS=m @@ -795,6 +787,7 @@ CONFIG_RCU_REF_SCALE_TEST=m CONFIG_RCU_CPU_STALL_TIMEOUT=60 CONFIG_LATENCYTOP=y CONFIG_BOOTTIME_TRACING=y +CONFIG_FPROBE=y CONFIG_FUNCTION_PROFILER=y CONFIG_STACK_TRACER=y CONFIG_SCHED_TRACER=y @@ -805,6 +798,7 @@ CONFIG_SAMPLES=y CONFIG_SAMPLE_TRACE_PRINTK=m CONFIG_SAMPLE_FTRACE_DIRECT=m CONFIG_SAMPLE_FTRACE_DIRECT_MULTI=m +CONFIG_SAMPLE_FTRACE_OPS=m CONFIG_KUNIT=m CONFIG_KUNIT_DEBUGFS=y CONFIG_LKDTM=m diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig index a9c0c81d1de9..33a232bb68af 100644 --- a/arch/s390/configs/zfcpdump_defconfig +++ b/arch/s390/configs/zfcpdump_defconfig @@ -58,7 +58,7 @@ CONFIG_ZFCP=y # CONFIG_VMCP is not set # CONFIG_MONWRITER is not set # CONFIG_S390_VMUR is not set -# CONFIG_HID is not set +# CONFIG_HID_SUPPORT is not set # CONFIG_VIRTIO_MENU is not set # CONFIG_VHOST_MENU is not set # CONFIG_IOMMU_SUPPORT is not set diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c index ef38b1514c77..e16afacc8fd1 100644 --- a/arch/s390/pci/pci.c +++ b/arch/s390/pci/pci.c @@ -544,8 +544,7 @@ static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start, return r; } -int zpci_setup_bus_resources(struct zpci_dev *zdev, - struct list_head *resources) +int zpci_setup_bus_resources(struct zpci_dev *zdev) { unsigned long addr, size, flags; struct resource *res; @@ -581,7 +580,6 @@ int zpci_setup_bus_resources(struct zpci_dev *zdev, return -ENOMEM; } zdev->bars[i].res = res; - pci_add_resource(resources, res); } zdev->has_resources = 1; @@ -590,17 +588,23 @@ int 
zpci_setup_bus_resources(struct zpci_dev *zdev, static void zpci_cleanup_bus_resources(struct zpci_dev *zdev) { + struct resource *res; int i; + pci_lock_rescan_remove(); for (i = 0; i < PCI_STD_NUM_BARS; i++) { - if (!zdev->bars[i].size || !zdev->bars[i].res) + res = zdev->bars[i].res; + if (!res) continue; + release_resource(res); + pci_bus_remove_resource(zdev->zbus->bus, res); zpci_free_iomap(zdev, zdev->bars[i].map_idx); - release_resource(zdev->bars[i].res); - kfree(zdev->bars[i].res); + zdev->bars[i].res = NULL; + kfree(res); } zdev->has_resources = 0; + pci_unlock_rescan_remove(); } int pcibios_device_add(struct pci_dev *pdev) diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c index 6a8da1b742ae..a99926af2b69 100644 --- a/arch/s390/pci/pci_bus.c +++ b/arch/s390/pci/pci_bus.c @@ -41,9 +41,7 @@ static int zpci_nb_devices; */ static int zpci_bus_prepare_device(struct zpci_dev *zdev) { - struct resource_entry *window, *n; - struct resource *res; - int rc; + int rc, i; if (!zdev_enabled(zdev)) { rc = zpci_enable_device(zdev); @@ -57,10 +55,10 @@ static int zpci_bus_prepare_device(struct zpci_dev *zdev) } if (!zdev->has_resources) { - zpci_setup_bus_resources(zdev, &zdev->zbus->resources); - resource_list_for_each_entry_safe(window, n, &zdev->zbus->resources) { - res = window->res; - pci_bus_add_resource(zdev->zbus->bus, res, 0); + zpci_setup_bus_resources(zdev); + for (i = 0; i < PCI_STD_NUM_BARS; i++) { + if (zdev->bars[i].res) + pci_bus_add_resource(zdev->zbus->bus, zdev->bars[i].res, 0); } } diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h index e96c9860e064..af9f0ac79a1b 100644 --- a/arch/s390/pci/pci_bus.h +++ b/arch/s390/pci/pci_bus.h @@ -30,8 +30,7 @@ static inline void zpci_zdev_get(struct zpci_dev *zdev) int zpci_alloc_domain(int domain); void zpci_free_domain(int domain); -int zpci_setup_bus_resources(struct zpci_dev *zdev, - struct list_head *resources); +int zpci_setup_bus_resources(struct zpci_dev *zdev); static inline struct zpci_dev *zdev_from_bus(struct pci_bus *bus, unsigned int devfn) diff --git a/arch/x86/include/asm/svm.h b/arch/x86/include/asm/svm.h index cb1ee53ad3b1..770dcf75eaa9 100644 --- a/arch/x86/include/asm/svm.h +++ b/arch/x86/include/asm/svm.h @@ -261,20 +261,22 @@ enum avic_ipi_failure_cause { AVIC_IPI_FAILURE_INVALID_BACKING_PAGE, }; -#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(9, 0) +#define AVIC_PHYSICAL_MAX_INDEX_MASK GENMASK_ULL(8, 0) /* - * For AVIC, the max index allowed for physical APIC ID - * table is 0xff (255). + * For AVIC, the max index allowed for physical APIC ID table is 0xfe (254), as + * 0xff is a broadcast to all CPUs, i.e. can't be targeted individually. */ #define AVIC_MAX_PHYSICAL_ID 0XFEULL /* - * For x2AVIC, the max index allowed for physical APIC ID - * table is 0x1ff (511). + * For x2AVIC, the max index allowed for physical APIC ID table is 0x1ff (511). 
*/ #define X2AVIC_MAX_PHYSICAL_ID 0x1FFUL +static_assert((AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == AVIC_MAX_PHYSICAL_ID); +static_assert((X2AVIC_MAX_PHYSICAL_ID & AVIC_PHYSICAL_MAX_INDEX_MASK) == X2AVIC_MAX_PHYSICAL_ID); + #define AVIC_HPA_MASK ~((0xFFFULL << 52) | 0xFFF) #define VMCB_AVIC_APIC_BAR_MASK 0xFFFFFFFFFF000ULL diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h index 6daa9b0c8d11..a3c29b1496c8 100644 --- a/arch/x86/include/asm/xen/cpuid.h +++ b/arch/x86/include/asm/xen/cpuid.h @@ -89,11 +89,21 @@ * Sub-leaf 2: EAX: host tsc frequency in kHz */ +#define XEN_CPUID_TSC_EMULATED (1u << 0) +#define XEN_CPUID_HOST_TSC_RELIABLE (1u << 1) +#define XEN_CPUID_RDTSCP_INSTR_AVAIL (1u << 2) + +#define XEN_CPUID_TSC_MODE_DEFAULT (0) +#define XEN_CPUID_TSC_MODE_ALWAYS_EMULATE (1u) +#define XEN_CPUID_TSC_MODE_NEVER_EMULATE (2u) +#define XEN_CPUID_TSC_MODE_PVRDTSCP (3u) + /* * Leaf 5 (0x40000x04) * HVM-specific features * Sub-leaf 0: EAX: Features * Sub-leaf 0: EBX: vcpu id (iff EAX has XEN_HVM_CPUID_VCPU_ID_PRESENT flag) + * Sub-leaf 0: ECX: domain id (iff EAX has XEN_HVM_CPUID_DOMID_PRESENT flag) */ #define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0) /* Virtualized APIC registers */ #define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1) /* Virtualized x2APIC accesses */ @@ -102,12 +112,16 @@ #define XEN_HVM_CPUID_VCPU_ID_PRESENT (1u << 3) /* vcpu id is present in EBX */ #define XEN_HVM_CPUID_DOMID_PRESENT (1u << 4) /* domid is present in ECX */ /* - * Bits 55:49 from the IO-APIC RTE and bits 11:5 from the MSI address can be - * used to store high bits for the Destination ID. This expands the Destination - * ID field from 8 to 15 bits, allowing to target APIC IDs up 32768. + * With interrupt format set to 0 (non-remappable) bits 55:49 from the + * IO-APIC RTE and bits 11:5 from the MSI address can be used to store + * high bits for the Destination ID. This expands the Destination ID + * field from 8 to 15 bits, allowing to target APIC IDs up 32768. */ #define XEN_HVM_CPUID_EXT_DEST_ID (1u << 5) -/* Per-vCPU event channel upcalls */ +/* + * Per-vCPU event channel upcalls work correctly with physical IRQs + * bound to event channels. + */ #define XEN_HVM_CPUID_UPCALL_VECTOR (1u << 6) /* diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S index 1265ad519249..fb4f1e01b64a 100644 --- a/arch/x86/kernel/ftrace_64.S +++ b/arch/x86/kernel/ftrace_64.S @@ -136,10 +136,12 @@ SYM_TYPED_FUNC_START(ftrace_stub) RET SYM_FUNC_END(ftrace_stub) +#ifdef CONFIG_FUNCTION_GRAPH_TRACER SYM_TYPED_FUNC_START(ftrace_stub_graph) CALL_DEPTH_ACCOUNT RET SYM_FUNC_END(ftrace_stub_graph) +#endif #ifdef CONFIG_DYNAMIC_FTRACE diff --git a/arch/x86/kvm/svm/avic.c b/arch/x86/kvm/svm/avic.c index ca684979e90d..cfc8ab773025 100644 --- a/arch/x86/kvm/svm/avic.c +++ b/arch/x86/kvm/svm/avic.c @@ -27,19 +27,38 @@ #include "irq.h" #include "svm.h" -/* AVIC GATAG is encoded using VM and VCPU IDs */ -#define AVIC_VCPU_ID_BITS 8 -#define AVIC_VCPU_ID_MASK ((1 << AVIC_VCPU_ID_BITS) - 1) +/* + * Encode the arbitrary VM ID and the vCPU's default APIC ID, i.e the vCPU ID, + * into the GATag so that KVM can retrieve the correct vCPU from a GALog entry + * if an interrupt can't be delivered, e.g. because the vCPU isn't running. + * + * For the vCPU ID, use however many bits are currently allowed for the max + * guest physical APIC ID (limited by the size of the physical ID table), and + * use whatever bits remain to assign arbitrary AVIC IDs to VMs. 
Note, the + * size of the GATag is defined by hardware (32 bits), but is an opaque value + * as far as hardware is concerned. + */ +#define AVIC_VCPU_ID_MASK AVIC_PHYSICAL_MAX_INDEX_MASK -#define AVIC_VM_ID_BITS 24 -#define AVIC_VM_ID_NR (1 << AVIC_VM_ID_BITS) -#define AVIC_VM_ID_MASK ((1 << AVIC_VM_ID_BITS) - 1) +#define AVIC_VM_ID_SHIFT HWEIGHT32(AVIC_PHYSICAL_MAX_INDEX_MASK) +#define AVIC_VM_ID_MASK (GENMASK(31, AVIC_VM_ID_SHIFT) >> AVIC_VM_ID_SHIFT) -#define AVIC_GATAG(x, y) (((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \ - (y & AVIC_VCPU_ID_MASK)) -#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK) +#define AVIC_GATAG_TO_VMID(x) ((x >> AVIC_VM_ID_SHIFT) & AVIC_VM_ID_MASK) #define AVIC_GATAG_TO_VCPUID(x) (x & AVIC_VCPU_ID_MASK) +#define __AVIC_GATAG(vm_id, vcpu_id) ((((vm_id) & AVIC_VM_ID_MASK) << AVIC_VM_ID_SHIFT) | \ + ((vcpu_id) & AVIC_VCPU_ID_MASK)) +#define AVIC_GATAG(vm_id, vcpu_id) \ +({ \ + u32 ga_tag = __AVIC_GATAG(vm_id, vcpu_id); \ + \ + WARN_ON_ONCE(AVIC_GATAG_TO_VCPUID(ga_tag) != (vcpu_id)); \ + WARN_ON_ONCE(AVIC_GATAG_TO_VMID(ga_tag) != (vm_id)); \ + ga_tag; \ +}) + +static_assert(__AVIC_GATAG(AVIC_VM_ID_MASK, AVIC_VCPU_ID_MASK) == -1u); + static bool force_avic; module_param_unsafe(force_avic, bool, 0444); diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c index 7c4f5ca405c7..1bc2b80273c9 100644 --- a/arch/x86/kvm/vmx/nested.c +++ b/arch/x86/kvm/vmx/nested.c @@ -2903,7 +2903,7 @@ static int nested_vmx_check_address_space_size(struct kvm_vcpu *vcpu, static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) { - bool ia32e; + bool ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); if (CC(!nested_host_cr0_valid(vcpu, vmcs12->host_cr0)) || CC(!nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) || @@ -2923,12 +2923,6 @@ static int nested_vmx_check_host_state(struct kvm_vcpu *vcpu, vmcs12->host_ia32_perf_global_ctrl))) return -EINVAL; -#ifdef CONFIG_X86_64 - ia32e = !!(vmcs12->vm_exit_controls & VM_EXIT_HOST_ADDR_SPACE_SIZE); -#else - ia32e = false; -#endif - if (ia32e) { if (CC(!(vmcs12->host_cr4 & X86_CR4_PAE))) return -EINVAL; @@ -3022,7 +3016,7 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, enum vm_entry_failure_code *entry_failure_code) { - bool ia32e; + bool ia32e = !!(vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE); *entry_failure_code = ENTRY_FAIL_DEFAULT; @@ -3048,6 +3042,13 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, vmcs12->guest_ia32_perf_global_ctrl))) return -EINVAL; + if (CC((vmcs12->guest_cr0 & (X86_CR0_PG | X86_CR0_PE)) == X86_CR0_PG)) + return -EINVAL; + + if (CC(ia32e && !(vmcs12->guest_cr4 & X86_CR4_PAE)) || + CC(ia32e && !(vmcs12->guest_cr0 & X86_CR0_PG))) + return -EINVAL; + /* * If the load IA32_EFER VM-entry control is 1, the following checks * are performed on the field for the IA32_EFER MSR: @@ -3059,7 +3060,6 @@ static int nested_vmx_check_guest_state(struct kvm_vcpu *vcpu, */ if (to_vmx(vcpu)->nested.nested_run_pending && (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER)) { - ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0; if (CC(!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer)) || CC(ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA)) || CC(((vmcs12->guest_cr0 & X86_CR0_PG) && diff --git a/arch/x86/kvm/vmx/vmenter.S b/arch/x86/kvm/vmx/vmenter.S index f550540ed54e..631fd7da2bc3 100644 --- a/arch/x86/kvm/vmx/vmenter.S +++ b/arch/x86/kvm/vmx/vmenter.S @@ -262,7 +262,7 @@ 
SYM_INNER_LABEL(vmx_vmexit, SYM_L_GLOBAL) * eIBRS has its own protection against poisoned RSB, so it doesn't * need the RSB filling sequence. But it does need to be enabled, and a * single call to retire, before the first unbalanced RET. - */ + */ FILL_RETURN_BUFFER %_ASM_CX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT,\ X86_FEATURE_RSB_VMEXIT_LITE @@ -311,7 +311,7 @@ SYM_FUNC_END(vmx_do_nmi_irqoff) * vmread_error_trampoline - Trampoline from inline asm to vmread_error() * @field: VMCS field encoding that failed * @fault: %true if the VMREAD faulted, %false if it failed - + * * Save and restore volatile registers across a call to vmread_error(). Note, * all parameters are passed on the stack. */ diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index bcac3efcde41..d2d6e1b6c788 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -874,7 +874,7 @@ void vmx_update_exception_bitmap(struct kvm_vcpu *vcpu) */ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; - else { + else { int mask = 0, match = 0; if (enable_ept && (eb & (1u << PF_VECTOR))) { @@ -1282,7 +1282,7 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) } } - if (vmx->nested.need_vmcs12_to_shadow_sync) + if (vmx->nested.need_vmcs12_to_shadow_sync) nested_sync_vmcs12_to_shadow(vcpu); if (vmx->guest_state_loaded) @@ -5049,10 +5049,10 @@ static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu, bool for_injection) if (to_vmx(vcpu)->nested.nested_run_pending) return -EBUSY; - /* - * An IRQ must not be injected into L2 if it's supposed to VM-Exit, - * e.g. if the IRQ arrived asynchronously after checking nested events. - */ + /* + * An IRQ must not be injected into L2 if it's supposed to VM-Exit, + * e.g. if the IRQ arrived asynchronously after checking nested events. 
+ */ if (for_injection && is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) return -EBUSY; diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile index 3c5b52fbe4a7..a9ec8c9f5c5d 100644 --- a/arch/x86/xen/Makefile +++ b/arch/x86/xen/Makefile @@ -45,6 +45,6 @@ obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o -obj-$(CONFIG_XEN_PV_DOM0) += vga.o +obj-$(CONFIG_XEN_DOM0) += vga.o obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c index bb59cc6ddb2d..093b78c8bbec 100644 --- a/arch/x86/xen/enlighten_pv.c +++ b/arch/x86/xen/enlighten_pv.c @@ -1390,7 +1390,8 @@ asmlinkage __visible void __init xen_start_kernel(struct start_info *si) x86_platform.set_legacy_features = xen_dom0_set_legacy_features; - xen_init_vga(info, xen_start_info->console.dom0.info_size); + xen_init_vga(info, xen_start_info->console.dom0.info_size, + &boot_params.screen_info); xen_start_info->console.domU.mfn = 0; xen_start_info->console.domU.evtchn = 0; diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c index bcae606bbc5c..1da44aca896c 100644 --- a/arch/x86/xen/enlighten_pvh.c +++ b/arch/x86/xen/enlighten_pvh.c @@ -43,6 +43,19 @@ void __init xen_pvh_init(struct boot_params *boot_params) x86_init.oem.banner = xen_banner; xen_efi_init(boot_params); + + if (xen_initial_domain()) { + struct xen_platform_op op = { + .cmd = XENPF_get_dom0_console, + }; + long ret = HYPERVISOR_platform_op(&op); + + if (ret > 0) + xen_init_vga(&op.u.dom0_console, + min(ret * sizeof(char), + sizeof(op.u.dom0_console)), + &boot_params->screen_info); + } } void __init mem_map_via_hcall(struct boot_params *boot_params_p) diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 1d597364b49d..b74ac2562cfb 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -20,6 +20,7 @@ #include <asm/pvclock.h> #include <asm/xen/hypervisor.h> #include <asm/xen/hypercall.h> +#include <asm/xen/cpuid.h> #include <xen/events.h> #include <xen/features.h> @@ -503,11 +504,7 @@ static int __init xen_tsc_safe_clocksource(void) /* Leaf 4, sub-leaf 0 (0x40000x03) */ cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx); - /* tsc_mode = no_emulate (2) */ - if (ebx != 2) - return 0; - - return 1; + return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE; } static void __init xen_time_init(void) diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c index 14ea32e734d5..d97adab8420f 100644 --- a/arch/x86/xen/vga.c +++ b/arch/x86/xen/vga.c @@ -9,10 +9,9 @@ #include "xen-ops.h" -void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size) +void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size, + struct screen_info *screen_info) { - struct screen_info *screen_info = &boot_params.screen_info; - /* This is drawn from a dump from vgacon:startup in * standard Linux. 
*/ screen_info->orig_video_mode = 3; diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h index 9a8bb972193d..a10903785a33 100644 --- a/arch/x86/xen/xen-ops.h +++ b/arch/x86/xen/xen-ops.h @@ -108,11 +108,12 @@ static inline void xen_uninit_lock_cpu(int cpu) struct dom0_vga_console_info; -#ifdef CONFIG_XEN_PV_DOM0 -void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); +#ifdef CONFIG_XEN_DOM0 +void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size, + struct screen_info *); #else static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, - size_t size) + size_t size, struct screen_info *si) { } #endif diff --git a/block/Kconfig b/block/Kconfig index 5d9d9c84d516..941b2dca70db 100644 --- a/block/Kconfig +++ b/block/Kconfig @@ -204,9 +204,6 @@ config BLK_INLINE_ENCRYPTION_FALLBACK source "block/partitions/Kconfig" -config BLOCK_COMPAT - def_bool COMPAT - config BLK_MQ_PCI def_bool PCI diff --git a/block/blk-core.c b/block/blk-core.c index 9e5e0277a4d9..42926e6cb83c 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -959,16 +959,11 @@ again: } } -unsigned long bdev_start_io_acct(struct block_device *bdev, - unsigned int sectors, enum req_op op, +unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op, unsigned long start_time) { - const int sgrp = op_stat_group(op); - part_stat_lock(); update_io_ticks(bdev, start_time, false); - part_stat_inc(bdev, ios[sgrp]); - part_stat_add(bdev, sectors[sgrp], sectors); part_stat_local_inc(bdev, in_flight[op_is_write(op)]); part_stat_unlock(); @@ -984,13 +979,12 @@ EXPORT_SYMBOL(bdev_start_io_acct); */ unsigned long bio_start_io_acct(struct bio *bio) { - return bdev_start_io_acct(bio->bi_bdev, bio_sectors(bio), - bio_op(bio), jiffies); + return bdev_start_io_acct(bio->bi_bdev, bio_op(bio), jiffies); } EXPORT_SYMBOL_GPL(bio_start_io_acct); void bdev_end_io_acct(struct block_device *bdev, enum req_op op, - unsigned long start_time) + unsigned int sectors, unsigned long start_time) { const int sgrp = op_stat_group(op); unsigned long now = READ_ONCE(jiffies); @@ -998,6 +992,8 @@ void bdev_end_io_acct(struct block_device *bdev, enum req_op op, part_stat_lock(); update_io_ticks(bdev, now, true); + part_stat_inc(bdev, ios[sgrp]); + part_stat_add(bdev, sectors[sgrp], sectors); part_stat_add(bdev, nsecs[sgrp], jiffies_to_nsecs(duration)); part_stat_local_dec(bdev, in_flight[op_is_write(op)]); part_stat_unlock(); @@ -1007,7 +1003,7 @@ EXPORT_SYMBOL(bdev_end_io_acct); void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, struct block_device *orig_bdev) { - bdev_end_io_acct(orig_bdev, bio_op(bio), start_time); + bdev_end_io_acct(orig_bdev, bio_op(bio), bio_sectors(bio), start_time); } EXPORT_SYMBOL_GPL(bio_end_io_acct_remapped); diff --git a/block/blk-mq.c b/block/blk-mq.c index d0cb2ef18fe2..cf1a39adf9a5 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -2725,6 +2725,7 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) struct blk_mq_hw_ctx *this_hctx = NULL; struct blk_mq_ctx *this_ctx = NULL; struct request *requeue_list = NULL; + struct request **requeue_lastp = &requeue_list; unsigned int depth = 0; LIST_HEAD(list); @@ -2735,10 +2736,10 @@ static void blk_mq_dispatch_plug_list(struct blk_plug *plug, bool from_sched) this_hctx = rq->mq_hctx; this_ctx = rq->mq_ctx; } else if (this_hctx != rq->mq_hctx || this_ctx != rq->mq_ctx) { - rq_list_add(&requeue_list, rq); + rq_list_add_tail(&requeue_lastp, rq); continue; } - 
list_add_tail(&rq->queuelist, &list); + list_add(&rq->queuelist, &list); depth++; } while (!rq_list_empty(plug->mq_list)); diff --git a/block/blk-mq.h b/block/blk-mq.h index ef59fee62780..a7482d2cc82e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -378,12 +378,13 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx, #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \ do { \ if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \ + struct blk_mq_tag_set *__tag_set = (q)->tag_set; \ int srcu_idx; \ \ might_sleep_if(check_sleep); \ - srcu_idx = srcu_read_lock((q)->tag_set->srcu); \ + srcu_idx = srcu_read_lock(__tag_set->srcu); \ (dispatch_ops); \ - srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \ + srcu_read_unlock(__tag_set->srcu, srcu_idx); \ } else { \ rcu_read_lock(); \ (dispatch_ops); \ diff --git a/drivers/accel/Makefile b/drivers/accel/Makefile index 07aa77aed1c8..f22fd44d586b 100644 --- a/drivers/accel/Makefile +++ b/drivers/accel/Makefile @@ -1,4 +1,4 @@ # SPDX-License-Identifier: GPL-2.0-only -obj-y += habanalabs/ -obj-y += ivpu/ +obj-$(CONFIG_DRM_ACCEL_HABANALABS) += habanalabs/ +obj-$(CONFIG_DRM_ACCEL_IVPU) += ivpu/ diff --git a/drivers/acpi/pptt.c b/drivers/acpi/pptt.c index 10975bb603fb..a35dd0e41c27 100644 --- a/drivers/acpi/pptt.c +++ b/drivers/acpi/pptt.c @@ -536,16 +536,19 @@ static int topology_get_acpi_cpu_tag(struct acpi_table_header *table, static struct acpi_table_header *acpi_get_pptt(void) { static struct acpi_table_header *pptt; + static bool is_pptt_checked; acpi_status status; /* * PPTT will be used at runtime on every CPU hotplug in path, so we * don't need to call acpi_put_table() to release the table mapping. */ - if (!pptt) { + if (!pptt && !is_pptt_checked) { status = acpi_get_table(ACPI_SIG_PPTT, 0, &pptt); if (ACPI_FAILURE(status)) acpi_pptt_warn_missing(); + + is_pptt_checked = true; } return pptt; diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c index 710ac640267d..14d6d81e536f 100644 --- a/drivers/acpi/video_detect.c +++ b/drivers/acpi/video_detect.c @@ -716,6 +716,13 @@ static const struct dmi_system_id video_detect_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Dell G15 5515"), }, }, + { + .callback = video_detect_force_native, + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 15 3535"), + }, + }, /* * Desktops which falsely report a backlight and which our heuristics diff --git a/drivers/acpi/x86/utils.c b/drivers/acpi/x86/utils.c index e45285d4e62a..da5727069d85 100644 --- a/drivers/acpi/x86/utils.c +++ b/drivers/acpi/x86/utils.c @@ -251,6 +251,7 @@ bool force_storage_d3(void) #define ACPI_QUIRK_UART1_TTY_UART2_SKIP BIT(1) #define ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY BIT(2) #define ACPI_QUIRK_USE_ACPI_AC_AND_BATTERY BIT(3) +#define ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS BIT(4) static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { /* @@ -280,13 +281,35 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { */ #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) { + /* Acer Iconia One 7 B1-750 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { .matches = { DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "ME176C"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | ACPI_QUIRK_UART1_TTY_UART2_SKIP | - 
ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), + }, + { + /* Lenovo Yoga Book X90F/L */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Intel Corporation"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "CHERRYVIEW D1 PLATFORM"), + DMI_EXACT_MATCH(DMI_PRODUCT_VERSION, "YETI-11"), + }, + .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { .matches = { @@ -294,7 +317,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { /* Lenovo Yoga Tablet 2 1050F/L */ @@ -336,7 +360,8 @@ static const struct dmi_system_id acpi_quirk_skip_dmi_ids[] = { DMI_MATCH(DMI_PRODUCT_NAME, "M890BAP"), }, .driver_data = (void *)(ACPI_QUIRK_SKIP_I2C_CLIENTS | - ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY), + ACPI_QUIRK_SKIP_ACPI_AC_AND_BATTERY | + ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS), }, { /* Whitelabel (sold as various brands) TM800A550L */ @@ -413,6 +438,20 @@ int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *s return 0; } EXPORT_SYMBOL_GPL(acpi_quirk_skip_serdev_enumeration); + +bool acpi_quirk_skip_gpio_event_handlers(void) +{ + const struct dmi_system_id *dmi_id; + long quirks; + + dmi_id = dmi_first_match(acpi_quirk_skip_dmi_ids); + if (!dmi_id) + return false; + + quirks = (unsigned long)dmi_id->driver_data; + return (quirks & ACPI_QUIRK_SKIP_GPIO_EVENT_HANDLERS); +} +EXPORT_SYMBOL_GPL(acpi_quirk_skip_gpio_event_handlers); #endif /* Lists of PMIC ACPI HIDs with an (often better) native charger driver */ diff --git a/drivers/ata/pata_parport/pata_parport.c b/drivers/ata/pata_parport/pata_parport.c index 294a266a0dda..c1576d943b43 100644 --- a/drivers/ata/pata_parport/pata_parport.c +++ b/drivers/ata/pata_parport/pata_parport.c @@ -381,6 +381,7 @@ static void pata_parport_dev_release(struct device *dev) { struct pi_adapter *pi = container_of(dev, struct pi_adapter, dev); + ida_free(&pata_parport_bus_dev_ids, dev->id); kfree(pi); } @@ -433,23 +434,27 @@ static struct pi_adapter *pi_init_one(struct parport *parport, if (bus_for_each_dev(&pata_parport_bus_type, NULL, &match, pi_find_dev)) return NULL; + id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL); + if (id < 0) + return NULL; + pi = kzalloc(sizeof(struct pi_adapter), GFP_KERNEL); - if (!pi) + if (!pi) { + ida_free(&pata_parport_bus_dev_ids, id); return NULL; + } /* set up pi->dev before pi_probe_unit() so it can use dev_printk() */ pi->dev.parent = &pata_parport_bus; pi->dev.bus = &pata_parport_bus_type; pi->dev.driver = &pr->driver; pi->dev.release = pata_parport_dev_release; - id = ida_alloc(&pata_parport_bus_dev_ids, GFP_KERNEL); - if (id < 0) - return NULL; /* pata_parport_dev_release will do kfree(pi) */ pi->dev.id = id; dev_set_name(&pi->dev, "pata_parport.%u", pi->dev.id); if (device_register(&pi->dev)) { put_device(&pi->dev); - goto out_ida_free; + /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */ + return NULL; } pi->proto = pr; @@ -464,8 +469,7 @@ static struct pi_adapter *pi_init_one(struct parport *parport, pi->port = parport->base; par_cb.private = pi; - pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, - pi->dev.id); + pi->pardev = parport_register_dev_model(parport, DRV_NAME, &par_cb, id); if 
(!pi->pardev) goto out_module_put; @@ -487,12 +491,13 @@ static struct pi_adapter *pi_init_one(struct parport *parport, pi_connect(pi); if (ata_host_activate(host, 0, NULL, 0, &pata_parport_sht)) - goto out_unreg_parport; + goto out_disconnect; return pi; -out_unreg_parport: +out_disconnect: pi_disconnect(pi); +out_unreg_parport: parport_unregister_device(pi->pardev); if (pi->proto->release_proto) pi->proto->release_proto(pi); @@ -500,8 +505,7 @@ out_module_put: module_put(pi->proto->owner); out_unreg_dev: device_unregister(&pi->dev); -out_ida_free: - ida_free(&pata_parport_bus_dev_ids, pi->dev.id); + /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */ return NULL; } @@ -626,8 +630,7 @@ static void pi_remove_one(struct device *dev) pi_disconnect(pi); pi_release(pi); device_unregister(dev); - ida_free(&pata_parport_bus_dev_ids, dev->id); - /* pata_parport_dev_release will do kfree(pi) */ + /* pata_parport_dev_release will do ida_free(dev->id) and kfree(pi) */ } static ssize_t delete_device_store(struct bus_type *bus, const char *buf, @@ -643,6 +646,7 @@ static ssize_t delete_device_store(struct bus_type *bus, const char *buf, } pi_remove_one(dev); + put_device(dev); mutex_unlock(&pi_mutex); return count; diff --git a/drivers/block/loop.c b/drivers/block/loop.c index 839373451c2b..28eb59fd71ca 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c @@ -1859,35 +1859,44 @@ static blk_status_t loop_queue_rq(struct blk_mq_hw_ctx *hctx, static void loop_handle_cmd(struct loop_cmd *cmd) { + struct cgroup_subsys_state *cmd_blkcg_css = cmd->blkcg_css; + struct cgroup_subsys_state *cmd_memcg_css = cmd->memcg_css; struct request *rq = blk_mq_rq_from_pdu(cmd); const bool write = op_is_write(req_op(rq)); struct loop_device *lo = rq->q->queuedata; int ret = 0; struct mem_cgroup *old_memcg = NULL; + const bool use_aio = cmd->use_aio; if (write && (lo->lo_flags & LO_FLAGS_READ_ONLY)) { ret = -EIO; goto failed; } - if (cmd->blkcg_css) - kthread_associate_blkcg(cmd->blkcg_css); - if (cmd->memcg_css) + if (cmd_blkcg_css) + kthread_associate_blkcg(cmd_blkcg_css); + if (cmd_memcg_css) old_memcg = set_active_memcg( - mem_cgroup_from_css(cmd->memcg_css)); + mem_cgroup_from_css(cmd_memcg_css)); + /* + * do_req_filebacked() may call blk_mq_complete_request() synchronously + * or asynchronously if using aio. Hence, do not touch 'cmd' after + * do_req_filebacked() has returned unless we are sure that 'cmd' has + * not yet been completed. 
+ */ ret = do_req_filebacked(lo, rq); - if (cmd->blkcg_css) + if (cmd_blkcg_css) kthread_associate_blkcg(NULL); - if (cmd->memcg_css) { + if (cmd_memcg_css) { set_active_memcg(old_memcg); - css_put(cmd->memcg_css); + css_put(cmd_memcg_css); } failed: /* complete non-aio request */ - if (!cmd->use_aio || ret) { + if (!use_aio || ret) { if (ret == -EOPNOTSUPP) cmd->ret = ret; else diff --git a/drivers/block/null_blk/main.c b/drivers/block/null_blk/main.c index 4c601ca9552a..9e6b032c8ecc 100644 --- a/drivers/block/null_blk/main.c +++ b/drivers/block/null_blk/main.c @@ -1413,8 +1413,7 @@ static inline void nullb_complete_cmd(struct nullb_cmd *cmd) case NULL_IRQ_SOFTIRQ: switch (cmd->nq->dev->queue_mode) { case NULL_Q_MQ: - if (likely(!blk_should_fake_timeout(cmd->rq->q))) - blk_mq_complete_request(cmd->rq); + blk_mq_complete_request(cmd->rq); break; case NULL_Q_BIO: /* @@ -1658,12 +1657,13 @@ static enum blk_eh_timer_return null_timeout_rq(struct request *rq) } static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, - const struct blk_mq_queue_data *bd) + const struct blk_mq_queue_data *bd) { - struct nullb_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); + struct request *rq = bd->rq; + struct nullb_cmd *cmd = blk_mq_rq_to_pdu(rq); struct nullb_queue *nq = hctx->driver_data; - sector_t nr_sectors = blk_rq_sectors(bd->rq); - sector_t sector = blk_rq_pos(bd->rq); + sector_t nr_sectors = blk_rq_sectors(rq); + sector_t sector = blk_rq_pos(rq); const bool is_poll = hctx->type == HCTX_TYPE_POLL; might_sleep_if(hctx->flags & BLK_MQ_F_BLOCKING); @@ -1672,14 +1672,15 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, hrtimer_init(&cmd->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cmd->timer.function = null_cmd_timer_expired; } - cmd->rq = bd->rq; + cmd->rq = rq; cmd->error = BLK_STS_OK; cmd->nq = nq; - cmd->fake_timeout = should_timeout_request(bd->rq); + cmd->fake_timeout = should_timeout_request(rq) || + blk_should_fake_timeout(rq->q); - blk_mq_start_request(bd->rq); + blk_mq_start_request(rq); - if (should_requeue_request(bd->rq)) { + if (should_requeue_request(rq)) { /* * Alternate between hitting the core BUSY path, and the * driver driven requeue path @@ -1687,22 +1688,20 @@ static blk_status_t null_queue_rq(struct blk_mq_hw_ctx *hctx, nq->requeue_selection++; if (nq->requeue_selection & 1) return BLK_STS_RESOURCE; - else { - blk_mq_requeue_request(bd->rq, true); - return BLK_STS_OK; - } + blk_mq_requeue_request(rq, true); + return BLK_STS_OK; } if (is_poll) { spin_lock(&nq->poll_lock); - list_add_tail(&bd->rq->queuelist, &nq->poll_list); + list_add_tail(&rq->queuelist, &nq->poll_list); spin_unlock(&nq->poll_lock); return BLK_STS_OK; } if (cmd->fake_timeout) return BLK_STS_OK; - return null_handle_cmd(cmd, sector, nr_sectors, req_op(bd->rq)); + return null_handle_cmd(cmd, sector, nr_sectors, req_op(rq)); } static void cleanup_queue(struct nullb_queue *nq) diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c index fb855da971ee..9fa821fa76b0 100644 --- a/drivers/block/sunvdc.c +++ b/drivers/block/sunvdc.c @@ -972,6 +972,8 @@ static int vdc_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) print_version(); hp = mdesc_grab(); + if (!hp) + return -ENODEV; err = -ENODEV; if ((vdev->dev_no << PARTITION_SHIFT) & ~(u64)MINORMASK) { diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig index b6c5bf69a2b2..1eef05bb1f99 100644 --- a/drivers/clk/Kconfig +++ b/drivers/clk/Kconfig @@ -91,7 +91,7 @@ config COMMON_CLK_RK808 config COMMON_CLK_HI655X tristate "Clock driver for Hi655x" 
if EXPERT depends on (MFD_HI655X_PMIC || COMPILE_TEST) - depends on REGMAP + select REGMAP default MFD_HI655X_PMIC help This driver supports the hi655x PMIC clock. This diff --git a/drivers/clk/bcm/clk-bcm2835-aux.c b/drivers/clk/bcm/clk-bcm2835-aux.c index 290a2846a86b..0fafa5cba442 100644 --- a/drivers/clk/bcm/clk-bcm2835-aux.c +++ b/drivers/clk/bcm/clk-bcm2835-aux.c @@ -69,4 +69,3 @@ builtin_platform_driver(bcm2835_aux_clk_driver); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_DESCRIPTION("BCM2835 auxiliary peripheral clock driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/clk/bcm/clk-bcm2835.c b/drivers/clk/bcm/clk-bcm2835.c index e74fe6219d14..8dc476ef5bf9 100644 --- a/drivers/clk/bcm/clk-bcm2835.c +++ b/drivers/clk/bcm/clk-bcm2835.c @@ -2350,4 +2350,3 @@ builtin_platform_driver(bcm2835_clk_driver); MODULE_AUTHOR("Eric Anholt <eric@anholt.net>"); MODULE_DESCRIPTION("BCM2835 clock driver"); -MODULE_LICENSE("GPL"); diff --git a/drivers/clk/clk-fixed-mmio.c b/drivers/clk/clk-fixed-mmio.c index 5225d17d6b3f..8609fca29cc4 100644 --- a/drivers/clk/clk-fixed-mmio.c +++ b/drivers/clk/clk-fixed-mmio.c @@ -99,4 +99,3 @@ module_platform_driver(of_fixed_mmio_clk_driver); MODULE_AUTHOR("Jan Kotas <jank@cadence.com>"); MODULE_DESCRIPTION("Memory Mapped IO Fixed clock driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/clk/clk-fsl-sai.c b/drivers/clk/clk-fsl-sai.c index 6238fcea0467..ee5baf993ff2 100644 --- a/drivers/clk/clk-fsl-sai.c +++ b/drivers/clk/clk-fsl-sai.c @@ -88,5 +88,4 @@ module_platform_driver(fsl_sai_clk_driver); MODULE_DESCRIPTION("Freescale SAI bitclock-as-a-clock driver"); MODULE_AUTHOR("Michael Walle <michael@walle.cc>"); -MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:fsl-sai-clk"); diff --git a/drivers/clk/clk-k210.c b/drivers/clk/clk-k210.c index 67a7cb3503c3..4eed667eddaf 100644 --- a/drivers/clk/clk-k210.c +++ b/drivers/clk/clk-k210.c @@ -495,7 +495,7 @@ static unsigned long k210_pll_get_rate(struct clk_hw *hw, f = FIELD_GET(K210_PLL_CLKF, reg) + 1; od = FIELD_GET(K210_PLL_CLKOD, reg) + 1; - return (u64)parent_rate * f / (r * od); + return div_u64((u64)parent_rate * f, r * od); } static const struct clk_ops k210_pll_ops = { diff --git a/drivers/clk/hisilicon/clk-hi3559a.c b/drivers/clk/hisilicon/clk-hi3559a.c index 9ea1a80acbe8..8036bd8cbb0a 100644 --- a/drivers/clk/hisilicon/clk-hi3559a.c +++ b/drivers/clk/hisilicon/clk-hi3559a.c @@ -841,5 +841,4 @@ static void __exit hi3559av100_crg_exit(void) module_exit(hi3559av100_crg_exit); -MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("HiSilicon Hi3559AV100 CRG Driver"); diff --git a/drivers/clk/microchip/clk-mpfs-ccc.c b/drivers/clk/microchip/clk-mpfs-ccc.c index 0ddc73e07be4..bce61c45e967 100644 --- a/drivers/clk/microchip/clk-mpfs-ccc.c +++ b/drivers/clk/microchip/clk-mpfs-ccc.c @@ -291,4 +291,3 @@ module_exit(clk_ccc_exit); MODULE_DESCRIPTION("Microchip PolarFire SoC Clock Conditioning Circuitry Driver"); MODULE_AUTHOR("Conor Dooley <conor.dooley@microchip.com>"); -MODULE_LICENSE("GPL"); diff --git a/drivers/cpuidle/cpuidle-psci-domain.c b/drivers/cpuidle/cpuidle-psci-domain.c index 6ad2954948a5..11316c3b14ca 100644 --- a/drivers/cpuidle/cpuidle-psci-domain.c +++ b/drivers/cpuidle/cpuidle-psci-domain.c @@ -106,7 +106,8 @@ static void psci_pd_remove(void) struct psci_pd_provider *pd_provider, *it; struct generic_pm_domain *genpd; - list_for_each_entry_safe(pd_provider, it, &psci_pd_providers, link) { + list_for_each_entry_safe_reverse(pd_provider, it, + &psci_pd_providers, link) { 
of_genpd_del_provider(pd_provider->node); genpd = of_genpd_remove_last(pd_provider->node); diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c index d8a421ce26a8..31ae0adbb295 100644 --- a/drivers/gpio/gpiolib-acpi.c +++ b/drivers/gpio/gpiolib-acpi.c @@ -536,6 +536,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip) if (ACPI_FAILURE(status)) return; + if (acpi_quirk_skip_gpio_event_handlers()) + return; + acpi_walk_resources(handle, METHOD_NAME__AEI, acpi_gpiochip_alloc_event, acpi_gpio); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index c4a4e2fe6681..da5b0258a237 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4145,8 +4145,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon) if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D3)) DRM_WARN("smart shift update failed\n"); - drm_kms_helper_poll_disable(dev); - if (fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true); @@ -4243,8 +4241,6 @@ exit: if (fbcon) drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false); - drm_kms_helper_poll_enable(dev); - amdgpu_ras_resume(adev); if (adev->mode_info.num_crtc) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 503f89a766c3..d60fe7eb5579 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -1618,6 +1618,8 @@ int amdgpu_display_suspend_helper(struct amdgpu_device *adev) struct drm_connector_list_iter iter; int r; + drm_kms_helper_poll_disable(dev); + /* turn off display hw */ drm_modeset_lock_all(dev); drm_connector_list_iter_begin(dev, &iter); @@ -1694,6 +1696,8 @@ int amdgpu_display_resume_helper(struct amdgpu_device *adev) drm_modeset_unlock_all(dev); + drm_kms_helper_poll_enable(dev); + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 25217b05c0ea..e7974de8b035 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -26,6 +26,7 @@ #include <linux/firmware.h> #include <linux/module.h> +#include <linux/dmi.h> #include <linux/pci.h> #include <linux/debugfs.h> #include <drm/drm_drv.h> @@ -114,6 +115,24 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev) (adev->pg_flags & AMD_PG_SUPPORT_VCN_DPG)) adev->vcn.indirect_sram = true; + /* + * Some Steam Deck's BIOS versions are incompatible with the + * indirect SRAM mode, leading to amdgpu being unable to get + * properly probed (and even potentially crashing the kernel). + * Hence, check for these versions here - notice this is + * restricted to Vangogh (Deck's APU). 
+ */ + if (adev->ip_versions[UVD_HWIP][0] == IP_VERSION(3, 0, 2)) { + const char *bios_ver = dmi_get_system_info(DMI_BIOS_VERSION); + + if (bios_ver && (!strncmp("F7A0113", bios_ver, 7) || + !strncmp("F7A0114", bios_ver, 7))) { + adev->vcn.indirect_sram = false; + dev_info(adev->dev, + "Steam Deck quirk: indirect SRAM disabled on BIOS %s\n", bios_ver); + } + } + hdr = (const struct common_firmware_header *)adev->vcn.fw->data; adev->vcn.fw_version = le32_to_cpu(hdr->ucode_version); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index b9e9480448af..4f7bab52282a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -124,6 +124,8 @@ enum AMDGIM_FEATURE_FLAG { AMDGIM_FEATURE_PP_ONE_VF = (1 << 4), /* Indirect Reg Access enabled */ AMDGIM_FEATURE_INDIRECT_REG_ACCESS = (1 << 5), + /* AV1 Support MODE*/ + AMDGIM_FEATURE_AV1_SUPPORT = (1 << 6), }; enum AMDGIM_REG_ACCESS_FLAG { @@ -322,6 +324,8 @@ static inline bool is_virtual_machine(void) ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug) #define amdgpu_sriov_is_normal(adev) \ ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug)) +#define amdgpu_sriov_is_av1_support(adev) \ + ((adev)->virt.gim_feature & AMDGIM_FEATURE_AV1_SUPPORT) bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h index 6c97148ca0ed..24d42d24e6a0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgv_sriovmsg.h @@ -93,7 +93,8 @@ union amd_sriov_msg_feature_flags { uint32_t mm_bw_management : 1; uint32_t pp_one_vf_mode : 1; uint32_t reg_indirect_acc : 1; - uint32_t reserved : 26; + uint32_t av1_support : 1; + uint32_t reserved : 25; } flags; uint32_t all; }; diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 855d390c41de..22e25ca285f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -1055,8 +1055,8 @@ static int nv_common_late_init(void *handle) amdgpu_virt_update_sriov_video_codec(adev, sriov_sc_video_codecs_encode_array, ARRAY_SIZE(sriov_sc_video_codecs_encode_array), - sriov_sc_video_codecs_decode_array_vcn1, - ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn1)); + sriov_sc_video_codecs_decode_array_vcn0, + ARRAY_SIZE(sriov_sc_video_codecs_decode_array_vcn0)); } } diff --git a/drivers/gpu/drm/amd/amdgpu/soc21.c b/drivers/gpu/drm/amd/amdgpu/soc21.c index 061793d390cc..c82b3a7ea5f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc21.c +++ b/drivers/gpu/drm/amd/amdgpu/soc21.c @@ -102,6 +102,59 @@ static const struct amdgpu_video_codecs vcn_4_0_0_video_codecs_decode_vcn1 = .codec_array = vcn_4_0_0_video_codecs_decode_array_vcn1, }; +/* SRIOV SOC21, not const since data is controlled by host */ +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn0[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_encode_array_vcn1[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 
4096, 2304, 0)}, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn0 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0), + .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn0, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_encode_vcn1 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1), + .codec_array = sriov_vcn_4_0_0_video_codecs_encode_array_vcn1, +}; + +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn0[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codec_info sriov_vcn_4_0_0_video_codecs_decode_array_vcn1[] = { + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, + {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn0 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0), + .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn0, +}; + +static struct amdgpu_video_codecs sriov_vcn_4_0_0_video_codecs_decode_vcn1 = { + .codec_count = ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1), + .codec_array = sriov_vcn_4_0_0_video_codecs_decode_array_vcn1, +}; + static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, const struct amdgpu_video_codecs **codecs) { @@ -112,16 +165,31 @@ static int soc21_query_video_codecs(struct amdgpu_device *adev, bool encode, case IP_VERSION(4, 0, 0): case IP_VERSION(4, 0, 2): case IP_VERSION(4, 0, 4): - if (adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) { - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; - else - *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + if (amdgpu_sriov_vf(adev)) { + if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) || + !amdgpu_sriov_is_av1_support(adev)) { + if (encode) + *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = &sriov_vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &sriov_vcn_4_0_0_video_codecs_decode_vcn0; + } } else { - if (encode) - *codecs = &vcn_4_0_0_video_codecs_encode_vcn0; - else - *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0)) { + if (encode) + *codecs = &vcn_4_0_0_video_codecs_encode_vcn1; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn1; + } else { + if (encode) + *codecs = 
&vcn_4_0_0_video_codecs_encode_vcn0; + else + *codecs = &vcn_4_0_0_video_codecs_decode_vcn0; + } } return 0; default: @@ -730,8 +798,23 @@ static int soc21_common_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (amdgpu_sriov_vf(adev)) + if (amdgpu_sriov_vf(adev)) { xgpu_nv_mailbox_get_irq(adev); + if ((adev->vcn.harvest_config & AMDGPU_VCN_HARVEST_VCN0) || + !amdgpu_sriov_is_av1_support(adev)) { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_vcn_4_0_0_video_codecs_encode_array_vcn1, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn1), + sriov_vcn_4_0_0_video_codecs_decode_array_vcn1, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn1)); + } else { + amdgpu_virt_update_sriov_video_codec(adev, + sriov_vcn_4_0_0_video_codecs_encode_array_vcn0, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_encode_array_vcn0), + sriov_vcn_4_0_0_video_codecs_decode_array_vcn0, + ARRAY_SIZE(sriov_vcn_4_0_0_video_codecs_decode_array_vcn0)); + } + } return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a0e30f21e12e..de310ed367ca 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1312,14 +1312,14 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, args->n_success = i+1; } - mutex_unlock(&p->mutex); - err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; } + mutex_unlock(&p->mutex); + /* Flush TLBs after waiting for the page table updates to complete */ for (i = 0; i < args->n_devices; i++) { peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]); @@ -1335,9 +1335,9 @@ get_process_device_data_failed: bind_process_to_device_failed: get_mem_obj_from_handle_failed: map_memory_to_gpu_failed: +sync_memory_failed: mutex_unlock(&p->mutex); copy_from_user_failed: -sync_memory_failed: kfree(devices_arr); return err; @@ -1351,6 +1351,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, void *mem; long err = 0; uint32_t *devices_arr = NULL, i; + bool flush_tlb; if (!args->n_devices) { pr_debug("Device IDs array empty\n"); @@ -1403,16 +1404,19 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } args->n_success = i+1; } - mutex_unlock(&p->mutex); - if (kfd_flush_tlb_after_unmap(pdd->dev)) { + flush_tlb = kfd_flush_tlb_after_unmap(pdd->dev); + if (flush_tlb) { err = amdgpu_amdkfd_gpuvm_sync_memory(pdd->dev->adev, (struct kgd_mem *) mem, true); if (err) { pr_debug("Sync memory failed, wait interrupted by user signal\n"); goto sync_memory_failed; } + } + mutex_unlock(&p->mutex); + if (flush_tlb) { /* Flush TLBs after waiting for the page table updates to complete */ for (i = 0; i < args->n_devices; i++) { peer_pdd = kfd_process_device_data_by_id(p, devices_arr[i]); @@ -1428,9 +1432,9 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, bind_process_to_device_failed: get_mem_obj_from_handle_failed: unmap_memory_from_gpu_failed: +sync_memory_failed: mutex_unlock(&p->mutex); copy_from_user_failed: -sync_memory_failed: kfree(devices_arr); return err; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 3de7f616a001..ec70a1658dc3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -59,6 +59,7 @@ static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, unsigned int 
chunk_size); static void kfd_gtt_sa_fini(struct kfd_dev *kfd); +static int kfd_resume_iommu(struct kfd_dev *kfd); static int kfd_resume(struct kfd_dev *kfd); static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) @@ -624,7 +625,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - if (kgd2kfd_resume_iommu(kfd)) + if (kfd_resume_iommu(kfd)) goto device_iommu_error; if (kfd_resume(kfd)) @@ -773,6 +774,14 @@ int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) int kgd2kfd_resume_iommu(struct kfd_dev *kfd) { + if (!kfd->init_complete) + return 0; + + return kfd_resume_iommu(kfd); +} + +static int kfd_resume_iommu(struct kfd_dev *kfd) +{ int err = 0; err = kfd_iommu_resume(kfd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index de8ce72344fc..54933903bcb8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -289,7 +289,7 @@ static unsigned long svm_migrate_unsuccessful_pages(struct migrate_vma *migrate) static int svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct migrate_vma *migrate, struct dma_fence **mfence, - dma_addr_t *scratch) + dma_addr_t *scratch, uint64_t ttm_res_offset) { uint64_t npages = migrate->npages; struct device *dev = adev->dev; @@ -299,19 +299,13 @@ svm_migrate_copy_to_vram(struct amdgpu_device *adev, struct svm_range *prange, uint64_t i, j; int r; - pr_debug("svms 0x%p [0x%lx 0x%lx]\n", prange->svms, prange->start, - prange->last); + pr_debug("svms 0x%p [0x%lx 0x%lx 0x%llx]\n", prange->svms, prange->start, + prange->last, ttm_res_offset); src = scratch; dst = (uint64_t *)(scratch + npages); - r = svm_range_vram_node_new(adev, prange, true); - if (r) { - dev_dbg(adev->dev, "fail %d to alloc vram\n", r); - goto out; - } - - amdgpu_res_first(prange->ttm_res, prange->offset << PAGE_SHIFT, + amdgpu_res_first(prange->ttm_res, ttm_res_offset, npages << PAGE_SHIFT, &cursor); for (i = j = 0; i < npages; i++) { struct page *spage; @@ -391,14 +385,14 @@ out_free_vram_pages: migrate->dst[i + 3] = 0; } #endif -out: + return r; } static long svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, struct vm_area_struct *vma, uint64_t start, - uint64_t end, uint32_t trigger) + uint64_t end, uint32_t trigger, uint64_t ttm_res_offset) { struct kfd_process *p = container_of(prange->svms, struct kfd_process, svms); uint64_t npages = (end - start) >> PAGE_SHIFT; @@ -451,7 +445,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, else pr_debug("0x%lx pages migrated\n", cpages); - r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch); + r = svm_migrate_copy_to_vram(adev, prange, &migrate, &mfence, scratch, ttm_res_offset); migrate_vma_pages(&migrate); pr_debug("successful/cpages/npages 0x%lx/0x%lx/0x%lx\n", @@ -499,6 +493,7 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, unsigned long addr, start, end; struct vm_area_struct *vma; struct amdgpu_device *adev; + uint64_t ttm_res_offset; unsigned long cpages = 0; long r = 0; @@ -520,6 +515,13 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, start = prange->start << PAGE_SHIFT; end = (prange->last + 1) << PAGE_SHIFT; + r = svm_range_vram_node_new(adev, prange, true); + if (r) { + dev_dbg(adev->dev, "fail %ld to alloc vram\n", r); + return r; + } + ttm_res_offset = prange->offset << PAGE_SHIFT; + for (addr = start; addr < end;) { unsigned long next; @@ -528,18 
+530,21 @@ svm_migrate_ram_to_vram(struct svm_range *prange, uint32_t best_loc, break; next = min(vma->vm_end, end); - r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger); + r = svm_migrate_vma_to_vram(adev, prange, vma, addr, next, trigger, ttm_res_offset); if (r < 0) { pr_debug("failed %ld to migrate\n", r); break; } else { cpages += r; } + ttm_res_offset += next - addr; addr = next; } if (cpages) prange->actual_loc = best_loc; + else + svm_range_vram_node_free(prange); return r < 0 ? r : 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c index 09b966dc3768..aee2212e52f6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c @@ -77,6 +77,7 @@ err_ioctl: static void kfd_exit(void) { + kfd_cleanup_processes(); kfd_debugfs_fini(); kfd_process_destroy_wq(); kfd_procfs_shutdown(); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index bfa30d12406b..7e4d992e48b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -928,6 +928,7 @@ bool kfd_dev_is_large_bar(struct kfd_dev *dev); int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); +void kfd_cleanup_processes(void); struct kfd_process *kfd_create_process(struct file *filep); struct kfd_process *kfd_get_process(const struct task_struct *task); struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 7acd55a814b2..4208e0f01064 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1167,6 +1167,17 @@ static void kfd_process_free_notifier(struct mmu_notifier *mn) kfd_unref_process(container_of(mn, struct kfd_process, mmu_notifier)); } +static void kfd_process_notifier_release_internal(struct kfd_process *p) +{ + cancel_delayed_work_sync(&p->eviction_work); + cancel_delayed_work_sync(&p->restore_work); + + /* Indicate to other users that MM is no longer valid */ + p->mm = NULL; + + mmu_notifier_put(&p->mmu_notifier); +} + static void kfd_process_notifier_release(struct mmu_notifier *mn, struct mm_struct *mm) { @@ -1181,17 +1192,22 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, return; mutex_lock(&kfd_processes_mutex); + /* + * Do early return if table is empty. + * + * This could potentially happen if this function is called concurrently + * by mmu_notifier and by kfd_cleanup_pocesses. + * + */ + if (hash_empty(kfd_processes_table)) { + mutex_unlock(&kfd_processes_mutex); + return; + } hash_del_rcu(&p->kfd_processes); mutex_unlock(&kfd_processes_mutex); synchronize_srcu(&kfd_processes_srcu); - cancel_delayed_work_sync(&p->eviction_work); - cancel_delayed_work_sync(&p->restore_work); - - /* Indicate to other users that MM is no longer valid */ - p->mm = NULL; - - mmu_notifier_put(&p->mmu_notifier); + kfd_process_notifier_release_internal(p); } static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { @@ -1200,6 +1216,43 @@ static const struct mmu_notifier_ops kfd_process_mmu_notifier_ops = { .free_notifier = kfd_process_free_notifier, }; +/* + * This code handles the case when driver is being unloaded before all + * mm_struct are released. We need to safely free the kfd_process and + * avoid race conditions with mmu_notifier that might try to free them. 
+ * + */ +void kfd_cleanup_processes(void) +{ + struct kfd_process *p; + struct hlist_node *p_temp; + unsigned int temp; + HLIST_HEAD(cleanup_list); + + /* + * Move all remaining kfd_process from the process table to a + * temp list for processing. Once done, callback from mmu_notifier + * release will not see the kfd_process in the table and do early return, + * avoiding double free issues. + */ + mutex_lock(&kfd_processes_mutex); + hash_for_each_safe(kfd_processes_table, temp, p_temp, p, kfd_processes) { + hash_del_rcu(&p->kfd_processes); + synchronize_srcu(&kfd_processes_srcu); + hlist_add_head(&p->kfd_processes, &cleanup_list); + } + mutex_unlock(&kfd_processes_mutex); + + hlist_for_each_entry_safe(p, p_temp, &cleanup_list, kfd_processes) + kfd_process_notifier_release_internal(p); + + /* + * Ensures that all outstanding free_notifier get called, triggering + * the release of the kfd_process struct. + */ + mmu_notifier_synchronize(); +} + static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) { unsigned long offset; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 5137476ec18e..4236539d9f93 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -218,8 +218,8 @@ static int init_user_queue(struct process_queue_manager *pqm, return 0; cleanup: - if (dev->shared_resources.enable_mes) - uninit_queue(*q); + uninit_queue(*q); + *q = NULL; return retval; } diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 009ef917dad4..32abbafd43fa 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -5105,9 +5105,9 @@ static void fill_dc_dirty_rects(struct drm_plane *plane, for (; flip_addrs->dirty_rect_count < num_clips; clips++) fill_dc_dirty_rect(new_plane_state->plane, - &dirty_rects[i], clips->x1, - clips->y1, clips->x2 - clips->x1, - clips->y2 - clips->y1, + &dirty_rects[flip_addrs->dirty_rect_count], + clips->x1, clips->y1, + clips->x2 - clips->x1, clips->y2 - clips->y1, &flip_addrs->dirty_rect_count, false); return; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 8e572f07ec47..4abfd2c9679f 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -561,7 +561,7 @@ static void update_config(void *handle, struct cp_psp_stream_config *config) link->dp.mst_enabled = config->mst_enabled; link->dp.usb4_enabled = config->usb4_enabled; display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; - link->adjust.auth_delay = 0; + link->adjust.auth_delay = 2; link->adjust.hdcp1.disable = 0; conn_state = aconnector->base.state; diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 3b4d4d68359b..df787fcf8e86 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -998,8 +998,5 @@ void dcn30_prepare_bandwidth(struct dc *dc, dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz); dcn20_prepare_bandwidth(dc, context); - - dc_dmub_srv_p_state_delegate(dc, - context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, 
context); } diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c index 16f892125b6f..9d14045cccd6 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c @@ -1104,7 +1104,7 @@ unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsign *k2_div = PIXEL_RATE_DIV_BY_2; else *k2_div = PIXEL_RATE_DIV_BY_4; - } else if (dc_is_dp_signal(stream->signal) || dc_is_virtual_signal(stream->signal)) { + } else if (dc_is_dp_signal(stream->signal)) { if (two_pix_per_container) { *k1_div = PIXEL_RATE_DIV_BY_1; *k2_div = PIXEL_RATE_DIV_BY_2; diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c index 74e50c09bb62..d024007f0f65 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource.c @@ -1915,6 +1915,7 @@ int dcn32_populate_dml_pipes_from_context( bool subvp_in_use = false; uint8_t is_pipe_split_expected[MAX_PIPES] = {0}; struct dc_crtc_timing *timing; + bool vsr_odm_support = false; dcn20_populate_dml_pipes_from_context(dc, context, pipes, fast_validate); @@ -1932,12 +1933,15 @@ int dcn32_populate_dml_pipes_from_context( timing = &pipe->stream->timing; pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_dal; + vsr_odm_support = (res_ctx->pipe_ctx[i].stream->src.width >= 5120 && + res_ctx->pipe_ctx[i].stream->src.width > res_ctx->pipe_ctx[i].stream->dst.width); if (context->stream_count == 1 && context->stream_status[0].plane_count == 1 && !dc_is_hdmi_signal(res_ctx->pipe_ctx[i].stream->signal) && is_h_timing_divisible_by_2(res_ctx->pipe_ctx[i].stream) && pipe->stream->timing.pix_clk_100hz * 100 > DCN3_2_VMIN_DISPCLK_HZ && - dc->debug.enable_single_display_2to1_odm_policy) { + dc->debug.enable_single_display_2to1_odm_policy && + !vsr_odm_support) { //excluding 2to1 ODM combine on >= 5k vsr pipes[pipe_cnt].pipe.dest.odm_combine_policy = dm_odm_combine_policy_2to1; } pipe_cnt++; diff --git a/drivers/gpu/drm/amd/display/dc/link/link_detection.c b/drivers/gpu/drm/amd/display/dc/link/link_detection.c index 38216c789d77..f70025ef7b69 100644 --- a/drivers/gpu/drm/amd/display/dc/link/link_detection.c +++ b/drivers/gpu/drm/amd/display/dc/link/link_detection.c @@ -855,6 +855,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, struct dc_sink *prev_sink = NULL; struct dpcd_caps prev_dpcd_caps; enum dc_connection_type new_connection_type = dc_connection_none; + enum dc_connection_type pre_connection_type = link->type; const uint32_t post_oui_delay = 30; // 30ms DC_LOGGER_INIT(link->ctx->logger); @@ -957,6 +958,8 @@ static bool detect_link_and_local_sink(struct dc_link *link, } if (!detect_dp(link, &sink_caps, reason)) { + link->type = pre_connection_type; + if (prev_sink) dc_sink_release(prev_sink); return false; @@ -1244,11 +1247,16 @@ bool link_detect(struct dc_link *link, enum dc_detect_reason reason) bool is_delegated_to_mst_top_mgr = false; enum dc_connection_type pre_link_type = link->type; + DC_LOGGER_INIT(link->ctx->logger); + is_local_sink_detect_success = detect_link_and_local_sink(link, reason); if (is_local_sink_detect_success && link->local_sink) verify_link_capability(link, link->local_sink, reason); + DC_LOG_DC("%s: link_index=%d is_local_sink_detect_success=%d pre_link_type=%d link_type=%d\n", __func__, + link->link_index, is_local_sink_detect_success, pre_link_type, link->type); + if 
(is_local_sink_detect_success && link->local_sink && dc_is_dp_signal(link->local_sink->sink_signal) && link->dpcd_caps.is_mst_capable) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h index f77401709d83..2162ecd1057d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define PMFW_DRIVER_IF_VERSION 7 +#define PMFW_DRIVER_IF_VERSION 8 typedef struct { int32_t value; @@ -198,7 +198,7 @@ typedef struct { uint16_t SkinTemp; uint16_t DeviceState; uint16_t CurTemp; //[centi-Celsius] - uint16_t spare2; + uint16_t FilterAlphaValue; uint16_t AverageGfxclkFrequency; uint16_t AverageFclkFrequency; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h index 1c0ae2cb757b..f085cb97a620 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -29,7 +29,7 @@ #define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 #define SMU13_DRIVER_IF_VERSION_ALDE 0x08 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_0 0x37 -#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x07 +#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x08 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0_10 0x32 #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x37 diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 697e98a0a20a..75f18681e984 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -2143,16 +2143,9 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu) (OverDriveTable_t *)smu->smu_table.boot_overdrive_table; OverDriveTable_t *user_od_table = (OverDriveTable_t *)smu->smu_table.user_overdrive_table; + OverDriveTable_t user_od_table_bak; int ret = 0; - /* - * For S3/S4/Runpm resume, no need to setup those overdrive tables again as - * - either they already have the default OD settings got during cold bootup - * - or they have some user customized OD settings which cannot be overwritten - */ - if (smu->adev->in_suspend) - return 0; - ret = smu_cmn_update_table(smu, SMU_TABLE_OVERDRIVE, 0, (void *)boot_od_table, false); if (ret) { @@ -2163,7 +2156,23 @@ static int sienna_cichlid_set_default_od_settings(struct smu_context *smu) sienna_cichlid_dump_od_table(smu, boot_od_table); memcpy(od_table, boot_od_table, sizeof(OverDriveTable_t)); - memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); + + /* + * For S3/S4/Runpm resume, we need to setup those overdrive tables again, + * but we have to preserve user defined values in "user_od_table". 
+ */ + if (!smu->adev->in_suspend) { + memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); + smu->user_dpm_profile.user_od = false; + } else if (smu->user_dpm_profile.user_od) { + memcpy(&user_od_table_bak, user_od_table, sizeof(OverDriveTable_t)); + memcpy(user_od_table, boot_od_table, sizeof(OverDriveTable_t)); + user_od_table->GfxclkFmin = user_od_table_bak.GfxclkFmin; + user_od_table->GfxclkFmax = user_od_table_bak.GfxclkFmax; + user_od_table->UclkFmin = user_od_table_bak.UclkFmin; + user_od_table->UclkFmax = user_od_table_bak.UclkFmax; + user_od_table->VddGfxOffset = user_od_table_bak.VddGfxOffset; + } return 0; } @@ -2373,6 +2382,20 @@ static int sienna_cichlid_od_edit_dpm_table(struct smu_context *smu, return ret; } +static int sienna_cichlid_restore_user_od_settings(struct smu_context *smu) +{ + struct smu_table_context *table_context = &smu->smu_table; + OverDriveTable_t *od_table = table_context->overdrive_table; + OverDriveTable_t *user_od_table = table_context->user_overdrive_table; + int res; + + res = smu_v11_0_restore_user_od_settings(smu); + if (res == 0) + memcpy(od_table, user_od_table, sizeof(OverDriveTable_t)); + + return res; +} + static int sienna_cichlid_run_btc(struct smu_context *smu) { int res; @@ -4400,7 +4423,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range, .set_default_od_settings = sienna_cichlid_set_default_od_settings, .od_edit_dpm_table = sienna_cichlid_od_edit_dpm_table, - .restore_user_od_settings = smu_v11_0_restore_user_od_settings, + .restore_user_od_settings = sienna_cichlid_restore_user_od_settings, .run_btc = sienna_cichlid_run_btc, .set_power_source = smu_v11_0_set_power_source, .get_pp_feature_mask = smu_cmn_get_pp_feature_mask, diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index 3d0a4da661bc..261a62e15934 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -2796,7 +2796,7 @@ u32 drm_edid_get_panel_id(struct i2c_adapter *adapter) * the EDID then we'll just return 0. */ - base_block = kmalloc(EDID_LENGTH, GFP_KERNEL); + base_block = kzalloc(EDID_LENGTH, GFP_KERNEL); if (!base_block) return 0; diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 7a3cb08dc942..a5d392f7e11f 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -1388,10 +1388,13 @@ EXPORT_SYMBOL(drm_gem_lru_move_tail); * * @lru: The LRU to scan * @nr_to_scan: The number of pages to try to reclaim + * @remaining: The number of pages left to reclaim, should be initialized by caller * @shrink: Callback to try to shrink/reclaim the object. */ unsigned long -drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, +drm_gem_lru_scan(struct drm_gem_lru *lru, + unsigned int nr_to_scan, + unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj)) { struct drm_gem_lru still_in_lru; @@ -1430,8 +1433,10 @@ drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, * hit shrinker in response to trying to get backing pages * for this obj (ie. 
while it's lock is already held) */ - if (!dma_resv_trylock(obj->resv)) + if (!dma_resv_trylock(obj->resv)) { + *remaining += obj->size >> PAGE_SHIFT; goto tail; + } if (shrink(obj)) { freed += obj->size >> PAGE_SHIFT; diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c index 75185a960fc4..2b2163c8138e 100644 --- a/drivers/gpu/drm/drm_gem_shmem_helper.c +++ b/drivers/gpu/drm/drm_gem_shmem_helper.c @@ -619,11 +619,14 @@ int drm_gem_shmem_mmap(struct drm_gem_shmem_object *shmem, struct vm_area_struct int ret; if (obj->import_attach) { - /* Drop the reference drm_gem_mmap_obj() acquired.*/ - drm_gem_object_put(obj); vma->vm_private_data = NULL; + ret = dma_buf_mmap(obj->dma_buf, vma, 0); + + /* Drop the reference drm_gem_mmap_obj() acquired.*/ + if (!ret) + drm_gem_object_put(obj); - return dma_buf_mmap(obj->dma_buf, vma, 0); + return ret; } ret = drm_gem_shmem_get_pages(shmem); diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index 54c517ca9632..582234f0c49a 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1631,6 +1631,8 @@ struct intel_psr { bool psr2_sel_fetch_cff_enabled; bool req_psr2_sdp_prior_scanline; u8 sink_sync_latency; + u8 io_wake_lines; + u8 fast_wake_lines; ktime_t last_entry_attempt; ktime_t last_exit; bool sink_not_reliable; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 054a009e800d..2106b3de225a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -265,6 +265,19 @@ static int intel_dp_mst_update_slots(struct intel_encoder *encoder, return 0; } +static bool intel_dp_mst_has_audio(const struct drm_connector_state *conn_state) +{ + const struct intel_digital_connector_state *intel_conn_state = + to_intel_digital_connector_state(conn_state); + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + + if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) + return connector->port->has_audio; + else + return intel_conn_state->force_audio == HDMI_AUDIO_ON; +} + static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config, struct drm_connector_state *conn_state) @@ -272,10 +285,6 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); struct intel_dp_mst_encoder *intel_mst = enc_to_mst(encoder); struct intel_dp *intel_dp = &intel_mst->primary->dp; - struct intel_connector *connector = - to_intel_connector(conn_state->connector); - struct intel_digital_connector_state *intel_conn_state = - to_intel_digital_connector_state(conn_state); const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode; struct link_config_limits limits; @@ -287,11 +296,9 @@ static int intel_dp_mst_compute_config(struct intel_encoder *encoder, pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB; pipe_config->has_pch_encoder = false; - if (intel_conn_state->force_audio == HDMI_AUDIO_AUTO) - pipe_config->has_audio = connector->port->has_audio; - else - pipe_config->has_audio = - intel_conn_state->force_audio == HDMI_AUDIO_ON; + pipe_config->has_audio = + intel_dp_mst_has_audio(conn_state) && + intel_audio_compute_config(encoder, pipe_config, conn_state); /* * for MST we always configure max link bw - the spec doesn't diff --git 
a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 7a72e15e6836..9f1a0bebae24 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -542,6 +542,14 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) val |= EDP_PSR2_FRAME_BEFORE_SU(max_t(u8, intel_dp->psr.sink_sync_latency + 1, 2)); val |= intel_psr2_get_tp_time(intel_dp); + if (DISPLAY_VER(dev_priv) >= 12) { + if (intel_dp->psr.io_wake_lines < 9 && + intel_dp->psr.fast_wake_lines < 9) + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + else + val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_3; + } + /* Wa_22012278275:adl-p */ if (IS_ADLP_DISPLAY_STEP(dev_priv, STEP_A0, STEP_E0)) { static const u8 map[] = { @@ -558,31 +566,21 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) * Still using the default IO_BUFFER_WAKE and FAST_WAKE, see * comments bellow for more information */ - u32 tmp, lines = 7; - - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; + u32 tmp; - tmp = map[lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.io_wake_lines - TGL_EDP_PSR2_IO_BUFFER_WAKE_MIN_LINES]; tmp = tmp << TGL_EDP_PSR2_IO_BUFFER_WAKE_SHIFT; val |= tmp; - tmp = map[lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; + tmp = map[intel_dp->psr.fast_wake_lines - TGL_EDP_PSR2_FAST_WAKE_MIN_LINES]; tmp = tmp << TGL_EDP_PSR2_FAST_WAKE_MIN_SHIFT; val |= tmp; } else if (DISPLAY_VER(dev_priv) >= 12) { - /* - * TODO: 7 lines of IO_BUFFER_WAKE and FAST_WAKE are default - * values from BSpec. In order to setting an optimal power - * consumption, lower than 4k resolution mode needs to decrease - * IO_BUFFER_WAKE and FAST_WAKE. And higher than 4K resolution - * mode needs to increase IO_BUFFER_WAKE and FAST_WAKE. - */ - val |= TGL_EDP_PSR2_BLOCK_COUNT_NUM_2; - val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(7); - val |= TGL_EDP_PSR2_FAST_WAKE(7); + val |= TGL_EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= TGL_EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } else if (DISPLAY_VER(dev_priv) >= 9) { - val |= EDP_PSR2_IO_BUFFER_WAKE(7); - val |= EDP_PSR2_FAST_WAKE(7); + val |= EDP_PSR2_IO_BUFFER_WAKE(intel_dp->psr.io_wake_lines); + val |= EDP_PSR2_FAST_WAKE(intel_dp->psr.fast_wake_lines); } if (intel_dp->psr.req_psr2_sdp_prior_scanline) @@ -842,6 +840,46 @@ static bool _compute_psr2_sdp_prior_scanline_indication(struct intel_dp *intel_d return true; } +static bool _compute_psr2_wake_times(struct intel_dp *intel_dp, + struct intel_crtc_state *crtc_state) +{ + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + int io_wake_lines, io_wake_time, fast_wake_lines, fast_wake_time; + u8 max_wake_lines; + + if (DISPLAY_VER(i915) >= 12) { + io_wake_time = 42; + /* + * According to Bspec it's 42us, but based on testing + * it is not enough -> use 45 us. + */ + fast_wake_time = 45; + max_wake_lines = 12; + } else { + io_wake_time = 50; + fast_wake_time = 32; + max_wake_lines = 8; + } + + io_wake_lines = intel_usecs_to_scanlines( + &crtc_state->uapi.adjusted_mode, io_wake_time); + fast_wake_lines = intel_usecs_to_scanlines( + &crtc_state->uapi.adjusted_mode, fast_wake_time); + + if (io_wake_lines > max_wake_lines || + fast_wake_lines > max_wake_lines) + return false; + + if (i915->params.psr_safest_params) + io_wake_lines = fast_wake_lines = max_wake_lines; + + /* According to Bspec lower limit should be set as 7 lines. 
*/ + intel_dp->psr.io_wake_lines = max(io_wake_lines, 7); + intel_dp->psr.fast_wake_lines = max(fast_wake_lines, 7); + + return true; +} + static bool intel_psr2_config_valid(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state) { @@ -936,6 +974,12 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, return false; } + if (!_compute_psr2_wake_times(intel_dp, crtc_state)) { + drm_dbg_kms(&dev_priv->drm, + "PSR2 not enabled, Unable to use long enough wake times\n"); + return false; + } + if (HAS_PSR2_SEL_FETCH(dev_priv)) { if (!intel_psr2_sel_fetch_config_valid(intel_dp, crtc_state) && !HAS_PSR_HW_TRACKING(dev_priv)) { diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index c65c771f5c46..1cfb94b5cedb 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -1419,6 +1419,36 @@ static const struct intel_mpllb_state dg2_hdmi_262750 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; +static const struct intel_mpllb_state dg2_hdmi_267300 = { + .clock = 267300, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 7) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 3), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 74) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 30146) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 36699), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct intel_mpllb_state dg2_hdmi_268500 = { .clock = 268500, .ref_control = @@ -1509,6 +1539,36 @@ static const struct intel_mpllb_state dg2_hdmi_241500 = { REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), }; +static const struct intel_mpllb_state dg2_hdmi_319890 = { + .clock = 319890, + .ref_control = + REG_FIELD_PREP(SNPS_PHY_REF_CONTROL_REF_RANGE, 3), + .mpllb_cp = + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT, 6) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP, 14) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_INT_GS, 64) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_CP_PROP_GS, 124), + .mpllb_div = + REG_FIELD_PREP(SNPS_PHY_MPLLB_DIV5_CLK_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_TX_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_PMIX_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_V2I, 2) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FREQ_VCO, 2), + .mpllb_div2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_REF_CLK_DIV, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_MULTIPLIER, 94) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_HDMI_DIV, 1), + .mpllb_fracn1 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_EN, 1) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_DEN, 65535), + .mpllb_fracn2 = + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_QUOT, 64094) | + REG_FIELD_PREP(SNPS_PHY_MPLLB_FRACN_REM, 13631), + .mpllb_sscen = + REG_FIELD_PREP(SNPS_PHY_MPLLB_SSC_UP_SPREAD, 1), +}; + static const struct 
intel_mpllb_state dg2_hdmi_497750 = { .clock = 497750, .ref_control = @@ -1696,8 +1756,10 @@ static const struct intel_mpllb_state * const dg2_hdmi_tables[] = { &dg2_hdmi_209800, &dg2_hdmi_241500, &dg2_hdmi_262750, + &dg2_hdmi_267300, &dg2_hdmi_268500, &dg2_hdmi_296703, + &dg2_hdmi_319890, &dg2_hdmi_497750, &dg2_hdmi_592000, &dg2_hdmi_593407, diff --git a/drivers/gpu/drm/i915/gt/intel_sseu.h b/drivers/gpu/drm/i915/gt/intel_sseu.h index aa87d3832d60..d7e8c374f153 100644 --- a/drivers/gpu/drm/i915/gt/intel_sseu.h +++ b/drivers/gpu/drm/i915/gt/intel_sseu.h @@ -27,7 +27,7 @@ struct drm_printer; * is only relevant to pre-Xe_HP platforms (Xe_HP and beyond use the * I915_MAX_SS_FUSE_BITS value below). */ -#define GEN_MAX_SS_PER_HSW_SLICE 6 +#define GEN_MAX_SS_PER_HSW_SLICE 8 /* * Maximum number of 32-bit registers used by hardware to express the diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c index 7412abf166a8..a9fea115f2d2 100644 --- a/drivers/gpu/drm/i915/i915_active.c +++ b/drivers/gpu/drm/i915/i915_active.c @@ -422,12 +422,12 @@ replace_barrier(struct i915_active *ref, struct i915_active_fence *active) * we can use it to substitute for the pending idle-barrer * request that we want to emit on the kernel_context. */ - __active_del_barrier(ref, node_from_active(active)); - return true; + return __active_del_barrier(ref, node_from_active(active)); } int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) { + u64 idx = i915_request_timeline(rq)->fence_context; struct dma_fence *fence = &rq->fence; struct i915_active_fence *active; int err; @@ -437,16 +437,19 @@ int i915_active_add_request(struct i915_active *ref, struct i915_request *rq) if (err) return err; - active = active_instance(ref, i915_request_timeline(rq)->fence_context); - if (!active) { - err = -ENOMEM; - goto out; - } + do { + active = active_instance(ref, idx); + if (!active) { + err = -ENOMEM; + goto out; + } + + if (replace_barrier(ref, active)) { + RCU_INIT_POINTER(active->fence, NULL); + atomic_dec(&ref->count); + } + } while (unlikely(is_barrier(active))); - if (replace_barrier(ref, active)) { - RCU_INIT_POINTER(active->fence, NULL); - atomic_dec(&ref->count); - } if (!__i915_active_fence_set(active, fence)) __i915_active_acquire(ref); diff --git a/drivers/gpu/drm/i915/i915_hwmon.c b/drivers/gpu/drm/i915/i915_hwmon.c index 1225bc432f0d..4683a5b96eff 100644 --- a/drivers/gpu/drm/i915/i915_hwmon.c +++ b/drivers/gpu/drm/i915/i915_hwmon.c @@ -687,6 +687,11 @@ hwm_get_preregistration_info(struct drm_i915_private *i915) for_each_gt(gt, i915, i) hwm_energy(&hwmon->ddat_gt[i], &energy); } + + /* Enable PL1 power limit */ + if (i915_mmio_reg_valid(hwmon->rg.pkg_rapl_limit)) + hwm_locked_with_pm_intel_uncore_rmw(ddat, hwmon->rg.pkg_rapl_limit, + PKG_PWR_LIM_1_EN, PKG_PWR_LIM_1_EN); } void i915_hwmon_register(struct drm_i915_private *i915) diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c index 534621a13a34..3d046878ce6c 100644 --- a/drivers/gpu/drm/meson/meson_dw_hdmi.c +++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c @@ -718,7 +718,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master, dw_plat_data = &meson_dw_hdmi->dw_plat_data; ret = devm_regulator_get_enable_optional(dev, "hdmi"); - if (ret < 0) + if (ret < 0 && ret != -ENODEV) return ret; meson_dw_hdmi->hdmitx_apb = devm_reset_control_get_exclusive(dev, diff --git a/drivers/gpu/drm/meson/meson_vpp.c b/drivers/gpu/drm/meson/meson_vpp.c index 
154837688ab0..5df1957c8e41 100644 --- a/drivers/gpu/drm/meson/meson_vpp.c +++ b/drivers/gpu/drm/meson/meson_vpp.c @@ -100,6 +100,8 @@ void meson_vpp_init(struct meson_drm *priv) priv->io_base + _REG(VPP_DOLBY_CTRL)); writel_relaxed(0x1020080, priv->io_base + _REG(VPP_DUMMY_DATA1)); + writel_relaxed(0x42020, + priv->io_base + _REG(VPP_DUMMY_DATA)); } else if (meson_vpu_is_compatible(priv, VPU_COMPATIBLE_G12A)) writel_relaxed(0xf, priv->io_base + _REG(DOLBY_PATH_CTRL)); diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index 051bdbc093cf..f38296ad8743 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -107,6 +107,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) bool (*shrink)(struct drm_gem_object *obj); bool cond; unsigned long freed; + unsigned long remaining; } stages[] = { /* Stages of progressively more aggressive/expensive reclaim: */ { &priv->lru.dontneed, purge, true }, @@ -116,14 +117,18 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) }; long nr = sc->nr_to_scan; unsigned long freed = 0; + unsigned long remaining = 0; for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) { if (!stages[i].cond) continue; stages[i].freed = - drm_gem_lru_scan(stages[i].lru, nr, stages[i].shrink); + drm_gem_lru_scan(stages[i].lru, nr, + &stages[i].remaining, + stages[i].shrink); nr -= stages[i].freed; freed += stages[i].freed; + remaining += stages[i].remaining; } if (freed) { @@ -132,7 +137,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) stages[3].freed); } - return (freed > 0) ? freed : SHRINK_STOP; + return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP; } #ifdef CONFIG_DEBUG_FS @@ -182,10 +187,12 @@ msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr) NULL, }; unsigned idx, unmapped = 0; + unsigned long remaining = 0; for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) { unmapped += drm_gem_lru_scan(lrus[idx], vmap_shrink_limit - unmapped, + &remaining, vmap_shrink); } diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c index 4e83a1891f3e..666a5e53fe19 100644 --- a/drivers/gpu/drm/panfrost/panfrost_mmu.c +++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c @@ -282,7 +282,7 @@ static void panfrost_mmu_flush_range(struct panfrost_device *pfdev, if (pm_runtime_active(pfdev->dev)) mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT); - pm_runtime_put_sync_autosuspend(pfdev->dev); + pm_runtime_put_autosuspend(pfdev->dev); } static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu, diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index cc94efbbf2d4..d6c741716167 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -95,12 +95,12 @@ static int sun4i_drv_bind(struct device *dev) /* drm_vblank_init calls kcalloc, which can fail */ ret = drm_vblank_init(drm, drm->mode_config.num_crtc); if (ret) - goto cleanup_mode_config; + goto unbind_all; /* Remove early framebuffers (ie. 
simplefb) */ ret = drm_aperture_remove_framebuffers(false, &sun4i_drv_driver); if (ret) - goto cleanup_mode_config; + goto unbind_all; sun4i_framebuffer_init(drm); @@ -119,6 +119,8 @@ static int sun4i_drv_bind(struct device *dev) finish_poll: drm_kms_helper_poll_fini(drm); +unbind_all: + component_unbind_all(dev, NULL); cleanup_mode_config: drm_mode_config_cleanup(drm); of_reserved_mem_device_release(dev); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 326a3d13a829..c286c6ffe07f 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -295,8 +295,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, if (unlock_resv) dma_resv_unlock(bo->base.resv); - ttm_bo_put(bo); - return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c index c7a1862f322a..ae2f19dc9f81 100644 --- a/drivers/gpu/drm/ttm/ttm_device.c +++ b/drivers/gpu/drm/ttm/ttm_device.c @@ -158,7 +158,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx, struct ttm_buffer_object *bo = res->bo; uint32_t num_pages; - if (!bo) + if (!bo || bo->resource != res) continue; num_pages = PFN_UP(bo->base.size); diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c index a04a9b20896d..1778a2081fd6 100644 --- a/drivers/gpu/drm/virtio/virtgpu_vq.c +++ b/drivers/gpu/drm/virtio/virtgpu_vq.c @@ -604,7 +604,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); if (virtio_gpu_is_shmem(bo) && use_dma_api) - dma_sync_sgtable_for_device(&vgdev->vdev->dev, + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, bo->base.sgt, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); @@ -1026,7 +1026,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev, bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev); if (virtio_gpu_is_shmem(bo) && use_dma_api) - dma_sync_sgtable_for_device(&vgdev->vdev->dev, + dma_sync_sgtable_for_device(vgdev->vdev->dev.parent, bo->base.sgt, DMA_TO_DEVICE); cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p)); diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c index 51b3d16c3223..6e4c92b500b8 100644 --- a/drivers/hwmon/adt7475.c +++ b/drivers/hwmon/adt7475.c @@ -488,10 +488,10 @@ static ssize_t temp_store(struct device *dev, struct device_attribute *attr, val = (temp - val) / 1000; if (sattr->index != 1) { - data->temp[HYSTERSIS][sattr->index] &= 0xF0; + data->temp[HYSTERSIS][sattr->index] &= 0x0F; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF) << 4; } else { - data->temp[HYSTERSIS][sattr->index] &= 0x0F; + data->temp[HYSTERSIS][sattr->index] &= 0xF0; data->temp[HYSTERSIS][sattr->index] |= (val & 0xF); } @@ -556,11 +556,11 @@ static ssize_t temp_st_show(struct device *dev, struct device_attribute *attr, val = data->enh_acoustics[0] & 0xf; break; case 1: - val = (data->enh_acoustics[1] >> 4) & 0xf; + val = data->enh_acoustics[1] & 0xf; break; case 2: default: - val = data->enh_acoustics[1] & 0xf; + val = (data->enh_acoustics[1] >> 4) & 0xf; break; } diff --git a/drivers/hwmon/ina3221.c b/drivers/hwmon/ina3221.c index e06186986444..f3a4c5633b1e 100644 --- a/drivers/hwmon/ina3221.c +++ b/drivers/hwmon/ina3221.c @@ -772,7 +772,7 @@ static int ina3221_probe_child_from_dt(struct device *dev, return ret; } else if (val > INA3221_CHANNEL3) { dev_err(dev, "invalid reg %d of %pOFn\n", val, child); - return ret; + return -EINVAL; } input = 
&ina->inputs[val]; diff --git a/drivers/hwmon/ltc2992.c b/drivers/hwmon/ltc2992.c index 88514152d930..69341de397cb 100644 --- a/drivers/hwmon/ltc2992.c +++ b/drivers/hwmon/ltc2992.c @@ -323,6 +323,7 @@ static int ltc2992_config_gpio(struct ltc2992_state *st) st->gc.label = name; st->gc.parent = &st->client->dev; st->gc.owner = THIS_MODULE; + st->gc.can_sleep = true; st->gc.base = -1; st->gc.names = st->gpio_names; st->gc.ngpio = ARRAY_SIZE(st->gpio_names); diff --git a/drivers/hwmon/pmbus/adm1266.c b/drivers/hwmon/pmbus/adm1266.c index ec5f932fc6f0..1ac2b2f4c570 100644 --- a/drivers/hwmon/pmbus/adm1266.c +++ b/drivers/hwmon/pmbus/adm1266.c @@ -301,6 +301,7 @@ static int adm1266_config_gpio(struct adm1266_data *data) data->gc.label = name; data->gc.parent = &data->client->dev; data->gc.owner = THIS_MODULE; + data->gc.can_sleep = true; data->gc.base = -1; data->gc.names = data->gpio_names; data->gc.ngpio = ARRAY_SIZE(data->gpio_names); diff --git a/drivers/hwmon/pmbus/ucd9000.c b/drivers/hwmon/pmbus/ucd9000.c index 75fc770c9e40..3daaf2237832 100644 --- a/drivers/hwmon/pmbus/ucd9000.c +++ b/drivers/hwmon/pmbus/ucd9000.c @@ -7,6 +7,7 @@ */ #include <linux/debugfs.h> +#include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_device.h> @@ -16,6 +17,7 @@ #include <linux/i2c.h> #include <linux/pmbus.h> #include <linux/gpio/driver.h> +#include <linux/timekeeping.h> #include "pmbus.h" enum chips { ucd9000, ucd90120, ucd90124, ucd90160, ucd90320, ucd9090, @@ -65,6 +67,7 @@ struct ucd9000_data { struct gpio_chip gpio; #endif struct dentry *debugfs; + ktime_t write_time; }; #define to_ucd9000_data(_info) container_of(_info, struct ucd9000_data, info) @@ -73,6 +76,73 @@ struct ucd9000_debugfs_entry { u8 index; }; +/* + * It has been observed that the UCD90320 randomly fails register access when + * doing another access right on the back of a register write. To mitigate this + * make sure that there is a minimum delay between a write access and the + * following access. The 250us is based on experimental data. At a delay of + * 200us the issue seems to go away. Add a bit of extra margin to allow for + * system to system differences. 
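[Editorial sketch, not part of the ucd9000 patch: the rationale above and the helpers that follow implement a generic "enforce a minimum quiet period after the last write" pattern built on ktime. A stripped-down version of the same idea is shown here; struct my_dev and the helper names are made up for illustration, and the 250 us figure is simply reused from the comment.]

        #include <linux/ktime.h>
        #include <linux/delay.h>

        struct my_dev {
                ktime_t last_write;             /* time of the most recent write */
        };

        /* Busy-wait until at least min_gap_us have elapsed since the last write. */
        static void my_dev_wait_after_write(struct my_dev *dev, unsigned int min_gap_us)
        {
                s64 elapsed = ktime_us_delta(ktime_get(), dev->last_write);

                if (elapsed < min_gap_us)
                        udelay(min_gap_us - elapsed);
        }

        /* Call immediately after every write so the next access honours the gap. */
        static void my_dev_note_write(struct my_dev *dev)
        {
                dev->last_write = ktime_get();
        }

[The driver uses udelay() for this sub-millisecond gap; in a context that is allowed to sleep, usleep_range() would be the usual alternative.]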
+ */ +#define UCD90320_WAIT_DELAY_US 250 + +static inline void ucd90320_wait(const struct ucd9000_data *data) +{ + s64 delta = ktime_us_delta(ktime_get(), data->write_time); + + if (delta < UCD90320_WAIT_DELAY_US) + udelay(UCD90320_WAIT_DELAY_US - delta); +} + +static int ucd90320_read_word_data(struct i2c_client *client, int page, + int phase, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + + if (reg >= PMBUS_VIRT_BASE) + return -ENXIO; + + ucd90320_wait(data); + return pmbus_read_word_data(client, page, phase, reg); +} + +static int ucd90320_read_byte_data(struct i2c_client *client, int page, int reg) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + + ucd90320_wait(data); + return pmbus_read_byte_data(client, page, reg); +} + +static int ucd90320_write_word_data(struct i2c_client *client, int page, + int reg, u16 word) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + int ret; + + ucd90320_wait(data); + ret = pmbus_write_word_data(client, page, reg, word); + data->write_time = ktime_get(); + + return ret; +} + +static int ucd90320_write_byte(struct i2c_client *client, int page, u8 value) +{ + const struct pmbus_driver_info *info = pmbus_get_driver_info(client); + struct ucd9000_data *data = to_ucd9000_data(info); + int ret; + + ucd90320_wait(data); + ret = pmbus_write_byte(client, page, value); + data->write_time = ktime_get(); + + return ret; +} + static int ucd9000_get_fan_config(struct i2c_client *client, int fan) { int fan_config = 0; @@ -598,6 +668,11 @@ static int ucd9000_probe(struct i2c_client *client) info->read_byte_data = ucd9000_read_byte_data; info->func[0] |= PMBUS_HAVE_FAN12 | PMBUS_HAVE_STATUS_FAN12 | PMBUS_HAVE_FAN34 | PMBUS_HAVE_STATUS_FAN34; + } else if (mid->driver_data == ucd90320) { + info->read_byte_data = ucd90320_read_byte_data; + info->read_word_data = ucd90320_read_word_data; + info->write_byte = ucd90320_write_byte; + info->write_word_data = ucd90320_write_word_data; } ucd9000_probe_gpio(client, mid, data); diff --git a/drivers/hwmon/tmp513.c b/drivers/hwmon/tmp513.c index 47bbe47e062f..7d5f7441aceb 100644 --- a/drivers/hwmon/tmp513.c +++ b/drivers/hwmon/tmp513.c @@ -758,7 +758,7 @@ static int tmp51x_probe(struct i2c_client *client) static struct i2c_driver tmp51x_driver = { .driver = { .name = "tmp51x", - .of_match_table = of_match_ptr(tmp51x_of_match), + .of_match_table = tmp51x_of_match, }, .probe_new = tmp51x_probe, .id_table = tmp51x_id, diff --git a/drivers/hwmon/xgene-hwmon.c b/drivers/hwmon/xgene-hwmon.c index 5cde837bfd09..d1abea49f01b 100644 --- a/drivers/hwmon/xgene-hwmon.c +++ b/drivers/hwmon/xgene-hwmon.c @@ -761,6 +761,7 @@ static int xgene_hwmon_remove(struct platform_device *pdev) { struct xgene_hwmon_dev *ctx = platform_get_drvdata(pdev); + cancel_work_sync(&ctx->workq); hwmon_device_unregister(ctx->hwmon_dev); kfifo_free(&ctx->async_msg_fifo); if (acpi_disabled) diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 5f1e2593fad7..b0a22e99bade 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -15,6 +15,10 @@ if MD config BLK_DEV_MD tristate "RAID support" select BLOCK_HOLDER_DEPRECATED if SYSFS + # BLOCK_LEGACY_AUTOLOAD requirement should be removed + # after relevant mdadm enhancements - to make "names=yes" + # the default - are widely available. 
+ select BLOCK_LEGACY_AUTOLOAD help This driver lets you combine several hard disk partitions into one logical block device. This can be used to simply append one diff --git a/drivers/md/dm.c b/drivers/md/dm.c index eace45a18d45..f5cc330bb549 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -512,10 +512,10 @@ static void dm_io_acct(struct dm_io *io, bool end) sectors = io->sectors; if (!end) - bdev_start_io_acct(bio->bi_bdev, sectors, bio_op(bio), - start_time); + bdev_start_io_acct(bio->bi_bdev, bio_op(bio), start_time); else - bdev_end_io_acct(bio->bi_bdev, bio_op(bio), start_time); + bdev_end_io_acct(bio->bi_bdev, bio_op(bio), sectors, + start_time); if (static_branch_unlikely(&stats_enabled) && unlikely(dm_stats_used(&md->stats))) { diff --git a/drivers/md/md.c b/drivers/md/md.c index 927a43db5dfb..39e49e5d7182 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -3128,6 +3128,9 @@ slot_store(struct md_rdev *rdev, const char *buf, size_t len) err = kstrtouint(buf, 10, (unsigned int *)&slot); if (err < 0) return err; + if (slot < 0) + /* overflow */ + return -ENOSPC; } if (rdev->mddev->pers && slot == -1) { /* Setting 'slot' on an active array requires also @@ -6256,6 +6259,11 @@ static void __md_stop(struct mddev *mddev) mddev->to_remove = &md_redundancy_group; module_put(pers->owner); clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); + + percpu_ref_exit(&mddev->writes_pending); + percpu_ref_exit(&mddev->active_io); + bioset_exit(&mddev->bio_set); + bioset_exit(&mddev->sync_set); } void md_stop(struct mddev *mddev) @@ -6265,10 +6273,6 @@ void md_stop(struct mddev *mddev) */ __md_stop_writes(mddev); __md_stop(mddev); - percpu_ref_exit(&mddev->writes_pending); - percpu_ref_exit(&mddev->active_io); - bioset_exit(&mddev->bio_set); - bioset_exit(&mddev->sync_set); } EXPORT_SYMBOL_GPL(md_stop); @@ -7839,11 +7843,6 @@ static void md_free_disk(struct gendisk *disk) { struct mddev *mddev = disk->private_data; - percpu_ref_exit(&mddev->writes_pending); - percpu_ref_exit(&mddev->active_io); - bioset_exit(&mddev->bio_set); - bioset_exit(&mddev->sync_set); - mddev_free(mddev); } diff --git a/drivers/media/i2c/m5mols/m5mols_core.c b/drivers/media/i2c/m5mols/m5mols_core.c index 2b01873ba0db..5c2336f318d9 100644 --- a/drivers/media/i2c/m5mols/m5mols_core.c +++ b/drivers/media/i2c/m5mols/m5mols_core.c @@ -488,7 +488,7 @@ static enum m5mols_restype __find_restype(u32 code) do { if (code == m5mols_default_ffmt[type].code) return type; - } while (type++ != SIZE_DEFAULT_FFMT); + } while (++type != SIZE_DEFAULT_FFMT); return 0; } diff --git a/drivers/mmc/host/dw_mmc-starfive.c b/drivers/mmc/host/dw_mmc-starfive.c index 40f5969b07a6..dab1508bf83c 100644 --- a/drivers/mmc/host/dw_mmc-starfive.c +++ b/drivers/mmc/host/dw_mmc-starfive.c @@ -51,7 +51,7 @@ static int dw_mci_starfive_execute_tuning(struct dw_mci_slot *slot, struct dw_mci *host = slot->host; struct starfive_priv *priv = host->priv; int rise_point = -1, fall_point = -1; - int err, prev_err; + int err, prev_err = 0; int i; bool found = 0; u32 regval; diff --git a/drivers/mmc/host/sdhci_am654.c b/drivers/mmc/host/sdhci_am654.c index 7ef828942df3..89953093e20c 100644 --- a/drivers/mmc/host/sdhci_am654.c +++ b/drivers/mmc/host/sdhci_am654.c @@ -369,7 +369,7 @@ static void sdhci_am654_write_b(struct sdhci_host *host, u8 val, int reg) MAX_POWER_ON_TIMEOUT, false, host, val, reg); if (ret) - dev_warn(mmc_dev(host->mmc), "Power on failed\n"); + dev_info(mmc_dev(host->mmc), "Power on failed\n"); } } diff --git a/drivers/net/bonding/bond_main.c 
b/drivers/net/bonding/bond_main.c index 00646aa315c3..236e5219c811 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1775,6 +1775,19 @@ void bond_lower_state_changed(struct slave *slave) slave_err(bond_dev, slave_dev, "Error: %s\n", errmsg); \ } while (0) +/* The bonding driver uses ether_setup() to convert a master bond device + * to ARPHRD_ETHER, that resets the target netdevice's flags so we always + * have to restore the IFF_MASTER flag, and only restore IFF_SLAVE if it was set + */ +static void bond_ether_setup(struct net_device *bond_dev) +{ + unsigned int slave_flag = bond_dev->flags & IFF_SLAVE; + + ether_setup(bond_dev); + bond_dev->flags |= IFF_MASTER | slave_flag; + bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; +} + /* enslave device <slave> to bond device <master> */ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, struct netlink_ext_ack *extack) @@ -1866,10 +1879,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev, if (slave_dev->type != ARPHRD_ETHER) bond_setup_by_slave(bond_dev, slave_dev); - else { - ether_setup(bond_dev); - bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; - } + else + bond_ether_setup(bond_dev); call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, bond_dev); @@ -2289,9 +2300,7 @@ err_undo_flags: eth_hw_addr_random(bond_dev); if (bond_dev->type != ARPHRD_ETHER) { dev_close(bond_dev); - ether_setup(bond_dev); - bond_dev->flags |= IFF_MASTER; - bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING; + bond_ether_setup(bond_dev); } } diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c index 8d916e2ee6c2..8dcc32e4e30e 100644 --- a/drivers/net/can/cc770/cc770_platform.c +++ b/drivers/net/can/cc770/cc770_platform.c @@ -93,20 +93,20 @@ static int cc770_get_of_node_data(struct platform_device *pdev, if (priv->can.clock.freq > 8000000) priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,divide-memory-clock", NULL)) + if (of_property_read_bool(np, "bosch,divide-memory-clock")) priv->cpu_interface |= CPUIF_DMC; - if (of_get_property(np, "bosch,iso-low-speed-mux", NULL)) + if (of_property_read_bool(np, "bosch,iso-low-speed-mux")) priv->cpu_interface |= CPUIF_MUX; if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) priv->bus_config |= BUSCFG_CBY; - if (of_get_property(np, "bosch,disconnect-rx0-input", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-rx0-input")) priv->bus_config |= BUSCFG_DR0; - if (of_get_property(np, "bosch,disconnect-rx1-input", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-rx1-input")) priv->bus_config |= BUSCFG_DR1; - if (of_get_property(np, "bosch,disconnect-tx1-output", NULL)) + if (of_property_read_bool(np, "bosch,disconnect-tx1-output")) priv->bus_config |= BUSCFG_DT1; - if (of_get_property(np, "bosch,polarity-dominant", NULL)) + if (of_property_read_bool(np, "bosch,polarity-dominant")) priv->bus_config |= BUSCFG_POL; prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 729b36eeb2c4..7fc2155d93d6 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -319,7 +319,7 @@ static const u16 ksz8795_regs[] = { [S_BROADCAST_CTRL] = 0x06, [S_MULTICAST_CTRL] = 0x04, [P_XMII_CTRL_0] = 0x06, - [P_XMII_CTRL_1] = 0x56, + [P_XMII_CTRL_1] = 0x06, }; static const u32 ksz8795_masks[] = { diff --git a/drivers/net/dsa/mt7530.c b/drivers/net/dsa/mt7530.c 
index a508402c4ecb..c2d81b7a429d 100644 --- a/drivers/net/dsa/mt7530.c +++ b/drivers/net/dsa/mt7530.c @@ -430,8 +430,6 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) switch (interface) { case PHY_INTERFACE_MODE_RGMII: trgint = 0; - /* PLL frequency: 125MHz */ - ncpo1 = 0x0c80; break; case PHY_INTERFACE_MODE_TRGMII: trgint = 1; @@ -462,38 +460,40 @@ mt7530_pad_clk_setup(struct dsa_switch *ds, phy_interface_t interface) mt7530_rmw(priv, MT7530_P6ECR, P6_INTF_MODE_MASK, P6_INTF_MODE(trgint)); - /* Lower Tx Driving for TRGMII path */ - for (i = 0 ; i < NUM_TRGMII_CTRL ; i++) - mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), - TD_DM_DRVP(8) | TD_DM_DRVN(8)); - - /* Disable MT7530 core and TRGMII Tx clocks */ - core_clear(priv, CORE_TRGMII_GSW_CLK_CG, - REG_GSWCK_EN | REG_TRGMIICK_EN); - - /* Setup the MT7530 TRGMII Tx Clock */ - core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); - core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); - core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); - core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); - core_write(priv, CORE_PLL_GROUP4, - RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN | - RG_SYSPLL_BIAS_LPF_EN); - core_write(priv, CORE_PLL_GROUP2, - RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN | - RG_SYSPLL_POSDIV(1)); - core_write(priv, CORE_PLL_GROUP7, - RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) | - RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); - - /* Enable MT7530 core and TRGMII Tx clocks */ - core_set(priv, CORE_TRGMII_GSW_CLK_CG, - REG_GSWCK_EN | REG_TRGMIICK_EN); - - if (!trgint) + if (trgint) { + /* Lower Tx Driving for TRGMII path */ + for (i = 0 ; i < NUM_TRGMII_CTRL ; i++) + mt7530_write(priv, MT7530_TRGMII_TD_ODT(i), + TD_DM_DRVP(8) | TD_DM_DRVN(8)); + + /* Disable MT7530 core and TRGMII Tx clocks */ + core_clear(priv, CORE_TRGMII_GSW_CLK_CG, + REG_GSWCK_EN | REG_TRGMIICK_EN); + + /* Setup the MT7530 TRGMII Tx Clock */ + core_write(priv, CORE_PLL_GROUP5, RG_LCDDS_PCW_NCPO1(ncpo1)); + core_write(priv, CORE_PLL_GROUP6, RG_LCDDS_PCW_NCPO0(0)); + core_write(priv, CORE_PLL_GROUP10, RG_LCDDS_SSC_DELTA(ssc_delta)); + core_write(priv, CORE_PLL_GROUP11, RG_LCDDS_SSC_DELTA1(ssc_delta)); + core_write(priv, CORE_PLL_GROUP4, + RG_SYSPLL_DDSFBK_EN | RG_SYSPLL_BIAS_EN | + RG_SYSPLL_BIAS_LPF_EN); + core_write(priv, CORE_PLL_GROUP2, + RG_SYSPLL_EN_NORMAL | RG_SYSPLL_VODEN | + RG_SYSPLL_POSDIV(1)); + core_write(priv, CORE_PLL_GROUP7, + RG_LCDDS_PCW_NCPO_CHG | RG_LCCDS_C(3) | + RG_LCDDS_PWDB | RG_LCDDS_ISO_EN); + + /* Enable MT7530 core and TRGMII Tx clocks */ + core_set(priv, CORE_TRGMII_GSW_CLK_CG, + REG_GSWCK_EN | REG_TRGMIICK_EN); + } else { for (i = 0 ; i < NUM_TRGMII_CTRL; i++) mt7530_rmw(priv, MT7530_TRGMII_RD(i), RD_TAP_MASK, RD_TAP(16)); + } + return 0; } @@ -2201,7 +2201,7 @@ mt7530_setup(struct dsa_switch *ds) mt7530_pll_setup(priv); - /* Enable Port 6 only; P5 as GMAC5 which currently is not supported */ + /* Enable port 6 */ val = mt7530_read(priv, MT7530_MHWTRAP); val &= ~MHWTRAP_P6_DIS & ~MHWTRAP_PHY_ACCESS; val |= MHWTRAP_MANUAL; diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c index 0a5d6c7bb128..30383c4f8fd0 100644 --- a/drivers/net/dsa/mv88e6xxx/chip.c +++ b/drivers/net/dsa/mv88e6xxx/chip.c @@ -3549,7 +3549,7 @@ static int mv88e6xxx_get_max_mtu(struct dsa_switch *ds, int port) return 10240 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; else if (chip->info->ops->set_max_frame_size) return 1632 - VLAN_ETH_HLEN - EDSA_HLEN - ETH_FCS_LEN; - return 1522 - VLAN_ETH_HLEN - EDSA_HLEN - 
ETH_FCS_LEN; + return ETH_DATA_LEN; } static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) @@ -3557,6 +3557,17 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) struct mv88e6xxx_chip *chip = ds->priv; int ret = 0; + /* For families where we don't know how to alter the MTU, + * just accept any value up to ETH_DATA_LEN + */ + if (!chip->info->ops->port_set_jumbo_size && + !chip->info->ops->set_max_frame_size) { + if (new_mtu > ETH_DATA_LEN) + return -EINVAL; + + return 0; + } + if (dsa_is_dsa_port(ds, port) || dsa_is_cpu_port(ds, port)) new_mtu += EDSA_HLEN; @@ -3565,9 +3576,6 @@ static int mv88e6xxx_change_mtu(struct dsa_switch *ds, int port, int new_mtu) ret = chip->info->ops->port_set_jumbo_size(chip, port, new_mtu); else if (chip->info->ops->set_max_frame_size) ret = chip->info->ops->set_max_frame_size(chip, new_mtu); - else - if (new_mtu > 1522) - ret = -EINVAL; mv88e6xxx_reg_unlock(chip); return ret; diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c index 8da79eedc057..1d4f2f4d10f2 100644 --- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c +++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c @@ -850,11 +850,20 @@ static int ena_set_channels(struct net_device *netdev, struct ena_adapter *adapter = netdev_priv(netdev); u32 count = channels->combined_count; /* The check for max value is already done in ethtool */ - if (count < ENA_MIN_NUM_IO_QUEUES || - (ena_xdp_present(adapter) && - !ena_xdp_legal_queue_count(adapter, count))) + if (count < ENA_MIN_NUM_IO_QUEUES) return -EINVAL; + if (!ena_xdp_legal_queue_count(adapter, count)) { + if (ena_xdp_present(adapter)) + return -EINVAL; + + xdp_clear_features_flag(netdev); + } else { + xdp_set_features_flag(netdev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT); + } + return ena_update_queue_count(adapter, count); } diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c index d3999db7c6a2..cbfe7f977270 100644 --- a/drivers/net/ethernet/amazon/ena/ena_netdev.c +++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c @@ -4105,8 +4105,6 @@ static void ena_set_conf_feat_params(struct ena_adapter *adapter, /* Set offload features */ ena_set_dev_offloads(feat, netdev); - netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT; - adapter->max_mtu = feat->dev_attr.max_mtu; netdev->max_mtu = adapter->max_mtu; netdev->min_mtu = ENA_MIN_MTU; @@ -4393,6 +4391,10 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ena_config_debug_area(adapter); + if (ena_xdp_legal_queue_count(adapter, adapter->num_io_queues)) + netdev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT; + memcpy(adapter->netdev->perm_addr, adapter->mac_addr, netdev->addr_len); netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c index 1e8d902e1c8e..7f933175cbda 100644 --- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c +++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c @@ -412,6 +412,25 @@ int aq_xdp_xmit(struct net_device *dev, int num_frames, return num_frames - drop; } +static struct sk_buff *aq_xdp_build_skb(struct xdp_buff *xdp, + struct net_device *dev, + struct aq_ring_buff_s *buff) +{ + struct xdp_frame *xdpf; + struct sk_buff *skb; + + xdpf = xdp_convert_buff_to_frame(xdp); + if (unlikely(!xdpf)) + return NULL; + + skb = xdp_build_skb_from_frame(xdpf, dev); + if (!skb) + return 
NULL; + + aq_get_rxpages_xdp(buff, xdp); + return skb; +} + static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, struct xdp_buff *xdp, struct aq_ring_s *rx_ring, @@ -431,7 +450,7 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, prog = READ_ONCE(rx_ring->xdp_prog); if (!prog) - goto pass; + return aq_xdp_build_skb(xdp, aq_nic->ndev, buff); prefetchw(xdp->data_hard_start); /* xdp_frame write */ @@ -442,17 +461,12 @@ static struct sk_buff *aq_xdp_run_prog(struct aq_nic_s *aq_nic, act = bpf_prog_run_xdp(prog, xdp); switch (act) { case XDP_PASS: -pass: - xdpf = xdp_convert_buff_to_frame(xdp); - if (unlikely(!xdpf)) - goto out_aborted; - skb = xdp_build_skb_from_frame(xdpf, aq_nic->ndev); + skb = aq_xdp_build_skb(xdp, aq_nic->ndev, buff); if (!skb) goto out_aborted; u64_stats_update_begin(&rx_ring->stats.rx.syncp); ++rx_ring->stats.rx.xdp_pass; u64_stats_update_end(&rx_ring->stats.rx.syncp); - aq_get_rxpages_xdp(buff, xdp); return skb; case XDP_TX: xdpf = xdp_convert_buff_to_frame(xdp); diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index 808236dc898b..e2e2c986c82b 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -6990,11 +6990,9 @@ static int bnxt_hwrm_func_qcfg(struct bnxt *bp) if (flags & FUNC_QCFG_RESP_FLAGS_FW_DCBX_AGENT_ENABLED) bp->fw_cap |= BNXT_FW_CAP_DCBX_AGENT; } - if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) { + if (BNXT_PF(bp) && (flags & FUNC_QCFG_RESP_FLAGS_MULTI_HOST)) bp->flags |= BNXT_FLAG_MULTI_HOST; - if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) - bp->fw_cap &= ~BNXT_FW_CAP_PTP_RTC; - } + if (flags & FUNC_QCFG_RESP_FLAGS_RING_MONITOR_ENABLED) bp->fw_cap |= BNXT_FW_CAP_RING_MONITOR; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index dcb09fbe4007..c0628ac1b798 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -2000,6 +2000,8 @@ struct bnxt { u32 fw_dbg_cap; #define BNXT_NEW_RM(bp) ((bp)->fw_cap & BNXT_FW_CAP_NEW_RM) +#define BNXT_PTP_USE_RTC(bp) (!BNXT_MH(bp) && \ + ((bp)->fw_cap & BNXT_FW_CAP_PTP_RTC)) u32 hwrm_spec_code; u16 hwrm_cmd_seq; u16 hwrm_cmd_kong_seq; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c index 4ec8bba18cdd..a3a3978a4d1c 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ptp.c @@ -63,7 +63,7 @@ static int bnxt_ptp_settime(struct ptp_clock_info *ptp_info, ptp_info); u64 ns = timespec64_to_ns(ts); - if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC) + if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_cfg_settime(ptp->bp, ns); spin_lock_bh(&ptp->ptp_lock); @@ -196,7 +196,7 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - if (ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC) + if (BNXT_PTP_USE_RTC(ptp->bp)) return bnxt_ptp_adjphc(ptp, delta); spin_lock_bh(&ptp->ptp_lock); @@ -205,34 +205,39 @@ static int bnxt_ptp_adjtime(struct ptp_clock_info *ptp_info, s64 delta) return 0; } +static int bnxt_ptp_adjfine_rtc(struct bnxt *bp, long scaled_ppm) +{ + s32 ppb = scaled_ppm_to_ppb(scaled_ppm); + struct hwrm_port_mac_cfg_input *req; + int rc; + + rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); + if (rc) + return rc; + + req->ptp_freq_adj_ppb = cpu_to_le32(ppb); + req->enables = 
cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); + rc = hwrm_req_send(bp, req); + if (rc) + netdev_err(bp->dev, + "ptp adjfine failed. rc = %d\n", rc); + return rc; +} + static int bnxt_ptp_adjfine(struct ptp_clock_info *ptp_info, long scaled_ppm) { struct bnxt_ptp_cfg *ptp = container_of(ptp_info, struct bnxt_ptp_cfg, ptp_info); - struct hwrm_port_mac_cfg_input *req; struct bnxt *bp = ptp->bp; - int rc = 0; - if (!(ptp->bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) { - spin_lock_bh(&ptp->ptp_lock); - timecounter_read(&ptp->tc); - ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); - spin_unlock_bh(&ptp->ptp_lock); - } else { - s32 ppb = scaled_ppm_to_ppb(scaled_ppm); - - rc = hwrm_req_init(bp, req, HWRM_PORT_MAC_CFG); - if (rc) - return rc; + if (BNXT_PTP_USE_RTC(bp)) + return bnxt_ptp_adjfine_rtc(bp, scaled_ppm); - req->ptp_freq_adj_ppb = cpu_to_le32(ppb); - req->enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_PTP_FREQ_ADJ_PPB); - rc = hwrm_req_send(ptp->bp, req); - if (rc) - netdev_err(ptp->bp->dev, - "ptp adjfine failed. rc = %d\n", rc); - } - return rc; + spin_lock_bh(&ptp->ptp_lock); + timecounter_read(&ptp->tc); + ptp->cc.mult = adjust_by_scaled_ppm(ptp->cmult, scaled_ppm); + spin_unlock_bh(&ptp->ptp_lock); + return 0; } void bnxt_ptp_pps_event(struct bnxt *bp, u32 data1, u32 data2) @@ -879,7 +884,7 @@ int bnxt_ptp_init_rtc(struct bnxt *bp, bool phc_cfg) u64 ns; int rc; - if (!bp->ptp_cfg || !(bp->fw_cap & BNXT_FW_CAP_PTP_RTC)) + if (!bp->ptp_cfg || !BNXT_PTP_USE_RTC(bp)) return -ENODEV; if (!phc_cfg) { @@ -932,13 +937,14 @@ int bnxt_ptp_init(struct bnxt *bp, bool phc_cfg) atomic_set(&ptp->tx_avail, BNXT_MAX_TX_TS); spin_lock_init(&ptp->ptp_lock); - if (bp->fw_cap & BNXT_FW_CAP_PTP_RTC) { + if (BNXT_PTP_USE_RTC(bp)) { bnxt_ptp_timecounter_init(bp, false); rc = bnxt_ptp_init_rtc(bp, phc_cfg); if (rc) goto out; } else { bnxt_ptp_timecounter_init(bp, true); + bnxt_ptp_adjfine_rtc(bp, 0); } ptp->ptp_info = bnxt_ptp_caps; diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 6e141a8bbf43..66e30561569e 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -4990,7 +4990,7 @@ static int macb_probe(struct platform_device *pdev) bp->jumbo_max_len = macb_config->jumbo_max_len; bp->wol = 0; - if (of_get_property(np, "magic-packet", NULL)) + if (of_property_read_bool(np, "magic-packet")) bp->wol |= MACB_WOL_HAS_MAGIC_PACKET; device_set_wakeup_capable(&pdev->dev, bp->wol & MACB_WOL_HAS_MAGIC_PACKET); diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c index e5c71f907852..d8d71bf97983 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c @@ -735,12 +735,17 @@ static int nicvf_set_channels(struct net_device *dev, if (channel->tx_count > nic->max_queues) return -EINVAL; - if (nic->xdp_prog && - ((channel->tx_count + channel->rx_count) > nic->max_queues)) { - netdev_err(nic->netdev, - "XDP mode, RXQs + TXQs > Max %d\n", - nic->max_queues); - return -EINVAL; + if (channel->tx_count + channel->rx_count > nic->max_queues) { + if (nic->xdp_prog) { + netdev_err(nic->netdev, + "XDP mode, RXQs + TXQs > Max %d\n", + nic->max_queues); + return -EINVAL; + } + + xdp_clear_features_flag(nic->netdev); + } else if (!pass1_silicon(nic->pdev)) { + xdp_set_features_flag(dev, NETDEV_XDP_ACT_BASIC); } if (if_up) diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c 
b/drivers/net/ethernet/cavium/thunder/nicvf_main.c index 8b25313c7f6b..eff350e0bc2a 100644 --- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c +++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c @@ -2218,7 +2218,9 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netdev->netdev_ops = &nicvf_netdev_ops; netdev->watchdog_timeo = NICVF_TX_TIMEOUT; - netdev->xdp_features = NETDEV_XDP_ACT_BASIC; + if (!pass1_silicon(nic->pdev) && + nic->rx_queues + nic->tx_queues <= nic->max_queues) + netdev->xdp_features = NETDEV_XDP_ACT_BASIC; /* MTU range: 64 - 9200 */ netdev->min_mtu = NIC_HW_MIN_FRS; diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c index b21e56de6167..05a89ab6766c 100644 --- a/drivers/net/ethernet/davicom/dm9000.c +++ b/drivers/net/ethernet/davicom/dm9000.c @@ -1393,9 +1393,9 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev) if (!pdata) return ERR_PTR(-ENOMEM); - if (of_find_property(np, "davicom,ext-phy", NULL)) + if (of_property_read_bool(np, "davicom,ext-phy")) pdata->flags |= DM9000_PLATF_EXT_PHY; - if (of_find_property(np, "davicom,no-eeprom", NULL)) + if (of_property_read_bool(np, "davicom,no-eeprom")) pdata->flags |= DM9000_PLATF_NO_EEPROM; ret = of_get_mac_address(np, pdata->dev_addr); diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index c73e25f8995e..f3b16a6673e2 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -4251,7 +4251,7 @@ fec_probe(struct platform_device *pdev) if (ret) goto failed_ipc_init; - if (of_get_property(np, "fsl,magic-packet", NULL)) + if (of_property_read_bool(np, "fsl,magic-packet")) fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET; ret = fec_enet_init_stop_mode(fep, np); diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c index a7f4c3c29f3e..b88816b71ddf 100644 --- a/drivers/net/ethernet/freescale/fec_mpc52xx.c +++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c @@ -937,7 +937,7 @@ static int mpc52xx_fec_probe(struct platform_device *op) priv->phy_node = of_parse_phandle(np, "phy-handle", 0); /* the 7-wire property means don't use MII mode */ - if (of_find_property(np, "fsl,7-wire-mode", NULL)) { + if (of_property_read_bool(np, "fsl,7-wire-mode")) { priv->seven_wire_mode = 1; dev_info(&ndev->dev, "using 7-wire PHY mode\n"); } diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c index b2def295523a..38d5013c6fed 100644 --- a/drivers/net/ethernet/freescale/gianfar.c +++ b/drivers/net/ethernet/freescale/gianfar.c @@ -787,10 +787,10 @@ static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev) else priv->interface = gfar_get_interface(dev); - if (of_find_property(np, "fsl,magic-packet", NULL)) + if (of_property_read_bool(np, "fsl,magic-packet")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET; - if (of_get_property(np, "fsl,wake-on-filer", NULL)) + if (of_property_read_bool(np, "fsl,wake-on-filer")) priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER; priv->phy_node = of_parse_phandle(np, "phy-handle", 0); diff --git a/drivers/net/ethernet/i825xx/sni_82596.c b/drivers/net/ethernet/i825xx/sni_82596.c index daec9ce04531..54bb4d9a0d1e 100644 --- a/drivers/net/ethernet/i825xx/sni_82596.c +++ b/drivers/net/ethernet/i825xx/sni_82596.c @@ -78,6 +78,7 @@ static int sni_82596_probe(struct platform_device *dev) void __iomem *mpu_addr; void __iomem 
*ca_addr; u8 __iomem *eth_addr; + u8 mac[ETH_ALEN]; res = platform_get_resource(dev, IORESOURCE_MEM, 0); ca = platform_get_resource(dev, IORESOURCE_MEM, 1); @@ -109,12 +110,13 @@ static int sni_82596_probe(struct platform_device *dev) goto probe_failed; /* someone seems to like messed up stuff */ - netdevice->dev_addr[0] = readb(eth_addr + 0x0b); - netdevice->dev_addr[1] = readb(eth_addr + 0x0a); - netdevice->dev_addr[2] = readb(eth_addr + 0x09); - netdevice->dev_addr[3] = readb(eth_addr + 0x08); - netdevice->dev_addr[4] = readb(eth_addr + 0x07); - netdevice->dev_addr[5] = readb(eth_addr + 0x06); + mac[0] = readb(eth_addr + 0x0b); + mac[1] = readb(eth_addr + 0x0a); + mac[2] = readb(eth_addr + 0x09); + mac[3] = readb(eth_addr + 0x08); + mac[4] = readb(eth_addr + 0x07); + mac[5] = readb(eth_addr + 0x06); + eth_hw_addr_set(netdevice, mac); iounmap(eth_addr); if (netdevice->irq < 0) { diff --git a/drivers/net/ethernet/ibm/emac/core.c b/drivers/net/ethernet/ibm/emac/core.c index 9b08e41ccc29..c97095abd26a 100644 --- a/drivers/net/ethernet/ibm/emac/core.c +++ b/drivers/net/ethernet/ibm/emac/core.c @@ -2939,9 +2939,9 @@ static int emac_init_config(struct emac_instance *dev) } /* Fixup some feature bits based on the device tree */ - if (of_get_property(np, "has-inverted-stacr-oc", NULL)) + if (of_property_read_bool(np, "has-inverted-stacr-oc")) dev->features |= EMAC_FTR_STACR_OC_INVERT; - if (of_get_property(np, "has-new-stacr-staopc", NULL)) + if (of_property_read_bool(np, "has-new-stacr-staopc")) dev->features |= EMAC_FTR_HAS_NEW_STACR; /* CAB lacks the appropriate properties */ @@ -3042,7 +3042,7 @@ static int emac_probe(struct platform_device *ofdev) * property here for now, but new flat device trees should set a * status property to "disabled" instead. 
*/ - if (of_get_property(np, "unused", NULL) || !of_device_is_available(np)) + if (of_property_read_bool(np, "unused") || !of_device_is_available(np)) return -ENODEV; /* Find ourselves in the bootlist if we are there */ @@ -3333,7 +3333,7 @@ static void __init emac_make_bootlist(void) if (of_match_node(emac_match, np) == NULL) continue; - if (of_get_property(np, "unused", NULL)) + if (of_property_read_bool(np, "unused")) continue; idx = of_get_property(np, "cell-index", NULL); if (idx == NULL) diff --git a/drivers/net/ethernet/ibm/emac/rgmii.c b/drivers/net/ethernet/ibm/emac/rgmii.c index 242ef976fd15..50358cf00130 100644 --- a/drivers/net/ethernet/ibm/emac/rgmii.c +++ b/drivers/net/ethernet/ibm/emac/rgmii.c @@ -242,7 +242,7 @@ static int rgmii_probe(struct platform_device *ofdev) } /* Check for RGMII flags */ - if (of_get_property(ofdev->dev.of_node, "has-mdio", NULL)) + if (of_property_read_bool(ofdev->dev.of_node, "has-mdio")) dev->flags |= EMAC_RGMII_FLAG_HAS_MDIO; /* CAB lacks the right properties, fix this up */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 467001db5070..228cd502bb48 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -15525,6 +15525,7 @@ static int i40e_init_recovery_mode(struct i40e_pf *pf, struct i40e_hw *hw) int err; int v_idx; + pci_set_drvdata(pf->pdev, pf); pci_save_state(pf->pdev); /* set up periodic task facility */ diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h index b0e29e342401..e809249500e1 100644 --- a/drivers/net/ethernet/intel/ice/ice.h +++ b/drivers/net/ethernet/intel/ice/ice.h @@ -509,6 +509,7 @@ enum ice_pf_flags { ICE_FLAG_VF_VLAN_PRUNING, ICE_FLAG_LINK_LENIENT_MODE_ENA, ICE_FLAG_PLUG_AUX_DEV, + ICE_FLAG_UNPLUG_AUX_DEV, ICE_FLAG_MTU_CHANGED, ICE_FLAG_GNSS, /* GNSS successfully initialized */ ICE_PF_FLAGS_NBITS /* must be last */ @@ -955,16 +956,11 @@ static inline void ice_set_rdma_cap(struct ice_pf *pf) */ static inline void ice_clear_rdma_cap(struct ice_pf *pf) { - /* We can directly unplug aux device here only if the flag bit - * ICE_FLAG_PLUG_AUX_DEV is not set because ice_unplug_aux_dev() - * could race with ice_plug_aux_dev() called from - * ice_service_task(). In this case we only clear that bit now and - * aux device will be unplugged later once ice_plug_aux_device() - * called from ice_service_task() finishes (see ice_service_task()). 
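[Editorial sketch, not part of the ice patch: the rework here replaces the in-place unplug with a request flag that the service task consumes, deferring the work to avoid RTNL locking issues and making racing plug/unplug requests safe. The underlying mechanism is just atomic flag bits plus test_and_clear_bit(); a minimal stand-alone version with made-up names follows.]

        #include <linux/bitops.h>
        #include <linux/bitmap.h>

        enum { MY_FLAG_UNPLUG, MY_FLAG_PLUG, MY_NBITS };

        struct my_pf {
                DECLARE_BITMAP(flags, MY_NBITS);
        };

        static void my_do_unplug(struct my_pf *pf);    /* placeholders for the */
        static void my_do_plug(struct my_pf *pf);      /* deferred work itself */

        /* Fast path: only record the request; cheap and safe from most contexts. */
        static void my_request_unplug(struct my_pf *pf)
        {
                clear_bit(MY_FLAG_PLUG, pf->flags);     /* cancel a pending plug */
                set_bit(MY_FLAG_UNPLUG, pf->flags);
        }

        /* Service task: consume requests atomically, so a request racing with
         * this run is either handled now or stays set for the next run.
         */
        static void my_service_task(struct my_pf *pf)
        {
                if (test_and_clear_bit(MY_FLAG_UNPLUG, pf->flags))
                        my_do_unplug(pf);

                if (test_and_clear_bit(MY_FLAG_PLUG, pf->flags))
                        my_do_plug(pf);
        }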
+ /* defer unplug to service task to avoid RTNL lock and + * clear PLUG bit so that pending plugs don't interfere */ - if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) - ice_unplug_aux_dev(pf); - + clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags); + set_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags); clear_bit(ICE_FLAG_RDMA_ENA, pf->flags); } #endif /* _ICE_H_ */ diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c index 567694bf098b..c233464b8f6b 100644 --- a/drivers/net/ethernet/intel/ice/ice_main.c +++ b/drivers/net/ethernet/intel/ice/ice_main.c @@ -2316,18 +2316,15 @@ static void ice_service_task(struct work_struct *work) } } - if (test_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) { - /* Plug aux device per request */ - ice_plug_aux_dev(pf); + /* unplug aux dev per request, if an unplug request came in + * while processing a plug request, this will handle it + */ + if (test_and_clear_bit(ICE_FLAG_UNPLUG_AUX_DEV, pf->flags)) + ice_unplug_aux_dev(pf); - /* Mark plugging as done but check whether unplug was - * requested during ice_plug_aux_dev() call - * (e.g. from ice_clear_rdma_cap()) and if so then - * plug aux device. - */ - if (!test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) - ice_unplug_aux_dev(pf); - } + /* Plug aux device per request */ + if (test_and_clear_bit(ICE_FLAG_PLUG_AUX_DEV, pf->flags)) + ice_plug_aux_dev(pf); if (test_and_clear_bit(ICE_FLAG_MTU_CHANGED, pf->flags)) { struct iidc_event *event; diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c index 31565bbafa22..d1e489da7363 100644 --- a/drivers/net/ethernet/intel/ice/ice_xsk.c +++ b/drivers/net/ethernet/intel/ice/ice_xsk.c @@ -184,8 +184,6 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) } netif_tx_stop_queue(netdev_get_tx_queue(vsi->netdev, q_idx)); - ice_qvec_dis_irq(vsi, rx_ring, q_vector); - ice_fill_txq_meta(vsi, tx_ring, &txq_meta); err = ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, 0, tx_ring, &txq_meta); if (err) @@ -200,10 +198,11 @@ static int ice_qp_dis(struct ice_vsi *vsi, u16 q_idx) if (err) return err; } + ice_qvec_dis_irq(vsi, rx_ring, q_vector); + err = ice_vsi_ctrl_one_rx_ring(vsi, false, q_idx, true); if (err) return err; - ice_clean_rx_ring(rx_ring); ice_qvec_toggle_napi(vsi, q_vector, false); ice_qp_clean_rings(vsi, q_idx); diff --git a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c index 9b4ecbe4f36d..3ea00bc9b91c 100644 --- a/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c +++ b/drivers/net/ethernet/marvell/mvpp2/mvpp2_main.c @@ -4996,6 +4996,14 @@ static int mvpp2_bm_switch_buffers(struct mvpp2 *priv, bool percpu) for (i = 0; i < priv->port_count; i++) { port = priv->port_list[i]; + if (percpu && port->ntxqs >= num_possible_cpus() * 2) + xdp_set_features_flag(port->dev, + NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT); + else + xdp_clear_features_flag(port->dev); + mvpp2_swf_bm_pool_init(port); if (status[i]) mvpp2_open(port->dev); @@ -6863,13 +6871,14 @@ static int mvpp2_port_probe(struct platform_device *pdev, if (!port->priv->percpu_pools) mvpp2_set_hw_csum(port, port->pool_long->id); + else if (port->ntxqs >= num_possible_cpus() * 2) + dev->xdp_features = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_NDO_XMIT; dev->vlan_features |= features; netif_set_tso_max_segs(dev, MVPP2_MAX_TSO_SEGS); - dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT; - 
dev->priv_flags |= IFF_UNICAST_FLT; /* MTU range: 68 - 9704 */ diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.h b/drivers/net/ethernet/mediatek/mtk_eth_soc.h index b65de174c3d9..084a6badef6d 100644 --- a/drivers/net/ethernet/mediatek/mtk_eth_soc.h +++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.h @@ -542,6 +542,10 @@ #define SGMII_SEND_AN_ERROR_EN BIT(11) #define SGMII_IF_MODE_MASK GENMASK(5, 1) +/* Register to reset SGMII design */ +#define SGMII_RESERVED_0 0x34 +#define SGMII_SW_RESET BIT(0) + /* Register to set SGMII speed, ANA RG_ Control Signals III*/ #define SGMSYS_ANA_RG_CS3 0x2028 #define RG_PHY_SPEED_MASK (BIT(2) | BIT(3)) diff --git a/drivers/net/ethernet/mediatek/mtk_sgmii.c b/drivers/net/ethernet/mediatek/mtk_sgmii.c index bb00de1003ac..83976dc86887 100644 --- a/drivers/net/ethernet/mediatek/mtk_sgmii.c +++ b/drivers/net/ethernet/mediatek/mtk_sgmii.c @@ -38,20 +38,16 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, const unsigned long *advertising, bool permit_pause_to_mac) { + bool mode_changed = false, changed, use_an; struct mtk_pcs *mpcs = pcs_to_mtk_pcs(pcs); unsigned int rgc3, sgm_mode, bmcr; int advertise, link_timer; - bool changed, use_an; advertise = phylink_mii_c22_pcs_encode_advertisement(interface, advertising); if (advertise < 0) return advertise; - link_timer = phylink_get_link_timer_ns(interface); - if (link_timer < 0) - return link_timer; - /* Clearing IF_MODE_BIT0 switches the PCS to BASE-X mode, and * we assume that fixes it's speed at bitrate = line rate (in * other words, 1000Mbps or 2500Mbps). @@ -77,17 +73,24 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, } if (use_an) { - /* FIXME: Do we need to set AN_RESTART here? */ - bmcr = SGMII_AN_RESTART | SGMII_AN_ENABLE; + bmcr = SGMII_AN_ENABLE; } else { bmcr = 0; } if (mpcs->interface != interface) { + link_timer = phylink_get_link_timer_ns(interface); + if (link_timer < 0) + return link_timer; + /* PHYA power down */ regmap_update_bits(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, SGMII_PHYA_PWD, SGMII_PHYA_PWD); + /* Reset SGMII PCS state */ + regmap_update_bits(mpcs->regmap, SGMII_RESERVED_0, + SGMII_SW_RESET, SGMII_SW_RESET); + if (interface == PHY_INTERFACE_MODE_2500BASEX) rgc3 = RG_PHY_SPEED_3_125G; else @@ -97,16 +100,17 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, regmap_update_bits(mpcs->regmap, mpcs->ana_rgc3, RG_PHY_SPEED_3_125G, rgc3); + /* Setup the link timer */ + regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8); + mpcs->interface = interface; + mode_changed = true; } /* Update the advertisement, noting whether it has changed */ regmap_update_bits_check(mpcs->regmap, SGMSYS_PCS_ADVERTISE, SGMII_ADVERTISE, advertise, &changed); - /* Setup the link timer and QPHY power up inside SGMIISYS */ - regmap_write(mpcs->regmap, SGMSYS_PCS_LINK_TIMER, link_timer / 2 / 8); - /* Update the sgmsys mode register */ regmap_update_bits(mpcs->regmap, SGMSYS_SGMII_MODE, SGMII_REMOTE_FAULT_DIS | SGMII_SPEED_DUPLEX_AN | @@ -114,7 +118,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, /* Update the BMCR */ regmap_update_bits(mpcs->regmap, SGMSYS_PCS_CONTROL_1, - SGMII_AN_RESTART | SGMII_AN_ENABLE, bmcr); + SGMII_AN_ENABLE, bmcr); /* Release PHYA power down state * Only removing bit SGMII_PHYA_PWD isn't enough. 
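[Editorial sketch, not part of the mtk_sgmii patch: the PCS rework above caches the currently programmed interface and only runs the expensive power-down/reset/PLL/link-timer sequence when that interface actually changes; the hunk below then returns "changed || mode_changed" so the caller knows the PCS was reprogrammed. A condensed illustration of that caching shape, using hypothetical mypcs_* names rather than the driver's real structures:]

        #include <linux/phy.h>

        struct mypcs {
                phy_interface_t interface;      /* last interface programmed into HW */
        };

        static void mypcs_full_reconfig(struct mypcs *pcs, phy_interface_t interface);
                                        /* placeholder: reset, PLL, link timer */

        /* Returns non-zero when the caller should treat the PCS as reconfigured. */
        static int mypcs_config(struct mypcs *pcs, phy_interface_t interface,
                                bool adv_changed)
        {
                bool mode_changed = false;

                if (pcs->interface != interface) {
                        mypcs_full_reconfig(pcs, interface);
                        pcs->interface = interface;
                        mode_changed = true;
                }

                return (adv_changed || mode_changed) ? 1 : 0;
        }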
@@ -128,7 +132,7 @@ static int mtk_pcs_config(struct phylink_pcs *pcs, unsigned int mode, usleep_range(50, 100); regmap_write(mpcs->regmap, SGMSYS_QPHY_PWR_STATE_CTRL, 0); - return changed; + return changed || mode_changed; } static void mtk_pcs_restart_an(struct phylink_pcs *pcs) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 88460b7796e5..4a19ef4a9811 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -313,7 +313,6 @@ struct mlx5e_params { } channel; } mqprio; bool rx_cqe_compress_def; - bool tunneled_offload_en; struct dim_cq_moder rx_cq_moderation; struct dim_cq_moder tx_cq_moderation; struct mlx5e_packet_merge_param packet_merge; @@ -1243,6 +1242,7 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 void mlx5e_rx_dim_work(struct work_struct *work); void mlx5e_tx_dim_work(struct work_struct *work); +void mlx5e_set_xdp_feature(struct net_device *netdev); netdev_features_t mlx5e_features_check(struct sk_buff *skb, struct net_device *netdev, netdev_features_t features); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c index c4378afdec09..1bd1c94fb977 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act/police.c @@ -178,7 +178,6 @@ tc_act_police_stats(struct mlx5e_priv *priv, meter = mlx5e_tc_meter_get(priv->mdev, ¶ms); if (IS_ERR(meter)) { NL_SET_ERR_MSG_MOD(fl_act->extack, "Failed to get flow meter"); - mlx5_core_err(priv->mdev, "Failed to get flow meter %d\n", params.index); return PTR_ERR(meter); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c index 626cb7470fa5..07c1895a2b23 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc/act_stats.c @@ -64,6 +64,7 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle, { struct mlx5e_tc_act_stats *act_stats, *old_act_stats; struct rhashtable *ht = &handle->ht; + u64 lastused; int err = 0; act_stats = kvzalloc(sizeof(*act_stats), GFP_KERNEL); @@ -73,6 +74,10 @@ mlx5e_tc_act_stats_add(struct mlx5e_tc_act_stats_handle *handle, act_stats->tc_act_cookie = act_cookie; act_stats->counter = counter; + mlx5_fc_query_cached_raw(counter, + &act_stats->lastbytes, + &act_stats->lastpackets, &lastused); + rcu_read_lock(); old_act_stats = rhashtable_lookup_get_insert_fast(ht, &act_stats->hash, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index 4be770443b0c..9b597cb24598 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -621,15 +621,6 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, if (unlikely(!priv_rx)) return -ENOMEM; - dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); - if (IS_ERR(dek)) { - err = PTR_ERR(dek); - goto err_create_key; - } - priv_rx->dek = dek; - - INIT_LIST_HEAD(&priv_rx->list); - spin_lock_init(&priv_rx->lock); switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: priv_rx->crypto_info.crypto_info_128 = @@ -642,9 +633,20 @@ int mlx5e_ktls_add_rx(struct net_device *netdev, struct sock *sk, default: WARN_ONCE(1, "Unsupported cipher type %u\n", 
crypto_info->cipher_type); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_cipher_type; } + dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); + if (IS_ERR(dek)) { + err = PTR_ERR(dek); + goto err_cipher_type; + } + priv_rx->dek = dek; + + INIT_LIST_HEAD(&priv_rx->list); + spin_lock_init(&priv_rx->lock); + rxq = mlx5e_ktls_sk_get_rxq(sk); priv_rx->rxq = rxq; priv_rx->sk = sk; @@ -677,7 +679,7 @@ err_post_wqes: mlx5e_tir_destroy(&priv_rx->tir); err_create_tir: mlx5_ktls_destroy_key(priv->tls->dek_pool, priv_rx->dek); -err_create_key: +err_cipher_type: kfree(priv_rx); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c index 60b3e08a1028..0e4c0a093293 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_tx.c @@ -469,14 +469,6 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, if (IS_ERR(priv_tx)) return PTR_ERR(priv_tx); - dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); - if (IS_ERR(dek)) { - err = PTR_ERR(dek); - goto err_create_key; - } - priv_tx->dek = dek; - - priv_tx->expected_seq = start_offload_tcp_sn; switch (crypto_info->cipher_type) { case TLS_CIPHER_AES_GCM_128: priv_tx->crypto_info.crypto_info_128 = @@ -489,8 +481,18 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, default: WARN_ONCE(1, "Unsupported cipher type %u\n", crypto_info->cipher_type); - return -EOPNOTSUPP; + err = -EOPNOTSUPP; + goto err_pool_push; } + + dek = mlx5_ktls_create_key(priv->tls->dek_pool, crypto_info); + if (IS_ERR(dek)) { + err = PTR_ERR(dek); + goto err_pool_push; + } + + priv_tx->dek = dek; + priv_tx->expected_seq = start_offload_tcp_sn; priv_tx->tx_ctx = tls_offload_ctx_tx(tls_ctx); mlx5e_set_ktls_tx_priv_ctx(tls_ctx, priv_tx); @@ -500,7 +502,7 @@ int mlx5e_ktls_add_tx(struct net_device *netdev, struct sock *sk, return 0; -err_create_key: +err_pool_push: pool_push(pool, priv_tx); return err; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c index 08d0929e8260..8af53178e40d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/macsec.c @@ -89,8 +89,8 @@ struct mlx5e_macsec_rx_sc { }; struct mlx5e_macsec_umr { + u8 __aligned(64) ctx[MLX5_ST_SZ_BYTES(macsec_aso)]; dma_addr_t dma_addr; - u8 ctx[MLX5_ST_SZ_BYTES(macsec_aso)]; u32 mkey; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c index 7708acc9b2ab..79fd21ecb9cb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c @@ -1985,6 +1985,7 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) struct mlx5e_priv *priv = netdev_priv(netdev); struct mlx5_core_dev *mdev = priv->mdev; struct mlx5e_params new_params; + int err; if (enable) { /* Checking the regular RQ here; mlx5e_validate_xsk_param called @@ -2005,7 +2006,14 @@ static int set_pflag_rx_striding_rq(struct net_device *netdev, bool enable) MLX5E_SET_PFLAG(&new_params, MLX5E_PFLAG_RX_STRIDING_RQ, enable); mlx5e_set_rq_type(mdev, &new_params); - return mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + err = mlx5e_safe_switch_params(priv, &new_params, NULL, NULL, true); + if (err) + return err; + + /* update XDP supported features */ 
+ mlx5e_set_xdp_feature(netdev); + + return 0; } static int set_pflag_rx_no_csum_complete(struct net_device *netdev, bool enable) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index 76a9c5194a70..a7f2ab22cc40 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -4004,6 +4004,25 @@ static int mlx5e_handle_feature(struct net_device *netdev, return 0; } +void mlx5e_set_xdp_feature(struct net_device *netdev) +{ + struct mlx5e_priv *priv = netdev_priv(netdev); + struct mlx5e_params *params = &priv->channels.params; + xdp_features_t val; + + if (params->packet_merge.type != MLX5E_PACKET_MERGE_NONE) { + xdp_clear_features_flag(netdev); + return; + } + + val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_XSK_ZEROCOPY | + NETDEV_XDP_ACT_NDO_XMIT; + if (params->rq_wq_type == MLX5_WQ_TYPE_CYCLIC) + val |= NETDEV_XDP_ACT_RX_SG; + xdp_set_features_flag(netdev, val); +} + int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) { netdev_features_t oper_features = features; @@ -4030,6 +4049,9 @@ int mlx5e_set_features(struct net_device *netdev, netdev_features_t features) return -EINVAL; } + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + return 0; } @@ -4147,13 +4169,17 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, struct xsk_buff_pool *xsk_pool = mlx5e_xsk_get_pool(&chs->params, chs->params.xsk, ix); struct mlx5e_xsk_param xsk; + int max_xdp_mtu; if (!xsk_pool) continue; mlx5e_build_xsk_param(xsk_pool, &xsk); + max_xdp_mtu = mlx5e_xdp_max_mtu(new_params, &xsk); - if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev)) { + /* Validate XSK params and XDP MTU in advance */ + if (!mlx5e_validate_xsk_param(new_params, &xsk, mdev) || + new_params->sw_mtu > max_xdp_mtu) { u32 hr = mlx5e_get_linear_rq_headroom(new_params, &xsk); int max_mtu_frame, max_mtu_page, max_mtu; @@ -4163,9 +4189,9 @@ static bool mlx5e_xsk_validate_mtu(struct net_device *netdev, */ max_mtu_frame = MLX5E_HW2SW_MTU(new_params, xsk.chunk_size - hr); max_mtu_page = MLX5E_HW2SW_MTU(new_params, SKB_MAX_HEAD(0)); - max_mtu = min(max_mtu_frame, max_mtu_page); + max_mtu = min3(max_mtu_frame, max_mtu_page, max_xdp_mtu); - netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u. Try MTU <= %d\n", + netdev_err(netdev, "MTU %d is too big for an XSK running on channel %u or its redirection XDP program. 
Try MTU <= %d\n", new_params->sw_mtu, ix, max_mtu); return false; } @@ -4761,13 +4787,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) if (old_prog) bpf_prog_put(old_prog); - if (reset) { - if (prog) - xdp_features_set_redirect_target(netdev, true); - else - xdp_features_clear_redirect_target(netdev); - } - if (!test_bit(MLX5E_STATE_OPENED, &priv->state) || reset) goto unlock; @@ -4964,8 +4983,6 @@ void mlx5e_build_nic_params(struct mlx5e_priv *priv, struct mlx5e_xsk *xsk, u16 /* TX inline */ mlx5_query_min_inline(mdev, ¶ms->tx_min_inline_mode); - params->tunneled_offload_en = mlx5_tunnel_inner_ft_supported(mdev); - /* AF_XDP */ params->xsk = xsk; @@ -5163,13 +5180,10 @@ static void mlx5e_build_nic_netdev(struct net_device *netdev) netdev->features |= NETIF_F_HIGHDMA; netdev->features |= NETIF_F_HW_VLAN_STAG_FILTER; - netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_XSK_ZEROCOPY | - NETDEV_XDP_ACT_RX_SG; - netdev->priv_flags |= IFF_UNICAST_FLT; netif_set_tso_max_size(netdev, GSO_MAX_SIZE); + mlx5e_set_xdp_feature(netdev); mlx5e_set_netdev_dev_addr(netdev); mlx5e_macsec_build_netdev(priv); mlx5e_ipsec_build_netdev(priv); @@ -5241,6 +5255,9 @@ static int mlx5e_nic_init(struct mlx5_core_dev *mdev, mlx5_core_err(mdev, "TLS initialization failed, %d\n", err); mlx5e_health_create_reporters(priv); + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + return 0; } @@ -5270,7 +5287,7 @@ static int mlx5e_init_nic_rx(struct mlx5e_priv *priv) } features = MLX5E_RX_RES_FEATURE_PTP; - if (priv->channels.params.tunneled_offload_en) + if (mlx5_tunnel_inner_ft_supported(mdev)) features |= MLX5E_RX_RES_FEATURE_INNER_FT; err = mlx5e_rx_res_init(priv->rx_res, priv->mdev, features, priv->max_nch, priv->drop_rq.rqn, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index 9b9203443085..8ff654b4e9e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -747,12 +747,14 @@ static void mlx5e_build_rep_params(struct net_device *netdev) /* RQ */ mlx5e_build_rq_params(mdev, params); + /* update XDP supported features */ + mlx5e_set_xdp_feature(netdev); + /* CQ moderation params */ params->rx_dim_enabled = MLX5_CAP_GEN(mdev, cq_moderation); mlx5e_set_rx_cq_mode_params(params, cq_period_mode); params->mqprio.num_tc = 1; - params->tunneled_offload_en = false; if (rep->vport != MLX5_VPORT_UPLINK) params->vlan_strip_disable = true; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 70b8d2dfa751..6bfed633343a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3752,7 +3752,7 @@ mlx5e_clone_flow_attr_for_post_act(struct mlx5_flow_attr *attr, parse_attr->filter_dev = attr->parse_attr->filter_dev; attr2->action = 0; attr2->counter = NULL; - attr->tc_act_cookies_count = 0; + attr2->tc_act_cookies_count = 0; attr2->flags = 0; attr2->parse_attr = parse_attr; attr2->dest_chain = 0; @@ -4304,6 +4304,7 @@ int mlx5e_set_fwd_to_int_port_actions(struct mlx5e_priv *priv, esw_attr->dest_int_port = dest_int_port; esw_attr->dests[out_index].flags |= MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE; + esw_attr->split_count = out_index; /* Forward to root fdb for matching against the new source vport */ attr->dest_chain = 0; @@ -5304,8 +5305,10 @@ int mlx5e_tc_nic_init(struct mlx5e_priv *priv) 
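A recurring pattern in the mlx5 hunks above (and in the veth change later in this series) is to stop hard-coding netdev->xdp_features at probe time and instead recompute the advertised mask whenever a setting that constrains XDP changes (packet merge, striding RQ, channel counts). A minimal sketch of that pattern using the generic helpers from include/net/xdp.h; the foo_* names and fields are hypothetical:

#include <linux/netdevice.h>
#include <net/xdp.h>

/* Hypothetical driver state; only the fields used below are assumed. */
struct foo_priv {
        struct net_device *netdev;
        bool hw_gro_enabled;    /* HW packet merge is incompatible with XDP */
        bool multibuf_rx;       /* RX ring layout supports S/G XDP frames */
};

static void foo_update_xdp_features(struct foo_priv *priv)
{
        xdp_features_t val;

        if (priv->hw_gro_enabled) {
                /* Current configuration cannot run XDP: advertise nothing. */
                xdp_clear_features_flag(priv->netdev);
                return;
        }

        val = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
              NETDEV_XDP_ACT_NDO_XMIT;
        if (priv->multibuf_rx)
                val |= NETDEV_XDP_ACT_RX_SG;

        xdp_set_features_flag(priv->netdev, val);
}

Calling such a helper from probe, from the ethtool private-flag handlers and from set_features keeps the advertised mask consistent with what the data path can actually do at that moment.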
mlx5e_tc_debugfs_init(tc, mlx5e_fs_get_debugfs_root(priv->fs)); tc->action_stats_handle = mlx5e_tc_act_stats_create(); - if (IS_ERR(tc->action_stats_handle)) + if (IS_ERR(tc->action_stats_handle)) { + err = PTR_ERR(tc->action_stats_handle); goto err_act_stats; + } return 0; @@ -5440,8 +5443,10 @@ int mlx5e_tc_esw_init(struct mlx5_rep_uplink_priv *uplink_priv) } uplink_priv->action_stats_handle = mlx5e_tc_act_stats_create(); - if (IS_ERR(uplink_priv->action_stats_handle)) + if (IS_ERR(uplink_priv->action_stats_handle)) { + err = PTR_ERR(uplink_priv->action_stats_handle); goto err_action_counter; + } return 0; @@ -5463,6 +5468,16 @@ err_tun_mapping: void mlx5e_tc_esw_cleanup(struct mlx5_rep_uplink_priv *uplink_priv) { + struct mlx5e_rep_priv *rpriv; + struct mlx5_eswitch *esw; + struct mlx5e_priv *priv; + + rpriv = container_of(uplink_priv, struct mlx5e_rep_priv, uplink_priv); + priv = netdev_priv(rpriv->netdev); + esw = priv->mdev->priv.eswitch; + + mlx5e_tc_clean_fdb_peer_flows(esw); + mlx5e_tc_tun_cleanup(uplink_priv->encap); mapping_destroy(uplink_priv->tunnel_enc_opts_mapping); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d766a64b1823..22075943bb58 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -723,11 +723,11 @@ mlx5_eswitch_add_fwd_rule(struct mlx5_eswitch *esw, flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; for (i = 0; i < esw_attr->split_count; i++) { - if (esw_is_indir_table(esw, attr)) - err = esw_setup_indir_table(dest, &flow_act, esw, attr, false, &i); - else if (esw_is_chain_src_port_rewrite(esw, esw_attr)) - err = esw_setup_chain_src_port_rewrite(dest, &flow_act, esw, chains, attr, - &i); + if (esw_attr->dests[i].flags & MLX5_ESW_DEST_CHAIN_WITH_SRC_PORT_CHANGE) + /* Source port rewrite (forward to ovs internal port or statck device) isn't + * supported in the rule of split action. 
+ */ + err = -EOPNOTSUPP; else esw_setup_vport_dest(dest, &flow_act, esw, esw_attr, i, i, false); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c index c2a4f86bc890..baa7ef812313 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/ipoib/ipoib.c @@ -70,7 +70,6 @@ static void mlx5i_build_nic_params(struct mlx5_core_dev *mdev, params->packet_merge.type = MLX5E_PACKET_MERGE_NONE; params->hard_mtu = MLX5_IB_GRH_BYTES + MLX5_IPOIB_HARD_LEN; - params->tunneled_offload_en = false; /* CQE compression is not supported for IPoIB */ params->rx_cqe_compress_def = false; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c index 540840e80493..f1de152a6113 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c @@ -1364,8 +1364,8 @@ static void mlx5_unload(struct mlx5_core_dev *dev) { mlx5_devlink_traps_unregister(priv_to_devlink(dev)); mlx5_sf_dev_table_destroy(dev); - mlx5_sriov_detach(dev); mlx5_eswitch_disable(dev->priv.eswitch); + mlx5_sriov_detach(dev); mlx5_lag_remove_mdev(dev); mlx5_ec_cleanup(dev); mlx5_sf_hw_table_destroy(dev); @@ -1789,11 +1789,11 @@ static void remove_one(struct pci_dev *pdev) struct mlx5_core_dev *dev = pci_get_drvdata(pdev); struct devlink *devlink = priv_to_devlink(dev); + set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); /* mlx5_drain_fw_reset() is using devlink APIs. Hence, we must drain * fw_reset before unregistering the devlink. */ mlx5_drain_fw_reset(dev); - set_bit(MLX5_BREAK_FW_WAIT, &dev->intf_state); devlink_unregister(devlink); mlx5_sriov_disable(pdev); mlx5_crdump_disable(dev); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c index 64d4e7125e9b..95dc67fb3001 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c @@ -82,6 +82,16 @@ static u16 func_id_to_type(struct mlx5_core_dev *dev, u16 func_id, bool ec_funct return func_id <= mlx5_core_max_vfs(dev) ? 
MLX5_VF : MLX5_SF; } +static u32 mlx5_get_ec_function(u32 function) +{ + return function >> 16; +} + +static u32 mlx5_get_func_id(u32 function) +{ + return function & 0xffff; +} + static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function) { struct rb_root *root; @@ -665,20 +675,22 @@ static int optimal_reclaimed_pages(void) } static int mlx5_reclaim_root_pages(struct mlx5_core_dev *dev, - struct rb_root *root, u16 func_id) + struct rb_root *root, u32 function) { u64 recl_pages_to_jiffies = msecs_to_jiffies(mlx5_tout_ms(dev, RECLAIM_PAGES)); unsigned long end = jiffies + recl_pages_to_jiffies; while (!RB_EMPTY_ROOT(root)) { + u32 ec_function = mlx5_get_ec_function(function); + u32 function_id = mlx5_get_func_id(function); int nclaimed; int err; - err = reclaim_pages(dev, func_id, optimal_reclaimed_pages(), - &nclaimed, false, mlx5_core_is_ecpf(dev)); + err = reclaim_pages(dev, function_id, optimal_reclaimed_pages(), + &nclaimed, false, ec_function); if (err) { - mlx5_core_warn(dev, "failed reclaiming pages (%d) for func id 0x%x\n", - err, func_id); + mlx5_core_warn(dev, "reclaim_pages err (%d) func_id=0x%x ec_func=0x%x\n", + err, function_id, ec_function); return err; } diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c index a8f94b7544ee..02a327744a61 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c @@ -2937,6 +2937,7 @@ static int mlxsw_sp_netdevice_event(struct notifier_block *unused, static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) { + refcount_set(&mlxsw_sp->parsing.parsing_depth_ref, 0); mlxsw_sp->parsing.parsing_depth = MLXSW_SP_DEFAULT_PARSING_DEPTH; mlxsw_sp->parsing.vxlan_udp_dport = MLXSW_SP_DEFAULT_VXLAN_UDP_DPORT; mutex_init(&mlxsw_sp->parsing.lock); @@ -2945,6 +2946,7 @@ static void mlxsw_sp_parsing_init(struct mlxsw_sp *mlxsw_sp) static void mlxsw_sp_parsing_fini(struct mlxsw_sp *mlxsw_sp) { mutex_destroy(&mlxsw_sp->parsing.lock); + WARN_ON_ONCE(refcount_read(&mlxsw_sp->parsing.parsing_depth_ref)); } struct mlxsw_sp_ipv6_addr_node { diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c index 09e32778b012..4a73e2fe95ef 100644 --- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c +++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c @@ -10381,11 +10381,23 @@ err_reg_write: old_inc_parsing_depth); return err; } + +static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp) +{ + bool old_inc_parsing_depth = mlxsw_sp->router->inc_parsing_depth; + + mlxsw_sp_mp_hash_parsing_depth_adjust(mlxsw_sp, old_inc_parsing_depth, + false); +} #else static int mlxsw_sp_mp_hash_init(struct mlxsw_sp *mlxsw_sp) { return 0; } + +static void mlxsw_sp_mp_hash_fini(struct mlxsw_sp *mlxsw_sp) +{ +} #endif static int mlxsw_sp_dscp_init(struct mlxsw_sp *mlxsw_sp) @@ -10615,6 +10627,7 @@ err_register_inet6addr_notifier: err_register_inetaddr_notifier: mlxsw_core_flush_owq(); err_dscp_init: + mlxsw_sp_mp_hash_fini(mlxsw_sp); err_mp_hash_init: mlxsw_sp_neigh_fini(mlxsw_sp); err_neigh_init: @@ -10655,6 +10668,7 @@ void mlxsw_sp_router_fini(struct mlxsw_sp *mlxsw_sp) unregister_inet6addr_notifier(&mlxsw_sp->router->inet6addr_nb); unregister_inetaddr_notifier(&mlxsw_sp->router->inetaddr_nb); mlxsw_core_flush_owq(); + mlxsw_sp_mp_hash_fini(mlxsw_sp); mlxsw_sp_neigh_fini(mlxsw_sp); mlxsw_sp_lb_rif_fini(mlxsw_sp); mlxsw_sp_vrs_fini(mlxsw_sp); diff --git 
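The new pagealloc.c helpers above decode a single u32 "function" key in which the low 16 bits carry the function id and bit 16 carries the ec_function flag. A minimal sketch of that packing scheme with illustrative foo_* names (the encode helper is hypothetical and shown only for symmetry):

#include <linux/types.h>

static inline u32 foo_pack_function(u16 func_id, bool ec_function)
{
        /* ec_function flag in bit 16, function id in bits 0-15 */
        return ((u32)ec_function << 16) | func_id;
}

static inline u32 foo_get_ec_function(u32 function)
{
        return function >> 16;
}

static inline u32 foo_get_func_id(u32 function)
{
        return function & 0xffff;
}

Packing both halves into one scalar lets callers pass the (func_id, ec_function) pair through existing u32 "function" parameters unchanged.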
a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index d61cd32ec3b6..86a93cac2647 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -5083,6 +5083,11 @@ static int qed_init_wfq_param(struct qed_hwfn *p_hwfn, num_vports = p_hwfn->qm_info.num_vports; + if (num_vports < 2) { + DP_NOTICE(p_hwfn, "Unexpected num_vports: %d\n", num_vports); + return -EINVAL; + } + /* Accounting for the vports which are configured for WFQ explicitly */ for (i = 0; i < num_vports; i++) { u32 tmp_speed; diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c index 6190adf965bc..f55eed092f25 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c +++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c @@ -422,7 +422,7 @@ qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time, if (p_time->hour > 23) p_time->hour = 0; if (p_time->min > 59) - p_time->hour = 0; + p_time->min = 0; if (p_time->msec > 999) p_time->msec = 0; if (p_time->usec > 999) diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c index 0f54849a3823..894e2690c643 100644 --- a/drivers/net/ethernet/renesas/ravb_main.c +++ b/drivers/net/ethernet/renesas/ravb_main.c @@ -1455,8 +1455,6 @@ static int ravb_phy_init(struct net_device *ndev) phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_100baseT_Half_BIT); } - /* Indicate that the MAC is responsible for managing PHY PM */ - phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0; @@ -2379,6 +2377,8 @@ static int ravb_mdio_init(struct ravb_private *priv) { struct platform_device *pdev = priv->pdev; struct device *dev = &pdev->dev; + struct phy_device *phydev; + struct device_node *pn; int error; /* Bitbang init */ @@ -2400,6 +2400,14 @@ static int ravb_mdio_init(struct ravb_private *priv) if (error) goto out_free_bus; + pn = of_parse_phandle(dev->of_node, "phy-handle", 0); + phydev = of_phy_find_device(pn); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + of_node_put(pn); + return 0; out_free_bus: diff --git a/drivers/net/ethernet/renesas/rswitch.c b/drivers/net/ethernet/renesas/rswitch.c index 853394e5bb8b..c4f93d24c6a4 100644 --- a/drivers/net/ethernet/renesas/rswitch.c +++ b/drivers/net/ethernet/renesas/rswitch.c @@ -702,13 +702,14 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) u16 pkt_len; u32 get_ts; + if (*quota <= 0) + return true; + boguscnt = min_t(int, gq->ring_size, *quota); limit = boguscnt; desc = &gq->rx_ring[gq->cur]; while ((desc->desc.die_dt & DT_MASK) != DT_FEMPTY) { - if (--boguscnt < 0) - break; dma_rmb(); pkt_len = le16_to_cpu(desc->desc.info_ds) & RX_DS; skb = gq->skbs[gq->cur]; @@ -734,6 +735,9 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) gq->cur = rswitch_next_queue_index(gq, true, 1); desc = &gq->rx_ring[gq->cur]; + + if (--boguscnt <= 0) + break; } num = rswitch_get_num_cur_queues(gq); @@ -745,7 +749,7 @@ static bool rswitch_rx(struct net_device *ndev, int *quota) goto err; gq->dirty = rswitch_next_queue_index(gq, false, num); - *quota -= limit - (++boguscnt); + *quota -= limit - boguscnt; return boguscnt <= 0; @@ -1437,7 +1441,10 @@ static int rswitch_open(struct net_device *ndev) rswitch_enadis_data_irq(rdev->priv, rdev->tx_queue->index, true); rswitch_enadis_data_irq(rdev->priv, rdev->rx_queue->index, true); - iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); + if (bitmap_empty(rdev->priv->opened_ports, 
RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDIE); + + bitmap_set(rdev->priv->opened_ports, rdev->port, 1); return 0; }; @@ -1448,8 +1455,10 @@ static int rswitch_stop(struct net_device *ndev) struct rswitch_gwca_ts_info *ts_info, *ts_info2; netif_tx_stop_all_queues(ndev); + bitmap_clear(rdev->priv->opened_ports, rdev->port, 1); - iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); + if (bitmap_empty(rdev->priv->opened_ports, RSWITCH_NUM_PORTS)) + iowrite32(GWCA_TS_IRQ_BIT, rdev->priv->addr + GWTSDID); list_for_each_entry_safe(ts_info, ts_info2, &rdev->priv->gwca.ts_info_list, list) { if (ts_info->port != rdev->port) diff --git a/drivers/net/ethernet/renesas/rswitch.h b/drivers/net/ethernet/renesas/rswitch.h index 27d3d38c055f..b3e0411b408e 100644 --- a/drivers/net/ethernet/renesas/rswitch.h +++ b/drivers/net/ethernet/renesas/rswitch.h @@ -998,6 +998,7 @@ struct rswitch_private { struct rcar_gen4_ptp_private *ptp_priv; struct rswitch_device *rdev[RSWITCH_NUM_PORTS]; + DECLARE_BITMAP(opened_ports, RSWITCH_NUM_PORTS); struct rswitch_gwca gwca; struct rswitch_etha etha[RSWITCH_NUM_PORTS]; diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index ed17163d7811..d8ec729825be 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -2029,8 +2029,6 @@ static int sh_eth_phy_init(struct net_device *ndev) if (mdp->cd->register_type != SH_ETH_REG_GIGABIT) phy_set_max_speed(phydev, SPEED_100); - /* Indicate that the MAC is responsible for managing PHY PM */ - phydev->mac_managed_pm = true; phy_attached_info(phydev); return 0; @@ -3097,6 +3095,8 @@ static int sh_mdio_init(struct sh_eth_private *mdp, struct bb_info *bitbang; struct platform_device *pdev = mdp->pdev; struct device *dev = &mdp->pdev->dev; + struct phy_device *phydev; + struct device_node *pn; /* create bit control struct for PHY */ bitbang = devm_kzalloc(dev, sizeof(struct bb_info), GFP_KERNEL); @@ -3133,6 +3133,14 @@ static int sh_mdio_init(struct sh_eth_private *mdp, if (ret) goto out_free_bus; + pn = of_parse_phandle(dev->of_node, "phy-handle", 0); + phydev = of_phy_find_device(pn); + if (phydev) { + phydev->mac_managed_pm = true; + put_device(&phydev->mdio.dev); + } + of_node_put(pn); + return 0; out_free_bus: diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c index ac8580f501e2..ac550d1ac015 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-imx.c @@ -213,8 +213,7 @@ imx_dwmac_parse_dt(struct imx_priv_data *dwmac, struct device *dev) struct device_node *np = dev->of_node; int err = 0; - if (of_get_property(np, "snps,rmii_refclk_ext", NULL)) - dwmac->rmii_refclk_ext = true; + dwmac->rmii_refclk_ext = of_property_read_bool(np, "snps,rmii_refclk_ext"); dwmac->clk_tx = devm_clk_get(dev, "tx"); if (IS_ERR(dwmac->clk_tx)) { diff --git a/drivers/net/ethernet/sun/ldmvsw.c b/drivers/net/ethernet/sun/ldmvsw.c index 8addee6d04bd..734a817d3c94 100644 --- a/drivers/net/ethernet/sun/ldmvsw.c +++ b/drivers/net/ethernet/sun/ldmvsw.c @@ -287,6 +287,9 @@ static int vsw_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + rmac = mdesc_get_property(hp, vdev->mp, remote_macaddr_prop, &len); err = -ENODEV; if (!rmac) { diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c index e6144d963eaa..ab8b09a9ef61 100644 --- a/drivers/net/ethernet/sun/niu.c 
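The rswitch_open()/rswitch_stop() change above only touches the shared timestamp interrupt when the first port comes up or the last one goes down, tracked with an opened_ports bitmap. A minimal sketch of that first-open/last-close pattern; the register offsets and foo_* names are made up for illustration:

#include <linux/bitmap.h>
#include <linux/bits.h>
#include <linux/io.h>

#define FOO_NUM_PORTS           8
#define FOO_TS_IRQ_ENABLE_REG   0x100   /* hypothetical offsets */
#define FOO_TS_IRQ_DISABLE_REG  0x104
#define FOO_TS_IRQ_BIT          BIT(0)

struct foo_priv {
        void __iomem *addr;
        DECLARE_BITMAP(opened_ports, FOO_NUM_PORTS);
};

static void foo_port_open(struct foo_priv *priv, unsigned int port)
{
        /* The first open port unmasks the interrupt shared by all ports. */
        if (bitmap_empty(priv->opened_ports, FOO_NUM_PORTS))
                iowrite32(FOO_TS_IRQ_BIT, priv->addr + FOO_TS_IRQ_ENABLE_REG);
        bitmap_set(priv->opened_ports, port, 1);
}

static void foo_port_close(struct foo_priv *priv, unsigned int port)
{
        bitmap_clear(priv->opened_ports, port, 1);
        /* The last closed port masks it again. */
        if (bitmap_empty(priv->opened_ports, FOO_NUM_PORTS))
                iowrite32(FOO_TS_IRQ_BIT, priv->addr + FOO_TS_IRQ_DISABLE_REG);
}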
+++ b/drivers/net/ethernet/sun/niu.c @@ -9271,7 +9271,7 @@ static int niu_get_of_props(struct niu *np) if (model) strcpy(np->vpd.model, model); - if (of_find_property(dp, "hot-swappable-phy", NULL)) { + if (of_property_read_bool(dp, "hot-swappable-phy")) { np->flags |= (NIU_FLAGS_10G | NIU_FLAGS_FIBER | NIU_FLAGS_HOTPLUG_PHY); } diff --git a/drivers/net/ethernet/sun/sunvnet.c b/drivers/net/ethernet/sun/sunvnet.c index fe86fbd58586..e220620d0ffc 100644 --- a/drivers/net/ethernet/sun/sunvnet.c +++ b/drivers/net/ethernet/sun/sunvnet.c @@ -433,6 +433,9 @@ static int vnet_port_probe(struct vio_dev *vdev, const struct vio_device_id *id) hp = mdesc_grab(); + if (!hp) + return -ENODEV; + vp = vnet_find_parent(hp, vdev->mp, vdev); if (IS_ERR(vp)) { pr_err("Cannot find port parent vnet\n"); diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c index e8f38e3f7706..25e707d7b87c 100644 --- a/drivers/net/ethernet/ti/cpsw-phy-sel.c +++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c @@ -226,8 +226,7 @@ static int cpsw_phy_sel_probe(struct platform_device *pdev) if (IS_ERR(priv->gmii_sel)) return PTR_ERR(priv->gmii_sel); - if (of_find_property(pdev->dev.of_node, "rmii-clock-ext", NULL)) - priv->rmii_clock_external = true; + priv->rmii_clock_external = of_property_read_bool(pdev->dev.of_node, "rmii-clock-ext"); dev_set_drvdata(&pdev->dev, priv); diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c index 751fb0bc65c5..2adf82a32bf6 100644 --- a/drivers/net/ethernet/ti/netcp_ethss.c +++ b/drivers/net/ethernet/ti/netcp_ethss.c @@ -3583,13 +3583,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev, /* init the hw stats lock */ spin_lock_init(&gbe_dev->hw_stats_lock); - if (of_find_property(node, "enable-ale", NULL)) { - gbe_dev->enable_ale = true; + gbe_dev->enable_ale = of_property_read_bool(node, "enable-ale"); + if (gbe_dev->enable_ale) dev_info(dev, "ALE enabled\n"); - } else { - gbe_dev->enable_ale = false; + else dev_dbg(dev, "ALE bypass enabled*\n"); - } ret = of_property_read_u32(node, "tx-queue", &gbe_dev->tx_queue_id); diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c index a502812ac418..86f7843b4591 100644 --- a/drivers/net/ethernet/via/via-velocity.c +++ b/drivers/net/ethernet/via/via-velocity.c @@ -2709,8 +2709,7 @@ static int velocity_get_platform_info(struct velocity_info *vptr) struct resource res; int ret; - if (of_get_property(vptr->dev->of_node, "no-eeprom", NULL)) - vptr->no_eeprom = 1; + vptr->no_eeprom = of_property_read_bool(vptr->dev->of_node, "no-eeprom"); ret = of_address_to_resource(vptr->dev->of_node, 0, &res); if (ret) { diff --git a/drivers/net/ethernet/via/via-velocity.h b/drivers/net/ethernet/via/via-velocity.h index ffdac6fac054..f64ed39b93d8 100644 --- a/drivers/net/ethernet/via/via-velocity.h +++ b/drivers/net/ethernet/via/via-velocity.h @@ -1383,7 +1383,7 @@ struct velocity_info { struct device *dev; struct pci_dev *pdev; struct net_device *netdev; - int no_eeprom; + bool no_eeprom; unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; u8 ip_addr[4]; diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 1066420d6a83..e0ac1bcd9925 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1455,12 +1455,11 @@ static int temac_probe(struct platform_device *pdev) * endianness mode. Default for OF devices is big-endian. 
*/ little_endian = false; - if (temac_np) { - if (of_get_property(temac_np, "little-endian", NULL)) - little_endian = true; - } else if (pdata) { + if (temac_np) + little_endian = of_property_read_bool(temac_np, "little-endian"); + else if (pdata) little_endian = pdata->reg_little_endian; - } + if (little_endian) { lp->temac_ior = _temac_ior_le; lp->temac_iow = _temac_iow_le; diff --git a/drivers/net/ipa/gsi_reg.c b/drivers/net/ipa/gsi_reg.c index 1412b67304c8..1651fbad4bd5 100644 --- a/drivers/net/ipa/gsi_reg.c +++ b/drivers/net/ipa/gsi_reg.c @@ -15,6 +15,14 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id) switch (reg_id) { case INTER_EE_SRC_CH_IRQ_MSK: case INTER_EE_SRC_EV_CH_IRQ_MSK: + return gsi->version >= IPA_VERSION_3_5; + + case HW_PARAM_2: + return gsi->version >= IPA_VERSION_3_5_1; + + case HW_PARAM_4: + return gsi->version >= IPA_VERSION_5_0; + case CH_C_CNTXT_0: case CH_C_CNTXT_1: case CH_C_CNTXT_2: @@ -43,7 +51,6 @@ static bool gsi_reg_id_valid(struct gsi *gsi, enum gsi_reg_id reg_id) case CH_CMD: case EV_CH_CMD: case GENERIC_CMD: - case HW_PARAM_2: case CNTXT_TYPE_IRQ: case CNTXT_TYPE_IRQ_MSK: case CNTXT_SRC_CH_IRQ: diff --git a/drivers/net/ipa/gsi_reg.h b/drivers/net/ipa/gsi_reg.h index f62f0a5c653d..48fde65fa2e8 100644 --- a/drivers/net/ipa/gsi_reg.h +++ b/drivers/net/ipa/gsi_reg.h @@ -10,6 +10,10 @@ #include <linux/bits.h> +struct platform_device; + +struct gsi; + /** * DOC: GSI Registers * diff --git a/drivers/net/ipa/ipa_reg.c b/drivers/net/ipa/ipa_reg.c index 735fa6591609..3f475428dddd 100644 --- a/drivers/net/ipa/ipa_reg.c +++ b/drivers/net/ipa/ipa_reg.c @@ -1,7 +1,7 @@ // SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved. - * Copyright (C) 2019-2022 Linaro Ltd. + * Copyright (C) 2019-2023 Linaro Ltd. 
*/ #include <linux/io.h> @@ -15,6 +15,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) enum ipa_version version = ipa->version; switch (reg_id) { + case FILT_ROUT_HASH_EN: + return version == IPA_VERSION_4_2; + + case FILT_ROUT_HASH_FLUSH: + return version < IPA_VERSION_5_0 && version != IPA_VERSION_4_2; + + case FILT_ROUT_CACHE_FLUSH: + case ENDP_FILTER_CACHE_CFG: + case ENDP_ROUTER_CACHE_CFG: + return version >= IPA_VERSION_5_0; + case IPA_BCR: case COUNTER_CFG: return version < IPA_VERSION_4_5; @@ -32,14 +43,17 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) case SRC_RSRC_GRP_45_RSRC_TYPE: case DST_RSRC_GRP_45_RSRC_TYPE: return version <= IPA_VERSION_3_1 || - version == IPA_VERSION_4_5; + version == IPA_VERSION_4_5 || + version == IPA_VERSION_5_0; case SRC_RSRC_GRP_67_RSRC_TYPE: case DST_RSRC_GRP_67_RSRC_TYPE: - return version <= IPA_VERSION_3_1; + return version <= IPA_VERSION_3_1 || + version == IPA_VERSION_5_0; case ENDP_FILTER_ROUTER_HSH_CFG: - return version != IPA_VERSION_4_2; + return version < IPA_VERSION_5_0 && + version != IPA_VERSION_4_2; case IRQ_SUSPEND_EN: case IRQ_SUSPEND_CLR: @@ -51,10 +65,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) case SHARED_MEM_SIZE: case QSB_MAX_WRITES: case QSB_MAX_READS: - case FILT_ROUT_HASH_EN: - case FILT_ROUT_CACHE_CFG: - case FILT_ROUT_HASH_FLUSH: - case FILT_ROUT_CACHE_FLUSH: case STATE_AGGR_ACTIVE: case LOCAL_PKT_PROC_CNTXT: case AGGR_FORCE_CLOSE: @@ -76,8 +86,6 @@ static bool ipa_reg_id_valid(struct ipa *ipa, enum ipa_reg_id reg_id) case ENDP_INIT_RSRC_GRP: case ENDP_INIT_SEQ: case ENDP_STATUS: - case ENDP_FILTER_CACHE_CFG: - case ENDP_ROUTER_CACHE_CFG: case IPA_IRQ_STTS: case IPA_IRQ_EN: case IPA_IRQ_CLR: diff --git a/drivers/net/ipa/ipa_reg.h b/drivers/net/ipa/ipa_reg.h index 28aa1351dd48..7dd65d39333d 100644 --- a/drivers/net/ipa/ipa_reg.h +++ b/drivers/net/ipa/ipa_reg.h @@ -60,9 +60,8 @@ enum ipa_reg_id { SHARED_MEM_SIZE, QSB_MAX_WRITES, QSB_MAX_READS, - FILT_ROUT_HASH_EN, /* Not IPA v5.0+ */ - FILT_ROUT_CACHE_CFG, /* IPA v5.0+ */ - FILT_ROUT_HASH_FLUSH, /* Not IPA v5.0+ */ + FILT_ROUT_HASH_EN, /* IPA v4.2 */ + FILT_ROUT_HASH_FLUSH, /* Not IPA v4.2 nor IPA v5.0+ */ FILT_ROUT_CACHE_FLUSH, /* IPA v5.0+ */ STATE_AGGR_ACTIVE, IPA_BCR, /* Not IPA v4.5+ */ @@ -77,12 +76,12 @@ enum ipa_reg_id { TIMERS_PULSE_GRAN_CFG, /* IPA v4.5+ */ SRC_RSRC_GRP_01_RSRC_TYPE, SRC_RSRC_GRP_23_RSRC_TYPE, - SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */ - SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */ + SRC_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */ + SRC_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */ DST_RSRC_GRP_01_RSRC_TYPE, DST_RSRC_GRP_23_RSRC_TYPE, - DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+, IPA v4.5 */ - DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+ */ + DST_RSRC_GRP_45_RSRC_TYPE, /* Not IPA v3.5+; IPA v4.5, IPA v5.0 */ + DST_RSRC_GRP_67_RSRC_TYPE, /* Not IPA v3.5+; IPA v5.0 */ ENDP_INIT_CTRL, /* Not IPA v4.2+ for TX, not IPA v4.0+ for RX */ ENDP_INIT_CFG, ENDP_INIT_NAT, /* TX only */ @@ -206,14 +205,6 @@ enum ipa_reg_qsb_max_reads_field_id { GEN_QMB_1_MAX_READS_BEATS, /* IPA v4.0+ */ }; -/* FILT_ROUT_CACHE_CFG register */ -enum ipa_reg_filt_rout_cache_cfg_field_id { - ROUTER_CACHE_EN, - FILTER_CACHE_EN, - LOW_PRI_HASH_HIT_DISABLE, - LRU_EVICTION_THRESHOLD, -}; - /* FILT_ROUT_HASH_EN and FILT_ROUT_HASH_FLUSH registers */ enum ipa_reg_filt_rout_hash_field_id { IPV6_ROUTER_HASH, diff --git a/drivers/net/ipa/reg.h b/drivers/net/ipa/reg.h 
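The ipa_reg.c hunk above moves several register IDs out of the always-valid list and gives each one its exact version window. A minimal sketch of that validity check, with a hypothetical version enum and three illustrative windows matching the shapes used above (one version only, everything before X except Y, and X and later):

#include <linux/types.h>

enum foo_version { FOO_VERSION_4_2, FOO_VERSION_4_5, FOO_VERSION_5_0 };

enum foo_reg_id { FOO_HASH_EN, FOO_HASH_FLUSH, FOO_CACHE_FLUSH };

static bool foo_reg_id_valid(enum foo_version version, enum foo_reg_id reg_id)
{
        switch (reg_id) {
        case FOO_HASH_EN:       /* exists on one version only */
                return version == FOO_VERSION_4_2;
        case FOO_HASH_FLUSH:    /* pre-5.0, except the 4.2 variant */
                return version < FOO_VERSION_5_0 &&
                       version != FOO_VERSION_4_2;
        case FOO_CACHE_FLUSH:   /* introduced in 5.0 */
                return version >= FOO_VERSION_5_0;
        }
        return false;
}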
index 57b457f39b6e..2ee07eebca67 100644 --- a/drivers/net/ipa/reg.h +++ b/drivers/net/ipa/reg.h @@ -6,7 +6,8 @@ #define _REG_H_ #include <linux/types.h> -#include <linux/bits.h> +#include <linux/log2.h> +#include <linux/bug.h> /** * struct reg - A register descriptor diff --git a/drivers/net/ipa/reg/gsi_reg-v4.5.c b/drivers/net/ipa/reg/gsi_reg-v4.5.c index 648b51b88d4e..2900e5c3ff88 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.5.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.5.c @@ -137,17 +137,17 @@ REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1, 0x0001004c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0, - 0x0001e000 + 0x4000 * GSI_EE_AP, 0x08); + 0x00011000 + 0x4000 * GSI_EE_AP, 0x08); REG_STRIDE(EV_CH_E_DOORBELL_0, ev_ch_e_doorbell_0, - 0x0001e100 + 0x4000 * GSI_EE_AP, 0x08); + 0x00011100 + 0x4000 * GSI_EE_AP, 0x08); static const u32 reg_gsi_status_fmask[] = { [ENABLED] = BIT(0), /* Bits 1-31 reserved */ }; -REG_FIELDS(GSI_STATUS, gsi_status, 0x0001f000 + 0x4000 * GSI_EE_AP); +REG_FIELDS(GSI_STATUS, gsi_status, 0x00012000 + 0x4000 * GSI_EE_AP); static const u32 reg_ch_cmd_fmask[] = { [CH_CHID] = GENMASK(7, 0), @@ -155,7 +155,7 @@ static const u32 reg_ch_cmd_fmask[] = { [CH_OPCODE] = GENMASK(31, 24), }; -REG_FIELDS(CH_CMD, ch_cmd, 0x0001f008 + 0x4000 * GSI_EE_AP); +REG_FIELDS(CH_CMD, ch_cmd, 0x00012008 + 0x4000 * GSI_EE_AP); static const u32 reg_ev_ch_cmd_fmask[] = { [EV_CHID] = GENMASK(7, 0), @@ -163,7 +163,7 @@ static const u32 reg_ev_ch_cmd_fmask[] = { [EV_OPCODE] = GENMASK(31, 24), }; -REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x0001f010 + 0x4000 * GSI_EE_AP); +REG_FIELDS(EV_CH_CMD, ev_ch_cmd, 0x00012010 + 0x4000 * GSI_EE_AP); static const u32 reg_generic_cmd_fmask[] = { [GENERIC_OPCODE] = GENMASK(4, 0), @@ -172,7 +172,7 @@ static const u32 reg_generic_cmd_fmask[] = { /* Bits 14-31 reserved */ }; -REG_FIELDS(GENERIC_CMD, generic_cmd, 0x0001f018 + 0x4000 * GSI_EE_AP); +REG_FIELDS(GENERIC_CMD, generic_cmd, 0x00012018 + 0x4000 * GSI_EE_AP); static const u32 reg_hw_param_2_fmask[] = { [IRAM_SIZE] = GENMASK(2, 0), @@ -188,58 +188,58 @@ static const u32 reg_hw_param_2_fmask[] = { [GSI_USE_INTER_EE] = BIT(31), }; -REG_FIELDS(HW_PARAM_2, hw_param_2, 0x0001f040 + 0x4000 * GSI_EE_AP); +REG_FIELDS(HW_PARAM_2, hw_param_2, 0x00012040 + 0x4000 * GSI_EE_AP); -REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x0001f080 + 0x4000 * GSI_EE_AP); +REG(CNTXT_TYPE_IRQ, cntxt_type_irq, 0x00012080 + 0x4000 * GSI_EE_AP); -REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x0001f088 + 0x4000 * GSI_EE_AP); +REG(CNTXT_TYPE_IRQ_MSK, cntxt_type_irq_msk, 0x00012088 + 0x4000 * GSI_EE_AP); -REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x0001f090 + 0x4000 * GSI_EE_AP); +REG(CNTXT_SRC_CH_IRQ, cntxt_src_ch_irq, 0x00012090 + 0x4000 * GSI_EE_AP); -REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x0001f094 + 0x4000 * GSI_EE_AP); +REG(CNTXT_SRC_EV_CH_IRQ, cntxt_src_ev_ch_irq, 0x00012094 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_CH_IRQ_MSK, cntxt_src_ch_irq_msk, - 0x0001f098 + 0x4000 * GSI_EE_AP); + 0x00012098 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_EV_CH_IRQ_MSK, cntxt_src_ev_ch_irq_msk, - 0x0001f09c + 0x4000 * GSI_EE_AP); + 0x0001209c + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_CH_IRQ_CLR, cntxt_src_ch_irq_clr, - 0x0001f0a0 + 0x4000 * GSI_EE_AP); + 0x000120a0 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_EV_CH_IRQ_CLR, cntxt_src_ev_ch_irq_clr, - 0x0001f0a4 + 0x4000 * GSI_EE_AP); + 0x000120a4 + 0x4000 * GSI_EE_AP); -REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x0001f0b0 + 0x4000 * GSI_EE_AP); +REG(CNTXT_SRC_IEOB_IRQ, cntxt_src_ieob_irq, 0x000120b0 + 0x4000 * 
GSI_EE_AP); REG(CNTXT_SRC_IEOB_IRQ_MSK, cntxt_src_ieob_irq_msk, - 0x0001f0b8 + 0x4000 * GSI_EE_AP); + 0x000120b8 + 0x4000 * GSI_EE_AP); REG(CNTXT_SRC_IEOB_IRQ_CLR, cntxt_src_ieob_irq_clr, - 0x0001f0c0 + 0x4000 * GSI_EE_AP); + 0x000120c0 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x0001f100 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GLOB_IRQ_STTS, cntxt_glob_irq_stts, 0x00012100 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x0001f108 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GLOB_IRQ_EN, cntxt_glob_irq_en, 0x00012108 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x0001f110 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GLOB_IRQ_CLR, cntxt_glob_irq_clr, 0x00012110 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x0001f118 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GSI_IRQ_STTS, cntxt_gsi_irq_stts, 0x00012118 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x0001f120 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GSI_IRQ_EN, cntxt_gsi_irq_en, 0x00012120 + 0x4000 * GSI_EE_AP); -REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x0001f128 + 0x4000 * GSI_EE_AP); +REG(CNTXT_GSI_IRQ_CLR, cntxt_gsi_irq_clr, 0x00012128 + 0x4000 * GSI_EE_AP); static const u32 reg_cntxt_intset_fmask[] = { [INTYPE] = BIT(0) /* Bits 1-31 reserved */ }; -REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x0001f180 + 0x4000 * GSI_EE_AP); +REG_FIELDS(CNTXT_INTSET, cntxt_intset, 0x00012180 + 0x4000 * GSI_EE_AP); -REG_FIELDS(ERROR_LOG, error_log, 0x0001f200 + 0x4000 * GSI_EE_AP); +REG_FIELDS(ERROR_LOG, error_log, 0x00012200 + 0x4000 * GSI_EE_AP); -REG(ERROR_LOG_CLR, error_log_clr, 0x0001f210 + 0x4000 * GSI_EE_AP); +REG(ERROR_LOG_CLR, error_log_clr, 0x00012210 + 0x4000 * GSI_EE_AP); static const u32 reg_cntxt_scratch_0_fmask[] = { [INTER_EE_RESULT] = GENMASK(2, 0), @@ -248,7 +248,7 @@ static const u32 reg_cntxt_scratch_0_fmask[] = { /* Bits 8-31 reserved */ }; -REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x0001f400 + 0x4000 * GSI_EE_AP); +REG_FIELDS(CNTXT_SCRATCH_0, cntxt_scratch_0, 0x00012400 + 0x4000 * GSI_EE_AP); static const struct reg *reg_array[] = { [INTER_EE_SRC_CH_IRQ_MSK] = ®_inter_ee_src_ch_irq_msk, diff --git a/drivers/net/ipa/reg/gsi_reg-v4.9.c b/drivers/net/ipa/reg/gsi_reg-v4.9.c index 4bf45d264d6b..8b5d95425a76 100644 --- a/drivers/net/ipa/reg/gsi_reg-v4.9.c +++ b/drivers/net/ipa/reg/gsi_reg-v4.9.c @@ -27,7 +27,7 @@ static const u32 reg_ch_c_cntxt_0_fmask[] = { }; REG_STRIDE_FIELDS(CH_C_CNTXT_0, ch_c_cntxt_0, - 0x0001c000 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f000 + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ch_c_cntxt_1_fmask[] = { [CH_R_LENGTH] = GENMASK(19, 0), @@ -35,11 +35,11 @@ static const u32 reg_ch_c_cntxt_1_fmask[] = { }; REG_STRIDE_FIELDS(CH_C_CNTXT_1, ch_c_cntxt_1, - 0x0001c004 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f004 + 0x4000 * GSI_EE_AP, 0x80); -REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0001c008 + 0x4000 * GSI_EE_AP, 0x80); +REG_STRIDE(CH_C_CNTXT_2, ch_c_cntxt_2, 0x0000f008 + 0x4000 * GSI_EE_AP, 0x80); -REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0001c00c + 0x4000 * GSI_EE_AP, 0x80); +REG_STRIDE(CH_C_CNTXT_3, ch_c_cntxt_3, 0x0000f00c + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ch_c_qos_fmask[] = { [WRR_WEIGHT] = GENMASK(3, 0), @@ -53,7 +53,7 @@ static const u32 reg_ch_c_qos_fmask[] = { /* Bits 25-31 reserved */ }; -REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0001c05c + 0x4000 * GSI_EE_AP, 0x80); +REG_STRIDE_FIELDS(CH_C_QOS, ch_c_qos, 0x0000f05c + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_error_log_fmask[] = { [ERR_ARG3] = GENMASK(3, 0), @@ -67,16 +67,16 
@@ static const u32 reg_error_log_fmask[] = { }; REG_STRIDE(CH_C_SCRATCH_0, ch_c_scratch_0, - 0x0001c060 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f060 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_SCRATCH_1, ch_c_scratch_1, - 0x0001c064 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f064 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_SCRATCH_2, ch_c_scratch_2, - 0x0001c068 + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f068 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_SCRATCH_3, ch_c_scratch_3, - 0x0001c06c + 0x4000 * GSI_EE_AP, 0x80); + 0x0000f06c + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_0_fmask[] = { [EV_CHTYPE] = GENMASK(3, 0), @@ -89,23 +89,23 @@ static const u32 reg_ev_ch_e_cntxt_0_fmask[] = { }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_0, ev_ch_e_cntxt_0, - 0x0001d000 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010000 + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_1_fmask[] = { [R_LENGTH] = GENMASK(15, 0), }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_1, ev_ch_e_cntxt_1, - 0x0001d004 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010004 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_2, ev_ch_e_cntxt_2, - 0x0001d008 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010008 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_3, ev_ch_e_cntxt_3, - 0x0001d00c + 0x4000 * GSI_EE_AP, 0x80); + 0x0001000c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_4, ev_ch_e_cntxt_4, - 0x0001d010 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010010 + 0x4000 * GSI_EE_AP, 0x80); static const u32 reg_ev_ch_e_cntxt_8_fmask[] = { [EV_MODT] = GENMASK(15, 0), @@ -114,28 +114,28 @@ static const u32 reg_ev_ch_e_cntxt_8_fmask[] = { }; REG_STRIDE_FIELDS(EV_CH_E_CNTXT_8, ev_ch_e_cntxt_8, - 0x0001d020 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010020 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_9, ev_ch_e_cntxt_9, - 0x0001d024 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010024 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_10, ev_ch_e_cntxt_10, - 0x0001d028 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010028 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_11, ev_ch_e_cntxt_11, - 0x0001d02c + 0x4000 * GSI_EE_AP, 0x80); + 0x0001002c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_12, ev_ch_e_cntxt_12, - 0x0001d030 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010030 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_CNTXT_13, ev_ch_e_cntxt_13, - 0x0001d034 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010034 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_SCRATCH_0, ev_ch_e_scratch_0, - 0x0001d048 + 0x4000 * GSI_EE_AP, 0x80); + 0x00010048 + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(EV_CH_E_SCRATCH_1, ev_ch_e_scratch_1, - 0x0001d04c + 0x4000 * GSI_EE_AP, 0x80); + 0x0001004c + 0x4000 * GSI_EE_AP, 0x80); REG_STRIDE(CH_C_DOORBELL_0, ch_c_doorbell_0, 0x00011000 + 0x4000 * GSI_EE_AP, 0x08); diff --git a/drivers/net/ipvlan/ipvlan_l3s.c b/drivers/net/ipvlan/ipvlan_l3s.c index 943d26cbf39f..71712ea25403 100644 --- a/drivers/net/ipvlan/ipvlan_l3s.c +++ b/drivers/net/ipvlan/ipvlan_l3s.c @@ -101,6 +101,7 @@ static unsigned int ipvlan_nf_input(void *priv, struct sk_buff *skb, goto out; skb->dev = addr->master->dev; + skb->skb_iif = skb->dev->ifindex; len = skb->len + ETH_HLEN; ipvlan_count_rx(addr->master, len, true, false); out: diff --git a/drivers/net/phy/mscc/mscc_main.c b/drivers/net/phy/mscc/mscc_main.c index 8a13b1ad9a33..62bf99e45af1 100644 --- a/drivers/net/phy/mscc/mscc_main.c +++ b/drivers/net/phy/mscc/mscc_main.c @@ -280,12 +280,9 @@ static int vsc85xx_wol_set(struct phy_device *phydev, u16 pwd[3] = {0, 0, 0}; struct ethtool_wolinfo *wol_conf = wol; - mutex_lock(&phydev->lock); rc = 
phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); - if (rc < 0) { - rc = phy_restore_page(phydev, rc, rc); - goto out_unlock; - } + if (rc < 0) + return phy_restore_page(phydev, rc, rc); if (wol->wolopts & WAKE_MAGIC) { /* Store the device address for the magic packet */ @@ -323,7 +320,7 @@ static int vsc85xx_wol_set(struct phy_device *phydev, rc = phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); if (rc < 0) - goto out_unlock; + return rc; if (wol->wolopts & WAKE_MAGIC) { /* Enable the WOL interrupt */ @@ -331,22 +328,19 @@ static int vsc85xx_wol_set(struct phy_device *phydev, reg_val |= MII_VSC85XX_INT_MASK_WOL; rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); if (rc) - goto out_unlock; + return rc; } else { /* Disable the WOL interrupt */ reg_val = phy_read(phydev, MII_VSC85XX_INT_MASK); reg_val &= (~MII_VSC85XX_INT_MASK_WOL); rc = phy_write(phydev, MII_VSC85XX_INT_MASK, reg_val); if (rc) - goto out_unlock; + return rc; } /* Clear WOL iterrupt status */ reg_val = phy_read(phydev, MII_VSC85XX_INT_STATUS); -out_unlock: - mutex_unlock(&phydev->lock); - - return rc; + return 0; } static void vsc85xx_wol_get(struct phy_device *phydev, @@ -358,10 +352,9 @@ static void vsc85xx_wol_get(struct phy_device *phydev, u16 pwd[3] = {0, 0, 0}; struct ethtool_wolinfo *wol_conf = wol; - mutex_lock(&phydev->lock); rc = phy_select_page(phydev, MSCC_PHY_PAGE_EXTENDED_2); if (rc < 0) - goto out_unlock; + goto out_restore_page; reg_val = __phy_read(phydev, MSCC_PHY_WOL_MAC_CONTROL); if (reg_val & SECURE_ON_ENABLE) @@ -377,9 +370,8 @@ static void vsc85xx_wol_get(struct phy_device *phydev, } } -out_unlock: +out_restore_page: phy_restore_page(phydev, rc, rc > 0 ? 0 : rc); - mutex_unlock(&phydev->lock); } #if IS_ENABLED(CONFIG_OF_MDIO) diff --git a/drivers/net/phy/nxp-c45-tja11xx.c b/drivers/net/phy/nxp-c45-tja11xx.c index 047c581457e3..5813b07242ce 100644 --- a/drivers/net/phy/nxp-c45-tja11xx.c +++ b/drivers/net/phy/nxp-c45-tja11xx.c @@ -79,7 +79,7 @@ #define SGMII_ABILITY BIT(0) #define VEND1_MII_BASIC_CONFIG 0xAFC6 -#define MII_BASIC_CONFIG_REV BIT(8) +#define MII_BASIC_CONFIG_REV BIT(4) #define MII_BASIC_CONFIG_SGMII 0x9 #define MII_BASIC_CONFIG_RGMII 0x7 #define MII_BASIC_CONFIG_RMII 0x5 diff --git a/drivers/net/phy/smsc.c b/drivers/net/phy/smsc.c index 00d9eff91dcf..df2c5435c5c4 100644 --- a/drivers/net/phy/smsc.c +++ b/drivers/net/phy/smsc.c @@ -199,8 +199,11 @@ static int lan95xx_config_aneg_ext(struct phy_device *phydev) static int lan87xx_read_status(struct phy_device *phydev) { struct smsc_phy_priv *priv = phydev->priv; + int err; - int err = genphy_read_status(phydev); + err = genphy_read_status(phydev); + if (err) + return err; if (!phydev->link && priv->energy_enable && phydev->irq == PHY_POLL) { /* Disable EDPD to wake up PHY */ diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 95de452ff4da..5d6454fedb3f 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -2200,6 +2200,13 @@ static int smsc75xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb) size = (rx_cmd_a & RX_CMD_A_LEN) - RXW_PADDING; align_count = (4 - ((size + RXW_PADDING) % 4)) % 4; + if (unlikely(size > skb->len)) { + netif_dbg(dev, rx_err, dev->net, + "size err rx_cmd_a=0x%08x\n", + rx_cmd_a); + return 0; + } + if (unlikely(rx_cmd_a & RX_CMD_A_RED)) { netif_dbg(dev, rx_err, dev->net, "Error rx_cmd_a=0x%08x\n", rx_cmd_a); diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 1bb54de7124d..a30a66cace14 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -708,7 +708,8 @@ 
static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, u32 frame_sz; if (skb_shared(skb) || skb_head_is_locked(skb) || - skb_shinfo(skb)->nr_frags) { + skb_shinfo(skb)->nr_frags || + skb_headroom(skb) < XDP_PACKET_HEADROOM) { u32 size, len, max_head_size, off; struct sk_buff *nskb; struct page *page; @@ -773,9 +774,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, consume_skb(skb); skb = nskb; - } else if (skb_headroom(skb) < XDP_PACKET_HEADROOM && - pskb_expand_head(skb, VETH_XDP_HEADROOM, 0, GFP_ATOMIC)) { - goto drop; } /* SKB "head" area always have tailroom for skb_shared_info */ @@ -1257,6 +1255,26 @@ static int veth_enable_range_safe(struct net_device *dev, int start, int end) return 0; } +static void veth_set_xdp_features(struct net_device *dev) +{ + struct veth_priv *priv = netdev_priv(dev); + struct net_device *peer; + + peer = rtnl_dereference(priv->peer); + if (peer && peer->real_num_tx_queues <= dev->real_num_rx_queues) { + xdp_features_t val = NETDEV_XDP_ACT_BASIC | + NETDEV_XDP_ACT_REDIRECT | + NETDEV_XDP_ACT_RX_SG; + + if (priv->_xdp_prog || veth_gro_requested(dev)) + val |= NETDEV_XDP_ACT_NDO_XMIT | + NETDEV_XDP_ACT_NDO_XMIT_SG; + xdp_set_features_flag(dev, val); + } else { + xdp_clear_features_flag(dev); + } +} + static int veth_set_channels(struct net_device *dev, struct ethtool_channels *ch) { @@ -1323,6 +1341,12 @@ out: if (peer) netif_carrier_on(peer); } + + /* update XDP supported features */ + veth_set_xdp_features(dev); + if (peer) + veth_set_xdp_features(peer); + return err; revert: @@ -1489,7 +1513,10 @@ static int veth_set_features(struct net_device *dev, err = veth_napi_enable(dev); if (err) return err; + + xdp_features_set_redirect_target(dev, true); } else { + xdp_features_clear_redirect_target(dev); veth_napi_del(dev); } return 0; @@ -1570,10 +1597,15 @@ static int veth_xdp_set(struct net_device *dev, struct bpf_prog *prog, peer->hw_features &= ~NETIF_F_GSO_SOFTWARE; peer->max_mtu = max_mtu; } + + xdp_features_set_redirect_target(dev, true); } if (old_prog) { if (!prog) { + if (!veth_gro_requested(dev)) + xdp_features_clear_redirect_target(dev); + if (dev->flags & IFF_UP) veth_disable_xdp(dev); @@ -1686,10 +1718,6 @@ static void veth_setup(struct net_device *dev) dev->hw_enc_features = VETH_FEATURES; dev->mpls_features = NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE; netif_set_tso_max_size(dev, GSO_MAX_SIZE); - - dev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | - NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_RX_SG | - NETDEV_XDP_ACT_NDO_XMIT_SG; } /* @@ -1857,6 +1885,10 @@ static int veth_newlink(struct net *src_net, struct net_device *dev, goto err_queues; veth_disable_gro(dev); + /* update XDP supported features */ + veth_set_xdp_features(dev); + veth_set_xdp_features(peer); + return 0; err_queues: diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index fb5e68ed3ec2..2396c28c0122 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -446,7 +446,8 @@ static unsigned int mergeable_ctx_to_truesize(void *mrg_ctx) static struct sk_buff *page_to_skb(struct virtnet_info *vi, struct receive_queue *rq, struct page *page, unsigned int offset, - unsigned int len, unsigned int truesize) + unsigned int len, unsigned int truesize, + unsigned int headroom) { struct sk_buff *skb; struct virtio_net_hdr_mrg_rxbuf *hdr; @@ -464,11 +465,11 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi, else hdr_padded_len = sizeof(struct padded_vnet_hdr); - buf = p; + buf = p - headroom; len -= hdr_len; offset += 
hdr_padded_len; p += hdr_padded_len; - tailroom = truesize - hdr_padded_len - len; + tailroom = truesize - headroom - hdr_padded_len - len; shinfo_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); @@ -545,6 +546,87 @@ ok: return skb; } +static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) +{ + unsigned int len; + unsigned int packets = 0; + unsigned int bytes = 0; + void *ptr; + + while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { + if (likely(!is_xdp_frame(ptr))) { + struct sk_buff *skb = ptr; + + pr_debug("Sent skb %p\n", skb); + + bytes += skb->len; + napi_consume_skb(skb, in_napi); + } else { + struct xdp_frame *frame = ptr_to_xdp(ptr); + + bytes += xdp_get_frame_len(frame); + xdp_return_frame(frame); + } + packets++; + } + + /* Avoid overhead when no packets have been processed + * happens when called speculatively from start_xmit. + */ + if (!packets) + return; + + u64_stats_update_begin(&sq->stats.syncp); + sq->stats.bytes += bytes; + sq->stats.packets += packets; + u64_stats_update_end(&sq->stats.syncp); +} + +static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) +{ + if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) + return false; + else if (q < vi->curr_queue_pairs) + return true; + else + return false; +} + +static void check_sq_full_and_disable(struct virtnet_info *vi, + struct net_device *dev, + struct send_queue *sq) +{ + bool use_napi = sq->napi.weight; + int qnum; + + qnum = sq - vi->sq; + + /* If running out of space, stop queue to avoid getting packets that we + * are then unable to transmit. + * An alternative would be to force queuing layer to requeue the skb by + * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be + * returned in a normal path of operation: it means that driver is not + * maintaining the TX queue stop/start state properly, and causes + * the stack to do a non-trivial amount of useless work. + * Since most packets only take 1 or 2 ring slots, stopping the queue + * early means 16 slots are typically wasted. + */ + if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { + netif_stop_subqueue(dev, qnum); + if (use_napi) { + if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) + virtqueue_napi_schedule(&sq->napi, sq->vq); + } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { + /* More just got used, free them then recheck. 
*/ + free_old_xmit_skbs(sq, false); + if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { + netif_start_subqueue(dev, qnum); + virtqueue_disable_cb(sq->vq); + } + } + } +} + static int __virtnet_xdp_xmit_one(struct virtnet_info *vi, struct send_queue *sq, struct xdp_frame *xdpf) @@ -686,6 +768,9 @@ static int virtnet_xdp_xmit(struct net_device *dev, } ret = nxmit; + if (!is_xdp_raw_buffer_queue(vi, sq - vi->sq)) + check_sq_full_and_disable(vi, dev, sq); + if (flags & XDP_XMIT_FLUSH) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) kicks = 1; @@ -925,7 +1010,7 @@ static struct sk_buff *receive_big(struct net_device *dev, { struct page *page = buf; struct sk_buff *skb = - page_to_skb(vi, rq, page, 0, len, PAGE_SIZE); + page_to_skb(vi, rq, page, 0, len, PAGE_SIZE, 0); stats->bytes += len - vi->hdr_len; if (unlikely(!skb)) @@ -1188,9 +1273,12 @@ static struct sk_buff *receive_mergeable(struct net_device *dev, switch (act) { case XDP_PASS: + head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); + if (unlikely(!head_skb)) + goto err_xdp_frags; + if (unlikely(xdp_page != page)) put_page(page); - head_skb = build_skb_from_xdp_buff(dev, vi, &xdp, xdp_frags_truesz); rcu_read_unlock(); return head_skb; case XDP_TX: @@ -1248,7 +1336,7 @@ err_xdp_frags: rcu_read_unlock(); skip_xdp: - head_skb = page_to_skb(vi, rq, page, offset, len, truesize); + head_skb = page_to_skb(vi, rq, page, offset, len, truesize, headroom); curr_skb = head_skb; if (unlikely(!curr_skb)) @@ -1714,52 +1802,6 @@ static int virtnet_receive(struct receive_queue *rq, int budget, return stats.packets; } -static void free_old_xmit_skbs(struct send_queue *sq, bool in_napi) -{ - unsigned int len; - unsigned int packets = 0; - unsigned int bytes = 0; - void *ptr; - - while ((ptr = virtqueue_get_buf(sq->vq, &len)) != NULL) { - if (likely(!is_xdp_frame(ptr))) { - struct sk_buff *skb = ptr; - - pr_debug("Sent skb %p\n", skb); - - bytes += skb->len; - napi_consume_skb(skb, in_napi); - } else { - struct xdp_frame *frame = ptr_to_xdp(ptr); - - bytes += xdp_get_frame_len(frame); - xdp_return_frame(frame); - } - packets++; - } - - /* Avoid overhead when no packets have been processed - * happens when called speculatively from start_xmit. - */ - if (!packets) - return; - - u64_stats_update_begin(&sq->stats.syncp); - sq->stats.bytes += bytes; - sq->stats.packets += packets; - u64_stats_update_end(&sq->stats.syncp); -} - -static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q) -{ - if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs)) - return false; - else if (q < vi->curr_queue_pairs) - return true; - else - return false; -} - static void virtnet_poll_cleantx(struct receive_queue *rq) { struct virtnet_info *vi = rq->vq->vdev->priv; @@ -1989,30 +2031,7 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev) nf_reset_ct(skb); } - /* If running out of space, stop queue to avoid getting packets that we - * are then unable to transmit. - * An alternative would be to force queuing layer to requeue the skb by - * returning NETDEV_TX_BUSY. However, NETDEV_TX_BUSY should not be - * returned in a normal path of operation: it means that driver is not - * maintaining the TX queue stop/start state properly, and causes - * the stack to do a non-trivial amount of useless work. - * Since most packets only take 1 or 2 ring slots, stopping the queue - * early means 16 slots are typically wasted. 
- */ - if (sq->vq->num_free < 2+MAX_SKB_FRAGS) { - netif_stop_subqueue(dev, qnum); - if (use_napi) { - if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) - virtqueue_napi_schedule(&sq->napi, sq->vq); - } else if (unlikely(!virtqueue_enable_cb_delayed(sq->vq))) { - /* More just got used, free them then recheck. */ - free_old_xmit_skbs(sq, false); - if (sq->vq->num_free >= 2+MAX_SKB_FRAGS) { - netif_start_subqueue(dev, qnum); - virtqueue_disable_cb(sq->vq); - } - } - } + check_sq_full_and_disable(vi, dev, sq); if (kick || netif_xmit_stopped(txq)) { if (virtqueue_kick_prepare(sq->vq) && virtqueue_notify(sq->vq)) { diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c index 1c53b5546927..47c2ad7a3e42 100644 --- a/drivers/net/wan/fsl_ucc_hdlc.c +++ b/drivers/net/wan/fsl_ucc_hdlc.c @@ -1177,14 +1177,9 @@ static int ucc_hdlc_probe(struct platform_device *pdev) uhdlc_priv->dev = &pdev->dev; uhdlc_priv->ut_info = ut_info; - if (of_get_property(np, "fsl,tdm-interface", NULL)) - uhdlc_priv->tsa = 1; - - if (of_get_property(np, "fsl,ucc-internal-loopback", NULL)) - uhdlc_priv->loopback = 1; - - if (of_get_property(np, "fsl,hdlc-bus", NULL)) - uhdlc_priv->hdlc_bus = 1; + uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface"); + uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback"); + uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus"); if (uhdlc_priv->tsa == 1) { utdm = kzalloc(sizeof(*utdm), GFP_KERNEL); diff --git a/drivers/net/wireless/ti/wlcore/spi.c b/drivers/net/wireless/ti/wlcore/spi.c index 2d2edddc77bd..3f88e6a0a510 100644 --- a/drivers/net/wireless/ti/wlcore/spi.c +++ b/drivers/net/wireless/ti/wlcore/spi.c @@ -447,8 +447,7 @@ static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue, dev_info(&spi->dev, "selected chip family is %s\n", pdev_data->family->name); - if (of_find_property(dt_node, "clock-xtal", NULL)) - pdev_data->ref_clock_xtal = true; + pdev_data->ref_clock_xtal = of_property_read_bool(dt_node, "clock-xtal"); /* optional clock frequency params */ of_property_read_u32(dt_node, "ref-clock-frequency", diff --git a/drivers/nfc/pn533/usb.c b/drivers/nfc/pn533/usb.c index ed9c5e2cf3ad..a187f0e0b0f7 100644 --- a/drivers/nfc/pn533/usb.c +++ b/drivers/nfc/pn533/usb.c @@ -175,6 +175,7 @@ static int pn533_usb_send_frame(struct pn533 *dev, print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); + arg.phy = phy; init_completion(&arg.done); cntx = phy->out_urb->context; phy->out_urb->context = &arg; diff --git a/drivers/nfc/st-nci/ndlc.c b/drivers/nfc/st-nci/ndlc.c index 755460a73c0d..d2aa9f766738 100644 --- a/drivers/nfc/st-nci/ndlc.c +++ b/drivers/nfc/st-nci/ndlc.c @@ -282,13 +282,15 @@ EXPORT_SYMBOL(ndlc_probe); void ndlc_remove(struct llt_ndlc *ndlc) { - st_nci_remove(ndlc->ndev); - /* cancel timers */ del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc->t2_active = false; ndlc->t1_active = false; + /* cancel work */ + cancel_work_sync(&ndlc->sm_work); + + st_nci_remove(ndlc->ndev); skb_queue_purge(&ndlc->rcv_q); skb_queue_purge(&ndlc->send_q); diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index c2730b116dc6..d4be525f8100 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -781,16 +781,26 @@ static blk_status_t nvme_setup_discard(struct nvme_ns *ns, struct request *req, range = page_address(ns->ctrl->discard_page); } - __rq_for_each_bio(bio, req) { - u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); - 
u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; - - if (n < segments) { - range[n].cattr = cpu_to_le32(0); - range[n].nlb = cpu_to_le32(nlb); - range[n].slba = cpu_to_le64(slba); + if (queue_max_discard_segments(req->q) == 1) { + u64 slba = nvme_sect_to_lba(ns, blk_rq_pos(req)); + u32 nlb = blk_rq_sectors(req) >> (ns->lba_shift - 9); + + range[0].cattr = cpu_to_le32(0); + range[0].nlb = cpu_to_le32(nlb); + range[0].slba = cpu_to_le64(slba); + n = 1; + } else { + __rq_for_each_bio(bio, req) { + u64 slba = nvme_sect_to_lba(ns, bio->bi_iter.bi_sector); + u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift; + + if (n < segments) { + range[n].cattr = cpu_to_le32(0); + range[n].nlb = cpu_to_le32(nlb); + range[n].slba = cpu_to_le64(slba); + } + n++; } - n++; } if (WARN_ON_ONCE(n != segments)) { diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c index fc39d01e7b63..9171452e2f6d 100644 --- a/drivers/nvme/host/multipath.c +++ b/drivers/nvme/host/multipath.c @@ -123,9 +123,8 @@ void nvme_mpath_start_request(struct request *rq) return; nvme_req(rq)->flags |= NVME_MPATH_IO_STATS; - nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, - blk_rq_bytes(rq) >> SECTOR_SHIFT, - req_op(rq), jiffies); + nvme_req(rq)->start_time = bdev_start_io_acct(disk->part0, req_op(rq), + jiffies); } EXPORT_SYMBOL_GPL(nvme_mpath_start_request); @@ -136,7 +135,8 @@ void nvme_mpath_end_request(struct request *rq) if (!(nvme_req(rq)->flags & NVME_MPATH_IO_STATS)) return; bdev_end_io_acct(ns->head->disk->part0, req_op(rq), - nvme_req(rq)->start_time); + blk_rq_bytes(rq) >> SECTOR_SHIFT, + nvme_req(rq)->start_time); } void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl) diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 5b95c94ee40f..b615906263f3 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -3073,6 +3073,7 @@ out_dev_unmap: nvme_dev_unmap(dev); out_uninit_ctrl: nvme_uninit_ctrl(&dev->ctrl); + nvme_put_ctrl(&dev->ctrl); return result; } @@ -3415,6 +3416,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_DEVICE(0x2646, 0x501E), /* KINGSTON OM3PGP4xxxxQ OS21011 NVMe SSD */ .driver_data = NVME_QUIRK_DISABLE_WRITE_ZEROES, }, + { PCI_DEVICE(0x1f40, 0x1202), /* Netac Technologies Co. NV3000 NVMe SSD */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1f40, 0x5236), /* Netac Technologies Co. NV7000 NVMe SSD */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1e4B, 0x1001), /* MAXIO MAP1001 */ @@ -3435,6 +3438,8 @@ static const struct pci_device_id nvme_id_table[] = { .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2263), /* Lexar NM610 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, + { PCI_DEVICE(0x1d97, 0x1d97), /* Lexar NM620 */ + .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(0x1d97, 0x2269), /* Lexar NM760 */ .driver_data = NVME_QUIRK_BOGUS_NID, }, { PCI_DEVICE(PCI_VENDOR_ID_AMAZON, 0x0061), diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 7723a4989524..42c0598c31f2 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -208,6 +208,18 @@ static inline u8 nvme_tcp_ddgst_len(struct nvme_tcp_queue *queue) return queue->data_digest ? 
NVME_TCP_DIGEST_LENGTH : 0; } +static inline void *nvme_tcp_req_cmd_pdu(struct nvme_tcp_request *req) +{ + return req->pdu; +} + +static inline void *nvme_tcp_req_data_pdu(struct nvme_tcp_request *req) +{ + /* use the pdu space in the back for the data pdu */ + return req->pdu + sizeof(struct nvme_tcp_cmd_pdu) - + sizeof(struct nvme_tcp_data_pdu); +} + static inline size_t nvme_tcp_inline_data_size(struct nvme_tcp_request *req) { if (nvme_is_fabrics(req->req.cmd)) @@ -614,7 +626,7 @@ static int nvme_tcp_handle_comp(struct nvme_tcp_queue *queue, static void nvme_tcp_setup_h2c_data_pdu(struct nvme_tcp_request *req) { - struct nvme_tcp_data_pdu *data = req->pdu; + struct nvme_tcp_data_pdu *data = nvme_tcp_req_data_pdu(req); struct nvme_tcp_queue *queue = req->queue; struct request *rq = blk_mq_rq_from_pdu(req); u32 h2cdata_sent = req->pdu_len; @@ -1038,7 +1050,7 @@ static int nvme_tcp_try_send_data(struct nvme_tcp_request *req) static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); bool inline_data = nvme_tcp_has_inline_data(req); u8 hdgst = nvme_tcp_hdgst_len(queue); int len = sizeof(*pdu) + hdgst - req->offset; @@ -1077,7 +1089,7 @@ static int nvme_tcp_try_send_cmd_pdu(struct nvme_tcp_request *req) static int nvme_tcp_try_send_data_pdu(struct nvme_tcp_request *req) { struct nvme_tcp_queue *queue = req->queue; - struct nvme_tcp_data_pdu *pdu = req->pdu; + struct nvme_tcp_data_pdu *pdu = nvme_tcp_req_data_pdu(req); u8 hdgst = nvme_tcp_hdgst_len(queue); int len = sizeof(*pdu) - req->offset + hdgst; int ret; @@ -2284,7 +2296,7 @@ static enum blk_eh_timer_return nvme_tcp_timeout(struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); struct nvme_ctrl *ctrl = &req->queue->ctrl->ctrl; - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); u8 opc = pdu->cmd.common.opcode, fctype = pdu->cmd.fabrics.fctype; int qid = nvme_tcp_queue_id(req->queue); @@ -2323,7 +2335,7 @@ static blk_status_t nvme_tcp_map_data(struct nvme_tcp_queue *queue, struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_command *c = &pdu->cmd; c->common.flags |= NVME_CMD_SGL_METABUF; @@ -2343,7 +2355,7 @@ static blk_status_t nvme_tcp_setup_cmd_pdu(struct nvme_ns *ns, struct request *rq) { struct nvme_tcp_request *req = blk_mq_rq_to_pdu(rq); - struct nvme_tcp_cmd_pdu *pdu = req->pdu; + struct nvme_tcp_cmd_pdu *pdu = nvme_tcp_req_cmd_pdu(req); struct nvme_tcp_queue *queue = req->queue; u8 hdgst = nvme_tcp_hdgst_len(queue), ddgst = 0; blk_status_t ret; @@ -2682,6 +2694,15 @@ static struct nvmf_transport_ops nvme_tcp_transport = { static int __init nvme_tcp_init_module(void) { + BUILD_BUG_ON(sizeof(struct nvme_tcp_hdr) != 8); + BUILD_BUG_ON(sizeof(struct nvme_tcp_cmd_pdu) != 72); + BUILD_BUG_ON(sizeof(struct nvme_tcp_data_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_rsp_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_r2t_pdu) != 24); + BUILD_BUG_ON(sizeof(struct nvme_tcp_icreq_pdu) != 128); + BUILD_BUG_ON(sizeof(struct nvme_tcp_icresp_pdu) != 128); + BUILD_BUG_ON(sizeof(struct nvme_tcp_term_pdu) != 24); + nvme_tcp_wq = alloc_workqueue("nvme_tcp_wq", WQ_MEM_RECLAIM | WQ_HIGHPRI, 0); if (!nvme_tcp_wq) diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c 
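[Editor's note] The nvme-tcp hunks above stop treating req->pdu as one undifferentiated buffer: nvme_tcp_req_data_pdu() carves the smaller data PDU out of the tail of the command-PDU space, and new BUILD_BUG_ON() checks pin the on-wire PDU sizes at build time. A minimal userspace sketch of that layout trick follows; the struct sizes and helper names are illustrative stand-ins, not the real NVMe/TCP definitions.

    #include <stdio.h>
    #include <string.h>

    struct cmd_pdu  { unsigned char hdr[8]; unsigned char body[64]; };  /* 72 bytes */
    struct data_pdu { unsigned char hdr[8]; unsigned char body[16]; };  /* 24 bytes */

    /* compile-time guards in the spirit of the BUILD_BUG_ON()s added above */
    _Static_assert(sizeof(struct cmd_pdu) == 72, "cmd PDU size");
    _Static_assert(sizeof(struct data_pdu) == 24, "data PDU size");

    static void *req_cmd_pdu(void *pdu)
    {
        return pdu;                 /* command PDU starts at the buffer head */
    }

    static void *req_data_pdu(void *pdu)
    {
        /* carve the smaller data PDU out of the back of the cmd PDU space */
        return (char *)pdu + sizeof(struct cmd_pdu) - sizeof(struct data_pdu);
    }

    int main(void)
    {
        struct cmd_pdu slot;        /* stands in for the per-request ->pdu buffer */
        struct data_pdu *data;

        memset(&slot, 0, sizeof(slot));
        data = req_data_pdu(&slot);
        data->hdr[0] = 0xaa;        /* lands inside the same allocation */
        printf("cmd at offset 0, data at offset %td\n",
               (char *)data - (char *)req_cmd_pdu(&slot));   /* prints 48 */
        return 0;
    }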
index f66ed13d7c11..3935165048e7 100644 --- a/drivers/nvme/target/core.c +++ b/drivers/nvme/target/core.c @@ -756,8 +756,10 @@ static void __nvmet_req_complete(struct nvmet_req *req, u16 status) void nvmet_req_complete(struct nvmet_req *req, u16 status) { + struct nvmet_sq *sq = req->sq; + __nvmet_req_complete(req, status); - percpu_ref_put(&req->sq->ref); + percpu_ref_put(&sq->ref); } EXPORT_SYMBOL_GPL(nvmet_req_complete); diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 83ae838ceb5f..549c4bd5caec 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -76,6 +76,27 @@ struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n) } EXPORT_SYMBOL_GPL(pci_bus_resource_n); +void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res) +{ + struct pci_bus_resource *bus_res, *tmp; + int i; + + for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) { + if (bus->resource[i] == res) { + bus->resource[i] = NULL; + return; + } + } + + list_for_each_entry_safe(bus_res, tmp, &bus->resources, list) { + if (bus_res->res == res) { + list_del(&bus_res->list); + kfree(bus_res); + return; + } + } +} + void pci_bus_remove_resources(struct pci_bus *bus) { int i; diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c index f7f62e56afca..9b6fbbe15d92 100644 --- a/drivers/scsi/hosts.c +++ b/drivers/scsi/hosts.c @@ -341,9 +341,6 @@ static void scsi_host_dev_release(struct device *dev) struct Scsi_Host *shost = dev_to_shost(dev); struct device *parent = dev->parent; - /* In case scsi_remove_host() has not been called. */ - scsi_proc_hostdir_rm(shost->hostt); - /* Wait for functions invoked through call_rcu(&scmd->rcu, ...) */ rcu_barrier(); diff --git a/drivers/scsi/mpi3mr/mpi3mr.h b/drivers/scsi/mpi3mr/mpi3mr.h index 40f238fa80cc..364fb1b5e45a 100644 --- a/drivers/scsi/mpi3mr/mpi3mr.h +++ b/drivers/scsi/mpi3mr/mpi3mr.h @@ -1393,4 +1393,6 @@ void mpi3mr_flush_drv_cmds(struct mpi3mr_ioc *mrioc); void mpi3mr_flush_cmds_for_unrecovered_controller(struct mpi3mr_ioc *mrioc); void mpi3mr_free_enclosure_list(struct mpi3mr_ioc *mrioc); int mpi3mr_process_admin_reply_q(struct mpi3mr_ioc *mrioc); +void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, + struct mpi3mr_sas_node *sas_expander); #endif /*MPI3MR_H_INCLUDED*/ diff --git a/drivers/scsi/mpi3mr/mpi3mr_fw.c b/drivers/scsi/mpi3mr/mpi3mr_fw.c index 29acf6111db3..a565817aa56d 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_fw.c +++ b/drivers/scsi/mpi3mr/mpi3mr_fw.c @@ -3837,29 +3837,34 @@ retry_init: mpi3mr_print_ioc_info(mrioc); - dprint_init(mrioc, "allocating config page buffers\n"); - mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, - MPI3MR_DEFAULT_CFG_PAGE_SZ, &mrioc->cfg_page_dma, GFP_KERNEL); if (!mrioc->cfg_page) { - retval = -1; - goto out_failed_noretry; + dprint_init(mrioc, "allocating config page buffers\n"); + mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; + mrioc->cfg_page = dma_alloc_coherent(&mrioc->pdev->dev, + mrioc->cfg_page_sz, &mrioc->cfg_page_dma, GFP_KERNEL); + if (!mrioc->cfg_page) { + retval = -1; + goto out_failed_noretry; + } } - mrioc->cfg_page_sz = MPI3MR_DEFAULT_CFG_PAGE_SZ; - - retval = mpi3mr_alloc_reply_sense_bufs(mrioc); - if (retval) { - ioc_err(mrioc, - "%s :Failed to allocated reply sense buffers %d\n", - __func__, retval); - goto out_failed_noretry; + if (!mrioc->init_cmds.reply) { + retval = mpi3mr_alloc_reply_sense_bufs(mrioc); + if (retval) { + ioc_err(mrioc, + "%s :Failed to allocated reply sense buffers %d\n", + __func__, retval); + goto out_failed_noretry; + } } - retval = 
mpi3mr_alloc_chain_bufs(mrioc); - if (retval) { - ioc_err(mrioc, "Failed to allocated chain buffers %d\n", - retval); - goto out_failed_noretry; + if (!mrioc->chain_sgl_list) { + retval = mpi3mr_alloc_chain_bufs(mrioc); + if (retval) { + ioc_err(mrioc, "Failed to allocated chain buffers %d\n", + retval); + goto out_failed_noretry; + } } retval = mpi3mr_issue_iocinit(mrioc); @@ -4382,13 +4387,20 @@ void mpi3mr_free_mem(struct mpi3mr_ioc *mrioc) mrioc->admin_req_base, mrioc->admin_req_dma); mrioc->admin_req_base = NULL; } - + if (mrioc->cfg_page) { + dma_free_coherent(&mrioc->pdev->dev, mrioc->cfg_page_sz, + mrioc->cfg_page, mrioc->cfg_page_dma); + mrioc->cfg_page = NULL; + } if (mrioc->pel_seqnum_virt) { dma_free_coherent(&mrioc->pdev->dev, mrioc->pel_seqnum_sz, mrioc->pel_seqnum_virt, mrioc->pel_seqnum_dma); mrioc->pel_seqnum_virt = NULL; } + kfree(mrioc->throttle_groups); + mrioc->throttle_groups = NULL; + kfree(mrioc->logdata_buf); mrioc->logdata_buf = NULL; diff --git a/drivers/scsi/mpi3mr/mpi3mr_os.c b/drivers/scsi/mpi3mr/mpi3mr_os.c index a794cc8a1c0b..6d55698ea4d1 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_os.c +++ b/drivers/scsi/mpi3mr/mpi3mr_os.c @@ -5078,6 +5078,8 @@ static void mpi3mr_remove(struct pci_dev *pdev) struct workqueue_struct *wq; unsigned long flags; struct mpi3mr_tgt_dev *tgtdev, *tgtdev_next; + struct mpi3mr_hba_port *port, *hba_port_next; + struct mpi3mr_sas_node *sas_expander, *sas_expander_next; if (!shost) return; @@ -5117,6 +5119,28 @@ static void mpi3mr_remove(struct pci_dev *pdev) mpi3mr_free_mem(mrioc); mpi3mr_cleanup_resources(mrioc); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + list_for_each_entry_safe_reverse(sas_expander, sas_expander_next, + &mrioc->sas_expander_list, list) { + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + mpi3mr_expander_node_remove(mrioc, sas_expander); + spin_lock_irqsave(&mrioc->sas_node_lock, flags); + } + list_for_each_entry_safe(port, hba_port_next, &mrioc->hba_port_table_list, list) { + ioc_info(mrioc, + "removing hba_port entry: %p port: %d from hba_port list\n", + port, port->port_id); + list_del(&port->list); + kfree(port); + } + spin_unlock_irqrestore(&mrioc->sas_node_lock, flags); + + if (mrioc->sas_hba.num_phys) { + kfree(mrioc->sas_hba.phy); + mrioc->sas_hba.phy = NULL; + mrioc->sas_hba.num_phys = 0; + } + spin_lock(&mrioc_list_lock); list_del(&mrioc->list); spin_unlock(&mrioc_list_lock); diff --git a/drivers/scsi/mpi3mr/mpi3mr_transport.c b/drivers/scsi/mpi3mr/mpi3mr_transport.c index be25f242fa79..5748bd9369ff 100644 --- a/drivers/scsi/mpi3mr/mpi3mr_transport.c +++ b/drivers/scsi/mpi3mr/mpi3mr_transport.c @@ -9,9 +9,6 @@ #include "mpi3mr.h" -static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, - struct mpi3mr_sas_node *sas_expander); - /** * mpi3mr_post_transport_req - Issue transport requests and wait * @mrioc: Adapter instance reference @@ -2164,7 +2161,7 @@ out_fail: * * Return nothing. 
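[Editor's note] The mpi3mr_fw.c hunk above makes controller bring-up safe to retry by allocating the config page, reply/sense buffers and chain buffers only when they are not already present. A hedged userspace sketch of that idempotent-init guard, with invented names rather than the driver's structures:

    #include <stdio.h>
    #include <stdlib.h>

    struct ctrl {
        void *cfg_page;
        void *reply_bufs;
    };

    static int ctrl_init(struct ctrl *c)
    {
        if (!c->cfg_page) {
            c->cfg_page = calloc(1, 4096);
            if (!c->cfg_page)
                return -1;
        }
        if (!c->reply_bufs) {
            c->reply_bufs = calloc(64, 128);
            if (!c->reply_bufs)
                return -1;
        }
        /* later init steps may fail and the caller may call ctrl_init()
         * again; the guards above make that retry leak-free */
        return 0;
    }

    int main(void)
    {
        struct ctrl c = { 0 };
        void *first;

        ctrl_init(&c);
        first = c.cfg_page;
        ctrl_init(&c);              /* simulated retry after a soft reset */
        printf("same buffer reused: %s\n", c.cfg_page == first ? "yes" : "no");
        free(c.cfg_page);
        free(c.reply_bufs);
        return 0;
    }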
*/ -static void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, +void mpi3mr_expander_node_remove(struct mpi3mr_ioc *mrioc, struct mpi3mr_sas_node *sas_expander) { struct mpi3mr_sas_port *mr_sas_port, *next; diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c index e5ecd6ada6cd..e8a4750f6ec4 100644 --- a/drivers/scsi/mpt3sas/mpt3sas_transport.c +++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c @@ -785,7 +785,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, goto out_fail; } port = sas_port_alloc_num(sas_node->parent_dev); - if ((sas_port_add(port))) { + if (!port || (sas_port_add(port))) { ioc_err(ioc, "failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); goto out_fail; @@ -824,6 +824,12 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, mpt3sas_port->remote_identify.sas_address; } + if (!rphy) { + ioc_err(ioc, "failure at %s:%d/%s()!\n", + __FILE__, __LINE__, __func__); + goto out_delete_port; + } + rphy->identify = mpt3sas_port->remote_identify; if ((sas_rphy_add(rphy))) { @@ -831,6 +837,7 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, __FILE__, __LINE__, __func__); sas_rphy_free(rphy); rphy = NULL; + goto out_delete_port; } if (mpt3sas_port->remote_identify.device_type == SAS_END_DEVICE) { @@ -857,7 +864,10 @@ mpt3sas_transport_port_add(struct MPT3SAS_ADAPTER *ioc, u16 handle, rphy_to_expander_device(rphy), hba_port->port_id); return mpt3sas_port; - out_fail: +out_delete_port: + sas_port_delete(port); + +out_fail: list_for_each_entry_safe(mpt3sas_phy, next, &mpt3sas_port->phy_list, port_siblings) list_del(&mpt3sas_phy->port_siblings); diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c index 7d2210a006f0..5cce1ba70fc6 100644 --- a/drivers/scsi/scsi.c +++ b/drivers/scsi/scsi.c @@ -326,6 +326,9 @@ static int scsi_get_vpd_size(struct scsi_device *sdev, u8 page) unsigned char vpd_header[SCSI_VPD_HEADER_SIZE] __aligned(4); int result; + if (sdev->no_vpd_size) + return SCSI_DEFAULT_VPD_LEN; + /* * Fetch the VPD page header to find out how big the page * is. 
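[Editor's note] The mpt3sas_transport_port_add() hunk above adds a second unwind label so a port that was already registered gets deleted when a later step (rphy allocation or sas_rphy_add) fails, instead of being leaked on the old out_fail path. The goto-unwind shape, reduced to a runnable userspace sketch with stand-in resources:

    #include <stdio.h>
    #include <stdlib.h>

    static int port_add(void)
    {
        char *port = NULL, *rphy = NULL;

        port = malloc(32);
        if (!port)
            goto out_fail;

        rphy = malloc(32);          /* a later step failing must undo the port */
        if (!rphy)
            goto out_delete_port;

        free(rphy);
        free(port);
        return 0;

    out_delete_port:
        free(port);                 /* undo the step that already succeeded */
    out_fail:
        fprintf(stderr, "port_add: failure, unwound partial setup\n");
        return -1;
    }

    int main(void)
    {
        return port_add() ? 1 : 0;
    }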
This is done to prevent problems on legacy devices diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c index c7080454aea9..bc9d280417f6 100644 --- a/drivers/scsi/scsi_devinfo.c +++ b/drivers/scsi/scsi_devinfo.c @@ -134,7 +134,7 @@ static struct { {"3PARdata", "VV", NULL, BLIST_REPORTLUN2}, {"ADAPTEC", "AACRAID", NULL, BLIST_FORCELUN}, {"ADAPTEC", "Adaptec 5400S", NULL, BLIST_FORCELUN}, - {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES}, + {"AIX", "VDASD", NULL, BLIST_TRY_VPD_PAGES | BLIST_NO_VPD_SIZE}, {"AFT PRO", "-IX CF", "0.0>", BLIST_FORCELUN}, {"BELKIN", "USB 2 HS-CF", "1.95", BLIST_FORCELUN | BLIST_INQUIRY_36}, {"BROWNIE", "1200U3P", NULL, BLIST_NOREPORTLUN}, @@ -188,6 +188,7 @@ static struct { {"HPE", "OPEN-", "*", BLIST_REPORTLUN2 | BLIST_TRY_VPD_PAGES}, {"IBM", "AuSaV1S2", NULL, BLIST_FORCELUN}, {"IBM", "ProFibre 4000R", "*", BLIST_SPARSELUN | BLIST_LARGELUN}, + {"IBM", "2076", NULL, BLIST_NO_VPD_SIZE}, {"IBM", "2105", NULL, BLIST_RETRY_HWERROR}, {"iomega", "jaz 1GB", "J.86", BLIST_NOTQ | BLIST_NOLUN}, {"IOMEGA", "ZIP", NULL, BLIST_NOTQ | BLIST_NOLUN}, diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 4e842d79de31..d217be323cc6 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -1057,6 +1057,9 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, else if (*bflags & BLIST_SKIP_VPD_PAGES) sdev->skip_vpd_pages = 1; + if (*bflags & BLIST_NO_VPD_SIZE) + sdev->no_vpd_size = 1; + transport_configure_device(&sdev->sdev_gendev); if (sdev->host->hostt->slave_configure) { diff --git a/drivers/ufs/core/ufshcd.c b/drivers/ufs/core/ufshcd.c index 05eac965ee27..37e178a9ac47 100644 --- a/drivers/ufs/core/ufshcd.c +++ b/drivers/ufs/core/ufshcd.c @@ -1500,7 +1500,7 @@ start_window: scaling->window_start_t = curr_t; scaling->tot_busy_t = 0; - if (hba->outstanding_reqs) { + if (scaling->active_reqs) { scaling->busy_start_t = curr_t; scaling->is_busy_started = true; } else { @@ -2118,7 +2118,7 @@ static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba) spin_lock_irqsave(hba->host->host_lock, flags); hba->clk_scaling.active_reqs--; - if (!hba->outstanding_reqs && scaling->is_busy_started) { + if (!scaling->active_reqs && scaling->is_busy_started) { scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(), scaling->busy_start_t)); scaling->busy_start_t = 0; diff --git a/drivers/vdpa/mlx5/core/mlx5_vdpa.h b/drivers/vdpa/mlx5/core/mlx5_vdpa.h index 058fbe28107e..25fc4120b618 100644 --- a/drivers/vdpa/mlx5/core/mlx5_vdpa.h +++ b/drivers/vdpa/mlx5/core/mlx5_vdpa.h @@ -96,6 +96,7 @@ struct mlx5_vdpa_dev { struct mlx5_control_vq cvq; struct workqueue_struct *wq; unsigned int group2asid[MLX5_VDPA_NUMVQ_GROUPS]; + bool suspended; }; int mlx5_vdpa_alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid); diff --git a/drivers/vdpa/mlx5/net/mlx5_vnet.c b/drivers/vdpa/mlx5/net/mlx5_vnet.c index 3a0e721aef05..520646ae7fa0 100644 --- a/drivers/vdpa/mlx5/net/mlx5_vnet.c +++ b/drivers/vdpa/mlx5/net/mlx5_vnet.c @@ -2438,7 +2438,7 @@ static int mlx5_vdpa_change_map(struct mlx5_vdpa_dev *mvdev, if (err) goto err_mr; - if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) + if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) goto err_mr; restore_channels_info(ndev); @@ -2606,6 +2606,7 @@ static int mlx5_vdpa_reset(struct vdpa_device *vdev) clear_vqs_ready(ndev); mlx5_vdpa_destroy_mr(&ndev->mvdev); ndev->mvdev.status = 0; + ndev->mvdev.suspended = false; ndev->cur_num_vqs = 0; ndev->mvdev.cvq.received_desc = 0; 
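[Editor's note] The scsi_devinfo/scsi_scan hunks above route the new BLIST_NO_VPD_SIZE table flag into a per-device no_vpd_size bit, which scsi_get_vpd_size() then honours by returning SCSI_DEFAULT_VPD_LEN instead of asking the target. A small illustrative sketch of that table-driven quirk lookup; the helpers and struct layout are examples only, not the kernel's:

    #include <stdio.h>
    #include <string.h>

    #define BLIST_NO_VPD_SIZE (1ULL << 13)

    struct devinfo { const char *vendor, *model; unsigned long long flags; };

    static const struct devinfo devinfo_table[] = {
        { "AIX", "VDASD", BLIST_NO_VPD_SIZE },
        { "IBM", "2076",  BLIST_NO_VPD_SIZE },
    };

    struct sdev { unsigned no_vpd_size:1; };

    static void scan_device(struct sdev *sdev, const char *vendor, const char *model)
    {
        for (size_t i = 0; i < sizeof(devinfo_table) / sizeof(devinfo_table[0]); i++) {
            const struct devinfo *d = &devinfo_table[i];

            /* a matching blacklist entry sets the per-device quirk bit */
            if (!strcmp(d->vendor, vendor) && !strcmp(d->model, model) &&
                (d->flags & BLIST_NO_VPD_SIZE))
                sdev->no_vpd_size = 1;
        }
    }

    int main(void)
    {
        struct sdev sdev = { 0 };

        scan_device(&sdev, "IBM", "2076");
        printf("no_vpd_size = %d\n", sdev.no_vpd_size ? 1 : 0);   /* 1 */
        return 0;
    }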
ndev->mvdev.cvq.completed_desc = 0; @@ -2852,6 +2853,8 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev) struct mlx5_vdpa_virtqueue *mvq; int i; + mlx5_vdpa_info(mvdev, "suspending device\n"); + down_write(&ndev->reslock); ndev->nb_registered = false; mlx5_notifier_unregister(mvdev->mdev, &ndev->nb); @@ -2861,6 +2864,7 @@ static int mlx5_vdpa_suspend(struct vdpa_device *vdev) suspend_vq(ndev, mvq); } mlx5_vdpa_cvq_suspend(mvdev); + mvdev->suspended = true; up_write(&ndev->reslock); return 0; } diff --git a/drivers/vdpa/vdpa_sim/vdpa_sim.c b/drivers/vdpa/vdpa_sim/vdpa_sim.c index 6a0a65814626..eea23c630f7c 100644 --- a/drivers/vdpa/vdpa_sim/vdpa_sim.c +++ b/drivers/vdpa/vdpa_sim/vdpa_sim.c @@ -68,6 +68,17 @@ static void vdpasim_queue_ready(struct vdpasim *vdpasim, unsigned int idx) (uintptr_t)vq->device_addr); vq->vring.last_avail_idx = last_avail_idx; + + /* + * Since vdpa_sim does not support receive inflight descriptors as a + * destination of a migration, let's set both avail_idx and used_idx + * the same at vq start. This is how vhost-user works in a + * VHOST_SET_VRING_BASE call. + * + * Although the simple fix is to set last_used_idx at + * vdpasim_set_vq_state, it would be reset at vdpasim_queue_ready. + */ + vq->vring.last_used_idx = last_avail_idx; vq->vring.notify = vdpasim_vq_notify; } diff --git a/drivers/vdpa/virtio_pci/vp_vdpa.c b/drivers/vdpa/virtio_pci/vp_vdpa.c index 8fe267ca3e76..281287fae89f 100644 --- a/drivers/vdpa/virtio_pci/vp_vdpa.c +++ b/drivers/vdpa/virtio_pci/vp_vdpa.c @@ -645,8 +645,8 @@ static void vp_vdpa_remove(struct pci_dev *pdev) struct virtio_pci_modern_device *mdev = NULL; mdev = vp_vdpa_mgtdev->mdev; - vp_modern_remove(mdev); vdpa_mgmtdev_unregister(&vp_vdpa_mgtdev->mgtdev); + vp_modern_remove(mdev); kfree(vp_vdpa_mgtdev->mgtdev.id_table); kfree(mdev); kfree(vp_vdpa_mgtdev); diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index dc12dbd5b43b..7be9d9d8f01c 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -1169,6 +1169,7 @@ static int vhost_vdpa_alloc_domain(struct vhost_vdpa *v) err_attach: iommu_domain_free(v->domain); + v->domain = NULL; return ret; } @@ -1213,6 +1214,7 @@ static void vhost_vdpa_cleanup(struct vhost_vdpa *v) vhost_vdpa_remove_as(v, asid); } + vhost_vdpa_free_domain(v); vhost_dev_cleanup(&v->vdev); kfree(v->vdev.vqs); } @@ -1285,7 +1287,6 @@ static int vhost_vdpa_release(struct inode *inode, struct file *filep) vhost_vdpa_clean_irq(v); vhost_vdpa_reset(v); vhost_dev_stop(&v->vdev); - vhost_vdpa_free_domain(v); vhost_vdpa_config_put(v); vhost_vdpa_cleanup(v); mutex_unlock(&d->mutex); diff --git a/drivers/video/fbdev/chipsfb.c b/drivers/video/fbdev/chipsfb.c index cc37ec3f8fc1..7799d52a651f 100644 --- a/drivers/video/fbdev/chipsfb.c +++ b/drivers/video/fbdev/chipsfb.c @@ -358,16 +358,21 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) if (rc) return rc; - if (pci_enable_device(dp) < 0) { + rc = pci_enable_device(dp); + if (rc < 0) { dev_err(&dp->dev, "Cannot enable PCI device\n"); goto err_out; } - if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) + if ((dp->resource[0].flags & IORESOURCE_MEM) == 0) { + rc = -ENODEV; goto err_disable; + } addr = pci_resource_start(dp, 0); - if (addr == 0) + if (addr == 0) { + rc = -ENODEV; goto err_disable; + } p = framebuffer_alloc(0, &dp->dev); if (p == NULL) { @@ -417,7 +422,8 @@ static int chipsfb_pci_init(struct pci_dev *dp, const struct pci_device_id *ent) init_chips(p, addr); - if (register_framebuffer(p) < 0) { + rc = 
register_framebuffer(p); + if (rc < 0) { dev_err(&dp->dev,"C&T 65550 framebuffer failed to register\n"); goto err_unmap; } diff --git a/drivers/video/fbdev/core/fb_defio.c b/drivers/video/fbdev/core/fb_defio.c index aa5f059d0222..274f5d0fa247 100644 --- a/drivers/video/fbdev/core/fb_defio.c +++ b/drivers/video/fbdev/core/fb_defio.c @@ -305,17 +305,18 @@ void fb_deferred_io_open(struct fb_info *info, struct inode *inode, struct file *file) { + struct fb_deferred_io *fbdefio = info->fbdefio; + file->f_mapping->a_ops = &fb_deferred_io_aops; + fbdefio->open_count++; } EXPORT_SYMBOL_GPL(fb_deferred_io_open); -void fb_deferred_io_release(struct fb_info *info) +static void fb_deferred_io_lastclose(struct fb_info *info) { - struct fb_deferred_io *fbdefio = info->fbdefio; struct page *page; int i; - BUG_ON(!fbdefio); cancel_delayed_work_sync(&info->deferred_work); /* clear out the mapping that we setup */ @@ -324,13 +325,21 @@ void fb_deferred_io_release(struct fb_info *info) page->mapping = NULL; } } + +void fb_deferred_io_release(struct fb_info *info) +{ + struct fb_deferred_io *fbdefio = info->fbdefio; + + if (!--fbdefio->open_count) + fb_deferred_io_lastclose(info); +} EXPORT_SYMBOL_GPL(fb_deferred_io_release); void fb_deferred_io_cleanup(struct fb_info *info) { struct fb_deferred_io *fbdefio = info->fbdefio; - fb_deferred_io_release(info); + fb_deferred_io_lastclose(info); kvfree(info->pagerefs); mutex_destroy(&fbdefio->lock); diff --git a/drivers/xen/xenfs/xensyms.c b/drivers/xen/xenfs/xensyms.c index c6c73a33c44d..b799bc759c15 100644 --- a/drivers/xen/xenfs/xensyms.c +++ b/drivers/xen/xenfs/xensyms.c @@ -64,7 +64,7 @@ static int xensyms_next_sym(struct xensyms *xs) static void *xensyms_start(struct seq_file *m, loff_t *pos) { - struct xensyms *xs = (struct xensyms *)m->private; + struct xensyms *xs = m->private; xs->op.u.symdata.symnum = *pos; @@ -76,7 +76,7 @@ static void *xensyms_start(struct seq_file *m, loff_t *pos) static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos) { - struct xensyms *xs = (struct xensyms *)m->private; + struct xensyms *xs = m->private; xs->op.u.symdata.symnum = ++(*pos); @@ -88,7 +88,7 @@ static void *xensyms_next(struct seq_file *m, void *p, loff_t *pos) static int xensyms_show(struct seq_file *m, void *p) { - struct xensyms *xs = (struct xensyms *)m->private; + struct xensyms *xs = m->private; struct xenpf_symdata *symdata = &xs->op.u.symdata; seq_printf(m, "%016llx %c %s\n", symdata->address, @@ -120,7 +120,7 @@ static int xensyms_open(struct inode *inode, struct file *file) return ret; m = file->private_data; - xs = (struct xensyms *)m->private; + xs = m->private; xs->namelen = XEN_KSYM_NAME_LEN + 1; xs->name = kzalloc(xs->namelen, GFP_KERNEL); @@ -138,7 +138,7 @@ static int xensyms_open(struct inode *inode, struct file *file) static int xensyms_release(struct inode *inode, struct file *file) { struct seq_file *m = file->private_data; - struct xensyms *xs = (struct xensyms *)m->private; + struct xensyms *xs = m->private; kfree(xs->name); return seq_release_private(inode, file); diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 1911f7016fa1..19a70a69c760 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c @@ -420,6 +420,11 @@ skip_rdma: from_kuid(&init_user_ns, ses->linux_uid), from_kuid(&init_user_ns, ses->cred_uid)); + if (ses->dfs_root_ses) { + seq_printf(m, "\n\tDFS root session id: 0x%llx", + ses->dfs_root_ses->Suid); + } + spin_lock(&ses->chan_lock); if (CIFS_CHAN_NEEDS_RECONNECT(ses, 0)) seq_puts(m, "\tPrimary channel: 
DISCONNECTED "); diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 2b1a8d55b4ec..cb40074feb3e 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -179,6 +179,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct path *path) tmp.source = full_path; tmp.leaf_fullpath = NULL; tmp.UNC = tmp.prepath = NULL; + tmp.dfs_root_ses = NULL; rc = smb3_fs_context_dup(ctx, &tmp); if (rc) { diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index 013a4bd65280..651759192280 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h @@ -61,8 +61,6 @@ struct cifs_sb_info { /* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */ char *prepath; - /* randomly generated 128-bit number for indexing dfs mount groups in referral cache */ - uuid_t dfs_mount_id; /* * Indicate whether serverino option was turned off later * (cifs_autodisable_serverino) in order to match new mounts. diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index a99883f16d94..08a73dcb7786 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -1233,6 +1233,7 @@ struct cifs_tcon { /* BB add field for back pointer to sb struct(s)? */ #ifdef CONFIG_CIFS_DFS_UPCALL struct list_head ulist; /* cache update list */ + struct list_head dfs_ses_list; #endif struct delayed_work query_interfaces; /* query interfaces workqueue job */ }; @@ -1749,9 +1750,8 @@ struct cifs_mount_ctx { struct TCP_Server_Info *server; struct cifs_ses *ses; struct cifs_tcon *tcon; - struct cifs_ses *root_ses; - uuid_t mount_id; char *origin_fullpath, *leaf_fullpath; + struct list_head dfs_ses_list; }; static inline void free_dfs_info_param(struct dfs_info3_param *param) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 5233f14f0636..0eceddde7140 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -2229,6 +2229,7 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx) * need to lock before changing something in the session. 
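[Editor's note] The cifs changes above replace the uuid-keyed mount groups with per-mount (and later per-tcon) lists of DFS referral-server sessions: a session reference is taken when an entry is added to the list and dropped again when the list is torn down. A toy, single-threaded sketch of that get/put discipline with invented types (the kernel does the increment under a spinlock and frees via cifs_put_smb_ses):

    #include <stdio.h>
    #include <stdlib.h>

    struct session { int count; const char *name; };

    static struct session *get_session_ref(struct session *s)
    {
        s->count++;                 /* kernel side bumps ses_count under a lock */
        return s;
    }

    static void put_session_ref(struct session *s)
    {
        if (--s->count == 0) {
            printf("freeing %s\n", s->name);
            free(s);
        }
    }

    int main(void)
    {
        struct session *ses = malloc(sizeof(*ses));

        ses->count = 1;             /* reference held by the connection */
        ses->name = "root-ses";

        get_session_ref(ses);       /* reference held by the DFS mount list */
        put_session_ref(ses);       /* list teardown */
        put_session_ref(ses);       /* connection drop -> freed here */
        return 0;
    }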
*/ spin_lock(&cifs_tcp_ses_lock); + ses->dfs_root_ses = ctx->dfs_root_ses; list_add(&ses->smb_ses_list, &server->smb_ses_list); spin_unlock(&cifs_tcp_ses_lock); @@ -3407,7 +3408,8 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) bool isdfs; int rc; - uuid_gen(&mnt_ctx.mount_id); + INIT_LIST_HEAD(&mnt_ctx.dfs_ses_list); + rc = dfs_mount_share(&mnt_ctx, &isdfs); if (rc) goto error; @@ -3427,7 +3429,6 @@ int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx) kfree(cifs_sb->prepath); cifs_sb->prepath = ctx->prepath; ctx->prepath = NULL; - uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id); out: cifs_try_adding_channels(cifs_sb, mnt_ctx.ses); @@ -3439,7 +3440,7 @@ out: return rc; error: - dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id); + dfs_put_root_smb_sessions(&mnt_ctx.dfs_ses_list); kfree(mnt_ctx.origin_fullpath); kfree(mnt_ctx.leaf_fullpath); cifs_mount_put_conns(&mnt_ctx); @@ -3637,9 +3638,6 @@ cifs_umount(struct cifs_sb_info *cifs_sb) spin_unlock(&cifs_sb->tlink_tree_lock); kfree(cifs_sb->prepath); -#ifdef CONFIG_CIFS_DFS_UPCALL - dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id); -#endif call_rcu(&cifs_sb->rcu, delayed_free); } diff --git a/fs/cifs/dfs.c b/fs/cifs/dfs.c index b64d20374b9c..c8bda52fa096 100644 --- a/fs/cifs/dfs.c +++ b/fs/cifs/dfs.c @@ -95,25 +95,31 @@ static int get_session(struct cifs_mount_ctx *mnt_ctx, const char *full_path) ctx->leaf_fullpath = (char *)full_path; rc = cifs_mount_get_session(mnt_ctx); ctx->leaf_fullpath = NULL; - if (!rc) { - struct cifs_ses *ses = mnt_ctx->ses; - mutex_lock(&ses->session_mutex); - ses->dfs_root_ses = mnt_ctx->root_ses; - mutex_unlock(&ses->session_mutex); - } return rc; } -static void set_root_ses(struct cifs_mount_ctx *mnt_ctx) +static int get_root_smb_session(struct cifs_mount_ctx *mnt_ctx) { - if (mnt_ctx->ses) { + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; + struct dfs_root_ses *root_ses; + struct cifs_ses *ses = mnt_ctx->ses; + + if (ses) { + root_ses = kmalloc(sizeof(*root_ses), GFP_KERNEL); + if (!root_ses) + return -ENOMEM; + + INIT_LIST_HEAD(&root_ses->list); + spin_lock(&cifs_tcp_ses_lock); - mnt_ctx->ses->ses_count++; + ses->ses_count++; spin_unlock(&cifs_tcp_ses_lock); - dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses); + root_ses->ses = ses; + list_add_tail(&root_ses->list, &mnt_ctx->dfs_ses_list); } - mnt_ctx->root_ses = mnt_ctx->ses; + ctx->dfs_root_ses = ses; + return 0; } static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, const char *full_path, @@ -121,7 +127,8 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co { struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct dfs_info3_param ref = {}; - int rc; + bool is_refsrv = false; + int rc, rc2; rc = dfs_cache_get_tgt_referral(ref_path + 1, tit, &ref); if (rc) @@ -136,8 +143,7 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co if (rc) goto out; - if (ref.flags & DFSREF_REFERRAL_SERVER) - set_root_ses(mnt_ctx); + is_refsrv = !!(ref.flags & DFSREF_REFERRAL_SERVER); rc = -EREMOTE; if (ref.flags & DFSREF_STORAGE_SERVER) { @@ -146,13 +152,17 @@ static int get_dfs_conn(struct cifs_mount_ctx *mnt_ctx, const char *ref_path, co goto out; /* some servers may not advertise referral capability under ref.flags */ - if (!(ref.flags & DFSREF_REFERRAL_SERVER) && - is_tcon_dfs(mnt_ctx->tcon)) - set_root_ses(mnt_ctx); + is_refsrv |= is_tcon_dfs(mnt_ctx->tcon); rc = cifs_is_path_remote(mnt_ctx); } + if (rc == -EREMOTE && 
is_refsrv) { + rc2 = get_root_smb_session(mnt_ctx); + if (rc2) + rc = rc2; + } + out: free_dfs_info_param(&ref); return rc; @@ -165,6 +175,7 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) char *ref_path = NULL, *full_path = NULL; struct dfs_cache_tgt_iterator *tit; struct TCP_Server_Info *server; + struct cifs_tcon *tcon; char *origin_fullpath = NULL; int num_links = 0; int rc; @@ -234,12 +245,22 @@ static int __dfs_mount_share(struct cifs_mount_ctx *mnt_ctx) if (!rc) { server = mnt_ctx->server; + tcon = mnt_ctx->tcon; mutex_lock(&server->refpath_lock); - server->origin_fullpath = origin_fullpath; - server->current_fullpath = server->leaf_fullpath; + if (!server->origin_fullpath) { + server->origin_fullpath = origin_fullpath; + server->current_fullpath = server->leaf_fullpath; + origin_fullpath = NULL; + } mutex_unlock(&server->refpath_lock); - origin_fullpath = NULL; + + if (list_empty(&tcon->dfs_ses_list)) { + list_replace_init(&mnt_ctx->dfs_ses_list, + &tcon->dfs_ses_list); + } else { + dfs_put_root_smb_sessions(&mnt_ctx->dfs_ses_list); + } } out: @@ -260,7 +281,7 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs) rc = get_session(mnt_ctx, NULL); if (rc) return rc; - mnt_ctx->root_ses = mnt_ctx->ses; + ctx->dfs_root_ses = mnt_ctx->ses; /* * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally * try to get an DFS referral (even cached) to determine whether it is an DFS mount. @@ -280,7 +301,9 @@ int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs) } *isdfs = true; - set_root_ses(mnt_ctx); + rc = get_root_smb_session(mnt_ctx); + if (rc) + return rc; return __dfs_mount_share(mnt_ctx); } diff --git a/fs/cifs/dfs.h b/fs/cifs/dfs.h index 344bea6d8bab..13f26e01f7b9 100644 --- a/fs/cifs/dfs.h +++ b/fs/cifs/dfs.h @@ -10,6 +10,11 @@ #include "fs_context.h" #include "cifs_unicode.h" +struct dfs_root_ses { + struct list_head list; + struct cifs_ses *ses; +}; + int dfs_parse_target_referral(const char *full_path, const struct dfs_info3_param *ref, struct smb3_fs_context *ctx); int dfs_mount_share(struct cifs_mount_ctx *mnt_ctx, bool *isdfs); @@ -22,9 +27,10 @@ static inline char *dfs_get_path(struct cifs_sb_info *cifs_sb, const char *path) static inline int dfs_get_referral(struct cifs_mount_ctx *mnt_ctx, const char *path, struct dfs_info3_param *ref, struct dfs_cache_tgt_list *tl) { + struct smb3_fs_context *ctx = mnt_ctx->fs_ctx; struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb; - return dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls, + return dfs_cache_find(mnt_ctx->xid, ctx->dfs_root_ses, cifs_sb->local_nls, cifs_remap(cifs_sb), path, ref, tl); } @@ -43,4 +49,15 @@ static inline char *dfs_get_automount_devname(struct dentry *dentry, void *page) true); } +static inline void dfs_put_root_smb_sessions(struct list_head *head) +{ + struct dfs_root_ses *root, *tmp; + + list_for_each_entry_safe(root, tmp, head, list) { + list_del_init(&root->list); + cifs_put_smb_ses(root->ses); + kfree(root); + } +} + #endif /* _CIFS_DFS_H */ diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c index ac86bd0ebd63..1c59811bfa73 100644 --- a/fs/cifs/dfs_cache.c +++ b/fs/cifs/dfs_cache.c @@ -49,17 +49,6 @@ struct cache_entry { struct cache_dfs_tgt *tgthint; }; -/* List of referral server sessions per dfs mount */ -struct mount_group { - struct list_head list; - uuid_t id; - struct cifs_ses *sessions[CACHE_MAX_ENTRIES]; - int num_sessions; - spinlock_t lock; - struct list_head refresh_list; - struct kref refcount; -}; - 
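[Editor's note] dfs_put_root_smb_sessions() above walks the session list with list_for_each_entry_safe() precisely because each entry is unlinked and freed during the walk. A userspace analogue of the same idea on a plain singly linked list, caching the next pointer before each node is released (types invented for the example):

    #include <stdio.h>
    #include <stdlib.h>

    struct root_ses {
        struct root_ses *next;
        int id;
    };

    static void put_root_sessions(struct root_ses **head)
    {
        struct root_ses *cur = *head, *tmp;

        while (cur) {
            tmp = cur->next;        /* grab next before freeing the node */
            printf("putting session %d\n", cur->id);
            free(cur);
            cur = tmp;
        }
        *head = NULL;
    }

    int main(void)
    {
        struct root_ses *head = NULL;

        for (int i = 0; i < 3; i++) {
            struct root_ses *r = malloc(sizeof(*r));

            r->id = i;
            r->next = head;
            head = r;
        }
        put_root_sessions(&head);
        return 0;
    }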
static struct kmem_cache *cache_slab __read_mostly; static struct workqueue_struct *dfscache_wq __read_mostly; @@ -76,85 +65,10 @@ static atomic_t cache_count; static struct hlist_head cache_htable[CACHE_HTABLE_SIZE]; static DECLARE_RWSEM(htable_rw_lock); -static LIST_HEAD(mount_group_list); -static DEFINE_MUTEX(mount_group_list_lock); - static void refresh_cache_worker(struct work_struct *work); static DECLARE_DELAYED_WORK(refresh_task, refresh_cache_worker); -static void __mount_group_release(struct mount_group *mg) -{ - int i; - - for (i = 0; i < mg->num_sessions; i++) - cifs_put_smb_ses(mg->sessions[i]); - kfree(mg); -} - -static void mount_group_release(struct kref *kref) -{ - struct mount_group *mg = container_of(kref, struct mount_group, refcount); - - mutex_lock(&mount_group_list_lock); - list_del(&mg->list); - mutex_unlock(&mount_group_list_lock); - __mount_group_release(mg); -} - -static struct mount_group *find_mount_group_locked(const uuid_t *id) -{ - struct mount_group *mg; - - list_for_each_entry(mg, &mount_group_list, list) { - if (uuid_equal(&mg->id, id)) - return mg; - } - return ERR_PTR(-ENOENT); -} - -static struct mount_group *__get_mount_group_locked(const uuid_t *id) -{ - struct mount_group *mg; - - mg = find_mount_group_locked(id); - if (!IS_ERR(mg)) - return mg; - - mg = kmalloc(sizeof(*mg), GFP_KERNEL); - if (!mg) - return ERR_PTR(-ENOMEM); - kref_init(&mg->refcount); - uuid_copy(&mg->id, id); - mg->num_sessions = 0; - spin_lock_init(&mg->lock); - list_add(&mg->list, &mount_group_list); - return mg; -} - -static struct mount_group *get_mount_group(const uuid_t *id) -{ - struct mount_group *mg; - - mutex_lock(&mount_group_list_lock); - mg = __get_mount_group_locked(id); - if (!IS_ERR(mg)) - kref_get(&mg->refcount); - mutex_unlock(&mount_group_list_lock); - - return mg; -} - -static void free_mount_group_list(void) -{ - struct mount_group *mg, *tmp_mg; - - list_for_each_entry_safe(mg, tmp_mg, &mount_group_list, list) { - list_del_init(&mg->list); - __mount_group_release(mg); - } -} - /** * dfs_cache_canonical_path - get a canonical DFS path * @@ -704,7 +618,6 @@ void dfs_cache_destroy(void) { cancel_delayed_work_sync(&refresh_task); unload_nls(cache_cp); - free_mount_group_list(); flush_cache_ents(); kmem_cache_destroy(cache_slab); destroy_workqueue(dfscache_wq); @@ -1111,54 +1024,6 @@ out_unlock: return rc; } -/** - * dfs_cache_add_refsrv_session - add SMB session of referral server - * - * @mount_id: mount group uuid to lookup. - * @ses: reference counted SMB session of referral server. - */ -void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses) -{ - struct mount_group *mg; - - if (WARN_ON_ONCE(!mount_id || uuid_is_null(mount_id) || !ses)) - return; - - mg = get_mount_group(mount_id); - if (WARN_ON_ONCE(IS_ERR(mg))) - return; - - spin_lock(&mg->lock); - if (mg->num_sessions < ARRAY_SIZE(mg->sessions)) - mg->sessions[mg->num_sessions++] = ses; - spin_unlock(&mg->lock); - kref_put(&mg->refcount, mount_group_release); -} - -/** - * dfs_cache_put_refsrv_sessions - put all referral server sessions - * - * Put all SMB sessions from the given mount group id. - * - * @mount_id: mount group uuid to lookup. 
- */ -void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id) -{ - struct mount_group *mg; - - if (!mount_id || uuid_is_null(mount_id)) - return; - - mutex_lock(&mount_group_list_lock); - mg = find_mount_group_locked(mount_id); - if (IS_ERR(mg)) { - mutex_unlock(&mount_group_list_lock); - return; - } - mutex_unlock(&mount_group_list_lock); - kref_put(&mg->refcount, mount_group_release); -} - /* Extract share from DFS target and return a pointer to prefix path or NULL */ static const char *parse_target_share(const char *target, char **share) { @@ -1384,11 +1249,6 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb) cifs_dbg(FYI, "%s: not a dfs mount\n", __func__); return 0; } - - if (uuid_is_null(&cifs_sb->dfs_mount_id)) { - cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__); - return -EINVAL; - } /* * After reconnecting to a different server, unique ids won't match anymore, so we disable * serverino. This prevents dentry revalidation to think the dentry are stale (ESTALE). diff --git a/fs/cifs/dfs_cache.h b/fs/cifs/dfs_cache.h index be3b5a44cf82..e0d39393035a 100644 --- a/fs/cifs/dfs_cache.h +++ b/fs/cifs/dfs_cache.h @@ -40,8 +40,6 @@ int dfs_cache_get_tgt_referral(const char *path, const struct dfs_cache_tgt_iter struct dfs_info3_param *ref); int dfs_cache_get_tgt_share(char *path, const struct dfs_cache_tgt_iterator *it, char **share, char **prefix); -void dfs_cache_put_refsrv_sessions(const uuid_t *mount_id); -void dfs_cache_add_refsrv_session(const uuid_t *mount_id, struct cifs_ses *ses); char *dfs_cache_canonical_path(const char *path, const struct nls_table *cp, int remap); int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb); diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h index 44cb5639ed3b..1b8d4e27f831 100644 --- a/fs/cifs/fs_context.h +++ b/fs/cifs/fs_context.h @@ -265,6 +265,7 @@ struct smb3_fs_context { bool rootfs:1; /* if it's a SMB root file system */ bool witness:1; /* use witness protocol */ char *leaf_fullpath; + struct cifs_ses *dfs_root_ses; }; extern const struct fs_parameter_spec smb3_fs_parameters[]; diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c index a0d286ee723d..b44fb51968bf 100644 --- a/fs/cifs/misc.c +++ b/fs/cifs/misc.c @@ -22,6 +22,7 @@ #ifdef CONFIG_CIFS_DFS_UPCALL #include "dns_resolve.h" #include "dfs_cache.h" +#include "dfs.h" #endif #include "fs_context.h" #include "cached_dir.h" @@ -134,6 +135,9 @@ tconInfoAlloc(void) spin_lock_init(&ret_buf->stat_lock); atomic_set(&ret_buf->num_local_opens, 0); atomic_set(&ret_buf->num_remote_opens, 0); +#ifdef CONFIG_CIFS_DFS_UPCALL + INIT_LIST_HEAD(&ret_buf->dfs_ses_list); +#endif return ret_buf; } @@ -149,6 +153,9 @@ tconInfoFree(struct cifs_tcon *tcon) atomic_dec(&tconInfoAllocCount); kfree(tcon->nativeFileSystem); kfree_sensitive(tcon->password); +#ifdef CONFIG_CIFS_DFS_UPCALL + dfs_put_root_smb_sessions(&tcon->dfs_ses_list); +#endif kfree(tcon); } @@ -1255,6 +1262,7 @@ int cifs_inval_name_dfs_link_error(const unsigned int xid, * removing cached DFS targets that the client would eventually * need during failover. 
*/ + ses = CIFS_DFS_ROOT_SES(ses); if (ses->server->ops->get_dfs_refer && !ses->server->ops->get_dfs_refer(xid, ses, ref_path, &refs, &num_refs, cifs_sb->local_nls, diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c index 9b956294e864..8dd3791b5c53 100644 --- a/fs/cifs/smb2inode.c +++ b/fs/cifs/smb2inode.c @@ -234,15 +234,32 @@ static int smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon, size[0] = 8; /* sizeof __le64 */ data[0] = ptr; - rc = SMB2_set_info_init(tcon, server, - &rqst[num_rqst], COMPOUND_FID, - COMPOUND_FID, current->tgid, - FILE_END_OF_FILE_INFORMATION, - SMB2_O_INFO_FILE, 0, data, size); + if (cfile) { + rc = SMB2_set_info_init(tcon, server, + &rqst[num_rqst], + cfile->fid.persistent_fid, + cfile->fid.volatile_fid, + current->tgid, + FILE_END_OF_FILE_INFORMATION, + SMB2_O_INFO_FILE, 0, + data, size); + } else { + rc = SMB2_set_info_init(tcon, server, + &rqst[num_rqst], + COMPOUND_FID, + COMPOUND_FID, + current->tgid, + FILE_END_OF_FILE_INFORMATION, + SMB2_O_INFO_FILE, 0, + data, size); + if (!rc) { + smb2_set_next_command(tcon, &rqst[num_rqst]); + smb2_set_related(&rqst[num_rqst]); + } + } if (rc) goto finished; - smb2_set_next_command(tcon, &rqst[num_rqst]); - smb2_set_related(&rqst[num_rqst++]); + num_rqst++; trace_smb3_set_eof_enter(xid, ses->Suid, tcon->tid, full_path); break; case SMB2_OP_SET_INFO: diff --git a/fs/cifs/smb2transport.c b/fs/cifs/smb2transport.c index 381babc1212c..d827b7547ffa 100644 --- a/fs/cifs/smb2transport.c +++ b/fs/cifs/smb2transport.c @@ -425,7 +425,7 @@ generate_smb3signingkey(struct cifs_ses *ses, /* safe to access primary channel, since it will never go away */ spin_lock(&ses->chan_lock); - memcpy(ses->chans[0].signkey, ses->smb3signingkey, + memcpy(ses->chans[chan_index].signkey, ses->smb3signingkey, SMB3_SIGN_KEY_SIZE); spin_unlock(&ses->chan_lock); diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b42050c68e6c..24bdd5f4d3bc 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -278,7 +278,7 @@ static int __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, struct smb_rqst *rqst) { - int rc = 0; + int rc; struct kvec *iov; int n_vec; unsigned int send_length = 0; @@ -289,6 +289,7 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, struct msghdr smb_msg = {}; __be32 rfc1002_marker; + cifs_in_send_inc(server); if (cifs_rdma_enabled(server)) { /* return -EAGAIN when connecting or reconnecting */ rc = -EAGAIN; @@ -297,14 +298,17 @@ __smb_send_rqst(struct TCP_Server_Info *server, int num_rqst, goto smbd_done; } + rc = -EAGAIN; if (ssocket == NULL) - return -EAGAIN; + goto out; + rc = -ERESTARTSYS; if (fatal_signal_pending(current)) { cifs_dbg(FYI, "signal pending before send request\n"); - return -ERESTARTSYS; + goto out; } + rc = 0; /* cork the socket */ tcp_sock_set_cork(ssocket->sk, true); @@ -407,7 +411,8 @@ smbd_done: rc); else if (rc > 0) rc = 0; - +out: + cifs_in_send_dec(server); return rc; } @@ -826,9 +831,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_rqst *rqst, * I/O response may come back and free the mid entry on another thread. 
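[Editor's note] The fs/cifs/transport.c hunks above move cifs_in_send_inc()/cifs_in_send_dec() into __smb_send_rqst() and turn the early returns into goto out, so the in-flight counter is always balanced on every exit path. A compact userspace sketch of that pattern (invented names; the kernel path returns -ERESTARTSYS where this uses -EINTR):

    #include <errno.h>
    #include <stdio.h>

    static int in_send;

    static int send_rqst(int sock_ok, int signal_pending)
    {
        int rc;

        in_send++;                  /* cifs_in_send_inc() analogue */

        rc = -EAGAIN;
        if (!sock_ok)
            goto out;

        rc = -EINTR;
        if (signal_pending)
            goto out;

        rc = 0;                     /* the actual send would happen here */
    out:
        in_send--;                  /* always balanced, even on errors */
        return rc;
    }

    int main(void)
    {
        send_rqst(0, 0);
        send_rqst(1, 1);
        send_rqst(1, 0);
        printf("in_send after all calls: %d\n", in_send);   /* 0 */
        return 0;
    }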
*/ cifs_save_when_sent(mid); - cifs_in_send_inc(server); rc = smb_send_rqst(server, 1, rqst, flags); - cifs_in_send_dec(server); if (rc < 0) { revert_current_mid(server, mid->credits); @@ -1144,9 +1147,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses, else midQ[i]->callback = cifs_compound_last_callback; } - cifs_in_send_inc(server); rc = smb_send_rqst(server, num_rqst, rqst, flags); - cifs_in_send_dec(server); for (i = 0; i < num_rqst; i++) cifs_save_when_sent(midQ[i]); @@ -1396,9 +1397,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses, midQ->mid_state = MID_REQUEST_SUBMITTED; - cifs_in_send_inc(server); rc = smb_send(server, in_buf, len); - cifs_in_send_dec(server); cifs_save_when_sent(midQ); if (rc < 0) @@ -1539,9 +1538,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, } midQ->mid_state = MID_REQUEST_SUBMITTED; - cifs_in_send_inc(server); rc = smb_send(server, in_buf, len); - cifs_in_send_dec(server); cifs_save_when_sent(midQ); if (rc < 0) diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c index 1d65f6ef00ca..0394505fdce3 100644 --- a/fs/ocfs2/aops.c +++ b/fs/ocfs2/aops.c @@ -1977,11 +1977,26 @@ int ocfs2_write_end_nolock(struct address_space *mapping, } if (unlikely(copied < len) && wc->w_target_page) { + loff_t new_isize; + if (!PageUptodate(wc->w_target_page)) copied = 0; - ocfs2_zero_new_buffers(wc->w_target_page, start+copied, - start+len); + new_isize = max_t(loff_t, i_size_read(inode), pos + copied); + if (new_isize > page_offset(wc->w_target_page)) + ocfs2_zero_new_buffers(wc->w_target_page, start+copied, + start+len); + else { + /* + * When page is fully beyond new isize (data copy + * failed), do not bother zeroing the page. Invalidate + * it instead so that writeback does not get confused + * put page & buffer dirty bits into inconsistent + * state. + */ + block_invalidate_folio(page_folio(wc->w_target_page), + 0, PAGE_SIZE); + } } if (wc->w_target_page) flush_dcache_page(wc->w_target_page); diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h index 0584e9f6e339..57acb895c038 100644 --- a/include/acpi/acpi_bus.h +++ b/include/acpi/acpi_bus.h @@ -657,6 +657,7 @@ static inline bool acpi_quirk_skip_acpi_ac_and_battery(void) #if IS_ENABLED(CONFIG_X86_ANDROID_TABLETS) bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev); int acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip); +bool acpi_quirk_skip_gpio_event_handlers(void); #else static inline bool acpi_quirk_skip_i2c_client_enumeration(struct acpi_device *adev) { @@ -668,6 +669,10 @@ acpi_quirk_skip_serdev_enumeration(struct device *controller_parent, bool *skip) *skip = false; return 0; } +static inline bool acpi_quirk_skip_gpio_event_handlers(void) +{ + return false; +} #endif #ifdef CONFIG_PM diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h index 42f86327b40a..bf964cdfb330 100644 --- a/include/drm/drm_bridge.h +++ b/include/drm/drm_bridge.h @@ -423,11 +423,11 @@ struct drm_bridge_funcs { * * The returned array must be allocated with kmalloc() and will be * freed by the caller. If the allocation fails, NULL should be - * returned. num_output_fmts must be set to the returned array size. + * returned. num_input_fmts must be set to the returned array size. * Formats listed in the returned array should be listed in decreasing * preference order (the core will try all formats until it finds one * that works). 
When the format is not supported NULL should be - * returned and num_output_fmts should be set to 0. + * returned and num_input_fmts should be set to 0. * * This method is called on all elements of the bridge chain as part of * the bus format negotiation process that happens in diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h index 772a4adf5287..f1f00fc2dba6 100644 --- a/include/drm/drm_gem.h +++ b/include/drm/drm_gem.h @@ -476,7 +476,9 @@ int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev, void drm_gem_lru_init(struct drm_gem_lru *lru, struct mutex *lock); void drm_gem_lru_remove(struct drm_gem_object *obj); void drm_gem_lru_move_tail(struct drm_gem_lru *lru, struct drm_gem_object *obj); -unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, unsigned nr_to_scan, +unsigned long drm_gem_lru_scan(struct drm_gem_lru *lru, + unsigned int nr_to_scan, + unsigned long *remaining, bool (*shrink)(struct drm_gem_object *obj)); #endif /* __DRM_GEM_H__ */ diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index 71916de7c6c4..c52a6e6839da 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -23,6 +23,19 @@ enum kvm_arch_timer_regs { TIMER_REG_CTL, }; +struct arch_timer_offset { + /* + * If set, pointer to one of the offsets in the kvm's offset + * structure. If NULL, assume a zero offset. + */ + u64 *vm_offset; +}; + +struct arch_timer_vm_data { + /* Offset applied to the virtual timer/counter */ + u64 voffset; +}; + struct arch_timer_context { struct kvm_vcpu *vcpu; @@ -32,6 +45,8 @@ struct arch_timer_context { /* Emulated Timer (may be unused) */ struct hrtimer hrtimer; + /* Offset for this counter/timer */ + struct arch_timer_offset offset; /* * We have multiple paths which can save/restore the timer state onto * the hardware, so we need some way of keeping track of where the diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index dd5ce1137f04..de0b0c3e7395 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -228,6 +228,12 @@ static inline unsigned short req_get_ioprio(struct request *req) *(listptr) = rq; \ } while (0) +#define rq_list_add_tail(lastpptr, rq) do { \ + (rq)->rq_next = NULL; \ + **(lastpptr) = rq; \ + *(lastpptr) = &rq->rq_next; \ +} while (0) + #define rq_list_pop(listptr) \ ({ \ struct request *__req = NULL; \ diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index d1aee08f8c18..941304f17492 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -1446,11 +1446,10 @@ static inline void blk_wake_io_task(struct task_struct *waiter) wake_up_process(waiter); } -unsigned long bdev_start_io_acct(struct block_device *bdev, - unsigned int sectors, enum req_op op, +unsigned long bdev_start_io_acct(struct block_device *bdev, enum req_op op, unsigned long start_time); void bdev_end_io_acct(struct block_device *bdev, enum req_op op, - unsigned long start_time); + unsigned int sectors, unsigned long start_time); unsigned long bio_start_io_acct(struct bio *bio); void bio_end_io_acct_remapped(struct bio *bio, unsigned long start_time, diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h index 842e72a5348f..6f3175f0678a 100644 --- a/include/linux/clk-provider.h +++ b/include/linux/clk-provider.h @@ -1363,7 +1363,13 @@ struct clk_hw_onecell_data { struct clk_hw *hws[]; }; -#define CLK_OF_DECLARE(name, compat, fn) OF_DECLARE_1(clk, name, compat, fn) +#define CLK_OF_DECLARE(name, compat, fn) \ + static void __init 
__##name##_of_clk_init_declare(struct device_node *np) \ + { \ + fn(np); \ + fwnode_dev_initialized(of_fwnode_handle(np), true); \ + } \ + OF_DECLARE_1(clk, name, compat, __##name##_of_clk_init_declare) /* * Use this macro when you have a driver that requires two initialization diff --git a/include/linux/fb.h b/include/linux/fb.h index d8d20514ea05..02d09cb57f6c 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -212,6 +212,7 @@ struct fb_deferred_io { /* delay between mkwrite and deferred handler */ unsigned long delay; bool sort_pagereflist; /* sort pagelist by offset */ + int open_count; /* number of opened files; protected by fb_info lock */ struct mutex lock; /* mutex that protects the pageref list */ struct list_head pagereflist; /* list of pagerefs for touched pages */ /* callback */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 6a14b7b11766..470085b121d3 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -297,9 +297,11 @@ struct hh_cache { * relationship HH alignment <= LL alignment. */ #define LL_RESERVED_SPACE(dev) \ - ((((dev)->hard_header_len+(dev)->needed_headroom)&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom)) \ + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) #define LL_RESERVED_SPACE_EXTRA(dev,extra) \ - ((((dev)->hard_header_len+(dev)->needed_headroom+(extra))&~(HH_DATA_MOD - 1)) + HH_DATA_MOD) + ((((dev)->hard_header_len + READ_ONCE((dev)->needed_headroom) + (extra)) \ + & ~(HH_DATA_MOD - 1)) + HH_DATA_MOD) struct header_ops { int (*create) (struct sk_buff *skb, struct net_device *dev, diff --git a/include/linux/nvme.h b/include/linux/nvme.h index 4fad4aa245fb..779507ac750b 100644 --- a/include/linux/nvme.h +++ b/include/linux/nvme.h @@ -812,6 +812,7 @@ enum nvme_opcode { nvme_opcode_name(nvme_cmd_compare), \ nvme_opcode_name(nvme_cmd_write_zeroes), \ nvme_opcode_name(nvme_cmd_dsm), \ + nvme_opcode_name(nvme_cmd_verify), \ nvme_opcode_name(nvme_cmd_resv_register), \ nvme_opcode_name(nvme_cmd_resv_report), \ nvme_opcode_name(nvme_cmd_resv_acquire), \ @@ -1144,10 +1145,14 @@ enum nvme_admin_opcode { nvme_admin_opcode_name(nvme_admin_ns_mgmt), \ nvme_admin_opcode_name(nvme_admin_activate_fw), \ nvme_admin_opcode_name(nvme_admin_download_fw), \ + nvme_admin_opcode_name(nvme_admin_dev_self_test), \ nvme_admin_opcode_name(nvme_admin_ns_attach), \ nvme_admin_opcode_name(nvme_admin_keep_alive), \ nvme_admin_opcode_name(nvme_admin_directive_send), \ nvme_admin_opcode_name(nvme_admin_directive_recv), \ + nvme_admin_opcode_name(nvme_admin_virtual_mgmt), \ + nvme_admin_opcode_name(nvme_admin_nvme_mi_send), \ + nvme_admin_opcode_name(nvme_admin_nvme_mi_recv), \ nvme_admin_opcode_name(nvme_admin_dbbuf), \ nvme_admin_opcode_name(nvme_admin_format_nvm), \ nvme_admin_opcode_name(nvme_admin_security_send), \ diff --git a/include/linux/pci.h b/include/linux/pci.h index fafd8020c6d7..b50e5c79f7e3 100644 --- a/include/linux/pci.h +++ b/include/linux/pci.h @@ -1438,6 +1438,7 @@ void pci_bus_add_resource(struct pci_bus *bus, struct resource *res, unsigned int flags); struct resource *pci_bus_resource_n(const struct pci_bus *bus, int n); void pci_bus_remove_resources(struct pci_bus *bus); +void pci_bus_remove_resource(struct pci_bus *bus, struct resource *res); int devm_request_pci_bus_resources(struct device *dev, struct list_head *resources); diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index e299f29375bb..6811e43c1b5c 100644 --- a/include/linux/tracepoint.h +++ 
b/include/linux/tracepoint.h @@ -242,12 +242,11 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) * not add unwanted padding between the beginning of the section and the * structure. Force alignment to the same alignment as the section start. * - * When lockdep is enabled, we make sure to always do the RCU portions of - * the tracepoint code, regardless of whether tracing is on. However, - * don't check if the condition is false, due to interaction with idle - * instrumentation. This lets us find RCU issues triggered with tracepoints - * even when this tracepoint is off. This code has no purpose other than - * poking RCU a bit. + * When lockdep is enabled, we make sure to always test if RCU is + * "watching" regardless if the tracepoint is enabled or not. Tracepoints + * require RCU to be active, and it should always warn at the tracepoint + * site if it is not watching, as it will need to be active when the + * tracepoint is enabled. */ #define __DECLARE_TRACE(name, proto, args, cond, data_proto) \ extern int __traceiter_##name(data_proto); \ @@ -260,9 +259,7 @@ static inline struct tracepoint *tracepoint_ptr_deref(tracepoint_ptr_t *p) TP_ARGS(args), \ TP_CONDITION(cond), 0); \ if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \ - rcu_read_lock_sched_notrace(); \ - rcu_dereference_sched(__tracepoint_##name.funcs);\ - rcu_read_unlock_sched_notrace(); \ + WARN_ON_ONCE(!rcu_is_watching()); \ } \ } \ __DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args), \ diff --git a/include/net/xdp.h b/include/net/xdp.h index d517bfac937b..41c57b8b1671 100644 --- a/include/net/xdp.h +++ b/include/net/xdp.h @@ -428,6 +428,7 @@ MAX_XDP_METADATA_KFUNC, #ifdef CONFIG_NET u32 bpf_xdp_metadata_kfunc_id(int id); bool bpf_dev_bound_kfunc_id(u32 btf_id); +void xdp_set_features_flag(struct net_device *dev, xdp_features_t val); void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg); void xdp_features_clear_redirect_target(struct net_device *dev); #else @@ -435,6 +436,11 @@ static inline u32 bpf_xdp_metadata_kfunc_id(int id) { return 0; } static inline bool bpf_dev_bound_kfunc_id(u32 btf_id) { return false; } static inline void +xdp_set_features_flag(struct net_device *dev, xdp_features_t val) +{ +} + +static inline void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg) { } @@ -445,4 +451,9 @@ xdp_features_clear_redirect_target(struct net_device *dev) } #endif +static inline void xdp_clear_features_flag(struct net_device *dev) +{ + xdp_set_features_flag(dev, 0); +} + #endif /* __LINUX_NET_XDP_H__ */ diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h index de310f21406c..f10a008e5bfa 100644 --- a/include/scsi/scsi_device.h +++ b/include/scsi/scsi_device.h @@ -145,6 +145,7 @@ struct scsi_device { const char * model; /* ... after scan; point to static string */ const char * rev; /* ... 
"nullnullnullnull" before scan */ +#define SCSI_DEFAULT_VPD_LEN 255 /* default SCSI VPD page size (max) */ struct scsi_vpd __rcu *vpd_pg0; struct scsi_vpd __rcu *vpd_pg83; struct scsi_vpd __rcu *vpd_pg80; @@ -215,6 +216,7 @@ struct scsi_device { * creation time */ unsigned ignore_media_change:1; /* Ignore MEDIA CHANGE on resume */ unsigned silence_suspend:1; /* Do not print runtime PM related messages */ + unsigned no_vpd_size:1; /* No VPD size reported in header */ unsigned int queue_stopped; /* request queue is quiesced */ bool offline_already; /* Device offline message logged */ diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h index 5d14adae21c7..6b548dc2c496 100644 --- a/include/scsi/scsi_devinfo.h +++ b/include/scsi/scsi_devinfo.h @@ -32,7 +32,8 @@ #define BLIST_IGN_MEDIA_CHANGE ((__force blist_flags_t)(1ULL << 11)) /* do not do automatic start on add */ #define BLIST_NOSTARTONADD ((__force blist_flags_t)(1ULL << 12)) -#define __BLIST_UNUSED_13 ((__force blist_flags_t)(1ULL << 13)) +/* do not ask for VPD page size first on some broken targets */ +#define BLIST_NO_VPD_SIZE ((__force blist_flags_t)(1ULL << 13)) #define __BLIST_UNUSED_14 ((__force blist_flags_t)(1ULL << 14)) #define __BLIST_UNUSED_15 ((__force blist_flags_t)(1ULL << 15)) #define __BLIST_UNUSED_16 ((__force blist_flags_t)(1ULL << 16)) @@ -74,8 +75,7 @@ #define __BLIST_HIGH_UNUSED (~(__BLIST_LAST_USED | \ (__force blist_flags_t) \ ((__force __u64)__BLIST_LAST_USED - 1ULL))) -#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_13 | \ - __BLIST_UNUSED_14 | \ +#define __BLIST_UNUSED_MASK (__BLIST_UNUSED_14 | \ __BLIST_UNUSED_15 | \ __BLIST_UNUSED_16 | \ __BLIST_UNUSED_24 | \ diff --git a/include/uapi/linux/fou.h b/include/uapi/linux/fou.h index 5041c3598493..b5cd3e7b3775 100644 --- a/include/uapi/linux/fou.h +++ b/include/uapi/linux/fou.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/fou.yaml */ /* YNL-GEN uapi header */ diff --git a/include/uapi/linux/netdev.h b/include/uapi/linux/netdev.h index 8c4e3e536c04..639524b59930 100644 --- a/include/uapi/linux/netdev.h +++ b/include/uapi/linux/netdev.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/netdev.yaml */ /* YNL-GEN uapi header */ @@ -33,6 +33,8 @@ enum netdev_xdp_act { NETDEV_XDP_ACT_HW_OFFLOAD = 16, NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + + NETDEV_XDP_ACT_MASK = 127, }; enum { diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h index 25a0af57dd5e..51c13cf9c5ae 100644 --- a/include/uapi/linux/rtnetlink.h +++ b/include/uapi/linux/rtnetlink.h @@ -789,6 +789,7 @@ enum { TCA_ROOT_FLAGS, TCA_ROOT_COUNT, TCA_ROOT_TIME_DELTA, /* in msecs */ + TCA_ROOT_EXT_WARN_MSG, __TCA_ROOT_MAX, #define TCA_ROOT_MAX (__TCA_ROOT_MAX - 1) }; diff --git a/include/xen/interface/platform.h b/include/xen/interface/platform.h index 655d92e803e1..79a443c65ea9 100644 --- a/include/xen/interface/platform.h +++ b/include/xen/interface/platform.h @@ -483,6 +483,8 @@ struct xenpf_symdata { }; DEFINE_GUEST_HANDLE_STRUCT(xenpf_symdata); +#define XENPF_get_dom0_console 64 + struct xen_platform_op { uint32_t cmd; uint32_t 
interface_version; /* XENPF_INTERFACE_VERSION */ @@ -506,6 +508,7 @@ struct xen_platform_op { struct xenpf_mem_hotadd mem_add; struct xenpf_core_parking core_parking; struct xenpf_symdata symdata; + struct dom0_vga_console_info dom0_console; uint8_t pad[128]; } u; }; diff --git a/io_uring/msg_ring.c b/io_uring/msg_ring.c index 8803c0979e2a..85fd7ce5f05b 100644 --- a/io_uring/msg_ring.c +++ b/io_uring/msg_ring.c @@ -202,7 +202,7 @@ static int io_msg_install_complete(struct io_kiocb *req, unsigned int issue_flag * completes with -EOVERFLOW, then the sender must ensure that a * later IORING_OP_MSG_RING delivers the message. */ - if (!io_post_aux_cqe(target_ctx, msg->user_data, msg->len, 0)) + if (!io_post_aux_cqe(target_ctx, msg->user_data, ret, 0)) ret = -EOVERFLOW; out_unlock: io_double_unlock_ctx(target_ctx); @@ -229,6 +229,8 @@ static int io_msg_send_fd(struct io_kiocb *req, unsigned int issue_flags) struct io_ring_ctx *ctx = req->ctx; struct file *src_file = msg->src_file; + if (msg->len) + return -EINVAL; if (target_ctx == ctx) return -EINVAL; if (target_ctx->flags & IORING_SETUP_R_DISABLED) diff --git a/io_uring/rsrc.c b/io_uring/rsrc.c index 056f40946ff6..e2bac9f89902 100644 --- a/io_uring/rsrc.c +++ b/io_uring/rsrc.c @@ -410,7 +410,7 @@ __cold static int io_rsrc_data_alloc(struct io_ring_ctx *ctx, unsigned nr, struct io_rsrc_data **pdata) { struct io_rsrc_data *data; - int ret = -ENOMEM; + int ret = 0; unsigned i; data = kzalloc(sizeof(*data), GFP_KERNEL); @@ -1235,7 +1235,13 @@ static int io_sqe_buffer_register(struct io_ring_ctx *ctx, struct iovec *iov, } } if (folio) { - folio_put_refs(folio, nr_pages - 1); + /* + * The pages are bound to the folio, it doesn't + * actually unpin them but drops all but one reference, + * which is usually put down by io_buffer_unmap(). + * Note, needs a better helper. 
+ */ + unpin_user_pages(&pages[1], nr_pages - 1); nr_pages = 1; } } diff --git a/io_uring/sqpoll.c b/io_uring/sqpoll.c index 0119d3f1a556..9db4bc1f521a 100644 --- a/io_uring/sqpoll.c +++ b/io_uring/sqpoll.c @@ -233,7 +233,6 @@ static int io_sq_thread(void *data) set_cpus_allowed_ptr(current, cpumask_of(sqd->sq_cpu)); else set_cpus_allowed_ptr(current, cpu_online_mask); - current->flags |= PF_NO_SETAFFINITY; mutex_lock(&sqd->lock); while (1) { diff --git a/kernel/compat.c b/kernel/compat.c index 55551989d9da..fb50f29d9b36 100644 --- a/kernel/compat.c +++ b/kernel/compat.c @@ -152,7 +152,7 @@ COMPAT_SYSCALL_DEFINE3(sched_getaffinity, compat_pid_t, pid, unsigned int, len, if (len & (sizeof(compat_ulong_t)-1)) return -EINVAL; - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); diff --git a/kernel/sched/core.c b/kernel/sched/core.c index af017e038b48..488655f2319f 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -8414,14 +8414,14 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len, if (len & (sizeof(unsigned long)-1)) return -EINVAL; - if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) return -ENOMEM; ret = sched_getaffinity(pid, mask); if (ret == 0) { unsigned int retlen = min(len, cpumask_size()); - if (copy_to_user(user_mask_ptr, mask, retlen)) + if (copy_to_user(user_mask_ptr, cpumask_bits(mask), retlen)) ret = -EFAULT; else ret = retlen; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 29baa97d0d53..9b2803c7a18f 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -1564,7 +1564,8 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end) key.flags = end; /* overload flags, as it is unsigned long */ for (pg = ftrace_pages_start; pg; pg = pg->next) { - if (end < pg->records[0].ip || + if (pg->index == 0 || + end < pg->records[0].ip || start >= (pg->records[pg->index - 1].ip + MCOUNT_INSN_SIZE)) continue; rec = bsearch(&key, pg->records, pg->index, diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 89877a18f933..486cca3c2b75 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -1331,6 +1331,9 @@ static const char *hist_field_name(struct hist_field *field, { const char *field_name = ""; + if (WARN_ON_ONCE(!field)) + return field_name; + if (level > 1) return field_name; @@ -4235,6 +4238,15 @@ static int __create_val_field(struct hist_trigger_data *hist_data, goto out; } + /* Some types cannot be a value */ + if (hist_field->flags & (HIST_FIELD_FL_GRAPH | HIST_FIELD_FL_PERCENT | + HIST_FIELD_FL_BUCKET | HIST_FIELD_FL_LOG2 | + HIST_FIELD_FL_SYM | HIST_FIELD_FL_SYM_OFFSET | + HIST_FIELD_FL_SYSCALL | HIST_FIELD_FL_STACKTRACE)) { + hist_err(file->tr, HIST_ERR_BAD_FIELD_MODIFIER, errpos(field_str)); + ret = -EINVAL; + } + hist_data->fields[val_idx] = hist_field; ++hist_data->n_vals; diff --git a/lib/zstd/common/zstd_deps.h b/lib/zstd/common/zstd_deps.h index 7a5bf44839c9..f06df065dec0 100644 --- a/lib/zstd/common/zstd_deps.h +++ b/lib/zstd/common/zstd_deps.h @@ -84,7 +84,7 @@ static uint64_t ZSTD_div64(uint64_t dividend, uint32_t divisor) { #include <linux/kernel.h> -#define assert(x) WARN_ON((x)) +#define assert(x) WARN_ON(!(x)) #endif /* ZSTD_DEPS_ASSERT */ #endif /* ZSTD_DEPS_NEED_ASSERT */ diff --git a/lib/zstd/decompress/huf_decompress.c b/lib/zstd/decompress/huf_decompress.c index 89b269a641c7..60958afebc41 100644 --- 
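In the kernel/compat.c and kernel/sched/core.c hunks above, sched_getaffinity() switches to zalloc_cpumask_var() and copies cpumask_bits(mask) back to user space. The scheduler only sets bits for CPUs that actually exist, while the copied length is min(len, cpumask_size()), so a non-zeroed allocation could let its uninitialised tail reach user space. The same rule in a stand-alone sketch, with plain heap buffers standing in for cpumask_var_t and copy_to_user()::

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  #define BITMAP_BYTES 16 /* stands in for cpumask_size() */

  /* Pretend CPUs 0-3 are online: only the first byte is ever written. */
  static void fill_online_mask(unsigned char *buf)
  {
          buf[0] = 0x0f;
  }

  int main(void)
  {
          unsigned char *mask = calloc(1, BITMAP_BYTES); /* zalloc_cpumask_var() analogue */
          unsigned char user_copy[BITMAP_BYTES];
          size_t retlen = sizeof(user_copy);             /* min(len, cpumask_size()) analogue */

          if (!mask)
                  return 1;
          fill_online_mask(mask);
          /* copy_to_user() analogue: the unwritten tail is all zeroes, not stale heap data. */
          memcpy(user_copy, mask, retlen);
          for (size_t i = 0; i < retlen; i++)
                  printf("%02x ", (unsigned int)user_copy[i]);
          printf("\n");
          free(mask);
          return 0;
  }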
a/lib/zstd/decompress/huf_decompress.c +++ b/lib/zstd/decompress/huf_decompress.c @@ -985,7 +985,7 @@ static void HUF_fillDTableX2Level2(HUF_DEltX2* DTable, U32 targetLog, const U32 static void HUF_fillDTableX2(HUF_DEltX2* DTable, const U32 targetLog, const sortedSymbol_t* sortedList, - const U32* rankStart, rankVal_t rankValOrigin, const U32 maxWeight, + const U32* rankStart, rankValCol_t *rankValOrigin, const U32 maxWeight, const U32 nbBitsBaseline) { U32* const rankVal = rankValOrigin[0]; diff --git a/lib/zstd/decompress/zstd_decompress.c b/lib/zstd/decompress/zstd_decompress.c index b9b935a9f5c0..6b3177c94711 100644 --- a/lib/zstd/decompress/zstd_decompress.c +++ b/lib/zstd/decompress/zstd_decompress.c @@ -798,7 +798,7 @@ static size_t ZSTD_copyRawBlock(void* dst, size_t dstCapacity, if (srcSize == 0) return 0; RETURN_ERROR(dstBuffer_null, ""); } - ZSTD_memcpy(dst, src, srcSize); + ZSTD_memmove(dst, src, srcSize); return srcSize; } @@ -858,6 +858,7 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, /* Loop on each block */ while (1) { + BYTE* oBlockEnd = oend; size_t decodedSize; blockProperties_t blockProperties; size_t const cBlockSize = ZSTD_getcBlockSize(ip, remainingSrcSize, &blockProperties); @@ -867,16 +868,34 @@ static size_t ZSTD_decompressFrame(ZSTD_DCtx* dctx, remainingSrcSize -= ZSTD_blockHeaderSize; RETURN_ERROR_IF(cBlockSize > remainingSrcSize, srcSize_wrong, ""); + if (ip >= op && ip < oBlockEnd) { + /* We are decompressing in-place. Limit the output pointer so that we + * don't overwrite the block that we are currently reading. This will + * fail decompression if the input & output pointers aren't spaced + * far enough apart. + * + * This is important to set, even when the pointers are far enough + * apart, because ZSTD_decompressBlock_internal() can decide to store + * literals in the output buffer, after the block it is decompressing. + * Since we don't want anything to overwrite our input, we have to tell + * ZSTD_decompressBlock_internal to never write past ip. + * + * See ZSTD_allocateLiteralsBuffer() for reference. + */ + oBlockEnd = op + (ip - op); + } + switch(blockProperties.blockType) { case bt_compressed: - decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oend-op), ip, cBlockSize, /* frame */ 1, not_streaming); + decodedSize = ZSTD_decompressBlock_internal(dctx, op, (size_t)(oBlockEnd-op), ip, cBlockSize, /* frame */ 1, not_streaming); break; case bt_raw : + /* Use oend instead of oBlockEnd because this function is safe to overlap. It uses memmove. 
*/ decodedSize = ZSTD_copyRawBlock(op, (size_t)(oend-op), ip, cBlockSize); break; case bt_rle : - decodedSize = ZSTD_setRleBlock(op, (size_t)(oend-op), *ip, blockProperties.origSize); + decodedSize = ZSTD_setRleBlock(op, (size_t)(oBlockEnd-op), *ip, blockProperties.origSize); break; case bt_reserved : default: diff --git a/mm/damon/paddr.c b/mm/damon/paddr.c index 6c655d9b5639..dd9c33fbe805 100644 --- a/mm/damon/paddr.c +++ b/mm/damon/paddr.c @@ -130,7 +130,6 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz) accessed = false; else accessed = true; - folio_put(folio); goto out; } @@ -144,10 +143,10 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *folio_sz) if (need_lock) folio_unlock(folio); - folio_put(folio); out: *folio_sz = folio_size(folio); + folio_put(folio); return accessed; } @@ -281,8 +280,8 @@ static inline unsigned long damon_pa_mark_accessed_or_deactivate( folio_mark_accessed(folio); else folio_deactivate(folio); - folio_put(folio); applied += folio_nr_pages(folio); + folio_put(folio); } return applied * PAGE_SIZE; } diff --git a/mm/huge_memory.c b/mm/huge_memory.c index 4fc43859e59a..032fb0ef9cd1 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -2037,7 +2037,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, { struct mm_struct *mm = vma->vm_mm; pgtable_t pgtable; - pmd_t _pmd; + pmd_t _pmd, old_pmd; int i; /* @@ -2048,7 +2048,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, * * See Documentation/mm/mmu_notifier.rst */ - pmdp_huge_clear_flush(vma, haddr, pmd); + old_pmd = pmdp_huge_clear_flush(vma, haddr, pmd); pgtable = pgtable_trans_huge_withdraw(mm, pmd); pmd_populate(mm, &_pmd, pgtable); @@ -2057,6 +2057,8 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma, pte_t *pte, entry; entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot); entry = pte_mkspecial(entry); + if (pmd_uffd_wp(old_pmd)) + entry = pte_mkuffd_wp(entry); pte = pte_offset_map(&_pmd, haddr); VM_BUG_ON(!pte_none(*pte)); set_pte_at(mm, haddr, pte, entry); diff --git a/mm/migrate.c b/mm/migrate.c index 98f1c11197a8..db3f154446af 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1112,9 +1112,8 @@ static void migrate_folio_done(struct folio *src, /* Obtain the lock on page, remove all ptes. */ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page, unsigned long private, struct folio *src, - struct folio **dstp, int force, bool avoid_force_lock, - enum migrate_mode mode, enum migrate_reason reason, - struct list_head *ret) + struct folio **dstp, enum migrate_mode mode, + enum migrate_reason reason, struct list_head *ret) { struct folio *dst; int rc = -EAGAIN; @@ -1144,7 +1143,7 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page dst->private = NULL; if (!folio_trylock(src)) { - if (!force || mode == MIGRATE_ASYNC) + if (mode == MIGRATE_ASYNC) goto out; /* @@ -1163,17 +1162,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page if (current->flags & PF_MEMALLOC) goto out; - /* - * We have locked some folios and are going to wait to lock - * this folio. To avoid a potential deadlock, let's bail - * out and not do that. The locked folios will be moved and - * unlocked, then we can wait to lock this folio. 
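The lib/zstd/decompress/zstd_decompress.c hunk above lets ZSTD_decompressFrame() run with the compressed input staged inside the output buffer: raw blocks switch from ZSTD_memcpy() to ZSTD_memmove(), and for compressed and RLE blocks the per-block output end is clamped to the current input pointer so the decoder can never overwrite bytes it still has to read. The clamp in isolation, with invented names standing in for the decoder cursors::

  #include <assert.h>

  /*
   * Per-block output clamp for in-place decompression: op/oend are the
   * output cursor and hard end, ip the input cursor. Names are
   * illustrative, not the zstd-internal ones.
   */
  static const unsigned char *block_output_end(const unsigned char *op,
                                               const unsigned char *oend,
                                               const unsigned char *ip)
  {
          /*
           * Input sits in the not-yet-written part of the output buffer:
           * writing past ip would corrupt data we still have to read.
           */
          if (ip >= op && ip < oend)
                  return op + (ip - op); /* stop exactly at the input */
          return oend;
  }

  int main(void)
  {
          unsigned char buf[64];

          /* Input staged in the tail of the output buffer: clamp applies. */
          assert(block_output_end(buf, buf + 64, buf + 32) == buf + 32);
          /* Input already behind the output cursor: full window usable. */
          assert(block_output_end(buf + 16, buf + 64, buf + 8) == buf + 64);
          return 0;
  }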
- */ - if (avoid_force_lock) { - rc = -EDEADLOCK; - goto out; - } - folio_lock(src); } locked = true; @@ -1193,8 +1181,6 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page rc = -EBUSY; goto out; } - if (!force) - goto out; folio_wait_writeback(src); } @@ -1253,7 +1239,7 @@ static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page /* Establish migration ptes */ VM_BUG_ON_FOLIO(folio_test_anon(src) && !folio_test_ksm(src) && !anon_vma, src); - try_to_migrate(src, TTU_BATCH_FLUSH); + try_to_migrate(src, mode == MIGRATE_ASYNC ? TTU_BATCH_FLUSH : 0); page_was_mapped = 1; } @@ -1267,7 +1253,7 @@ out: * A folio that has not been unmapped will be restored to * right list unless we want to retry. */ - if (rc == -EAGAIN || rc == -EDEADLOCK) + if (rc == -EAGAIN) ret = NULL; migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret); @@ -1508,6 +1494,9 @@ static inline int try_split_folio(struct folio *folio, struct list_head *split_f #define NR_MAX_BATCHED_MIGRATION 512 #endif #define NR_MAX_MIGRATE_PAGES_RETRY 10 +#define NR_MAX_MIGRATE_ASYNC_RETRY 3 +#define NR_MAX_MIGRATE_SYNC_RETRY \ + (NR_MAX_MIGRATE_PAGES_RETRY - NR_MAX_MIGRATE_ASYNC_RETRY) struct migrate_pages_stats { int nr_succeeded; /* Normal and large folios migrated successfully, in @@ -1618,13 +1607,19 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page, /* * migrate_pages_batch() first unmaps folios in the from list as many as * possible, then move the unmapped folios. + * + * We only batch migration if mode == MIGRATE_ASYNC to avoid to wait a + * lock or bit when we have locked more than one folio. Which may cause + * deadlock (e.g., for loop device). So, if mode != MIGRATE_ASYNC, the + * length of the from list must be <= 1. 
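The mm/migrate.c hunks above remove the force/avoid_force_lock plumbing from migrate_folio_unmap(): in MIGRATE_ASYNC mode the folio is only ever trylocked and the attempt bails out with -EAGAIN, while synchronous modes may block because migrate_pages_batch() is now handed at most one folio at a time in those modes, so blocking can no longer deadlock against folios locked earlier in the same batch. The same locking rule as a small pthread sketch::

  #include <errno.h>
  #include <pthread.h>
  #include <stdbool.h>
  #include <stdio.h>

  /*
   * Async/batched mode: other locks may already be held, so never block
   * on this one. Sync/single mode: nothing else is held, blocking is safe.
   */
  static int lock_for_migration(pthread_mutex_t *lock, bool async_batch)
  {
          if (async_batch)
                  return pthread_mutex_trylock(lock) ? -EAGAIN : 0;
          return pthread_mutex_lock(lock); /* may sleep; only one item in flight */
  }

  int main(void)
  {
          pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

          pthread_mutex_lock(&m);                               /* contended case */
          printf("async: %d\n", lock_for_migration(&m, true));  /* -EAGAIN        */
          pthread_mutex_unlock(&m);
          printf("sync:  %d\n", lock_for_migration(&m, false)); /* 0              */
          pthread_mutex_unlock(&m);
          return 0;
  }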
*/ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, free_page_t put_new_page, unsigned long private, enum migrate_mode mode, int reason, struct list_head *ret_folios, - struct migrate_pages_stats *stats) + struct list_head *split_folios, struct migrate_pages_stats *stats, + int nr_pass) { - int retry; + int retry = 1; int large_retry = 1; int thp_retry = 1; int nr_failed = 0; @@ -1634,21 +1629,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page, bool is_large = false; bool is_thp = false; struct folio *folio, *folio2, *dst = NULL, *dst2; - int rc, rc_saved, nr_pages; - LIST_HEAD(split_folios); + int rc, rc_saved = 0, nr_pages; LIST_HEAD(unmap_folios); LIST_HEAD(dst_folios); bool nosplit = (reason == MR_NUMA_MISPLACED); - bool no_split_folio_counting = false; - bool avoid_force_lock; -retry: - rc_saved = 0; - avoid_force_lock = false; - retry = 1; - for (pass = 0; - pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry); - pass++) { + VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC && + !list_empty(from) && !list_is_singular(from)); + + for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) { retry = 0; large_retry = 0; thp_retry = 0; @@ -1679,7 +1668,7 @@ retry: if (!thp_migration_supported() && is_thp) { nr_large_failed++; stats->nr_thp_failed++; - if (!try_split_folio(folio, &split_folios)) { + if (!try_split_folio(folio, split_folios)) { stats->nr_thp_split++; continue; } @@ -1689,15 +1678,13 @@ retry: } rc = migrate_folio_unmap(get_new_page, put_new_page, private, - folio, &dst, pass > 2, avoid_force_lock, - mode, reason, ret_folios); + folio, &dst, mode, reason, ret_folios); /* * The rules are: * Success: folio will be freed * Unmap: folio will be put on unmap_folios list, * dst folio put on dst_folios list * -EAGAIN: stay on the from list - * -EDEADLOCK: stay on the from list * -ENOMEM: stay on the from list * Other errno: put on ret_folios list */ @@ -1712,7 +1699,7 @@ retry: stats->nr_thp_failed += is_thp; /* Large folio NUMA faulting doesn't split to retry. */ if (!nosplit) { - int ret = try_split_folio(folio, &split_folios); + int ret = try_split_folio(folio, split_folios); if (!ret) { stats->nr_thp_split += is_thp; @@ -1729,18 +1716,11 @@ retry: break; } } - } else if (!no_split_folio_counting) { + } else { nr_failed++; } stats->nr_failed_pages += nr_pages + nr_retry_pages; - /* - * There might be some split folios of fail-to-migrate large - * folios left in split_folios list. Move them to ret_folios - * list so that they could be put back to the right list by - * the caller otherwise the folio refcnt will be leaked. - */ - list_splice_init(&split_folios, ret_folios); /* nr_failed isn't updated for not used */ nr_large_failed += large_retry; stats->nr_thp_failed += thp_retry; @@ -1749,19 +1729,11 @@ retry: goto out; else goto move; - case -EDEADLOCK: - /* - * The folio cannot be locked for potential deadlock. - * Go move (and unlock) all locked folios. Then we can - * try again. - */ - rc_saved = rc; - goto move; case -EAGAIN: if (is_large) { large_retry++; thp_retry += is_thp; - } else if (!no_split_folio_counting) { + } else { retry++; } nr_retry_pages += nr_pages; @@ -1771,11 +1743,6 @@ retry: stats->nr_thp_succeeded += is_thp; break; case MIGRATEPAGE_UNMAP: - /* - * We have locked some folios, don't force lock - * to avoid deadlock. 
- */ - avoid_force_lock = true; list_move_tail(&folio->lru, &unmap_folios); list_add_tail(&dst->lru, &dst_folios); break; @@ -1789,7 +1756,7 @@ retry: if (is_large) { nr_large_failed++; stats->nr_thp_failed += is_thp; - } else if (!no_split_folio_counting) { + } else { nr_failed++; } @@ -1807,9 +1774,7 @@ move: try_to_unmap_flush(); retry = 1; - for (pass = 0; - pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry); - pass++) { + for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) { retry = 0; large_retry = 0; thp_retry = 0; @@ -1838,7 +1803,7 @@ move: if (is_large) { large_retry++; thp_retry += is_thp; - } else if (!no_split_folio_counting) { + } else { retry++; } nr_retry_pages += nr_pages; @@ -1851,7 +1816,7 @@ move: if (is_large) { nr_large_failed++; stats->nr_thp_failed += is_thp; - } else if (!no_split_folio_counting) { + } else { nr_failed++; } @@ -1888,30 +1853,52 @@ out: dst2 = list_next_entry(dst, lru); } - /* - * Try to migrate split folios of fail-to-migrate large folios, no - * nr_failed counting in this round, since all split folios of a - * large folio is counted as 1 failure in the first round. - */ - if (rc >= 0 && !list_empty(&split_folios)) { - /* - * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY - * retries) to ret_folios to avoid migrating them again. - */ - list_splice_init(from, ret_folios); - list_splice_init(&split_folios, from); - no_split_folio_counting = true; - goto retry; - } + return rc; +} +static int migrate_pages_sync(struct list_head *from, new_page_t get_new_page, + free_page_t put_new_page, unsigned long private, + enum migrate_mode mode, int reason, struct list_head *ret_folios, + struct list_head *split_folios, struct migrate_pages_stats *stats) +{ + int rc, nr_failed = 0; + LIST_HEAD(folios); + struct migrate_pages_stats astats; + + memset(&astats, 0, sizeof(astats)); + /* Try to migrate in batch with MIGRATE_ASYNC mode firstly */ + rc = migrate_pages_batch(from, get_new_page, put_new_page, private, MIGRATE_ASYNC, + reason, &folios, split_folios, &astats, + NR_MAX_MIGRATE_ASYNC_RETRY); + stats->nr_succeeded += astats.nr_succeeded; + stats->nr_thp_succeeded += astats.nr_thp_succeeded; + stats->nr_thp_split += astats.nr_thp_split; + if (rc < 0) { + stats->nr_failed_pages += astats.nr_failed_pages; + stats->nr_thp_failed += astats.nr_thp_failed; + list_splice_tail(&folios, ret_folios); + return rc; + } + stats->nr_thp_failed += astats.nr_thp_split; + nr_failed += astats.nr_thp_split; /* - * We have unlocked all locked folios, so we can force lock now, let's - * try again. + * Fall back to migrate all failed folios one by one synchronously. 
All + * failed folios except split THPs will be retried, so their failure + * isn't counted */ - if (rc == -EDEADLOCK) - goto retry; + list_splice_tail_init(&folios, from); + while (!list_empty(from)) { + list_move(from->next, &folios); + rc = migrate_pages_batch(&folios, get_new_page, put_new_page, + private, mode, reason, ret_folios, + split_folios, stats, NR_MAX_MIGRATE_SYNC_RETRY); + list_splice_tail_init(&folios, ret_folios); + if (rc < 0) + return rc; + nr_failed += rc; + } - return rc; + return nr_failed; } /* @@ -1949,6 +1936,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, struct folio *folio, *folio2; LIST_HEAD(folios); LIST_HEAD(ret_folios); + LIST_HEAD(split_folios); struct migrate_pages_stats stats; trace_mm_migrate_pages_start(mode, reason); @@ -1959,6 +1947,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page, mode, reason, &stats, &ret_folios); if (rc_gather < 0) goto out; + again: nr_pages = 0; list_for_each_entry_safe(folio, folio2, from, lru) { @@ -1969,20 +1958,36 @@ again: } nr_pages += folio_nr_pages(folio); - if (nr_pages > NR_MAX_BATCHED_MIGRATION) + if (nr_pages >= NR_MAX_BATCHED_MIGRATION) break; } - if (nr_pages > NR_MAX_BATCHED_MIGRATION) - list_cut_before(&folios, from, &folio->lru); + if (nr_pages >= NR_MAX_BATCHED_MIGRATION) + list_cut_before(&folios, from, &folio2->lru); else list_splice_init(from, &folios); - rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private, - mode, reason, &ret_folios, &stats); + if (mode == MIGRATE_ASYNC) + rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private, + mode, reason, &ret_folios, &split_folios, &stats, + NR_MAX_MIGRATE_PAGES_RETRY); + else + rc = migrate_pages_sync(&folios, get_new_page, put_new_page, private, + mode, reason, &ret_folios, &split_folios, &stats); list_splice_tail_init(&folios, &ret_folios); if (rc < 0) { rc_gather = rc; + list_splice_tail(&split_folios, &ret_folios); goto out; } + if (!list_empty(&split_folios)) { + /* + * Failure isn't counted since all split folios of a large folio + * is counted as 1 failure already. And, we only try to migrate + * with minimal effort, force MIGRATE_ASYNC mode and retry once. + */ + migrate_pages_batch(&split_folios, get_new_page, put_new_page, private, + MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1); + list_splice_tail_init(&split_folios, &ret_folios); + } rc_gather += rc; if (!list_empty(from)) goto again; diff --git a/mm/mincore.c b/mm/mincore.c index cd69b9db0081..d359650b0f75 100644 --- a/mm/mincore.c +++ b/mm/mincore.c @@ -33,7 +33,7 @@ static int mincore_hugetlb(pte_t *pte, unsigned long hmask, unsigned long addr, * Hugepages under user process are always in RAM and never * swapped out, but theoretically it needs to be checked. 
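migrate_pages_sync(), added above, is the other half of that design: it first pushes the whole list through migrate_pages_batch() in MIGRATE_ASYNC mode, then re-feeds the leftovers one at a time in the caller's synchronous mode. Stripped of the statistics bookkeeping, the control flow looks roughly like this; the types and callbacks are placeholders, not the kernel interfaces::

  #include <stdbool.h>
  #include <stddef.h>
  #include <stdio.h>

  struct item { struct item *next; };

  typedef bool (*migrate_fn)(struct item *it, bool may_block);

  /* Phase 1: non-blocking pass over the whole batch, collect failures.
   * Phase 2: retry each failure on its own, blocking allowed. */
  static size_t migrate_two_phase(struct item **list, migrate_fn try_migrate)
  {
          struct item *failed = NULL, *it, *next;
          size_t nr_failed = 0;

          for (it = *list; it; it = next) {
                  next = it->next;
                  if (!try_migrate(it, false)) {
                          it->next = failed;
                          failed = it;
                  }
          }

          for (it = failed; it; it = next) {
                  next = it->next;
                  if (!try_migrate(it, true))
                          nr_failed++;
          }

          *list = NULL;
          return nr_failed;
  }

  /* Pretend only the blocking pass ever succeeds. */
  static bool demo_migrate(struct item *it, bool may_block)
  {
          (void)it;
          return may_block;
  }

  int main(void)
  {
          struct item a = { NULL }, b = { &a };
          struct item *list = &b;

          printf("still failed: %zu\n", migrate_two_phase(&list, demo_migrate));
          return 0;
  }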
*/ - present = pte && !huge_pte_none(huge_ptep_get(pte)); + present = pte && !huge_pte_none_mostly(huge_ptep_get(pte)); for (; addr != end; vec++, addr += PAGE_SIZE) *vec = present; walk->private = vec; diff --git a/net/core/netdev-genl-gen.c b/net/core/netdev-genl-gen.c index 9e10802587fc..3abab70d66dd 100644 --- a/net/core/netdev-genl-gen.c +++ b/net/core/netdev-genl-gen.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/netdev.yaml */ /* YNL-GEN kernel source */ diff --git a/net/core/netdev-genl-gen.h b/net/core/netdev-genl-gen.h index 2c5fc7d1e8a7..74d74fc23167 100644 --- a/net/core/netdev-genl-gen.h +++ b/net/core/netdev-genl-gen.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/netdev.yaml */ /* YNL-GEN kernel header */ diff --git a/net/core/xdp.c b/net/core/xdp.c index 8c92fc553317..b5737e47ec41 100644 --- a/net/core/xdp.c +++ b/net/core/xdp.c @@ -774,20 +774,34 @@ static int __init xdp_metadata_init(void) } late_initcall(xdp_metadata_init); +void xdp_set_features_flag(struct net_device *dev, xdp_features_t val) +{ + val &= NETDEV_XDP_ACT_MASK; + if (dev->xdp_features == val) + return; + + dev->xdp_features = val; + + if (dev->reg_state == NETREG_REGISTERED) + call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev); +} +EXPORT_SYMBOL_GPL(xdp_set_features_flag); + void xdp_features_set_redirect_target(struct net_device *dev, bool support_sg) { - dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT; - if (support_sg) - dev->xdp_features |= NETDEV_XDP_ACT_NDO_XMIT_SG; + xdp_features_t val = (dev->xdp_features | NETDEV_XDP_ACT_NDO_XMIT); - call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev); + if (support_sg) + val |= NETDEV_XDP_ACT_NDO_XMIT_SG; + xdp_set_features_flag(dev, val); } EXPORT_SYMBOL_GPL(xdp_features_set_redirect_target); void xdp_features_clear_redirect_target(struct net_device *dev) { - dev->xdp_features &= ~(NETDEV_XDP_ACT_NDO_XMIT | - NETDEV_XDP_ACT_NDO_XMIT_SG); - call_netdevice_notifiers(NETDEV_XDP_FEAT_CHANGE, dev); + xdp_features_t val = dev->xdp_features; + + val &= ~(NETDEV_XDP_ACT_NDO_XMIT | NETDEV_XDP_ACT_NDO_XMIT_SG); + xdp_set_features_flag(dev, val); } EXPORT_SYMBOL_GPL(xdp_features_clear_redirect_target); diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 6957971c2db2..cac17183589f 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1933,6 +1933,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu) int new_master_mtu; int old_master_mtu; int mtu_limit; + int overhead; int cpu_mtu; int err; @@ -1961,9 +1962,10 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu) largest_mtu = slave_mtu; } - mtu_limit = min_t(int, master->max_mtu, dev->max_mtu); + overhead = dsa_tag_protocol_overhead(cpu_dp->tag_ops); + mtu_limit = min_t(int, master->max_mtu, dev->max_mtu + overhead); old_master_mtu = master->mtu; - new_master_mtu = largest_mtu + dsa_tag_protocol_overhead(cpu_dp->tag_ops); + new_master_mtu = largest_mtu + overhead; if (new_master_mtu > mtu_limit) return -ERANGE; @@ -1998,8 +2000,7 @@ int dsa_slave_change_mtu(struct net_device *dev, int new_mtu) out_port_failed: if (new_master_mtu != old_master_mtu) - dsa_port_mtu_change(cpu_dp, old_master_mtu - - 
dsa_tag_protocol_overhead(cpu_dp->tag_ops)); + dsa_port_mtu_change(cpu_dp, old_master_mtu - overhead); out_cpu_failed: if (new_master_mtu != old_master_mtu) dev_set_mtu(master, old_master_mtu); diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c index 00db74d96583..b77f1189d19d 100644 --- a/net/hsr/hsr_framereg.c +++ b/net/hsr/hsr_framereg.c @@ -415,7 +415,7 @@ void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb, node_dst = find_node_by_addr_A(&port->hsr->node_db, eth_hdr(skb)->h_dest); if (!node_dst) { - if (net_ratelimit()) + if (port->hsr->prot_version != PRP_V1 && net_ratelimit()) netdev_err(skb->dev, "%s: Unknown node\n", __func__); return; } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index b5736ef16ed2..390f4be7f7be 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -576,6 +576,9 @@ static int rtentry_to_fib_config(struct net *net, int cmd, struct rtentry *rt, cfg->fc_scope = RT_SCOPE_UNIVERSE; } + if (!cfg->fc_table) + cfg->fc_table = RT_TABLE_MAIN; + if (cmd == SIOCDELRT) return 0; diff --git a/net/ipv4/fou_nl.c b/net/ipv4/fou_nl.c index 5c14fe030eda..6c37c4f98cca 100644 --- a/net/ipv4/fou_nl.c +++ b/net/ipv4/fou_nl.c @@ -1,4 +1,4 @@ -// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +// SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/fou.yaml */ /* YNL-GEN kernel source */ diff --git a/net/ipv4/fou_nl.h b/net/ipv4/fou_nl.h index 58b1e1ed4b3b..dbd0780a5d34 100644 --- a/net/ipv4/fou_nl.h +++ b/net/ipv4/fou_nl.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/fou.yaml */ /* YNL-GEN kernel header */ diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c index e41fdc38ce19..6edae3886885 100644 --- a/net/ipv4/inet_hashtables.c +++ b/net/ipv4/inet_hashtables.c @@ -828,8 +828,14 @@ bool inet_bind2_bucket_match_addr_any(const struct inet_bind2_bucket *tb, const #if IS_ENABLED(CONFIG_IPV6) struct in6_addr addr_any = {}; - if (sk->sk_family != tb->family) + if (sk->sk_family != tb->family) { + if (sk->sk_family == AF_INET) + return net_eq(ib2_net(tb), net) && tb->port == port && + tb->l3mdev == l3mdev && + ipv6_addr_equal(&tb->v6_rcv_saddr, &addr_any); + return false; + } if (sk->sk_family == AF_INET6) return net_eq(ib2_net(tb), net) && tb->port == port && diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c index de90b09dfe78..2541083d49ad 100644 --- a/net/ipv4/ip_tunnel.c +++ b/net/ipv4/ip_tunnel.c @@ -614,10 +614,10 @@ void ip_md_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, } headroom += LL_RESERVED_SPACE(rt->dst.dev) + rt->dst.header_len; - if (headroom > dev->needed_headroom) - dev->needed_headroom = headroom; + if (headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, headroom); - if (skb_cow_head(skb, dev->needed_headroom)) { + if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { ip_rt_put(rt); goto tx_dropped; } @@ -800,10 +800,10 @@ void ip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev, max_headroom = LL_RESERVED_SPACE(rt->dst.dev) + sizeof(struct iphdr) + rt->dst.header_len + ip_encap_hlen(&tunnel->encap); - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; + if (max_headroom > READ_ONCE(dev->needed_headroom)) + 
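The net/dsa/slave.c hunk above folds the tagging protocol overhead into the MTU limit: the CPU port has to carry largest_mtu + overhead, so the cap must be min(master->max_mtu, dev->max_mtu + overhead) rather than the minimum of the bare MTUs. With illustrative numbers (an 8-byte tag, a 1500-byte user port and a jumbo-capable master) the old expression rejects a perfectly valid 1500-byte request::

  #include <stdio.h>

  static int min_int(int a, int b) { return a < b ? a : b; }

  int main(void)
  {
          /* Illustrative numbers only. */
          int overhead = 8;            /* tagging protocol overhead         */
          int slave_max_mtu = 1500;    /* user port's dev->max_mtu          */
          int master_max_mtu = 9000;   /* jumbo-capable conduit interface   */
          int requested = 1500;        /* MTU the user asks for on the port */
          int new_master_mtu = requested + overhead;

          int old_limit = min_int(master_max_mtu, slave_max_mtu);
          int new_limit = min_int(master_max_mtu, slave_max_mtu + overhead);

          printf("old check: %s\n", new_master_mtu > old_limit ? "-ERANGE" : "ok");
          printf("new check: %s\n", new_master_mtu > new_limit ? "-ERANGE" : "ok");
          return 0;
  }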
WRITE_ONCE(dev->needed_headroom, max_headroom); - if (skb_cow_head(skb, dev->needed_headroom)) { + if (skb_cow_head(skb, READ_ONCE(dev->needed_headroom))) { ip_rt_put(rt); DEV_STATS_INC(dev, tx_dropped); kfree_skb(skb); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 71d01cf3c13e..ba839e441450 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -3605,7 +3605,7 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst, th->window = htons(min(req->rsk_rcv_wnd, 65535U)); tcp_options_write(th, NULL, &opts); th->doff = (tcp_header_size >> 2); - __TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); + TCP_INC_STATS(sock_net(sk), TCP_MIB_OUTSEGS); #ifdef CONFIG_TCP_MD5SIG /* Okay, we have all we need - do the md5 hash if needed */ diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c index 47b6607a1370..5e80e517f071 100644 --- a/net/ipv6/ip6_tunnel.c +++ b/net/ipv6/ip6_tunnel.c @@ -1240,8 +1240,8 @@ route_lookup: */ max_headroom = LL_RESERVED_SPACE(dst->dev) + sizeof(struct ipv6hdr) + dst->header_len + t->hlen; - if (max_headroom > dev->needed_headroom) - dev->needed_headroom = max_headroom; + if (max_headroom > READ_ONCE(dev->needed_headroom)) + WRITE_ONCE(dev->needed_headroom, max_headroom); err = ip6_tnl_encap(skb, t, &proto, fl6); if (err) diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index eb0295d90039..fc3fddeb6f36 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c @@ -83,7 +83,7 @@ struct iucv_irq_data { u16 ippathid; u8 ipflags1; u8 iptype; - u32 res2[8]; + u32 res2[9]; }; struct iucv_irq_list { diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 8eb342300868..d3d861911ed6 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2611,6 +2611,17 @@ static int ieee80211_change_bss(struct wiphy *wiphy, if (!sband) return -EINVAL; + if (params->basic_rates) { + if (!ieee80211_parse_bitrates(link->conf->chandef.width, + wiphy->bands[sband->band], + params->basic_rates, + params->basic_rates_len, + &link->conf->basic_rates)) + return -EINVAL; + changed |= BSS_CHANGED_BASIC_RATES; + ieee80211_check_rate_mask(link); + } + if (params->use_cts_prot >= 0) { link->conf->use_cts_prot = params->use_cts_prot; changed |= BSS_CHANGED_ERP_CTS_PROT; @@ -2632,16 +2643,6 @@ static int ieee80211_change_bss(struct wiphy *wiphy, changed |= BSS_CHANGED_ERP_SLOT; } - if (params->basic_rates) { - ieee80211_parse_bitrates(link->conf->chandef.width, - wiphy->bands[sband->band], - params->basic_rates, - params->basic_rates_len, - &link->conf->basic_rates); - changed |= BSS_CHANGED_BASIC_RATES; - ieee80211_check_rate_mask(link); - } - if (params->ap_isolate >= 0) { if (params->ap_isolate) sdata->flags |= IEEE80211_SDATA_DONT_BRIDGE_PACKETS; diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index 56628b52d100..5c8dea49626c 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -997,9 +997,13 @@ out: return ret; } +static struct lock_class_key mptcp_slock_keys[2]; +static struct lock_class_key mptcp_keys[2]; + static int mptcp_pm_nl_create_listen_socket(struct sock *sk, struct mptcp_pm_addr_entry *entry) { + bool is_ipv6 = sk->sk_family == AF_INET6; int addrlen = sizeof(struct sockaddr_in); struct sockaddr_storage addr; struct socket *ssock; @@ -1016,6 +1020,18 @@ static int mptcp_pm_nl_create_listen_socket(struct sock *sk, if (!newsk) return -EINVAL; + /* The subflow socket lock is acquired in a nested to the msk one + * in several places, even by the TCP stack, and this msk is a kernel + * socket: lockdep complains. 
Instead of propagating the _nested + * modifiers in several places, re-init the lock class for the msk + * socket to an mptcp specific one. + */ + sock_lock_init_class_and_name(newsk, + is_ipv6 ? "mlock-AF_INET6" : "mlock-AF_INET", + &mptcp_slock_keys[is_ipv6], + is_ipv6 ? "msk_lock-AF_INET6" : "msk_lock-AF_INET", + &mptcp_keys[is_ipv6]); + lock_sock(newsk); ssock = __mptcp_nmpc_socket(mptcp_sk(newsk)); release_sock(newsk); diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c index 3ad9c46202fc..60b23b2716c4 100644 --- a/net/mptcp/protocol.c +++ b/net/mptcp/protocol.c @@ -825,7 +825,6 @@ static bool __mptcp_finish_join(struct mptcp_sock *msk, struct sock *ssk) if (sk->sk_socket && !ssk->sk_socket) mptcp_sock_graft(ssk, sk->sk_socket); - mptcp_propagate_sndbuf((struct sock *)msk, ssk); mptcp_sockopt_sync_locked(msk, ssk); return true; } @@ -2343,7 +2342,6 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, goto out; } - sock_orphan(ssk); subflow->disposable = 1; /* if ssk hit tcp_done(), tcp_cleanup_ulp() cleared the related ops @@ -2351,15 +2349,25 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk, * reference owned by msk; */ if (!inet_csk(ssk)->icsk_ulp_ops) { + WARN_ON_ONCE(!sock_flag(ssk, SOCK_DEAD)); kfree_rcu(subflow, rcu); + } else if (msk->in_accept_queue && msk->first == ssk) { + /* if the first subflow moved to a close state, e.g. due to + * incoming reset and we reach here before inet_child_forget() + * the TCP stack could later try to close it via + * inet_csk_listen_stop(), or deliver it to the user space via + * accept(). + * We can't delete the subflow - or risk a double free - nor let + * the msk survive - or will be leaked in the non accept scenario: + * fallback and let TCP cope with the subflow cleanup. 
+ */ + WARN_ON_ONCE(sock_flag(ssk, SOCK_DEAD)); + mptcp_subflow_drop_ctx(ssk); } else { /* otherwise tcp will dispose of the ssk and subflow ctx */ - if (ssk->sk_state == TCP_LISTEN) { - tcp_set_state(ssk, TCP_CLOSE); - mptcp_subflow_queue_clean(sk, ssk); - inet_csk_listen_stop(ssk); + if (ssk->sk_state == TCP_LISTEN) mptcp_event_pm_listener(ssk, MPTCP_EVENT_LISTENER_CLOSED); - } + __tcp_close(ssk, 0); /* close acquired an extra ref */ @@ -2399,9 +2407,10 @@ static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu) return 0; } -static void __mptcp_close_subflow(struct mptcp_sock *msk) +static void __mptcp_close_subflow(struct sock *sk) { struct mptcp_subflow_context *subflow, *tmp; + struct mptcp_sock *msk = mptcp_sk(sk); might_sleep(); @@ -2415,7 +2424,15 @@ static void __mptcp_close_subflow(struct mptcp_sock *msk) if (!skb_queue_empty_lockless(&ssk->sk_receive_queue)) continue; - mptcp_close_ssk((struct sock *)msk, ssk, subflow); + mptcp_close_ssk(sk, ssk, subflow); + } + + /* if the MPC subflow has been closed before the msk is accepted, + * msk will never be accept-ed, close it now + */ + if (!msk->first && msk->in_accept_queue) { + sock_set_flag(sk, SOCK_DEAD); + inet_sk_state_store(sk, TCP_CLOSE); } } @@ -2624,6 +2641,9 @@ static void mptcp_worker(struct work_struct *work) __mptcp_check_send_data_fin(sk); mptcp_check_data_fin(sk); + if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) + __mptcp_close_subflow(sk); + /* There is no point in keeping around an orphaned sk timedout or * closed, but we need the msk around to reply to incoming DATA_FIN, * even if it is orphaned and in FIN_WAIT2 state @@ -2639,9 +2659,6 @@ static void mptcp_worker(struct work_struct *work) } } - if (test_and_clear_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) - __mptcp_close_subflow(msk); - if (test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags)) __mptcp_retrans(sk); @@ -3079,6 +3096,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, msk->local_key = subflow_req->local_key; msk->token = subflow_req->token; msk->subflow = NULL; + msk->in_accept_queue = 1; WRITE_ONCE(msk->fully_established, false); if (mp_opt->suboptions & OPTION_MPTCP_CSUMREQD) WRITE_ONCE(msk->csum_enabled, true); @@ -3096,8 +3114,7 @@ struct sock *mptcp_sk_clone(const struct sock *sk, security_inet_csk_clone(nsk, req); bh_unlock_sock(nsk); - /* keep a single reference */ - __sock_put(nsk); + /* note: the newly allocated socket refcount is 2 now */ return nsk; } @@ -3153,8 +3170,6 @@ static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, goto out; } - /* acquire the 2nd reference for the owning socket */ - sock_hold(new_mptcp_sock); newsk = new_mptcp_sock; MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); } else { @@ -3705,25 +3720,10 @@ static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, struct sock *newsk = newsock->sk; set_bit(SOCK_CUSTOM_SOCKOPT, &newsock->flags); + msk->in_accept_queue = 0; lock_sock(newsk); - /* PM/worker can now acquire the first subflow socket - * lock without racing with listener queue cleanup, - * we can notify it, if needed. - * - * Even if remote has reset the initial subflow by now - * the refcnt is still at least one. 
- */ - subflow = mptcp_subflow_ctx(msk->first); - list_add(&subflow->node, &msk->conn_list); - sock_hold(msk->first); - if (mptcp_is_fully_established(newsk)) - mptcp_pm_fully_established(msk, msk->first, GFP_KERNEL); - - mptcp_rcv_space_init(msk, msk->first); - mptcp_propagate_sndbuf(newsk, msk->first); - /* set ssk->sk_socket of accept()ed flows to mptcp socket. * This is needed so NOSPACE flag can be set from tcp stack. */ diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h index 61fd8eabfca2..339a6f072989 100644 --- a/net/mptcp/protocol.h +++ b/net/mptcp/protocol.h @@ -295,7 +295,8 @@ struct mptcp_sock { u8 recvmsg_inq:1, cork:1, nodelay:1, - fastopening:1; + fastopening:1, + in_accept_queue:1; int connect_flags; struct work_struct work; struct sk_buff *ooo_last_skb; @@ -628,7 +629,6 @@ void mptcp_close_ssk(struct sock *sk, struct sock *ssk, struct mptcp_subflow_context *subflow); void __mptcp_subflow_send_ack(struct sock *ssk); void mptcp_subflow_reset(struct sock *ssk); -void mptcp_subflow_queue_clean(struct sock *sk, struct sock *ssk); void mptcp_sock_graft(struct sock *sk, struct socket *parent); struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk); bool __mptcp_close(struct sock *sk, long timeout); @@ -666,6 +666,8 @@ void mptcp_subflow_set_active(struct mptcp_subflow_context *subflow); bool mptcp_subflow_active(struct mptcp_subflow_context *subflow); +void mptcp_subflow_drop_ctx(struct sock *ssk); + static inline void mptcp_subflow_tcp_fallback(struct sock *sk, struct mptcp_subflow_context *ctx) { diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index 4ae1a7304cf0..a0041360ee9d 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -397,10 +397,15 @@ void mptcp_subflow_reset(struct sock *ssk) struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk); struct sock *sk = subflow->conn; + /* mptcp_mp_fail_no_response() can reach here on an already closed + * socket + */ + if (ssk->sk_state == TCP_CLOSE) + return; + /* must hold: tcp_done() could drop last reference on parent */ sock_hold(sk); - tcp_set_state(ssk, TCP_CLOSE); tcp_send_active_reset(ssk, GFP_ATOMIC); tcp_done(ssk); if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) && @@ -622,7 +627,7 @@ static struct request_sock_ops mptcp_subflow_v6_request_sock_ops __ro_after_init static struct tcp_request_sock_ops subflow_request_sock_ipv6_ops __ro_after_init; static struct inet_connection_sock_af_ops subflow_v6_specific __ro_after_init; static struct inet_connection_sock_af_ops subflow_v6m_specific __ro_after_init; -static struct proto tcpv6_prot_override; +static struct proto tcpv6_prot_override __ro_after_init; static int subflow_v6_conn_request(struct sock *sk, struct sk_buff *skb) { @@ -693,9 +698,10 @@ static bool subflow_hmac_valid(const struct request_sock *req, static void mptcp_force_close(struct sock *sk) { - /* the msk is not yet exposed to user-space */ + /* the msk is not yet exposed to user-space, and refcount is 2 */ inet_sk_state_store(sk, TCP_CLOSE); sk_common_release(sk); + sock_put(sk); } static void subflow_ulp_fallback(struct sock *sk, @@ -711,7 +717,7 @@ static void subflow_ulp_fallback(struct sock *sk, mptcp_subflow_ops_undo_override(sk); } -static void subflow_drop_ctx(struct sock *ssk) +void mptcp_subflow_drop_ctx(struct sock *ssk) { struct mptcp_subflow_context *ctx = mptcp_subflow_ctx(ssk); @@ -750,6 +756,7 @@ static struct sock *subflow_syn_recv_sock(const struct sock *sk, struct mptcp_options_received mp_opt; bool fallback, fallback_is_fatal; struct 
sock *new_msk = NULL; + struct mptcp_sock *owner; struct sock *child; pr_debug("listener=%p, req=%p, conn=%p", listener, req, listener->conn); @@ -816,7 +823,7 @@ create_child: if (new_msk) mptcp_copy_inaddrs(new_msk, child); - subflow_drop_ctx(child); + mptcp_subflow_drop_ctx(child); goto out; } @@ -824,6 +831,8 @@ create_child: ctx->setsockopt_seq = listener->setsockopt_seq; if (ctx->mp_capable) { + owner = mptcp_sk(new_msk); + /* this can't race with mptcp_close(), as the msk is * not yet exposted to user-space */ @@ -832,14 +841,14 @@ create_child: /* record the newly created socket as the first msk * subflow, but don't link it yet into conn_list */ - WRITE_ONCE(mptcp_sk(new_msk)->first, child); + WRITE_ONCE(owner->first, child); /* new mpc subflow takes ownership of the newly * created mptcp socket */ mptcp_sk(new_msk)->setsockopt_seq = ctx->setsockopt_seq; - mptcp_pm_new_connection(mptcp_sk(new_msk), child, 1); - mptcp_token_accept(subflow_req, mptcp_sk(new_msk)); + mptcp_pm_new_connection(owner, child, 1); + mptcp_token_accept(subflow_req, owner); ctx->conn = new_msk; new_msk = NULL; @@ -847,15 +856,21 @@ create_child: * uses the correct data */ mptcp_copy_inaddrs(ctx->conn, child); + mptcp_propagate_sndbuf(ctx->conn, child); + + mptcp_rcv_space_init(owner, child); + list_add(&ctx->node, &owner->conn_list); + sock_hold(child); /* with OoO packets we can reach here without ingress * mpc option */ - if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) + if (mp_opt.suboptions & OPTION_MPTCP_MPC_ACK) { mptcp_subflow_fully_established(ctx, &mp_opt); + mptcp_pm_fully_established(owner, child, GFP_ATOMIC); + ctx->pm_notified = 1; + } } else if (ctx->mp_join) { - struct mptcp_sock *owner; - owner = subflow_req->msk; if (!owner) { subflow_add_reset_reason(skb, MPTCP_RST_EPROHIBIT); @@ -899,7 +914,7 @@ out: return child; dispose_child: - subflow_drop_ctx(child); + mptcp_subflow_drop_ctx(child); tcp_rsk(req)->drop_req = true; inet_csk_prepare_for_destroy_sock(child); tcp_done(child); @@ -910,7 +925,7 @@ dispose_child: } static struct inet_connection_sock_af_ops subflow_specific __ro_after_init; -static struct proto tcp_prot_override; +static struct proto tcp_prot_override __ro_after_init; enum mapping_status { MAPPING_OK, @@ -1432,6 +1447,13 @@ static void subflow_error_report(struct sock *ssk) { struct sock *sk = mptcp_subflow_ctx(ssk)->conn; + /* bail early if this is a no-op, so that we avoid introducing a + * problematic lockdep dependency between TCP accept queue lock + * and msk socket spinlock + */ + if (!sk->sk_socket) + return; + mptcp_data_lock(sk); if (!sock_owned_by_user(sk)) __mptcp_error_report(sk); @@ -1803,79 +1825,6 @@ static void subflow_state_change(struct sock *sk) } } -void mptcp_subflow_queue_clean(struct sock *listener_sk, struct sock *listener_ssk) -{ - struct request_sock_queue *queue = &inet_csk(listener_ssk)->icsk_accept_queue; - struct mptcp_sock *msk, *next, *head = NULL; - struct request_sock *req; - - /* build a list of all unaccepted mptcp sockets */ - spin_lock_bh(&queue->rskq_lock); - for (req = queue->rskq_accept_head; req; req = req->dl_next) { - struct mptcp_subflow_context *subflow; - struct sock *ssk = req->sk; - struct mptcp_sock *msk; - - if (!sk_is_mptcp(ssk)) - continue; - - subflow = mptcp_subflow_ctx(ssk); - if (!subflow || !subflow->conn) - continue; - - /* skip if already in list */ - msk = mptcp_sk(subflow->conn); - if (msk->dl_next || msk == head) - continue; - - msk->dl_next = head; - head = msk; - } - spin_unlock_bh(&queue->rskq_lock); - if (!head) - 
return; - - /* can't acquire the msk socket lock under the subflow one, - * or will cause ABBA deadlock - */ - release_sock(listener_ssk); - - for (msk = head; msk; msk = next) { - struct sock *sk = (struct sock *)msk; - bool do_cancel_work; - - sock_hold(sk); - lock_sock_nested(sk, SINGLE_DEPTH_NESTING); - next = msk->dl_next; - msk->first = NULL; - msk->dl_next = NULL; - - do_cancel_work = __mptcp_close(sk, 0); - release_sock(sk); - if (do_cancel_work) { - /* lockdep will report a false positive ABBA deadlock - * between cancel_work_sync and the listener socket. - * The involved locks belong to different sockets WRT - * the existing AB chain. - * Using a per socket key is problematic as key - * deregistration requires process context and must be - * performed at socket disposal time, in atomic - * context. - * Just tell lockdep to consider the listener socket - * released here. - */ - mutex_release(&listener_sk->sk_lock.dep_map, _RET_IP_); - mptcp_cancel_work(sk); - mutex_acquire(&listener_sk->sk_lock.dep_map, - SINGLE_DEPTH_NESTING, 0, _RET_IP_); - } - sock_put(sk); - } - - /* we are still under the listener msk socket lock */ - lock_sock_nested(listener_ssk, SINGLE_DEPTH_NESTING); -} - static int subflow_ulp_init(struct sock *sk) { struct inet_connection_sock *icsk = inet_csk(sk); @@ -1932,6 +1881,13 @@ static void subflow_ulp_release(struct sock *ssk) * when the subflow is still unaccepted */ release = ctx->disposable || list_empty(&ctx->node); + + /* inet_child_forget() does not call sk_state_change(), + * explicitly trigger the socket close machinery + */ + if (!release && !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, + &mptcp_sk(sk)->flags)) + mptcp_schedule_work(sk); sock_put(sk); } diff --git a/net/ncsi/ncsi-manage.c b/net/ncsi/ncsi-manage.c index 80713febfac6..d9da942ad53d 100644 --- a/net/ncsi/ncsi-manage.c +++ b/net/ncsi/ncsi-manage.c @@ -1803,8 +1803,8 @@ struct ncsi_dev *ncsi_register_dev(struct net_device *dev, pdev = to_platform_device(dev->dev.parent); if (pdev) { np = pdev->dev.of_node; - if (np && (of_get_property(np, "mellanox,multi-host", NULL) || - of_get_property(np, "mlx,multi-host", NULL))) + if (np && (of_property_read_bool(np, "mellanox,multi-host") || + of_property_read_bool(np, "mlx,multi-host"))) ndp->mlx_multi_host = true; } diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c index e55e455275c4..9544c2f16998 100644 --- a/net/netfilter/nft_masq.c +++ b/net/netfilter/nft_masq.c @@ -43,7 +43,7 @@ static int nft_masq_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { - u32 plen = sizeof_field(struct nf_nat_range, min_addr.all); + u32 plen = sizeof_field(struct nf_nat_range, min_proto.all); struct nft_masq *priv = nft_expr_priv(expr); int err; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index 047999150390..5c29915ab028 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ -226,7 +226,7 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr, priv->flags |= NF_NAT_RANGE_MAP_IPS; } - plen = sizeof_field(struct nf_nat_range, min_addr.all); + plen = sizeof_field(struct nf_nat_range, min_proto.all); if (tb[NFTA_NAT_REG_PROTO_MIN]) { err = nft_parse_register_load(tb[NFTA_NAT_REG_PROTO_MIN], &priv->sreg_proto_min, plen); diff --git a/net/netfilter/nft_redir.c b/net/netfilter/nft_redir.c index 5f7739987559..67cec56bc84a 100644 --- a/net/netfilter/nft_redir.c +++ b/net/netfilter/nft_redir.c @@ -48,7 +48,7 @@ static int nft_redir_init(const struct nft_ctx 
*ctx, unsigned int plen; int err; - plen = sizeof_field(struct nf_nat_range, min_addr.all); + plen = sizeof_field(struct nf_nat_range, min_proto.all); if (tb[NFTA_REDIR_REG_PROTO_MIN]) { err = nft_parse_register_load(tb[NFTA_REDIR_REG_PROTO_MIN], &priv->sreg_proto_min, plen); @@ -236,7 +236,7 @@ static struct nft_expr_type nft_redir_inet_type __read_mostly = { .name = "redir", .ops = &nft_redir_inet_ops, .policy = nft_redir_policy, - .maxattr = NFTA_MASQ_MAX, + .maxattr = NFTA_REDIR_MAX, .owner = THIS_MODULE, }; diff --git a/net/sched/act_api.c b/net/sched/act_api.c index 34c508675041..296fc1afedd8 100644 --- a/net/sched/act_api.c +++ b/net/sched/act_api.c @@ -1589,6 +1589,10 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], t->tca__pad1 = 0; t->tca__pad2 = 0; + if (extack && extack->_msg && + nla_put_string(skb, TCA_ROOT_EXT_WARN_MSG, extack->_msg)) + goto out_nlmsg_trim; + nest = nla_nest_start_noflag(skb, TCA_ACT_TAB); if (!nest) goto out_nlmsg_trim; @@ -1596,10 +1600,6 @@ static int tca_get_fill(struct sk_buff *skb, struct tc_action *actions[], if (tcf_action_dump(skb, actions, bind, ref, false) < 0) goto out_nlmsg_trim; - if (extack && extack->_msg && - nla_put_string(skb, TCA_EXT_WARN_MSG, extack->_msg)) - goto out_nlmsg_trim; - nla_nest_end(skb, nest); nlh->nlmsg_len = skb_tail_pointer(skb) - b; diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index ff6dd86bdc9f..c6b4a62276f6 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -3501,6 +3501,7 @@ out_pnet: out_nl: smc_nl_exit(); out_ism: + smc_clc_exit(); smc_ism_exit(); out_pernet_subsys_stat: unregister_pernet_subsys(&smc_net_stat_ops); diff --git a/net/smc/smc_cdc.c b/net/smc/smc_cdc.c index 53f63bfbaf5f..89105e95b452 100644 --- a/net/smc/smc_cdc.c +++ b/net/smc/smc_cdc.c @@ -114,6 +114,9 @@ int smc_cdc_msg_send(struct smc_connection *conn, union smc_host_cursor cfed; int rc; + if (unlikely(!READ_ONCE(conn->sndbuf_desc))) + return -ENOBUFS; + smc_cdc_add_pending_send(conn, pend); conn->tx_cdc_seq++; diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c index d52060b2680c..454356771cda 100644 --- a/net/smc/smc_core.c +++ b/net/smc/smc_core.c @@ -1464,7 +1464,7 @@ static void __smc_lgr_terminate(struct smc_link_group *lgr, bool soft) if (lgr->terminating) return; /* lgr already terminating */ /* cancel free_work sync, will terminate when lgr->freeing is set */ - cancel_delayed_work_sync(&lgr->free_work); + cancel_delayed_work(&lgr->free_work); lgr->terminating = 1; /* kill remaining link group connections */ diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c index a1581c77cf84..6564192e7f20 100644 --- a/net/vmw_vsock/virtio_transport_common.c +++ b/net/vmw_vsock/virtio_transport_common.c @@ -241,21 +241,18 @@ static int virtio_transport_send_pkt_info(struct vsock_sock *vsk, } static bool virtio_transport_inc_rx_pkt(struct virtio_vsock_sock *vvs, - struct sk_buff *skb) + u32 len) { - if (vvs->rx_bytes + skb->len > vvs->buf_alloc) + if (vvs->rx_bytes + len > vvs->buf_alloc) return false; - vvs->rx_bytes += skb->len; + vvs->rx_bytes += len; return true; } static void virtio_transport_dec_rx_pkt(struct virtio_vsock_sock *vvs, - struct sk_buff *skb) + u32 len) { - int len; - - len = skb_headroom(skb) - sizeof(struct virtio_vsock_hdr) - skb->len; vvs->rx_bytes -= len; vvs->fwd_cnt += len; } @@ -367,7 +364,7 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, spin_lock_bh(&vvs->rx_lock); while (total < len && !skb_queue_empty(&vvs->rx_queue)) { - skb = 
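The nft_masq.c, nft_nat.c and nft_redir.c hunks above change the register-load length from sizeof_field(struct nf_nat_range, min_addr.all) to min_proto.all, i.e. from the size of the address member to the size of the protocol (port) member. The sizeof_field() idiom itself is just "size of a member without an instance", sketched below on a made-up struct rather than the real nf_nat_range layout::

  #include <stdio.h>

  /* The kernel's sizeof_field() boils down to this idiom: measure a
   * struct member without needing an instance. */
  #define sizeof_field(type, member) sizeof(((type *)0)->member)

  /* Made-up layout, not struct nf_nat_range. */
  struct fake_range {
          unsigned char  min_addr[16];   /* address-sized member         */
          unsigned short min_proto;      /* port-sized (protocol) member */
  };

  int main(void)
  {
          printf("addr member:  %zu bytes\n", sizeof_field(struct fake_range, min_addr));
          printf("proto member: %zu bytes\n", sizeof_field(struct fake_range, min_proto));
          return 0;
  }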
__skb_dequeue(&vvs->rx_queue); + skb = skb_peek(&vvs->rx_queue); bytes = len - total; if (bytes > skb->len) @@ -388,10 +385,11 @@ virtio_transport_stream_do_dequeue(struct vsock_sock *vsk, skb_pull(skb, bytes); if (skb->len == 0) { - virtio_transport_dec_rx_pkt(vvs, skb); + u32 pkt_len = le32_to_cpu(virtio_vsock_hdr(skb)->len); + + virtio_transport_dec_rx_pkt(vvs, pkt_len); + __skb_unlink(skb, &vvs->rx_queue); consume_skb(skb); - } else { - __skb_queue_head(&vvs->rx_queue, skb); } } @@ -437,17 +435,17 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk, while (!msg_ready) { struct virtio_vsock_hdr *hdr; + size_t pkt_len; skb = __skb_dequeue(&vvs->rx_queue); if (!skb) break; hdr = virtio_vsock_hdr(skb); + pkt_len = (size_t)le32_to_cpu(hdr->len); if (dequeued_len >= 0) { - size_t pkt_len; size_t bytes_to_copy; - pkt_len = (size_t)le32_to_cpu(hdr->len); bytes_to_copy = min(user_buf_len, pkt_len); if (bytes_to_copy) { @@ -466,7 +464,6 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk, dequeued_len = err; } else { user_buf_len -= bytes_to_copy; - skb_pull(skb, bytes_to_copy); } spin_lock_bh(&vvs->rx_lock); @@ -484,7 +481,7 @@ static int virtio_transport_seqpacket_do_dequeue(struct vsock_sock *vsk, msg->msg_flags |= MSG_EOR; } - virtio_transport_dec_rx_pkt(vvs, skb); + virtio_transport_dec_rx_pkt(vvs, pkt_len); kfree_skb(skb); } @@ -1040,7 +1037,7 @@ virtio_transport_recv_enqueue(struct vsock_sock *vsk, spin_lock_bh(&vvs->rx_lock); - can_enqueue = virtio_transport_inc_rx_pkt(vvs, skb); + can_enqueue = virtio_transport_inc_rx_pkt(vvs, len); if (!can_enqueue) { free_pkt = true; goto out; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 112b4bb009c8..4f63059efd81 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -462,6 +462,11 @@ nl80211_sta_wme_policy[NL80211_STA_WME_MAX + 1] = { [NL80211_STA_WME_MAX_SP] = { .type = NLA_U8 }, }; +static struct netlink_range_validation nl80211_punct_bitmap_range = { + .min = 0, + .max = 0xffff, +}; + static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [0] = { .strict_start_type = NL80211_ATTR_HE_OBSS_PD }, [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, @@ -805,7 +810,8 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = { [NL80211_ATTR_MLD_ADDR] = NLA_POLICY_EXACT_LEN(ETH_ALEN), [NL80211_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [NL80211_ATTR_MAX_NUM_AKM_SUITES] = { .type = NLA_REJECT }, - [NL80211_ATTR_PUNCT_BITMAP] = NLA_POLICY_RANGE(NLA_U8, 0, 0xffff), + [NL80211_ATTR_PUNCT_BITMAP] = + NLA_POLICY_FULL_RANGE(NLA_U32, &nl80211_punct_bitmap_range), }; /* policy for the key attributes */ @@ -8901,7 +8907,7 @@ static bool cfg80211_off_channel_oper_allowed(struct wireless_dev *wdev, struct cfg80211_chan_def *chandef; chandef = wdev_chandef(wdev, link_id); - if (!chandef) + if (!chandef || !chandef->chan) continue; /* @@ -10793,8 +10799,7 @@ static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev, static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device *rdev, const u8 *ssid, int ssid_len, - struct nlattr **attrs, - const u8 **bssid_out) + struct nlattr **attrs) { struct ieee80211_channel *chan; struct cfg80211_bss *bss; @@ -10821,7 +10826,6 @@ static struct cfg80211_bss *nl80211_assoc_bss(struct cfg80211_registered_device if (!bss) return ERR_PTR(-ENOENT); - *bssid_out = bssid; return bss; } @@ -10831,7 +10835,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) struct net_device *dev = 
info->user_ptr[1]; struct cfg80211_assoc_request req = {}; struct nlattr **attrs = NULL; - const u8 *bssid, *ssid; + const u8 *ap_addr, *ssid; unsigned int link_id; int err, ssid_len; @@ -10968,6 +10972,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) return -EINVAL; req.ap_mld_addr = nla_data(info->attrs[NL80211_ATTR_MLD_ADDR]); + ap_addr = req.ap_mld_addr; attrs = kzalloc(attrsize, GFP_KERNEL); if (!attrs) @@ -10993,8 +10998,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) goto free; } req.links[link_id].bss = - nl80211_assoc_bss(rdev, ssid, ssid_len, attrs, - &bssid); + nl80211_assoc_bss(rdev, ssid, ssid_len, attrs); if (IS_ERR(req.links[link_id].bss)) { err = PTR_ERR(req.links[link_id].bss); req.links[link_id].bss = NULL; @@ -11045,10 +11049,10 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) if (req.link_id >= 0) return -EINVAL; - req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs, - &bssid); + req.bss = nl80211_assoc_bss(rdev, ssid, ssid_len, info->attrs); if (IS_ERR(req.bss)) return PTR_ERR(req.bss); + ap_addr = req.bss->bssid; } err = nl80211_crypto_settings(rdev, info, &req.crypto, 1); @@ -11061,7 +11065,7 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info) dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid; memcpy(dev->ieee80211_ptr->disconnect_bssid, - bssid, ETH_ALEN); + ap_addr, ETH_ALEN); } wdev_unlock(dev->ieee80211_ptr); diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c index 2ab3e09e2227..50baf50dc513 100644 --- a/net/xfrm/xfrm_state.c +++ b/net/xfrm/xfrm_state.c @@ -2815,11 +2815,6 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload, goto error; } - if (!(inner_mode->flags & XFRM_MODE_FLAG_TUNNEL)) { - NL_SET_ERR_MSG(extack, "Only tunnel modes can accommodate an AF_UNSPEC selector"); - goto error; - } - x->inner_mode = *inner_mode; if (x->props.family == AF_INET) diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c index cf5172d4ce68..103af2b3e986 100644 --- a/net/xfrm/xfrm_user.c +++ b/net/xfrm/xfrm_user.c @@ -1012,7 +1012,9 @@ static int copy_to_user_aead(struct xfrm_algo_aead *aead, struct sk_buff *skb) return -EMSGSIZE; ap = nla_data(nla); - memcpy(ap, aead, sizeof(*aead)); + strscpy_pad(ap->alg_name, aead->alg_name, sizeof(ap->alg_name)); + ap->alg_key_len = aead->alg_key_len; + ap->alg_icv_len = aead->alg_icv_len; if (redact_secret && aead->alg_key_len) memset(ap->alg_key, 0, (aead->alg_key_len + 7) / 8); @@ -1032,7 +1034,8 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb) return -EMSGSIZE; ap = nla_data(nla); - memcpy(ap, ealg, sizeof(*ealg)); + strscpy_pad(ap->alg_name, ealg->alg_name, sizeof(ap->alg_name)); + ap->alg_key_len = ealg->alg_key_len; if (redact_secret && ealg->alg_key_len) memset(ap->alg_key, 0, (ealg->alg_key_len + 7) / 8); @@ -1043,6 +1046,40 @@ static int copy_to_user_ealg(struct xfrm_algo *ealg, struct sk_buff *skb) return 0; } +static int copy_to_user_calg(struct xfrm_algo *calg, struct sk_buff *skb) +{ + struct nlattr *nla = nla_reserve(skb, XFRMA_ALG_COMP, sizeof(*calg)); + struct xfrm_algo *ap; + + if (!nla) + return -EMSGSIZE; + + ap = nla_data(nla); + strscpy_pad(ap->alg_name, calg->alg_name, sizeof(ap->alg_name)); + ap->alg_key_len = 0; + + return 0; +} + +static int copy_to_user_encap(struct xfrm_encap_tmpl *ep, struct sk_buff *skb) +{ + struct nlattr *nla = nla_reserve(skb, XFRMA_ENCAP, sizeof(*ep)); + struct xfrm_encap_tmpl *uep; + + if (!nla) 
+		return -EMSGSIZE;
+
+	uep = nla_data(nla);
+	memset(uep, 0, sizeof(*uep));
+
+	uep->encap_type = ep->encap_type;
+	uep->encap_sport = ep->encap_sport;
+	uep->encap_dport = ep->encap_dport;
+	uep->encap_oa = ep->encap_oa;
+
+	return 0;
+}
+
 static int xfrm_smark_put(struct sk_buff *skb, struct xfrm_mark *m)
 {
 	int ret = 0;
@@ -1098,12 +1135,12 @@ static int copy_to_user_state_extra(struct xfrm_state *x,
 			goto out;
 	}
 	if (x->calg) {
-		ret = nla_put(skb, XFRMA_ALG_COMP, sizeof(*(x->calg)), x->calg);
+		ret = copy_to_user_calg(x->calg, skb);
 		if (ret)
 			goto out;
 	}
 	if (x->encap) {
-		ret = nla_put(skb, XFRMA_ENCAP, sizeof(*x->encap), x->encap);
+		ret = copy_to_user_encap(x->encap, skb);
 		if (ret)
 			goto out;
 	}
diff --git a/scripts/.gitignore b/scripts/.gitignore
index feb43045d1b1..6e9ce6720a05 100644
--- a/scripts/.gitignore
+++ b/scripts/.gitignore
@@ -3,7 +3,6 @@
 /generate_rust_target
 /insert-sys-cert
 /kallsyms
-/list-gitignored
 /module.lds
 /recordmcount
 /sign-file
diff --git a/scripts/Makefile b/scripts/Makefile
index e8917975905c..32b6ba722728 100644
--- a/scripts/Makefile
+++ b/scripts/Makefile
@@ -38,7 +38,7 @@ HOSTCFLAGS_sorttable.o += -DMCOUNT_SORT_ENABLED
 endif
 
 # The following programs are only built on demand
-hostprogs += list-gitignored unifdef
+hostprogs += unifdef
 
 # The module linker script is preprocessed on demand
 targets += module.lds
diff --git a/scripts/Makefile.package b/scripts/Makefile.package
index b941e6341b36..61f72eb8d9be 100644
--- a/scripts/Makefile.package
+++ b/scripts/Makefile.package
@@ -2,6 +2,7 @@
 # Makefile for the different targets used to generate full packages of a kernel
 
 include $(srctree)/scripts/Kbuild.include
+include $(srctree)/scripts/Makefile.lib
 
 KERNELPATH := kernel-$(subst -,_,$(KERNELRELEASE))
 KBUILD_PKG_ROOTCMD ?="fakeroot -u"
@@ -26,54 +27,46 @@ fi ; \
 tar -I $(KGZIP) -c $(RCS_TAR_IGNORE) -f $(2).tar.gz \
 	--transform 's:^:$(2)/:S' $(TAR_CONTENT) $(3)
 
-# .tmp_filelist .tmp_filelist_exclude
+# tarball compression
 # ---------------------------------------------------------------------------
 
-scripts/list-gitignored: FORCE
-	$(Q)$(MAKE) -f $(srctree)/Makefile scripts_package
+%.tar.gz: %.tar
+	$(call cmd,gzip)
 
-# 1f5d3a6b6532e25a5cdf1f311956b2b03d343a48 removed '*.rej' from .gitignore,
-# but it is definitely a generated file.
-filechk_filelist = \
-	$< --exclude='*.rej' --output=$@_exclude --prefix=./ --rootdir=$(srctree) --stat=-
+%.tar.bz2: %.tar
+	$(call cmd,bzip2)
 
-.tmp_filelist: scripts/list-gitignored FORCE
-	$(call filechk,filelist)
+%.tar.xz: %.tar
+	$(call cmd,xzmisc)
 
-# tarball
 # ---------------------------------------------------------------------------
-
-quiet_cmd_tar = TAR $@
-      cmd_tar = tar -c -f $@ $(tar-compress-opt) $(tar-exclude-opt) \
-                --owner=0 --group=0 --sort=name \
-                --transform 's:^\.:$*:S' -C $(tar-rootdir) .
-
-tar-rootdir := $(srctree)
+%.tar.zst: %.tar
+	$(call cmd,zstd)
 
-%.tar:
-	$(call cmd,tar)
-
-%.tar.gz: private tar-compress-opt := -I $(KGZIP)
-%.tar.gz:
-	$(call cmd,tar)
+# Git
 # ---------------------------------------------------------------------------
 
-%.tar.bz2: private tar-compress-opt := -I $(KBZIP2)
-%.tar.bz2:
-	$(call cmd,tar)
+filechk_HEAD = git -C $(srctree) rev-parse --verify HEAD 2>/dev/null
 
-%.tar.xz: private tar-compress-opt := -I $(XZ)
-%.tar.xz:
-	$(call cmd,tar)
+.tmp_HEAD: check-git FORCE
+	$(call filechk,HEAD)
 
-%.tar.zst: private tar-compress-opt := -I $(ZSTD)
-%.tar.zst:
-	$(call cmd,tar)
+PHONY += check-git
+check-git:
+	@if !
$(srctree)/scripts/check-git; then \ + echo >&2 "error: creating source package requires git repository"; \ + false; \ + fi # Linux source tarball # --------------------------------------------------------------------------- -linux.tar.gz: tar-exclude-opt = --exclude=./$@ --exclude-from=$<_exclude -linux.tar.gz: .tmp_filelist +quiet_cmd_archive_linux = ARCHIVE $@ + cmd_archive_linux = \ + git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ $$(cat $<) + +targets += linux.tar +linux.tar: .tmp_HEAD FORCE + $(call if_changed,archive_linux) # rpm-pkg # --------------------------------------------------------------------------- @@ -89,7 +82,7 @@ PHONY += srcrpm-pkg srcrpm-pkg: linux.tar.gz $(CONFIG_SHELL) $(MKSPEC) >$(objtree)/kernel.spec +rpmbuild $(RPMOPTS) --target $(UTS_MACHINE)-linux -bs kernel.spec \ - --define='_smp_mflags %{nil}' --define='_sourcedir .' --define='_srcrpmdir .' + --define='_smp_mflags %{nil}' --define='_sourcedir rpmbuild/SOURCES' --define='_srcrpmdir .' # binrpm-pkg # --------------------------------------------------------------------------- @@ -148,74 +141,62 @@ snap-pkg: # dir-pkg tar*-pkg - tarball targets # --------------------------------------------------------------------------- -tar-pkg-tarball = linux-$(KERNELRELEASE)-$(ARCH).$(1) -tar-pkg-phony = $(subst .,,$(1))-pkg - tar-install: FORCE $(Q)$(MAKE) -f $(srctree)/Makefile +$(Q)$(srctree)/scripts/package/buildtar $@ +quiet_cmd_tar = TAR $@ + cmd_tar = cd $<; tar cf ../$@ --owner=root --group=root --sort=name * + +linux-$(KERNELRELEASE)-$(ARCH).tar: tar-install + $(call cmd,tar) + PHONY += dir-pkg dir-pkg: tar-install @echo "Kernel tree successfully created in $<" -define tar-pkg-rule -PHONY += $(tar-pkg-phony) -$(tar-pkg-phony): $(tar-pkg-tarball) +PHONY += tar-pkg +tar-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar @: -$(tar-pkg-tarball): private tar-rootdir := tar-install -$(tar-pkg-tarball): tar-install -endef - -$(foreach x, tar tar.gz tar.bz2 tar.xz tar.zst, $(eval $(call tar-pkg-rule,$(x)))) +tar%-pkg: linux-$(KERNELRELEASE)-$(ARCH).tar.% FORCE + @: # perf-tar*-src-pkg - generate a source tarball with perf source # --------------------------------------------------------------------------- -perf-tar-src-pkg-tarball = perf-$(KERNELVERSION).$(1) -perf-tar-src-pkg-phony = perf-$(subst .,,$(1))-src-pkg - -quiet_cmd_stage_perf_src = STAGE $@ - cmd_stage_perf_src = \ - rm -rf $@; \ - mkdir -p $@; \ - tar -c -f - --exclude-from=$<_exclude -C $(srctree) --files-from=$(srctree)/tools/perf/MANIFEST | \ - tar -x -f - -C $@ - -.tmp_perf: .tmp_filelist - $(call cmd,stage_perf_src) - -filechk_perf_head = \ - if test -z "$(git -C $(srctree) rev-parse --show-cdup 2>/dev/null)" && \ - head=$$(git -C $(srctree) rev-parse --verify HEAD 2>/dev/null); then \ - echo $$head; \ - else \ - echo "not a git tree"; \ - fi +.tmp_perf: + $(Q)mkdir .tmp_perf -.tmp_perf/HEAD: .tmp_perf FORCE - $(call filechk,perf_head) +.tmp_perf/HEAD: .tmp_HEAD | .tmp_perf + $(call cmd,copy) quiet_cmd_perf_version_file = GEN $@ cmd_perf_version_file = cd $(srctree)/tools/perf; util/PERF-VERSION-GEN $(dir $(abspath $@)) -# PERF-VERSION-FILE and HEAD are independent, but this avoids updating the +# PERF-VERSION-FILE and .tmp_HEAD are independent, but this avoids updating the # timestamp of PERF-VERSION-FILE. # The best is to fix tools/perf/util/PERF-VERSION-GEN. 
-.tmp_perf/PERF-VERSION-FILE: .tmp_perf/HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN +.tmp_perf/PERF-VERSION-FILE: .tmp_HEAD $(srctree)/tools/perf/util/PERF-VERSION-GEN | .tmp_perf $(call cmd,perf_version_file) -define perf-tar-src-pkg-rule -PHONY += $(perf-tar-src-pkg-phony) -$(perf-tar-src-pkg-phony): $(perf-tar-src-pkg-tarball) - @: +quiet_cmd_archive_perf = ARCHIVE $@ + cmd_archive_perf = \ + git -C $(srctree) archive --output=$$(realpath $@) --prefix=$(basename $@)/ \ + --add-file=$$(realpath $(word 2, $^)) \ + --add-file=$$(realpath $(word 3, $^)) \ + $$(cat $(word 2, $^))^{tree} $$(cat $<) -$(perf-tar-src-pkg-tarball): private tar-rootdir := .tmp_perf -$(perf-tar-src-pkg-tarball): .tmp_filelist .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE -endef +targets += perf-$(KERNELVERSION).tar +perf-$(KERNELVERSION).tar: tools/perf/MANIFEST .tmp_perf/HEAD .tmp_perf/PERF-VERSION-FILE FORCE + $(call if_changed,archive_perf) + +PHONY += perf-tar-src-pkg +perf-tar-src-pkg: perf-$(KERNELVERSION).tar + @: -$(foreach x, tar tar.gz tar.bz2 tar.xz tar.zst, $(eval $(call perf-tar-src-pkg-rule,$(x)))) +perf-tar%-src-pkg: perf-$(KERNELVERSION).tar.% FORCE + @: # Help text displayed when executing 'make help' # --------------------------------------------------------------------------- @@ -243,4 +224,13 @@ help: PHONY += FORCE FORCE: +# Read all saved command lines and dependencies for the $(targets) we +# may be building above, using $(if_changed{,_dep}). As an +# optimization, we don't need to read them if the target does not +# exist, we will rebuild anyway in that case. + +existing-targets := $(wildcard $(sort $(targets))) + +-include $(foreach f,$(existing-targets),$(dir $(f)).$(notdir $(f)).cmd) + .PHONY: $(PHONY) diff --git a/scripts/check-git b/scripts/check-git new file mode 100755 index 000000000000..2ca6c5df10dd --- /dev/null +++ b/scripts/check-git @@ -0,0 +1,14 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-only +# +# succeed if we are in a git repository + +srctree="$(dirname $0)/.." + +if ! git -C "${srctree}" rev-parse --verify HEAD >/dev/null 2>/dev/null; then + exit 1 +fi + +if ! test -z $(git -C "${srctree}" rev-parse --show-cdup 2>/dev/null); then + exit 1 +fi diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 8a68179a98a3..a239a87e7bec 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -119,6 +119,7 @@ static bool is_ignored_symbol(const char *name, char type) "kallsyms_markers", "kallsyms_token_table", "kallsyms_token_index", + "kallsyms_seqs_of_names", /* Exclude linker generated symbols which vary between passes */ "_SDA_BASE_", /* ppc */ "_SDA2_BASE_", /* ppc */ diff --git a/scripts/kconfig/confdata.c b/scripts/kconfig/confdata.c index b7c9f1dd5e42..992575f1e976 100644 --- a/scripts/kconfig/confdata.c +++ b/scripts/kconfig/confdata.c @@ -1226,10 +1226,12 @@ static void (*conf_changed_callback)(void); void conf_set_changed(bool val) { - if (conf_changed_callback && conf_changed != val) - conf_changed_callback(); + bool changed = conf_changed != val; conf_changed = val; + + if (conf_changed_callback && changed) + conf_changed_callback(); } bool conf_get_changed(void) diff --git a/scripts/list-gitignored.c b/scripts/list-gitignored.c deleted file mode 100644 index f9941f8dcd2b..000000000000 --- a/scripts/list-gitignored.c +++ /dev/null @@ -1,1057 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -// -// Traverse the source tree, parsing all .gitignore files, and print file paths -// that are ignored by git. 
-// The output is suitable to the --exclude-from option of tar. -// This is useful until the --exclude-vcs-ignores option gets working correctly. -// -// Copyright (C) 2023 Masahiro Yamada <masahiroy@kernel.org> -// (a lot of code imported from GIT) - -#include <assert.h> -#include <dirent.h> -#include <errno.h> -#include <fcntl.h> -#include <getopt.h> -#include <stdarg.h> -#include <stdbool.h> -#include <stdio.h> -#include <stdlib.h> -#include <string.h> -#include <sys/stat.h> -#include <sys/types.h> -#include <unistd.h> - -// Imported from commit 23c56f7bd5f1667f8b793d796bf30e39545920f6 in GIT -// -//---------------------------(IMPORT FROM GIT BEGIN)--------------------------- - -// Copied from environment.c - -static bool ignore_case; - -// Copied from git-compat-util.h - -/* Sane ctype - no locale, and works with signed chars */ -#undef isascii -#undef isspace -#undef isdigit -#undef isalpha -#undef isalnum -#undef isprint -#undef islower -#undef isupper -#undef tolower -#undef toupper -#undef iscntrl -#undef ispunct -#undef isxdigit - -static const unsigned char sane_ctype[256]; -#define GIT_SPACE 0x01 -#define GIT_DIGIT 0x02 -#define GIT_ALPHA 0x04 -#define GIT_GLOB_SPECIAL 0x08 -#define GIT_REGEX_SPECIAL 0x10 -#define GIT_PATHSPEC_MAGIC 0x20 -#define GIT_CNTRL 0x40 -#define GIT_PUNCT 0x80 -#define sane_istest(x,mask) ((sane_ctype[(unsigned char)(x)] & (mask)) != 0) -#define isascii(x) (((x) & ~0x7f) == 0) -#define isspace(x) sane_istest(x,GIT_SPACE) -#define isdigit(x) sane_istest(x,GIT_DIGIT) -#define isalpha(x) sane_istest(x,GIT_ALPHA) -#define isalnum(x) sane_istest(x,GIT_ALPHA | GIT_DIGIT) -#define isprint(x) ((x) >= 0x20 && (x) <= 0x7e) -#define islower(x) sane_iscase(x, 1) -#define isupper(x) sane_iscase(x, 0) -#define is_glob_special(x) sane_istest(x,GIT_GLOB_SPECIAL) -#define iscntrl(x) (sane_istest(x,GIT_CNTRL)) -#define ispunct(x) sane_istest(x, GIT_PUNCT | GIT_REGEX_SPECIAL | \ - GIT_GLOB_SPECIAL | GIT_PATHSPEC_MAGIC) -#define isxdigit(x) (hexval_table[(unsigned char)(x)] != -1) -#define tolower(x) sane_case((unsigned char)(x), 0x20) -#define toupper(x) sane_case((unsigned char)(x), 0) - -static inline int sane_case(int x, int high) -{ - if (sane_istest(x, GIT_ALPHA)) - x = (x & ~0x20) | high; - return x; -} - -static inline int sane_iscase(int x, int is_lower) -{ - if (!sane_istest(x, GIT_ALPHA)) - return 0; - - if (is_lower) - return (x & 0x20) != 0; - else - return (x & 0x20) == 0; -} - -// Copied from ctype.c - -enum { - S = GIT_SPACE, - A = GIT_ALPHA, - D = GIT_DIGIT, - G = GIT_GLOB_SPECIAL, /* *, ?, [, \\ */ - R = GIT_REGEX_SPECIAL, /* $, (, ), +, ., ^, {, | */ - P = GIT_PATHSPEC_MAGIC, /* other non-alnum, except for ] and } */ - X = GIT_CNTRL, - U = GIT_PUNCT, - Z = GIT_CNTRL | GIT_SPACE -}; - -static const unsigned char sane_ctype[256] = { - X, X, X, X, X, X, X, X, X, Z, Z, X, X, Z, X, X, /* 0.. 15 */ - X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 16.. 31 */ - S, P, P, P, R, P, P, P, R, R, G, R, P, P, R, P, /* 32.. 47 */ - D, D, D, D, D, D, D, D, D, D, P, P, P, P, P, G, /* 48.. 63 */ - P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 64.. 79 */ - A, A, A, A, A, A, A, A, A, A, A, G, G, U, R, P, /* 80.. 95 */ - P, A, A, A, A, A, A, A, A, A, A, A, A, A, A, A, /* 96..111 */ - A, A, A, A, A, A, A, A, A, A, A, R, R, U, P, X, /* 112..127 */ - /* Nothing in the 128.. 
range */ -}; - -// Copied from hex.c - -static const signed char hexval_table[256] = { - -1, -1, -1, -1, -1, -1, -1, -1, /* 00-07 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 08-0f */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 10-17 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 18-1f */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 20-27 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 28-2f */ - 0, 1, 2, 3, 4, 5, 6, 7, /* 30-37 */ - 8, 9, -1, -1, -1, -1, -1, -1, /* 38-3f */ - -1, 10, 11, 12, 13, 14, 15, -1, /* 40-47 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 48-4f */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 50-57 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 58-5f */ - -1, 10, 11, 12, 13, 14, 15, -1, /* 60-67 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 68-67 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 70-77 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 78-7f */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 80-87 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 88-8f */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 90-97 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* 98-9f */ - -1, -1, -1, -1, -1, -1, -1, -1, /* a0-a7 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* a8-af */ - -1, -1, -1, -1, -1, -1, -1, -1, /* b0-b7 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* b8-bf */ - -1, -1, -1, -1, -1, -1, -1, -1, /* c0-c7 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* c8-cf */ - -1, -1, -1, -1, -1, -1, -1, -1, /* d0-d7 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* d8-df */ - -1, -1, -1, -1, -1, -1, -1, -1, /* e0-e7 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* e8-ef */ - -1, -1, -1, -1, -1, -1, -1, -1, /* f0-f7 */ - -1, -1, -1, -1, -1, -1, -1, -1, /* f8-ff */ -}; - -// Copied from wildmatch.h - -#define WM_CASEFOLD 1 -#define WM_PATHNAME 2 - -#define WM_NOMATCH 1 -#define WM_MATCH 0 -#define WM_ABORT_ALL -1 -#define WM_ABORT_TO_STARSTAR -2 - -// Copied from wildmatch.c - -typedef unsigned char uchar; - -// local modification: remove NEGATE_CLASS(2) - -#define CC_EQ(class, len, litmatch) ((len) == sizeof (litmatch)-1 \ - && *(class) == *(litmatch) \ - && strncmp((char*)class, litmatch, len) == 0) - -// local modification: simpilify macros -#define ISBLANK(c) ((c) == ' ' || (c) == '\t') -#define ISGRAPH(c) (isprint(c) && !isspace(c)) -#define ISPRINT(c) isprint(c) -#define ISDIGIT(c) isdigit(c) -#define ISALNUM(c) isalnum(c) -#define ISALPHA(c) isalpha(c) -#define ISCNTRL(c) iscntrl(c) -#define ISLOWER(c) islower(c) -#define ISPUNCT(c) ispunct(c) -#define ISSPACE(c) isspace(c) -#define ISUPPER(c) isupper(c) -#define ISXDIGIT(c) isxdigit(c) - -/* Match pattern "p" against "text" */ -static int dowild(const uchar *p, const uchar *text, unsigned int flags) -{ - uchar p_ch; - const uchar *pattern = p; - - for ( ; (p_ch = *p) != '\0'; text++, p++) { - int matched, match_slash, negated; - uchar t_ch, prev_ch; - if ((t_ch = *text) == '\0' && p_ch != '*') - return WM_ABORT_ALL; - if ((flags & WM_CASEFOLD) && ISUPPER(t_ch)) - t_ch = tolower(t_ch); - if ((flags & WM_CASEFOLD) && ISUPPER(p_ch)) - p_ch = tolower(p_ch); - switch (p_ch) { - case '\\': - /* Literal match with following character. Note that the test - * in "default" handles the p[1] == '\0' failure case. */ - p_ch = *++p; - /* FALLTHROUGH */ - default: - if (t_ch != p_ch) - return WM_NOMATCH; - continue; - case '?': - /* Match anything but '/'. 
*/ - if ((flags & WM_PATHNAME) && t_ch == '/') - return WM_NOMATCH; - continue; - case '*': - if (*++p == '*') { - const uchar *prev_p = p - 2; - while (*++p == '*') {} - if (!(flags & WM_PATHNAME)) - /* without WM_PATHNAME, '*' == '**' */ - match_slash = 1; - else if ((prev_p < pattern || *prev_p == '/') && - (*p == '\0' || *p == '/' || - (p[0] == '\\' && p[1] == '/'))) { - /* - * Assuming we already match 'foo/' and are at - * <star star slash>, just assume it matches - * nothing and go ahead match the rest of the - * pattern with the remaining string. This - * helps make foo/<*><*>/bar (<> because - * otherwise it breaks C comment syntax) match - * both foo/bar and foo/a/bar. - */ - if (p[0] == '/' && - dowild(p + 1, text, flags) == WM_MATCH) - return WM_MATCH; - match_slash = 1; - } else /* WM_PATHNAME is set */ - match_slash = 0; - } else - /* without WM_PATHNAME, '*' == '**' */ - match_slash = flags & WM_PATHNAME ? 0 : 1; - if (*p == '\0') { - /* Trailing "**" matches everything. Trailing "*" matches - * only if there are no more slash characters. */ - if (!match_slash) { - if (strchr((char *)text, '/')) - return WM_NOMATCH; - } - return WM_MATCH; - } else if (!match_slash && *p == '/') { - /* - * _one_ asterisk followed by a slash - * with WM_PATHNAME matches the next - * directory - */ - const char *slash = strchr((char*)text, '/'); - if (!slash) - return WM_NOMATCH; - text = (const uchar*)slash; - /* the slash is consumed by the top-level for loop */ - break; - } - while (1) { - if (t_ch == '\0') - break; - /* - * Try to advance faster when an asterisk is - * followed by a literal. We know in this case - * that the string before the literal - * must belong to "*". - * If match_slash is false, do not look past - * the first slash as it cannot belong to '*'. - */ - if (!is_glob_special(*p)) { - p_ch = *p; - if ((flags & WM_CASEFOLD) && ISUPPER(p_ch)) - p_ch = tolower(p_ch); - while ((t_ch = *text) != '\0' && - (match_slash || t_ch != '/')) { - if ((flags & WM_CASEFOLD) && ISUPPER(t_ch)) - t_ch = tolower(t_ch); - if (t_ch == p_ch) - break; - text++; - } - if (t_ch != p_ch) - return WM_NOMATCH; - } - if ((matched = dowild(p, text, flags)) != WM_NOMATCH) { - if (!match_slash || matched != WM_ABORT_TO_STARSTAR) - return matched; - } else if (!match_slash && t_ch == '/') - return WM_ABORT_TO_STARSTAR; - t_ch = *++text; - } - return WM_ABORT_ALL; - case '[': - p_ch = *++p; - if (p_ch == '^') - p_ch = '!'; - /* Assign literal 1/0 because of "matched" comparison. */ - negated = p_ch == '!' ? 1 : 0; - if (negated) { - /* Inverted character class. */ - p_ch = *++p; - } - prev_ch = 0; - matched = 0; - do { - if (!p_ch) - return WM_ABORT_ALL; - if (p_ch == '\\') { - p_ch = *++p; - if (!p_ch) - return WM_ABORT_ALL; - if (t_ch == p_ch) - matched = 1; - } else if (p_ch == '-' && prev_ch && p[1] && p[1] != ']') { - p_ch = *++p; - if (p_ch == '\\') { - p_ch = *++p; - if (!p_ch) - return WM_ABORT_ALL; - } - if (t_ch <= p_ch && t_ch >= prev_ch) - matched = 1; - else if ((flags & WM_CASEFOLD) && ISLOWER(t_ch)) { - uchar t_ch_upper = toupper(t_ch); - if (t_ch_upper <= p_ch && t_ch_upper >= prev_ch) - matched = 1; - } - p_ch = 0; /* This makes "prev_ch" get set to 0. */ - } else if (p_ch == '[' && p[1] == ':') { - const uchar *s; - int i; - for (s = p += 2; (p_ch = *p) && p_ch != ']'; p++) {} /*SHARED ITERATOR*/ - if (!p_ch) - return WM_ABORT_ALL; - i = p - s - 1; - if (i < 0 || p[-1] != ':') { - /* Didn't find ":]", so treat like a normal set. 
*/ - p = s - 2; - p_ch = '['; - if (t_ch == p_ch) - matched = 1; - continue; - } - if (CC_EQ(s,i, "alnum")) { - if (ISALNUM(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "alpha")) { - if (ISALPHA(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "blank")) { - if (ISBLANK(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "cntrl")) { - if (ISCNTRL(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "digit")) { - if (ISDIGIT(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "graph")) { - if (ISGRAPH(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "lower")) { - if (ISLOWER(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "print")) { - if (ISPRINT(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "punct")) { - if (ISPUNCT(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "space")) { - if (ISSPACE(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "upper")) { - if (ISUPPER(t_ch)) - matched = 1; - else if ((flags & WM_CASEFOLD) && ISLOWER(t_ch)) - matched = 1; - } else if (CC_EQ(s,i, "xdigit")) { - if (ISXDIGIT(t_ch)) - matched = 1; - } else /* malformed [:class:] string */ - return WM_ABORT_ALL; - p_ch = 0; /* This makes "prev_ch" get set to 0. */ - } else if (t_ch == p_ch) - matched = 1; - } while (prev_ch = p_ch, (p_ch = *++p) != ']'); - if (matched == negated || - ((flags & WM_PATHNAME) && t_ch == '/')) - return WM_NOMATCH; - continue; - } - } - - return *text ? WM_NOMATCH : WM_MATCH; -} - -/* Match the "pattern" against the "text" string. */ -static int wildmatch(const char *pattern, const char *text, unsigned int flags) -{ - // local modification: move WM_CASEFOLD here - if (ignore_case) - flags |= WM_CASEFOLD; - - return dowild((const uchar*)pattern, (const uchar*)text, flags); -} - -// Copied from dir.h - -#define PATTERN_FLAG_NODIR 1 -#define PATTERN_FLAG_ENDSWITH 4 -#define PATTERN_FLAG_MUSTBEDIR 8 -#define PATTERN_FLAG_NEGATIVE 16 - -// Copied from dir.c - -static int fspathncmp(const char *a, const char *b, size_t count) -{ - return ignore_case ? strncasecmp(a, b, count) : strncmp(a, b, count); -} - -static int simple_length(const char *match) -{ - int len = -1; - - for (;;) { - unsigned char c = *match++; - len++; - if (c == '\0' || is_glob_special(c)) - return len; - } -} - -static int no_wildcard(const char *string) -{ - return string[simple_length(string)] == '\0'; -} - -static void parse_path_pattern(const char **pattern, - int *patternlen, - unsigned *flags, - int *nowildcardlen) -{ - const char *p = *pattern; - size_t i, len; - - *flags = 0; - if (*p == '!') { - *flags |= PATTERN_FLAG_NEGATIVE; - p++; - } - len = strlen(p); - if (len && p[len - 1] == '/') { - len--; - *flags |= PATTERN_FLAG_MUSTBEDIR; - } - for (i = 0; i < len; i++) { - if (p[i] == '/') - break; - } - if (i == len) - *flags |= PATTERN_FLAG_NODIR; - *nowildcardlen = simple_length(p); - /* - * we should have excluded the trailing slash from 'p' too, - * but that's one more allocation. 
Instead just make sure - * nowildcardlen does not exceed real patternlen - */ - if (*nowildcardlen > len) - *nowildcardlen = len; - if (*p == '*' && no_wildcard(p + 1)) - *flags |= PATTERN_FLAG_ENDSWITH; - *pattern = p; - *patternlen = len; -} - -static void trim_trailing_spaces(char *buf) -{ - char *p, *last_space = NULL; - - for (p = buf; *p; p++) - switch (*p) { - case ' ': - if (!last_space) - last_space = p; - break; - case '\\': - p++; - if (!*p) - return; - /* fallthrough */ - default: - last_space = NULL; - } - - if (last_space) - *last_space = '\0'; -} - -static int match_basename(const char *basename, int basenamelen, - const char *pattern, int prefix, int patternlen, - unsigned flags) -{ - if (prefix == patternlen) { - if (patternlen == basenamelen && - !fspathncmp(pattern, basename, basenamelen)) - return 1; - } else if (flags & PATTERN_FLAG_ENDSWITH) { - /* "*literal" matching against "fooliteral" */ - if (patternlen - 1 <= basenamelen && - !fspathncmp(pattern + 1, - basename + basenamelen - (patternlen - 1), - patternlen - 1)) - return 1; - } else { - // local modification: call wildmatch() directly - if (!wildmatch(pattern, basename, flags)) - return 1; - } - return 0; -} - -static int match_pathname(const char *pathname, int pathlen, - const char *base, int baselen, - const char *pattern, int prefix, int patternlen) -{ - // local modification: remove local variables - - /* - * match with FNM_PATHNAME; the pattern has base implicitly - * in front of it. - */ - if (*pattern == '/') { - pattern++; - patternlen--; - prefix--; - } - - /* - * baselen does not count the trailing slash. base[] may or - * may not end with a trailing slash though. - */ - if (pathlen < baselen + 1 || - (baselen && pathname[baselen] != '/') || - fspathncmp(pathname, base, baselen)) - return 0; - - // local modification: simplified because always baselen > 0 - pathname += baselen + 1; - pathlen -= baselen + 1; - - if (prefix) { - /* - * if the non-wildcard part is longer than the - * remaining pathname, surely it cannot match. - */ - if (prefix > pathlen) - return 0; - - if (fspathncmp(pattern, pathname, prefix)) - return 0; - pattern += prefix; - patternlen -= prefix; - pathname += prefix; - pathlen -= prefix; - - /* - * If the whole pattern did not have a wildcard, - * then our prefix match is all we need; we - * do not need to call fnmatch at all. - */ - if (!patternlen && !pathlen) - return 1; - } - - // local modification: call wildmatch() directly - return !wildmatch(pattern, pathname, WM_PATHNAME); -} - -// Copied from git/utf8.c - -static const char utf8_bom[] = "\357\273\277"; - -//----------------------------(IMPORT FROM GIT END)---------------------------- - -struct pattern { - unsigned int flags; - int nowildcardlen; - int patternlen; - int dirlen; - char pattern[]; -}; - -static struct pattern **pattern_list; -static int nr_patterns, alloced_patterns; - -// Remember the number of patterns at each directory level -static int *nr_patterns_at; -// Track the current/max directory level; -static int depth, max_depth; -static bool debug_on; -static FILE *out_fp, *stat_fp; -static char *prefix = ""; -static char *progname; - -static void __attribute__((noreturn)) perror_exit(const char *s) -{ - perror(s); - - exit(EXIT_FAILURE); -} - -static void __attribute__((noreturn)) error_exit(const char *fmt, ...) 
-{ - va_list args; - - fprintf(stderr, "%s: error: ", progname); - - va_start(args, fmt); - vfprintf(stderr, fmt, args); - va_end(args); - - exit(EXIT_FAILURE); -} - -static void debug(const char *fmt, ...) -{ - va_list args; - int i; - - if (!debug_on) - return; - - fprintf(stderr, "[DEBUG] "); - - for (i = 0; i < depth * 2; i++) - fputc(' ', stderr); - - va_start(args, fmt); - vfprintf(stderr, fmt, args); - va_end(args); -} - -static void *xrealloc(void *ptr, size_t size) -{ - ptr = realloc(ptr, size); - if (!ptr) - perror_exit(progname); - - return ptr; -} - -static void *xmalloc(size_t size) -{ - return xrealloc(NULL, size); -} - -// similar to last_matching_pattern_from_list() in GIT -static bool is_ignored(const char *path, int pathlen, int dirlen, bool is_dir) -{ - int i; - - // Search in the reverse order because the last matching pattern wins. - for (i = nr_patterns - 1; i >= 0; i--) { - struct pattern *p = pattern_list[i]; - unsigned int flags = p->flags; - const char *gitignore_dir = p->pattern + p->patternlen + 1; - bool ignored; - - if ((flags & PATTERN_FLAG_MUSTBEDIR) && !is_dir) - continue; - - if (flags & PATTERN_FLAG_NODIR) { - if (!match_basename(path + dirlen + 1, - pathlen - dirlen - 1, - p->pattern, - p->nowildcardlen, - p->patternlen, - p->flags)) - continue; - } else { - if (!match_pathname(path, pathlen, - gitignore_dir, p->dirlen, - p->pattern, - p->nowildcardlen, - p->patternlen)) - continue; - } - - debug("%s: matches %s%s%s (%s/.gitignore)\n", path, - flags & PATTERN_FLAG_NEGATIVE ? "!" : "", p->pattern, - flags & PATTERN_FLAG_MUSTBEDIR ? "/" : "", - gitignore_dir); - - ignored = (flags & PATTERN_FLAG_NEGATIVE) == 0; - if (ignored) - debug("Ignore: %s\n", path); - - return ignored; - } - - debug("%s: no match\n", path); - - return false; -} - -static void add_pattern(const char *string, const char *dir, int dirlen) -{ - struct pattern *p; - int patternlen, nowildcardlen; - unsigned int flags; - - parse_path_pattern(&string, &patternlen, &flags, &nowildcardlen); - - if (patternlen == 0) - return; - - p = xmalloc(sizeof(*p) + patternlen + dirlen + 2); - - memcpy(p->pattern, string, patternlen); - p->pattern[patternlen] = 0; - memcpy(p->pattern + patternlen + 1, dir, dirlen); - p->pattern[patternlen + 1 + dirlen] = 0; - - p->patternlen = patternlen; - p->nowildcardlen = nowildcardlen; - p->dirlen = dirlen; - p->flags = flags; - - debug("Add pattern: %s%s%s\n", - flags & PATTERN_FLAG_NEGATIVE ? "!" : "", p->pattern, - flags & PATTERN_FLAG_MUSTBEDIR ? 
"/" : ""); - - if (nr_patterns >= alloced_patterns) { - alloced_patterns += 128; - pattern_list = xrealloc(pattern_list, - sizeof(*pattern_list) * alloced_patterns); - } - - pattern_list[nr_patterns++] = p; -} - -// similar to add_patterns_from_buffer() in GIT -static void add_patterns_from_gitignore(const char *dir, int dirlen) -{ - struct stat st; - char path[PATH_MAX], *buf, *entry; - size_t size; - int fd, pathlen, i; - - pathlen = snprintf(path, sizeof(path), "%s/.gitignore", dir); - if (pathlen >= sizeof(path)) - error_exit("%s: too long path was truncated\n", path); - - fd = open(path, O_RDONLY | O_NOFOLLOW); - if (fd < 0) { - if (errno != ENOENT) - return perror_exit(path); - return; - } - - if (fstat(fd, &st) < 0) - perror_exit(path); - - size = st.st_size; - - buf = xmalloc(size + 1); - if (read(fd, buf, st.st_size) != st.st_size) - perror_exit(path); - - buf[st.st_size] = '\n'; - if (close(fd)) - perror_exit(path); - - debug("Parse %s\n", path); - - entry = buf; - - // skip utf8 bom - if (!strncmp(entry, utf8_bom, strlen(utf8_bom))) - entry += strlen(utf8_bom); - - for (i = entry - buf; i < size; i++) { - if (buf[i] == '\n') { - if (entry != buf + i && entry[0] != '#') { - buf[i - (i && buf[i-1] == '\r')] = 0; - trim_trailing_spaces(entry); - add_pattern(entry, dir, dirlen); - } - entry = buf + i + 1; - } - } - - free(buf); -} - -// Save the current number of patterns and increment the depth -static void increment_depth(void) -{ - if (depth >= max_depth) { - max_depth += 1; - nr_patterns_at = xrealloc(nr_patterns_at, - sizeof(*nr_patterns_at) * max_depth); - } - - nr_patterns_at[depth] = nr_patterns; - depth++; -} - -// Decrement the depth, and free up the patterns of this directory level. -static void decrement_depth(void) -{ - depth--; - assert(depth >= 0); - - while (nr_patterns > nr_patterns_at[depth]) - free(pattern_list[--nr_patterns]); -} - -static void print_path(const char *path) -{ - // The path always starts with "./" - assert(strlen(path) >= 2); - - // Replace the root directory with a preferred prefix. - // This is useful for the tar command. - fprintf(out_fp, "%s%s\n", prefix, path + 2); -} - -static void print_stat(const char *path, struct stat *st) -{ - if (!stat_fp) - return; - - if (!S_ISREG(st->st_mode) && !S_ISLNK(st->st_mode)) - return; - - assert(strlen(path) >= 2); - - fprintf(stat_fp, "%c %9ld %10ld %s\n", - S_ISLNK(st->st_mode) ? 'l' : '-', - st->st_size, st->st_mtim.tv_sec, path + 2); -} - -// Traverse the entire directory tree, parsing .gitignore files. -// Print file paths that are not tracked by git. -// -// Return true if all files under the directory are ignored, false otherwise. 
-static bool traverse_directory(const char *dir, int dirlen) -{ - bool all_ignored = true; - DIR *dirp; - - debug("Enter[%d]: %s\n", depth, dir); - increment_depth(); - - add_patterns_from_gitignore(dir, dirlen); - - dirp = opendir(dir); - if (!dirp) - perror_exit(dir); - - while (1) { - struct dirent *d; - struct stat st; - char path[PATH_MAX]; - int pathlen; - bool ignored; - - errno = 0; - d = readdir(dirp); - if (!d) { - if (errno) - perror_exit(dir); - break; - } - - if (!strcmp(d->d_name, "..") || !strcmp(d->d_name, ".")) - continue; - - pathlen = snprintf(path, sizeof(path), "%s/%s", dir, d->d_name); - if (pathlen >= sizeof(path)) - error_exit("%s: too long path was truncated\n", path); - - if (lstat(path, &st) < 0) - perror_exit(path); - - if ((!S_ISREG(st.st_mode) && !S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode)) || - is_ignored(path, pathlen, dirlen, S_ISDIR(st.st_mode))) { - ignored = true; - } else { - if (S_ISDIR(st.st_mode) && !S_ISLNK(st.st_mode)) - // If all the files in a directory are ignored, - // let's ignore that directory as well. This - // will avoid empty directories in the tarball. - ignored = traverse_directory(path, pathlen); - else - ignored = false; - } - - if (ignored) { - print_path(path); - } else { - print_stat(path, &st); - all_ignored = false; - } - } - - if (closedir(dirp)) - perror_exit(dir); - - decrement_depth(); - debug("Leave[%d]: %s\n", depth, dir); - - return all_ignored; -} - -static void usage(void) -{ - fprintf(stderr, - "usage: %s [options]\n" - "\n" - "Show files that are ignored by git\n" - "\n" - "options:\n" - " -d, --debug print debug messages to stderr\n" - " -e, --exclude PATTERN add the given exclude pattern\n" - " -h, --help show this help message and exit\n" - " -i, --ignore-case Ignore case differences between the patterns and the files\n" - " -o, --output FILE output the ignored files to a file (default: '-', i.e. 
stdout)\n" - " -p, --prefix PREFIX prefix added to each path (default: empty string)\n" - " -r, --rootdir DIR root of the source tree (default: current working directory)\n" - " -s, --stat FILE output the file stat of non-ignored files to a file\n", - progname); -} - -static void open_output(const char *pathname, FILE **fp) -{ - if (strcmp(pathname, "-")) { - *fp = fopen(pathname, "w"); - if (!*fp) - perror_exit(pathname); - } else { - *fp = stdout; - } -} - -static void close_output(const char *pathname, FILE *fp) -{ - fflush(fp); - - if (ferror(fp)) - error_exit("not all data was written to the output\n"); - - if (fclose(fp)) - perror_exit(pathname); -} - -int main(int argc, char *argv[]) -{ - const char *output = "-"; - const char *rootdir = "."; - const char *stat = NULL; - - progname = strrchr(argv[0], '/'); - if (progname) - progname++; - else - progname = argv[0]; - - while (1) { - static struct option long_options[] = { - {"debug", no_argument, NULL, 'd'}, - {"help", no_argument, NULL, 'h'}, - {"ignore-case", no_argument, NULL, 'i'}, - {"output", required_argument, NULL, 'o'}, - {"prefix", required_argument, NULL, 'p'}, - {"rootdir", required_argument, NULL, 'r'}, - {"stat", required_argument, NULL, 's'}, - {"exclude", required_argument, NULL, 'x'}, - {}, - }; - - int c = getopt_long(argc, argv, "dhino:p:r:s:x:", long_options, NULL); - - if (c == -1) - break; - - switch (c) { - case 'd': - debug_on = true; - break; - case 'h': - usage(); - exit(0); - case 'i': - ignore_case = true; - break; - case 'o': - output = optarg; - break; - case 'p': - prefix = optarg; - break; - case 'r': - rootdir = optarg; - break; - case 's': - stat = optarg; - break; - case 'x': - add_pattern(optarg, ".", strlen(".")); - break; - case '?': - usage(); - /* fallthrough */ - default: - exit(EXIT_FAILURE); - } - } - - open_output(output, &out_fp); - if (stat && stat[0]) - open_output(stat, &stat_fp); - - if (chdir(rootdir)) - perror_exit(rootdir); - - add_pattern(".git/", ".", strlen(".")); - - if (traverse_directory(".", strlen("."))) - print_path("./"); - - assert(depth == 0); - - while (nr_patterns > 0) - free(pattern_list[--nr_patterns]); - free(pattern_list); - free(nr_patterns_at); - - close_output(output, out_fp); - if (stat_fp) - close_output(stat, stat_fp); - - return 0; -} diff --git a/scripts/package/builddeb b/scripts/package/builddeb index ff5e7d8e380b..c5ae57167d7c 100755 --- a/scripts/package/builddeb +++ b/scripts/package/builddeb @@ -51,7 +51,116 @@ create_package() { dpkg-deb $dpkg_deb_opts ${KDEB_COMPRESS:+-Z$KDEB_COMPRESS} --build "$pdir" .. 
} -deploy_kernel_headers () { +install_linux_image () { + pdir=$1 + pname=$2 + + rm -rf ${pdir} + + # Only some architectures with OF support have this target + if is_enabled CONFIG_OF_EARLY_FLATTREE && [ -d "${srctree}/arch/${SRCARCH}/boot/dts" ]; then + ${MAKE} -f ${srctree}/Makefile INSTALL_DTBS_PATH="${pdir}/usr/lib/linux-image-${KERNELRELEASE}" dtbs_install + fi + + if is_enabled CONFIG_MODULES; then + ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${pdir}" modules_install + rm -f "${pdir}/lib/modules/${KERNELRELEASE}/build" + rm -f "${pdir}/lib/modules/${KERNELRELEASE}/source" + if [ "${SRCARCH}" = um ] ; then + mkdir -p "${pdir}/usr/lib/uml/modules" + mv "${pdir}/lib/modules/${KERNELRELEASE}" "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}" + fi + fi + + # Install the kernel + if [ "${ARCH}" = um ] ; then + mkdir -p "${pdir}/usr/bin" "${pdir}/usr/share/doc/${pname}" + cp System.map "${pdir}/usr/lib/uml/modules/${KERNELRELEASE}/System.map" + cp ${KCONFIG_CONFIG} "${pdir}/usr/share/doc/${pname}/config" + gzip "${pdir}/usr/share/doc/${pname}/config" + else + mkdir -p "${pdir}/boot" + cp System.map "${pdir}/boot/System.map-${KERNELRELEASE}" + cp ${KCONFIG_CONFIG} "${pdir}/boot/config-${KERNELRELEASE}" + fi + + # Not all arches have the same installed path in debian + # XXX: have each arch Makefile export a variable of the canonical image install + # path instead + case "${SRCARCH}" in + um) + installed_image_path="usr/bin/linux-${KERNELRELEASE}";; + parisc|mips|powerpc) + installed_image_path="boot/vmlinux-${KERNELRELEASE}";; + *) + installed_image_path="boot/vmlinuz-${KERNELRELEASE}";; + esac + cp "$(${MAKE} -s -f ${srctree}/Makefile image_name)" "${pdir}/${installed_image_path}" + + # Install the maintainer scripts + # Note: hook scripts under /etc/kernel are also executed by official Debian + # kernel packages, as well as kernel packages built using make-kpkg. + # make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and + # so do we; recent versions of dracut and initramfs-tools will obey this. 
+ debhookdir=${KDEB_HOOKDIR:-/etc/kernel} + for script in postinst postrm preinst prerm; do + mkdir -p "${pdir}${debhookdir}/${script}.d" + + mkdir -p "${pdir}/DEBIAN" + cat <<-EOF > "${pdir}/DEBIAN/${script}" + + #!/bin/sh + + set -e + + # Pass maintainer script parameters to hook scripts + export DEB_MAINT_PARAMS="\$*" + + # Tell initramfs builder whether it's wanted + export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No) + + test -d ${debhookdir}/${script}.d && run-parts --arg="${KERNELRELEASE}" --arg="/${installed_image_path}" ${debhookdir}/${script}.d + exit 0 + EOF + chmod 755 "${pdir}/DEBIAN/${script}" + done +} + +install_linux_image_dbg () { + pdir=$1 + image_pdir=$2 + + rm -rf ${pdir} + + for module in $(find ${image_pdir}/lib/modules/ -name *.ko -printf '%P\n'); do + module=lib/modules/${module} + mkdir -p $(dirname ${pdir}/usr/lib/debug/${module}) + # only keep debug symbols in the debug file + ${OBJCOPY} --only-keep-debug ${image_pdir}/${module} ${pdir}/usr/lib/debug/${module} + # strip original module from debug symbols + ${OBJCOPY} --strip-debug ${image_pdir}/${module} + # then add a link to those + ${OBJCOPY} --add-gnu-debuglink=${pdir}/usr/lib/debug/${module} ${image_pdir}/${module} + done + + # re-sign stripped modules + if is_enabled CONFIG_MODULE_SIG_ALL; then + ${MAKE} -f ${srctree}/Makefile INSTALL_MOD_PATH="${image_pdir}" modules_sign + fi + + # Build debug package + # Different tools want the image in different locations + # perf + mkdir -p ${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/ + cp vmlinux ${pdir}/usr/lib/debug/lib/modules/${KERNELRELEASE}/ + # systemtap + mkdir -p ${pdir}/usr/lib/debug/boot/ + ln -s ../lib/modules/${KERNELRELEASE}/vmlinux ${pdir}/usr/lib/debug/boot/vmlinux-${KERNELRELEASE} + # kdump-tools + ln -s lib/modules/${KERNELRELEASE}/vmlinux ${pdir}/usr/lib/debug/vmlinux-${KERNELRELEASE} +} + +install_kernel_headers () { pdir=$1 rm -rf $pdir @@ -89,7 +198,7 @@ deploy_kernel_headers () { ln -s /usr/src/linux-headers-$version $pdir/lib/modules/$version/build } -deploy_libc_headers () { +install_libc_headers () { pdir=$1 rm -rf $pdir @@ -104,132 +213,38 @@ deploy_libc_headers () { mv $pdir/usr/include/asm $pdir/usr/include/$host_arch/ } -version=$KERNELRELEASE -tmpdir=debian/linux-image -dbg_dir=debian/linux-image-dbg -packagename=linux-image-$version -dbg_packagename=$packagename-dbg - -if [ "$ARCH" = "um" ] ; then - packagename=user-mode-linux-$version -fi - -# Not all arches have the same installed path in debian -# XXX: have each arch Makefile export a variable of the canonical image install -# path instead -case $ARCH in -um) - installed_image_path="usr/bin/linux-$version" - ;; -parisc|mips|powerpc) - installed_image_path="boot/vmlinux-$version" - ;; -*) - installed_image_path="boot/vmlinuz-$version" -esac - -BUILD_DEBUG=$(if_enabled_echo CONFIG_DEBUG_INFO Yes) - -# Setup the directory structure -rm -rf "$tmpdir" "$dbg_dir" debian/files -mkdir -m 755 -p "$tmpdir/DEBIAN" -mkdir -p "$tmpdir/lib" "$tmpdir/boot" - -# Install the kernel -if [ "$ARCH" = "um" ] ; then - mkdir -p "$tmpdir/usr/lib/uml/modules/$version" "$tmpdir/usr/bin" "$tmpdir/usr/share/doc/$packagename" - cp System.map "$tmpdir/usr/lib/uml/modules/$version/System.map" - cp $KCONFIG_CONFIG "$tmpdir/usr/share/doc/$packagename/config" - gzip "$tmpdir/usr/share/doc/$packagename/config" -else - cp System.map "$tmpdir/boot/System.map-$version" - cp $KCONFIG_CONFIG "$tmpdir/boot/config-$version" -fi -cp "$($MAKE -s -f $srctree/Makefile image_name)" 
"$tmpdir/$installed_image_path" - -if is_enabled CONFIG_OF_EARLY_FLATTREE; then - # Only some architectures with OF support have this target - if [ -d "${srctree}/arch/$SRCARCH/boot/dts" ]; then - $MAKE -f $srctree/Makefile INSTALL_DTBS_PATH="$tmpdir/usr/lib/$packagename" dtbs_install - fi -fi - -if is_enabled CONFIG_MODULES; then - INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_install - rm -f "$tmpdir/lib/modules/$version/build" - rm -f "$tmpdir/lib/modules/$version/source" - if [ "$ARCH" = "um" ] ; then - mv "$tmpdir/lib/modules/$version"/* "$tmpdir/usr/lib/uml/modules/$version/" - rmdir "$tmpdir/lib/modules/$version" - fi - if [ -n "$BUILD_DEBUG" ] ; then - for module in $(find $tmpdir/lib/modules/ -name *.ko -printf '%P\n'); do - module=lib/modules/$module - mkdir -p $(dirname $dbg_dir/usr/lib/debug/$module) - # only keep debug symbols in the debug file - $OBJCOPY --only-keep-debug $tmpdir/$module $dbg_dir/usr/lib/debug/$module - # strip original module from debug symbols - $OBJCOPY --strip-debug $tmpdir/$module - # then add a link to those - $OBJCOPY --add-gnu-debuglink=$dbg_dir/usr/lib/debug/$module $tmpdir/$module - done - - # resign stripped modules - if is_enabled CONFIG_MODULE_SIG_ALL; then - INSTALL_MOD_PATH="$tmpdir" $MAKE -f $srctree/Makefile modules_sign - fi - fi -fi - -# Install the maintainer scripts -# Note: hook scripts under /etc/kernel are also executed by official Debian -# kernel packages, as well as kernel packages built using make-kpkg. -# make-kpkg sets $INITRD to indicate whether an initramfs is wanted, and -# so do we; recent versions of dracut and initramfs-tools will obey this. -debhookdir=${KDEB_HOOKDIR:-/etc/kernel} -for script in postinst postrm preinst prerm ; do - mkdir -p "$tmpdir$debhookdir/$script.d" - cat <<EOF > "$tmpdir/DEBIAN/$script" -#!/bin/sh - -set -e - -# Pass maintainer script parameters to hook scripts -export DEB_MAINT_PARAMS="\$*" - -# Tell initramfs builder whether it's wanted -export INITRD=$(if_enabled_echo CONFIG_BLK_DEV_INITRD Yes No) - -test -d $debhookdir/$script.d && run-parts --arg="$version" --arg="/$installed_image_path" $debhookdir/$script.d -exit 0 -EOF - chmod 755 "$tmpdir/DEBIAN/$script" +rm -f debian/files + +packages_enabled=$(dh_listpackages) + +for package in ${packages_enabled} +do + case ${package} in + *-dbg) + # This must be done after linux-image, that is, we expect the + # debug package appears after linux-image in debian/control. 
+ install_linux_image_dbg debian/linux-image-dbg debian/linux-image;; + linux-image-*|user-mode-linux-*) + install_linux_image debian/linux-image ${package};; + linux-libc-dev) + install_libc_headers debian/linux-libc-dev;; + linux-headers-*) + install_kernel_headers debian/linux-headers;; + esac done -if [ "$ARCH" != "um" ]; then - if is_enabled CONFIG_MODULES; then - deploy_kernel_headers debian/linux-headers - create_package linux-headers-$version debian/linux-headers - fi - - deploy_libc_headers debian/linux-libc-dev - create_package linux-libc-dev debian/linux-libc-dev -fi - -create_package "$packagename" "$tmpdir" - -if [ -n "$BUILD_DEBUG" ] ; then - # Build debug package - # Different tools want the image in different locations - # perf - mkdir -p $dbg_dir/usr/lib/debug/lib/modules/$version/ - cp vmlinux $dbg_dir/usr/lib/debug/lib/modules/$version/ - # systemtap - mkdir -p $dbg_dir/usr/lib/debug/boot/ - ln -s ../lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/boot/vmlinux-$version - # kdump-tools - ln -s lib/modules/$version/vmlinux $dbg_dir/usr/lib/debug/vmlinux-$version - create_package "$dbg_packagename" "$dbg_dir" -fi +for package in ${packages_enabled} +do + case ${package} in + *-dbg) + create_package ${package} debian/linux-image-dbg;; + linux-image-*|user-mode-linux-*) + create_package ${package} debian/linux-image;; + linux-libc-dev) + create_package ${package} debian/linux-libc-dev;; + linux-headers-*) + create_package ${package} debian/linux-headers;; + esac +done exit 0 diff --git a/scripts/package/deb-build-option b/scripts/package/deb-build-option index b079b0d121d4..7950eff01781 100755 --- a/scripts/package/deb-build-option +++ b/scripts/package/deb-build-option @@ -1,16 +1,14 @@ #!/bin/sh # SPDX-License-Identifier: GPL-2.0-only -# Set up CROSS_COMPILE if we are cross-compiling, but not called from the -# kernel toplevel Makefile -if [ -z "${CROSS_COMPILE}${cross_compiling}" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then +# Set up CROSS_COMPILE if not defined yet +if [ "${CROSS_COMPILE+set}" != "set" -a "${DEB_HOST_ARCH}" != "${DEB_BUILD_ARCH}" ]; then echo CROSS_COMPILE=${DEB_HOST_GNU_TYPE}- fi version=$(dpkg-parsechangelog -S Version) -version_upstream="${version%-*}" -debian_revision="${version#${version_upstream}}" -debian_revision="${debian_revision#*-}" +debian_revision="${version##*-}" -echo KERNELRELEASE=${version_upstream} -echo KBUILD_BUILD_VERSION=${debian_revision} +if [ "${version}" != "${debian_revision}" ]; then + echo KBUILD_BUILD_VERSION=${debian_revision} +fi diff --git a/scripts/package/gen-diff-patch b/scripts/package/gen-diff-patch new file mode 100755 index 000000000000..f842ab50a780 --- /dev/null +++ b/scripts/package/gen-diff-patch @@ -0,0 +1,44 @@ +#!/bin/sh +# SPDX-License-Identifier: GPL-2.0-only + +diff_patch="${1}" +untracked_patch="${2}" +srctree=$(dirname $0)/../.. + +rm -f ${diff_patch} ${untracked_patch} + +if ! ${srctree}/scripts/check-git; then + exit +fi + +mkdir -p "$(dirname ${diff_patch})" "$(dirname ${untracked_patch})" + +git -C "${srctree}" diff HEAD > "${diff_patch}" + +if [ ! -s "${diff_patch}" ]; then + rm -f "${diff_patch}" + exit +fi + +git -C ${srctree} status --porcelain --untracked-files=all | +while read stat path +do + if [ "${stat}" = '??' ]; then + + if ! diff -u /dev/null "${srctree}/${path}" > .tmp_diff && + ! 
head -n1 .tmp_diff | grep -q "Binary files"; then + { + echo "--- /dev/null" + echo "+++ linux/$path" + cat .tmp_diff | tail -n +3 + } >> ${untracked_patch} + fi + fi +done + +rm -f .tmp_diff + +if [ ! -s "${diff_patch}" ]; then + rm -f "${diff_patch}" + exit +fi diff --git a/scripts/package/mkdebian b/scripts/package/mkdebian index f74380036bb5..e20a2b5be9eb 100755 --- a/scripts/package/mkdebian +++ b/scripts/package/mkdebian @@ -91,7 +91,7 @@ version=$KERNELRELEASE if [ -n "$KDEB_PKGVERSION" ]; then packageversion=$KDEB_PKGVERSION else - packageversion=$version-$($srctree/init/build-version) + packageversion=$(${srctree}/scripts/setlocalversion --no-local ${srctree})-$($srctree/init/build-version) fi sourcename=${KDEB_SOURCENAME:-linux-upstream} @@ -152,6 +152,14 @@ mkdir -p debian/patches } > debian/patches/config echo config > debian/patches/series +$(dirname $0)/gen-diff-patch debian/patches/diff.patch debian/patches/untracked.patch +if [ -f debian/patches/diff.patch ]; then + echo diff.patch >> debian/patches/series +fi +if [ -f debian/patches/untracked.patch ]; then + echo untracked.patch >> debian/patches/series +fi + echo $debarch > debian/arch extra_build_depends=", $(if_enabled_echo CONFIG_UNWINDER_ORC libelf-dev:native)" extra_build_depends="$extra_build_depends, $(if_enabled_echo CONFIG_SYSTEM_TRUSTED_KEYRING libssl-dev:native)" @@ -192,7 +200,7 @@ Section: kernel Priority: optional Maintainer: $maintainer Rules-Requires-Root: no -Build-Depends: bc, rsync, kmod, cpio, bison, flex $extra_build_depends +Build-Depends: bc, debhelper, rsync, kmod, cpio, bison, flex $extra_build_depends Homepage: https://www.kernel.org/ Package: $packagename-$version @@ -200,6 +208,10 @@ Architecture: $debarch Description: Linux kernel, version $version This package contains the Linux kernel, modules and corresponding other files, version: $version. +EOF + +if [ "${SRCARCH}" != um ]; then +cat <<EOF >> debian/control Package: linux-libc-dev Section: devel @@ -222,6 +234,7 @@ Description: Linux kernel headers for $version on $debarch This is useful for people who need to build external modules EOF fi +fi if is_enabled CONFIG_DEBUG_INFO; then cat <<EOF >> debian/control @@ -239,10 +252,12 @@ cat <<EOF > debian/rules #!$(command -v $MAKE) -f srctree ?= . 
+KERNELRELEASE = ${KERNELRELEASE} build-indep: build-arch: \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \ + KERNELRELEASE=\$(KERNELRELEASE) \ \$(shell \$(srctree)/scripts/package/deb-build-option) \ olddefconfig all @@ -250,7 +265,9 @@ build: build-arch binary-indep: binary-arch: build-arch - \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} intdeb-pkg + \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} \ + KERNELRELEASE=\$(KERNELRELEASE) intdeb-pkg + clean: rm -rf debian/files debian/linux-* \$(MAKE) -f \$(srctree)/Makefile ARCH=${ARCH} clean diff --git a/scripts/package/mkspec b/scripts/package/mkspec index 3c550960dd39..b7d1dc28a5d6 100755 --- a/scripts/package/mkspec +++ b/scripts/package/mkspec @@ -15,15 +15,21 @@ if [ "$1" = prebuilt ]; then MAKE="$MAKE -f $srctree/Makefile" else S= + + mkdir -p rpmbuild/SOURCES + cp linux.tar.gz rpmbuild/SOURCES + cp "${KCONFIG_CONFIG}" rpmbuild/SOURCES/config + $(dirname $0)/gen-diff-patch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch + touch rpmbuild/SOURCES/diff.patch rpmbuild/SOURCES/untracked.patch fi -if grep -q CONFIG_MODULES=y .config; then +if grep -q CONFIG_MODULES=y include/config/auto.conf; then M= else M=DEL fi -if grep -q CONFIG_DRM=y .config; then +if grep -q CONFIG_DRM=y include/config/auto.conf; then PROVIDES=kernel-drm fi @@ -48,7 +54,9 @@ sed -e '/^DEL/d' -e 's/^\t*//' <<EOF Vendor: The Linux Community URL: https://www.kernel.org $S Source0: linux.tar.gz -$S Source1: .config +$S Source1: config +$S Source2: diff.patch +$S Source3: untracked.patch Provides: $PROVIDES $S BuildRequires: bc binutils bison dwarves $S BuildRequires: (elfutils-libelf-devel or libelf-devel) flex @@ -85,7 +93,13 @@ $S$M against the $__KERNELRELEASE kernel package. $S$M $S %prep $S %setup -q -n linux -$S cp %{SOURCE1} . +$S cp %{SOURCE1} .config +$S if [ -s %{SOURCE2} ]; then +$S patch -p1 < %{SOURCE2} +$S fi +$S if [ -s %{SOURCE3} ]; then +$S patch -p1 < %{SOURCE3} +$S fi $S $S %build $S $MAKE %{?_smp_mflags} KERNELRELEASE=$KERNELRELEASE KBUILD_BUILD_VERSION=%{release} diff --git a/scripts/setlocalversion b/scripts/setlocalversion index e54839a42d4b..3d3babac8298 100755 --- a/scripts/setlocalversion +++ b/scripts/setlocalversion @@ -11,10 +11,16 @@ # usage() { - echo "Usage: $0 [srctree]" >&2 + echo "Usage: $0 [--no-local] [srctree]" >&2 exit 1 } +no_local=false +if test "$1" = "--no-local"; then + no_local=true + shift +fi + srctree=. if test $# -gt 0; then srctree=$1 @@ -26,14 +32,22 @@ fi scm_version() { - local short + local short=false + local no_dirty=false local tag - short=false + + while [ $# -gt 0 ]; + do + case "$1" in + --short) + short=true;; + --no-dirty) + no_dirty=true;; + esac + shift + done cd "$srctree" - if test "$1" = "--short"; then - short=true - fi if test -n "$(git rev-parse --show-cdup 2>/dev/null)"; then return @@ -75,6 +89,10 @@ scm_version() printf '%s%s' -g "$(echo $head | cut -c1-12)" fi + if ${no_dirty}; then + return + fi + # Check for uncommitted changes. # This script must avoid any write attempt to the source tree, which # might be read-only. @@ -110,11 +128,6 @@ collect_files() echo "$res" } -if ! test -e include/config/auto.conf; then - echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2 - exit 1 -fi - if [ -z "${KERNELVERSION}" ]; then echo "KERNELVERSION is not set" >&2 exit 1 @@ -126,6 +139,16 @@ if test ! 
"$srctree" -ef .; then file_localversion="${file_localversion}$(collect_files "$srctree"/localversion*)" fi +if ${no_local}; then + echo "${KERNELVERSION}$(scm_version --no-dirty)" + exit 0 +fi + +if ! test -e include/config/auto.conf; then + echo "Error: kernelrelease not valid - run 'make prepare' to update it" >&2 + exit 1 +fi + # version string from CONFIG_LOCALVERSION config_localversion=$(sed -n 's/^CONFIG_LOCALVERSION=\(.*\)$/\1/p' include/config/auto.conf) diff --git a/sound/hda/intel-dsp-config.c b/sound/hda/intel-dsp-config.c index ae31bb127594..317bdf6dcbef 100644 --- a/sound/hda/intel-dsp-config.c +++ b/sound/hda/intel-dsp-config.c @@ -472,6 +472,15 @@ static const struct config_entry config_table[] = { }, #endif +/* Meteor Lake */ +#if IS_ENABLED(CONFIG_SND_SOC_SOF_METEORLAKE) + /* Meteorlake-P */ + { + .flags = FLAG_SOF | FLAG_SOF_ONLY_IF_DMIC_OR_SOUNDWIRE, + .device = 0x7e28, + }, +#endif + }; static const struct config_entry *snd_intel_dsp_find_config diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c index 27e11b5f70b9..c7d7eff86727 100644 --- a/sound/pci/asihpi/hpi6205.c +++ b/sound/pci/asihpi/hpi6205.c @@ -430,7 +430,7 @@ void HPI_6205(struct hpi_message *phm, struct hpi_response *phr) pao = hpi_find_adapter(phm->adapter_index); } else { /* subsys messages don't address an adapter */ - _HPI_6205(NULL, phm, phr); + phr->error = HPI_ERROR_INVALID_OBJ_INDEX; return; } diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 81c4a45254ff..77a592f21947 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -328,14 +328,15 @@ enum { #define needs_eld_notify_link(chip) false #endif -#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \ +#define CONTROLLER_IN_GPU(pci) (((pci)->vendor == 0x8086) && \ + (((pci)->device == 0x0a0c) || \ ((pci)->device == 0x0c0c) || \ ((pci)->device == 0x0d0c) || \ ((pci)->device == 0x160c) || \ ((pci)->device == 0x490d) || \ ((pci)->device == 0x4f90) || \ ((pci)->device == 0x4f91) || \ - ((pci)->device == 0x4f92)) + ((pci)->device == 0x4f92))) #define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98) diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c index acde4cd58785..099722ebaed8 100644 --- a/sound/pci/hda/patch_ca0132.c +++ b/sound/pci/hda/patch_ca0132.c @@ -4228,8 +4228,10 @@ static int tuning_ctl_set(struct hda_codec *codec, hda_nid_t nid, for (i = 0; i < TUNING_CTLS_COUNT; i++) if (nid == ca0132_tuning_ctls[i].nid) - break; + goto found; + return -EINVAL; +found: snd_hda_power_up(codec); dspio_set_param(codec, ca0132_tuning_ctls[i].mid, 0x20, ca0132_tuning_ctls[i].req, diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 3c629f4ae080..f09a1d7c1b18 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9447,6 +9447,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x103c, 0x8b8a, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b8b, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b8d, "HP", ALC236_FIXUP_HP_GPIO_LED), + SND_PCI_QUIRK(0x103c, 0x8b8f, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8b92, "HP", ALC245_FIXUP_CS35L41_SPI_2_HP_GPIO_LED), SND_PCI_QUIRK(0x103c, 0x8bf0, "HP", ALC236_FIXUP_HP_GPIO_LED), SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC), @@ -9539,6 +9540,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x144d, 0xc830, "Samsung Galaxy Book Ion 
(NT950XCJ-X716A)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x144d, 0xc832, "Samsung Galaxy Book Flex Alpha (NP730QCJ)", ALC256_FIXUP_SAMSUNG_HEADPHONE_VERY_QUIET), SND_PCI_QUIRK(0x144d, 0xca03, "Samsung Galaxy Book2 Pro 360 (NP930QED)", ALC298_FIXUP_SAMSUNG_AMP), + SND_PCI_QUIRK(0x144d, 0xc868, "Samsung Galaxy Book2 Pro (NP930XED)", ALC298_FIXUP_SAMSUNG_AMP), SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb120, "MSI Cubi MS-B120", ALC283_FIXUP_HEADSET_MIC), SND_PCI_QUIRK(0x1462, 0xb171, "Cubi N 8GL (MS-B171)", ALC283_FIXUP_HEADSET_MIC), diff --git a/sound/soc/codecs/da7219-aad.c b/sound/soc/codecs/da7219-aad.c index 4a4f09f924bc..e3d398b8f54e 100644 --- a/sound/soc/codecs/da7219-aad.c +++ b/sound/soc/codecs/da7219-aad.c @@ -968,6 +968,8 @@ int da7219_aad_init(struct snd_soc_component *component) INIT_WORK(&da7219_aad->hptest_work, da7219_aad_hptest_work); INIT_WORK(&da7219_aad->jack_det_work, da7219_aad_jack_det_work); + mutex_init(&da7219_aad->jack_det_mutex); + ret = request_threaded_irq(da7219_aad->irq, da7219_aad_pre_irq_thread, da7219_aad_irq_thread, IRQF_TRIGGER_LOW | IRQF_ONESHOT, diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index 01e8ffda2a4b..6d980fbc4207 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -428,8 +428,13 @@ static int hdmi_codec_startup(struct snd_pcm_substream *substream, { struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai); bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; + bool has_capture = !hcp->hcd.no_i2s_capture; + bool has_playback = !hcp->hcd.no_i2s_playback; int ret = 0; + if (!((has_playback && tx) || (has_capture && !tx))) + return 0; + mutex_lock(&hcp->lock); if (hcp->busy) { dev_err(dai->dev, "Only one simultaneous stream supported!\n"); @@ -468,6 +473,12 @@ static void hdmi_codec_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct hdmi_codec_priv *hcp = snd_soc_dai_get_drvdata(dai); + bool tx = substream->stream == SNDRV_PCM_STREAM_PLAYBACK; + bool has_capture = !hcp->hcd.no_i2s_capture; + bool has_playback = !hcp->hcd.no_i2s_playback; + + if (!((has_playback && tx) || (has_capture && !tx))) + return; hcp->chmap_idx = HDMI_CODEC_CHMAP_IDX_UNKNOWN; hcp->hcd.ops->audio_shutdown(dai->dev->parent, hcp->hcd.data); diff --git a/sound/soc/codecs/lpass-tx-macro.c b/sound/soc/codecs/lpass-tx-macro.c index bf27bdd5be20..473d3cd39554 100644 --- a/sound/soc/codecs/lpass-tx-macro.c +++ b/sound/soc/codecs/lpass-tx-macro.c @@ -242,7 +242,7 @@ enum { struct tx_mute_work { struct tx_macro *tx; - u32 decimator; + u8 decimator; struct delayed_work dwork; }; @@ -635,7 +635,7 @@ exit: return 0; } -static bool is_amic_enabled(struct snd_soc_component *component, int decimator) +static bool is_amic_enabled(struct snd_soc_component *component, u8 decimator) { u16 adc_mux_reg, adc_reg, adc_n; @@ -849,7 +849,7 @@ static int tx_macro_enable_dec(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - unsigned int decimator; + u8 decimator; u16 tx_vol_ctl_reg, dec_cfg_reg, hpf_gate_reg, tx_gain_ctl_reg; u8 hpf_cut_off_freq; int hpf_delay = TX_MACRO_DMIC_HPF_DELAY_MS; @@ -1064,7 +1064,8 @@ static int tx_macro_hw_params(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_component *component = dai->component; - u32 decimator, sample_rate; + u32 sample_rate; + u8 decimator; int tx_fs_rate; struct 
tx_macro *tx = snd_soc_component_get_drvdata(component); @@ -1128,7 +1129,7 @@ static int tx_macro_digital_mute(struct snd_soc_dai *dai, int mute, int stream) { struct snd_soc_component *component = dai->component; struct tx_macro *tx = snd_soc_component_get_drvdata(component); - u16 decimator; + u8 decimator; /* active decimator not set yet */ if (tx->active_decimator[dai->id] == -1) diff --git a/sound/soc/fsl/Kconfig b/sound/soc/fsl/Kconfig index 614eceda6b9e..33b67db8794e 100644 --- a/sound/soc/fsl/Kconfig +++ b/sound/soc/fsl/Kconfig @@ -294,6 +294,10 @@ config SND_SOC_IMX_SGTL5000 Say Y if you want to add support for SoC audio on an i.MX board with a sgtl5000 codec. + Note that this is an old driver. Consider enabling + SND_SOC_FSL_ASOC_CARD and SND_SOC_SGTL5000 to use the newer + driver. + config SND_SOC_IMX_SPDIF tristate "SoC Audio support for i.MX boards with S/PDIF" select SND_SOC_IMX_PCM_DMA diff --git a/sound/soc/intel/avs/boards/da7219.c b/sound/soc/intel/avs/boards/da7219.c index acd43b6108e9..1a1d572cc1d0 100644 --- a/sound/soc/intel/avs/boards/da7219.c +++ b/sound/soc/intel/avs/boards/da7219.c @@ -117,6 +117,26 @@ static void avs_da7219_codec_exit(struct snd_soc_pcm_runtime *rtd) snd_soc_component_set_jack(asoc_rtd_to_codec(rtd, 0)->component, NULL, NULL); } +static int +avs_da7219_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + return 0; +} + static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { @@ -148,6 +168,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->be_hw_params_fixup = avs_da7219_be_fixup; dl->init = avs_da7219_codec_init; dl->exit = avs_da7219_codec_exit; dl->nonatomic = 1; diff --git a/sound/soc/intel/avs/boards/max98357a.c b/sound/soc/intel/avs/boards/max98357a.c index 921f42caf7e0..183123d08c5a 100644 --- a/sound/soc/intel/avs/boards/max98357a.c +++ b/sound/soc/intel/avs/boards/max98357a.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/platform_device.h> +#include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/soc-acpi.h> #include <sound/soc-dapm.h> @@ -24,6 +25,26 @@ static const struct snd_soc_dapm_route card_base_routes[] = { { "Spk", NULL, "Speaker" }, }; +static int +avs_max98357a_be_fixup(struct snd_soc_pcm_runtime *runrime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSP0 to 16 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE); + return 0; +} + static int 
avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { @@ -55,6 +76,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in dl->num_platforms = 1; dl->id = 0; dl->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; + dl->be_hw_params_fixup = avs_max98357a_be_fixup; dl->nonatomic = 1; dl->no_pcm = 1; dl->dpcm_playback = 1; diff --git a/sound/soc/intel/avs/boards/nau8825.c b/sound/soc/intel/avs/boards/nau8825.c index b31fa931ba8b..b69fc5567135 100644 --- a/sound/soc/intel/avs/boards/nau8825.c +++ b/sound/soc/intel/avs/boards/nau8825.c @@ -33,15 +33,15 @@ avs_nau8825_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *co return -EINVAL; } - if (!SND_SOC_DAPM_EVENT_ON(event)) { + if (SND_SOC_DAPM_EVENT_ON(event)) + ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000, + SND_SOC_CLOCK_IN); + else ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN); - if (ret < 0) { - dev_err(card->dev, "set sysclk err = %d\n", ret); - return ret; - } - } + if (ret < 0) + dev_err(card->dev, "Set sysclk failed: %d\n", ret); - return 0; + return ret; } static const struct snd_kcontrol_new card_controls[] = { diff --git a/sound/soc/intel/avs/boards/rt5682.c b/sound/soc/intel/avs/boards/rt5682.c index 473e9fe5d0bf..b2c2ba93dcb5 100644 --- a/sound/soc/intel/avs/boards/rt5682.c +++ b/sound/soc/intel/avs/boards/rt5682.c @@ -169,6 +169,27 @@ static const struct snd_soc_ops avs_rt5682_ops = { .hw_params = avs_rt5682_hw_params, }; +static int +avs_rt5682_be_fixup(struct snd_soc_pcm_runtime *runtime, struct snd_pcm_hw_params *params) +{ + struct snd_interval *rate, *channels; + struct snd_mask *fmt; + + rate = hw_param_interval(params, SNDRV_PCM_HW_PARAM_RATE); + channels = hw_param_interval(params, SNDRV_PCM_HW_PARAM_CHANNELS); + fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT); + + /* The ADSP will convert the FE rate to 48k, stereo */ + rate->min = rate->max = 48000; + channels->min = channels->max = 2; + + /* set SSPN to 24 bit */ + snd_mask_none(fmt); + snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S24_LE); + + return 0; +} + static int avs_create_dai_link(struct device *dev, const char *platform_name, int ssp_port, struct snd_soc_dai_link **dai_link) { @@ -201,6 +222,7 @@ static int avs_create_dai_link(struct device *dev, const char *platform_name, in dl->id = 0; dl->init = avs_rt5682_codec_init; dl->exit = avs_rt5682_codec_exit; + dl->be_hw_params_fixup = avs_rt5682_be_fixup; dl->ops = &avs_rt5682_ops; dl->nonatomic = 1; dl->no_pcm = 1; diff --git a/sound/soc/intel/avs/boards/ssm4567.c b/sound/soc/intel/avs/boards/ssm4567.c index c5db69612762..2b7f5ad92aca 100644 --- a/sound/soc/intel/avs/boards/ssm4567.c +++ b/sound/soc/intel/avs/boards/ssm4567.c @@ -15,7 +15,6 @@ #include <sound/soc-acpi.h> #include "../../../codecs/nau8825.h" -#define SKL_NUVOTON_CODEC_DAI "nau8825-hifi" #define SKL_SSM_CODEC_DAI "ssm4567-hifi" static struct snd_soc_codec_conf card_codec_conf[] = { @@ -34,41 +33,11 @@ static const struct snd_kcontrol_new card_controls[] = { SOC_DAPM_PIN_SWITCH("Right Speaker"), }; -static int -platform_clock_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *control, int event) -{ - struct snd_soc_dapm_context *dapm = w->dapm; - struct snd_soc_card *card = dapm->card; - struct snd_soc_dai *codec_dai; - int ret; - - codec_dai = snd_soc_card_get_codec_dai(card, SKL_NUVOTON_CODEC_DAI); - if (!codec_dai) { - dev_err(card->dev, 
"Codec dai not found\n"); - return -EINVAL; - } - - if (SND_SOC_DAPM_EVENT_ON(event)) { - ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_MCLK, 24000000, - SND_SOC_CLOCK_IN); - if (ret < 0) - dev_err(card->dev, "set sysclk err = %d\n", ret); - } else { - ret = snd_soc_dai_set_sysclk(codec_dai, NAU8825_CLK_INTERNAL, 0, SND_SOC_CLOCK_IN); - if (ret < 0) - dev_err(card->dev, "set sysclk err = %d\n", ret); - } - - return ret; -} - static const struct snd_soc_dapm_widget card_widgets[] = { SND_SOC_DAPM_SPK("Left Speaker", NULL), SND_SOC_DAPM_SPK("Right Speaker", NULL), SND_SOC_DAPM_SPK("DP1", NULL), SND_SOC_DAPM_SPK("DP2", NULL), - SND_SOC_DAPM_SUPPLY("Platform Clock", SND_SOC_NOPM, 0, 0, platform_clock_control, - SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), }; static const struct snd_soc_dapm_route card_base_routes[] = { diff --git a/sound/soc/intel/common/soc-acpi-intel-adl-match.c b/sound/soc/intel/common/soc-acpi-intel-adl-match.c index 56ee5fef66a8..28dd2046e4ac 100644 --- a/sound/soc/intel/common/soc-acpi-intel-adl-match.c +++ b/sound/soc/intel/common/soc-acpi-intel-adl-match.c @@ -559,7 +559,7 @@ struct snd_soc_acpi_mach snd_soc_acpi_intel_adl_machines[] = { { .comp_ids = &essx_83x6, .drv_name = "sof-essx8336", - .sof_tplg_filename = "sof-adl-es83x6", /* the tplg suffix is added at run time */ + .sof_tplg_filename = "sof-adl-es8336", /* the tplg suffix is added at run time */ .tplg_quirk_mask = SND_SOC_ACPI_TPLG_INTEL_SSP_NUMBER | SND_SOC_ACPI_TPLG_INTEL_SSP_MSB | SND_SOC_ACPI_TPLG_INTEL_DMIC_NUMBER, diff --git a/sound/soc/qcom/qdsp6/q6prm.c b/sound/soc/qcom/qdsp6/q6prm.c index 3aa63aac4a68..81554d202658 100644 --- a/sound/soc/qcom/qdsp6/q6prm.c +++ b/sound/soc/qcom/qdsp6/q6prm.c @@ -184,9 +184,9 @@ int q6prm_set_lpass_clock(struct device *dev, int clk_id, int clk_attr, int clk_ unsigned int freq) { if (freq) - return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq); + return q6prm_request_lpass_clock(dev, clk_id, clk_attr, clk_root, freq); - return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_attr, freq); + return q6prm_release_lpass_clock(dev, clk_id, clk_attr, clk_root, freq); } EXPORT_SYMBOL_GPL(q6prm_set_lpass_clock); diff --git a/sound/soc/sof/intel/hda-ctrl.c b/sound/soc/sof/intel/hda-ctrl.c index 3aea36c077c9..f3bdeba28412 100644 --- a/sound/soc/sof/intel/hda-ctrl.c +++ b/sound/soc/sof/intel/hda-ctrl.c @@ -196,12 +196,15 @@ int hda_dsp_ctrl_init_chip(struct snd_sof_dev *sdev) goto err; } + usleep_range(500, 1000); + /* exit HDA controller reset */ ret = hda_dsp_ctrl_link_reset(sdev, false); if (ret < 0) { dev_err(sdev->dev, "error: failed to exit HDA controller reset\n"); goto err; } + usleep_range(1000, 1200); hda_codec_detect_mask(sdev); diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c index 68eb06f13a1f..a6f2822401e0 100644 --- a/sound/soc/sof/intel/hda-dsp.c +++ b/sound/soc/sof/intel/hda-dsp.c @@ -392,6 +392,12 @@ static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value) snd_sof_dsp_update8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset, SOF_HDA_VS_D0I3C_I3, value); + /* + * The value written to the D0I3C::I3 bit may not be taken into account immediately. 
+ * A delay is recommended before checking if D0I3C::CIP is cleared + */ + usleep_range(30, 40); + /* Wait for cmd in progress to be cleared before exiting the function */ ret = hda_dsp_wait_d0i3c_done(sdev); if (ret < 0) { @@ -400,6 +406,12 @@ static int hda_dsp_update_d0i3c_register(struct snd_sof_dev *sdev, u8 value) } reg = snd_sof_dsp_read8(sdev, HDA_DSP_HDA_BAR, chip->d0i3_offset); + /* Confirm d0i3 state changed with paranoia check */ + if ((reg ^ value) & SOF_HDA_VS_D0I3C_I3) { + dev_err(sdev->dev, "failed to update D0I3C!\n"); + return -EIO; + } + trace_sof_intel_D0I3C_updated(sdev, reg); return 0; diff --git a/sound/soc/sof/intel/pci-apl.c b/sound/soc/sof/intel/pci-apl.c index 69279dcc92dc..aff6cb573c27 100644 --- a/sound/soc/sof/intel/pci-apl.c +++ b/sound/soc/sof/intel/pci-apl.c @@ -78,6 +78,7 @@ static const struct sof_dev_desc glk_desc = { .nocodec_tplg_filename = "sof-glk-nocodec.tplg", .ops = &sof_apl_ops, .ops_init = sof_apl_ops_init, + .ops_free = hda_ops_free, }; /* PCI IDs */ diff --git a/sound/soc/sof/intel/pci-cnl.c b/sound/soc/sof/intel/pci-cnl.c index 8db3f8d15b55..4c0c1c369dcd 100644 --- a/sound/soc/sof/intel/pci-cnl.c +++ b/sound/soc/sof/intel/pci-cnl.c @@ -48,6 +48,7 @@ static const struct sof_dev_desc cnl_desc = { .nocodec_tplg_filename = "sof-cnl-nocodec.tplg", .ops = &sof_cnl_ops, .ops_init = sof_cnl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc cfl_desc = { @@ -111,6 +112,7 @@ static const struct sof_dev_desc cml_desc = { .nocodec_tplg_filename = "sof-cnl-nocodec.tplg", .ops = &sof_cnl_ops, .ops_init = sof_cnl_ops_init, + .ops_free = hda_ops_free, }; /* PCI IDs */ diff --git a/sound/soc/sof/intel/pci-icl.c b/sound/soc/sof/intel/pci-icl.c index d6cf75e357db..6785669113b3 100644 --- a/sound/soc/sof/intel/pci-icl.c +++ b/sound/soc/sof/intel/pci-icl.c @@ -79,6 +79,7 @@ static const struct sof_dev_desc jsl_desc = { .nocodec_tplg_filename = "sof-jsl-nocodec.tplg", .ops = &sof_cnl_ops, .ops_init = sof_cnl_ops_init, + .ops_free = hda_ops_free, }; /* PCI IDs */ diff --git a/sound/soc/sof/intel/pci-mtl.c b/sound/soc/sof/intel/pci-mtl.c index 6e4e6d4ef5a5..b183dc0014b4 100644 --- a/sound/soc/sof/intel/pci-mtl.c +++ b/sound/soc/sof/intel/pci-mtl.c @@ -46,6 +46,7 @@ static const struct sof_dev_desc mtl_desc = { .nocodec_tplg_filename = "sof-mtl-nocodec.tplg", .ops = &sof_mtl_ops, .ops_init = sof_mtl_ops_init, + .ops_free = hda_ops_free, }; /* PCI IDs */ diff --git a/sound/soc/sof/intel/pci-skl.c b/sound/soc/sof/intel/pci-skl.c index 3a99dc444f92..5b4bccf81965 100644 --- a/sound/soc/sof/intel/pci-skl.c +++ b/sound/soc/sof/intel/pci-skl.c @@ -38,6 +38,7 @@ static struct sof_dev_desc skl_desc = { .nocodec_tplg_filename = "sof-skl-nocodec.tplg", .ops = &sof_skl_ops, .ops_init = sof_skl_ops_init, + .ops_free = hda_ops_free, }; static struct sof_dev_desc kbl_desc = { @@ -61,6 +62,7 @@ static struct sof_dev_desc kbl_desc = { .nocodec_tplg_filename = "sof-kbl-nocodec.tplg", .ops = &sof_skl_ops, .ops_init = sof_skl_ops_init, + .ops_free = hda_ops_free, }; /* PCI IDs */ diff --git a/sound/soc/sof/intel/pci-tgl.c b/sound/soc/sof/intel/pci-tgl.c index e80c4dfef85a..22e769e0831d 100644 --- a/sound/soc/sof/intel/pci-tgl.c +++ b/sound/soc/sof/intel/pci-tgl.c @@ -48,6 +48,7 @@ static const struct sof_dev_desc tgl_desc = { .nocodec_tplg_filename = "sof-tgl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc tglh_desc = { @@ -110,6 +111,7 @@ static const struct sof_dev_desc ehl_desc = 
{ .nocodec_tplg_filename = "sof-ehl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc adls_desc = { @@ -141,6 +143,7 @@ static const struct sof_dev_desc adls_desc = { .nocodec_tplg_filename = "sof-adl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc adl_desc = { @@ -172,6 +175,7 @@ static const struct sof_dev_desc adl_desc = { .nocodec_tplg_filename = "sof-adl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc adl_n_desc = { @@ -203,6 +207,7 @@ static const struct sof_dev_desc adl_n_desc = { .nocodec_tplg_filename = "sof-adl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc rpls_desc = { @@ -234,6 +239,7 @@ static const struct sof_dev_desc rpls_desc = { .nocodec_tplg_filename = "sof-rpl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; static const struct sof_dev_desc rpl_desc = { @@ -265,6 +271,7 @@ static const struct sof_dev_desc rpl_desc = { .nocodec_tplg_filename = "sof-rpl-nocodec.tplg", .ops = &sof_tgl_ops, .ops_init = sof_tgl_ops_init, + .ops_free = hda_ops_free, }; /* PCI IDs */ diff --git a/sound/soc/sof/intel/pci-tng.c b/sound/soc/sof/intel/pci-tng.c index 5b2b409752c5..8c22a00266c0 100644 --- a/sound/soc/sof/intel/pci-tng.c +++ b/sound/soc/sof/intel/pci-tng.c @@ -75,11 +75,7 @@ static int tangier_pci_probe(struct snd_sof_dev *sdev) /* LPE base */ base = pci_resource_start(pci, desc->resindex_lpe_base) - IRAM_OFFSET; - size = pci_resource_len(pci, desc->resindex_lpe_base); - if (size < PCI_BAR_SIZE) { - dev_err(sdev->dev, "error: I/O region is too small.\n"); - return -ENODEV; - } + size = PCI_BAR_SIZE; dev_dbg(sdev->dev, "LPE PHY base at 0x%x size 0x%x", base, size); sdev->bar[DSP_BAR] = devm_ioremap(sdev->dev, base, size); diff --git a/sound/soc/sof/ipc3-topology.c b/sound/soc/sof/ipc3-topology.c index dceb78bfe17c..b1f425b39db9 100644 --- a/sound/soc/sof/ipc3-topology.c +++ b/sound/soc/sof/ipc3-topology.c @@ -2081,7 +2081,9 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget * break; case SOF_DAI_INTEL_ALH: if (data) { - config->dai_index = data->dai_index; + /* save the dai_index during hw_params and reuse it for hw_free */ + if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) + config->dai_index = data->dai_index; config->alh.stream_id = data->dai_data; } break; @@ -2089,7 +2091,30 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget * break; } - config->flags = flags; + /* + * The dai_config op is invoked several times and the flags argument varies as below: + * BE DAI hw_params: When the op is invoked during the BE DAI hw_params, flags contains + * SOF_DAI_CONFIG_FLAGS_HW_PARAMS along with quirks + * FE DAI hw_params: When invoked during FE DAI hw_params after the DAI widget has + * just been set up in the DSP, flags is set to SOF_DAI_CONFIG_FLAGS_HW_PARAMS with no + * quirks + * BE DAI trigger: When invoked during the BE DAI trigger, flags is set to + * SOF_DAI_CONFIG_FLAGS_PAUSE and contains no quirks + * BE DAI hw_free: When invoked during the BE DAI hw_free, flags is set to + * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks + * FE DAI hw_free: When invoked during the FE DAI hw_free, flags is set to + * SOF_DAI_CONFIG_FLAGS_HW_FREE and contains no quirks 
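/*
 * Illustration only, not part of the patch: a minimal sketch of why the
 * "config->flags |= flags" added below keeps the BE-provided quirks alive
 * across the later FE DAI hw_params call. Only the flag names come from the
 * hunk; the bit positions used here are assumptions made for the sketch.
 */
#include <stdio.h>

#define HW_PARAMS_STAGE (1u << 0) /* stands in for SOF_DAI_CONFIG_FLAGS_HW_PARAMS */
#define TOPOLOGY_QUIRK  (1u << 4) /* stands in for a quirk bit set by the BE call */

int main(void)
{
	unsigned int config_flags = 0;

	config_flags |= HW_PARAMS_STAGE | TOPOLOGY_QUIRK; /* BE DAI hw_params: stage + quirk */
	config_flags |= HW_PARAMS_STAGE;                  /* FE DAI hw_params: quirk survives the OR */
	printf("flags before DAI_CONFIG IPC: %#x (quirk %s)\n",
	       config_flags, (config_flags & TOPOLOGY_QUIRK) ? "preserved" : "lost");
	return 0;
}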
+ * + * The DAI_CONFIG IPC is sent to the DSP, only after the widget is set up during the FE + * DAI hw_params. But since the BE DAI hw_params precedes the FE DAI hw_params, the quirks + * need to be preserved when assigning the flags before sending the IPC. + * For the case of PAUSE/HW_FREE, since there are no quirks, flags can be used as is. + */ + + if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) + config->flags |= flags; + else + config->flags = flags; /* only send the IPC if the widget is set up in the DSP */ if (swidget->use_count > 0) { @@ -2097,6 +2122,9 @@ static int sof_ipc3_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget * &reply, sizeof(reply)); if (ret < 0) dev_err(sdev->dev, "Failed to set dai config for %s\n", dai->name); + + /* clear the flags once the IPC has been sent even if it fails */ + config->flags = SOF_DAI_CONFIG_FLAGS_NONE; } return ret; diff --git a/sound/soc/sof/ipc3.c b/sound/soc/sof/ipc3.c index 3de64ea2dc9a..4493bbd7faf1 100644 --- a/sound/soc/sof/ipc3.c +++ b/sound/soc/sof/ipc3.c @@ -970,8 +970,9 @@ static void sof_ipc3_rx_msg(struct snd_sof_dev *sdev) return; } - if (hdr.size < sizeof(hdr)) { - dev_err(sdev->dev, "The received message size is invalid\n"); + if (hdr.size < sizeof(hdr) || hdr.size > SOF_IPC_MSG_MAX_SIZE) { + dev_err(sdev->dev, "The received message size is invalid: %u\n", + hdr.size); return; } diff --git a/sound/soc/sof/ipc4-control.c b/sound/soc/sof/ipc4-control.c index 67bd2233fd9a..9a71af1a613a 100644 --- a/sound/soc/sof/ipc4-control.c +++ b/sound/soc/sof/ipc4-control.c @@ -97,7 +97,8 @@ sof_ipc4_set_volume_data(struct snd_sof_dev *sdev, struct snd_sof_widget *swidge } /* set curve type and duration from topology */ - data.curve_duration = gain->data.curve_duration; + data.curve_duration_l = gain->data.curve_duration_l; + data.curve_duration_h = gain->data.curve_duration_h; data.curve_type = gain->data.curve_type; msg->data_ptr = &data; diff --git a/sound/soc/sof/ipc4-topology.c b/sound/soc/sof/ipc4-topology.c index 3e27c7a48ebd..a623707c8ffc 100644 --- a/sound/soc/sof/ipc4-topology.c +++ b/sound/soc/sof/ipc4-topology.c @@ -107,7 +107,7 @@ static const struct sof_topology_token gain_tokens[] = { get_token_u32, offsetof(struct sof_ipc4_gain_data, curve_type)}, {SOF_TKN_GAIN_RAMP_DURATION, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32, - offsetof(struct sof_ipc4_gain_data, curve_duration)}, + offsetof(struct sof_ipc4_gain_data, curve_duration_l)}, {SOF_TKN_GAIN_VAL, SND_SOC_TPLG_TUPLE_TYPE_WORD, get_token_u32, offsetof(struct sof_ipc4_gain_data, init_val)}, }; @@ -155,7 +155,7 @@ static void sof_ipc4_dbg_audio_format(struct device *dev, for (i = 0; i < num_format; i++, ptr = (u8 *)ptr + object_size) { fmt = ptr; dev_dbg(dev, - " #%d: %uKHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n", + " #%d: %uHz, %ubit (ch_map %#x ch_cfg %u interleaving_style %u fmt_cfg %#x)\n", i, fmt->sampling_frequency, fmt->bit_depth, fmt->ch_map, fmt->ch_cfg, fmt->interleaving_style, fmt->fmt_cfg); } @@ -692,7 +692,7 @@ static int sof_ipc4_widget_setup_comp_pga(struct snd_sof_widget *swidget) dev_dbg(scomp->dev, "pga widget %s: ramp type: %d, ramp duration %d, initial gain value: %#x, cpc %d\n", - swidget->widget->name, gain->data.curve_type, gain->data.curve_duration, + swidget->widget->name, gain->data.curve_type, gain->data.curve_duration_l, gain->data.init_val, gain->base_config.cpc); ret = sof_ipc4_widget_setup_msg(swidget, &gain->msg); @@ -980,6 +980,7 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget) 
ipc4_copier = dai->private; if (ipc4_copier->dai_type == SOF_DAI_INTEL_ALH) { + struct sof_ipc4_copier_data *copier_data = &ipc4_copier->data; struct sof_ipc4_alh_configuration_blob *blob; unsigned int group_id; @@ -989,6 +990,9 @@ static void sof_ipc4_unprepare_copier_module(struct snd_sof_widget *swidget) ALH_MULTI_GTW_BASE; ida_free(&alh_group_ida, group_id); } + + /* clear the node ID */ + copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK; } } @@ -1940,8 +1944,15 @@ static int sof_ipc4_dai_config(struct snd_sof_dev *sdev, struct snd_sof_widget * pipeline->skip_during_fe_trigger = true; fallthrough; case SOF_DAI_INTEL_ALH: - copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK; - copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data); + /* + * Do not clear the node ID when this op is invoked with + * SOF_DAI_CONFIG_FLAGS_HW_FREE. It is needed to free the group_ida during + * unprepare. + */ + if (flags & SOF_DAI_CONFIG_FLAGS_HW_PARAMS) { + copier_data->gtw_cfg.node_id &= ~SOF_IPC4_NODE_INDEX_MASK; + copier_data->gtw_cfg.node_id |= SOF_IPC4_NODE_INDEX(data->dai_data); + } break; case SOF_DAI_INTEL_DMIC: case SOF_DAI_INTEL_SSP: diff --git a/sound/soc/sof/ipc4-topology.h b/sound/soc/sof/ipc4-topology.h index 72529179ac22..123f1096f326 100644 --- a/sound/soc/sof/ipc4-topology.h +++ b/sound/soc/sof/ipc4-topology.h @@ -46,7 +46,7 @@ #define SOF_IPC4_NODE_INDEX_INTEL_SSP(x) (((x) & 0xf) << 4) /* Node ID for DMIC type DAI copiers */ -#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) (((x) & 0x7) << 5) +#define SOF_IPC4_NODE_INDEX_INTEL_DMIC(x) ((x) & 0x7) #define SOF_IPC4_GAIN_ALL_CHANNELS_MASK 0xffffffff #define SOF_IPC4_VOL_ZERO_DB 0x7fffffff @@ -277,14 +277,16 @@ struct sof_ipc4_control_data { * @init_val: Initial value * @curve_type: Curve type * @reserved: reserved for future use - * @curve_duration: Curve duration + * @curve_duration_l: Curve duration low part + * @curve_duration_h: Curve duration high part */ struct sof_ipc4_gain_data { uint32_t channels; uint32_t init_val; uint32_t curve_type; uint32_t reserved; - uint32_t curve_duration; + uint32_t curve_duration_l; + uint32_t curve_duration_h; } __aligned(8); /** diff --git a/sound/soc/sof/sof-audio.c b/sound/soc/sof/sof-audio.c index 760621bfc802..6de388a8d0b8 100644 --- a/sound/soc/sof/sof-audio.c +++ b/sound/soc/sof/sof-audio.c @@ -50,9 +50,27 @@ static int sof_widget_free_unlocked(struct snd_sof_dev *sdev, /* reset route setup status for all routes that contain this widget */ sof_reset_route_setup_status(sdev, swidget); + /* free DAI config and continue to free widget even if it fails */ + if (WIDGET_IS_DAI(swidget->id)) { + struct snd_sof_dai_config_data data; + unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_FREE; + + data.dai_data = DMA_CHAN_INVALID; + + if (tplg_ops && tplg_ops->dai_config) { + err = tplg_ops->dai_config(sdev, swidget, flags, &data); + if (err < 0) + dev_err(sdev->dev, "failed to free config for widget %s\n", + swidget->widget->name); + } + } + /* continue to disable core even if IPC fails */ - if (tplg_ops && tplg_ops->widget_free) - err = tplg_ops->widget_free(sdev, swidget); + if (tplg_ops && tplg_ops->widget_free) { + ret = tplg_ops->widget_free(sdev, swidget); + if (ret < 0 && !err) + err = ret; + } /* * disable widget core. 
continue to route setup status and complete flag @@ -151,8 +169,12 @@ static int sof_widget_setup_unlocked(struct snd_sof_dev *sdev, /* send config for DAI components */ if (WIDGET_IS_DAI(swidget->id)) { - unsigned int flags = SOF_DAI_CONFIG_FLAGS_NONE; + unsigned int flags = SOF_DAI_CONFIG_FLAGS_HW_PARAMS; + /* + * The config flags saved during BE DAI hw_params will be used for IPC3. IPC4 does + * not use the flags argument. + */ if (tplg_ops && tplg_ops->dai_config) { ret = tplg_ops->dai_config(sdev, swidget, flags, NULL); if (ret < 0) @@ -588,8 +610,8 @@ int sof_widget_list_setup(struct snd_sof_dev *sdev, struct snd_sof_pcm *spcm, ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params, dir, SOF_WIDGET_SETUP); if (ret < 0) { - ret = sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params, - dir, SOF_WIDGET_UNPREPARE); + sof_walk_widgets_in_order(sdev, spcm, fe_params, platform_params, + dir, SOF_WIDGET_UNPREPARE); return ret; } diff --git a/sound/soc/sof/topology.c b/sound/soc/sof/topology.c index 4a62ccc71fcb..9f3a038fe21a 100644 --- a/sound/soc/sof/topology.c +++ b/sound/soc/sof/topology.c @@ -1388,14 +1388,15 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, if (ret < 0) { dev_err(scomp->dev, "failed to parse component pin tokens for %s\n", w->name); - return ret; + goto widget_free; } if (swidget->num_sink_pins > SOF_WIDGET_MAX_NUM_PINS || swidget->num_source_pins > SOF_WIDGET_MAX_NUM_PINS) { dev_err(scomp->dev, "invalid pins for %s: [sink: %d, src: %d]\n", swidget->widget->name, swidget->num_sink_pins, swidget->num_source_pins); - return -EINVAL; + ret = -EINVAL; + goto widget_free; } if (swidget->num_sink_pins > 1) { @@ -1404,7 +1405,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, if (ret < 0) { dev_err(scomp->dev, "failed to parse sink pin binding for %s\n", w->name); - return ret; + goto widget_free; } } @@ -1414,7 +1415,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, if (ret < 0) { dev_err(scomp->dev, "failed to parse source pin binding for %s\n", w->name); - return ret; + goto widget_free; } } @@ -1436,9 +1437,8 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, case snd_soc_dapm_dai_out: dai = kzalloc(sizeof(*dai), GFP_KERNEL); if (!dai) { - kfree(swidget); - return -ENOMEM; - + ret = -ENOMEM; + goto widget_free; } ret = sof_widget_parse_tokens(scomp, swidget, tw, token_list, token_list_size); @@ -1496,8 +1496,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, tw->shift, swidget->id, tw->name, strnlen(tw->sname, SNDRV_CTL_ELEM_ID_NAME_MAXLEN) > 0 ? 
tw->sname : "none"); - kfree(swidget); - return ret; + goto widget_free; } if (sof_debug_check_flag(SOF_DBG_DISABLE_MULTICORE)) { @@ -1518,10 +1517,7 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, if (ret) { dev_err(scomp->dev, "widget event binding failed for %s\n", swidget->widget->name); - kfree(swidget->private); - kfree(swidget->tuples); - kfree(swidget); - return ret; + goto free; } } } @@ -1532,10 +1528,8 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, spipe = kzalloc(sizeof(*spipe), GFP_KERNEL); if (!spipe) { - kfree(swidget->private); - kfree(swidget->tuples); - kfree(swidget); - return -ENOMEM; + ret = -ENOMEM; + goto free; } spipe->pipe_widget = swidget; @@ -1546,6 +1540,12 @@ static int sof_widget_ready(struct snd_soc_component *scomp, int index, w->dobj.private = swidget; list_add(&swidget->list, &sdev->widget_list); return ret; +free: + kfree(swidget->private); + kfree(swidget->tuples); +widget_free: + kfree(swidget); + return ret; } static int sof_route_unload(struct snd_soc_component *scomp, diff --git a/tools/include/uapi/linux/netdev.h b/tools/include/uapi/linux/netdev.h index 8c4e3e536c04..639524b59930 100644 --- a/tools/include/uapi/linux/netdev.h +++ b/tools/include/uapi/linux/netdev.h @@ -1,4 +1,4 @@ -/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */ +/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) */ /* Do not edit directly, auto-generated from: */ /* Documentation/netlink/specs/netdev.yaml */ /* YNL-GEN uapi header */ @@ -33,6 +33,8 @@ enum netdev_xdp_act { NETDEV_XDP_ACT_HW_OFFLOAD = 16, NETDEV_XDP_ACT_RX_SG = 32, NETDEV_XDP_ACT_NDO_XMIT_SG = 64, + + NETDEV_XDP_ACT_MASK = 127, }; enum { diff --git a/tools/net/ynl/lib/nlspec.py b/tools/net/ynl/lib/nlspec.py index a34d088f6743..d04450c2a44a 100644 --- a/tools/net/ynl/lib/nlspec.py +++ b/tools/net/ynl/lib/nlspec.py @@ -138,10 +138,8 @@ class SpecEnumSet(SpecElement): def get_mask(self): mask = 0 - idx = self.yaml.get('value-start', 0) - for _ in self.entries.values(): - mask |= 1 << idx - idx += 1 + for e in self.entries.values(): + mask += e.user_value() return mask @@ -276,6 +274,7 @@ class SpecFamily(SpecElement): Attributes: proto protocol type (e.g. 
genetlink) + license spec license (loaded from an SPDX tag on the spec) attr_sets dict of attribute sets msgs dict of all messages (index by name) @@ -285,6 +284,13 @@ class SpecFamily(SpecElement): """ def __init__(self, spec_path, schema_path=None): with open(spec_path, "r") as stream: + prefix = '# SPDX-License-Identifier: ' + first = stream.readline().strip() + if not first.startswith(prefix): + raise Exception('SPDX license tag required in the spec') + self.license = first[len(prefix):] + + stream.seek(0) spec = yaml.safe_load(stream) self._resolution_list = [] @@ -389,7 +395,8 @@ class SpecFamily(SpecElement): def resolve(self): self.resolve_up(super()) - for elem in self.yaml['definitions']: + definitions = self.yaml.get('definitions', []) + for elem in definitions: if elem['type'] == 'enum' or elem['type'] == 'flags': self.consts[elem['name']] = self.new_enum(elem) else: diff --git a/tools/net/ynl/ynl-gen-c.py b/tools/net/ynl/ynl-gen-c.py index 1bcc5354d800..c16671a02621 100755 --- a/tools/net/ynl/ynl-gen-c.py +++ b/tools/net/ynl/ynl-gen-c.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -# SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause +# SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause) import argparse import collections @@ -1931,9 +1931,14 @@ def render_uapi(family, cw): if const.get('render-max', False): cw.nl() - max_name = c_upper(name_pfx + 'max') - cw.p('__' + max_name + ',') - cw.p(max_name + ' = (__' + max_name + ' - 1)') + if const['type'] == 'flags': + max_name = c_upper(name_pfx + 'mask') + max_val = f' = {enum.get_mask()},' + cw.p(max_name + max_val) + else: + max_name = c_upper(name_pfx + 'max') + cw.p('__' + max_name + ',') + cw.p(max_name + ' = (__' + max_name + ' - 1)') cw.block_end(line=';') cw.nl() elif const['type'] == 'const': @@ -2054,6 +2059,10 @@ def main(): try: parsed = Family(args.spec) + if parsed.license != '((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)': + print('Spec license:', parsed.license) + print('License must be: ((GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause)') + os.sys.exit(1) except yaml.YAMLError as exc: print(exc) os.sys.exit(1) @@ -2062,13 +2071,10 @@ def main(): cw = CodeWriter(BaseNlLib(), out_file) _, spec_kernel = find_kernel_root(args.spec) - if args.mode == 'uapi': - cw.p('/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR BSD-3-Clause */') + if args.mode == 'uapi' or args.header: + cw.p(f'/* SPDX-License-Identifier: {parsed.license} */') else: - if args.header: - cw.p('/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */') - else: - cw.p('// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause') + cw.p(f'// SPDX-License-Identifier: {parsed.license}') cw.p("/* Do not edit directly, auto-generated from: */") cw.p(f"/*\t{spec_kernel} */") cw.p(f"/* YNL-GEN {args.mode} {'header' if args.header else 'source'} */") diff --git a/tools/power/acpi/tools/pfrut/pfrut.c b/tools/power/acpi/tools/pfrut/pfrut.c index 52aa0351533c..388c9e3ad040 100644 --- a/tools/power/acpi/tools/pfrut/pfrut.c +++ b/tools/power/acpi/tools/pfrut/pfrut.c @@ -97,7 +97,7 @@ static struct option long_options[] = { static void parse_options(int argc, char **argv) { int option_index = 0; - char *pathname; + char *pathname, *endptr; int opt; pathname = strdup(argv[0]); @@ -125,11 +125,23 @@ static void parse_options(int argc, char **argv) log_getinfo = 1; break; case 'T': - log_type = atoi(optarg); + log_type = strtol(optarg, &endptr, 0); + if (*endptr || (log_type != 0 && log_type != 1)) { + printf("Number expected: 
type(0:execution, 1:history) - Quit.\n"); + exit(1); + } + set_log_type = 1; break; case 'L': - log_level = atoi(optarg); + log_level = strtol(optarg, &endptr, 0); + if (*endptr || + (log_level != 0 && log_level != 1 && + log_level != 2 && log_level != 4)) { + printf("Number expected: level(0, 1, 2, 4) - Quit.\n"); + exit(1); + } + set_log_level = 1; break; case 'R': diff --git a/tools/power/pm-graph/sleepgraph.py b/tools/power/pm-graph/sleepgraph.py index 82c09cd25cc2..bf4ac24a1c7a 100755 --- a/tools/power/pm-graph/sleepgraph.py +++ b/tools/power/pm-graph/sleepgraph.py @@ -5556,9 +5556,8 @@ def executeSuspend(quiet=False): if not quiet: pprint('CAPTURING TRACE') op = sv.writeDatafileHeader(sv.ftracefile, testdata) - fp = open(tp+'trace', 'r') - for line in fp: - op.write(line) + fp = open(tp+'trace', 'rb') + op.write(ascii(fp.read())) op.close() sv.fsetVal('', 'trace') sv.platforminfo(cmdafter) diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8 index c7b26a3603af..8f08c3fd498d 100644 --- a/tools/power/x86/turbostat/turbostat.8 +++ b/tools/power/x86/turbostat/turbostat.8 @@ -340,10 +340,12 @@ starts a new interval. must be run as root. Alternatively, non-root users can be enabled to run turbostat this way: -# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep ./turbostat +# setcap cap_sys_admin,cap_sys_rawio,cap_sys_nice=+ep path/to/turbostat # chmod +r /dev/cpu/*/msr +# chmod +r /dev/cpu_dma_latency + .B "turbostat " reads hardware counters, but doesn't write them. So it will not interfere with the OS or other programs, including diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c index aba460410dbd..8a36ba5df9f9 100644 --- a/tools/power/x86/turbostat/turbostat.c +++ b/tools/power/x86/turbostat/turbostat.c @@ -3,7 +3,7 @@ * turbostat -- show CPU frequency and C-state residency * on modern Intel and AMD processors. * - * Copyright (c) 2022 Intel Corporation. + * Copyright (c) 2023 Intel Corporation. * Len Brown <len.brown@intel.com> */ @@ -670,7 +670,7 @@ static int perf_instr_count_open(int cpu_num) /* counter for cpu_num, including user + kernel and all processes */ fd = perf_event_open(&pea, -1, cpu_num, -1, 0); if (fd == -1) { - warn("cpu%d: perf instruction counter", cpu_num); + warnx("capget(CAP_PERFMON) failed, try \"# setcap cap_sys_admin=ep %s\"", progname); BIC_NOT_PRESENT(BIC_IPC); } @@ -2538,7 +2538,7 @@ static void dump_turbo_ratio_limits(int trl_msr_offset, int family, int model) get_msr(base_cpu, trl_msr_offset, &msr); fprintf(outf, "cpu%d: MSR_%sTURBO_RATIO_LIMIT: 0x%08llx\n", - base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? "SECONDARY" : "", msr); + base_cpu, trl_msr_offset == MSR_SECONDARY_TURBO_RATIO_LIMIT ? 
"SECONDARY_" : "", msr); if (has_turbo_ratio_group_limits(family, model)) { get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &core_counts); @@ -3502,9 +3502,6 @@ release_msr: /* * set_my_sched_priority(pri) * return previous - * - * if non-root, do this: - * # /sbin/setcap cap_sys_rawio,cap_sys_nice=+ep /usr/bin/turbostat */ int set_my_sched_priority(int priority) { @@ -3518,7 +3515,7 @@ int set_my_sched_priority(int priority) retval = setpriority(PRIO_PROCESS, 0, priority); if (retval) - err(retval, "setpriority(%d)", priority); + errx(retval, "capget(CAP_SYS_NICE) failed,try \"# setcap cap_sys_nice=ep %s\"", progname); errno = 0; retval = getpriority(PRIO_PROCESS, 0); @@ -4426,7 +4423,7 @@ int print_hwp(struct thread_data *t, struct core_data *c, struct pkg_data *p) fprintf(outf, "cpu%d: MSR_HWP_STATUS: 0x%08llx " "(%sGuaranteed_Perf_Change, %sExcursion_Min)\n", - cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x2) ? "" : "No-"); + cpu, msr, ((msr) & 0x1) ? "" : "No-", ((msr) & 0x4) ? "" : "No-"); return 0; } @@ -5463,6 +5460,9 @@ unsigned int intel_model_duplicates(unsigned int model) case INTEL_FAM6_ICELAKE_D: return INTEL_FAM6_ICELAKE_X; + + case INTEL_FAM6_EMERALDRAPIDS_X: + return INTEL_FAM6_SAPPHIRERAPIDS_X; } return model; } @@ -5476,13 +5476,13 @@ void print_dev_latency(void) fd = open(path, O_RDONLY); if (fd < 0) { - warn("fopen %s\n", path); + warnx("capget(CAP_SYS_ADMIN) failed, try \"# setcap cap_sys_admin=ep %s\"", progname); return; } retval = read(fd, (void *)&value, sizeof(int)); if (retval != sizeof(int)) { - warn("read %s\n", path); + warn("read failed %s", path); close(fd); return; } @@ -5543,7 +5543,7 @@ void process_cpuid() edx_flags = edx; if (get_msr(sched_getcpu(), MSR_IA32_UCODE_REV, &ucode_patch)) - warnx("get_msr(UCODE)\n"); + warnx("get_msr(UCODE)"); /* * check max extended function levels of CPUID. 
@@ -6225,7 +6225,7 @@ int get_and_dump_counters(void) void print_version() { - fprintf(outf, "turbostat version 2022.10.04 - Len Brown <lenb@kernel.org>\n"); + fprintf(outf, "turbostat version 2023.03.17 - Len Brown <lenb@kernel.org>\n"); } #define COMMAND_LINE_SIZE 2048 diff --git a/tools/testing/selftests/amd-pstate/Makefile b/tools/testing/selftests/amd-pstate/Makefile index 5fd1424db37d..c382f579fe94 100644 --- a/tools/testing/selftests/amd-pstate/Makefile +++ b/tools/testing/selftests/amd-pstate/Makefile @@ -4,10 +4,15 @@ # No binaries, but make sure arg-less "make" doesn't trigger "run_tests" all: -uname_M := $(shell uname -m 2>/dev/null || echo not) -ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/x86/ -e s/x86_64/x86/) +ARCH ?= $(shell uname -m 2>/dev/null || echo not) +ARCH := $(shell echo $(ARCH) | sed -e s/i.86/x86/ -e s/x86_64/x86/) -TEST_PROGS := run.sh -TEST_FILES := basic.sh tbench.sh gitsource.sh +ifeq (x86,$(ARCH)) +TEST_FILES += ../../../power/x86/amd_pstate_tracer/amd_pstate_trace.py +TEST_FILES += ../../../power/x86/intel_pstate_tracer/intel_pstate_tracer.py +endif + +TEST_PROGS += run.sh +TEST_FILES += basic.sh tbench.sh gitsource.sh include ../lib.mk diff --git a/tools/testing/selftests/drivers/net/bonding/Makefile b/tools/testing/selftests/drivers/net/bonding/Makefile index 8e3b786a748f..a39bb2560d9b 100644 --- a/tools/testing/selftests/drivers/net/bonding/Makefile +++ b/tools/testing/selftests/drivers/net/bonding/Makefile @@ -8,7 +8,8 @@ TEST_PROGS := \ dev_addr_lists.sh \ mode-1-recovery-updelay.sh \ mode-2-recovery-updelay.sh \ - option_prio.sh + option_prio.sh \ + bond-eth-type-change.sh TEST_FILES := \ lag_lib.sh \ diff --git a/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh new file mode 100755 index 000000000000..5cdd22048ba7 --- /dev/null +++ b/tools/testing/selftests/drivers/net/bonding/bond-eth-type-change.sh @@ -0,0 +1,85 @@ +#!/bin/bash +# SPDX-License-Identifier: GPL-2.0 +# +# Test bond device ether type changing +# + +ALL_TESTS=" + bond_test_unsuccessful_enslave_type_change + bond_test_successful_enslave_type_change +" +REQUIRE_MZ=no +NUM_NETIFS=0 +lib_dir=$(dirname "$0") +source "$lib_dir"/net_forwarding_lib.sh + +bond_check_flags() +{ + local bonddev=$1 + + ip -d l sh dev "$bonddev" | grep -q "MASTER" + check_err $? "MASTER flag is missing from the bond device" + + ip -d l sh dev "$bonddev" | grep -q "SLAVE" + check_err $? "SLAVE flag is missing from the bond device" +} + +# test enslaved bond dev type change from ARPHRD_ETHER and back +# this allows us to test both MASTER and SLAVE flags at once +bond_test_enslave_type_change() +{ + local test_success=$1 + local devbond0="test-bond0" + local devbond1="test-bond1" + local devbond2="test-bond2" + local nonethdev="test-noneth0" + + # create a non-ARPHRD_ETHER device for testing (e.g. nlmon type) + ip link add name "$nonethdev" type nlmon + check_err $? "could not create a non-ARPHRD_ETHER device (nlmon)" + ip link add name "$devbond0" type bond + if [ $test_success -eq 1 ]; then + # we need devbond0 in active-backup mode to successfully enslave nonethdev + ip link set dev "$devbond0" type bond mode active-backup + check_err $? "could not change bond mode to active-backup" + fi + ip link add name "$devbond1" type bond + ip link add name "$devbond2" type bond + ip link set dev "$devbond0" master "$devbond1" + check_err $? 
"could not enslave $devbond0 to $devbond1" + # change bond type to non-ARPHRD_ETHER + ip link set dev "$nonethdev" master "$devbond0" 1>/dev/null 2>/dev/null + ip link set dev "$nonethdev" nomaster 1>/dev/null 2>/dev/null + # restore ARPHRD_ETHER type by enslaving such device + ip link set dev "$devbond2" master "$devbond0" + check_err $? "could not enslave $devbond2 to $devbond0" + ip link set dev "$devbond1" nomaster + + bond_check_flags "$devbond0" + + # clean up + ip link del dev "$devbond0" + ip link del dev "$devbond1" + ip link del dev "$devbond2" + ip link del dev "$nonethdev" +} + +bond_test_unsuccessful_enslave_type_change() +{ + RET=0 + + bond_test_enslave_type_change 0 + log_test "Change ether type of an enslaved bond device with unsuccessful enslave" +} + +bond_test_successful_enslave_type_change() +{ + RET=0 + + bond_test_enslave_type_change 1 + log_test "Change ether type of an enslaved bond device with successful enslave" +} + +tests_run + +exit "$EXIT_STATUS" diff --git a/tools/testing/selftests/kvm/aarch64/psci_test.c b/tools/testing/selftests/kvm/aarch64/psci_test.c index cfa36f387948..9b004905d1d3 100644 --- a/tools/testing/selftests/kvm/aarch64/psci_test.c +++ b/tools/testing/selftests/kvm/aarch64/psci_test.c @@ -180,9 +180,7 @@ static void host_test_system_suspend(void) enter_guest(source); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SYSTEM_EVENT, - "Unhandled exit reason: %u (%s)", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(source, KVM_EXIT_SYSTEM_EVENT); TEST_ASSERT(run->system_event.type == KVM_SYSTEM_EVENT_SUSPEND, "Unhandled system event: %u (expected: %u)", run->system_event.type, KVM_SYSTEM_EVENT_SUSPEND); diff --git a/tools/testing/selftests/kvm/include/test_util.h b/tools/testing/selftests/kvm/include/test_util.h index 80d6416f3012..a6e9f215ce70 100644 --- a/tools/testing/selftests/kvm/include/test_util.h +++ b/tools/testing/selftests/kvm/include/test_util.h @@ -63,6 +63,15 @@ void test_assert(bool exp, const char *exp_str, #a, #b, #a, (unsigned long) __a, #b, (unsigned long) __b); \ } while (0) +#define TEST_ASSERT_KVM_EXIT_REASON(vcpu, expected) do { \ + __u32 exit_reason = (vcpu)->run->exit_reason; \ + \ + TEST_ASSERT(exit_reason == (expected), \ + "Wanted KVM exit reason: %u (%s), got: %u (%s)", \ + (expected), exit_reason_str((expected)), \ + exit_reason, exit_reason_str(exit_reason)); \ +} while (0) + #define TEST_FAIL(fmt, ...) 
do { \ TEST_ASSERT(false, fmt, ##__VA_ARGS__); \ __builtin_unreachable(); \ diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h index 53ffa43c90db..90387ddcb2a9 100644 --- a/tools/testing/selftests/kvm/include/x86_64/processor.h +++ b/tools/testing/selftests/kvm/include/x86_64/processor.h @@ -1063,6 +1063,8 @@ uint64_t *vm_get_page_table_entry(struct kvm_vm *vm, uint64_t vaddr); uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3); +uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1); +void xen_hypercall(uint64_t nr, uint64_t a0, void *a1); void __vm_xsave_require_permission(int bit, const char *name); diff --git a/tools/testing/selftests/kvm/lib/kvm_util.c b/tools/testing/selftests/kvm/lib/kvm_util.c index 3ea24a5f4c43..8ec20ac33de0 100644 --- a/tools/testing/selftests/kvm/lib/kvm_util.c +++ b/tools/testing/selftests/kvm/lib/kvm_util.c @@ -1815,38 +1815,53 @@ void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) vcpu_dump(stream, vcpu, indent + 2); } +#define KVM_EXIT_STRING(x) {KVM_EXIT_##x, #x} + /* Known KVM exit reasons */ static struct exit_reason { unsigned int reason; const char *name; } exit_reasons_known[] = { - {KVM_EXIT_UNKNOWN, "UNKNOWN"}, - {KVM_EXIT_EXCEPTION, "EXCEPTION"}, - {KVM_EXIT_IO, "IO"}, - {KVM_EXIT_HYPERCALL, "HYPERCALL"}, - {KVM_EXIT_DEBUG, "DEBUG"}, - {KVM_EXIT_HLT, "HLT"}, - {KVM_EXIT_MMIO, "MMIO"}, - {KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"}, - {KVM_EXIT_SHUTDOWN, "SHUTDOWN"}, - {KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"}, - {KVM_EXIT_INTR, "INTR"}, - {KVM_EXIT_SET_TPR, "SET_TPR"}, - {KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"}, - {KVM_EXIT_S390_SIEIC, "S390_SIEIC"}, - {KVM_EXIT_S390_RESET, "S390_RESET"}, - {KVM_EXIT_DCR, "DCR"}, - {KVM_EXIT_NMI, "NMI"}, - {KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"}, - {KVM_EXIT_OSI, "OSI"}, - {KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"}, - {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"}, - {KVM_EXIT_X86_RDMSR, "RDMSR"}, - {KVM_EXIT_X86_WRMSR, "WRMSR"}, - {KVM_EXIT_XEN, "XEN"}, - {KVM_EXIT_HYPERV, "HYPERV"}, + KVM_EXIT_STRING(UNKNOWN), + KVM_EXIT_STRING(EXCEPTION), + KVM_EXIT_STRING(IO), + KVM_EXIT_STRING(HYPERCALL), + KVM_EXIT_STRING(DEBUG), + KVM_EXIT_STRING(HLT), + KVM_EXIT_STRING(MMIO), + KVM_EXIT_STRING(IRQ_WINDOW_OPEN), + KVM_EXIT_STRING(SHUTDOWN), + KVM_EXIT_STRING(FAIL_ENTRY), + KVM_EXIT_STRING(INTR), + KVM_EXIT_STRING(SET_TPR), + KVM_EXIT_STRING(TPR_ACCESS), + KVM_EXIT_STRING(S390_SIEIC), + KVM_EXIT_STRING(S390_RESET), + KVM_EXIT_STRING(DCR), + KVM_EXIT_STRING(NMI), + KVM_EXIT_STRING(INTERNAL_ERROR), + KVM_EXIT_STRING(OSI), + KVM_EXIT_STRING(PAPR_HCALL), + KVM_EXIT_STRING(S390_UCONTROL), + KVM_EXIT_STRING(WATCHDOG), + KVM_EXIT_STRING(S390_TSCH), + KVM_EXIT_STRING(EPR), + KVM_EXIT_STRING(SYSTEM_EVENT), + KVM_EXIT_STRING(S390_STSI), + KVM_EXIT_STRING(IOAPIC_EOI), + KVM_EXIT_STRING(HYPERV), + KVM_EXIT_STRING(ARM_NISV), + KVM_EXIT_STRING(X86_RDMSR), + KVM_EXIT_STRING(X86_WRMSR), + KVM_EXIT_STRING(DIRTY_RING_FULL), + KVM_EXIT_STRING(AP_RESET_HOLD), + KVM_EXIT_STRING(X86_BUS_LOCK), + KVM_EXIT_STRING(XEN), + KVM_EXIT_STRING(RISCV_SBI), + KVM_EXIT_STRING(RISCV_CSR), + KVM_EXIT_STRING(NOTIFY), #ifdef KVM_EXIT_MEMORY_NOT_PRESENT - {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"}, + KVM_EXIT_STRING(MEMORY_NOT_PRESENT), #endif }; diff --git a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c index cdb7daeed5fd..2c432fa164f1 100644 --- 
a/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c +++ b/tools/testing/selftests/kvm/lib/s390x/diag318_test_handler.c @@ -35,8 +35,7 @@ static uint64_t diag318_handler(void) vcpu_run(vcpu); run = vcpu->run; - TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, - "DIAGNOSE 0x0318 instruction was not intercepted"); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC); TEST_ASSERT(run->s390_sieic.icptcode == ICPT_INSTRUCTION, "Unexpected intercept code: 0x%x", run->s390_sieic.icptcode); TEST_ASSERT((run->s390_sieic.ipa & 0xff00) == IPA0_DIAG, diff --git a/tools/testing/selftests/kvm/lib/test_util.c b/tools/testing/selftests/kvm/lib/test_util.c index 5c22fa4c2825..b772193f6c18 100644 --- a/tools/testing/selftests/kvm/lib/test_util.c +++ b/tools/testing/selftests/kvm/lib/test_util.c @@ -165,26 +165,33 @@ size_t get_trans_hugepagesz(void) size_t get_def_hugetlb_pagesz(void) { char buf[64]; - const char *tag = "Hugepagesize:"; + const char *hugepagesize = "Hugepagesize:"; + const char *hugepages_total = "HugePages_Total:"; FILE *f; f = fopen("/proc/meminfo", "r"); TEST_ASSERT(f != NULL, "Error in opening /proc/meminfo"); while (fgets(buf, sizeof(buf), f) != NULL) { - if (strstr(buf, tag) == buf) { + if (strstr(buf, hugepages_total) == buf) { + unsigned long long total = strtoull(buf + strlen(hugepages_total), NULL, 10); + if (!total) { + fprintf(stderr, "HUGETLB is not enabled in /proc/sys/vm/nr_hugepages\n"); + exit(KSFT_SKIP); + } + } + if (strstr(buf, hugepagesize) == buf) { fclose(f); - return strtoull(buf + strlen(tag), NULL, 10) << 10; + return strtoull(buf + strlen(hugepagesize), NULL, 10) << 10; } } - if (feof(f)) - TEST_FAIL("HUGETLB is not configured in host kernel"); - else - TEST_FAIL("Error in reading /proc/meminfo"); + if (feof(f)) { + fprintf(stderr, "HUGETLB is not configured in host kernel"); + exit(KSFT_SKIP); + } - fclose(f); - return 0; + TEST_FAIL("Error in reading /proc/meminfo"); } #define ANON_FLAGS (MAP_PRIVATE | MAP_ANONYMOUS) diff --git a/tools/testing/selftests/kvm/lib/x86_64/processor.c b/tools/testing/selftests/kvm/lib/x86_64/processor.c index ae1e573d94ce..c39a4353ba19 100644 --- a/tools/testing/selftests/kvm/lib/x86_64/processor.c +++ b/tools/testing/selftests/kvm/lib/x86_64/processor.c @@ -1139,21 +1139,36 @@ const struct kvm_cpuid_entry2 *get_cpuid_entry(const struct kvm_cpuid2 *cpuid, return NULL; } +#define X86_HYPERCALL(inputs...) 
\ +({ \ + uint64_t r; \ + \ + asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" \ + "jnz 1f\n\t" \ + "vmcall\n\t" \ + "jmp 2f\n\t" \ + "1: vmmcall\n\t" \ + "2:" \ + : "=a"(r) \ + : [use_vmmcall] "r" (host_cpu_is_amd), inputs); \ + \ + r; \ +}) + uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2, uint64_t a3) { - uint64_t r; - - asm volatile("test %[use_vmmcall], %[use_vmmcall]\n\t" - "jnz 1f\n\t" - "vmcall\n\t" - "jmp 2f\n\t" - "1: vmmcall\n\t" - "2:" - : "=a"(r) - : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3), - [use_vmmcall] "r" (host_cpu_is_amd)); - return r; + return X86_HYPERCALL("a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3)); +} + +uint64_t __xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +{ + return X86_HYPERCALL("a"(nr), "D"(a0), "S"(a1)); +} + +void xen_hypercall(uint64_t nr, uint64_t a0, void *a1) +{ + GUEST_ASSERT(!__xen_hypercall(nr, a0, a1)); } const struct kvm_cpuid2 *kvm_get_supported_hv_cpuid(void) diff --git a/tools/testing/selftests/kvm/s390x/sync_regs_test.c b/tools/testing/selftests/kvm/s390x/sync_regs_test.c index 2ddde41c44ba..636a70ddac1e 100644 --- a/tools/testing/selftests/kvm/s390x/sync_regs_test.c +++ b/tools/testing/selftests/kvm/s390x/sync_regs_test.c @@ -126,10 +126,7 @@ void test_req_and_verify_all_valid_regs(struct kvm_vcpu *vcpu) run->kvm_valid_regs = TEST_SYNC_FIELDS; rv = _vcpu_run(vcpu); TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv); - TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, - "Unexpected exit reason: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC); TEST_ASSERT(run->s390_sieic.icptcode == 4 && (run->s390_sieic.ipa >> 8) == 0x83 && (run->s390_sieic.ipb >> 16) == 0x501, @@ -165,10 +162,7 @@ void test_set_and_verify_various_reg_values(struct kvm_vcpu *vcpu) rv = _vcpu_run(vcpu); TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv); - TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, - "Unexpected exit reason: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC); TEST_ASSERT(run->s.regs.gprs[11] == 0xBAD1DEA + 1, "r11 sync regs value incorrect 0x%llx.", run->s.regs.gprs[11]); @@ -200,10 +194,7 @@ void test_clear_kvm_dirty_regs_bits(struct kvm_vcpu *vcpu) run->s.regs.diag318 = 0x4B1D; rv = _vcpu_run(vcpu); TEST_ASSERT(rv == 0, "vcpu_run failed: %d\n", rv); - TEST_ASSERT(run->exit_reason == KVM_EXIT_S390_SIEIC, - "Unexpected exit reason: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_S390_SIEIC); TEST_ASSERT(run->s.regs.gprs[11] != 0xDEADBEEF, "r11 sync regs value incorrect 0x%llx.", run->s.regs.gprs[11]); diff --git a/tools/testing/selftests/kvm/set_memory_region_test.c b/tools/testing/selftests/kvm/set_memory_region_test.c index 2ef1d1b72ce4..a849ce23ca97 100644 --- a/tools/testing/selftests/kvm/set_memory_region_test.c +++ b/tools/testing/selftests/kvm/set_memory_region_test.c @@ -308,7 +308,6 @@ static void test_delete_memory_region(void) static void test_zero_memory_regions(void) { struct kvm_vcpu *vcpu; - struct kvm_run *run; struct kvm_vm *vm; pr_info("Testing KVM_RUN with zero added memory regions\n"); @@ -318,10 +317,7 @@ static void test_zero_memory_regions(void) vm_ioctl(vm, KVM_SET_NR_MMU_PAGES, (void *)64ul); vcpu_run(vcpu); - - run = vcpu->run; - TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR, - "Unexpected exit_reason = %u\n", run->exit_reason); + 
TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); kvm_vm_free(vm); } diff --git a/tools/testing/selftests/kvm/x86_64/amx_test.c b/tools/testing/selftests/kvm/x86_64/amx_test.c index bd72c6eb3b67..b646cdb5055a 100644 --- a/tools/testing/selftests/kvm/x86_64/amx_test.c +++ b/tools/testing/selftests/kvm/x86_64/amx_test.c @@ -241,7 +241,6 @@ int main(int argc, char *argv[]) struct kvm_regs regs1, regs2; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct kvm_run *run; struct kvm_x86_state *state; int xsave_restore_size; vm_vaddr_t amx_cfg, tiledata, xsavedata; @@ -268,7 +267,6 @@ int main(int argc, char *argv[]) "KVM should enumerate max XSAVE size when XSAVE is supported"); xsave_restore_size = kvm_cpu_property(X86_PROPERTY_XSTATE_MAX_SIZE); - run = vcpu->run; vcpu_regs_get(vcpu, ®s1); /* Register #NM handler */ @@ -291,10 +289,7 @@ int main(int argc, char *argv[]) for (stage = 1; ; stage++) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: @@ -350,7 +345,6 @@ int main(int argc, char *argv[]) /* Restore state in a new VM. */ vcpu = vm_recreate_with_one_vcpu(vm); vcpu_load_state(vcpu, state); - run = vcpu->run; kvm_x86_state_cleanup(state); memset(®s2, 0, sizeof(regs2)); diff --git a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c index 1027a671c7d3..624dc725e14d 100644 --- a/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c +++ b/tools/testing/selftests/kvm/x86_64/cr4_cpuid_sync_test.c @@ -50,7 +50,6 @@ static void guest_code(void) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - struct kvm_run *run; struct kvm_vm *vm; struct kvm_sregs sregs; struct ucall uc; @@ -58,15 +57,10 @@ int main(int argc, char *argv[]) TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_XSAVE)); vm = vm_create_with_one_vcpu(&vcpu, guest_code); - run = vcpu->run; while (1) { vcpu_run(vcpu); - - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c index 7ef99c3359a0..f6b295e0b2d2 100644 --- a/tools/testing/selftests/kvm/x86_64/debug_regs.c +++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c @@ -204,7 +204,7 @@ int main(void) vcpu_guest_debug_set(vcpu, &debug); vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, "KVM_EXIT_IO"); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); cmd = get_ucall(vcpu, &uc); TEST_ASSERT(cmd == UCALL_DONE, "UCALL_DONE"); diff --git a/tools/testing/selftests/kvm/x86_64/flds_emulation.h b/tools/testing/selftests/kvm/x86_64/flds_emulation.h index e43a7df25f2c..0a1573d52882 100644 --- a/tools/testing/selftests/kvm/x86_64/flds_emulation.h +++ b/tools/testing/selftests/kvm/x86_64/flds_emulation.h @@ -24,10 +24,7 @@ static inline void handle_flds_emulation_failure_exit(struct kvm_vcpu *vcpu) uint8_t *insn_bytes; uint64_t flags; - TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR, - "Unexpected exit reason: %u (%s)", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); TEST_ASSERT(run->emulation_failure.suberror == 
KVM_INTERNAL_ERROR_EMULATION, "Unexpected suberror: %u", diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c index 2ee0af0d449e..f25749eaa6a8 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_clock.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_clock.c @@ -207,13 +207,11 @@ int main(void) { struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct kvm_run *run; struct ucall uc; vm_vaddr_t tsc_page_gva; int stage; vm = vm_create_with_one_vcpu(&vcpu, guest_main); - run = vcpu->run; vcpu_set_hv_cpuid(vcpu); @@ -227,10 +225,7 @@ int main(void) for (stage = 1;; stage++) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c index af29e5776d40..7bde0c4dfdbd 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_evmcs.c @@ -237,7 +237,6 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct kvm_run *run; struct ucall uc; int stage; @@ -266,13 +265,8 @@ int main(int argc, char *argv[]) pr_info("Running L1 which uses EVMCS to run L2\n"); for (stage = 1;; stage++) { - run = vcpu->run; - vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_features.c b/tools/testing/selftests/kvm/x86_64/hyperv_features.c index c5e3b39edd07..78606de9385d 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_features.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_features.c @@ -122,7 +122,6 @@ static void guest_test_msrs_access(void) { struct kvm_cpuid2 *prev_cpuid = NULL; struct kvm_vcpu *vcpu; - struct kvm_run *run; struct kvm_vm *vm; struct ucall uc; int stage = 0; @@ -151,8 +150,6 @@ static void guest_test_msrs_access(void) vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); - run = vcpu->run; - /* TODO: Make this entire test easier to maintain. */ if (stage >= 21) vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0); @@ -494,9 +491,7 @@ static void guest_test_msrs_access(void) msr->idx, msr->write ? 
"write" : "read"); vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "unexpected exit reason: %u (%s)", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: @@ -518,7 +513,6 @@ static void guest_test_hcalls_access(void) { struct kvm_cpuid2 *prev_cpuid = NULL; struct kvm_vcpu *vcpu; - struct kvm_run *run; struct kvm_vm *vm; struct ucall uc; int stage = 0; @@ -550,8 +544,6 @@ static void guest_test_hcalls_access(void) vcpu_init_cpuid(vcpu, prev_cpuid); } - run = vcpu->run; - switch (stage) { case 0: vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE); @@ -669,9 +661,7 @@ static void guest_test_hcalls_access(void) pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control); vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "unexpected exit reason: %u (%s)", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c index 0cbb0e646ef8..6feb5ddb031d 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_ipi.c @@ -243,7 +243,6 @@ int main(int argc, char *argv[]) { struct kvm_vm *vm; struct kvm_vcpu *vcpu[3]; - unsigned int exit_reason; vm_vaddr_t hcall_page; pthread_t threads[2]; int stage = 1, r; @@ -283,10 +282,7 @@ int main(int argc, char *argv[]) while (true) { vcpu_run(vcpu[0]); - exit_reason = vcpu[0]->run->exit_reason; - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "unexpected exit reason: %u (%s)", - exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO); switch (get_ucall(vcpu[0], &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c index 68a7d354ea07..e446d76d1c0c 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_svm_test.c @@ -156,7 +156,6 @@ int main(int argc, char *argv[]) vm_vaddr_t hcall_page; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct kvm_run *run; struct ucall uc; int stage; @@ -165,7 +164,6 @@ int main(int argc, char *argv[]) /* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code); vcpu_set_hv_cpuid(vcpu); - run = vcpu->run; vcpu_alloc_svm(vm, &nested_gva); vcpu_alloc_hyperv_test_pages(vm, &hv_pages_gva); @@ -177,10 +175,7 @@ int main(int argc, char *argv[]) for (stage = 1;; stage++) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c index 68f97ff720a7..4758b6ef5618 100644 --- a/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c +++ b/tools/testing/selftests/kvm/x86_64/hyperv_tlb_flush.c @@ -542,18 +542,13 @@ static void *vcpu_thread(void *arg) struct ucall uc; int old; int r; - unsigned int exit_reason; r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old); TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d", vcpu->id, r); vcpu_run(vcpu); - exit_reason = vcpu->run->exit_reason; - - 
TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO", - vcpu->id, exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: @@ -587,7 +582,6 @@ int main(int argc, char *argv[]) { struct kvm_vm *vm; struct kvm_vcpu *vcpu[3]; - unsigned int exit_reason; pthread_t threads[2]; vm_vaddr_t test_data_page, gva; vm_paddr_t gpa; @@ -657,11 +651,7 @@ int main(int argc, char *argv[]) while (true) { vcpu_run(vcpu[0]); - exit_reason = vcpu[0]->run->exit_reason; - - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "unexpected exit reason: %u (%s)", - exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO); switch (get_ucall(vcpu[0], &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c index 813ce282cf56..1778704360a6 100644 --- a/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c +++ b/tools/testing/selftests/kvm/x86_64/kvm_clock_test.c @@ -105,7 +105,6 @@ static void setup_clock(struct kvm_vm *vm, struct test_case *test_case) static void enter_guest(struct kvm_vcpu *vcpu) { struct kvm_clock_data start, end; - struct kvm_run *run = vcpu->run; struct kvm_vm *vm = vcpu->vm; struct ucall uc; int i; @@ -118,9 +117,7 @@ static void enter_guest(struct kvm_vcpu *vcpu) vcpu_run(vcpu); vm_ioctl(vm, KVM_GET_CLOCK, &end); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "unexpected exit reason: %u (%s)", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c index 619655c1a1f3..f774a9e62858 100644 --- a/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c +++ b/tools/testing/selftests/kvm/x86_64/kvm_pv_test.c @@ -111,14 +111,11 @@ static void pr_hcall(struct ucall *uc) static void enter_guest(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; struct ucall uc; while (true) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "unexpected exit reason: %u (%s)", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_PR_MSR: diff --git a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c index 016070cad36e..72812644d7f5 100644 --- a/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c +++ b/tools/testing/selftests/kvm/x86_64/monitor_mwait_test.c @@ -64,7 +64,6 @@ int main(int argc, char *argv[]) { uint64_t disabled_quirks; struct kvm_vcpu *vcpu; - struct kvm_run *run; struct kvm_vm *vm; struct ucall uc; int testcase; @@ -74,18 +73,12 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu, guest_code); vcpu_clear_cpuid_feature(vcpu, X86_FEATURE_MWAIT); - run = vcpu->run; - vm_init_descriptor_tables(vm); vcpu_init_descriptor_tables(vcpu); while (1) { vcpu_run(vcpu); - - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c index 
ac33835f78f4..6502aa23c2f8 100644 --- a/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c +++ b/tools/testing/selftests/kvm/x86_64/nested_exceptions_test.c @@ -166,12 +166,9 @@ static void __attribute__((__flatten__)) l1_guest_code(void *test_data) static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector) { - struct kvm_run *run = vcpu->run; struct ucall uc; - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/platform_info_test.c b/tools/testing/selftests/kvm/x86_64/platform_info_test.c index 310a104d94f0..c9a07963d68a 100644 --- a/tools/testing/selftests/kvm/x86_64/platform_info_test.c +++ b/tools/testing/selftests/kvm/x86_64/platform_info_test.c @@ -36,15 +36,12 @@ static void guest_code(void) static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; struct ucall uc; vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, true); vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Exit_reason other than KVM_EXIT_IO: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); + get_ucall(vcpu, &uc); TEST_ASSERT(uc.cmd == UCALL_SYNC, "Received ucall other than UCALL_SYNC: %lu\n", uc.cmd); @@ -56,14 +53,9 @@ static void test_msr_platform_info_enabled(struct kvm_vcpu *vcpu) static void test_msr_platform_info_disabled(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; - vm_enable_cap(vcpu->vm, KVM_CAP_MSR_PLATFORM_INFO, false); vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN); } int main(int argc, char *argv[]) diff --git a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c index bad7ef8c5b92..2feef25ba691 100644 --- a/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c +++ b/tools/testing/selftests/kvm/x86_64/pmu_event_filter_test.c @@ -151,14 +151,10 @@ static void amd_guest_code(void) */ static uint64_t run_vcpu_to_sync(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; struct ucall uc; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); get_ucall(vcpu, &uc); TEST_ASSERT(uc.cmd == UCALL_SYNC, "Received ucall other than UCALL_SYNC: %lu", uc.cmd); diff --git a/tools/testing/selftests/kvm/x86_64/smm_test.c b/tools/testing/selftests/kvm/x86_64/smm_test.c index cb38a478e1f6..e18b86666e1f 100644 --- a/tools/testing/selftests/kvm/x86_64/smm_test.c +++ b/tools/testing/selftests/kvm/x86_64/smm_test.c @@ -133,7 +133,6 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_regs regs; struct kvm_vm *vm; - struct kvm_run *run; struct kvm_x86_state *state; int stage, stage_reported; @@ -142,8 +141,6 @@ int main(int argc, char *argv[]) /* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code); - run = vcpu->run; - vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, SMRAM_GPA, SMRAM_MEMSLOT, SMRAM_PAGES, 0); TEST_ASSERT(vm_phy_pages_alloc(vm, SMRAM_PAGES, SMRAM_GPA, 
SMRAM_MEMSLOT) @@ -169,10 +166,7 @@ int main(int argc, char *argv[]) for (stage = 1;; stage++) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); memset(®s, 0, sizeof(regs)); vcpu_regs_get(vcpu, ®s); @@ -208,7 +202,6 @@ int main(int argc, char *argv[]) vcpu = vm_recreate_with_one_vcpu(vm); vcpu_load_state(vcpu, state); - run = vcpu->run; kvm_x86_state_cleanup(state); } diff --git a/tools/testing/selftests/kvm/x86_64/state_test.c b/tools/testing/selftests/kvm/x86_64/state_test.c index ea578971fb9f..4c4925a8ab45 100644 --- a/tools/testing/selftests/kvm/x86_64/state_test.c +++ b/tools/testing/selftests/kvm/x86_64/state_test.c @@ -158,14 +158,12 @@ int main(int argc, char *argv[]) struct kvm_regs regs1, regs2; struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct kvm_run *run; struct kvm_x86_state *state; struct ucall uc; int stage; /* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code); - run = vcpu->run; vcpu_regs_get(vcpu, ®s1); @@ -183,10 +181,7 @@ int main(int argc, char *argv[]) for (stage = 1;; stage++) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: @@ -214,7 +209,6 @@ int main(int argc, char *argv[]) /* Restore state in a new VM. */ vcpu = vm_recreate_with_one_vcpu(vm); vcpu_load_state(vcpu, state); - run = vcpu->run; kvm_x86_state_cleanup(state); memset(®s2, 0, sizeof(regs2)); diff --git a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c index 4a07ba227b99..32bef39bec21 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_int_ctl_test.c @@ -85,7 +85,6 @@ static void l1_guest_code(struct svm_test_data *svm) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - struct kvm_run *run; vm_vaddr_t svm_gva; struct kvm_vm *vm; struct ucall uc; @@ -103,13 +102,8 @@ int main(int argc, char *argv[]) vcpu_alloc_svm(vm, &svm_gva); vcpu_args_set(vcpu, 1, svm_gva); - run = vcpu->run; - vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c index e73fcdef47bb..d6fcdcc3af31 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_nested_shutdown_test.c @@ -42,7 +42,6 @@ static void l1_guest_code(struct svm_test_data *svm, struct idt_entry *idt) int main(int argc, char *argv[]) { struct kvm_vcpu *vcpu; - struct kvm_run *run; vm_vaddr_t svm_gva; struct kvm_vm *vm; @@ -55,13 +54,9 @@ int main(int argc, char *argv[]) vcpu_alloc_svm(vm, &svm_gva); vcpu_args_set(vcpu, 2, svm_gva, vm->idt); - run = vcpu->run; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN); 
kvm_vm_free(vm); } diff --git a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c index b34980d45648..4e2479716da6 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_nested_soft_inject_test.c @@ -176,16 +176,12 @@ static void run_test(bool is_nmi) memset(&debug, 0, sizeof(debug)); vcpu_guest_debug_set(vcpu, &debug); - struct kvm_run *run = vcpu->run; struct ucall uc; alarm(2); vcpu_run(vcpu); alarm(0); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c index c3ac45df7483..8a62cca28cfb 100644 --- a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c +++ b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c @@ -47,14 +47,10 @@ int main(int argc, char *argv[]) vcpu_args_set(vcpu, 1, svm_gva); for (;;) { - volatile struct kvm_run *run = vcpu->run; struct ucall uc; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c index d2f9b5bdfab2..2da89fdc2471 100644 --- a/tools/testing/selftests/kvm/x86_64/sync_regs_test.c +++ b/tools/testing/selftests/kvm/x86_64/sync_regs_test.c @@ -132,10 +132,7 @@ int main(int argc, char *argv[]) /* TODO: BUILD TIME CHECK: TEST_ASSERT(KVM_SYNC_X86_NUM_FIELDS != 3); */ run->kvm_valid_regs = TEST_SYNC_FIELDS; rv = _vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); vcpu_regs_get(vcpu, ®s); compare_regs(®s, &run->s.regs.regs); @@ -154,10 +151,7 @@ int main(int argc, char *argv[]) run->kvm_valid_regs = TEST_SYNC_FIELDS; run->kvm_dirty_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS; rv = _vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx == 0xBAD1DEA + 1, "rbx sync regs value incorrect 0x%llx.", run->s.regs.regs.rbx); @@ -181,10 +175,7 @@ int main(int argc, char *argv[]) run->kvm_dirty_regs = 0; run->s.regs.regs.rbx = 0xDEADBEEF; rv = _vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx != 0xDEADBEEF, "rbx sync regs value incorrect 0x%llx.", run->s.regs.regs.rbx); @@ -199,10 +190,7 @@ int main(int argc, char *argv[]) regs.rbx = 0xBAC0; vcpu_regs_set(vcpu, ®s); rv = _vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx == 0xAAAA, "rbx sync 
regs value incorrect 0x%llx.", run->s.regs.regs.rbx); @@ -219,10 +207,7 @@ int main(int argc, char *argv[]) run->kvm_dirty_regs = TEST_SYNC_FIELDS; run->s.regs.regs.rbx = 0xBBBB; rv = _vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->s.regs.regs.rbx == 0xBBBB, "rbx sync regs value incorrect 0x%llx.", run->s.regs.regs.rbx); diff --git a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c index ead5d878a71c..56306a19144a 100644 --- a/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c +++ b/tools/testing/selftests/kvm/x86_64/triple_fault_event_test.c @@ -89,9 +89,7 @@ int main(void) run = vcpu->run; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Expected KVM_EXIT_IO, got: %u (%s)\n", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT, "Expected IN from port %d from L2, got port %d", ARBITRARY_IO_PORT, run->io.port); @@ -111,10 +109,7 @@ int main(void) if (has_svm) { - TEST_ASSERT(run->exit_reason == KVM_EXIT_SHUTDOWN, - "Got exit_reason other than KVM_EXIT_SHUTDOWN: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_SHUTDOWN); } else { switch (get_ucall(vcpu, &uc)) { case UCALL_DONE: diff --git a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c index 47139aab7408..5b669818e39a 100644 --- a/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c +++ b/tools/testing/selftests/kvm/x86_64/tsc_scaling_sync.c @@ -64,14 +64,10 @@ static void *run_vcpu(void *_cpu_nr) pthread_spin_unlock(&create_lock); for (;;) { - volatile struct kvm_run *run = vcpu->run; struct ucall uc; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_DONE: diff --git a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c index a897c7fd8abe..85f34ca7e49e 100644 --- a/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c +++ b/tools/testing/selftests/kvm/x86_64/ucna_injection_test.c @@ -137,15 +137,11 @@ static void guest_gp_handler(struct ex_regs *regs) static void run_vcpu_expect_gp(struct kvm_vcpu *vcpu) { - unsigned int exit_reason; struct ucall uc; vcpu_run(vcpu); - exit_reason = vcpu->run->exit_reason; - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO", - exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_SYNC, "Expect UCALL_SYNC\n"); TEST_ASSERT(uc.args[1] == SYNC_GP, "#GP is expected."); @@ -182,7 +178,6 @@ static void *run_ucna_injection(void *arg) struct ucall uc; int old; int r; - unsigned int exit_reason; r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old); TEST_ASSERT(r == 0, @@ -191,10 +186,7 @@ static void *run_ucna_injection(void *arg) vcpu_run(params->vcpu); - exit_reason = params->vcpu->run->exit_reason; - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "unexpected exit reason %u-%s, expected 
KVM_EXIT_IO", - exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO); TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC, "Expect UCALL_SYNC\n"); TEST_ASSERT(uc.args[1] == SYNC_FIRST_UCNA, "Injecting first UCNA."); @@ -204,10 +196,7 @@ static void *run_ucna_injection(void *arg) inject_ucna(params->vcpu, FIRST_UCNA_ADDR); vcpu_run(params->vcpu); - exit_reason = params->vcpu->run->exit_reason; - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "unexpected exit reason %u-%s, expected KVM_EXIT_IO", - exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO); TEST_ASSERT(get_ucall(params->vcpu, &uc) == UCALL_SYNC, "Expect UCALL_SYNC\n"); TEST_ASSERT(uc.args[1] == SYNC_SECOND_UCNA, "Injecting second UCNA."); @@ -217,10 +206,7 @@ static void *run_ucna_injection(void *arg) inject_ucna(params->vcpu, SECOND_UCNA_ADDR); vcpu_run(params->vcpu); - exit_reason = params->vcpu->run->exit_reason; - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "unexpected exit reason %u-%s, expected KVM_EXIT_IO", - exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(params->vcpu, KVM_EXIT_IO); if (get_ucall(params->vcpu, &uc) == UCALL_ABORT) { TEST_ASSERT(false, "vCPU assertion failure: %s.\n", (const char *)uc.args[0]); diff --git a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c index 91076c9787b4..0cb51fa42773 100644 --- a/tools/testing/selftests/kvm/x86_64/userspace_io_test.c +++ b/tools/testing/selftests/kvm/x86_64/userspace_io_test.c @@ -63,11 +63,7 @@ int main(int argc, char *argv[]) while (1) { vcpu_run(vcpu); - - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); if (get_ucall(vcpu, &uc)) break; diff --git a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c index 25fa55344a10..3533dc2fbfee 100644 --- a/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c +++ b/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c @@ -410,10 +410,7 @@ static void process_rdmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) check_for_guest_assert(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_RDMSR); TEST_ASSERT(run->msr.index == msr_index, "Unexpected msr (0x%04x), expected 0x%04x", run->msr.index, msr_index); @@ -445,10 +442,7 @@ static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) check_for_guest_assert(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_X86_WRMSR); TEST_ASSERT(run->msr.index == msr_index, "Unexpected msr (0x%04x), expected 0x%04x", run->msr.index, msr_index); @@ -472,15 +466,11 @@ static void process_wrmsr(struct kvm_vcpu *vcpu, uint32_t msr_index) static void process_ucall_done(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; struct ucall uc; check_for_guest_assert(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s)", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(get_ucall(vcpu, 
&uc) == UCALL_DONE, "Unexpected ucall command: %lu, expected UCALL_DONE (%d)", @@ -489,15 +479,11 @@ static void process_ucall_done(struct kvm_vcpu *vcpu) static uint64_t process_ucall(struct kvm_vcpu *vcpu) { - struct kvm_run *run = vcpu->run; struct ucall uc = {}; check_for_guest_assert(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s)", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_SYNC: diff --git a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c index 5abecf06329e..2bed5fb3a0d6 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_apic_access_test.c @@ -96,21 +96,14 @@ int main(int argc, char *argv[]) vcpu_run(vcpu); if (apic_access_addr == high_gpa) { - TEST_ASSERT(run->exit_reason == - KVM_EXIT_INTERNAL_ERROR, - "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); TEST_ASSERT(run->internal.suberror == KVM_INTERNAL_ERROR_EMULATION, "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n", run->internal.suberror); break; } - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c index d79651b02740..dad988351493 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_close_while_nested_test.c @@ -64,10 +64,7 @@ int main(int argc, char *argv[]) struct ucall uc; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); if (run->io.port == PORT_L0_EXIT) break; diff --git a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c index f0456fb031b1..e4ad5fef52ff 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_dirty_log_test.c @@ -73,7 +73,6 @@ int main(int argc, char *argv[]) struct kvm_vcpu *vcpu; struct kvm_vm *vm; - struct kvm_run *run; struct ucall uc; bool done = false; @@ -84,7 +83,6 @@ int main(int argc, char *argv[]) vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code); vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva); vcpu_args_set(vcpu, 1, vmx_pages_gva); - run = vcpu->run; /* Add an extra memory slot for testing dirty logging */ vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, @@ -117,10 +115,7 @@ int main(int argc, char *argv[]) while (!done) { memset(host_test_mem, 0xaa, TEST_MEM_PAGES * 4096); vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Unexpected exit reason: %u (%s),\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c 
b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c index ccdfa5dc1a4d..be0bdb8c6f78 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_exception_with_invalid_guest_state.c @@ -26,9 +26,7 @@ static void __run_vcpu_with_invalid_state(struct kvm_vcpu *vcpu) vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR, - "Expected KVM_EXIT_INTERNAL_ERROR, got %d (%s)\n", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR); TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION, "Expected emulation failure, got %d\n", run->emulation_failure.suberror); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c index 6bfb4bb471ca..a100ee5f0009 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_invalid_nested_guest_state.c @@ -74,9 +74,7 @@ int main(int argc, char *argv[]) * The first exit to L0 userspace should be an I/O access from L2. * Running L1 should launch L2 without triggering an exit to userspace. */ - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Expected KVM_EXIT_IO, got: %u (%s)\n", - run->exit_reason, exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); TEST_ASSERT(run->io.port == ARBITRARY_IO_PORT, "Expected IN from port %d from L2, got port %d", diff --git a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c index 465a9434d61c..d427eb146bc5 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_nested_tsc_scaling_test.c @@ -183,14 +183,10 @@ int main(int argc, char *argv[]) vcpu_ioctl(vcpu, KVM_SET_TSC_KHZ, (void *) (tsc_khz / l1_scale_factor)); for (;;) { - volatile struct kvm_run *run = vcpu->run; struct ucall uc; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c index 0efdc05969a5..affc32800158 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c @@ -157,7 +157,6 @@ int main(int argc, char *argv[]) struct kvm_regs regs1, regs2; struct kvm_vm *vm; - struct kvm_run *run; struct kvm_vcpu *vcpu; struct kvm_x86_state *state; struct ucall uc; @@ -173,7 +172,6 @@ int main(int argc, char *argv[]) /* Create VM */ vm = vm_create_with_one_vcpu(&vcpu, guest_code); - run = vcpu->run; vcpu_regs_get(vcpu, ®s1); @@ -182,10 +180,7 @@ int main(int argc, char *argv[]) for (stage = 1;; stage++) { vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Stage %d: unexpected exit reason: %u (%s),\n", - stage, run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: @@ -237,7 +232,6 @@ int main(int argc, char *argv[]) /* Restore state in a new VM. 
*/ vcpu = vm_recreate_with_one_vcpu(vm); vcpu_load_state(vcpu, state); - run = vcpu->run; kvm_x86_state_cleanup(state); memset(®s2, 0, sizeof(regs2)); diff --git a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c index ff8ecdf32ae0..2ceb5c78c442 100644 --- a/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c +++ b/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c @@ -131,14 +131,10 @@ int main(int argc, char *argv[]) vcpu_args_set(vcpu, 1, vmx_pages_gva); for (;;) { - volatile struct kvm_run *run = vcpu->run; struct ucall uc; vcpu_run(vcpu); - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: diff --git a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c index 3d272d7f961e..67ac2a3292ef 100644 --- a/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c +++ b/tools/testing/selftests/kvm/x86_64/xapic_ipi_test.c @@ -198,7 +198,6 @@ static void *vcpu_thread(void *arg) struct ucall uc; int old; int r; - unsigned int exit_reason; r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old); TEST_ASSERT(r == 0, @@ -207,11 +206,8 @@ static void *vcpu_thread(void *arg) fprintf(stderr, "vCPU thread running vCPU %u\n", vcpu->id); vcpu_run(vcpu); - exit_reason = vcpu->run->exit_reason; - TEST_ASSERT(exit_reason == KVM_EXIT_IO, - "vCPU %u exited with unexpected exit reason %u-%s, expected KVM_EXIT_IO", - vcpu->id, exit_reason, exit_reason_str(exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); if (get_ucall(vcpu, &uc) == UCALL_ABORT) { TEST_ASSERT(false, diff --git a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c index 5a3bf8f61417..05898ad9f4d9 100644 --- a/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c +++ b/tools/testing/selftests/kvm/x86_64/xen_shinfo_test.c @@ -26,6 +26,9 @@ #define DUMMY_REGION_GPA (SHINFO_REGION_GPA + (3 * PAGE_SIZE)) #define DUMMY_REGION_SLOT 11 +#define DUMMY_REGION_GPA_2 (SHINFO_REGION_GPA + (4 * PAGE_SIZE)) +#define DUMMY_REGION_SLOT_2 12 + #define SHINFO_ADDR (SHINFO_REGION_GPA) #define VCPU_INFO_ADDR (SHINFO_REGION_GPA + 0x40) #define PVTIME_ADDR (SHINFO_REGION_GPA + PAGE_SIZE) @@ -41,6 +44,37 @@ #define EVTCHN_TEST2 66 #define EVTCHN_TIMER 13 +enum { + TEST_INJECT_VECTOR = 0, + TEST_RUNSTATE_runnable, + TEST_RUNSTATE_blocked, + TEST_RUNSTATE_offline, + TEST_RUNSTATE_ADJUST, + TEST_RUNSTATE_DATA, + TEST_STEAL_TIME, + TEST_EVTCHN_MASKED, + TEST_EVTCHN_UNMASKED, + TEST_EVTCHN_SLOWPATH, + TEST_EVTCHN_SEND_IOCTL, + TEST_EVTCHN_HCALL, + TEST_EVTCHN_HCALL_SLOWPATH, + TEST_EVTCHN_HCALL_EVENTFD, + TEST_TIMER_SETUP, + TEST_TIMER_WAIT, + TEST_TIMER_RESTORE, + TEST_POLL_READY, + TEST_POLL_TIMEOUT, + TEST_POLL_MASKED, + TEST_POLL_WAKE, + TEST_TIMER_PAST, + TEST_LOCKING_SEND_RACE, + TEST_LOCKING_POLL_RACE, + TEST_LOCKING_POLL_TIMEOUT, + TEST_DONE, + + TEST_GUEST_SAW_IRQ, +}; + #define XEN_HYPERCALL_MSR 0x40000000 #define MIN_STEAL_TIME 50000 @@ -144,7 +178,7 @@ static void evtchn_handler(struct ex_regs *regs) vi->evtchn_pending_sel = 0; guest_saw_irq = true; - GUEST_SYNC(0x20); + GUEST_SYNC(TEST_GUEST_SAW_IRQ); } static void guest_wait_for_irq(void) @@ -165,41 +199,41 @@ static void guest_code(void) ); /* Trigger an interrupt injection */ - GUEST_SYNC(0); + 
GUEST_SYNC(TEST_INJECT_VECTOR); guest_wait_for_irq(); /* Test having the host set runstates manually */ - GUEST_SYNC(RUNSTATE_runnable); + GUEST_SYNC(TEST_RUNSTATE_runnable); GUEST_ASSERT(rs->time[RUNSTATE_runnable] != 0); GUEST_ASSERT(rs->state == 0); - GUEST_SYNC(RUNSTATE_blocked); + GUEST_SYNC(TEST_RUNSTATE_blocked); GUEST_ASSERT(rs->time[RUNSTATE_blocked] != 0); GUEST_ASSERT(rs->state == 0); - GUEST_SYNC(RUNSTATE_offline); + GUEST_SYNC(TEST_RUNSTATE_offline); GUEST_ASSERT(rs->time[RUNSTATE_offline] != 0); GUEST_ASSERT(rs->state == 0); /* Test runstate time adjust */ - GUEST_SYNC(4); + GUEST_SYNC(TEST_RUNSTATE_ADJUST); GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x5a); GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x6b6b); /* Test runstate time set */ - GUEST_SYNC(5); + GUEST_SYNC(TEST_RUNSTATE_DATA); GUEST_ASSERT(rs->state_entry_time >= 0x8000); GUEST_ASSERT(rs->time[RUNSTATE_runnable] == 0); GUEST_ASSERT(rs->time[RUNSTATE_blocked] == 0x6b6b); GUEST_ASSERT(rs->time[RUNSTATE_offline] == 0x5a); /* sched_yield() should result in some 'runnable' time */ - GUEST_SYNC(6); + GUEST_SYNC(TEST_STEAL_TIME); GUEST_ASSERT(rs->time[RUNSTATE_runnable] >= MIN_STEAL_TIME); /* Attempt to deliver a *masked* interrupt */ - GUEST_SYNC(7); + GUEST_SYNC(TEST_EVTCHN_MASKED); /* Wait until we see the bit set */ struct shared_info *si = (void *)SHINFO_VADDR; @@ -207,71 +241,65 @@ static void guest_code(void) __asm__ __volatile__ ("rep nop" : : : "memory"); /* Now deliver an *unmasked* interrupt */ - GUEST_SYNC(8); + GUEST_SYNC(TEST_EVTCHN_UNMASKED); guest_wait_for_irq(); /* Change memslots and deliver an interrupt */ - GUEST_SYNC(9); + GUEST_SYNC(TEST_EVTCHN_SLOWPATH); guest_wait_for_irq(); /* Deliver event channel with KVM_XEN_HVM_EVTCHN_SEND */ - GUEST_SYNC(10); + GUEST_SYNC(TEST_EVTCHN_SEND_IOCTL); guest_wait_for_irq(); - GUEST_SYNC(11); + GUEST_SYNC(TEST_EVTCHN_HCALL); /* Our turn. Deliver event channel (to ourselves) with * EVTCHNOP_send hypercall. */ - unsigned long rax; struct evtchn_send s = { .port = 127 }; - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_event_channel_op), - "D" (EVTCHNOP_send), - "S" (&s)); + xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s); + + guest_wait_for_irq(); + + GUEST_SYNC(TEST_EVTCHN_HCALL_SLOWPATH); - GUEST_ASSERT(rax == 0); + /* + * Same again, but this time the host has messed with memslots so it + * should take the slow path in kvm_xen_set_evtchn(). + */ + xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s); guest_wait_for_irq(); - GUEST_SYNC(12); + GUEST_SYNC(TEST_EVTCHN_HCALL_EVENTFD); /* Deliver "outbound" event channel to an eventfd which * happens to be one of our own irqfds. */ s.port = 197; - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_event_channel_op), - "D" (EVTCHNOP_send), - "S" (&s)); - - GUEST_ASSERT(rax == 0); + xen_hypercall(__HYPERVISOR_event_channel_op, EVTCHNOP_send, &s); guest_wait_for_irq(); - GUEST_SYNC(13); + GUEST_SYNC(TEST_TIMER_SETUP); /* Set a timer 100ms in the future. */ - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_set_timer_op), - "D" (rs->state_entry_time + 100000000)); - GUEST_ASSERT(rax == 0); + xen_hypercall(__HYPERVISOR_set_timer_op, + rs->state_entry_time + 100000000, NULL); - GUEST_SYNC(14); + GUEST_SYNC(TEST_TIMER_WAIT); /* Now wait for the timer */ guest_wait_for_irq(); - GUEST_SYNC(15); + GUEST_SYNC(TEST_TIMER_RESTORE); /* The host has 'restored' the timer. Just wait for it. 
*/ guest_wait_for_irq(); - GUEST_SYNC(16); + GUEST_SYNC(TEST_POLL_READY); /* Poll for an event channel port which is already set */ u32 ports[1] = { EVTCHN_TIMER }; @@ -281,65 +309,41 @@ static void guest_code(void) .timeout = 0, }; - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_sched_op), - "D" (SCHEDOP_poll), - "S" (&p)); + xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p); - GUEST_ASSERT(rax == 0); - - GUEST_SYNC(17); + GUEST_SYNC(TEST_POLL_TIMEOUT); /* Poll for an unset port and wait for the timeout. */ p.timeout = 100000000; - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_sched_op), - "D" (SCHEDOP_poll), - "S" (&p)); - - GUEST_ASSERT(rax == 0); + xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p); - GUEST_SYNC(18); + GUEST_SYNC(TEST_POLL_MASKED); /* A timer will wake the masked port we're waiting on, while we poll */ p.timeout = 0; - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_sched_op), - "D" (SCHEDOP_poll), - "S" (&p)); - - GUEST_ASSERT(rax == 0); + xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p); - GUEST_SYNC(19); + GUEST_SYNC(TEST_POLL_WAKE); /* A timer wake an *unmasked* port which should wake us with an * actual interrupt, while we're polling on a different port. */ ports[0]++; p.timeout = 0; - __asm__ __volatile__ ("vmcall" : - "=a" (rax) : - "a" (__HYPERVISOR_sched_op), - "D" (SCHEDOP_poll), - "S" (&p)); - - GUEST_ASSERT(rax == 0); + xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p); guest_wait_for_irq(); - GUEST_SYNC(20); + GUEST_SYNC(TEST_TIMER_PAST); /* Timer should have fired already */ guest_wait_for_irq(); - GUEST_SYNC(21); + GUEST_SYNC(TEST_LOCKING_SEND_RACE); /* Racing host ioctls */ guest_wait_for_irq(); - GUEST_SYNC(22); + GUEST_SYNC(TEST_LOCKING_POLL_RACE); /* Racing vmcall against host ioctl */ ports[0] = 0; @@ -360,24 +364,19 @@ wait_for_timer: * timer IRQ is dropped due to an invalid event channel. */ for (i = 0; i < 100 && !guest_saw_irq; i++) - asm volatile("vmcall" - : "=a" (rax) - : "a" (__HYPERVISOR_sched_op), - "D" (SCHEDOP_poll), - "S" (&p) - : "memory"); + __xen_hypercall(__HYPERVISOR_sched_op, SCHEDOP_poll, &p); /* * Re-send the timer IRQ if it was (likely) dropped due to the timer * expiring while the event channel was invalid. 
*/ if (!guest_saw_irq) { - GUEST_SYNC(23); + GUEST_SYNC(TEST_LOCKING_POLL_TIMEOUT); goto wait_for_timer; } guest_saw_irq = false; - GUEST_SYNC(24); + GUEST_SYNC(TEST_DONE); } static int cmp_timespec(struct timespec *a, struct timespec *b) @@ -623,15 +622,10 @@ int main(int argc, char *argv[]) bool evtchn_irq_expected = false; for (;;) { - volatile struct kvm_run *run = vcpu->run; struct ucall uc; vcpu_run(vcpu); - - TEST_ASSERT(run->exit_reason == KVM_EXIT_IO, - "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n", - run->exit_reason, - exit_reason_str(run->exit_reason)); + TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO); switch (get_ucall(vcpu, &uc)) { case UCALL_ABORT: @@ -647,25 +641,26 @@ int main(int argc, char *argv[]) "runstate times don't add up"); switch (uc.args[1]) { - case 0: + case TEST_INJECT_VECTOR: if (verbose) printf("Delivering evtchn upcall\n"); evtchn_irq_expected = true; vinfo->evtchn_upcall_pending = 1; break; - case RUNSTATE_runnable...RUNSTATE_offline: + case TEST_RUNSTATE_runnable...TEST_RUNSTATE_offline: TEST_ASSERT(!evtchn_irq_expected, "Event channel IRQ not seen"); if (!do_runstate_tests) goto done; if (verbose) printf("Testing runstate %s\n", runstate_names[uc.args[1]]); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_CURRENT; - rst.u.runstate.state = uc.args[1]; + rst.u.runstate.state = uc.args[1] + RUNSTATE_runnable - + TEST_RUNSTATE_runnable; vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst); break; - case 4: + case TEST_RUNSTATE_ADJUST: if (verbose) printf("Testing RUNSTATE_ADJUST\n"); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_ADJUST; @@ -680,7 +675,7 @@ int main(int argc, char *argv[]) vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst); break; - case 5: + case TEST_RUNSTATE_DATA: if (verbose) printf("Testing RUNSTATE_DATA\n"); rst.type = KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA; @@ -692,7 +687,7 @@ int main(int argc, char *argv[]) vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &rst); break; - case 6: + case TEST_STEAL_TIME: if (verbose) printf("Testing steal time\n"); /* Yield until scheduler delay exceeds target */ @@ -702,7 +697,7 @@ int main(int argc, char *argv[]) } while (get_run_delay() < rundelay); break; - case 7: + case TEST_EVTCHN_MASKED: if (!do_eventfd_tests) goto done; if (verbose) @@ -712,7 +707,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 8: + case TEST_EVTCHN_UNMASKED: if (verbose) printf("Testing unmasked event channel\n"); /* Unmask that, but deliver the other one */ @@ -723,7 +718,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 9: + case TEST_EVTCHN_SLOWPATH: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); shinfo->evtchn_pending[1] = 0; @@ -736,7 +731,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 10: + case TEST_EVTCHN_SEND_IOCTL: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); if (!do_evtchn_tests) @@ -756,7 +751,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 11: + case TEST_EVTCHN_HCALL: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); shinfo->evtchn_pending[1] = 0; @@ -767,7 +762,20 @@ int main(int argc, char *argv[]) alarm(1); break; - case 12: + case TEST_EVTCHN_HCALL_SLOWPATH: + TEST_ASSERT(!evtchn_irq_expected, + "Expected event channel IRQ but it didn't happen"); + shinfo->evtchn_pending[0] = 0; + + if (verbose) + printf("Testing guest EVTCHNOP_send direct to evtchn after memslot change\n"); + vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, + DUMMY_REGION_GPA_2, 
DUMMY_REGION_SLOT_2, 1, 0); + evtchn_irq_expected = true; + alarm(1); + break; + + case TEST_EVTCHN_HCALL_EVENTFD: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); shinfo->evtchn_pending[0] = 0; @@ -778,7 +786,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 13: + case TEST_TIMER_SETUP: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); shinfo->evtchn_pending[1] = 0; @@ -787,7 +795,7 @@ int main(int argc, char *argv[]) printf("Testing guest oneshot timer\n"); break; - case 14: + case TEST_TIMER_WAIT: memset(&tmr, 0, sizeof(tmr)); tmr.type = KVM_XEN_VCPU_ATTR_TYPE_TIMER; vcpu_ioctl(vcpu, KVM_XEN_VCPU_GET_ATTR, &tmr); @@ -801,7 +809,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 15: + case TEST_TIMER_RESTORE: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); shinfo->evtchn_pending[0] = 0; @@ -815,7 +823,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 16: + case TEST_POLL_READY: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); @@ -825,14 +833,14 @@ int main(int argc, char *argv[]) alarm(1); break; - case 17: + case TEST_POLL_TIMEOUT: if (verbose) printf("Testing SCHEDOP_poll timeout\n"); shinfo->evtchn_pending[0] = 0; alarm(1); break; - case 18: + case TEST_POLL_MASKED: if (verbose) printf("Testing SCHEDOP_poll wake on masked event\n"); @@ -841,7 +849,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 19: + case TEST_POLL_WAKE: shinfo->evtchn_pending[0] = shinfo->evtchn_mask[0] = 0; if (verbose) printf("Testing SCHEDOP_poll wake on unmasked event\n"); @@ -858,7 +866,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 20: + case TEST_TIMER_PAST: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); /* Read timer and check it is no longer pending */ @@ -875,7 +883,7 @@ int main(int argc, char *argv[]) alarm(1); break; - case 21: + case TEST_LOCKING_SEND_RACE: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); alarm(0); @@ -897,7 +905,7 @@ int main(int argc, char *argv[]) __vm_ioctl(vm, KVM_XEN_HVM_EVTCHN_SEND, &uxe); break; - case 22: + case TEST_LOCKING_POLL_RACE: TEST_ASSERT(!evtchn_irq_expected, "Expected event channel IRQ but it didn't happen"); @@ -912,7 +920,7 @@ int main(int argc, char *argv[]) vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr); break; - case 23: + case TEST_LOCKING_POLL_TIMEOUT: /* * Optional and possibly repeated sync point. 
 		 * Injecting the timer IRQ may fail if the
@@ -934,7 +942,7 @@ int main(int argc, char *argv[])
 				SHINFO_RACE_TIMEOUT * 1000000000ULL;
 			vcpu_ioctl(vcpu, KVM_XEN_VCPU_SET_ATTR, &tmr);
 			break;
-		case 24:
+		case TEST_DONE:
 			TEST_ASSERT(!evtchn_irq_expected,
 				    "Expected event channel IRQ but it didn't happen");
 
@@ -945,7 +953,7 @@ int main(int argc, char *argv[])
 			TEST_ASSERT(ret == 0, "pthread_join() failed: %s", strerror(ret));
 			goto done;
 
-		case 0x20:
+		case TEST_GUEST_SAW_IRQ:
 			TEST_ASSERT(evtchn_irq_expected, "Unexpected event channel IRQ");
 			evtchn_irq_expected = false;
 			break;
diff --git a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
index 88914d48c65e..c94cde3b523f 100644
--- a/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
+++ b/tools/testing/selftests/kvm/x86_64/xen_vmcall_test.c
@@ -122,10 +122,7 @@ int main(int argc, char *argv[])
 			continue;
 		}
 
-		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
-			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
-			    run->exit_reason,
-			    exit_reason_str(run->exit_reason));
+		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);
 
 		switch (get_ucall(vcpu, &uc)) {
 		case UCALL_ABORT:
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index f7900e75d230..05400462c779 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -10,12 +10,14 @@ endif
 CLANG_TARGET_FLAGS_arm := arm-linux-gnueabi
 CLANG_TARGET_FLAGS_arm64 := aarch64-linux-gnu
 CLANG_TARGET_FLAGS_hexagon := hexagon-linux-musl
+CLANG_TARGET_FLAGS_i386 := i386-linux-gnu
 CLANG_TARGET_FLAGS_m68k := m68k-linux-gnu
 CLANG_TARGET_FLAGS_mips := mipsel-linux-gnu
 CLANG_TARGET_FLAGS_powerpc := powerpc64le-linux-gnu
 CLANG_TARGET_FLAGS_riscv := riscv64-linux-gnu
 CLANG_TARGET_FLAGS_s390 := s390x-linux-gnu
 CLANG_TARGET_FLAGS_x86 := x86_64-linux-gnu
+CLANG_TARGET_FLAGS_x86_64 := x86_64-linux-gnu
 CLANG_TARGET_FLAGS := $(CLANG_TARGET_FLAGS_$(ARCH))
 
 ifeq ($(CROSS_COMPILE),)
diff --git a/tools/testing/selftests/net/.gitignore b/tools/testing/selftests/net/.gitignore
index a6911cae368c..80f06aa62034 100644
--- a/tools/testing/selftests/net/.gitignore
+++ b/tools/testing/selftests/net/.gitignore
@@ -1,6 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0-only
 bind_bhash
 bind_timewait
+bind_wildcard
 csum
 cmsg_sender
 diag_uid
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 6cd8993454d7..80fbfe0330f6 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -80,6 +80,7 @@ TEST_GEN_FILES += sctp_hello
 TEST_GEN_FILES += csum
 TEST_GEN_FILES += nat6to4.o
 TEST_GEN_FILES += ip_local_port_range
+TEST_GEN_FILES += bind_wildcard
 
 TEST_FILES := settings
 
diff --git a/tools/testing/selftests/net/bind_wildcard.c b/tools/testing/selftests/net/bind_wildcard.c
new file mode 100644
index 000000000000..58edfc15d28b
--- /dev/null
+++ b/tools/testing/selftests/net/bind_wildcard.c
@@ -0,0 +1,114 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright Amazon.com Inc. or its affiliates. */
+
+#include <sys/socket.h>
+#include <netinet/in.h>
+
+#include "../kselftest_harness.h"
+
+FIXTURE(bind_wildcard)
+{
+	struct sockaddr_in addr4;
+	struct sockaddr_in6 addr6;
+	int expected_errno;
+};
+
+FIXTURE_VARIANT(bind_wildcard)
+{
+	const __u32 addr4_const;
+	const struct in6_addr *addr6_const;
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_any)
+{
+	.addr4_const = INADDR_ANY,
+	.addr6_const = &in6addr_any,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_any_v6_local)
+{
+	.addr4_const = INADDR_ANY,
+	.addr6_const = &in6addr_loopback,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_any)
+{
+	.addr4_const = INADDR_LOOPBACK,
+	.addr6_const = &in6addr_any,
+};
+
+FIXTURE_VARIANT_ADD(bind_wildcard, v4_local_v6_local)
+{
+	.addr4_const = INADDR_LOOPBACK,
+	.addr6_const = &in6addr_loopback,
+};
+
+FIXTURE_SETUP(bind_wildcard)
+{
+	self->addr4.sin_family = AF_INET;
+	self->addr4.sin_port = htons(0);
+	self->addr4.sin_addr.s_addr = htonl(variant->addr4_const);
+
+	self->addr6.sin6_family = AF_INET6;
+	self->addr6.sin6_port = htons(0);
+	self->addr6.sin6_addr = *variant->addr6_const;
+
+	if (variant->addr6_const == &in6addr_any)
+		self->expected_errno = EADDRINUSE;
+	else
+		self->expected_errno = 0;
+}
+
+FIXTURE_TEARDOWN(bind_wildcard)
+{
+}
+
+void bind_sockets(struct __test_metadata *_metadata,
+		  FIXTURE_DATA(bind_wildcard) *self,
+		  struct sockaddr *addr1, socklen_t addrlen1,
+		  struct sockaddr *addr2, socklen_t addrlen2)
+{
+	int fd[2];
+	int ret;
+
+	fd[0] = socket(addr1->sa_family, SOCK_STREAM, 0);
+	ASSERT_GT(fd[0], 0);
+
+	ret = bind(fd[0], addr1, addrlen1);
+	ASSERT_EQ(ret, 0);
+
+	ret = getsockname(fd[0], addr1, &addrlen1);
+	ASSERT_EQ(ret, 0);
+
+	((struct sockaddr_in *)addr2)->sin_port = ((struct sockaddr_in *)addr1)->sin_port;
+
+	fd[1] = socket(addr2->sa_family, SOCK_STREAM, 0);
+	ASSERT_GT(fd[1], 0);
+
+	ret = bind(fd[1], addr2, addrlen2);
+	if (self->expected_errno) {
+		ASSERT_EQ(ret, -1);
+		ASSERT_EQ(errno, self->expected_errno);
+	} else {
+		ASSERT_EQ(ret, 0);
+	}
+
+	close(fd[1]);
+	close(fd[0]);
+}
+
+TEST_F(bind_wildcard, v4_v6)
+{
+	bind_sockets(_metadata, self,
+		     (struct sockaddr *)&self->addr4, sizeof(self->addr6),
+		     (struct sockaddr *)&self->addr6, sizeof(self->addr6));
+}
+
+TEST_F(bind_wildcard, v6_v4)
+{
+	bind_sockets(_metadata, self,
+		     (struct sockaddr *)&self->addr6, sizeof(self->addr6),
+		     (struct sockaddr *)&self->addr4, sizeof(self->addr4));
+}
+
+TEST_HARNESS_MAIN
diff --git a/tools/testing/selftests/net/devlink_port_split.py b/tools/testing/selftests/net/devlink_port_split.py
index 2b5d6ff87373..2d84c7a0be6b 100755
--- a/tools/testing/selftests/net/devlink_port_split.py
+++ b/tools/testing/selftests/net/devlink_port_split.py
@@ -59,6 +59,8 @@ class devlink_ports(object):
         assert stderr == ""
         ports = json.loads(stdout)['port']
 
+        validate_devlink_output(ports, 'flavour')
+
         for port in ports:
             if dev in port:
                 if ports[port]['flavour'] == 'physical':
@@ -220,6 +222,27 @@ def split_splittable_port(port, k, lanes, dev):
     unsplit(port.bus_info)
 
 
+def validate_devlink_output(devlink_data, target_property=None):
+    """
+    Determine if test should be skipped by checking:
+      1. devlink_data contains values
+      2. The target_property exist in devlink_data
+    """
+    skip_reason = None
+    if any(devlink_data.values()):
+        if target_property:
+            skip_reason = "{} not found in devlink output, test skipped".format(target_property)
+            for key in devlink_data:
+                if target_property in devlink_data[key]:
+                    skip_reason = None
+    else:
+        skip_reason = 'devlink output is empty, test skipped'
+
+    if skip_reason:
+        print(skip_reason)
+        sys.exit(KSFT_SKIP)
+
+
 def make_parser():
     parser = argparse.ArgumentParser(description='A test for port splitting.')
     parser.add_argument('--dev',
@@ -240,12 +263,9 @@ def main(cmdline=None):
     stdout, stderr = run_command(cmd)
     assert stderr == ""
 
+    validate_devlink_output(json.loads(stdout))
     devs = json.loads(stdout)['dev']
-    if devs:
-        dev = list(devs.keys())[0]
-    else:
-        print("no devlink device was found, test skipped")
-        sys.exit(KSFT_SKIP)
+    dev = list(devs.keys())[0]
 
     cmd = "devlink dev show %s" % dev
     stdout, stderr = run_command(cmd)
@@ -255,6 +275,7 @@
 
     ports = devlink_ports(dev)
 
+    found_max_lanes = False
     for port in ports.if_names:
         max_lanes = get_max_lanes(port.name)
 
@@ -277,6 +298,11 @@
             split_splittable_port(port, lane, max_lanes, dev)
 
             lane //= 2
+        found_max_lanes = True
+
+    if not found_max_lanes:
+        print(f"Test not started, no port of device {dev} reports max_lanes")
+        sys.exit(KSFT_SKIP)
 
 
 if __name__ == "__main__":
diff --git a/tools/testing/selftests/net/mptcp/userspace_pm.sh b/tools/testing/selftests/net/mptcp/userspace_pm.sh
index 66c5be25c13d..48e52f995a98 100755
--- a/tools/testing/selftests/net/mptcp/userspace_pm.sh
+++ b/tools/testing/selftests/net/mptcp/userspace_pm.sh
@@ -240,7 +240,7 @@ check_expected_one()
 	fi
 
 	stdbuf -o0 -e0 printf "\tExpected value for '%s': '%s', got '%s'.\n" \
-		"${var}" "${!var}" "${!exp}"
+		"${var}" "${!exp}" "${!var}"
 	return 1
 }
diff --git a/tools/testing/vsock/vsock_test.c b/tools/testing/vsock/vsock_test.c
index 67e9f9df3a8c..3de10dbb50f5 100644
--- a/tools/testing/vsock/vsock_test.c
+++ b/tools/testing/vsock/vsock_test.c
@@ -860,6 +860,114 @@ static void test_stream_poll_rcvlowat_client(const struct test_opts *opts)
 	close(fd);
 }
 
+#define INV_BUF_TEST_DATA_LEN 512
+
+static void test_inv_buf_client(const struct test_opts *opts, bool stream)
+{
+	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
+	ssize_t ret;
+	int fd;
+
+	if (stream)
+		fd = vsock_stream_connect(opts->peer_cid, 1234);
+	else
+		fd = vsock_seqpacket_connect(opts->peer_cid, 1234);
+
+	if (fd < 0) {
+		perror("connect");
+		exit(EXIT_FAILURE);
+	}
+
+	control_expectln("SENDDONE");
+
+	/* Use invalid buffer here. */
+	ret = recv(fd, NULL, sizeof(data), 0);
+	if (ret != -1) {
+		fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
+		exit(EXIT_FAILURE);
+	}
+
+	if (errno != ENOMEM) {
+		fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
+		exit(EXIT_FAILURE);
+	}
+
+	ret = recv(fd, data, sizeof(data), MSG_DONTWAIT);
+
+	if (stream) {
+		/* For SOCK_STREAM we must continue reading. */
+		if (ret != sizeof(data)) {
+			fprintf(stderr, "expected recv(2) success, got %zi\n", ret);
+			exit(EXIT_FAILURE);
+		}
+		/* Don't check errno in case of success. */
+	} else {
+		/* For SOCK_SEQPACKET socket's queue must be empty. */
+		if (ret != -1) {
+			fprintf(stderr, "expected recv(2) failure, got %zi\n", ret);
+			exit(EXIT_FAILURE);
+		}
+
+		if (errno != EAGAIN) {
+			fprintf(stderr, "unexpected recv(2) errno %d\n", errno);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	control_writeln("DONE");
+
+	close(fd);
+}
+
+static void test_inv_buf_server(const struct test_opts *opts, bool stream)
+{
+	unsigned char data[INV_BUF_TEST_DATA_LEN] = {0};
+	ssize_t res;
+	int fd;
+
+	if (stream)
+		fd = vsock_stream_accept(VMADDR_CID_ANY, 1234, NULL);
+	else
+		fd = vsock_seqpacket_accept(VMADDR_CID_ANY, 1234, NULL);
+
+	if (fd < 0) {
+		perror("accept");
+		exit(EXIT_FAILURE);
+	}
+
+	res = send(fd, data, sizeof(data), 0);
+	if (res != sizeof(data)) {
+		fprintf(stderr, "unexpected send(2) result %zi\n", res);
+		exit(EXIT_FAILURE);
+	}
+
+	control_writeln("SENDDONE");
+
+	control_expectln("DONE");
+
+	close(fd);
+}
+
+static void test_stream_inv_buf_client(const struct test_opts *opts)
+{
+	test_inv_buf_client(opts, true);
+}
+
+static void test_stream_inv_buf_server(const struct test_opts *opts)
+{
+	test_inv_buf_server(opts, true);
+}
+
+static void test_seqpacket_inv_buf_client(const struct test_opts *opts)
+{
+	test_inv_buf_client(opts, false);
+}
+
+static void test_seqpacket_inv_buf_server(const struct test_opts *opts)
+{
+	test_inv_buf_server(opts, false);
+}
+
 static struct test_case test_cases[] = {
 	{
 		.name = "SOCK_STREAM connection reset",
@@ -920,6 +1028,16 @@ static struct test_case test_cases[] = {
 		.run_client = test_seqpacket_bigmsg_client,
 		.run_server = test_seqpacket_bigmsg_server,
 	},
+	{
+		.name = "SOCK_STREAM test invalid buffer",
+		.run_client = test_stream_inv_buf_client,
+		.run_server = test_stream_inv_buf_server,
+	},
+	{
+		.name = "SOCK_SEQPACKET test invalid buffer",
+		.run_client = test_seqpacket_inv_buf_client,
+		.run_server = test_seqpacket_inv_buf_server,
+	},
 	{},
 };
diff --git a/tools/virtio/.gitignore b/tools/virtio/.gitignore
index 075588c4da08..9934d48d9a55 100644
--- a/tools/virtio/.gitignore
+++ b/tools/virtio/.gitignore
@@ -2,3 +2,4 @@
 *.d
 virtio_test
 vringh_test
+virtio-trace/trace-agent