Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kconfig                                  |  24
-rw-r--r--  arch/riscv/Kconfig.errata                           |   1
-rw-r--r--  arch/riscv/Makefile                                 |   2
-rw-r--r--  arch/riscv/configs/defconfig                        |   1
-rw-r--r--  arch/riscv/configs/rv32_defconfig                   | 139
-rw-r--r--  arch/riscv/errata/andes/errata.c                    |  20
-rw-r--r--  arch/riscv/errata/thead/errata.c                    |  69
-rw-r--r--  arch/riscv/include/asm/asm-extable.h                |  15
-rw-r--r--  arch/riscv/include/asm/cpu_ops.h                    |  14
-rw-r--r--  arch/riscv/include/asm/cpufeature.h                 |   4
-rw-r--r--  arch/riscv/include/asm/errata_list.h                |  50
-rw-r--r--  arch/riscv/include/asm/hwcap.h                      |  38
-rw-r--r--  arch/riscv/include/asm/hwprobe.h                    |  24
-rw-r--r--  arch/riscv/include/asm/kfence.h                     |   4
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h                 |  22
-rw-r--r--  arch/riscv/include/asm/pgtable.h                    |  35
-rw-r--r--  arch/riscv/include/asm/processor.h                  |   2
-rw-r--r--  arch/riscv/include/asm/sbi.h                        |  19
-rw-r--r--  arch/riscv/include/asm/sections.h                   |   1
-rw-r--r--  arch/riscv/include/asm/thread_info.h                |   1
-rw-r--r--  arch/riscv/include/asm/tlbbatch.h                   |  15
-rw-r--r--  arch/riscv/include/asm/tlbflush.h                   |   8
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h             |  27
-rw-r--r--  arch/riscv/include/asm/xip_fixup.h                  |   2
-rw-r--r--  arch/riscv/include/uapi/asm/hwprobe.h               |  32
-rw-r--r--  arch/riscv/kernel/Makefile                          |   1
-rw-r--r--  arch/riscv/kernel/cpu-hotplug.c                     |  19
-rw-r--r--  arch/riscv/kernel/cpu_ops.c                         |  14
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c                     |  19
-rw-r--r--  arch/riscv/kernel/cpu_ops_spinwait.c                |  11
-rw-r--r--  arch/riscv/kernel/cpufeature.c                      | 195
-rw-r--r--  arch/riscv/kernel/efi.c                             |   2
-rw-r--r--  arch/riscv/kernel/head.S                            |   8
-rw-r--r--  arch/riscv/kernel/mcount-dyn.S                      |   2
-rw-r--r--  arch/riscv/kernel/mcount.S                          |   2
-rw-r--r--  arch/riscv/kernel/module.c                          | 121
-rw-r--r--  arch/riscv/kernel/patch.c                           |  11
-rw-r--r--  arch/riscv/kernel/pi/cmdline_early.c                |   3
-rw-r--r--  arch/riscv/kernel/sbi.c                             |  66
-rw-r--r--  arch/riscv/kernel/setup.c                           |   1
-rw-r--r--  arch/riscv/kernel/signal.c                          |   2
-rw-r--r--  arch/riscv/kernel/smp.c                             |   2
-rw-r--r--  arch/riscv/kernel/smpboot.c                         |  38
-rw-r--r--  arch/riscv/kernel/suspend.c                         |  44
-rw-r--r--  arch/riscv/kernel/sys_hwprobe.c                     | 411
-rw-r--r--  arch/riscv/kernel/sys_riscv.c                       | 285
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_uleb128.S  |   8
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c                |  12
-rw-r--r--  arch/riscv/kernel/vdso/hwprobe.c                    |  86
-rw-r--r--  arch/riscv/kernel/vmlinux-xip.lds.S                 |   2
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S                     |   2
-rw-r--r--  arch/riscv/kvm/mmu.c                                |  22
-rw-r--r--  arch/riscv/lib/clear_page.S                         |   2
-rw-r--r--  arch/riscv/lib/tishift.S                            |   2
-rw-r--r--  arch/riscv/lib/uaccess.S                            |   2
-rw-r--r--  arch/riscv/mm/Makefile                              |   3
-rw-r--r--  arch/riscv/mm/extable.c                             |  31
-rw-r--r--  arch/riscv/mm/fault.c                               |  16
-rw-r--r--  arch/riscv/mm/hugetlbpage.c                         |  12
-rw-r--r--  arch/riscv/mm/init.c                                |  29
-rw-r--r--  arch/riscv/mm/kasan_init.c                          |  45
-rw-r--r--  arch/riscv/mm/pageattr.c                            |  55
-rw-r--r--  arch/riscv/mm/pgtable.c                             |  51
-rw-r--r--  arch/riscv/mm/tlbflush.c                            |  69
64 files changed, 1450 insertions, 825 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 0a03d72706b5..b834d2daf95e 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -53,19 +53,21 @@ config RISCV
select ARCH_USE_MEMTEST
select ARCH_USE_QUEUED_RWLOCKS
select ARCH_USES_CFI_TRAPS if CFI_CLANG
+ select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU
select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
select ARCH_WANT_FRAME_POINTERS
select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT
select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL
select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
+ select ARCH_WANTS_NO_INSTR
select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
select BUILDTIME_TABLE_SORT if MMU
select CLINT_TIMER if !MMU
select CLONE_BACKWARDS
select COMMON_CLK
- select CPU_PM if CPU_IDLE || HIBERNATION
+ select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND
select EDAC_SUPPORT
select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE)
select GENERIC_ARCH_TOPOLOGY
@@ -673,6 +675,20 @@ config RISCV_MISALIGNED
load/store for both kernel and userspace. When disabled, misaligned
accesses will generate SIGBUS in userspace and panic in kernel.
+config RISCV_EFFICIENT_UNALIGNED_ACCESS
+ bool "Assume the CPU supports fast unaligned memory accesses"
+ depends on NONPORTABLE
+ select DCACHE_WORD_ACCESS if MMU
+ select HAVE_EFFICIENT_UNALIGNED_ACCESS
+ help
+ Say Y here if you want the kernel to assume that the CPU supports
+ efficient unaligned memory accesses. When enabled, this option
+ improves the performance of the kernel on such CPUs. However, the
+ kernel will run much more slowly, or will not be able to run at all,
+ on CPUs that do not support efficient unaligned memory accesses.
+
+ If unsure what to do here, say N.
+
endmenu # "Platform type"
menu "Kernel features"
@@ -926,13 +942,13 @@ config RISCV_ISA_FALLBACK
on the replacement properties, "riscv,isa-base" and
"riscv,isa-extensions".
-endmenu # "Boot options"
-
config BUILTIN_DTB
- bool
+ bool "Built-in device tree"
depends on OF && NONPORTABLE
default y if XIP_KERNEL
+endmenu # "Boot options"
+
config PORTABLE
bool
default !NONPORTABLE
diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata
index e2c731cfed8c..dedb8b238e73 100644
--- a/arch/riscv/Kconfig.errata
+++ b/arch/riscv/Kconfig.errata
@@ -79,6 +79,7 @@ config ERRATA_THEAD_CMO
depends on ERRATA_THEAD && MMU
select DMA_DIRECT_REMAP
select RISCV_DMA_NONCOHERENT
+ select RISCV_NONSTANDARD_CACHE_OPS
default y
help
This will apply the cache management errata to handle the
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index a74be78678eb..ebbe02628a27 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -108,7 +108,9 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
# unaligned accesses. While unaligned accesses are explicitly allowed in the
# RISC-V ISA, they're emulated by machine mode traps on all extant
# architectures. It's faster to have GCC emit only aligned accesses.
+ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y)
KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+endif
ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y)
prepare: stack_protector_prepare
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index 905881282a7c..eaf34e871e30 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -149,6 +149,7 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_8250_DW=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_SH_SCI=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_VIRTIO=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
deleted file mode 100644
index 89b601e253a6..000000000000
--- a/arch/riscv/configs/rv32_defconfig
+++ /dev/null
@@ -1,139 +0,0 @@
-CONFIG_SYSVIPC=y
-CONFIG_POSIX_MQUEUE=y
-CONFIG_NO_HZ_IDLE=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BPF_SYSCALL=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_CGROUPS=y
-CONFIG_CGROUP_SCHED=y
-CONFIG_CFS_BANDWIDTH=y
-CONFIG_CGROUP_BPF=y
-CONFIG_NAMESPACES=y
-CONFIG_USER_NS=y
-CONFIG_CHECKPOINT_RESTORE=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_EXPERT=y
-# CONFIG_SYSFS_SYSCALL is not set
-CONFIG_PROFILING=y
-CONFIG_SOC_SIFIVE=y
-CONFIG_SOC_VIRT=y
-CONFIG_NONPORTABLE=y
-CONFIG_ARCH_RV32I=y
-CONFIG_SMP=y
-CONFIG_HOTPLUG_CPU=y
-CONFIG_PM=y
-CONFIG_CPU_IDLE=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=m
-CONFIG_JUMP_LABEL=y
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_IP_ADVANCED_ROUTER=y
-CONFIG_IP_PNP=y
-CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
-CONFIG_IP_PNP_RARP=y
-CONFIG_NETLINK_DIAG=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
-CONFIG_PCI=y
-CONFIG_PCIEPORTBUS=y
-CONFIG_PCI_HOST_GENERIC=y
-CONFIG_PCIE_XILINX=y
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
-CONFIG_BLK_DEV_SD=y
-CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
-CONFIG_MACB=y
-CONFIG_E1000E=y
-CONFIG_R8169=y
-CONFIG_MICROSEMI_PHY=y
-CONFIG_INPUT_MOUSEDEV=y
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_VIRTIO_CONSOLE=y
-CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
-CONFIG_SPI=y
-CONFIG_SPI_SIFIVE=y
-# CONFIG_PTP_1588_CLOCK is not set
-CONFIG_DRM=y
-CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
-CONFIG_FB=y
-CONFIG_FRAMEBUFFER_CONSOLE=y
-CONFIG_USB=y
-CONFIG_USB_XHCI_HCD=y
-CONFIG_USB_XHCI_PLATFORM=y
-CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_OHCI_HCD=y
-CONFIG_USB_OHCI_HCD_PLATFORM=y
-CONFIG_USB_STORAGE=y
-CONFIG_USB_UAS=y
-CONFIG_MMC=y
-CONFIG_MMC_SPI=y
-CONFIG_RTC_CLASS=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_CTRL=y
-CONFIG_RPMSG_VIRTIO=y
-CONFIG_EXT4_FS=y
-CONFIG_EXT4_FS_POSIX_ACL=y
-CONFIG_AUTOFS_FS=y
-CONFIG_MSDOS_FS=y
-CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
-CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_NFS_V4_1=y
-CONFIG_NFS_V4_2=y
-CONFIG_ROOT_NFS=y
-CONFIG_9P_FS=y
-CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
-CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_FS=y
-CONFIG_DEBUG_PAGEALLOC=y
-CONFIG_SCHED_STACK_END_CHECK=y
-CONFIG_DEBUG_VM=y
-CONFIG_DEBUG_VM_PGFLAGS=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_PER_CPU_MAPS=y
-CONFIG_SOFTLOCKUP_DETECTOR=y
-CONFIG_WQ_WATCHDOG=y
-CONFIG_DEBUG_TIMEKEEPING=y
-CONFIG_DEBUG_RT_MUTEXES=y
-CONFIG_DEBUG_SPINLOCK=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_DEBUG_RWSEMS=y
-CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_STACKTRACE=y
-CONFIG_DEBUG_LIST=y
-CONFIG_DEBUG_PLIST=y
-CONFIG_DEBUG_SG=y
-# CONFIG_RCU_TRACE is not set
-CONFIG_RCU_EQS_DEBUG=y
-# CONFIG_FTRACE is not set
-# CONFIG_RUNTIME_TESTING_MENU is not set
-CONFIG_MEMTEST=y
diff --git a/arch/riscv/errata/andes/errata.c b/arch/riscv/errata/andes/errata.c
index 197db68cc8da..17a904869724 100644
--- a/arch/riscv/errata/andes/errata.c
+++ b/arch/riscv/errata/andes/errata.c
@@ -38,29 +38,35 @@ static long ax45mp_iocp_sw_workaround(void)
return ret.error ? 0 : ret.value;
}
-static bool errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
+static void errata_probe_iocp(unsigned int stage, unsigned long arch_id, unsigned long impid)
{
+ static bool done;
+
if (!IS_ENABLED(CONFIG_ERRATA_ANDES_CMO))
- return false;
+ return;
+
+ if (done)
+ return;
+
+ done = true;
if (arch_id != ANDESTECH_AX45MP_MARCHID || impid != ANDESTECH_AX45MP_MIMPID)
- return false;
+ return;
if (!ax45mp_iocp_sw_workaround())
- return false;
+ return;
/* Set this just to make core cbo code happy */
riscv_cbom_block_size = 1;
riscv_noncoherent_supported();
-
- return true;
}
void __init_or_module andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
unsigned long archid, unsigned long impid,
unsigned int stage)
{
- errata_probe_iocp(stage, archid, impid);
+ if (stage == RISCV_ALTERNATIVES_BOOT)
+ errata_probe_iocp(stage, archid, impid);
/* we have nothing to patch here ATM so just return back */
}
diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c
index 0554ed4bf087..b1c410bbc1ae 100644
--- a/arch/riscv/errata/thead/errata.c
+++ b/arch/riscv/errata/thead/errata.c
@@ -12,8 +12,10 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
+#include <asm/dma-noncoherent.h>
#include <asm/errata_list.h>
#include <asm/hwprobe.h>
+#include <asm/io.h>
#include <asm/patch.h>
#include <asm/vendorid_list.h>
@@ -33,6 +35,69 @@ static bool errata_probe_pbmt(unsigned int stage,
return false;
}
+/*
+ * th.dcache.ipa rs1 (invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01010 rs1 000 00000 0001011
+ * th.dcache.iva rs1 (invalidate, virtual address)
+ * 0000001 00110 rs1 000 00000 0001011
+ *
+ * th.dcache.cpa rs1 (clean, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01001 rs1 000 00000 0001011
+ * th.dcache.cva rs1 (clean, virtual address)
+ * 0000001 00101 rs1 000 00000 0001011
+ *
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01011 rs1 000 00000 0001011
+ * th.dcache.civa rs1 (clean then invalidate, virtual address)
+ * 0000001 00111 rs1 000 00000 0001011
+ *
+ * th.sync.s (make sure all cache operations finished)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000000 11001 00000 000 00000 0001011
+ */
+#define THEAD_INVAL_A0 ".long 0x02a5000b"
+#define THEAD_CLEAN_A0 ".long 0x0295000b"
+#define THEAD_FLUSH_A0 ".long 0x02b5000b"
+#define THEAD_SYNC_S ".long 0x0190000b"
+
+#define THEAD_CMO_OP(_op, _start, _size, _cachesize) \
+asm volatile("mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ THEAD_##_op##_A0 "\n\t" \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ THEAD_SYNC_S \
+ : : "r"(_cachesize), \
+ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
+ "r"((unsigned long)(_start) + (_size)) \
+ : "a0")
+
+static void thead_errata_cache_inv(phys_addr_t paddr, size_t size)
+{
+ THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size);
+}
+
+static void thead_errata_cache_wback(phys_addr_t paddr, size_t size)
+{
+ THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size);
+}
+
+static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+ THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size);
+}
+
+static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = {
+ .wback = &thead_errata_cache_wback,
+ .inv = &thead_errata_cache_inv,
+ .wback_inv = &thead_errata_cache_wback_inv,
+};
+
static bool errata_probe_cmo(unsigned int stage,
unsigned long arch_id, unsigned long impid)
{
@@ -48,6 +113,7 @@ static bool errata_probe_cmo(unsigned int stage,
if (stage == RISCV_ALTERNATIVES_BOOT) {
riscv_cbom_block_size = L1_CACHE_BYTES;
riscv_noncoherent_supported();
+ riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops);
}
return true;
@@ -77,8 +143,7 @@ static u32 thead_errata_probe(unsigned int stage,
if (errata_probe_pbmt(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PBMT);
- if (errata_probe_cmo(stage, archid, impid))
- cpu_req_errata |= BIT(ERRATA_THEAD_CMO);
+ errata_probe_cmo(stage, archid, impid);
if (errata_probe_pmu(stage, archid, impid))
cpu_req_errata |= BIT(ERRATA_THEAD_PMU);
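
The .long opcodes above follow directly from the field layout given in the comment. As a sanity check, a standalone sketch (plain C, not kernel code) can reassemble the th.dcache encodings with rs1 = a0 (x10):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a T-Head CMO encoding from its fields (see comment above):
 * funct7 = 0000001, a 5-bit field selects the operation, rs1 holds the
 * address, funct3 = 000, rd = 00000, opcode = 0001011 (custom-0). */
static uint32_t thead_cmo_insn(uint32_t funct5, uint32_t rs1)
{
	return (0x01u << 25) | (funct5 << 20) | (rs1 << 15) | 0x0Bu;
}

int main(void)
{
	printf("0x%08x\n", thead_cmo_insn(0x0A, 10)); /* 0x02a5000b = THEAD_INVAL_A0 */
	printf("0x%08x\n", thead_cmo_insn(0x09, 10)); /* 0x0295000b = THEAD_CLEAN_A0 */
	printf("0x%08x\n", thead_cmo_insn(0x0B, 10)); /* 0x02b5000b = THEAD_FLUSH_A0 */
	return 0;
}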
diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h
index 00a96e7a9664..0c8bfd54fc4e 100644
--- a/arch/riscv/include/asm/asm-extable.h
+++ b/arch/riscv/include/asm/asm-extable.h
@@ -6,6 +6,7 @@
#define EX_TYPE_FIXUP 1
#define EX_TYPE_BPF 2
#define EX_TYPE_UACCESS_ERR_ZERO 3
+#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4
#ifdef CONFIG_MMU
@@ -47,6 +48,11 @@
#define EX_DATA_REG_ZERO_SHIFT 5
#define EX_DATA_REG_ZERO GENMASK(9, 5)
+#define EX_DATA_REG_DATA_SHIFT 0
+#define EX_DATA_REG_DATA GENMASK(4, 0)
+#define EX_DATA_REG_ADDR_SHIFT 5
+#define EX_DATA_REG_ADDR GENMASK(9, 5)
+
#define EX_DATA_REG(reg, gpr) \
"((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
@@ -62,6 +68,15 @@
#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
_ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD), \
+ "(" \
+ EX_DATA_REG(DATA, data) " | " \
+ EX_DATA_REG(ADDR, addr) \
+ ")")
+
#endif /* __ASSEMBLY__ */
#else /* CONFIG_MMU */
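
The data word of the new extable entry type packs two 5-bit GPR indices, one for the destination register and one for the address register. A minimal sketch of how a fixup handler can pull them back out (the real decoding lives in arch/riscv/mm/extable.c; FIELD_GET comes from <linux/bitfield.h>):

/* Sketch only: unpack the GPR numbers packed by
 * _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD above. */
static void decode_zeropad_regs(const struct exception_table_entry *ex,
				int *reg_data, int *reg_addr)
{
	*reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data); /* bits 4:0 */
	*reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data); /* bits 9:5 */
}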
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
index aa128466c4d4..176b570ef982 100644
--- a/arch/riscv/include/asm/cpu_ops.h
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -13,33 +13,23 @@
/**
* struct cpu_operations - Callback operations for hotplugging CPUs.
*
- * @name: Name of the boot protocol.
- * @cpu_prepare: Early one-time preparation step for a cpu. If there
- * is a mechanism for doing so, tests whether it is
- * possible to boot the given HART.
* @cpu_start: Boots a cpu into the kernel.
- * @cpu_disable: Prepares a cpu to die. May fail for some
- * mechanism-specific reason, which will cause the hot
- * unplug to be aborted. Called from the cpu to be killed.
* @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from
* the cpu being stopped.
* @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another
* cpu.
*/
struct cpu_operations {
- const char *name;
- int (*cpu_prepare)(unsigned int cpu);
int (*cpu_start)(unsigned int cpu,
struct task_struct *tidle);
#ifdef CONFIG_HOTPLUG_CPU
- int (*cpu_disable)(unsigned int cpu);
void (*cpu_stop)(void);
int (*cpu_is_stopped)(unsigned int cpu);
#endif
};
extern const struct cpu_operations cpu_ops_spinwait;
-extern const struct cpu_operations *cpu_ops[NR_CPUS];
-void __init cpu_set_ops(int cpu);
+extern const struct cpu_operations *cpu_ops;
+void __init cpu_set_ops(void);
#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index a418c3112cd6..fbdde8b8a47e 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -59,6 +59,8 @@ struct riscv_isa_ext_data {
const unsigned int id;
const char *name;
const char *property;
+ const unsigned int *subset_ext_ids;
+ const unsigned int subset_ext_size;
};
extern const struct riscv_isa_ext_data riscv_isa_ext[];
@@ -67,7 +69,7 @@ extern bool riscv_isa_fallback;
unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit);
#define riscv_isa_extension_available(isa_bitmap, ext) \
__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 83ed25e43553..ea33288f8a25 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -24,9 +24,8 @@
#ifdef CONFIG_ERRATA_THEAD
#define ERRATA_THEAD_PBMT 0
-#define ERRATA_THEAD_CMO 1
-#define ERRATA_THEAD_PMU 2
-#define ERRATA_THEAD_NUMBER 3
+#define ERRATA_THEAD_PMU 1
+#define ERRATA_THEAD_NUMBER 2
#endif
#ifdef __ASSEMBLY__
@@ -94,54 +93,17 @@ asm volatile(ALTERNATIVE( \
#define ALT_THEAD_PMA(_val)
#endif
-/*
- * th.dcache.ipa rs1 (invalidate, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000001 01010 rs1 000 00000 0001011
- * th.dache.iva rs1 (invalida, virtual address)
- * 0000001 00110 rs1 000 00000 0001011
- *
- * th.dcache.cpa rs1 (clean, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000001 01001 rs1 000 00000 0001011
- * th.dcache.cva rs1 (clean, virtual address)
- * 0000001 00101 rs1 000 00000 0001011
- *
- * th.dcache.cipa rs1 (clean then invalidate, physical address)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000001 01011 rs1 000 00000 0001011
- * th.dcache.civa rs1 (... virtual address)
- * 0000001 00111 rs1 000 00000 0001011
- *
- * th.sync.s (make sure all cache operations finished)
- * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
- * 0000000 11001 00000 000 00000 0001011
- */
-#define THEAD_INVAL_A0 ".long 0x0265000b"
-#define THEAD_CLEAN_A0 ".long 0x0255000b"
-#define THEAD_FLUSH_A0 ".long 0x0275000b"
-#define THEAD_SYNC_S ".long 0x0190000b"
-
#define ALT_CMO_OP(_op, _start, _size, _cachesize) \
-asm volatile(ALTERNATIVE_2( \
- __nops(6), \
+asm volatile(ALTERNATIVE( \
+ __nops(5), \
"mv a0, %1\n\t" \
"j 2f\n\t" \
"3:\n\t" \
CBO_##_op(a0) \
"add a0, a0, %0\n\t" \
"2:\n\t" \
- "bltu a0, %2, 3b\n\t" \
- "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
- "mv a0, %1\n\t" \
- "j 2f\n\t" \
- "3:\n\t" \
- THEAD_##_op##_A0 "\n\t" \
- "add a0, a0, %0\n\t" \
- "2:\n\t" \
- "bltu a0, %2, 3b\n\t" \
- THEAD_SYNC_S, THEAD_VENDOR_ID, \
- ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \
+ "bltu a0, %2, 3b\n\t", \
+ 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM) \
: : "r"(_cachesize), \
"r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
"r"((unsigned long)(_start) + (_size)) \
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 06d30526ef3b..5340f818746b 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -11,19 +11,13 @@
#include <uapi/asm/hwcap.h>
#define RISCV_ISA_EXT_a ('a' - 'a')
-#define RISCV_ISA_EXT_b ('b' - 'a')
#define RISCV_ISA_EXT_c ('c' - 'a')
#define RISCV_ISA_EXT_d ('d' - 'a')
#define RISCV_ISA_EXT_f ('f' - 'a')
#define RISCV_ISA_EXT_h ('h' - 'a')
#define RISCV_ISA_EXT_i ('i' - 'a')
-#define RISCV_ISA_EXT_j ('j' - 'a')
-#define RISCV_ISA_EXT_k ('k' - 'a')
#define RISCV_ISA_EXT_m ('m' - 'a')
-#define RISCV_ISA_EXT_p ('p' - 'a')
#define RISCV_ISA_EXT_q ('q' - 'a')
-#define RISCV_ISA_EXT_s ('s' - 'a')
-#define RISCV_ISA_EXT_u ('u' - 'a')
#define RISCV_ISA_EXT_v ('v' - 'a')
/*
@@ -57,8 +51,38 @@
#define RISCV_ISA_EXT_ZIHPM 42
#define RISCV_ISA_EXT_SMSTATEEN 43
#define RISCV_ISA_EXT_ZICOND 44
+#define RISCV_ISA_EXT_ZBC 45
+#define RISCV_ISA_EXT_ZBKB 46
+#define RISCV_ISA_EXT_ZBKC 47
+#define RISCV_ISA_EXT_ZBKX 48
+#define RISCV_ISA_EXT_ZKND 49
+#define RISCV_ISA_EXT_ZKNE 50
+#define RISCV_ISA_EXT_ZKNH 51
+#define RISCV_ISA_EXT_ZKR 52
+#define RISCV_ISA_EXT_ZKSED 53
+#define RISCV_ISA_EXT_ZKSH 54
+#define RISCV_ISA_EXT_ZKT 55
+#define RISCV_ISA_EXT_ZVBB 56
+#define RISCV_ISA_EXT_ZVBC 57
+#define RISCV_ISA_EXT_ZVKB 58
+#define RISCV_ISA_EXT_ZVKG 59
+#define RISCV_ISA_EXT_ZVKNED 60
+#define RISCV_ISA_EXT_ZVKNHA 61
+#define RISCV_ISA_EXT_ZVKNHB 62
+#define RISCV_ISA_EXT_ZVKSED 63
+#define RISCV_ISA_EXT_ZVKSH 64
+#define RISCV_ISA_EXT_ZVKT 65
+#define RISCV_ISA_EXT_ZFH 66
+#define RISCV_ISA_EXT_ZFHMIN 67
+#define RISCV_ISA_EXT_ZIHINTNTL 68
+#define RISCV_ISA_EXT_ZVFH 69
+#define RISCV_ISA_EXT_ZVFHMIN 70
+#define RISCV_ISA_EXT_ZFA 71
+#define RISCV_ISA_EXT_ZTSO 72
+#define RISCV_ISA_EXT_ZACAS 73
-#define RISCV_ISA_EXT_MAX 64
+#define RISCV_ISA_EXT_MAX 128
+#define RISCV_ISA_EXT_INVALID U32_MAX
#ifdef CONFIG_RISCV_M_MODE
#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 5c48f48e79a6..630507dff5ea 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -15,4 +15,28 @@ static inline bool riscv_hwprobe_key_is_valid(__s64 key)
return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
}
+static inline bool hwprobe_key_is_bitmask(__s64 key)
+{
+ switch (key) {
+ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+ case RISCV_HWPROBE_KEY_IMA_EXT_0:
+ case RISCV_HWPROBE_KEY_CPUPERF_0:
+ return true;
+ }
+
+ return false;
+}
+
+static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair,
+ struct riscv_hwprobe *other_pair)
+{
+ if (pair->key != other_pair->key)
+ return false;
+
+ if (hwprobe_key_is_bitmask(pair->key))
+ return (pair->value & other_pair->value) == other_pair->value;
+
+ return pair->value == other_pair->value;
+}
+
#endif
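
For bitmask keys the comparison above is subset containment rather than equality. A worked example using real key/flag names (the specific values are illustrative):

/* kernel reports F/D and C; the caller asks only for C */
struct riscv_hwprobe avail = {
	.key   = RISCV_HWPROBE_KEY_IMA_EXT_0,
	.value = RISCV_HWPROBE_IMA_FD | RISCV_HWPROBE_IMA_C,
};
struct riscv_hwprobe want = {
	.key   = RISCV_HWPROBE_KEY_IMA_EXT_0,
	.value = RISCV_HWPROBE_IMA_C,
};

/* true: every bit set in want.value is also set in avail.value */
bool ok = riscv_hwprobe_pair_cmp(&avail, &want);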
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
index 0bbffd528096..7388edd88986 100644
--- a/arch/riscv/include/asm/kfence.h
+++ b/arch/riscv/include/asm/kfence.h
@@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
pte_t *pte = virt_to_kpte(addr);
if (protect)
- set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT));
else
- set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+ set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT));
flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 9a2c780a11e9..b42017d76924 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -202,7 +202,7 @@ static inline int pud_user(pud_t pud)
static inline void set_pud(pud_t *pudp, pud_t pud)
{
- *pudp = pud;
+ WRITE_ONCE(*pudp, pud);
}
static inline void pud_clear(pud_t *pudp)
@@ -278,7 +278,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd)
static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
if (pgtable_l4_enabled)
- *p4dp = p4d;
+ WRITE_ONCE(*p4dp, p4d);
else
set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
}
@@ -340,18 +340,12 @@ static inline struct page *p4d_page(p4d_t p4d)
#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
#define pud_offset pud_offset
-static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
-{
- if (pgtable_l4_enabled)
- return p4d_pgtable(*p4d) + pud_index(address);
-
- return (pud_t *)p4d;
-}
+pud_t *pud_offset(p4d_t *p4d, unsigned long address);
static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
if (pgtable_l5_enabled)
- *pgdp = pgd;
+ WRITE_ONCE(*pgdp, pgd);
else
set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
}
@@ -404,12 +398,6 @@ static inline struct page *pgd_page(pgd_t pgd)
#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
#define p4d_offset p4d_offset
-static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
-{
- if (pgtable_l5_enabled)
- return pgd_pgtable(*pgd) + p4d_index(address);
-
- return (p4d_t *)pgd;
-}
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address);
#endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 294044429e8e..bd24cf5fa2ac 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd)
static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
- *pmdp = pmd;
+ WRITE_ONCE(*pmdp, pmd);
}
static inline void pmd_clear(pmd_t *pmdp)
@@ -510,7 +510,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b)
*/
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
- *ptep = pteval;
+ WRITE_ONCE(*ptep, pteval);
}
void flush_icache_pte(pte_t pte);
@@ -544,19 +544,12 @@ static inline void pte_clear(struct mm_struct *mm,
__set_pte_at(ptep, __pte(0));
}
-#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
-static inline int ptep_set_access_flags(struct vm_area_struct *vma,
- unsigned long address, pte_t *ptep,
- pte_t entry, int dirty)
-{
- if (!pte_same(*ptep, entry))
- __set_pte_at(ptep, entry);
- /*
- * update_mmu_cache will unconditionally execute, handling both
- * the case that the PTE changed and the spurious fault case.
- */
- return true;
-}
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
+extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, pte_t entry, int dirty);
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */
+extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep);
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
@@ -569,16 +562,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
return pte;
}
-#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
-static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
- unsigned long address,
- pte_t *ptep)
-{
- if (!pte_young(*ptep))
- return 0;
- return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
-}
-
#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
unsigned long address, pte_t *ptep)
@@ -881,7 +864,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE_32 (_AC(0x80000000, UL))
#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
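
The set_pte()/set_pmd()/set_pud() changes in this file switch plain stores to WRITE_ONCE() so they pair with READ_ONCE()-based readers; lockless page-table walkers then never observe torn entries. For reference, the generic reader half of that pairing (from include/linux/pgtable.h, quoted here for context):

static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}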
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index b02119ff08fc..a8509cc31ab2 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -16,7 +16,7 @@
#ifdef CONFIG_64BIT
#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX TASK_SIZE_64
+#define STACK_TOP_MAX TASK_SIZE
#define arch_get_mmap_end(addr, len, flags) \
({ \
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 0892f4421bc4..6dd41ff3c66e 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -29,6 +29,7 @@ enum sbi_ext_id {
SBI_EXT_RFENCE = 0x52464E43,
SBI_EXT_HSM = 0x48534D,
SBI_EXT_SRST = 0x53525354,
+ SBI_EXT_SUSP = 0x53555350,
SBI_EXT_PMU = 0x504D55,
SBI_EXT_DBCN = 0x4442434E,
@@ -114,6 +115,14 @@ enum sbi_srst_reset_reason {
SBI_SRST_RESET_REASON_SYS_FAILURE,
};
+enum sbi_ext_susp_fid {
+ SBI_EXT_SUSP_SYSTEM_SUSPEND = 0,
+};
+
+enum sbi_ext_susp_sleep_type {
+ SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0,
+};
+
enum sbi_ext_pmu_fid {
SBI_EXT_PMU_NUM_COUNTERS = 0,
SBI_EXT_PMU_COUNTER_GET_INFO,
@@ -271,8 +280,13 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
unsigned long arg3, unsigned long arg4,
unsigned long arg5);
+#ifdef CONFIG_RISCV_SBI_V01
void sbi_console_putchar(int ch);
int sbi_console_getchar(void);
+#else
+static inline void sbi_console_putchar(int ch) { }
+static inline int sbi_console_getchar(void) { return -ENOENT; }
+#endif
long sbi_get_mvendorid(void);
long sbi_get_marchid(void);
long sbi_get_mimpid(void);
@@ -329,6 +343,11 @@ static inline unsigned long sbi_mk_version(unsigned long major,
}
int sbi_err_map_linux_errno(int err);
+
+extern bool sbi_debug_console_available;
+int sbi_debug_console_write(const char *bytes, unsigned int num_bytes);
+int sbi_debug_console_read(char *bytes, unsigned int num_bytes);
+
#else /* CONFIG_RISCV_SBI */
static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
static inline void sbi_init(void) {}
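
The sbi_debug_console_write()/read() declarations above are backed by the SBI Debug Console (DBCN) extension. A hedged sketch of the write path, following the SBI spec's argument layout (the committed implementation is in arch/riscv/kernel/sbi.c and may differ in detail):

/* Sketch, not the committed code: DBCN console write takes
 * (num_bytes, base_addr_lo, base_addr_hi) and returns bytes written. */
int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
{
	phys_addr_t pa = __pa(bytes);
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
			num_bytes, lower_32_bits(pa), upper_32_bits(pa),
			0, 0, 0);
	if (ret.error)
		return sbi_err_map_linux_errno(ret.error);

	return ret.value;
}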
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
index 32336e8a17cb..a393d5035c54 100644
--- a/arch/riscv/include/asm/sections.h
+++ b/arch/riscv/include/asm/sections.h
@@ -13,6 +13,7 @@ extern char _start_kernel[];
extern char __init_data_begin[], __init_data_end[];
extern char __init_text_begin[], __init_text_end[];
extern char __alt_start[], __alt_end[];
+extern char __exittext_begin[], __exittext_end[];
static inline bool is_va_kernel_text(uintptr_t va)
{
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index 1047a97ddbc8..5d473343634b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -28,7 +28,6 @@
#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
#define OVERFLOW_STACK_SIZE SZ_4K
-#define SHADOW_OVERFLOW_STACK_SIZE (1024)
#define IRQ_STACK_SIZE THREAD_SIZE
diff --git a/arch/riscv/include/asm/tlbbatch.h b/arch/riscv/include/asm/tlbbatch.h
new file mode 100644
index 000000000000..46014f70b9da
--- /dev/null
+++ b/arch/riscv/include/asm/tlbbatch.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#ifndef _ASM_RISCV_TLBBATCH_H
+#define _ASM_RISCV_TLBBATCH_H
+
+#include <linux/cpumask.h>
+
+struct arch_tlbflush_unmap_batch {
+ struct cpumask cpumask;
+};
+
+#endif /* _ASM_RISCV_TLBBATCH_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index 8f3418c5f172..9c8a67b1285e 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -46,6 +46,14 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end);
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end);
#endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm);
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr);
+void arch_flush_tlb_batched_pending(struct mm_struct *mm);
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
+
#else /* CONFIG_SMP && CONFIG_MMU */
#define flush_tlb_all() local_flush_tlb_all()
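
The four arch_tlbbatch_* hooks implement the generic batched-unmap contract driven from mm/rmap.c: per-page shootdowns are deferred during reclaim/migration and a single flush covers the whole batch. Roughly, the caller-side sequence looks like this (a comment-level sketch, not verbatim mm code):

/*
 * if (arch_tlbbatch_should_defer(mm))
 *         arch_tlbbatch_add_pending(&batch->arch, mm, uaddr);
 *
 * ... once all PTEs for the pass have been cleared ...
 *
 * arch_tlbbatch_flush(&batch->arch);  // one shootdown instead of N
 */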
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
index 7c086ac6ecd4..f3f031e34191 100644
--- a/arch/riscv/include/asm/word-at-a-time.h
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -9,6 +9,7 @@
#define _ASM_RISCV_WORD_AT_A_TIME_H
+#include <asm/asm-extable.h>
#include <linux/kernel.h>
struct word_at_a_time {
@@ -45,4 +46,30 @@ static inline unsigned long find_zero(unsigned long mask)
/* The mask we created is directly usable as a bytemask */
#define zero_bytemask(mask) (mask)
+#ifdef CONFIG_DCACHE_WORD_ACCESS
+
+/*
+ * Load an unaligned word from kernel space.
+ *
+ * In the (very unlikely) case of the word being a page-crosser
+ * and the next page not being mapped, take the exception and
+ * return zeroes in the non-existing part.
+ */
+static inline unsigned long load_unaligned_zeropad(const void *addr)
+{
+ unsigned long ret;
+
+ /* Load word from unaligned pointer addr */
+ asm(
+ "1: " REG_L " %0, %2\n"
+ "2:\n"
+ _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1)
+ : "=&r" (ret)
+ : "r" (addr), "m" (*(unsigned long *)addr));
+
+ return ret;
+}
+
+#endif /* CONFIG_DCACHE_WORD_ACCESS */
+
#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
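
The intended consumer is word-at-a-time string scanning, as in the name hashing in fs/dcache.c. A minimal sketch using the helpers from this header (word_strlen() is a hypothetical name, and the string base is assumed word-aligned):

static inline size_t word_strlen(const char *s)
{
	const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
	unsigned long data, mask;
	size_t len = 0;

	for (;;) {
		/* May read past the terminator, even across a page end:
		 * the extable fixup zero-fills the unmapped bytes. */
		data = load_unaligned_zeropad(s + len);
		if (has_zero(data, &mask, &constants)) {
			mask = prep_zero_mask(data, mask, &constants);
			mask = create_zero_mask(mask);
			return len + find_zero(mask);
		}
		len += sizeof(unsigned long);
	}
}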
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
index d4ffc3c37649..b65bf6306f69 100644
--- a/arch/riscv/include/asm/xip_fixup.h
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -13,7 +13,7 @@
add \reg, \reg, t0
.endm
.macro XIP_FIXUP_FLASH_OFFSET reg
- la t1, __data_loc
+ la t0, __data_loc
REG_L t1, _xip_phys_offset
sub \reg, \reg, t1
add \reg, \reg, t0
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index b659ffcfcdb4..9f2a8e3ff204 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -30,6 +30,35 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_EXT_ZBB (1 << 4)
#define RISCV_HWPROBE_EXT_ZBS (1 << 5)
#define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6)
+#define RISCV_HWPROBE_EXT_ZBC (1 << 7)
+#define RISCV_HWPROBE_EXT_ZBKB (1 << 8)
+#define RISCV_HWPROBE_EXT_ZBKC (1 << 9)
+#define RISCV_HWPROBE_EXT_ZBKX (1 << 10)
+#define RISCV_HWPROBE_EXT_ZKND (1 << 11)
+#define RISCV_HWPROBE_EXT_ZKNE (1 << 12)
+#define RISCV_HWPROBE_EXT_ZKNH (1 << 13)
+#define RISCV_HWPROBE_EXT_ZKSED (1 << 14)
+#define RISCV_HWPROBE_EXT_ZKSH (1 << 15)
+#define RISCV_HWPROBE_EXT_ZKT (1 << 16)
+#define RISCV_HWPROBE_EXT_ZVBB (1 << 17)
+#define RISCV_HWPROBE_EXT_ZVBC (1 << 18)
+#define RISCV_HWPROBE_EXT_ZVKB (1 << 19)
+#define RISCV_HWPROBE_EXT_ZVKG (1 << 20)
+#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21)
+#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22)
+#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23)
+#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24)
+#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25)
+#define RISCV_HWPROBE_EXT_ZVKT (1 << 26)
+#define RISCV_HWPROBE_EXT_ZFH (1 << 27)
+#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28)
+#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29)
+#define RISCV_HWPROBE_EXT_ZVFH (1 << 30)
+#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31)
+#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32)
+#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33)
+#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34)
+#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35)
#define RISCV_HWPROBE_KEY_CPUPERF_0 5
#define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0)
#define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0)
@@ -40,4 +69,7 @@ struct riscv_hwprobe {
#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6
/* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
+/* Flags */
+#define RISCV_HWPROBE_WHICH_CPUS (1 << 0)
+
#endif
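
RISCV_HWPROBE_WHICH_CPUS inverts the syscall's cpuset semantics: instead of "probe across these CPUs", the set becomes an input/output parameter and is narrowed to the CPUs that satisfy every requested pair. A hedged userspace sketch (raw syscall, no vDSO or libc wrapper assumed; *cpus must be prefilled with candidates, e.g. via sched_getaffinity()):

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

/* Which of the candidate CPUs implement Zbb? */
static long cpus_with_zbb(cpu_set_t *cpus)
{
	struct riscv_hwprobe pair = {
		.key   = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBB,
	};

	/* on return, *cpus holds only the CPUs matching the pair */
	return syscall(__NR_riscv_hwprobe, &pair, 1UL,
		       sizeof(cpu_set_t), (unsigned long *)cpus,
		       RISCV_HWPROBE_WHICH_CPUS);
}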
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 8c58595696b3..5a66432eb520 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -50,6 +50,7 @@ obj-y += setup.o
obj-y += signal.o
obj-y += syscall_table.o
obj-y += sys_riscv.o
+obj-y += sys_hwprobe.o
obj-y += time.o
obj-y += traps.o
obj-y += riscv_ksyms.o
diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c
index 457a18efcb11..28b58fc5ad19 100644
--- a/arch/riscv/kernel/cpu-hotplug.c
+++ b/arch/riscv/kernel/cpu-hotplug.c
@@ -18,7 +18,7 @@
bool cpu_has_hotplug(unsigned int cpu)
{
- if (cpu_ops[cpu]->cpu_stop)
+ if (cpu_ops->cpu_stop)
return true;
return false;
@@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu)
*/
int __cpu_disable(void)
{
- int ret = 0;
unsigned int cpu = smp_processor_id();
- if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
+ if (!cpu_ops->cpu_stop)
return -EOPNOTSUPP;
- if (cpu_ops[cpu]->cpu_disable)
- ret = cpu_ops[cpu]->cpu_disable(cpu);
-
- if (ret)
- return ret;
-
remove_cpu_topology(cpu);
numa_remove_cpu(cpu);
set_cpu_online(cpu, false);
riscv_ipi_disable();
irq_migrate_all_off_this_cpu();
- return ret;
+ return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
@@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
pr_notice("CPU%u: off\n", cpu);
/* Verify from the firmware if the cpu is really stopped */
- if (cpu_ops[cpu]->cpu_is_stopped)
- ret = cpu_ops[cpu]->cpu_is_stopped(cpu);
+ if (cpu_ops->cpu_is_stopped)
+ ret = cpu_ops->cpu_is_stopped(cpu);
if (ret)
pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
}
@@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void)
cpuhp_ap_report_dead();
- cpu_ops[smp_processor_id()]->cpu_stop();
+ cpu_ops->cpu_stop();
/* It should never reach here */
BUG();
}
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
index eb479a88a954..6a8bd8f4db07 100644
--- a/arch/riscv/kernel/cpu_ops.c
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -13,25 +13,21 @@
#include <asm/sbi.h>
#include <asm/smp.h>
-const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait;
extern const struct cpu_operations cpu_ops_sbi;
#ifndef CONFIG_RISCV_BOOT_SPINWAIT
const struct cpu_operations cpu_ops_spinwait = {
- .name = "",
- .cpu_prepare = NULL,
.cpu_start = NULL,
};
#endif
-void __init cpu_set_ops(int cpuid)
+void __init cpu_set_ops(void)
{
#if IS_ENABLED(CONFIG_RISCV_SBI)
if (sbi_probe_extension(SBI_EXT_HSM)) {
- if (!cpuid)
- pr_info("SBI HSM extension detected\n");
- cpu_ops[cpuid] = &cpu_ops_sbi;
- } else
+ pr_info("SBI HSM extension detected\n");
+ cpu_ops = &cpu_ops_sbi;
+ }
#endif
- cpu_ops[cpuid] = &cpu_ops_spinwait;
}
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
index efa0f0816634..1cc7df740edd 100644
--- a/arch/riscv/kernel/cpu_ops_sbi.c
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
return sbi_hsm_hart_start(hartid, boot_addr, hsm_data);
}
-static int sbi_cpu_prepare(unsigned int cpuid)
-{
- if (!cpu_ops_sbi.cpu_start) {
- pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
- return -ENODEV;
- }
- return 0;
-}
-
#ifdef CONFIG_HOTPLUG_CPU
-static int sbi_cpu_disable(unsigned int cpuid)
-{
- if (!cpu_ops_sbi.cpu_stop)
- return -EOPNOTSUPP;
- return 0;
-}
-
static void sbi_cpu_stop(void)
{
int ret;
@@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid)
#endif
const struct cpu_operations cpu_ops_sbi = {
- .name = "sbi",
- .cpu_prepare = sbi_cpu_prepare,
.cpu_start = sbi_cpu_start,
#ifdef CONFIG_HOTPLUG_CPU
- .cpu_disable = sbi_cpu_disable,
.cpu_stop = sbi_cpu_stop,
.cpu_is_stopped = sbi_cpu_is_stopped,
#endif
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
index d98d19226b5f..613872b0a21a 100644
--- a/arch/riscv/kernel/cpu_ops_spinwait.c
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid,
WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle);
}
-static int spinwait_cpu_prepare(unsigned int cpuid)
-{
- if (!cpu_ops_spinwait.cpu_start) {
- pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
- return -ENODEV;
- }
- return 0;
-}
-
static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
{
/*
@@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
}
const struct cpu_operations cpu_ops_spinwait = {
- .name = "spinwait",
- .cpu_prepare = spinwait_cpu_prepare,
.cpu_start = spinwait_cpu_start,
};
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index b3785ffc1570..e32591e9da90 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -70,7 +70,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
*
* NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
*/
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
{
const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
@@ -102,17 +102,101 @@ static bool riscv_isa_extension_check(int id)
return false;
}
return true;
+ case RISCV_ISA_EXT_INVALID:
+ return false;
}
return true;
}
-#define __RISCV_ISA_EXT_DATA(_name, _id) { \
- .name = #_name, \
- .property = #_name, \
- .id = _id, \
+#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) { \
+ .name = #_name, \
+ .property = #_name, \
+ .id = _id, \
+ .subset_ext_ids = _subset_exts, \
+ .subset_ext_size = _subset_exts_size \
}
+#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)
+
+/* Used to declare pure "lasso" extension (Zk for instance) */
+#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
+ _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))
+
+/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
+#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
+ _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
+
+static const unsigned int riscv_zk_bundled_exts[] = {
+ RISCV_ISA_EXT_ZBKB,
+ RISCV_ISA_EXT_ZBKC,
+ RISCV_ISA_EXT_ZBKX,
+ RISCV_ISA_EXT_ZKND,
+ RISCV_ISA_EXT_ZKNE,
+ RISCV_ISA_EXT_ZKR,
+ RISCV_ISA_EXT_ZKT,
+};
+
+static const unsigned int riscv_zkn_bundled_exts[] = {
+ RISCV_ISA_EXT_ZBKB,
+ RISCV_ISA_EXT_ZBKC,
+ RISCV_ISA_EXT_ZBKX,
+ RISCV_ISA_EXT_ZKND,
+ RISCV_ISA_EXT_ZKNE,
+ RISCV_ISA_EXT_ZKNH,
+};
+
+static const unsigned int riscv_zks_bundled_exts[] = {
+ RISCV_ISA_EXT_ZBKB,
+ RISCV_ISA_EXT_ZBKC,
+ RISCV_ISA_EXT_ZKSED,
+ RISCV_ISA_EXT_ZKSH
+};
+
+#define RISCV_ISA_EXT_ZVKN \
+ RISCV_ISA_EXT_ZVKNED, \
+ RISCV_ISA_EXT_ZVKNHB, \
+ RISCV_ISA_EXT_ZVKB, \
+ RISCV_ISA_EXT_ZVKT
+
+static const unsigned int riscv_zvkn_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKN
+};
+
+static const unsigned int riscv_zvknc_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKN,
+ RISCV_ISA_EXT_ZVBC
+};
+
+static const unsigned int riscv_zvkng_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKN,
+ RISCV_ISA_EXT_ZVKG
+};
+
+#define RISCV_ISA_EXT_ZVKS \
+ RISCV_ISA_EXT_ZVKSED, \
+ RISCV_ISA_EXT_ZVKSH, \
+ RISCV_ISA_EXT_ZVKB, \
+ RISCV_ISA_EXT_ZVKT
+
+static const unsigned int riscv_zvks_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKS
+};
+
+static const unsigned int riscv_zvksc_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKS,
+ RISCV_ISA_EXT_ZVBC
+};
+
+static const unsigned int riscv_zvksg_bundled_exts[] = {
+ RISCV_ISA_EXT_ZVKS,
+ RISCV_ISA_EXT_ZVKG
+};
+
+static const unsigned int riscv_zvbb_exts[] = {
+ RISCV_ISA_EXT_ZVKB
+};
+
/*
* The canonical order of ISA extension names in the ISA string is defined in
* chapter 27 of the unprivileged specification.
@@ -160,10 +244,6 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
- __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
- __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
- __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
- __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
@@ -172,11 +252,49 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
+ __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL),
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
+ __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS),
+ __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA),
+ __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH),
+ __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN),
__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
+ __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
+ __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
+ __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
+ __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
+ __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
+ __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
+ __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
+ __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
+ __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
+ __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
+ __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
+ __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
+ __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO),
+ __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
+ __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
+ __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH),
+ __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN),
+ __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
+ __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
+ __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
+ __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
+ __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
+ __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
+ __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
+ __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
+ __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
+ __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
+ __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
@@ -189,6 +307,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
+static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
+ const char *name_end, struct riscv_isainfo *isainfo)
+{
+ if ((name_end - name == strlen(ext->name)) &&
+ !strncasecmp(name, ext->name, name_end - name)) {
+ /*
+ * If this is a bundle, enable all the ISA extensions that
+ * comprise the bundle.
+ */
+ if (ext->subset_ext_size) {
+ for (int i = 0; i < ext->subset_ext_size; i++) {
+ if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
+ set_bit(ext->subset_ext_ids[i], isainfo->isa);
+ }
+ }
+
+ /*
+ * This is valid even for bundle extensions which use the RISCV_ISA_EXT_INVALID id
+ * (rejected by riscv_isa_extension_check()).
+ */
+ if (riscv_isa_extension_check(ext->id))
+ set_bit(ext->id, isainfo->isa);
+ }
+}
+
static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
unsigned long *isa2hwcap, const char *isa)
{
@@ -321,14 +464,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
if (*isa == '_')
++isa;
-#define SET_ISA_EXT_MAP(name, bit) \
- do { \
- if ((ext_end - ext == strlen(name)) && \
- !strncasecmp(ext, name, strlen(name)) && \
- riscv_isa_extension_check(bit)) \
- set_bit(bit, isainfo->isa); \
- } while (false) \
-
if (unlikely(ext_err))
continue;
if (!ext_long) {
@@ -340,10 +475,8 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
}
} else {
for (int i = 0; i < riscv_isa_ext_count; i++)
- SET_ISA_EXT_MAP(riscv_isa_ext[i].name,
- riscv_isa_ext[i].id);
+ match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
}
-#undef SET_ISA_EXT_MAP
}
}
@@ -442,18 +575,26 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
}
for (int i = 0; i < riscv_isa_ext_count; i++) {
+ const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
+
if (of_property_match_string(cpu_node, "riscv,isa-extensions",
- riscv_isa_ext[i].property) < 0)
+ ext->property) < 0)
continue;
- if (!riscv_isa_extension_check(riscv_isa_ext[i].id))
- continue;
+ if (ext->subset_ext_size) {
+ for (int j = 0; j < ext->subset_ext_size; j++) {
+ if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
+ set_bit(ext->subset_ext_ids[j], isainfo->isa);
+ }
+ }
- /* Only single letter extensions get set in hwcap */
- if (strnlen(riscv_isa_ext[i].name, 2) == 1)
- this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+ if (riscv_isa_extension_check(ext->id)) {
+ set_bit(ext->id, isainfo->isa);
- set_bit(riscv_isa_ext[i].id, isainfo->isa);
+ /* Only single letter extensions get set in hwcap */
+ if (strnlen(riscv_isa_ext[i].name, 2) == 1)
+ this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
+ }
}
of_node_put(cpu_node);
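
A worked example of the bundle/superset machinery added above, derived from the arrays earlier in this diff:

/*
 * Parsing "..._zks_..." finds a bundle whose id is
 * RISCV_ISA_EXT_INVALID (rejected by riscv_isa_extension_check()),
 * so match_isa_ext() sets only the member bits:
 *
 *	set_bit(RISCV_ISA_EXT_ZBKB,  isainfo->isa);
 *	set_bit(RISCV_ISA_EXT_ZBKC,  isainfo->isa);
 *	set_bit(RISCV_ISA_EXT_ZKSED, isainfo->isa);
 *	set_bit(RISCV_ISA_EXT_ZKSH,  isainfo->isa);
 *
 * The superset "zvbb", by contrast, sets RISCV_ISA_EXT_ZVBB itself
 * plus its subset RISCV_ISA_EXT_ZVKB.
 */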
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
index aa6209a74c83..b64bf1624a05 100644
--- a/arch/riscv/kernel/efi.c
+++ b/arch/riscv/kernel/efi.c
@@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
{
efi_memory_desc_t *md = data;
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
unsigned long val;
if (md->attribute & EFI_MEMORY_RO) {
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index b77397432403..4236a69c35cb 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -11,7 +11,6 @@
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/csr.h>
-#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
#include <asm/scs.h>
@@ -89,6 +88,7 @@ relocate_enable_mmu:
/* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT
la a1, satp_mode
+ XIP_FIXUP_OFFSET a1
REG_L a1, 0(a1)
or a2, a2, a1
@@ -154,7 +154,6 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a3
add a3, a3, a1
REG_L sp, (a3)
- scs_load_current
.Lsecondary_start_common:
@@ -165,6 +164,7 @@ secondary_start_sbi:
call relocate_enable_mmu
#endif
call .Lsetup_trap_vector
+ scs_load_current
tail smp_callin
#endif /* CONFIG_SMP */
@@ -265,10 +265,12 @@ SYM_CODE_START(_start_kernel)
la sp, _end + THREAD_SIZE
XIP_FIXUP_OFFSET sp
mv s0, a0
+ mv s1, a1
call __copy_data
- /* Restore a0 copy */
+ /* Restore a0 & a1 copy */
mv a0, s0
+ mv a1, s1
#endif
#ifndef CONFIG_XIP_KERNEL
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 58dd96a2a153..79dc81223238 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -3,12 +3,12 @@
#include <linux/init.h>
#include <linux/linkage.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
-#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index b4dd9ed6849e..d7ec69ac6910 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -4,12 +4,12 @@
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/csr.h>
#include <asm/unistd.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
-#include <asm-generic/export.h>
#include <asm/ftrace.h>
.text
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index 56a8c78e9e21..c9d59a5448b6 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -40,15 +40,6 @@ struct relocation_handlers {
long buffer);
};
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations);
-void process_accumulated_relocations(struct module *me);
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
- unsigned int hashtable_bits, Elf_Addr v);
-
-struct hlist_head *relocation_hashtable;
-
-struct list_head used_buckets_list;
-
/*
* The auipc+jalr instruction pair can reach any PC-relative offset
* in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -64,7 +55,7 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
static int riscv_insn_rmw(void *location, u32 keep, u32 set)
{
- u16 *parcel = location;
+ __le16 *parcel = location;
u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
insn &= keep;
@@ -77,7 +68,7 @@ static int riscv_insn_rmw(void *location, u32 keep, u32 set)
static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
{
- u16 *parcel = location;
+ __le16 *parcel = location;
u16 insn = le16_to_cpu(*parcel);
insn &= keep;
@@ -604,7 +595,10 @@ static const struct relocation_handlers reloc_handlers[] = {
/* 192-255 nonstandard ABI extensions */
};
-void process_accumulated_relocations(struct module *me)
+static void
+process_accumulated_relocations(struct module *me,
+ struct hlist_head **relocation_hashtable,
+ struct list_head *used_buckets_list)
{
/*
* Only ADD/SUB/SET/ULEB128 should end up here.
@@ -624,18 +618,25 @@ void process_accumulated_relocations(struct module *me)
* - Each relocation entry for a location address
*/
struct used_bucket *bucket_iter;
+ struct used_bucket *bucket_iter_tmp;
struct relocation_head *rel_head_iter;
+ struct hlist_node *rel_head_iter_tmp;
struct relocation_entry *rel_entry_iter;
+ struct relocation_entry *rel_entry_iter_tmp;
int curr_type;
void *location;
long buffer;
- list_for_each_entry(bucket_iter, &used_buckets_list, head) {
- hlist_for_each_entry(rel_head_iter, bucket_iter->bucket, node) {
+ list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+ used_buckets_list, head) {
+ hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+ bucket_iter->bucket, node) {
buffer = 0;
location = rel_head_iter->location;
- list_for_each_entry(rel_entry_iter,
- rel_head_iter->rel_entry, head) {
+ list_for_each_entry_safe(rel_entry_iter,
+ rel_entry_iter_tmp,
+ rel_head_iter->rel_entry,
+ head) {
curr_type = rel_entry_iter->type;
reloc_handlers[curr_type].reloc_handler(
me, &buffer, rel_entry_iter->value);
@@ -648,11 +649,14 @@ void process_accumulated_relocations(struct module *me)
kfree(bucket_iter);
}
- kfree(relocation_hashtable);
+ kfree(*relocation_hashtable);
}
-int add_relocation_to_accumulate(struct module *me, int type, void *location,
- unsigned int hashtable_bits, Elf_Addr v)
+static int add_relocation_to_accumulate(struct module *me, int type,
+ void *location,
+ unsigned int hashtable_bits, Elf_Addr v,
+ struct hlist_head *relocation_hashtable,
+ struct list_head *used_buckets_list)
{
struct relocation_entry *entry;
struct relocation_head *rel_head;
@@ -661,6 +665,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
unsigned long hash;
entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return -ENOMEM;
+
INIT_LIST_HEAD(&entry->head);
entry->type = type;
entry->value = v;
@@ -669,7 +677,10 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
current_head = &relocation_hashtable[hash];
- /* Find matching location (if any) */
+ /*
+ * Search for the relocation_head for the relocations that happen at the
+ * provided location
+ */
bool found = false;
struct relocation_head *rel_head_iter;
@@ -681,19 +692,45 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
}
}
+ /*
+ * If there has not yet been any relocations at the provided location,
+ * create a relocation_head for that location and populate it with this
+ * relocation_entry.
+ */
if (!found) {
rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+ if (!rel_head) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+
rel_head->rel_entry =
kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+ if (!rel_head->rel_entry) {
+ kfree(entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(rel_head->rel_entry);
rel_head->location = location;
INIT_HLIST_NODE(&rel_head->node);
if (!current_head->first) {
bucket =
kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+ if (!bucket) {
+ kfree(entry);
+ kfree(rel_head->rel_entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+
INIT_LIST_HEAD(&bucket->head);
bucket->bucket = current_head;
- list_add(&bucket->head, &used_buckets_list);
+ list_add(&bucket->head, used_buckets_list);
}
hlist_add_head(&rel_head->node, current_head);
}
@@ -704,10 +741,16 @@ int add_relocation_to_accumulate(struct module *me, int type, void *location,
return 0;
}
-unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
+static unsigned int
+initialize_relocation_hashtable(unsigned int num_relocations,
+ struct hlist_head **relocation_hashtable)
{
/* Can safely assume that bits is not greater than sizeof(long) */
unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
+ /*
+ * When hashtable_size == 1, hashtable_bits == 0.
+ * This is valid because the hashing algorithm returns 0 in this case.
+ */
unsigned int hashtable_bits = ilog2(hashtable_size);
/*
@@ -720,12 +763,13 @@ unsigned int initialize_relocation_hashtable(unsigned int num_relocations)
hashtable_size <<= should_double_size;
- relocation_hashtable = kmalloc_array(hashtable_size,
- sizeof(*relocation_hashtable),
- GFP_KERNEL);
- __hash_init(relocation_hashtable, hashtable_size);
+ *relocation_hashtable = kmalloc_array(hashtable_size,
+ sizeof(**relocation_hashtable),
+ GFP_KERNEL);
+ if (!*relocation_hashtable)
+ return 0;
- INIT_LIST_HEAD(&used_buckets_list);
+ __hash_init(*relocation_hashtable, hashtable_size);
return hashtable_bits;
}
@@ -742,7 +786,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
Elf_Addr v;
int res;
unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
- unsigned int hashtable_bits = initialize_relocation_hashtable(num_relocations);
+ struct hlist_head *relocation_hashtable;
+ struct list_head used_buckets_list;
+ unsigned int hashtable_bits;
+
+ hashtable_bits = initialize_relocation_hashtable(num_relocations,
+ &relocation_hashtable);
+
+ if (!relocation_hashtable)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&used_buckets_list);
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
@@ -823,14 +877,18 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
if (reloc_handlers[type].accumulate_handler)
- res = add_relocation_to_accumulate(me, type, location, hashtable_bits, v);
+ res = add_relocation_to_accumulate(me, type, location,
+ hashtable_bits, v,
+ relocation_hashtable,
+ &used_buckets_list);
else
res = handler(me, location, v);
if (res)
return res;
}
- process_accumulated_relocations(me);
+ process_accumulated_relocations(me, &relocation_hashtable,
+ &used_buckets_list);
return 0;
}
@@ -840,7 +898,8 @@ void *module_alloc(unsigned long size)
{
return __vmalloc_node_range(size, 1, MODULES_VADDR,
MODULES_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
+ PAGE_KERNEL, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE,
__builtin_return_address(0));
}
#endif
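The switch to the _safe iterators above is what lets process_accumulated_relocations() kfree() each relocation entry, relocation head and bucket while it is still walking the lists. A minimal sketch of the pattern, using the kernel list API (the struct and function names here are illustrative, not part of the patch):

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head head;
};

/*
 * The _safe variant caches the next node in 'tmp' before the loop body
 * runs, so freeing the current node does not break the traversal.
 */
static void free_all(struct list_head *items)
{
	struct item *it, *tmp;

	list_for_each_entry_safe(it, tmp, items, head) {
		list_del(&it->head);
		kfree(it);
	}
}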
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
index 13ee7bf589a1..37e87fdcf6a0 100644
--- a/arch/riscv/kernel/patch.c
+++ b/arch/riscv/kernel/patch.c
@@ -14,6 +14,7 @@
#include <asm/fixmap.h>
#include <asm/ftrace.h>
#include <asm/patch.h>
+#include <asm/sections.h>
struct patch_insn {
void *addr;
@@ -25,6 +26,14 @@ struct patch_insn {
int riscv_patch_in_stop_machine = false;
#ifdef CONFIG_MMU
+
+static inline bool is_kernel_exittext(uintptr_t addr)
+{
+ return system_state < SYSTEM_RUNNING &&
+ addr >= (uintptr_t)__exittext_begin &&
+ addr < (uintptr_t)__exittext_end;
+}
+
/*
* The fix_to_virt(, idx) needs a const value (not a dynamic variable of
* reg-a0) or BUILD_BUG_ON failed with "idx >= __end_of_fixed_addresses".
@@ -35,7 +44,7 @@ static __always_inline void *patch_map(void *addr, const unsigned int fixmap)
uintptr_t uintaddr = (uintptr_t) addr;
struct page *page;
- if (core_kernel_text(uintaddr))
+ if (core_kernel_text(uintaddr) || is_kernel_exittext(uintaddr))
page = phys_to_page(__pa_symbol(addr));
else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
page = vmalloc_to_page(addr);
diff --git a/arch/riscv/kernel/pi/cmdline_early.c b/arch/riscv/kernel/pi/cmdline_early.c
index 68e786c84c94..f6d4dedffb84 100644
--- a/arch/riscv/kernel/pi/cmdline_early.c
+++ b/arch/riscv/kernel/pi/cmdline_early.c
@@ -38,8 +38,7 @@ static char *get_early_cmdline(uintptr_t dtb_pa)
if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) ||
IS_ENABLED(CONFIG_CMDLINE_FORCE) ||
fdt_cmdline_size == 0 /* CONFIG_CMDLINE_FALLBACK */) {
- strncat(early_cmdline, CONFIG_CMDLINE,
- COMMAND_LINE_SIZE - fdt_cmdline_size);
+ strlcat(early_cmdline, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
}
return early_cmdline;
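The strncat() call was subtly wrong because its size argument bounds how many bytes are appended from the source, not the total size of the destination, so the destination could still overflow; strlcat() takes the full destination size and always NUL-terminates. A small userspace illustration (assumes strlcat() is available, e.g. glibc >= 2.38 or libbsd):

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[16] = "console=";

	/* Truncates against the *total* buffer size and NUL-terminates. */
	strlcat(buf, "ttyS0 earlycon keep_bootcon", sizeof(buf));
	printf("%s\n", buf);	/* prints "console=ttyS0 e" */
	return 0;
}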
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index 5a62ed1da453..e66e0999a800 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/init.h>
+#include <linux/mm.h>
#include <linux/pm.h>
#include <linux/reboot.h>
#include <asm/sbi.h>
@@ -571,6 +572,66 @@ long sbi_get_mimpid(void)
}
EXPORT_SYMBOL_GPL(sbi_get_mimpid);
+bool sbi_debug_console_available;
+
+int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
+{
+ phys_addr_t base_addr;
+ struct sbiret ret;
+
+ if (!sbi_debug_console_available)
+ return -EOPNOTSUPP;
+
+ if (is_vmalloc_addr(bytes))
+ base_addr = page_to_phys(vmalloc_to_page(bytes)) +
+ offset_in_page(bytes);
+ else
+ base_addr = __pa(bytes);
+ if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
+ num_bytes = PAGE_SIZE - offset_in_page(bytes);
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
+ num_bytes, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
+ num_bytes, base_addr, 0, 0, 0, 0);
+
+ if (ret.error == SBI_ERR_FAILURE)
+ return -EIO;
+ return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
+}
+
+int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
+{
+ phys_addr_t base_addr;
+ struct sbiret ret;
+
+ if (!sbi_debug_console_available)
+ return -EOPNOTSUPP;
+
+ if (is_vmalloc_addr(bytes))
+ base_addr = page_to_phys(vmalloc_to_page(bytes)) +
+ offset_in_page(bytes);
+ else
+ base_addr = __pa(bytes);
+ if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
+ num_bytes = PAGE_SIZE - offset_in_page(bytes);
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
+ num_bytes, lower_32_bits(base_addr),
+ upper_32_bits(base_addr), 0, 0, 0);
+ else
+ ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
+ num_bytes, base_addr, 0, 0, 0, 0);
+
+ if (ret.error == SBI_ERR_FAILURE)
+ return -EIO;
+ return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
+}
+
void __init sbi_init(void)
{
int ret;
@@ -612,6 +673,11 @@ void __init sbi_init(void)
sbi_srst_reboot_nb.priority = 192;
register_restart_handler(&sbi_srst_reboot_nb);
}
+ if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
+ (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
+ pr_info("SBI DBCN extension detected\n");
+ sbi_debug_console_available = true;
+ }
} else {
__sbi_set_timer = __sbi_set_timer_v01;
__sbi_send_ipi = __sbi_send_ipi_v01;
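Note that sbi_debug_console_write() clamps each request at the end of the containing page, since the ecall takes a single physical address range, and on success returns how many bytes the SBI implementation actually consumed. A caller that wants a whole buffer out therefore loops; a hypothetical wrapper (dbcn_puts() is not part of the patch):

static void dbcn_puts(const char *s, unsigned int len)
{
	while (len) {
		int ret = sbi_debug_console_write(s, len);

		if (ret <= 0)
			break;	/* unsupported, I/O error, or no progress */
		s += ret;
		len -= ret;
	}
}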
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index 535a837de55d..2bf882804624 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -26,7 +26,6 @@
#include <asm/alternative.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
-#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
#include <asm/setup.h>
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 5d69f4db9e8f..501e66debf69 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -94,7 +94,7 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec)
err = __copy_to_user(&state->v_state, &current->thread.vstate,
offsetof(struct __riscv_v_ext_state, datap));
/* Copy the pointer datap itself. */
- err |= __put_user(datap, &state->v_state.datap);
+ err |= __put_user((__force void *)datap, &state->v_state.datap);
/* Copy the whole vector content to user space datap. */
err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize);
/* Copy magic to the user space after saving all vector context */
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
index 40420afbb1a0..45dd4035416e 100644
--- a/arch/riscv/kernel/smp.c
+++ b/arch/riscv/kernel/smp.c
@@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs)
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_has_hotplug(cpu))
- cpu_ops[cpu]->cpu_stop();
+ cpu_ops->cpu_stop();
#endif
for(;;)
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index d162bf339beb..519b6bd946e5 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
int cpuid;
- int ret;
unsigned int curr_cpuid;
init_cpu_topology();
@@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
for_each_possible_cpu(cpuid) {
if (cpuid == curr_cpuid)
continue;
- if (cpu_ops[cpuid]->cpu_prepare) {
- ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
- if (ret)
- continue;
- }
set_cpu_present(cpuid, true);
numa_store_cpu_info(cpuid);
}
@@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un
static void __init acpi_parse_and_init_cpus(void)
{
- int cpuid;
-
- cpu_set_ops(0);
-
acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0);
-
- for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
- if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
- cpu_set_ops(cpuid);
- set_cpu_possible(cpuid, true);
- }
- }
}
#else
#define acpi_parse_and_init_cpus(...) do { } while (0)
@@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void)
int cpuid = 1;
int rc;
- cpu_set_ops(0);
-
for_each_of_cpu_node(dn) {
rc = riscv_early_of_processor_hartid(dn, &hart);
if (rc < 0)
@@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void)
if (cpuid > nr_cpu_ids)
pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
cpuid, nr_cpu_ids);
-
- for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
- if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
- cpu_set_ops(cpuid);
- set_cpu_possible(cpuid, true);
- }
- }
}
void __init setup_smp(void)
{
+ int cpuid;
+
+ cpu_set_ops();
+
if (acpi_disabled)
of_parse_and_init_cpus();
else
acpi_parse_and_init_cpus();
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++)
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID)
+ set_cpu_possible(cpuid, true);
}
static int start_secondary_cpu(int cpu, struct task_struct *tidle)
{
- if (cpu_ops[cpu]->cpu_start)
- return cpu_ops[cpu]->cpu_start(cpu, tidle);
+ if (cpu_ops->cpu_start)
+ return cpu_ops->cpu_start(cpu, tidle);
return -EOPNOTSUPP;
}
diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c
index 3c89b8ec69c4..239509367e42 100644
--- a/arch/riscv/kernel/suspend.c
+++ b/arch/riscv/kernel/suspend.c
@@ -4,8 +4,12 @@
* Copyright (c) 2022 Ventana Micro Systems Inc.
*/
+#define pr_fmt(fmt) "suspend: " fmt
+
#include <linux/ftrace.h>
+#include <linux/suspend.h>
#include <asm/csr.h>
+#include <asm/sbi.h>
#include <asm/suspend.h>
void suspend_save_csrs(struct suspend_context *context)
@@ -85,3 +89,43 @@ int cpu_suspend(unsigned long arg,
return rc;
}
+
+#ifdef CONFIG_RISCV_SBI
+static int sbi_system_suspend(unsigned long sleep_type,
+ unsigned long resume_addr,
+ unsigned long opaque)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
+ sleep_type, resume_addr, opaque, 0, 0, 0);
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+
+ return ret.value;
+}
+
+static int sbi_system_suspend_enter(suspend_state_t state)
+{
+ return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
+}
+
+static const struct platform_suspend_ops sbi_system_suspend_ops = {
+ .valid = suspend_valid_only_mem,
+ .enter = sbi_system_suspend_enter,
+};
+
+static int __init sbi_system_suspend_init(void)
+{
+ if (sbi_spec_version >= sbi_mk_version(2, 0) &&
+ sbi_probe_extension(SBI_EXT_SUSP) > 0) {
+ pr_info("SBI SUSP extension detected\n");
+ if (IS_ENABLED(CONFIG_SUSPEND))
+ suspend_set_ops(&sbi_system_suspend_ops);
+ }
+
+ return 0;
+}
+
+arch_initcall(sbi_system_suspend_init);
+#endif /* CONFIG_RISCV_SBI */
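With the ops registered, the ordinary suspend-to-RAM path reaches the SBI SUSP call; a hypothetical userspace trigger is just the usual sysfs write (requires root and firmware implementing the SUSP extension):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/power/state", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Execution resumes here after wakeup. */
	if (write(fd, "mem", 3) != 3)
		return 1;
	close(fd);
	return 0;
}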
diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c
new file mode 100644
index 000000000000..a7c56b41efd2
--- /dev/null
+++ b/arch/riscv/kernel/sys_hwprobe.c
@@ -0,0 +1,411 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * The hwprobe interface, for allowing userspace to probe to see which features
+ * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
+ * more details.
+ */
+#include <linux/syscalls.h>
+#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
+#include <asm/hwprobe.h>
+#include <asm/sbi.h>
+#include <asm/switch_to.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/vector.h>
+#include <vdso/vsyscall.h>
+
+static void hwprobe_arch_id(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ u64 id = -1ULL;
+ bool first = true;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ u64 cpu_id;
+
+ switch (pair->key) {
+ case RISCV_HWPROBE_KEY_MVENDORID:
+ cpu_id = riscv_cached_mvendorid(cpu);
+ break;
+ case RISCV_HWPROBE_KEY_MIMPID:
+ cpu_id = riscv_cached_mimpid(cpu);
+ break;
+ case RISCV_HWPROBE_KEY_MARCHID:
+ cpu_id = riscv_cached_marchid(cpu);
+ break;
+ }
+
+ if (first) {
+ id = cpu_id;
+ first = false;
+ }
+
+ /*
+ * If there's a mismatch for the given set, return -1 in the
+ * value.
+ */
+ if (id != cpu_id) {
+ id = -1ULL;
+ break;
+ }
+ }
+
+ pair->value = id;
+}
+
+static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ int cpu;
+ u64 missing = 0;
+
+ pair->value = 0;
+ if (has_fpu())
+ pair->value |= RISCV_HWPROBE_IMA_FD;
+
+ if (riscv_isa_extension_available(NULL, c))
+ pair->value |= RISCV_HWPROBE_IMA_C;
+
+ if (has_vector())
+ pair->value |= RISCV_HWPROBE_IMA_V;
+
+ /*
+ * Loop through and record extensions that 1) anyone has, and 2) anyone
+ * doesn't have.
+ */
+ for_each_cpu(cpu, cpus) {
+ struct riscv_isainfo *isainfo = &hart_isa[cpu];
+
+#define EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_EXT_##ext; \
+ } while (false)
+
+ /*
+ * Only use EXT_KEY() for extensions which can be exposed to userspace,
+ * regardless of the kernel's configuration, as no other checks, besides
+ * presence in the hart_isa bitmap, are made.
+ */
+ EXT_KEY(ZBA);
+ EXT_KEY(ZBB);
+ EXT_KEY(ZBS);
+ EXT_KEY(ZICBOZ);
+ EXT_KEY(ZBC);
+
+ EXT_KEY(ZBKB);
+ EXT_KEY(ZBKC);
+ EXT_KEY(ZBKX);
+ EXT_KEY(ZKND);
+ EXT_KEY(ZKNE);
+ EXT_KEY(ZKNH);
+ EXT_KEY(ZKSED);
+ EXT_KEY(ZKSH);
+ EXT_KEY(ZKT);
+ EXT_KEY(ZIHINTNTL);
+ EXT_KEY(ZTSO);
+ EXT_KEY(ZACAS);
+ EXT_KEY(ZICOND);
+
+ if (has_vector()) {
+ EXT_KEY(ZVBB);
+ EXT_KEY(ZVBC);
+ EXT_KEY(ZVKB);
+ EXT_KEY(ZVKG);
+ EXT_KEY(ZVKNED);
+ EXT_KEY(ZVKNHA);
+ EXT_KEY(ZVKNHB);
+ EXT_KEY(ZVKSED);
+ EXT_KEY(ZVKSH);
+ EXT_KEY(ZVKT);
+ EXT_KEY(ZVFH);
+ EXT_KEY(ZVFHMIN);
+ }
+
+ if (has_fpu()) {
+ EXT_KEY(ZFH);
+ EXT_KEY(ZFHMIN);
+ EXT_KEY(ZFA);
+ }
+#undef EXT_KEY
+ }
+
+ /* Now turn off reporting features if any CPU is missing it. */
+ pair->value &= ~missing;
+}
+
+static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
+{
+ struct riscv_hwprobe pair;
+
+ hwprobe_isa_ext0(&pair, cpus);
+ return (pair.value & ext);
+}
+
+static u64 hwprobe_misaligned(const struct cpumask *cpus)
+{
+ int cpu;
+ u64 perf = -1ULL;
+
+ for_each_cpu(cpu, cpus) {
+ int this_perf = per_cpu(misaligned_access_speed, cpu);
+
+ if (perf == -1ULL)
+ perf = this_perf;
+
+ if (perf != this_perf) {
+ perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+ break;
+ }
+ }
+
+ if (perf == -1ULL)
+ return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+ return perf;
+}
+
+static void hwprobe_one_pair(struct riscv_hwprobe *pair,
+ const struct cpumask *cpus)
+{
+ switch (pair->key) {
+ case RISCV_HWPROBE_KEY_MVENDORID:
+ case RISCV_HWPROBE_KEY_MARCHID:
+ case RISCV_HWPROBE_KEY_MIMPID:
+ hwprobe_arch_id(pair, cpus);
+ break;
+ /*
+ * The kernel already assumes that the base single-letter ISA
+ * extensions are supported on all harts, and only supports the
+ * IMA base, so just cheat a bit here and tell that to
+ * userspace.
+ */
+ case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
+ pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
+ break;
+
+ case RISCV_HWPROBE_KEY_IMA_EXT_0:
+ hwprobe_isa_ext0(pair, cpus);
+ break;
+
+ case RISCV_HWPROBE_KEY_CPUPERF_0:
+ pair->value = hwprobe_misaligned(cpus);
+ break;
+
+ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
+ pair->value = riscv_cboz_block_size;
+ break;
+
+ /*
+ * For forward compatibility, unknown keys don't fail the whole
+ * call, but get their element key set to -1 and value set to 0
+ * indicating they're unrecognized.
+ */
+ default:
+ pair->key = -1;
+ pair->value = 0;
+ break;
+ }
+}
+
+static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ size_t out;
+ int ret;
+ cpumask_t cpus;
+
+ /* Check the reserved flags. */
+ if (flags != 0)
+ return -EINVAL;
+
+ /*
+ * The interface supports taking in a CPU mask, and returns values that
+ * are consistent across that mask. Allow userspace to specify NULL and
+ * 0 as a shortcut to all online CPUs.
+ */
+ cpumask_clear(&cpus);
+ if (!cpusetsize && !cpus_user) {
+ cpumask_copy(&cpus, cpu_online_mask);
+ } else {
+ if (cpusetsize > cpumask_size())
+ cpusetsize = cpumask_size();
+
+ ret = copy_from_user(&cpus, cpus_user, cpusetsize);
+ if (ret)
+ return -EFAULT;
+
+ /*
+ * Userspace must provide at least one online CPU, without that
+ * there's no way to define what is supported.
+ */
+ cpumask_and(&cpus, &cpus, cpu_online_mask);
+ if (cpumask_empty(&cpus))
+ return -EINVAL;
+ }
+
+ for (out = 0; out < pair_count; out++, pairs++) {
+ struct riscv_hwprobe pair;
+
+ if (get_user(pair.key, &pairs->key))
+ return -EFAULT;
+
+ pair.value = 0;
+ hwprobe_one_pair(&pair, &cpus);
+ ret = put_user(pair.key, &pairs->key);
+ if (ret == 0)
+ ret = put_user(pair.value, &pairs->value);
+
+ if (ret)
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ cpumask_t cpus, one_cpu;
+ bool clear_all = false;
+ size_t i;
+ int ret;
+
+ if (flags != RISCV_HWPROBE_WHICH_CPUS)
+ return -EINVAL;
+
+ if (!cpusetsize || !cpus_user)
+ return -EINVAL;
+
+ if (cpusetsize > cpumask_size())
+ cpusetsize = cpumask_size();
+
+ ret = copy_from_user(&cpus, cpus_user, cpusetsize);
+ if (ret)
+ return -EFAULT;
+
+ if (cpumask_empty(&cpus))
+ cpumask_copy(&cpus, cpu_online_mask);
+
+ cpumask_and(&cpus, &cpus, cpu_online_mask);
+
+ cpumask_clear(&one_cpu);
+
+ for (i = 0; i < pair_count; i++) {
+ struct riscv_hwprobe pair, tmp;
+ int cpu;
+
+ ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
+ if (ret)
+ return -EFAULT;
+
+ if (!riscv_hwprobe_key_is_valid(pair.key)) {
+ clear_all = true;
+ pair = (struct riscv_hwprobe){ .key = -1, };
+ ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
+ if (ret)
+ return -EFAULT;
+ }
+
+ if (clear_all)
+ continue;
+
+ tmp = (struct riscv_hwprobe){ .key = pair.key, };
+
+ for_each_cpu(cpu, &cpus) {
+ cpumask_set_cpu(cpu, &one_cpu);
+
+ hwprobe_one_pair(&tmp, &one_cpu);
+
+ if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
+ cpumask_clear_cpu(cpu, &cpus);
+
+ cpumask_clear_cpu(cpu, &one_cpu);
+ }
+ }
+
+ if (clear_all)
+ cpumask_clear(&cpus);
+
+ ret = copy_to_user(cpus_user, &cpus, cpusetsize);
+ if (ret)
+ return -EFAULT;
+
+ return 0;
+}
+
+static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
+ size_t pair_count, size_t cpusetsize,
+ unsigned long __user *cpus_user,
+ unsigned int flags)
+{
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+
+ return hwprobe_get_values(pairs, pair_count, cpusetsize,
+ cpus_user, flags);
+}
+
+#ifdef CONFIG_MMU
+
+static int __init init_hwprobe_vdso_data(void)
+{
+ struct vdso_data *vd = __arch_get_k_vdso_data();
+ struct arch_vdso_data *avd = &vd->arch_data;
+ u64 id_bitsmash = 0;
+ struct riscv_hwprobe pair;
+ int key;
+
+ /*
+ * Initialize vDSO data with the answers for the "all CPUs" case, to
+ * save a syscall in the common case.
+ */
+ for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
+ pair.key = key;
+ hwprobe_one_pair(&pair, cpu_online_mask);
+
+ WARN_ON_ONCE(pair.key < 0);
+
+ avd->all_cpu_hwprobe_values[key] = pair.value;
+ /*
+ * Smash together the vendor, arch, and impl IDs to see if
+ * they're all 0 or any negative.
+ */
+ if (key <= RISCV_HWPROBE_KEY_MIMPID)
+ id_bitsmash |= pair.value;
+ }
+
+ /*
+ * If the arch, vendor, and implementation ID are all the same across
+ * all harts, then assume all CPUs are the same, and allow the vDSO to
+ * answer queries for arbitrary masks. However if all values are 0 (not
+ * populated) or any value returns -1 (varies across CPUs), then the
+ * vDSO should defer to the kernel for exotic cpu masks.
+ */
+ avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
+ return 0;
+}
+
+arch_initcall_sync(init_hwprobe_vdso_data);
+
+#endif /* CONFIG_MMU */
+
+SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
+ size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
+ cpus, unsigned int, flags)
+{
+ return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
+ cpus, flags);
+}
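As the comment at the top of the file says, the interface is documented in Documentation/arch/riscv/hwprobe.rst. A hedged userspace sketch of the plain value-query mode, passing the NULL/0 cpu-set shortcut for "all online CPUs" (assumes the uapi header and syscall number are exported to userspace):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
	};

	/* cpusetsize == 0 && cpus == NULL: query all online CPUs. */
	if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
		return 1;

	printf("base behavior: 0x%llx, IMA ext0: 0x%llx\n",
	       (unsigned long long)pairs[0].value,
	       (unsigned long long)pairs[1].value);
	return 0;
}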
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index c712037dbe10..f1c1416a9f1e 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -7,15 +7,7 @@
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
-#include <asm/cpufeature.h>
-#include <asm/hwprobe.h>
-#include <asm/sbi.h>
-#include <asm/vector.h>
-#include <asm/switch_to.h>
-#include <asm/uaccess.h>
-#include <asm/unistd.h>
#include <asm-generic/mman-common.h>
-#include <vdso/vsyscall.h>
static long riscv_sys_mmap(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
@@ -77,283 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
return 0;
}
-/*
- * The hwprobe interface, for allowing userspace to probe to see which features
- * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for more
- * details.
- */
-static void hwprobe_arch_id(struct riscv_hwprobe *pair,
- const struct cpumask *cpus)
-{
- u64 id = -1ULL;
- bool first = true;
- int cpu;
-
- for_each_cpu(cpu, cpus) {
- u64 cpu_id;
-
- switch (pair->key) {
- case RISCV_HWPROBE_KEY_MVENDORID:
- cpu_id = riscv_cached_mvendorid(cpu);
- break;
- case RISCV_HWPROBE_KEY_MIMPID:
- cpu_id = riscv_cached_mimpid(cpu);
- break;
- case RISCV_HWPROBE_KEY_MARCHID:
- cpu_id = riscv_cached_marchid(cpu);
- break;
- }
-
- if (first) {
- id = cpu_id;
- first = false;
- }
-
- /*
- * If there's a mismatch for the given set, return -1 in the
- * value.
- */
- if (id != cpu_id) {
- id = -1ULL;
- break;
- }
- }
-
- pair->value = id;
-}
-
-static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
- const struct cpumask *cpus)
-{
- int cpu;
- u64 missing = 0;
-
- pair->value = 0;
- if (has_fpu())
- pair->value |= RISCV_HWPROBE_IMA_FD;
-
- if (riscv_isa_extension_available(NULL, c))
- pair->value |= RISCV_HWPROBE_IMA_C;
-
- if (has_vector())
- pair->value |= RISCV_HWPROBE_IMA_V;
-
- /*
- * Loop through and record extensions that 1) anyone has, and 2) anyone
- * doesn't have.
- */
- for_each_cpu(cpu, cpus) {
- struct riscv_isainfo *isainfo = &hart_isa[cpu];
-
-#define EXT_KEY(ext) \
- do { \
- if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
- pair->value |= RISCV_HWPROBE_EXT_##ext; \
- else \
- missing |= RISCV_HWPROBE_EXT_##ext; \
- } while (false)
-
- /*
- * Only use EXT_KEY() for extensions which can be exposed to userspace,
- * regardless of the kernel's configuration, as no other checks, besides
- * presence in the hart_isa bitmap, are made.
- */
- EXT_KEY(ZBA);
- EXT_KEY(ZBB);
- EXT_KEY(ZBS);
- EXT_KEY(ZICBOZ);
-#undef EXT_KEY
- }
-
- /* Now turn off reporting features if any CPU is missing it. */
- pair->value &= ~missing;
-}
-
-static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
-{
- struct riscv_hwprobe pair;
-
- hwprobe_isa_ext0(&pair, cpus);
- return (pair.value & ext);
-}
-
-static u64 hwprobe_misaligned(const struct cpumask *cpus)
-{
- int cpu;
- u64 perf = -1ULL;
-
- for_each_cpu(cpu, cpus) {
- int this_perf = per_cpu(misaligned_access_speed, cpu);
-
- if (perf == -1ULL)
- perf = this_perf;
-
- if (perf != this_perf) {
- perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
- break;
- }
- }
-
- if (perf == -1ULL)
- return RISCV_HWPROBE_MISALIGNED_UNKNOWN;
-
- return perf;
-}
-
-static void hwprobe_one_pair(struct riscv_hwprobe *pair,
- const struct cpumask *cpus)
-{
- switch (pair->key) {
- case RISCV_HWPROBE_KEY_MVENDORID:
- case RISCV_HWPROBE_KEY_MARCHID:
- case RISCV_HWPROBE_KEY_MIMPID:
- hwprobe_arch_id(pair, cpus);
- break;
- /*
- * The kernel already assumes that the base single-letter ISA
- * extensions are supported on all harts, and only supports the
- * IMA base, so just cheat a bit here and tell that to
- * userspace.
- */
- case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
- pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
- break;
-
- case RISCV_HWPROBE_KEY_IMA_EXT_0:
- hwprobe_isa_ext0(pair, cpus);
- break;
-
- case RISCV_HWPROBE_KEY_CPUPERF_0:
- pair->value = hwprobe_misaligned(cpus);
- break;
-
- case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
- pair->value = 0;
- if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
- pair->value = riscv_cboz_block_size;
- break;
-
- /*
- * For forward compatibility, unknown keys don't fail the whole
- * call, but get their element key set to -1 and value set to 0
- * indicating they're unrecognized.
- */
- default:
- pair->key = -1;
- pair->value = 0;
- break;
- }
-}
-
-static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
- size_t pair_count, size_t cpu_count,
- unsigned long __user *cpus_user,
- unsigned int flags)
-{
- size_t out;
- int ret;
- cpumask_t cpus;
-
- /* Check the reserved flags. */
- if (flags != 0)
- return -EINVAL;
-
- /*
- * The interface supports taking in a CPU mask, and returns values that
- * are consistent across that mask. Allow userspace to specify NULL and
- * 0 as a shortcut to all online CPUs.
- */
- cpumask_clear(&cpus);
- if (!cpu_count && !cpus_user) {
- cpumask_copy(&cpus, cpu_online_mask);
- } else {
- if (cpu_count > cpumask_size())
- cpu_count = cpumask_size();
-
- ret = copy_from_user(&cpus, cpus_user, cpu_count);
- if (ret)
- return -EFAULT;
-
- /*
- * Userspace must provide at least one online CPU, without that
- * there's no way to define what is supported.
- */
- cpumask_and(&cpus, &cpus, cpu_online_mask);
- if (cpumask_empty(&cpus))
- return -EINVAL;
- }
-
- for (out = 0; out < pair_count; out++, pairs++) {
- struct riscv_hwprobe pair;
-
- if (get_user(pair.key, &pairs->key))
- return -EFAULT;
-
- pair.value = 0;
- hwprobe_one_pair(&pair, &cpus);
- ret = put_user(pair.key, &pairs->key);
- if (ret == 0)
- ret = put_user(pair.value, &pairs->value);
-
- if (ret)
- return -EFAULT;
- }
-
- return 0;
-}
-
-#ifdef CONFIG_MMU
-
-static int __init init_hwprobe_vdso_data(void)
-{
- struct vdso_data *vd = __arch_get_k_vdso_data();
- struct arch_vdso_data *avd = &vd->arch_data;
- u64 id_bitsmash = 0;
- struct riscv_hwprobe pair;
- int key;
-
- /*
- * Initialize vDSO data with the answers for the "all CPUs" case, to
- * save a syscall in the common case.
- */
- for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
- pair.key = key;
- hwprobe_one_pair(&pair, cpu_online_mask);
-
- WARN_ON_ONCE(pair.key < 0);
-
- avd->all_cpu_hwprobe_values[key] = pair.value;
- /*
- * Smash together the vendor, arch, and impl IDs to see if
- * they're all 0 or any negative.
- */
- if (key <= RISCV_HWPROBE_KEY_MIMPID)
- id_bitsmash |= pair.value;
- }
-
- /*
- * If the arch, vendor, and implementation ID are all the same across
- * all harts, then assume all CPUs are the same, and allow the vDSO to
- * answer queries for arbitrary masks. However if all values are 0 (not
- * populated) or any value returns -1 (varies across CPUs), then the
- * vDSO should defer to the kernel for exotic cpu masks.
- */
- avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
- return 0;
-}
-
-arch_initcall_sync(init_hwprobe_vdso_data);
-
-#endif /* CONFIG_MMU */
-
-SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
- size_t, pair_count, size_t, cpu_count, unsigned long __user *,
- cpus, unsigned int, flags)
-{
- return do_riscv_hwprobe(pairs, pair_count, cpu_count,
- cpus, flags);
-}
-
/* Not defined using SYSCALL_DEFINE0 to avoid error injection */
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused)
{
diff --git a/arch/riscv/kernel/tests/module_test/test_uleb128.S b/arch/riscv/kernel/tests/module_test/test_uleb128.S
index 90f22049d553..8515ed7cd8c1 100644
--- a/arch/riscv/kernel/tests/module_test/test_uleb128.S
+++ b/arch/riscv/kernel/tests/module_test/test_uleb128.S
@@ -6,13 +6,13 @@
.text
.global test_uleb_basic
test_uleb_basic:
- ld a0, second
+ lw a0, second
addi a0, a0, -127
ret
.global test_uleb_large
test_uleb_large:
- ld a0, fourth
+ lw a0, fourth
addi a0, a0, -0x07e8
ret
@@ -22,10 +22,10 @@ first:
second:
.reloc second, R_RISCV_SET_ULEB128, second
.reloc second, R_RISCV_SUB_ULEB128, first
- .dword 0
+ .word 0
third:
.space 1000
fourth:
.reloc fourth, R_RISCV_SET_ULEB128, fourth
.reloc fourth, R_RISCV_SUB_ULEB128, third
- .dword 0
+ .word 0
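For reference, the R_RISCV_SET_ULEB128/R_RISCV_SUB_ULEB128 pairs above make the module loader store the label differences (second - first, fourth - third) in ULEB128 form, whose decoding looks roughly like this (an illustrative helper, not kernel code):

#include <stddef.h>
#include <stdint.h>

static uint64_t uleb128_decode(const uint8_t *p, size_t *consumed)
{
	uint64_t value = 0;
	unsigned int shift = 0;
	size_t i = 0;

	/* 7 data bits per byte, low bits first; bit 7 means "more bytes". */
	do {
		value |= (uint64_t)(p[i] & 0x7f) << shift;
		shift += 7;
	} while (p[i++] & 0x80);

	*consumed = i;
	return value;
}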
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 5eba37147caa..8ded225e8c5b 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -319,7 +319,7 @@ static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
{
if (user_mode(regs)) {
- return __get_user(*r_val, addr);
+ return __get_user(*r_val, (u8 __user *)addr);
} else {
*r_val = *addr;
return 0;
@@ -329,7 +329,7 @@ static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
{
if (user_mode(regs)) {
- return __put_user(val, addr);
+ return __put_user(val, (u8 __user *)addr);
} else {
*addr = val;
return 0;
@@ -343,7 +343,7 @@ static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
if (user_mode(regs)) { \
__ret = __get_user(insn, insn_addr); \
} else { \
- insn = *insn_addr; \
+ insn = *(__force u16 *)insn_addr; \
__ret = 0; \
} \
\
@@ -550,16 +550,14 @@ int handle_misaligned_store(struct pt_regs *regs)
} else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
len = 8;
val.data_ulong = GET_RS2S(insn, regs);
- } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP &&
- ((insn >> SH_RD) & 0x1f)) {
+ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
len = 8;
val.data_ulong = GET_RS2C(insn, regs);
#endif
} else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
len = 4;
val.data_ulong = GET_RS2S(insn, regs);
- } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP &&
- ((insn >> SH_RD) & 0x1f)) {
+ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
len = 4;
val.data_ulong = GET_RS2C(insn, regs);
} else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c
index cadf725ef798..1e926e4b5881 100644
--- a/arch/riscv/kernel/vdso/hwprobe.c
+++ b/arch/riscv/kernel/vdso/hwprobe.c
@@ -3,26 +3,22 @@
* Copyright 2023 Rivos, Inc
*/
+#include <linux/string.h>
#include <linux/types.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus,
+ size_t cpusetsize, unsigned long *cpus,
unsigned int flags);
-/* Add a prototype to avoid -Wmissing-prototypes warning. */
-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus,
- unsigned int flags);
-
-int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
- size_t cpu_count, unsigned long *cpus,
- unsigned int flags)
+static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags)
{
const struct vdso_data *vd = __arch_get_vdso_data();
const struct arch_vdso_data *avd = &vd->arch_data;
- bool all_cpus = !cpu_count && !cpus;
+ bool all_cpus = !cpusetsize && !cpus;
struct riscv_hwprobe *p = pairs;
struct riscv_hwprobe *end = pairs + pair_count;
@@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
* masks.
*/
if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus))
- return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags);
+ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
/* This is something we can handle, fill out the pairs. */
while (p < end) {
@@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
return 0;
}
+
+static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags)
+{
+ const struct vdso_data *vd = __arch_get_vdso_data();
+ const struct arch_vdso_data *avd = &vd->arch_data;
+ struct riscv_hwprobe *p = pairs;
+ struct riscv_hwprobe *end = pairs + pair_count;
+ unsigned char *c = (unsigned char *)cpus;
+ bool empty_cpus = true;
+ bool clear_all = false;
+ int i;
+
+ if (!cpusetsize || !cpus)
+ return -EINVAL;
+
+ for (i = 0; i < cpusetsize; i++) {
+ if (c[i]) {
+ empty_cpus = false;
+ break;
+ }
+ }
+
+ if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus)
+ return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags);
+
+ while (p < end) {
+ if (riscv_hwprobe_key_is_valid(p->key)) {
+ struct riscv_hwprobe t = {
+ .key = p->key,
+ .value = avd->all_cpu_hwprobe_values[p->key],
+ };
+
+ if (!riscv_hwprobe_pair_cmp(&t, p))
+ clear_all = true;
+ } else {
+ clear_all = true;
+ p->key = -1;
+ p->value = 0;
+ }
+ p++;
+ }
+
+ if (clear_all) {
+ for (i = 0; i < cpusetsize; i++)
+ c[i] = 0;
+ }
+
+ return 0;
+}
+
+/* Add a prototype to avoid -Wmissing-prototypes warning. */
+int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags);
+
+int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count,
+ size_t cpusetsize, unsigned long *cpus,
+ unsigned int flags)
+{
+ if (flags & RISCV_HWPROBE_WHICH_CPUS)
+ return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize,
+ cpus, flags);
+
+ return riscv_vdso_get_values(pairs, pair_count, cpusetsize,
+ cpus, flags);
+}
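A hedged sketch of the WHICH_CPUS flavor from userspace: the cpu set is both input and output, an empty input set is shorthand for all online CPUs, and CPUs whose answers do not satisfy the requested key/value pairs are cleared from it (names taken from the uapi header; this program is illustrative):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/hwprobe.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBA,	/* required bits */
	};
	cpu_set_t cpus;

	CPU_ZERO(&cpus);	/* empty set: consider all online CPUs */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, sizeof(cpus), &cpus,
		    RISCV_HWPROBE_WHICH_CPUS))
		return 1;

	printf("CPUs advertising Zba: %d\n", CPU_COUNT(&cpus));
	return 0;
}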
diff --git a/arch/riscv/kernel/vmlinux-xip.lds.S b/arch/riscv/kernel/vmlinux-xip.lds.S
index 50767647fbc6..8c3daa1b0531 100644
--- a/arch/riscv/kernel/vmlinux-xip.lds.S
+++ b/arch/riscv/kernel/vmlinux-xip.lds.S
@@ -29,10 +29,12 @@ SECTIONS
HEAD_TEXT_SECTION
INIT_TEXT_SECTION(PAGE_SIZE)
/* we have to discard exit text and such at runtime, not link time */
+ __exittext_begin = .;
.exit.text :
{
EXIT_TEXT
}
+ __exittext_end = .;
.text : {
_text = .;
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
index 492dd4b8f3d6..002ca58dd998 100644
--- a/arch/riscv/kernel/vmlinux.lds.S
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -69,10 +69,12 @@ SECTIONS
__soc_builtin_dtb_table_end = .;
}
/* we have to discard exit text and such at runtime, not link time */
+ __exittext_begin = .;
.exit.text :
{
EXIT_TEXT
}
+ __exittext_end = .;
__init_text_end = .;
. = ALIGN(SECTION_ALIGN);
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 068c74593871..a9e2fd7245e1 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
*ptep_level = current_level;
ptep = (pte_t *)kvm->arch.pgd;
ptep = &ptep[gstage_pte_index(addr, current_level)];
- while (ptep && pte_val(*ptep)) {
+ while (ptep && pte_val(ptep_get(ptep))) {
if (gstage_pte_leaf(ptep)) {
*ptep_level = current_level;
*ptepp = ptep;
@@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr,
if (current_level) {
current_level--;
*ptep_level = current_level;
- ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
ptep = &ptep[gstage_pte_index(addr, current_level)];
} else {
ptep = NULL;
@@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level,
if (gstage_pte_leaf(ptep))
return -EEXIST;
- if (!pte_val(*ptep)) {
+ if (!pte_val(ptep_get(ptep))) {
if (!pcache)
return -ENOMEM;
next_ptep = kvm_mmu_memory_cache_alloc(pcache);
if (!next_ptep)
return -ENOMEM;
- *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)),
- __pgprot(_PAGE_TABLE));
+ set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)),
+ __pgprot(_PAGE_TABLE)));
} else {
if (gstage_pte_leaf(ptep))
return -EEXIST;
- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
}
current_level--;
ptep = &next_ptep[gstage_pte_index(addr, current_level)];
}
- *ptep = *new_pte;
+ set_pte(ptep, *new_pte);
if (gstage_pte_leaf(ptep))
gstage_remote_tlb_flush(kvm, current_level, addr);
@@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
BUG_ON(addr & (page_size - 1));
- if (!pte_val(*ptep))
+ if (!pte_val(ptep_get(ptep)))
return;
if (ptep_level && !gstage_pte_leaf(ptep)) {
- next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep);
+ next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep));
next_ptep_level = ptep_level - 1;
ret = gstage_level_to_page_size(next_ptep_level,
&next_page_size);
@@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr,
if (op == GSTAGE_OP_CLEAR)
set_pte(ptep, __pte(0));
else if (op == GSTAGE_OP_WP)
- set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE));
+ set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE));
gstage_remote_tlb_flush(kvm, ptep_level, addr);
}
}
@@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
&ptep, &ptep_level))
return false;
- return pte_young(*ptep);
+ return pte_young(ptep_get(ptep));
}
int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
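The recurring *ptep -> ptep_get(ptep) rewrite in this and the following files routes every page-table read through the generic accessor, which wraps the dereference in READ_ONCE() so the compiler can neither tear nor repeat the load. The generic fallback is essentially:

/* From include/linux/pgtable.h (paraphrased): a single, untorn read. */
static inline pte_t ptep_get(pte_t *ptep)
{
	return READ_ONCE(*ptep);
}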
diff --git a/arch/riscv/lib/clear_page.S b/arch/riscv/lib/clear_page.S
index b22de1231144..20ff03f5b0f2 100644
--- a/arch/riscv/lib/clear_page.S
+++ b/arch/riscv/lib/clear_page.S
@@ -4,9 +4,9 @@
*/
#include <linux/linkage.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/alternative-macros.h>
-#include <asm-generic/export.h>
#include <asm/hwcap.h>
#include <asm/insn-def.h>
#include <asm/page.h>
diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S
index ef90075c4b0a..c8294bf72c06 100644
--- a/arch/riscv/lib/tishift.S
+++ b/arch/riscv/lib/tishift.S
@@ -4,7 +4,7 @@
*/
#include <linux/linkage.h>
-#include <asm-generic/export.h>
+#include <linux/export.h>
SYM_FUNC_START(__lshrti3)
beqz a2, .L1
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
index a1e4a3c42925..bc22c078aba8 100644
--- a/arch/riscv/lib/uaccess.S
+++ b/arch/riscv/lib/uaccess.S
@@ -1,5 +1,5 @@
#include <linux/linkage.h>
-#include <asm-generic/export.h>
+#include <linux/export.h>
#include <asm/asm.h>
#include <asm/asm-extable.h>
#include <asm/csr.h>
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 3a4dfc8babcf..2c869f8026a8 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,10 +13,9 @@ endif
KCOV_INSTRUMENT_init.o := n
obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
obj-y += cacheflush.o
obj-y += context.o
-obj-y += pgtable.o
obj-y += pmem.o
ifeq ($(CONFIG_MMU),y)
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 35484d830fd6..dd1530af3ef1 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -27,6 +27,14 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex,
return true;
}
+static inline unsigned long regs_get_gpr(struct pt_regs *regs, unsigned int offset)
+{
+ if (unlikely(!offset || offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
unsigned long val)
{
@@ -50,6 +58,27 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
return true;
}
+static bool
+ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+ int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+ unsigned long data, addr, offset;
+
+ addr = regs_get_gpr(regs, reg_addr * sizeof(unsigned long));
+
+ offset = addr & 0x7UL;
+ addr &= ~0x7UL;
+
+ data = *(unsigned long *)addr >> (offset * 8);
+
+ regs_set_gpr(regs, reg_data * sizeof(unsigned long), data);
+
+ regs->epc = get_ex_fixup(ex);
+ return true;
+}
+
bool fixup_exception(struct pt_regs *regs)
{
const struct exception_table_entry *ex;
@@ -65,6 +94,8 @@ bool fixup_exception(struct pt_regs *regs)
return ex_handler_bpf(ex, regs);
case EX_TYPE_UACCESS_ERR_ZERO:
return ex_handler_uaccess_err_zero(ex, regs);
+ case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
+ return ex_handler_load_unaligned_zeropad(ex, regs);
}
BUG();
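The new handler backs load_unaligned_zeropad() for word-at-a-time string operations: the original load may straddle into an unmapped page, and on a fault the fixup redoes an aligned load, which lies entirely within the mapped page, then shifts the result down. A standalone C model of the arithmetic above:

/*
 * What ex_handler_load_unaligned_zeropad() computes: on little-endian
 * RISC-V the right shift pulls in zeroes for the bytes that live past
 * the faulting page boundary.
 */
static unsigned long zeropad_fixup(unsigned long addr)
{
	unsigned long offset = addr & 0x7UL;
	unsigned long aligned_addr = addr & ~0x7UL;

	return *(unsigned long *)aligned_addr >> (offset * 8);
}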
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 90d4ba36d1d0..76f1df709a21 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -136,24 +136,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
pgd = (pgd_t *)pfn_to_virt(pfn) + index;
pgd_k = init_mm.pgd + index;
- if (!pgd_present(*pgd_k)) {
+ if (!pgd_present(pgdp_get(pgd_k))) {
no_context(regs, addr);
return;
}
- set_pgd(pgd, *pgd_k);
+ set_pgd(pgd, pgdp_get(pgd_k));
p4d_k = p4d_offset(pgd_k, addr);
- if (!p4d_present(*p4d_k)) {
+ if (!p4d_present(p4dp_get(p4d_k))) {
no_context(regs, addr);
return;
}
pud_k = pud_offset(p4d_k, addr);
- if (!pud_present(*pud_k)) {
+ if (!pud_present(pudp_get(pud_k))) {
no_context(regs, addr);
return;
}
- if (pud_leaf(*pud_k))
+ if (pud_leaf(pudp_get(pud_k)))
goto flush_tlb;
/*
@@ -161,11 +161,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* to copy individual PTEs
*/
pmd_k = pmd_offset(pud_k, addr);
- if (!pmd_present(*pmd_k)) {
+ if (!pmd_present(pmdp_get(pmd_k))) {
no_context(regs, addr);
return;
}
- if (pmd_leaf(*pmd_k))
+ if (pmd_leaf(pmdp_get(pmd_k)))
goto flush_tlb;
/*
@@ -175,7 +175,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
* silently loop forever.
*/
pte_k = pte_offset_kernel(pmd_k, addr);
- if (!pte_present(*pte_k)) {
+ if (!pte_present(ptep_get(pte_k))) {
no_context(regs, addr);
return;
}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index b52f0210481f..431596c0e20e 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
}
if (sz == PMD_SIZE) {
- if (want_pmd_share(vma, addr) && pud_none(*pud))
+ if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
pte = huge_pmd_share(mm, vma, addr, pud);
else
pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
pmd_t *pmd;
pgd = pgd_offset(mm, addr);
- if (!pgd_present(*pgd))
+ if (!pgd_present(pgdp_get(pgd)))
return NULL;
p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d))
+ if (!p4d_present(p4dp_get(p4d)))
return NULL;
pud = pud_offset(p4d, addr);
@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pud huge, non-present or none */
return (pte_t *)pud;
- if (!pud_present(*pud))
+ if (!pud_present(pudp_get(pud)))
return NULL;
pmd = pmd_offset(pud, addr);
@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
/* must be pmd huge, non-present or none */
return (pte_t *)pmd;
- if (!pmd_present(*pmd))
+ if (!pmd_present(pmdp_get(pmd)))
return NULL;
for_each_napot_order(order) {
@@ -293,7 +293,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_t *ptep,
unsigned long sz)
{
- pte_t pte = READ_ONCE(*ptep);
+ pte_t pte = ptep_get(ptep);
int i, pte_num;
if (!pte_napot(pte)) {
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 2e011cbddf3a..f533dd667a83 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -174,6 +174,9 @@ void __init mem_init(void)
/* Limit the memory size via mem. */
static phys_addr_t memory_limit;
+#ifdef CONFIG_XIP_KERNEL
+#define memory_limit (*(phys_addr_t *)XIP_FIXUP(&memory_limit))
+#endif /* CONFIG_XIP_KERNEL */
static int __init early_mem(char *p)
{
@@ -952,7 +955,7 @@ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
* setup_vm_final installs the linear mapping. For 32-bit kernel, as the
* kernel is mapped in the linear mapping, that makes no difference.
*/
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+ dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#endif
dtb_early_pa = dtb_pa;
@@ -1055,9 +1058,9 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
- kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
+ kernel_map.page_offset = PAGE_OFFSET_L3;
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
@@ -1067,6 +1070,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
+ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
@@ -1383,10 +1387,29 @@ void __init misc_mem_init(void)
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next)
+{
+ pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+ unsigned long addr, unsigned long next)
+{
+ vmemmap_verify((pte_t *)pmdp, node, addr, next);
+ return 1;
+}
+
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
struct vmem_altmap *altmap)
{
- return vmemmap_populate_basepages(start, end, node, NULL);
+ /*
+ * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we
+ * can't use hugepage mappings for 2-level page table because in case of
+ * memory hotplug, we are not able to update all the page tables with
+ * the new PMDs.
+ */
+ return vmemmap_populate_hugepages(start, end, node, NULL);
}
#endif
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 5e39dcf23fdb..e96251853037 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
phys_addr_t phys_addr;
pte_t *ptep, *p;
- if (pmd_none(*pmd)) {
+ if (pmd_none(pmdp_get(pmd))) {
p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
ptep = pte_offset_kernel(pmd, vaddr);
do {
- if (pte_none(*ptep)) {
+ if (pte_none(ptep_get(ptep))) {
phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
pmd_t *pmdp, *p;
unsigned long next;
- if (pud_none(*pud)) {
+ if (pud_none(pudp_get(pud))) {
p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
do {
next = pmd_addr_end(vaddr, end);
- if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+ if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+ (next - vaddr) >= PMD_SIZE) {
phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
if (phys_addr) {
set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
pud_t *pudp, *p;
unsigned long next;
- if (p4d_none(*p4d)) {
+ if (p4d_none(p4dp_get(p4d))) {
p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ (next - vaddr) >= PUD_SIZE) {
phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
if (phys_addr) {
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
p4d_t *p4dp, *p;
unsigned long next;
- if (pgd_none(*pgd)) {
+ if (pgd_none(pgdp_get(pgd))) {
p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
}
@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ (next - vaddr) >= P4D_SIZE) {
phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
if (phys_addr) {
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
if (phys_addr) {
@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
if (!pgtable_l4_enabled) {
pudp = (pud_t *)p4dp;
} else {
- base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+ base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
pudp = base_pud + pud_index(vaddr);
}
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+ if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
(next - vaddr) >= PUD_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
if (!pgtable_l5_enabled) {
p4dp = (p4d_t *)pgdp;
} else {
- base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+ base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
p4dp = base_p4d + p4d_index(vaddr);
}
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+ if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
(next - vaddr) >= P4D_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+ if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
(next - vaddr) >= PGDIR_SIZE) {
phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
do {
next = pud_addr_end(vaddr, end);
- if (pud_none(*pud_k)) {
+ if (pud_none(pudp_get(pud_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
do {
next = p4d_addr_end(vaddr, end);
- if (p4d_none(*p4d_k)) {
+ if (p4d_none(p4dp_get(p4d_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
do {
next = pgd_addr_end(vaddr, end);
- if (pgd_none(*pgd_k)) {
+ if (pgd_none(pgdp_get(pgd_k))) {
p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
continue;
@@ -451,7 +454,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last p4d since it is shared with the kernel mapping. */
if (pgtable_l5_enabled) {
- ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+ ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@@ -462,7 +465,7 @@ static void __init create_tmp_mapping(void)
/* Copy the last pud since it is shared with the kernel mapping. */
if (pgtable_l4_enabled) {
- ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+ ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index fc5fc4f785c4..410056a50aa9 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- p4d_t val = READ_ONCE(*p4d);
+ p4d_t val = p4dp_get(p4d);
if (p4d_leaf(val)) {
val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pud_t val = READ_ONCE(*pud);
+ pud_t val = pudp_get(pud);
if (pud_leaf(val)) {
val = __pud(set_pageattr_masks(pud_val(val), walk));
@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pmd_t val = READ_ONCE(*pmd);
+ pmd_t val = pmdp_get(pmd);
if (pmd_leaf(val)) {
val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk)
{
- pte_t val = READ_ONCE(*pte);
+ pte_t val = ptep_get(pte);
val = __pte(set_pageattr_masks(pte_val(val), walk));
set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
vaddr <= (vaddr & PMD_MASK) && end >= next)
continue;
- if (pmd_leaf(*pmdp)) {
+ if (pmd_leaf(pmdp_get(pmdp))) {
struct page *pte_page;
- unsigned long pfn = _pmd_pfn(*pmdp);
- pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+ pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
pte_t *ptep_new;
int i;
@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
vaddr <= (vaddr & PUD_MASK) && end >= next)
continue;
- if (pud_leaf(*pudp)) {
+ if (pud_leaf(pudp_get(pudp))) {
struct page *pmd_page;
- unsigned long pfn = _pud_pfn(*pudp);
- pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _pud_pfn(pudp_get(pudp));
+ pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
pmd_t *pmdp_new;
int i;
@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
vaddr <= (vaddr & P4D_MASK) && end >= next)
continue;
- if (p4d_leaf(*p4dp)) {
+ if (p4d_leaf(p4dp_get(p4dp))) {
struct page *pud_page;
- unsigned long pfn = _p4d_pfn(*p4dp);
- pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+ unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+ pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
pud_t *pudp_new;
int i;
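
All three __split_linear_mapping_* helpers follow one pattern: sample the entry via the accessor, and if it is a leaf at this level, carve it into next-level entries that cover the same physical range with identical permissions. A sketch of the PMD case under that pattern (split_one_pmd() and the pre-allocated ptep_new table are illustrative, not this file's exact code):

/*
 * Illustrative: split one leaf PMD into a table of PTEs. Assumes the
 * usual RISC-V encoding: the PFN lives in _PAGE_PFN_MASK and all
 * remaining bits are permission/attribute bits.
 */
static void split_one_pmd(pmd_t *pmdp, pte_t *ptep_new)
{
        unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
        pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
        int i;

        /* Each new PTE maps one page of the old leaf, permissions unchanged. */
        for (i = 0; i < PTRS_PER_PTE; ++i)
                set_pte(ptep_new + i, pfn_pte(pfn + i, prot));

        /* Repoint the PMD from the old leaf to the new PTE table. */
        set_pmd(pmdp, pfn_pmd(PFN_DOWN(__pa(ptep_new)), PAGE_TABLE));
}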
@@ -305,8 +305,13 @@ static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
goto unlock;
}
} else if (is_kernel_mapping(start) || is_linear_mapping(start)) {
- lm_start = (unsigned long)lm_alias(start);
- lm_end = (unsigned long)lm_alias(end);
+ if (is_kernel_mapping(start)) {
+ lm_start = (unsigned long)lm_alias(start);
+ lm_end = (unsigned long)lm_alias(end);
+ } else {
+ lm_start = start;
+ lm_end = end;
+ }
ret = split_linear_mapping(lm_start, lm_end);
if (ret)
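
This hunk fixes the alias computation: lm_alias() translates a kernel-image address to its linear-mapping alias, so it must not be applied to an address that already lives in the linear mapping. For reference, the generic macro is (include/linux/mm.h):

#define lm_alias(x)        __va(__pa_symbol(x))

__pa_symbol() is only meaningful for addresses inside the kernel image, which is why the linear-mapping case now passes start/end through unchanged.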
@@ -378,7 +383,7 @@ int set_direct_map_invalid_noflush(struct page *page)
int set_direct_map_default_noflush(struct page *page)
{
return __set_memory((unsigned long)page_address(page), 1,
- PAGE_KERNEL, __pgprot(0));
+ PAGE_KERNEL, __pgprot(_PAGE_EXEC));
}
#ifdef CONFIG_DEBUG_PAGEALLOC
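
With this change, restoring a direct-map page to its default both sets PAGE_KERNEL and explicitly clears _PAGE_EXEC instead of leaving a stale executable bit behind. The masks are applied by set_pageattr_masks() earlier in this file, which is essentially:

/* Essentially set_pageattr_masks(): clear first, then set. */
static unsigned long apply_masks(unsigned long val, pgprot_t set_mask,
                                 pgprot_t clear_mask)
{
        val &= ~pgprot_val(clear_mask);
        val |= pgprot_val(set_mask);
        return val;
}

Here set_mask = PAGE_KERNEL and clear_mask = __pgprot(_PAGE_EXEC), yielding a readable, writable, non-executable default mapping.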
@@ -406,29 +411,29 @@ bool kernel_page_present(struct page *page)
pte_t *pte;
pgd = pgd_offset_k(addr);
- if (!pgd_present(*pgd))
+ if (!pgd_present(pgdp_get(pgd)))
return false;
- if (pgd_leaf(*pgd))
+ if (pgd_leaf(pgdp_get(pgd)))
return true;
p4d = p4d_offset(pgd, addr);
- if (!p4d_present(*p4d))
+ if (!p4d_present(p4dp_get(p4d)))
return false;
- if (p4d_leaf(*p4d))
+ if (p4d_leaf(p4dp_get(p4d)))
return true;
pud = pud_offset(p4d, addr);
- if (!pud_present(*pud))
+ if (!pud_present(pudp_get(pud)))
return false;
- if (pud_leaf(*pud))
+ if (pud_leaf(pudp_get(pud)))
return true;
pmd = pmd_offset(pud, addr);
- if (!pmd_present(*pmd))
+ if (!pmd_present(pmdp_get(pmd)))
return false;
- if (pmd_leaf(*pmd))
+ if (pmd_leaf(pmdp_get(pmd)))
return true;
pte = pte_offset_kernel(pmd, addr);
- return pte_present(*pte);
+ return pte_present(ptep_get(pte));
}
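
kernel_page_present() now performs its whole descent through the accessors, returning true at the first leaf and false at the first non-present entry. A hedged usage sketch (copy_kernel_page() is a hypothetical caller shown only to illustrate the contract; hibernation's page saving is the classic real user):

/* Hypothetical caller: probe before touching a possibly-unmapped page. */
static bool copy_kernel_page(void *dst, struct page *page)
{
        /*
         * DEBUG_PAGEALLOC or set_direct_map_invalid_noflush() may have
         * removed the direct mapping; touching the page would fault.
         */
        if (!kernel_page_present(page))
                return false;

        memcpy(dst, page_address(page), PAGE_SIZE);
        return true;
}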
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index fef4e7328e49..ef887efcb679 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -5,6 +5,47 @@
#include <linux/kernel.h>
#include <linux/pgtable.h>
+int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(ptep_get(ptep), entry))
+ __set_pte_at(ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(ptep_get(ptep)))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if (pgtable_l4_enabled)
+ return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+ return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ if (pgtable_l5_enabled)
+ return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+ return (p4d_t *)pgd;
+}
+#endif
+
#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
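
Because RISC-V selects Sv39/Sv48/Sv57 at boot, page-table levels are folded at run time rather than compile time: the two helpers above either descend into a real next-level table or simply cast the pointer when the level does not exist. One walk therefore serves every paging mode; a sketch (walk_to_pud() is illustrative):

/* Illustrative: one kernel-VA walk that works for 3/4/5-level paging. */
static pud_t *walk_to_pud(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d = p4d_offset(pgd, addr); /* cast-only if !pgtable_l5_enabled */

        return pud_offset(p4d, addr);       /* cast-only if !pgtable_l4_enabled */
}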
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
int pud_clear_huge(pud_t *pud)
{
- if (!pud_leaf(READ_ONCE(*pud)))
+ if (!pud_leaf(pudp_get(pud)))
return 0;
pud_clear(pud);
return 1;
@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
- pmd_t *pmd = pud_pgtable(*pud);
+ pmd_t *pmd = pud_pgtable(pudp_get(pud));
int i;
pud_clear(pud);
@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
int pmd_clear_huge(pmd_t *pmd)
{
- if (!pmd_leaf(READ_ONCE(*pmd)))
+ if (!pmd_leaf(pmdp_get(pmd)))
return 0;
pmd_clear(pmd);
return 1;
@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
- pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+ pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
pmd_clear(pmd);
@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
- VM_BUG_ON(pmd_trans_huge(*pmdp));
+ VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
/*
* When leaf PTE entries (regular pages) are collapsed into a leaf
* PMD entry (huge page), a valid non-leaf PTE is converted into a
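
(The context window cuts the comment short; in the source it goes on to note that the address-ranged sfence.vma forms apply only to leaf PTEs, so collapsing a PTE table into a huge page needs a global flush.) The shape of the helper, mirroring the generic pattern rather than quoting the exact body:

pmd_t pmdp_collapse_flush_sketch(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmdp)
{
        /* Atomically clear the non-leaf PMD and keep its old value. */
        pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* Ranged sfence.vma only orders leaf PTEs: flush the whole mm. */
        flush_tlb_mm(vma->vm_mm);

        return pmd;
}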
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index e6659d7368b3..f0190f5fdd05 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -93,29 +93,23 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
- unsigned long size, unsigned long stride)
+static void __flush_tlb_range(struct cpumask *cmask, unsigned long asid,
+ unsigned long start, unsigned long size,
+ unsigned long stride)
{
struct flush_tlb_range_data ftd;
- const struct cpumask *cmask;
- unsigned long asid = FLUSH_TLB_NO_ASID;
bool broadcast;
- if (mm) {
- unsigned int cpuid;
+ if (cpumask_empty(cmask))
+ return;
- cmask = mm_cpumask(mm);
- if (cpumask_empty(cmask))
- return;
+ if (cmask != cpu_online_mask) {
+ unsigned int cpuid;
cpuid = get_cpu();
/* check if the tlbflush needs to be sent to other CPUs */
broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-
- if (static_branch_unlikely(&use_asid_allocator))
- asid = atomic_long_read(&mm->context.id) & asid_mask;
} else {
- cmask = cpu_online_mask;
broadcast = true;
}
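
The refactor hoists the cpumask and ASID out of __flush_tlb_range(): user flushes pass mm_cpumask(mm) plus the mm's ASID, kernel flushes pass cpu_online_mask plus FLUSH_TLB_NO_ASID, and (below) batched flushes pass an accumulated mask. Whatever the source, the per-CPU work bottoms out in sfence.vma; a sketch of the local primitive, modelled on the existing local_flush_tlb_* helpers (exact form may differ):

/*
 * sfence.vma rs1, rs2: rs1 holds the virtual address (x0 = all
 * addresses), rs2 the ASID. Modelled on local_flush_tlb_page_asid().
 */
static inline void local_flush_one_page(unsigned long addr, unsigned long asid)
{
        if (asid != FLUSH_TLB_NO_ASID)
                __asm__ __volatile__ ("sfence.vma %0, %1"
                                      : : "r" (addr), "r" (asid)
                                      : "memory");
        else
                __asm__ __volatile__ ("sfence.vma %0"
                                      : : "r" (addr) : "memory");
}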
@@ -135,25 +129,34 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
local_flush_tlb_range_asid(start, size, stride, asid);
}
- if (mm)
+ if (cmask != cpu_online_mask)
put_cpu();
}
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+ return static_branch_unlikely(&use_asid_allocator) ?
+ atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+ 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
}
void flush_tlb_mm_range(struct mm_struct *mm,
unsigned long start, unsigned long end,
unsigned int page_size)
{
- __flush_tlb_range(mm, start, end - start, page_size);
+ __flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+ start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
- __flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+ __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ addr, PAGE_SIZE, PAGE_SIZE);
}
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
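
get_mm_asid() centralizes the decision the old code made inline: with the ASID allocator active, the mm's context id (masked to the hardware ASID width) scopes the flush; otherwise FLUSH_TLB_NO_ASID selects the un-scoped sfence.vma form. The ternary above, written long-hand for readability:

/* Long-hand equivalent of get_mm_asid() above. */
static unsigned long get_mm_asid_longhand(struct mm_struct *mm)
{
        if (static_branch_unlikely(&use_asid_allocator))
                return atomic_long_read(&mm->context.id) & asid_mask;

        return FLUSH_TLB_NO_ASID; /* core falls back to the global form */
}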
@@ -185,18 +188,44 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
}
}
- __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+ __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ start, end - start, stride_size);
}
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
- __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+ __flush_tlb_range((struct cpumask *)cpu_online_mask, FLUSH_TLB_NO_ASID,
+ start, end - start, PAGE_SIZE);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
+ __flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+ start, end - start, PMD_SIZE);
}
#endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+ return true;
+}
+
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+ struct mm_struct *mm,
+ unsigned long uaddr)
+{
+ cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+ flush_tlb_mm(mm);
+}
+
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+ __flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+ FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+}
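
These four hooks back the ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH select added in this series' Kconfig change: while reclaim unmaps pages, arch_tlbbatch_add_pending() merely ORs each mm's cpumask into the batch, and one arch_tlbbatch_flush() at the end flushes the union once instead of flushing per page. A simplified sketch of the calling pattern (unmap_many() is illustrative; the real driver lives in mm/rmap.c):

/* Illustrative driver: one batched flush for n unmapped pages. */
static void unmap_many(struct arch_tlbflush_unmap_batch *batch,
                       struct mm_struct *mm, unsigned long *uaddrs, int n)
{
        int i;

        for (i = 0; i < n; i++) {
                /* ... clear the PTE mapping uaddrs[i] ... */
                if (arch_tlbbatch_should_defer(mm))
                        arch_tlbbatch_add_pending(batch, mm, uaddrs[i]);
        }

        /* One broadcast instead of n: flush everything on the union mask. */
        arch_tlbbatch_flush(batch);
}

Note that arch_tlbbatch_flush() deliberately flushes the full address range (FLUSH_TLB_MAX_SIZE) with no ASID, since the accumulated batch may span multiple mms.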