author    Arnd Bergmann <arnd@arndb.de>  2022-02-25 11:16:17 +0100
committer Arnd Bergmann <arnd@arndb.de>  2022-02-25 11:16:58 +0100
commit    dd865f090f0382ba9e74dc4fe1008c08a67a6fca (patch)
tree      f41fbecea37957bdb6246b867e086fc40b5d0d77 /arch/ia64
parent    sparc64: fix building assembly files (diff)
parent    uaccess: remove CONFIG_SET_FS (diff)
Merge branch 'set_fs-4' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic into asm-generic
Christoph Hellwig and a few others spent a huge effort on removing
set_fs() from most of the important architectures, but about half the
other architectures were never completed even though most of them don't
actually use set_fs() at all.

I did a patch for microblaze at some point, which turned out to be
fairly generic, and now ported it to most other architectures, using
new generic implementations of access_ok() and __{get,put}_kernel_nofault().

Three architectures (sparc64, ia64, and sh) needed some extra work,
which I also completed.

* 'set_fs-4' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/asm-generic:
  uaccess: remove CONFIG_SET_FS
  ia64: remove CONFIG_SET_FS support
  sh: remove CONFIG_SET_FS support
  sparc64: remove CONFIG_SET_FS support
  lib/test_lockup: fix kernel pointer check for separate address spaces
  uaccess: generalize access_ok()
  uaccess: fix type mismatch warnings from access_ok()
  arm64: simplify access_ok()
  m68k: fix access_ok for coldfire
  MIPS: use simpler access_ok()
  MIPS: Handle address errors for accesses above CPU max virtual user address
  uaccess: add generic __{get,put}_kernel_nofault
  nios2: drop access_ok() check from __put_user()
  x86: use more conventional access_ok() definition
  x86: remove __range_not_ok()
  sparc64: add __{get,put}_kernel_nofault()
  nds32: fix access_ok() checks in get/put_user
  uaccess: fix nios2 and microblaze get_user_8()
  uaccess: fix integer overflow on access_ok()
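For context, the pattern being removed looks roughly like the sketch below. This is illustrative only, not code from the series: get_fs()/set_fs()/KERNEL_DS no longer exist once the final patch lands (so this only builds against a pre-series tree), and copy_from_kernel_nofault() is the dedicated replacement for kernel-pointer accesses.

/* Illustrative only: the old set_fs() dance that this series removes. */
#include <linux/uaccess.h>
#include <linux/errno.h>

static int read_kernel_word_old(unsigned long addr, unsigned long *val)
{
	mm_segment_t old_fs = get_fs();	/* save the per-thread address limit */
	unsigned long left;

	set_fs(KERNEL_DS);	/* make copy_from_user() accept kernel pointers */
	left = copy_from_user(val, (void __user *)addr, sizeof(*val));
	set_fs(old_fs);		/* restore the original limit */
	return left ? -EFAULT : 0;
}

/* The replacement: the target address space is explicit in the call itself. */
static int read_kernel_word_new(unsigned long addr, unsigned long *val)
{
	return copy_from_kernel_nofault(val, (void *)addr, sizeof(*val));
}

The second helper needs no save/restore because the address space is chosen per call rather than stored in thread state, which is what lets the patches below delete mm_segment_t and addr_limit entirely.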
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    |  1
-rw-r--r--  arch/ia64/include/asm/processor.h    |  4
-rw-r--r--  arch/ia64/include/asm/thread_info.h  |  2
-rw-r--r--  arch/ia64/include/asm/uaccess.h      | 26
-rw-r--r--  arch/ia64/kernel/unaligned.c         | 60
5 files changed, 47 insertions, 46 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index a7e01573abd8..6b6a35b3d959 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -61,7 +61,6 @@ config IA64
select NEED_SG_DMA_LENGTH
select NUMA if !FLATMEM
select PCI_MSI_ARCH_FALLBACKS if PCI_MSI
- select SET_FS
select ZONE_DMA32
default y
help
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 45365c2ef598..7cbce290f4e5 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -243,10 +243,6 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
extern void print_cpu_info (struct cpuinfo_ia64 *);
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
#define SET_UNALIGN_CTL(task,value) \
({ \
(task)->thread.flags = (((task)->thread.flags & ~IA64_THREAD_UAC_MASK) \
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 51d20cb37706..ef83493e6778 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -27,7 +27,6 @@ struct thread_info {
__u32 cpu; /* current CPU */
__u32 last_cpu; /* Last CPU thread ran on */
__u32 status; /* Thread synchronous flags */
- mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
__u64 utime;
@@ -48,7 +47,6 @@ struct thread_info {
.task = &tsk, \
.flags = 0, \
.cpu = 0, \
- .addr_limit = KERNEL_DS, \
.preempt_count = INIT_PREEMPT_COUNT, \
}
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index e19d2dcc0ced..60adadeb3e9e 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -42,30 +42,20 @@
#include <asm/extable.h>
/*
- * For historical reasons, the following macros are grossly misnamed:
- */
-#define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */
-#define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. access_ok() */
-
-#define get_fs() (current_thread_info()->addr_limit)
-#define set_fs(x) (current_thread_info()->addr_limit = (x))
-
-#define uaccess_kernel() (get_fs().seg == KERNEL_DS.seg)
-
-/*
- * When accessing user memory, we need to make sure the entire area really is in
- * user-level space. In order to do this efficiently, we make sure that the page at
- * address TASK_SIZE is never valid. We also need to make sure that the address doesn't
+ * When accessing user memory, we need to make sure the entire area really is
+ * in user-level space. We also need to make sure that the address doesn't
* point inside the virtually mapped linear page table.
*/
static inline int __access_ok(const void __user *p, unsigned long size)
{
+ unsigned long limit = TASK_SIZE;
unsigned long addr = (unsigned long)p;
- unsigned long seg = get_fs().seg;
- return likely(addr <= seg) &&
- (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
+
+ return likely((size <= limit) && (addr <= (limit - size)) &&
+ likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT));
}
-#define access_ok(addr, size) __access_ok((addr), (size))
+#define __access_ok __access_ok
+#include <asm-generic/access_ok.h>
/*
* These are the main single-value transfer routines. They automatically
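The rewritten __access_ok() above follows the shape of the generic check from "uaccess: fix integer overflow on access_ok()": testing size <= limit before computing limit - size keeps both comparisons free of unsigned wraparound. A standalone userspace demonstration with illustrative values (assumes 64-bit unsigned long; the limit merely stands in for TASK_SIZE):

#include <stdio.h>

int main(void)
{
	unsigned long limit = 0x8000000000000000UL;	/* stand-in for TASK_SIZE */
	unsigned long addr  = 0xffffffffffffff00UL;	/* a kernel pointer */
	unsigned long size  = 0x200;

	/* Naive check: addr + size wraps around to 0x100 and falsely passes. */
	int naive = (addr + size <= limit);

	/* Overflow-safe form used by the new __access_ok():
	 * limit - size cannot underflow because size <= limit holds first. */
	int safe = (size <= limit) && (addr <= limit - size);

	printf("naive=%d safe=%d\n", naive, safe);	/* prints: naive=1 safe=0 */
	return 0;
}

With the naive form, a huge addr wraps the sum to a small value and an out-of-range access would pass the check; the reordered form rejects it.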
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6c1a8951dfbb..0acb5a0cd7ab 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -749,9 +749,25 @@ emulate_load_updates (update_t type, load_store_t ld, struct pt_regs *regs, unsi
}
}
+static int emulate_store(unsigned long ifa, void *val, int len, bool kernel_mode)
+{
+ if (kernel_mode)
+ return copy_to_kernel_nofault((void *)ifa, val, len);
+
+ return copy_to_user((void __user *)ifa, val, len);
+}
+
+static int emulate_load(void *val, unsigned long ifa, int len, bool kernel_mode)
+{
+ if (kernel_mode)
+ return copy_from_kernel_nofault(val, (void *)ifa, len);
+
+ return copy_from_user(val, (void __user *)ifa, len);
+}
static int
-emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
unsigned int len = 1 << ld.x6_sz;
unsigned long val = 0;
@@ -774,7 +790,7 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
return -1;
}
/* this assumes little-endian byte-order: */
- if (copy_from_user(&val, (void __user *) ifa, len))
+ if (emulate_load(&val, ifa, len, kernel_mode))
return -1;
setreg(ld.r1, val, 0, regs);
@@ -872,7 +888,8 @@ emulate_load_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
}
static int
-emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
unsigned long r2;
unsigned int len = 1 << ld.x6_sz;
@@ -901,7 +918,7 @@ emulate_store_int (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
}
/* this assumes little-endian byte-order: */
- if (copy_to_user((void __user *) ifa, &r2, len))
+ if (emulate_store(ifa, &r2, len, kernel_mode))
return -1;
/*
@@ -1021,7 +1038,7 @@ float2mem_double (struct ia64_fpreg *init, struct ia64_fpreg *final)
}
static int
-emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs, bool kernel_mode)
{
struct ia64_fpreg fpr_init[2];
struct ia64_fpreg fpr_final[2];
@@ -1050,8 +1067,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
* This assumes little-endian byte-order. Note that there is no "ldfpe"
* instruction:
*/
- if (copy_from_user(&fpr_init[0], (void __user *) ifa, len)
- || copy_from_user(&fpr_init[1], (void __user *) (ifa + len), len))
+ if (emulate_load(&fpr_init[0], ifa, len, kernel_mode)
+ || emulate_load(&fpr_init[1], (ifa + len), len, kernel_mode))
return -1;
DPRINT("ld.r1=%d ld.imm=%d x6_sz=%d\n", ld.r1, ld.imm, ld.x6_sz);
@@ -1126,7 +1143,8 @@ emulate_load_floatpair (unsigned long ifa, load_store_t ld, struct pt_regs *regs
static int
-emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
struct ia64_fpreg fpr_init;
struct ia64_fpreg fpr_final;
@@ -1152,7 +1170,7 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
* See comments in ldX for descriptions on how the various loads are handled.
*/
if (ld.x6_op != 0x2) {
- if (copy_from_user(&fpr_init, (void __user *) ifa, len))
+ if (emulate_load(&fpr_init, ifa, len, kernel_mode))
return -1;
DPRINT("ld.r1=%d x6_sz=%d\n", ld.r1, ld.x6_sz);
@@ -1202,7 +1220,8 @@ emulate_load_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
static int
-emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
+emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs,
+ bool kernel_mode)
{
struct ia64_fpreg fpr_init;
struct ia64_fpreg fpr_final;
@@ -1244,7 +1263,7 @@ emulate_store_float (unsigned long ifa, load_store_t ld, struct pt_regs *regs)
DDUMP("fpr_init =", &fpr_init, len);
DDUMP("fpr_final =", &fpr_final, len);
- if (copy_to_user((void __user *) ifa, &fpr_final, len))
+ if (emulate_store(ifa, &fpr_final, len, kernel_mode))
return -1;
/*
@@ -1295,7 +1314,6 @@ void
ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
{
struct ia64_psr *ipsr = ia64_psr(regs);
- mm_segment_t old_fs = get_fs();
unsigned long bundle[2];
unsigned long opcode;
const struct exception_table_entry *eh = NULL;
@@ -1304,6 +1322,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
load_store_t insn;
} u;
int ret = -1;
+ bool kernel_mode = false;
if (ia64_psr(regs)->be) {
/* we don't support big-endian accesses */
@@ -1367,13 +1386,13 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
if (unaligned_dump_stack)
dump_stack();
}
- set_fs(KERNEL_DS);
+ kernel_mode = true;
}
DPRINT("iip=%lx ifa=%lx isr=%lx (ei=%d, sp=%d)\n",
regs->cr_iip, ifa, regs->cr_ipsr, ipsr->ri, ipsr->it);
- if (__copy_from_user(bundle, (void __user *) regs->cr_iip, 16))
+ if (emulate_load(bundle, regs->cr_iip, 16, kernel_mode))
goto failure;
/*
@@ -1467,7 +1486,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
case LDCCLR_IMM_OP:
case LDCNC_IMM_OP:
case LDCCLRACQ_IMM_OP:
- ret = emulate_load_int(ifa, u.insn, regs);
+ ret = emulate_load_int(ifa, u.insn, regs, kernel_mode);
break;
case ST_OP:
@@ -1478,7 +1497,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
fallthrough;
case ST_IMM_OP:
case STREL_IMM_OP:
- ret = emulate_store_int(ifa, u.insn, regs);
+ ret = emulate_store_int(ifa, u.insn, regs, kernel_mode);
break;
case LDF_OP:
@@ -1486,21 +1505,21 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
case LDFCCLR_OP:
case LDFCNC_OP:
if (u.insn.x)
- ret = emulate_load_floatpair(ifa, u.insn, regs);
+ ret = emulate_load_floatpair(ifa, u.insn, regs, kernel_mode);
else
- ret = emulate_load_float(ifa, u.insn, regs);
+ ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
break;
case LDF_IMM_OP:
case LDFA_IMM_OP:
case LDFCCLR_IMM_OP:
case LDFCNC_IMM_OP:
- ret = emulate_load_float(ifa, u.insn, regs);
+ ret = emulate_load_float(ifa, u.insn, regs, kernel_mode);
break;
case STF_OP:
case STF_IMM_OP:
- ret = emulate_store_float(ifa, u.insn, regs);
+ ret = emulate_store_float(ifa, u.insn, regs, kernel_mode);
break;
default:
@@ -1521,7 +1540,6 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
DPRINT("ipsr->ri=%d iip=%lx\n", ipsr->ri, regs->cr_iip);
done:
- set_fs(old_fs); /* restore original address limit */
return;
failure:
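Taken together, the emulate_load()/emulate_store() helpers and the kernel_mode flag replace the set_fs(KERNEL_DS)/set_fs(old_fs) bracket that previously surrounded the whole handler. A hedged sketch of the resulting call pattern; it condenses the mode selection to user_mode() for brevity, whereas the patch itself only sets kernel_mode in the kernel-fault branch after finding an exception-table fixup, and fetch_value() is a hypothetical name, not a helper from the patch:

/* Illustrative sketch: per-access dispatch on an explicit flag instead of
 * thread-global set_fs() state. */
#include <linux/ptrace.h>	/* user_mode() */
#include <linux/uaccess.h>	/* copy_from_user(), copy_from_kernel_nofault() */

static int fetch_value(void *dst, unsigned long ifa, int len, struct pt_regs *regs)
{
	if (!user_mode(regs))
		/* Kernel address: probed with page faults disabled, so a bad
		 * pointer yields -EFAULT instead of an unhandled fault. */
		return copy_from_kernel_nofault(dst, (void *)ifa, len);

	/* User address: the normal accessor, still bounded by access_ok(). */
	return copy_from_user(dst, (void __user *)ifa, len);
}

Both accessors return nonzero on failure, which is why the existing "return -1" error paths in the emulation routines can stay unchanged.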