author | Eugeniy Paltsev <eugeniy.paltsev@synopsys.com> | 2019-01-30 17:32:41 +0100
---|---|---
committer | Vineet Gupta <vgupta@synopsys.com> | 2019-02-25 21:10:58 +0100
commit | 76551468833cd5c356b1d9ff4bc9393fcf768a59 (patch) | |
tree | 8e2552cc13389bc3c90c916b0342bfac7f3a90b1 /arch/arc | |
parent | ARCv2: lib: introduce memcpy optimized for unaligned access (diff) | |
ARCv2: Add explicit unaligned access support (and ability to disable too)
As of today we enable unaligned access unconditionally on ARCv2.
Put this under a Kconfig option so it can be disabled for testing,
benchmarking etc. Also while at it:
- Select HAVE_EFFICIENT_UNALIGNED_ACCESS (see the sketch below)
- Although gcc defaults to unaligned access (since GNU 2018.03), add the
  right compiler toggles to enable or disable it explicitly
- update the bootlog to print both the HW feature status (exists,
  enabled/disabled) and the SW status (used / not used)
- wire up the relaxed memcpy for unaligned access
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
[vgupta: squashed patches, handle gcc -mno-unaligned-access quirk]
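For context, a minimal sketch (not part of this patch; the helper name and buffer layout are made up) of what selecting HAVE_EFFICIENT_UNALIGNED_ACCESS buys generic kernel code: unaligned-access helpers such as get_unaligned_le32() can collapse to a single load instead of byte-wise accesses.

```c
/*
 * Hypothetical driver-style helper, for illustration only: read a 32-bit
 * little-endian length field that sits at an odd offset in a packet buffer.
 */
#include <linux/types.h>
#include <asm/unaligned.h>

static u32 pkt_payload_len(const u8 *pkt)
{
	/*
	 * When the architecture selects HAVE_EFFICIENT_UNALIGNED_ACCESS (as
	 * ARCv2 now does with ARC_USE_UNALIGNED_MEM_ACCESS=y), this is
	 * expected to compile down to a plain 32-bit load; otherwise it
	 * falls back to byte loads and shifts.
	 */
	return get_unaligned_le32(pkt + 2);	/* assumed 2-byte header before the length */
}
```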
Diffstat (limited to 'arch/arc')
mode | path | lines changed
---|---|---
-rw-r--r-- | arch/arc/Kconfig | 9
-rw-r--r-- | arch/arc/Makefile | 6
-rw-r--r-- | arch/arc/include/asm/arcregs.h | 1
-rw-r--r-- | arch/arc/include/asm/irqflags-arcv2.h | 8
-rw-r--r-- | arch/arc/kernel/head.S | 5
-rw-r--r-- | arch/arc/kernel/intc-arcv2.c | 2
-rw-r--r-- | arch/arc/kernel/setup.c | 25
-rw-r--r-- | arch/arc/kernel/troubleshoot.c | 5
-rw-r--r-- | arch/arc/lib/Makefile | 8
9 files changed, 56 insertions, 13 deletions
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index d750b302d5ab..95fb11a85566 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -386,6 +386,15 @@ config ARC_HAS_SWAPE
 
 if ISA_ARCV2
 
+config ARC_USE_UNALIGNED_MEM_ACCESS
+	bool "Enable unaligned access in HW"
+	default y
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	help
+	  The ARC HS architecture supports unaligned memory access
+	  which is disabled by default. Enable unaligned access in
+	  hardware and use software to use it
+
 config ARC_HAS_LL64
 	bool "Insn: 64bit LDD/STD"
 	help
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index df00578c279d..e2b991f75bc5 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -28,6 +28,12 @@ cflags-$(CONFIG_ARC_HAS_SWAPE)	+= -mswape
 
 ifdef CONFIG_ISA_ARCV2
 
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+cflags-y	+= -munaligned-access
+else
+cflags-y	+= -mno-unaligned-access
+endif
+
 ifndef CONFIG_ARC_HAS_LL64
 cflags-y	+= -mno-ll64
 endif
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index a27eafdc8260..0c13317af1d6 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -82,6 +82,7 @@
 #define ECR_V_DTLB_MISS		0x05
 #define ECR_V_PROTV		0x06
 #define ECR_V_TRAP		0x09
+#define ECR_V_MISALIGN		0x0d
 #endif
 
 /* DTLB Miss and Protection Violation Cause Codes */
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 8a4f77ea3238..e66d0339e1d8 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -44,7 +44,13 @@
 #define ARCV2_IRQ_DEF_PRIO	1
 
 /* seed value for status register */
-#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB	STATUS_AD_MASK
+#else
+#define __AD_ENB	0
+#endif
+
+#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | __AD_ENB | \
 					(ARCV2_IRQ_DEF_PRIO << 1))
 
 #ifndef __ASSEMBLY__
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 30e090625916..36774348d23b 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -54,7 +54,12 @@
 	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
 	; by default
 	lr	r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
 	bset	r5, r5, STATUS_AD_BIT
+#else
+	; Although disabled at reset, bootloader might have enabled it
+	bclr	r5, r5, STATUS_AD_BIT
+#endif
 	kflag	r5
 #endif
 .endm
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index cf18b3e5a934..c0d0124de089 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
 
 	/* setup status32, don't enable intr yet as kernel doesn't want */
 	tmp = read_aux_reg(ARC_REG_STATUS32);
-	tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
+	tmp |= ARCV2_IRQ_DEF_PRIO << 1;
 	tmp &= ~STATUS_IE_MASK;
 	asm volatile("kflag %0	\n"::"r"(tmp));
 }
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7b2340996cf8..2266e4ee4142 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -263,7 +263,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
 	struct bcr_identity *core = &cpu->core;
-	int i, n = 0, ua = 0;
+	int n = 0;
 
 	FIX_PTR(cpu);
 
@@ -283,16 +283,23 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 		   IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
 		   IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
-#ifdef __ARC_UNALIGNED__
-	ua = 1;
+	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
+		   IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+		   IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+		   IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS));
+
+#if defined(__ARC_UNALIGNED__) && !defined(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS)
+	/*
+	 * gcc 7.3.1 (GNU 2018.03) onwards generate unaligned access by default
+	 * but -mno-unaligned-access to disable that didn't work until gcc 8.2.1
+	 * (GNU 2019.03). So landing here implies the interim period, when
+	 * despite Kconfig being off, gcc is generating unaligned accesses which
+	 * could bomb later on. So better to disallow such broken builds
+	 */
+	BUILD_BUG_ON_MSG(1, "gcc doesn't support -mno-unaligned-access");
 #endif
 
-	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
-		   IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
-		   IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
-		   IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
-	if (i)
-		n += scnprintf(buf + n, len - n, "\n\t\t: ");
+	n += scnprintf(buf + n, len - n, "\n\t\t: ");
 
 	if (cpu->extn_mpy.ver) {
 		if (cpu->extn_mpy.ver <= 0x2) {	/* ARCompact */
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 215f515442e0..b0aa8c028331 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
 	} else if (vec == ECR_V_PROTV) {
 		if (cause_code == ECR_C_PROTV_INST_FETCH)
 			pr_cont("Execute from Non-exec Page\n");
-		else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+		else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+			 IS_ENABLED(CONFIG_ISA_ARCOMPACT))
 			pr_cont("Misaligned r/w from 0x%08lx\n", address);
 		else
 			pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
 			pr_cont("Bus Error from Data Mem\n");
 		else
 			pr_cont("Bus Error, check PRM\n");
+	} else if (vec == ECR_V_MISALIGN) {
+		pr_cont("Misaligned r/w from 0x%08lx\n", address);
 #endif
 	} else if (vec == ECR_V_TRAP) {
 		if (regs->ecr_param == 5)
diff --git a/arch/arc/lib/Makefile b/arch/arc/lib/Makefile
index b1656d156097..f7537b466b23 100644
--- a/arch/arc/lib/Makefile
+++ b/arch/arc/lib/Makefile
@@ -8,4 +8,10 @@
 lib-y	:= strchr-700.o strcpy-700.o strlen.o memcmp.o
 
 lib-$(CONFIG_ISA_ARCOMPACT)	+= memcpy-700.o memset.o strcmp.o
-lib-$(CONFIG_ISA_ARCV2)		+= memcpy-archs.o memset-archs.o strcmp-archs.o
+lib-$(CONFIG_ISA_ARCV2)		+= memset-archs.o strcmp-archs.o
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+lib-$(CONFIG_ISA_ARCV2)		+= memcpy-archs-unaligned.o
+else
+lib-$(CONFIG_ISA_ARCV2)		+= memcpy-archs.o
+endif
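A quick way to observe the two behaviours is a small user-space probe like the sketch below (not part of this patch; purely illustrative). The pointer cast makes gcc assume the address is aligned, so it emits a normal word load regardless of -m(no-)unaligned-access. With CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS=y (STATUS32.AD set) the load should just work; with the option disabled the access is expected to take the misaligned exception decoded by the new ECR_V_MISALIGN case, and the process should be killed with SIGBUS.

```c
/* Hypothetical user-space probe, for illustration only. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t words[2] = { 0x04030201, 0x08070605 };
	/* words[] is 4-byte aligned, so offsetting by one byte is definitely misaligned */
	volatile uint32_t *p = (volatile uint32_t *)((unsigned char *)words + 1);

	/* Deliberate single 32-bit load from an odd address */
	printf("read 0x%08x from %p\n", (unsigned int)*p, (void *)p);
	return 0;
}
```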