path: root/arch/arc/kernel
author		Eugeniy Paltsev <eugeniy.paltsev@synopsys.com>	2019-01-30 17:32:41 +0100
committer	Vineet Gupta <vgupta@synopsys.com>	2019-02-25 21:10:58 +0100
commit		76551468833cd5c356b1d9ff4bc9393fcf768a59 (patch)
tree		8e2552cc13389bc3c90c916b0342bfac7f3a90b1 /arch/arc/kernel
parent		ARCv2: lib: introduce memcpy optimized for unaligned access (diff)
download	linux-76551468833cd5c356b1d9ff4bc9393fcf768a59.tar.xz
		linux-76551468833cd5c356b1d9ff4bc9393fcf768a59.zip
ARCv2: Add explicit unaligned access support (and ability to disable too)
As of today we enable unaligned access unconditionally on ARCv2. Do this under a Kconfig option to allow disabling it for testing, benchmarking etc. Also while at it:
 - Select HAVE_EFFICIENT_UNALIGNED_ACCESS
 - Although gcc defaults to unaligned access (since GNU 2018.03), add the right toggles for enabling or disabling it as appropriate
 - Update the bootlog to print both the HW feature status (exists, enabled/disabled) and the SW status (used / not used)
 - Wire up the relaxed memcpy for unaligned access

Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
[vgupta: squashed patches, handle gcc -mno-unaligned-access quirk]
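For context on the HAVE_EFFICIENT_UNALIGNED_ACCESS selection mentioned above: once an architecture selects it, generic helpers such as get_unaligned_le32() can compile down to a plain word load instead of byte-wise accesses. A minimal sketch, assuming the usual <asm/unaligned.h> helpers (illustrative only, not part of this patch; the wrapper name is made up):

#include <linux/types.h>
#include <asm/unaligned.h>

/* Illustrative sketch: with HAVE_EFFICIENT_UNALIGNED_ACCESS selected this
 * typically becomes a single 32-bit load, which relies on the hardware
 * handling unaligned accesses when the new Kconfig option is enabled. */
static inline u32 read_any_le32(const void *p)
{
	return get_unaligned_le32(p);
}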
Diffstat (limited to 'arch/arc/kernel')
-rw-r--r--	arch/arc/kernel/head.S		 5
-rw-r--r--	arch/arc/kernel/intc-arcv2.c	 2
-rw-r--r--	arch/arc/kernel/setup.c		25
-rw-r--r--	arch/arc/kernel/troubleshoot.c	 5
4 files changed, 26 insertions, 11 deletions
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 30e090625916..36774348d23b 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -54,7 +54,12 @@
; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
; by default
lr r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
bset r5, r5, STATUS_AD_BIT
+#else
+ ; Although disabled at reset, bootloader might have enabled it
+ bclr r5, r5, STATUS_AD_BIT
+#endif
kflag r5
#endif
.endm
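The head.S hunk above programs STATUS32.AD exactly once during early boot: the AD bit is set when CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS is enabled and explicitly cleared otherwise, since a bootloader may have left it on. A hypothetical helper (not part of the patch) to check the resulting state at runtime might look like the following, reusing ARC_REG_STATUS32, STATUS_AD_MASK and read_aux_reg() from the existing arch/arc definitions (exact header placement assumed):

#include <linux/types.h>
#include <asm/arcregs.h>	/* assumed home of read_aux_reg()/STATUS_AD_MASK */

/* Hypothetical sketch: report whether HW handling of unaligned accesses
 * (STATUS32.AD) is currently enabled on this CPU. */
static inline bool arc_hw_unaligned_enabled(void)
{
	return !!(read_aux_reg(ARC_REG_STATUS32) & STATUS_AD_MASK);
}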
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index cf18b3e5a934..c0d0124de089 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
/* setup status32, don't enable intr yet as kernel doesn't want */
tmp = read_aux_reg(ARC_REG_STATUS32);
- tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
+ tmp |= ARCV2_IRQ_DEF_PRIO << 1;
tmp &= ~STATUS_IE_MASK;
asm volatile("kflag %0 \n"::"r"(tmp));
}
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 7b2340996cf8..2266e4ee4142 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -263,7 +263,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
{
struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
struct bcr_identity *core = &cpu->core;
- int i, n = 0, ua = 0;
+ int n = 0;
FIX_PTR(cpu);
@@ -283,16 +283,23 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
-#ifdef __ARC_UNALIGNED__
- ua = 1;
+ n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
+ IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+ IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+ IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS));
+
+#if defined(__ARC_UNALIGNED__) && !defined(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS)
+ /*
+ * gcc 7.3.1 (GNU 2018.03) onwards generate unaligned access by default
+ * but -mno-unaligned-access to disable that didn't work until gcc 8.2.1
+ * (GNU 2019.03). So landing here implies the interim period, when
+ * despite Kconfig being off, gcc is generating unaligned accesses which
+ * could bomb later on. So better to disallow such broken builds
+ */
+ BUILD_BUG_ON_MSG(1, "gcc doesn't support -mno-unaligned-access");
#endif
- n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
- IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
- IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
- IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
- if (i)
- n += scnprintf(buf + n, len - n, "\n\t\t: ");
+ n += scnprintf(buf + n, len - n, "\n\t\t: ");
if (cpu->extn_mpy.ver) {
if (cpu->extn_mpy.ver <= 0x2) { /* ARCompact */
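The reworked bootlog line above leans on the IS_AVAIL2() helper to report both pieces of information the changelog mentions: whether the hardware feature exists and whether the kernel is configured to use it. A sketch of how such helpers can be written (the real macros live elsewhere in the arch/arc headers and may differ in detail):

#include <linux/kconfig.h>	/* IS_ENABLED() */

/* Sketch only: emit the feature name when the HW reports it, and append a
 * "(not used)" tag when the matching Kconfig option is off. Each IS_AVAIL2()
 * expands to two arguments, which is why the format string above uses a
 * pair of %s per feature. */
#define IS_AVAIL1(hw, str)	((hw) ? (str) : "")
#define IS_USED_CFG(cfg)	(IS_ENABLED(cfg) ? "" : "(not used) ")
#define IS_AVAIL2(hw, str, cfg)	IS_AVAIL1(hw, str), IS_USED_CFG(cfg)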
diff --git a/arch/arc/kernel/troubleshoot.c b/arch/arc/kernel/troubleshoot.c
index 215f515442e0..b0aa8c028331 100644
--- a/arch/arc/kernel/troubleshoot.c
+++ b/arch/arc/kernel/troubleshoot.c
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
} else if (vec == ECR_V_PROTV) {
if (cause_code == ECR_C_PROTV_INST_FETCH)
pr_cont("Execute from Non-exec Page\n");
- else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+ else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+ IS_ENABLED(CONFIG_ISA_ARCOMPACT))
pr_cont("Misaligned r/w from 0x%08lx\n", address);
else
pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
pr_cont("Bus Error from Data Mem\n");
else
pr_cont("Bus Error, check PRM\n");
+ } else if (vec == ECR_V_MISALIGN) {
+ pr_cont("Misaligned r/w from 0x%08lx\n", address);
#endif
} else if (vec == ECR_V_TRAP) {
if (regs->ecr_param == 5)
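With CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS disabled, a misaligned load or store on ARCv2 is no longer fixed up in hardware and instead hits the Misaligned vector reported above. A throwaway user-space snippet to provoke such an access (illustrative only; it deliberately dereferences a misaligned pointer, which is undefined behaviour in ISO C, and what the user actually sees depends on how the kernel handles the resulting fault):

#include <stdio.h>

int main(void)
{
	char buf[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
	/* misaligned by one byte: whether the word load below is handled in
	 * HW or faults depends on the unaligned-access setup in this patch */
	volatile unsigned int *p = (volatile unsigned int *)(buf + 1);

	printf("0x%x\n", *p);
	return 0;
}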