Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/Kconfig | 12
-rw-r--r--  arch/arm/Kconfig.debug | 33
-rw-r--r--  arch/arm/Makefile | 6
-rw-r--r--  arch/arm/boot/compressed/string.c | 5
-rw-r--r--  arch/arm/boot/compressed/vmlinux.lds.S | 8
-rw-r--r--  arch/arm/common/bL_switcher_dummy_if.c | 4
-rw-r--r--  arch/arm/common/sa1111.c | 328
-rw-r--r--  arch/arm/include/asm/exception.h | 3
-rw-r--r--  arch/arm/include/asm/glue-cache.h | 4
-rw-r--r--  arch/arm/include/asm/hardware/cache-b15-rac.h | 10
-rw-r--r--  arch/arm/include/asm/hardware/sa1111.h | 32
-rw-r--r--  arch/arm/include/asm/memory.h | 1
-rw-r--r--  arch/arm/include/asm/ptdump.h | 43
-rw-r--r--  arch/arm/include/asm/sections.h | 21
-rw-r--r--  arch/arm/include/asm/string.h | 14
-rw-r--r--  arch/arm/include/asm/traps.h | 12
-rw-r--r--  arch/arm/include/asm/unified.h | 77
-rw-r--r--  arch/arm/kernel/armksyms.c | 1
-rw-r--r--  arch/arm/kernel/entry-armv.S | 6
-rw-r--r--  arch/arm/kernel/entry-common.S | 1
-rw-r--r--  arch/arm/kernel/head-common.S | 5
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c | 10
-rw-r--r--  arch/arm/kernel/smp.c | 3
-rw-r--r--  arch/arm/kernel/stacktrace.c | 14
-rw-r--r--  arch/arm/kernel/traps.c | 4
-rw-r--r--  arch/arm/kernel/vmlinux-xip.lds.S | 6
-rw-r--r--  arch/arm/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/arm/lib/Makefile | 2
-rw-r--r--  arch/arm/lib/memzero.S | 137
-rw-r--r--  arch/arm/mm/Kconfig | 8
-rw-r--r--  arch/arm/mm/Makefile | 4
-rw-r--r--  arch/arm/mm/cache-b15-rac.c | 356
-rw-r--r--  arch/arm/mm/cache-v7.S | 21
-rw-r--r--  arch/arm/mm/dump.c | 151
-rw-r--r--  arch/arm/mm/fault.c | 5
-rw-r--r--  arch/arm/mm/idmap.c | 4
-rw-r--r--  arch/arm/mm/init.c | 2
-rw-r--r--  arch/arm/mm/nommu.c | 4
-rw-r--r--  arch/arm/mm/pmsa-v7.c | 4
-rw-r--r--  arch/arm/mm/proc-v7.S | 6
-rw-r--r--  arch/arm/mm/ptdump_debugfs.c | 34
-rw-r--r--  arch/arm/net/bpf_jit_32.c | 225
-rw-r--r--  arch/arm/probes/kprobes/core.c | 14
43 files changed, 988 insertions(+), 658 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 51c8df561077..fea5ae8ecd0d 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -3,8 +3,8 @@ config ARM
bool
default y
select ARCH_CLOCKSOURCE_DATA
- select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID
- select ARCH_HAS_DEBUG_VIRTUAL
+ select ARCH_DISCARD_MEMBLOCK if !HAVE_ARCH_PFN_VALID && !KEXEC
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
select ARCH_HAS_DEVMEM_IS_ALLOWED
select ARCH_HAS_ELF_RANDOMIZE
select ARCH_HAS_SET_MEMORY
@@ -99,6 +99,7 @@ config ARM
select OLD_SIGACTION
select OLD_SIGSUSPEND3
select PERF_USE_VMALLOC
+ select REFCOUNT_FULL
select RTC_LIB
select SYS_SUPPORTS_APM_EMULATION
# Above selects are sorted alphabetically; please add new ones
@@ -1524,12 +1525,10 @@ config THUMB2_KERNEL
bool "Compile the kernel in Thumb-2 mode" if !CPU_THUMBONLY
depends on (CPU_V7 || CPU_V7M) && !CPU_V6 && !CPU_V6K
default y if CPU_THUMBONLY
- select ARM_ASM_UNIFIED
select ARM_UNWIND
help
By enabling this option, the kernel will be compiled in
- Thumb-2 mode. A compiler/assembler that understand the unified
- ARM-Thumb syntax is needed.
+ Thumb-2 mode.
If unsure, say N.
@@ -1564,9 +1563,6 @@ config THUMB2_AVOID_R_ARM_THM_JUMP11
Unless you are sure your tools don't have this problem, say Y.
-config ARM_ASM_UNIFIED
- bool
-
config ARM_PATCH_IDIV
bool "Runtime patch udiv/sdiv instructions into __aeabi_{u}idiv()"
depends on CPU_32v7 && !XIP_KERNEL
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 17685e19aed8..78a647080ebc 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -3,10 +3,14 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
-config ARM_PTDUMP
+config ARM_PTDUMP_CORE
+ def_bool n
+
+config ARM_PTDUMP_DEBUGFS
bool "Export kernel pagetable layout to userspace via debugfs"
depends on DEBUG_KERNEL
depends on MMU
+ select ARM_PTDUMP_CORE
select DEBUG_FS
---help---
Say Y here if you want to show the kernel pagetable layout in a
@@ -16,6 +20,33 @@ config ARM_PTDUMP
kernel.
If in doubt, say "N"
+config DEBUG_WX
+ bool "Warn on W+X mappings at boot"
+ select ARM_PTDUMP_CORE
+ ---help---
+ Generate a warning if any W+X mappings are found at boot.
+
+ This is useful for discovering cases where the kernel is leaving
+ W+X mappings after applying NX, as such mappings are a security risk.
+
+ Look for a message in dmesg output like this:
+
+ arm/mm: Checked W+X mappings: passed, no W+X pages found.
+
+ or like this, if the check failed:
+
+ arm/mm: Checked W+X mappings: FAILED, <N> W+X pages found.
+
+ Note that even if the check fails, your kernel is possibly
+ still fine: W+X mappings are not a security hole in
+ themselves; what they do is make the exploitation of other
+ unfixed kernel bugs easier.
+
+ There is no runtime or memory usage effect of this option
+ once the kernel has booted up - it's a one time check.
+
+ If in doubt, say "Y".
+
# RMK wants arm kernels compiled with frame pointers or stack unwinding.
# If you know what you are doing and are willing to live without stack
# traces, you can get a slightly smaller kernel by setting this option to
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 80351e505fd5..e83f5161fdd8 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -115,9 +115,11 @@ ifeq ($(CONFIG_ARM_UNWIND),y)
CFLAGS_ABI +=-funwind-tables
endif
+# Accept old syntax despite ".syntax unified"
+AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
+
ifeq ($(CONFIG_THUMB2_KERNEL),y)
AFLAGS_AUTOIT :=$(call as-option,-Wa$(comma)-mimplicit-it=always,-Wa$(comma)-mauto-it)
-AFLAGS_NOWARN :=$(call as-option,-Wa$(comma)-mno-warn-deprecated,-Wa$(comma)-W)
CFLAGS_ISA :=-mthumb $(AFLAGS_AUTOIT) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA) -Wa$(comma)-mthumb
# Work around buggy relocation from gas if requested:
@@ -125,7 +127,7 @@ ifeq ($(CONFIG_THUMB2_AVOID_R_ARM_THM_JUMP11),y)
KBUILD_CFLAGS_MODULE +=-fno-optimize-sibling-calls
endif
else
-CFLAGS_ISA :=$(call cc-option,-marm,)
+CFLAGS_ISA :=$(call cc-option,-marm,) $(AFLAGS_NOWARN)
AFLAGS_ISA :=$(CFLAGS_ISA)
endif
diff --git a/arch/arm/boot/compressed/string.c b/arch/arm/boot/compressed/string.c
index 309e1bbad75d..13c90abc68d6 100644
--- a/arch/arm/boot/compressed/string.c
+++ b/arch/arm/boot/compressed/string.c
@@ -130,8 +130,3 @@ void *memset(void *s, int c, size_t count)
*xs++ = c;
return s;
}
-
-void __memzero(void *s, size_t count)
-{
- memset(s, 0, count);
-}
diff --git a/arch/arm/boot/compressed/vmlinux.lds.S b/arch/arm/boot/compressed/vmlinux.lds.S
index e6bf6774c4bb..2b963d8e76dd 100644
--- a/arch/arm/boot/compressed/vmlinux.lds.S
+++ b/arch/arm/boot/compressed/vmlinux.lds.S
@@ -56,6 +56,7 @@ SECTIONS
.rodata : {
*(.rodata)
*(.rodata.*)
+ *(.data.rel.ro)
}
.piggydata : {
*(.piggydata)
@@ -101,6 +102,12 @@ SECTIONS
* this symbol allows further debug in the near future.
*/
.image_end (NOLOAD) : {
+ /*
+ * EFI requires that the image is aligned to 512 bytes, and appended
+ * DTB requires that we know where the end of the image is. Ensure
+ * that both are satisfied by ensuring that there are no additional
+ * sections emitted into the decompressor image.
+ */
_edata_real = .;
}
@@ -128,3 +135,4 @@ SECTIONS
.stab.indexstr 0 : { *(.stab.indexstr) }
.comment 0 : { *(.comment) }
}
+ASSERT(_edata_real == _edata, "error: zImage file size is incorrect");
diff --git a/arch/arm/common/bL_switcher_dummy_if.c b/arch/arm/common/bL_switcher_dummy_if.c
index 4c10c6452678..f4dc1714a79e 100644
--- a/arch/arm/common/bL_switcher_dummy_if.c
+++ b/arch/arm/common/bL_switcher_dummy_if.c
@@ -57,3 +57,7 @@ static struct miscdevice bL_switcher_device = {
&bL_switcher_fops
};
module_misc_device(bL_switcher_device);
+
+MODULE_AUTHOR("Nicolas Pitre <nico@linaro.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("big.LITTLE switcher dummy user interface");
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index 4ecd5120fce7..a2c878769eaf 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -108,6 +108,7 @@ struct sa1111 {
spinlock_t lock;
void __iomem *base;
struct sa1111_platform_data *pdata;
+ struct irq_domain *irqdomain;
struct gpio_chip gc;
#ifdef CONFIG_PM
void *saved_state;
@@ -125,7 +126,7 @@ struct sa1111_dev_info {
unsigned long skpcr_mask;
bool dma;
unsigned int devid;
- unsigned int irq[6];
+ unsigned int hwirq[6];
};
static struct sa1111_dev_info sa1111_devices[] = {
@@ -134,7 +135,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.skpcr_mask = SKPCR_UCLKEN,
.dma = true,
.devid = SA1111_DEVID_USB,
- .irq = {
+ .hwirq = {
IRQ_USBPWR,
IRQ_HCIM,
IRQ_HCIBUFFACC,
@@ -148,7 +149,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.skpcr_mask = SKPCR_I2SCLKEN | SKPCR_L3CLKEN,
.dma = true,
.devid = SA1111_DEVID_SAC,
- .irq = {
+ .hwirq = {
AUDXMTDMADONEA,
AUDXMTDMADONEB,
AUDRCVDMADONEA,
@@ -164,7 +165,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.offset = SA1111_KBD,
.skpcr_mask = SKPCR_PTCLKEN,
.devid = SA1111_DEVID_PS2_KBD,
- .irq = {
+ .hwirq = {
IRQ_TPRXINT,
IRQ_TPTXINT
},
@@ -173,7 +174,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.offset = SA1111_MSE,
.skpcr_mask = SKPCR_PMCLKEN,
.devid = SA1111_DEVID_PS2_MSE,
- .irq = {
+ .hwirq = {
IRQ_MSRXINT,
IRQ_MSTXINT
},
@@ -182,7 +183,7 @@ static struct sa1111_dev_info sa1111_devices[] = {
.offset = 0x1800,
.skpcr_mask = 0,
.devid = SA1111_DEVID_PCMCIA,
- .irq = {
+ .hwirq = {
IRQ_S0_READY_NINT,
IRQ_S0_CD_VALID,
IRQ_S0_BVD1_STSCHG,
@@ -193,6 +194,19 @@ static struct sa1111_dev_info sa1111_devices[] = {
},
};
+static int sa1111_map_irq(struct sa1111 *sachip, irq_hw_number_t hwirq)
+{
+ return irq_create_mapping(sachip->irqdomain, hwirq);
+}
+
+static void sa1111_handle_irqdomain(struct irq_domain *irqdomain, int irq)
+{
+ struct irq_desc *d = irq_to_desc(irq_linear_revmap(irqdomain, irq));
+
+ if (d)
+ generic_handle_irq_desc(d);
+}
+
/*
* SA1111 interrupt support. Since clearing an IRQ while there are
* active IRQs causes the interrupt output to pulse, the upper levels
@@ -202,49 +216,45 @@ static void sa1111_irq_handler(struct irq_desc *desc)
{
unsigned int stat0, stat1, i;
struct sa1111 *sachip = irq_desc_get_handler_data(desc);
+ struct irq_domain *irqdomain;
void __iomem *mapbase = sachip->base + SA1111_INTC;
- stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
- stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1);
+ stat0 = readl_relaxed(mapbase + SA1111_INTSTATCLR0);
+ stat1 = readl_relaxed(mapbase + SA1111_INTSTATCLR1);
- sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);
+ writel_relaxed(stat0, mapbase + SA1111_INTSTATCLR0);
desc->irq_data.chip->irq_ack(&desc->irq_data);
- sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);
+ writel_relaxed(stat1, mapbase + SA1111_INTSTATCLR1);
if (stat0 == 0 && stat1 == 0) {
do_bad_IRQ(desc);
return;
}
+ irqdomain = sachip->irqdomain;
+
for (i = 0; stat0; i++, stat0 >>= 1)
if (stat0 & 1)
- generic_handle_irq(i + sachip->irq_base);
+ sa1111_handle_irqdomain(irqdomain, i);
for (i = 32; stat1; i++, stat1 >>= 1)
if (stat1 & 1)
- generic_handle_irq(i + sachip->irq_base);
+ sa1111_handle_irqdomain(irqdomain, i);
/* For level-based interrupts */
desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
-#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
-#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
-
static u32 sa1111_irqmask(struct irq_data *d)
{
- struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
-
- return BIT((d->irq - sachip->irq_base) & 31);
+ return BIT(irqd_to_hwirq(d) & 31);
}
static int sa1111_irqbank(struct irq_data *d)
{
- struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
-
- return ((d->irq - sachip->irq_base) / 32) * 4;
+ return (irqd_to_hwirq(d) / 32) * 4;
}
static void sa1111_ack_irq(struct irq_data *d)
@@ -257,9 +267,9 @@ static void sa1111_mask_irq(struct irq_data *d)
void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
u32 ie;
- ie = sa1111_readl(mapbase + SA1111_INTEN0);
+ ie = readl_relaxed(mapbase + SA1111_INTEN0);
ie &= ~sa1111_irqmask(d);
- sa1111_writel(ie, mapbase + SA1111_INTEN0);
+ writel(ie, mapbase + SA1111_INTEN0);
}
static void sa1111_unmask_irq(struct irq_data *d)
@@ -268,9 +278,9 @@ static void sa1111_unmask_irq(struct irq_data *d)
void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
u32 ie;
- ie = sa1111_readl(mapbase + SA1111_INTEN0);
+ ie = readl_relaxed(mapbase + SA1111_INTEN0);
ie |= sa1111_irqmask(d);
- sa1111_writel(ie, mapbase + SA1111_INTEN0);
+ writel_relaxed(ie, mapbase + SA1111_INTEN0);
}
/*
@@ -287,11 +297,11 @@ static int sa1111_retrigger_irq(struct irq_data *d)
u32 ip, mask = sa1111_irqmask(d);
int i;
- ip = sa1111_readl(mapbase + SA1111_INTPOL0);
+ ip = readl_relaxed(mapbase + SA1111_INTPOL0);
for (i = 0; i < 8; i++) {
- sa1111_writel(ip ^ mask, mapbase + SA1111_INTPOL0);
- sa1111_writel(ip, mapbase + SA1111_INTPOL0);
- if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
+ writel_relaxed(ip ^ mask, mapbase + SA1111_INTPOL0);
+ writel_relaxed(ip, mapbase + SA1111_INTPOL0);
+ if (readl_relaxed(mapbase + SA1111_INTSTATCLR0) & mask)
break;
}
@@ -313,13 +323,13 @@ static int sa1111_type_irq(struct irq_data *d, unsigned int flags)
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
- ip = sa1111_readl(mapbase + SA1111_INTPOL0);
+ ip = readl_relaxed(mapbase + SA1111_INTPOL0);
if (flags & IRQ_TYPE_EDGE_RISING)
ip &= ~mask;
else
ip |= mask;
- sa1111_writel(ip, mapbase + SA1111_INTPOL0);
- sa1111_writel(ip, mapbase + SA1111_WAKEPOL0);
+ writel_relaxed(ip, mapbase + SA1111_INTPOL0);
+ writel_relaxed(ip, mapbase + SA1111_WAKEPOL0);
return 0;
}
@@ -330,12 +340,12 @@ static int sa1111_wake_irq(struct irq_data *d, unsigned int on)
void __iomem *mapbase = sachip->base + SA1111_INTC + sa1111_irqbank(d);
u32 we, mask = sa1111_irqmask(d);
- we = sa1111_readl(mapbase + SA1111_WAKEEN0);
+ we = readl_relaxed(mapbase + SA1111_WAKEEN0);
if (on)
we |= mask;
else
we &= ~mask;
- sa1111_writel(we, mapbase + SA1111_WAKEEN0);
+ writel_relaxed(we, mapbase + SA1111_WAKEEN0);
return 0;
}
@@ -350,10 +360,30 @@ static struct irq_chip sa1111_irq_chip = {
.irq_set_wake = sa1111_wake_irq,
};
+static int sa1111_irqdomain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct sa1111 *sachip = d->host_data;
+
+ /* Disallow unavailable interrupts */
+ if (hwirq > SSPROR && hwirq < AUDXMTDMADONEA)
+ return -EINVAL;
+
+ irq_set_chip_data(irq, sachip);
+ irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
+ irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
+
+ return 0;
+}
+
+static const struct irq_domain_ops sa1111_irqdomain_ops = {
+ .map = sa1111_irqdomain_map,
+ .xlate = irq_domain_xlate_twocell,
+};
+
static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
{
void __iomem *irqbase = sachip->base + SA1111_INTC;
- unsigned i, irq;
int ret;
/*
@@ -373,38 +403,40 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
sachip->irq_base = ret;
/* disable all IRQs */
- sa1111_writel(0, irqbase + SA1111_INTEN0);
- sa1111_writel(0, irqbase + SA1111_INTEN1);
- sa1111_writel(0, irqbase + SA1111_WAKEEN0);
- sa1111_writel(0, irqbase + SA1111_WAKEEN1);
+ writel_relaxed(0, irqbase + SA1111_INTEN0);
+ writel_relaxed(0, irqbase + SA1111_INTEN1);
+ writel_relaxed(0, irqbase + SA1111_WAKEEN0);
+ writel_relaxed(0, irqbase + SA1111_WAKEEN1);
/*
* detect on rising edge. Note: Feb 2001 Errata for SA1111
* specifies that S0ReadyInt and S1ReadyInt should be '1'.
*/
- sa1111_writel(0, irqbase + SA1111_INTPOL0);
- sa1111_writel(BIT(IRQ_S0_READY_NINT & 31) |
- BIT(IRQ_S1_READY_NINT & 31),
- irqbase + SA1111_INTPOL1);
+ writel_relaxed(0, irqbase + SA1111_INTPOL0);
+ writel_relaxed(BIT(IRQ_S0_READY_NINT & 31) |
+ BIT(IRQ_S1_READY_NINT & 31),
+ irqbase + SA1111_INTPOL1);
/* clear all IRQs */
- sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0);
- sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);
-
- for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
- irq = sachip->irq_base + i;
- irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
- irq_set_chip_data(irq, sachip);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
- }
+ writel_relaxed(~0, irqbase + SA1111_INTSTATCLR0);
+ writel_relaxed(~0, irqbase + SA1111_INTSTATCLR1);
- for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
- irq = sachip->irq_base + i;
- irq_set_chip_and_handler(irq, &sa1111_irq_chip, handle_edge_irq);
- irq_set_chip_data(irq, sachip);
- irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
+ sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR,
+ &sa1111_irqdomain_ops,
+ sachip);
+ if (!sachip->irqdomain) {
+ irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
+ return -ENOMEM;
}
+ irq_domain_associate_many(sachip->irqdomain,
+ sachip->irq_base + IRQ_GPAIN0,
+ IRQ_GPAIN0, SSPROR + 1 - IRQ_GPAIN0);
+ irq_domain_associate_many(sachip->irqdomain,
+ sachip->irq_base + AUDXMTDMADONEA,
+ AUDXMTDMADONEA,
+ IRQ_S1_BVD1_STSCHG + 1 - AUDXMTDMADONEA);
+
/*
* Register SA1111 interrupt
*/
@@ -420,20 +452,22 @@ static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
static void sa1111_remove_irq(struct sa1111 *sachip)
{
+ struct irq_domain *domain = sachip->irqdomain;
void __iomem *irqbase = sachip->base + SA1111_INTC;
+ int i;
/* disable all IRQs */
- sa1111_writel(0, irqbase + SA1111_INTEN0);
- sa1111_writel(0, irqbase + SA1111_INTEN1);
- sa1111_writel(0, irqbase + SA1111_WAKEEN0);
- sa1111_writel(0, irqbase + SA1111_WAKEEN1);
+ writel_relaxed(0, irqbase + SA1111_INTEN0);
+ writel_relaxed(0, irqbase + SA1111_INTEN1);
+ writel_relaxed(0, irqbase + SA1111_WAKEEN0);
+ writel_relaxed(0, irqbase + SA1111_WAKEEN1);
- if (sachip->irq != NO_IRQ) {
- irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
- irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);
+ irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
+ for (i = 0; i < SA1111_IRQ_NR; i++)
+ irq_dispose_mapping(irq_find_mapping(domain, i));
+ irq_domain_remove(domain);
- release_mem_region(sachip->phys + SA1111_INTC, 512);
- }
+ release_mem_region(sachip->phys + SA1111_INTC, 512);
}
enum {
@@ -572,7 +606,7 @@ static int sa1111_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
struct sa1111 *sachip = gc_to_sa1111(gc);
- return sachip->irq_base + offset;
+ return sa1111_map_irq(sachip, offset);
}
static int sa1111_setup_gpios(struct sa1111 *sachip)
@@ -618,11 +652,11 @@ static void sa1111_wake(struct sa1111 *sachip)
/*
* Turn VCO on, and disable PLL Bypass.
*/
- r = sa1111_readl(sachip->base + SA1111_SKCR);
+ r = readl_relaxed(sachip->base + SA1111_SKCR);
r &= ~SKCR_VCO_OFF;
- sa1111_writel(r, sachip->base + SA1111_SKCR);
+ writel_relaxed(r, sachip->base + SA1111_SKCR);
r |= SKCR_PLL_BYPASS | SKCR_OE_EN;
- sa1111_writel(r, sachip->base + SA1111_SKCR);
+ writel_relaxed(r, sachip->base + SA1111_SKCR);
/*
* Wait lock time. SA1111 manual _doesn't_
@@ -634,7 +668,7 @@ static void sa1111_wake(struct sa1111 *sachip)
* Enable RCLK. We also ensure that RDYEN is set.
*/
r |= SKCR_RCLKEN | SKCR_RDYEN;
- sa1111_writel(r, sachip->base + SA1111_SKCR);
+ writel_relaxed(r, sachip->base + SA1111_SKCR);
/*
* Wait 14 RCLK cycles for the chip to finish coming out
@@ -645,7 +679,7 @@ static void sa1111_wake(struct sa1111 *sachip)
/*
* Ensure all clocks are initially off.
*/
- sa1111_writel(0, sachip->base + SA1111_SKPCR);
+ writel_relaxed(0, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
@@ -675,7 +709,7 @@ sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
if (cas_latency == 3)
smcr |= SMCR_CLAT;
- sa1111_writel(smcr, sachip->base + SA1111_SMCR);
+ writel_relaxed(smcr, sachip->base + SA1111_SMCR);
/*
* Now clear the bits in the DMA mask to work around the SA1111
@@ -723,8 +757,8 @@ sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
dev->mapbase = sachip->base + info->offset;
dev->skpcr_mask = info->skpcr_mask;
- for (i = 0; i < ARRAY_SIZE(info->irq); i++)
- dev->irq[i] = sachip->irq_base + info->irq[i];
+ for (i = 0; i < ARRAY_SIZE(info->hwirq); i++)
+ dev->hwirq[i] = info->hwirq[i];
/*
* If the parent device has a DMA mask associated with it, and
@@ -814,7 +848,7 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
/*
* Probe for the chip. Only touch the SBI registers.
*/
- id = sa1111_readl(sachip->base + SA1111_SKID);
+ id = readl_relaxed(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id);
ret = -ENODEV;
@@ -833,11 +867,9 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
* The interrupt controller must be initialised before any
* other device to ensure that the interrupts are available.
*/
- if (sachip->irq != NO_IRQ) {
- ret = sa1111_setup_irq(sachip, pd->irq_base);
- if (ret)
- goto err_clk;
- }
+ ret = sa1111_setup_irq(sachip, pd->irq_base);
+ if (ret)
+ goto err_clk;
/* Setup the GPIOs - should really be done after the IRQ setup */
ret = sa1111_setup_gpios(sachip);
@@ -864,8 +896,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
* DMA. It can otherwise be held firmly in the off position.
* (currently, we always enable it.)
*/
- val = sa1111_readl(sachip->base + SA1111_SKPCR);
- sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);
+ val = readl_relaxed(sachip->base + SA1111_SKPCR);
+ writel_relaxed(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);
/*
* Enable the SA1110 memory bus request and grant signals.
@@ -962,31 +994,31 @@ static int sa1111_suspend_noirq(struct device *dev)
* Save state.
*/
base = sachip->base;
- save->skcr = sa1111_readl(base + SA1111_SKCR);
- save->skpcr = sa1111_readl(base + SA1111_SKPCR);
- save->skcdr = sa1111_readl(base + SA1111_SKCDR);
- save->skaud = sa1111_readl(base + SA1111_SKAUD);
- save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0);
- save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1);
+ save->skcr = readl_relaxed(base + SA1111_SKCR);
+ save->skpcr = readl_relaxed(base + SA1111_SKPCR);
+ save->skcdr = readl_relaxed(base + SA1111_SKCDR);
+ save->skaud = readl_relaxed(base + SA1111_SKAUD);
+ save->skpwm0 = readl_relaxed(base + SA1111_SKPWM0);
+ save->skpwm1 = readl_relaxed(base + SA1111_SKPWM1);
- sa1111_writel(0, sachip->base + SA1111_SKPWM0);
- sa1111_writel(0, sachip->base + SA1111_SKPWM1);
+ writel_relaxed(0, sachip->base + SA1111_SKPWM0);
+ writel_relaxed(0, sachip->base + SA1111_SKPWM1);
base = sachip->base + SA1111_INTC;
- save->intpol0 = sa1111_readl(base + SA1111_INTPOL0);
- save->intpol1 = sa1111_readl(base + SA1111_INTPOL1);
- save->inten0 = sa1111_readl(base + SA1111_INTEN0);
- save->inten1 = sa1111_readl(base + SA1111_INTEN1);
- save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0);
- save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1);
- save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0);
- save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1);
+ save->intpol0 = readl_relaxed(base + SA1111_INTPOL0);
+ save->intpol1 = readl_relaxed(base + SA1111_INTPOL1);
+ save->inten0 = readl_relaxed(base + SA1111_INTEN0);
+ save->inten1 = readl_relaxed(base + SA1111_INTEN1);
+ save->wakepol0 = readl_relaxed(base + SA1111_WAKEPOL0);
+ save->wakepol1 = readl_relaxed(base + SA1111_WAKEPOL1);
+ save->wakeen0 = readl_relaxed(base + SA1111_WAKEEN0);
+ save->wakeen1 = readl_relaxed(base + SA1111_WAKEEN1);
/*
* Disable.
*/
- val = sa1111_readl(sachip->base + SA1111_SKCR);
- sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);
+ val = readl_relaxed(sachip->base + SA1111_SKCR);
+ writel_relaxed(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);
clk_disable(sachip->clk);
@@ -1023,7 +1055,7 @@ static int sa1111_resume_noirq(struct device *dev)
* Ensure that the SA1111 is still here.
* FIXME: shouldn't do this here.
*/
- id = sa1111_readl(sachip->base + SA1111_SKID);
+ id = readl_relaxed(sachip->base + SA1111_SKID);
if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
__sa1111_remove(sachip);
dev_set_drvdata(dev, NULL);
@@ -1047,26 +1079,26 @@ static int sa1111_resume_noirq(struct device *dev)
*/
spin_lock_irqsave(&sachip->lock, flags);
- sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
- sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
+ writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
+ writel_relaxed(0, sachip->base + SA1111_INTC + SA1111_INTEN1);
base = sachip->base;
- sa1111_writel(save->skcr, base + SA1111_SKCR);
- sa1111_writel(save->skpcr, base + SA1111_SKPCR);
- sa1111_writel(save->skcdr, base + SA1111_SKCDR);
- sa1111_writel(save->skaud, base + SA1111_SKAUD);
- sa1111_writel(save->skpwm0, base + SA1111_SKPWM0);
- sa1111_writel(save->skpwm1, base + SA1111_SKPWM1);
+ writel_relaxed(save->skcr, base + SA1111_SKCR);
+ writel_relaxed(save->skpcr, base + SA1111_SKPCR);
+ writel_relaxed(save->skcdr, base + SA1111_SKCDR);
+ writel_relaxed(save->skaud, base + SA1111_SKAUD);
+ writel_relaxed(save->skpwm0, base + SA1111_SKPWM0);
+ writel_relaxed(save->skpwm1, base + SA1111_SKPWM1);
base = sachip->base + SA1111_INTC;
- sa1111_writel(save->intpol0, base + SA1111_INTPOL0);
- sa1111_writel(save->intpol1, base + SA1111_INTPOL1);
- sa1111_writel(save->inten0, base + SA1111_INTEN0);
- sa1111_writel(save->inten1, base + SA1111_INTEN1);
- sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0);
- sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1);
- sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0);
- sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1);
+ writel_relaxed(save->intpol0, base + SA1111_INTPOL0);
+ writel_relaxed(save->intpol1, base + SA1111_INTPOL1);
+ writel_relaxed(save->inten0, base + SA1111_INTEN0);
+ writel_relaxed(save->inten1, base + SA1111_INTEN1);
+ writel_relaxed(save->wakepol0, base + SA1111_WAKEPOL0);
+ writel_relaxed(save->wakepol1, base + SA1111_WAKEPOL1);
+ writel_relaxed(save->wakeen0, base + SA1111_WAKEEN0);
+ writel_relaxed(save->wakeen1, base + SA1111_WAKEEN1);
spin_unlock_irqrestore(&sachip->lock, flags);
@@ -1153,7 +1185,7 @@ static unsigned int __sa1111_pll_clock(struct sa1111 *sachip)
{
unsigned int skcdr, fbdiv, ipdiv, opdiv;
- skcdr = sa1111_readl(sachip->base + SA1111_SKCDR);
+ skcdr = readl_relaxed(sachip->base + SA1111_SKCDR);
fbdiv = (skcdr & 0x007f) + 2;
ipdiv = ((skcdr & 0x0f80) >> 7) + 2;
@@ -1195,13 +1227,13 @@ void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
spin_lock_irqsave(&sachip->lock, flags);
- val = sa1111_readl(sachip->base + SA1111_SKCR);
+ val = readl_relaxed(sachip->base + SA1111_SKCR);
if (mode == SA1111_AUDIO_I2S) {
val &= ~SKCR_SELAC;
} else {
val |= SKCR_SELAC;
}
- sa1111_writel(val, sachip->base + SA1111_SKCR);
+ writel_relaxed(val, sachip->base + SA1111_SKCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
@@ -1226,7 +1258,7 @@ int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
if (div > 128)
div = 128;
- sa1111_writel(div - 1, sachip->base + SA1111_SKAUD);
+ writel_relaxed(div - 1, sachip->base + SA1111_SKAUD);
return 0;
}
@@ -1244,7 +1276,7 @@ int sa1111_get_audio_rate(struct sa1111_dev *sadev)
if (sadev->devid != SA1111_DEVID_SAC)
return -EINVAL;
- div = sa1111_readl(sachip->base + SA1111_SKAUD) + 1;
+ div = readl_relaxed(sachip->base + SA1111_SKAUD) + 1;
return __sa1111_pll_clock(sachip) / (256 * div);
}
@@ -1261,10 +1293,10 @@ void sa1111_set_io_dir(struct sa1111_dev *sadev,
#define MODIFY_BITS(port, mask, dir) \
if (mask) { \
- val = sa1111_readl(port); \
+ val = readl_relaxed(port); \
val &= ~(mask); \
val |= (dir) & (mask); \
- sa1111_writel(val, port); \
+ writel_relaxed(val, port); \
}
spin_lock_irqsave(&sachip->lock, flags);
@@ -1329,8 +1361,8 @@ int sa1111_enable_device(struct sa1111_dev *sadev)
if (ret == 0) {
spin_lock_irqsave(&sachip->lock, flags);
- val = sa1111_readl(sachip->base + SA1111_SKPCR);
- sa1111_writel(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
+ val = readl_relaxed(sachip->base + SA1111_SKPCR);
+ writel_relaxed(val | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
}
return ret;
@@ -1348,8 +1380,8 @@ void sa1111_disable_device(struct sa1111_dev *sadev)
unsigned int val;
spin_lock_irqsave(&sachip->lock, flags);
- val = sa1111_readl(sachip->base + SA1111_SKPCR);
- sa1111_writel(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
+ val = readl_relaxed(sachip->base + SA1111_SKPCR);
+ writel_relaxed(val & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
spin_unlock_irqrestore(&sachip->lock, flags);
if (sachip->pdata && sachip->pdata->disable)
@@ -1359,9 +1391,10 @@ EXPORT_SYMBOL(sa1111_disable_device);
int sa1111_get_irq(struct sa1111_dev *sadev, unsigned num)
{
- if (num >= ARRAY_SIZE(sadev->irq))
+ struct sa1111 *sachip = sa1111_chip_driver(sadev);
+ if (num >= ARRAY_SIZE(sadev->hwirq))
return -EINVAL;
- return sadev->irq[num];
+ return sa1111_map_irq(sachip, sadev->hwirq[num]);
}
EXPORT_SYMBOL_GPL(sa1111_get_irq);
@@ -1379,36 +1412,6 @@ static int sa1111_match(struct device *_dev, struct device_driver *_drv)
return !!(dev->devid & drv->devid);
}
-static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
-{
- struct sa1111_dev *sadev = to_sa1111_device(dev);
- struct sa1111_driver *drv = SA1111_DRV(dev->driver);
- int ret = 0;
-
- if (drv && drv->suspend)
- ret = drv->suspend(sadev, state);
- return ret;
-}
-
-static int sa1111_bus_resume(struct device *dev)
-{
- struct sa1111_dev *sadev = to_sa1111_device(dev);
- struct sa1111_driver *drv = SA1111_DRV(dev->driver);
- int ret = 0;
-
- if (drv && drv->resume)
- ret = drv->resume(sadev);
- return ret;
-}
-
-static void sa1111_bus_shutdown(struct device *dev)
-{
- struct sa1111_driver *drv = SA1111_DRV(dev->driver);
-
- if (drv && drv->shutdown)
- drv->shutdown(to_sa1111_device(dev));
-}
-
static int sa1111_bus_probe(struct device *dev)
{
struct sa1111_dev *sadev = to_sa1111_device(dev);
@@ -1436,9 +1439,6 @@ struct bus_type sa1111_bus_type = {
.match = sa1111_match,
.probe = sa1111_bus_probe,
.remove = sa1111_bus_remove,
- .suspend = sa1111_bus_suspend,
- .resume = sa1111_bus_resume,
- .shutdown = sa1111_bus_shutdown,
};
EXPORT_SYMBOL(sa1111_bus_type);
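The net effect of the sa1111.c changes above is that interrupt numbers are no longer computed as irq_base + offset; they come from a linear irqdomain. A minimal sketch of that pattern, reusing names introduced in the diff (sa1111_irq_chip, SA1111_IRQ_NR, IRQ_USBPWR) and assuming <linux/irq.h> and <linux/irqdomain.h> are included; error handling is trimmed, and example_* names are illustrative:

static int example_irqdomain_map(struct irq_domain *d, unsigned int virq,
                                 irq_hw_number_t hwirq)
{
        /* tie the Linux virq to this chip, as sa1111_irqdomain_map() does */
        irq_set_chip_data(virq, d->host_data);
        irq_set_chip_and_handler(virq, &sa1111_irq_chip, handle_edge_irq);
        return 0;
}

static const struct irq_domain_ops example_irqdomain_ops = {
        .map = example_irqdomain_map,
};

static int example_setup(struct sa1111 *sachip)
{
        unsigned int virq;

        sachip->irqdomain = irq_domain_add_linear(NULL, SA1111_IRQ_NR,
                                                  &example_irqdomain_ops,
                                                  sachip);
        if (!sachip->irqdomain)
                return -ENOMEM;

        /* hardware IRQ -> Linux virq on demand; sa1111_map_irq() wraps this */
        virq = irq_create_mapping(sachip->irqdomain, IRQ_USBPWR);
        return virq ? 0 : -ENXIO;
}

The chained handler then converts a pending hardware bit straight back to a virq via irq_linear_revmap(), as sa1111_handle_irqdomain() in the diff shows.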
diff --git a/arch/arm/include/asm/exception.h b/arch/arm/include/asm/exception.h
index a7273ad9587a..58e039a851af 100644
--- a/arch/arm/include/asm/exception.h
+++ b/arch/arm/include/asm/exception.h
@@ -10,11 +10,10 @@
#include <linux/interrupt.h>
-#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
-#define __exception_irq_entry __exception
+#define __exception_irq_entry
#endif
#endif /* __ASM_ARM_EXCEPTION_H */
diff --git a/arch/arm/include/asm/glue-cache.h b/arch/arm/include/asm/glue-cache.h
index 01c3d92624e5..8d1f498e5dd8 100644
--- a/arch/arm/include/asm/glue-cache.h
+++ b/arch/arm/include/asm/glue-cache.h
@@ -117,6 +117,10 @@
# endif
#endif
+#if defined(CONFIG_CACHE_B15_RAC)
+# define MULTI_CACHE 1
+#endif
+
#if defined(CONFIG_CPU_V7M)
# define MULTI_CACHE 1
#endif
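Defining MULTI_CACHE for CONFIG_CACHE_B15_RAC matters because it switches the cache maintenance entry points from direct calls to the cpu_cache function-pointer table, which is how the Brahma-B15 read-ahead handling added later in this series can wrap the v7 routines. Roughly paraphrased from asm/cacheflush.h (a sketch, not a verbatim quote of that header):

#ifdef MULTI_CACHE
extern struct cpu_cache_fns cpu_cache;
/* every caller dispatches through the per-CPU-type ops table */
#define __cpuc_flush_kern_all   cpu_cache.flush_kern_all
#else
/* single cache model: resolve to the one compiled-in implementation */
#define __cpuc_flush_kern_all   __glue(_CACHE, _flush_kern_cache_all)
#endif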
diff --git a/arch/arm/include/asm/hardware/cache-b15-rac.h b/arch/arm/include/asm/hardware/cache-b15-rac.h
new file mode 100644
index 000000000000..3d43ec06fd35
--- /dev/null
+++ b/arch/arm/include/asm/hardware/cache-b15-rac.h
@@ -0,0 +1,10 @@
+#ifndef __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+#define __ASM_ARM_HARDWARE_CACHE_B15_RAC_H
+
+#ifndef __ASSEMBLY__
+
+void b15_flush_kern_cache_all(void);
+
+#endif
+
+#endif
diff --git a/arch/arm/include/asm/hardware/sa1111.h b/arch/arm/include/asm/hardware/sa1111.h
index 0bbf163d1ed3..798e520e8a49 100644
--- a/arch/arm/include/asm/hardware/sa1111.h
+++ b/arch/arm/include/asm/hardware/sa1111.h
@@ -16,33 +16,6 @@
#include <mach/bitfield.h>
/*
- * The SA1111 is always located at virtual 0xf4000000, and is always
- * "native" endian.
- */
-
-#define SA1111_VBASE 0xf4000000
-
-/* Don't use these! */
-#define SA1111_p2v( x ) ((x) - SA1111_BASE + SA1111_VBASE)
-#define SA1111_v2p( x ) ((x) - SA1111_VBASE + SA1111_BASE)
-
-#ifndef __ASSEMBLY__
-#define _SA1111(x) ((x) + sa1111->resource.start)
-#endif
-
-#define sa1111_writel(val,addr) __raw_writel(val, addr)
-#define sa1111_readl(addr) __raw_readl(addr)
-
-/*
- * 26 bits of the SA-1110 address bus are available to the SA-1111.
- * Use these when feeding target addresses to the DMA engines.
- */
-
-#define SA1111_ADDR_WIDTH (26)
-#define SA1111_ADDR_MASK ((1<<SA1111_ADDR_WIDTH)-1)
-#define SA1111_DMA_ADDR(x) ((x)&SA1111_ADDR_MASK)
-
-/*
* Don't ask the (SAC) DMA engines to move less than this amount.
*/
@@ -417,7 +390,7 @@ struct sa1111_dev {
struct resource res;
void __iomem *mapbase;
unsigned int skpcr_mask;
- unsigned int irq[6];
+ unsigned int hwirq[6];
u64 dma_mask;
};
@@ -431,9 +404,6 @@ struct sa1111_driver {
unsigned int devid;
int (*probe)(struct sa1111_dev *);
int (*remove)(struct sa1111_dev *);
- int (*suspend)(struct sa1111_dev *, pm_message_t);
- int (*resume)(struct sa1111_dev *);
- void (*shutdown)(struct sa1111_dev *);
};
#define SA1111_DRV(_d) container_of((_d), struct sa1111_driver, drv)
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 1f54e4e98c1e..496667703693 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -88,6 +88,7 @@
#else /* CONFIG_MMU */
#ifndef __ASSEMBLY__
+extern unsigned long setup_vectors_base(void);
extern unsigned long vectors_base;
#define VECTORS_BASE vectors_base
#endif
diff --git a/arch/arm/include/asm/ptdump.h b/arch/arm/include/asm/ptdump.h
new file mode 100644
index 000000000000..3ebf9718288d
--- /dev/null
+++ b/arch/arm/include/asm/ptdump.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2014 ARM Ltd. */
+#ifndef __ASM_PTDUMP_H
+#define __ASM_PTDUMP_H
+
+#ifdef CONFIG_ARM_PTDUMP_CORE
+
+#include <linux/mm_types.h>
+#include <linux/seq_file.h>
+
+struct addr_marker {
+ unsigned long start_address;
+ char *name;
+};
+
+struct ptdump_info {
+ struct mm_struct *mm;
+ const struct addr_marker *markers;
+ unsigned long base_addr;
+};
+
+void ptdump_walk_pgd(struct seq_file *s, struct ptdump_info *info);
+#ifdef CONFIG_ARM_PTDUMP_DEBUGFS
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name);
+#else
+static inline int ptdump_debugfs_register(struct ptdump_info *info,
+ const char *name)
+{
+ return 0;
+}
+#endif /* CONFIG_ARM_PTDUMP_DEBUGFS */
+
+void ptdump_check_wx(void);
+
+#endif /* CONFIG_ARM_PTDUMP_CORE */
+
+#ifdef CONFIG_DEBUG_WX
+#define debug_checkwx() ptdump_check_wx()
+#else
+#define debug_checkwx() do { } while (0)
+#endif
+
+#endif /* __ASM_PTDUMP_H */
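A hedged usage sketch of the interface declared above; the marker table, debugfs file name and initcall below are illustrative stand-ins, the real ARM users being arch/arm/mm/dump.c and ptdump_debugfs.c from the diffstat:

#include <asm/ptdump.h>

static const struct addr_marker example_markers[] = {
        { MODULES_VADDR,        "Modules" },
        { PAGE_OFFSET,          "Kernel Mapping" },
        { -1,                   NULL },
};

static struct ptdump_info example_ptdump_info = {
        .mm             = &init_mm,
        .markers        = example_markers,
        .base_addr      = 0,
};

static int __init example_ptdump_init(void)
{
        /* creates the debugfs file only when ARM_PTDUMP_DEBUGFS=y;
         * otherwise the inline stub above simply returns 0 */
        return ptdump_debugfs_register(&example_ptdump_info,
                                       "example_page_tables");
}
device_initcall(example_ptdump_init);

With CONFIG_DEBUG_WX=y, debug_checkwx() expands to ptdump_check_wx(); the two-line arch/arm/mm/init.c change in the diffstat presumably hooks it into the end-of-boot mark_rodata_ro() path, which is what produces the dmesg lines quoted in the Kconfig.debug help text.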
diff --git a/arch/arm/include/asm/sections.h b/arch/arm/include/asm/sections.h
index 63dfe1f10335..4ceb4f757d4d 100644
--- a/arch/arm/include/asm/sections.h
+++ b/arch/arm/include/asm/sections.h
@@ -6,4 +6,25 @@
extern char _exiprom[];
+extern char __idmap_text_start[];
+extern char __idmap_text_end[];
+extern char __entry_text_start[];
+extern char __entry_text_end[];
+extern char __hyp_idmap_text_start[];
+extern char __hyp_idmap_text_end[];
+
+static inline bool in_entry_text(unsigned long addr)
+{
+ return memory_contains(__entry_text_start, __entry_text_end,
+ (void *)addr, 1);
+}
+
+static inline bool in_idmap_text(unsigned long addr)
+{
+ void *a = (void *)addr;
+ return memory_contains(__idmap_text_start, __idmap_text_end, a, 1) ||
+ memory_contains(__hyp_idmap_text_start, __hyp_idmap_text_end,
+ a, 1);
+}
+
#endif /* _ASM_ARM_SECTIONS_H */
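Both helpers lean on memory_contains() from include/asm-generic/sections.h, which boils down to a simple range check, roughly:

/* true if [virt, virt + size) lies entirely inside [begin, end) */
static inline bool memory_contains(void *begin, void *end, void *virt,
                                   size_t size)
{
        return virt >= begin && virt + size <= end;
}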
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index f54a3136aac6..111a1d8a41dd 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -39,18 +39,4 @@ static inline void *memset64(uint64_t *p, uint64_t v, __kernel_size_t n)
return __memset64(p, v, n * 8, v >> 32);
}
-extern void __memzero(void *ptr, __kernel_size_t n);
-
-#define memset(p,v,n) \
- ({ \
- void *__p = (p); size_t __n = n; \
- if ((__n) != 0) { \
- if (__builtin_constant_p((v)) && (v) == 0) \
- __memzero((__p),(__n)); \
- else \
- memset((__p),(v),(__n)); \
- } \
- (__p); \
- })
-
#endif
diff --git a/arch/arm/include/asm/traps.h b/arch/arm/include/asm/traps.h
index f9a6c5fc3fd1..a00288d75ee6 100644
--- a/arch/arm/include/asm/traps.h
+++ b/arch/arm/include/asm/traps.h
@@ -28,18 +28,6 @@ static inline int __in_irqentry_text(unsigned long ptr)
ptr < (unsigned long)&__irqentry_text_end;
}
-static inline int in_exception_text(unsigned long ptr)
-{
- extern char __exception_text_start[];
- extern char __exception_text_end[];
- int in;
-
- in = ptr >= (unsigned long)&__exception_text_start &&
- ptr < (unsigned long)&__exception_text_end;
-
- return in ? : __in_irqentry_text(ptr);
-}
-
extern void __init early_trap_init(void *);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void ptrace_break(struct task_struct *tsk, struct pt_regs *regs);
diff --git a/arch/arm/include/asm/unified.h b/arch/arm/include/asm/unified.h
index a91ae499614c..2c3b952be63e 100644
--- a/arch/arm/include/asm/unified.h
+++ b/arch/arm/include/asm/unified.h
@@ -20,8 +20,10 @@
#ifndef __ASM_UNIFIED_H
#define __ASM_UNIFIED_H
-#if defined(__ASSEMBLY__) && defined(CONFIG_ARM_ASM_UNIFIED)
+#if defined(__ASSEMBLY__)
.syntax unified
+#else
+__asm__(".syntax unified");
#endif
#ifdef CONFIG_CPU_V7M
@@ -64,77 +66,4 @@
#endif /* CONFIG_THUMB2_KERNEL */
-#ifndef CONFIG_ARM_ASM_UNIFIED
-
-/*
- * If the unified assembly syntax isn't used (in ARM mode), these
- * macros expand to an empty string
- */
-#ifdef __ASSEMBLY__
- .macro it, cond
- .endm
- .macro itt, cond
- .endm
- .macro ite, cond
- .endm
- .macro ittt, cond
- .endm
- .macro itte, cond
- .endm
- .macro itet, cond
- .endm
- .macro itee, cond
- .endm
- .macro itttt, cond
- .endm
- .macro ittte, cond
- .endm
- .macro ittet, cond
- .endm
- .macro ittee, cond
- .endm
- .macro itett, cond
- .endm
- .macro itete, cond
- .endm
- .macro iteet, cond
- .endm
- .macro iteee, cond
- .endm
-#else /* !__ASSEMBLY__ */
-__asm__(
-" .macro it, cond\n"
-" .endm\n"
-" .macro itt, cond\n"
-" .endm\n"
-" .macro ite, cond\n"
-" .endm\n"
-" .macro ittt, cond\n"
-" .endm\n"
-" .macro itte, cond\n"
-" .endm\n"
-" .macro itet, cond\n"
-" .endm\n"
-" .macro itee, cond\n"
-" .endm\n"
-" .macro itttt, cond\n"
-" .endm\n"
-" .macro ittte, cond\n"
-" .endm\n"
-" .macro ittet, cond\n"
-" .endm\n"
-" .macro ittee, cond\n"
-" .endm\n"
-" .macro itett, cond\n"
-" .endm\n"
-" .macro itete, cond\n"
-" .endm\n"
-" .macro iteet, cond\n"
-" .endm\n"
-" .macro iteee, cond\n"
-" .endm\n");
-#endif /* __ASSEMBLY__ */
-
-#endif /* CONFIG_ARM_ASM_UNIFIED */
-
#endif /* !__ASM_UNIFIED_H */
diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
index 5266fd9ad6b4..783fbb4de5f9 100644
--- a/arch/arm/kernel/armksyms.c
+++ b/arch/arm/kernel/armksyms.c
@@ -92,7 +92,6 @@ EXPORT_SYMBOL(__memset64);
EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memchr);
-EXPORT_SYMBOL(__memzero);
EXPORT_SYMBOL(mmioset);
EXPORT_SYMBOL(mmiocpy);
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index fbc707626b3e..1752033b0070 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -82,11 +82,7 @@
#endif
.endm
-#ifdef CONFIG_KPROBES
- .section .kprobes.text,"ax",%progbits
-#else
- .text
-#endif
+ .section .entry.text,"ax",%progbits
/*
* Invalid mode handlers
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index e655dcd0a933..3c4f88701f22 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -37,6 +37,7 @@ saved_pc .req lr
#define TRACE(x...)
#endif
+ .section .entry.text,"ax",%progbits
.align 5
#if !(IS_ENABLED(CONFIG_TRACE_IRQFLAGS) || IS_ENABLED(CONFIG_CONTEXT_TRACKING))
/*
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 21dde771a7dd..6e0375e7db05 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -105,8 +105,9 @@ __mmap_switched:
ARM( ldmia r4!, {r0, r1, sp} )
THUMB( ldmia r4!, {r0, r1, r3} )
THUMB( mov sp, r3 )
- sub r1, r1, r0
- bl __memzero @ clear .bss
+ sub r2, r1, r0
+ mov r1, #0
+ bl memset @ clear .bss
ldmia r4, {r0, r1, r2, r3}
str r9, [r0] @ Save processor ID
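In C terms, the rewritten sequence above clears .bss with the ordinary memset() rather than the now-removed __memzero(); something along these lines, with the linker-script symbol names shown purely for illustration:

extern char __bss_start[], __bss_stop[];        /* provided by the linker script */

static void clear_bss(void)
{
        /* mirrors: sub r2, r1, r0; mov r1, #0; bl memset */
        memset(__bss_start, 0, __bss_stop - __bss_start);
}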
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index af2a7f1e3103..629e25152c0d 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -44,17 +44,17 @@ static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[ARM_MAX_BRP]);
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[ARM_MAX_WRP]);
/* Number of BRP/WRP registers on this CPU. */
-static int core_num_brps;
-static int core_num_wrps;
+static int core_num_brps __ro_after_init;
+static int core_num_wrps __ro_after_init;
/* Debug architecture version. */
-static u8 debug_arch;
+static u8 debug_arch __ro_after_init;
/* Does debug architecture support OS Save and Restore? */
-static bool has_ossr;
+static bool has_ossr __ro_after_init;
/* Maximum supported watchpoint length. */
-static u8 max_watchpoint_len;
+static u8 max_watchpoint_len __ro_after_init;
#define READ_WB_REG_CASE(OP2, M, VAL) \
case ((OP2 << 4) + M): \
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b4fbf00ee4ad..2da087926ebe 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -379,6 +379,9 @@ asmlinkage void secondary_start_kernel(void)
cpu_init();
+#ifndef CONFIG_MMU
+ setup_vectors_base();
+#endif
pr_debug("CPU%u: Booted secondary processor\n", cpu);
preempt_disable();
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 65228bf4c6df..a56e7c856ab5 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -3,6 +3,7 @@
#include <linux/sched/debug.h>
#include <linux/stacktrace.h>
+#include <asm/sections.h>
#include <asm/stacktrace.h>
#include <asm/traps.h>
@@ -63,7 +64,6 @@ EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE
struct stack_trace_data {
struct stack_trace *trace;
- unsigned long last_pc;
unsigned int no_sched_functions;
unsigned int skip;
};
@@ -87,16 +87,7 @@ static int save_trace(struct stackframe *frame, void *d)
if (trace->nr_entries >= trace->max_entries)
return 1;
- /*
- * in_exception_text() is designed to test if the PC is one of
- * the functions which has an exception stack above it, but
- * unfortunately what is in frame->pc is the return LR value,
- * not the saved PC value. So, we need to track the previous
- * frame PC value when doing this.
- */
- addr = data->last_pc;
- data->last_pc = frame->pc;
- if (!in_exception_text(addr))
+ if (!in_entry_text(frame->pc))
return 0;
regs = (struct pt_regs *)frame->sp;
@@ -114,7 +105,6 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
struct stackframe frame;
data.trace = trace;
- data.last_pc = ULONG_MAX;
data.skip = trace->skip;
data.no_sched_functions = nosched;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 5cf04888c581..e344bdd2e5ac 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -72,7 +72,7 @@ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long
printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif
- if (in_exception_text(where))
+ if (in_entry_text(from))
dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}
@@ -433,7 +433,7 @@ static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
return fn ? fn(regs, instr) : 1;
}
-asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+asmlinkage void do_undefinstr(struct pt_regs *regs)
{
unsigned int instr;
siginfo_t info;
diff --git a/arch/arm/kernel/vmlinux-xip.lds.S b/arch/arm/kernel/vmlinux-xip.lds.S
index ec4b3f94ad80..12b87591eb7c 100644
--- a/arch/arm/kernel/vmlinux-xip.lds.S
+++ b/arch/arm/kernel/vmlinux-xip.lds.S
@@ -96,9 +96,9 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
IDMAP_TEXT
- __exception_text_start = .;
- *(.exception.text)
- __exception_text_end = .;
+ __entry_text_start = .;
+ *(.entry.text)
+ __entry_text_end = .;
IRQENTRY_TEXT
TEXT_TEXT
SCHED_TEXT
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index ee53f6518872..84a1ae3ce46e 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -105,9 +105,9 @@ SECTIONS
.text : { /* Real text segment */
_stext = .; /* Text and read-only data */
IDMAP_TEXT
- __exception_text_start = .;
- *(.exception.text)
- __exception_text_end = .;
+ __entry_text_start = .;
+ *(.entry.text)
+ __entry_text_end = .;
IRQENTRY_TEXT
SOFTIRQENTRY_TEXT
TEXT_TEXT
diff --git a/arch/arm/lib/Makefile b/arch/arm/lib/Makefile
index 4cb0b9624d8f..ad25fd1872c7 100644
--- a/arch/arm/lib/Makefile
+++ b/arch/arm/lib/Makefile
@@ -8,7 +8,7 @@
lib-y := backtrace.o changebit.o csumipv6.o csumpartial.o \
csumpartialcopy.o csumpartialcopyuser.o clearbit.o \
delay.o delay-loop.o findbit.o memchr.o memcpy.o \
- memmove.o memset.o memzero.o setbit.o \
+ memmove.o memset.o setbit.o \
strchr.o strrchr.o \
testchangebit.o testclearbit.o testsetbit.o \
ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
deleted file mode 100644
index 0eded952e089..000000000000
--- a/arch/arm/lib/memzero.S
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * linux/arch/arm/lib/memzero.S
- *
- * Copyright (C) 1995-2000 Russell King
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/linkage.h>
-#include <asm/assembler.h>
-#include <asm/unwind.h>
-
- .text
- .align 5
- .word 0
-/*
- * Align the pointer in r0. r3 contains the number of bytes that we are
- * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we
- * don't bother; we use byte stores instead.
- */
-UNWIND( .fnstart )
-1: subs r1, r1, #4 @ 1 do we have enough
- blt 5f @ 1 bytes to align with?
- cmp r3, #2 @ 1
- strltb r2, [r0], #1 @ 1
- strleb r2, [r0], #1 @ 1
- strb r2, [r0], #1 @ 1
- add r1, r1, r3 @ 1 (r1 = r1 - (4 - r3))
-/*
- * The pointer is now aligned and the length is adjusted. Try doing the
- * memzero again.
- */
-
-ENTRY(__memzero)
- mov r2, #0 @ 1
- ands r3, r0, #3 @ 1 unaligned?
- bne 1b @ 1
-/*
- * r3 = 0, and we know that the pointer in r0 is aligned to a word boundary.
- */
- cmp r1, #16 @ 1 we can skip this chunk if we
- blt 4f @ 1 have < 16 bytes
-
-#if ! CALGN(1)+0
-
-/*
- * We need an extra register for this loop - save the return address and
- * use the LR
- */
- str lr, [sp, #-4]! @ 1
-UNWIND( .fnend )
-UNWIND( .fnstart )
-UNWIND( .save {lr} )
- mov ip, r2 @ 1
- mov lr, r2 @ 1
-
-3: subs r1, r1, #64 @ 1 write 32 bytes out per loop
- stmgeia r0!, {r2, r3, ip, lr} @ 4
- stmgeia r0!, {r2, r3, ip, lr} @ 4
- stmgeia r0!, {r2, r3, ip, lr} @ 4
- stmgeia r0!, {r2, r3, ip, lr} @ 4
- bgt 3b @ 1
- ldmeqfd sp!, {pc} @ 1/2 quick exit
-/*
- * No need to correct the count; we're only testing bits from now on
- */
- tst r1, #32 @ 1
- stmneia r0!, {r2, r3, ip, lr} @ 4
- stmneia r0!, {r2, r3, ip, lr} @ 4
- tst r1, #16 @ 1 16 bytes or more?
- stmneia r0!, {r2, r3, ip, lr} @ 4
- ldr lr, [sp], #4 @ 1
-UNWIND( .fnend )
-
-#else
-
-/*
- * This version aligns the destination pointer in order to write
- * whole cache lines at once.
- */
-
- stmfd sp!, {r4-r7, lr}
-UNWIND( .fnend )
-UNWIND( .fnstart )
-UNWIND( .save {r4-r7, lr} )
- mov r4, r2
- mov r5, r2
- mov r6, r2
- mov r7, r2
- mov ip, r2
- mov lr, r2
-
- cmp r1, #96
- andgts ip, r0, #31
- ble 3f
-
- rsb ip, ip, #32
- sub r1, r1, ip
- movs ip, ip, lsl #(32 - 4)
- stmcsia r0!, {r4, r5, r6, r7}
- stmmiia r0!, {r4, r5}
- movs ip, ip, lsl #2
- strcs r2, [r0], #4
-
-3: subs r1, r1, #64
- stmgeia r0!, {r2-r7, ip, lr}
- stmgeia r0!, {r2-r7, ip, lr}
- bgt 3b
- ldmeqfd sp!, {r4-r7, pc}
-
- tst r1, #32
- stmneia r0!, {r2-r7, ip, lr}
- tst r1, #16
- stmneia r0!, {r4-r7}
- ldmfd sp!, {r4-r7, lr}
-UNWIND( .fnend )
-
-#endif
-
-UNWIND( .fnstart )
-4: tst r1, #8 @ 1 8 bytes or more?
- stmneia r0!, {r2, r3} @ 2
- tst r1, #4 @ 1 4 bytes or more?
- strne r2, [r0], #4 @ 1
-/*
- * When we get here, we've got less than 4 bytes to zero. We
- * may have an unaligned pointer as well.
- */
-5: tst r1, #2 @ 1 2 bytes or more?
- strneb r2, [r0], #1 @ 1
- strneb r2, [r0], #1 @ 1
- tst r1, #1 @ 1 a byte left over
- strneb r2, [r0], #1 @ 1
- ret lr @ 1
-UNWIND( .fnend )
-ENDPROC(__memzero)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index fd9077a74fce..7f14acf67caf 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -909,6 +909,14 @@ config OUTER_CACHE_SYNC
The outer cache has an outer_cache_fns.sync function pointer
that can be used to drain the write buffer of the outer cache.
+config CACHE_B15_RAC
+ bool "Enable the Broadcom Brahma-B15 read-ahead cache controller"
+ depends on ARCH_BRCMSTB
+ default y
+ help
+ This option enables the Broadcom Brahma-B15 read-ahead cache
+ controller. If disabled, the read-ahead cache remains off.
+
config CACHE_FEROCEON_L2
bool "Enable the Feroceon L2 cache controller"
depends on ARCH_MV78XX0 || ARCH_MVEBU
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 01bcc33f59e3..9dbb84923e12 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -13,7 +13,8 @@ obj-y += nommu.o
obj-$(CONFIG_ARM_MPU) += pmsa-v7.o
endif
-obj-$(CONFIG_ARM_PTDUMP) += dump.o
+obj-$(CONFIG_ARM_PTDUMP_CORE) += dump.o
+obj-$(CONFIG_ARM_PTDUMP_DEBUGFS) += ptdump_debugfs.o
obj-$(CONFIG_MODULES) += proc-syms.o
obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
@@ -103,6 +104,7 @@ AFLAGS_proc-v6.o :=-Wa,-march=armv6
AFLAGS_proc-v7.o :=-Wa,-march=armv7-a
obj-$(CONFIG_OUTER_CACHE) += l2c-common.o
+obj-$(CONFIG_CACHE_B15_RAC) += cache-b15-rac.o
obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o
obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o
obj-$(CONFIG_CACHE_L2X0_PMU) += cache-l2x0-pmu.o
diff --git a/arch/arm/mm/cache-b15-rac.c b/arch/arm/mm/cache-b15-rac.c
new file mode 100644
index 000000000000..d9586ba2e63c
--- /dev/null
+++ b/arch/arm/mm/cache-b15-rac.c
@@ -0,0 +1,356 @@
+/*
+ * Broadcom Brahma-B15 CPU read-ahead cache management functions
+ *
+ * Copyright (C) 2015-2016 Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/spinlock.h>
+#include <linux/io.h>
+#include <linux/bitops.h>
+#include <linux/of_address.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/syscore_ops.h>
+#include <linux/reboot.h>
+
+#include <asm/cacheflush.h>
+#include <asm/hardware/cache-b15-rac.h>
+
+extern void v7_flush_kern_cache_all(void);
+
+/* RAC register offsets, relative to the HIF_CPU_BIUCTRL register base */
+#define RAC_CONFIG0_REG (0x78)
+#define RACENPREF_MASK (0x3)
+#define RACPREFINST_SHIFT (0)
+#define RACENINST_SHIFT (2)
+#define RACPREFDATA_SHIFT (4)
+#define RACENDATA_SHIFT (6)
+#define RAC_CPU_SHIFT (8)
+#define RACCFG_MASK (0xff)
+#define RAC_CONFIG1_REG (0x7c)
+#define RAC_FLUSH_REG (0x80)
+#define FLUSH_RAC (1 << 0)
+
+/* Bitmask to enable instruction and data prefetching with a 256-byte stride */
+#define RAC_DATA_INST_EN_MASK (1 << RACPREFINST_SHIFT | \
+ RACENPREF_MASK << RACENINST_SHIFT | \
+ 1 << RACPREFDATA_SHIFT | \
+ RACENPREF_MASK << RACENDATA_SHIFT)
+
+#define RAC_ENABLED 0
+/* Special state where we want to bypass the spinlock and call directly
+ * into the v7 cache maintenance operations during suspend/resume
+ */
+#define RAC_SUSPENDED 1
+
+static void __iomem *b15_rac_base;
+static DEFINE_SPINLOCK(rac_lock);
+
+static u32 rac_config0_reg;
+
+/* Initialization flag to avoid checking for b15_rac_base, and to prevent
+ * multi-platform kernels from crashing here as well.
+ */
+static unsigned long b15_rac_flags;
+
+static inline u32 __b15_rac_disable(void)
+{
+ u32 val = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
+ __raw_writel(0, b15_rac_base + RAC_CONFIG0_REG);
+ dmb();
+ return val;
+}
+
+static inline void __b15_rac_flush(void)
+{
+ u32 reg;
+
+ __raw_writel(FLUSH_RAC, b15_rac_base + RAC_FLUSH_REG);
+ do {
+ /* This dmb() is required to force the Bus Interface Unit
+ * to clean outstanding writes, and forces an idle cycle
+ * to be inserted.
+ */
+ dmb();
+ reg = __raw_readl(b15_rac_base + RAC_FLUSH_REG);
+ } while (reg & FLUSH_RAC);
+}
+
+static inline u32 b15_rac_disable_and_flush(void)
+{
+ u32 reg;
+
+ reg = __b15_rac_disable();
+ __b15_rac_flush();
+ return reg;
+}
+
+static inline void __b15_rac_enable(u32 val)
+{
+ __raw_writel(val, b15_rac_base + RAC_CONFIG0_REG);
+ /* dsb() is required here to be consistent with __flush_icache_all() */
+ dsb();
+}
+
+#define BUILD_RAC_CACHE_OP(name, bar) \
+void b15_flush_##name(void) \
+{ \
+ unsigned int do_flush; \
+ u32 val = 0; \
+ \
+ if (test_bit(RAC_SUSPENDED, &b15_rac_flags)) { \
+ v7_flush_##name(); \
+ bar; \
+ return; \
+ } \
+ \
+ spin_lock(&rac_lock); \
+ do_flush = test_bit(RAC_ENABLED, &b15_rac_flags); \
+ if (do_flush) \
+ val = b15_rac_disable_and_flush(); \
+ v7_flush_##name(); \
+ if (!do_flush) \
+ bar; \
+ else \
+ __b15_rac_enable(val); \
+ spin_unlock(&rac_lock); \
+}
+
+#define nobarrier
+
+/* The readahead cache present in the Brahma-B15 CPU is a special piece of
+ * hardware after the integrated L2 cache of the B15 CPU complex whose purpose
+ * is to prefetch instruction and/or data with a line size of either 64 bytes
+ * or 256 bytes. The rationale is that the data-bus of the CPU interface is
+ * optimized for 256-byte transactions, and enabling the readahead cache
+ * provides a significant performance boost, so we want it enabled (typically
+ * twice the performance for a memcpy benchmark application).
+ *
+ * The readahead cache is transparent for Modified Virtual Addresses
+ * cache maintenance operations: ICIMVAU, DCIMVAC, DCCMVAC, DCCMVAU and
+ * DCCIMVAC.
+ *
+ * It is however not transparent for the following cache maintenance
+ * operations: DCISW, DCCSW, DCCISW, ICIALLUIS and ICIALLU, which is precisely
+ * what we are patching here with our BUILD_RAC_CACHE_OP.
+ */
+BUILD_RAC_CACHE_OP(kern_cache_all, nobarrier);
+
+static void b15_rac_enable(void)
+{
+ unsigned int cpu;
+ u32 enable = 0;
+
+ for_each_possible_cpu(cpu)
+ enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));
+
+ b15_rac_disable_and_flush();
+ __b15_rac_enable(enable);
+}
+
+static int b15_rac_reboot_notifier(struct notifier_block *nb,
+ unsigned long action,
+ void *data)
+{
+ /* During kexec, we are not yet migrated on the boot CPU, so we need to
+ * make sure we are SMP safe here. Once the RAC is disabled, flag it as
+ * suspended such that the hotplug notifier returns early.
+ */
+ if (action == SYS_RESTART) {
+ spin_lock(&rac_lock);
+ b15_rac_disable_and_flush();
+ clear_bit(RAC_ENABLED, &b15_rac_flags);
+ set_bit(RAC_SUSPENDED, &b15_rac_flags);
+ spin_unlock(&rac_lock);
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block b15_rac_reboot_nb = {
+ .notifier_call = b15_rac_reboot_notifier,
+};
+
+/* The CPU hotplug case is the most interesting one: we basically need to make
+ * sure that the RAC is disabled for the entire system prior to having a CPU
+ * die, in particular prior to this dying CPU having exited the coherency
+ * domain.
+ *
+ * Once this CPU is marked dead, we can safely re-enable the RAC for the
+ * remaining CPUs in the system which are still online.
+ *
+ * Offlining a CPU is the problematic case; onlining a CPU is not much of an
+ * issue since the CPU and its cache-level hierarchy will start filling with
+ * the RAC disabled, so L1 and L2 only.
+ *
+ * In this function, we should NOT have to verify any unsafe setting/condition:
+ * b15_rac_base:
+ *
+ * It is protected by the RAC_ENABLED flag which is cleared by default, and
+ * being cleared when initial procedure is done. b15_rac_base had been set at
+ * that time.
+ *
+ * RAC_ENABLED:
+ * There is a small timing window, in b15_rac_init(), between
+ * cpuhp_setup_state_*()
+ * ...
+ * set RAC_ENABLED
+ * However, there is no hotplug activity based on the Linux booting procedure.
+ *
+ * Since we have to disable RAC for all cores, we keep RAC on as long as
+ * possible (disable it as late as possible) to gain the cache benefit.
+ *
+ * Thus, dying/dead states are chosen here.
+ *
+ * We are choosing not to disable the RAC on a per-CPU basis here; if we did,
+ * we would want to consider disabling it as early as possible to benefit the
+ * other active CPUs.
+ */
+
+/* Running on the dying CPU */
+static int b15_rac_dying_cpu(unsigned int cpu)
+{
+ /* During kexec/reboot, the RAC is disabled via the reboot notifier;
+ * return early here.
+ */
+ if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
+ return 0;
+
+ spin_lock(&rac_lock);
+
+ /* Indicate that we are starting a hotplug procedure */
+ __clear_bit(RAC_ENABLED, &b15_rac_flags);
+
+ /* Disable the readahead cache and save its value to a global */
+ rac_config0_reg = b15_rac_disable_and_flush();
+
+ spin_unlock(&rac_lock);
+
+ return 0;
+}
+
+/* Running on a non-dying CPU */
+static int b15_rac_dead_cpu(unsigned int cpu)
+{
+	/* During kexec/reboot, the RAC is disabled via the reboot notifier;
+	 * return early here.
+ */
+ if (test_bit(RAC_SUSPENDED, &b15_rac_flags))
+ return 0;
+
+ spin_lock(&rac_lock);
+
+ /* And enable it */
+ __b15_rac_enable(rac_config0_reg);
+ __set_bit(RAC_ENABLED, &b15_rac_flags);
+
+ spin_unlock(&rac_lock);
+
+ return 0;
+}
+
+static int b15_rac_suspend(void)
+{
+	/* Suspend the read-ahead cache operations, forcing our cache
+	 * implementation to fall back to the regular ARMv7 calls.
+ *
+ * We are guaranteed to be running on the boot CPU at this point and
+ * with every other CPU quiesced, so setting RAC_SUSPENDED is not racy
+ * here.
+ */
+ rac_config0_reg = b15_rac_disable_and_flush();
+ set_bit(RAC_SUSPENDED, &b15_rac_flags);
+
+ return 0;
+}
+
+static void b15_rac_resume(void)
+{
+	/* Coming out of an S3 suspend/resume cycle, the read-ahead cache
+	 * register RAC_CONFIG0_REG will have been restored to its default
+	 * value, so make sure we re-enable it and set the enable flag. We are
+	 * also guaranteed to run on the boot CPU, so this is not racy either.
+ */
+ __b15_rac_enable(rac_config0_reg);
+ clear_bit(RAC_SUSPENDED, &b15_rac_flags);
+}
+
+static struct syscore_ops b15_rac_syscore_ops = {
+ .suspend = b15_rac_suspend,
+ .resume = b15_rac_resume,
+};
+
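Syscore ops fit here because their suspend/resume hooks run late in the S3 path, on a single CPU with interrupts disabled, which is exactly the "not racy" assumption made in the two functions above. Registration is a one-liner, done below in b15_rac_init() under CONFIG_PM_SLEEP:

	register_syscore_ops(&b15_rac_syscore_ops);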
+static int __init b15_rac_init(void)
+{
+ struct device_node *dn;
+ int ret = 0, cpu;
+ u32 reg, en_mask = 0;
+
+ dn = of_find_compatible_node(NULL, NULL, "brcm,brcmstb-cpu-biu-ctrl");
+ if (!dn)
+ return -ENODEV;
+
+ if (WARN(num_possible_cpus() > 4, "RAC only supports 4 CPUs\n"))
+ goto out;
+
+ b15_rac_base = of_iomap(dn, 0);
+ if (!b15_rac_base) {
+ pr_err("failed to remap BIU control base\n");
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ ret = register_reboot_notifier(&b15_rac_reboot_nb);
+ if (ret) {
+ pr_err("failed to register reboot notifier\n");
+ iounmap(b15_rac_base);
+ goto out;
+ }
+
+ if (IS_ENABLED(CONFIG_HOTPLUG_CPU)) {
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
+ "arm/cache-b15-rac:dead",
+ NULL, b15_rac_dead_cpu);
+ if (ret)
+ goto out_unmap;
+
+ ret = cpuhp_setup_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING,
+ "arm/cache-b15-rac:dying",
+ NULL, b15_rac_dying_cpu);
+ if (ret)
+ goto out_cpu_dead;
+ }
+
+ if (IS_ENABLED(CONFIG_PM_SLEEP))
+ register_syscore_ops(&b15_rac_syscore_ops);
+
+ spin_lock(&rac_lock);
+ reg = __raw_readl(b15_rac_base + RAC_CONFIG0_REG);
+ for_each_possible_cpu(cpu)
+ en_mask |= ((1 << RACPREFDATA_SHIFT) << (cpu * RAC_CPU_SHIFT));
+ WARN(reg & en_mask, "Read-ahead cache not previously disabled\n");
+
+ b15_rac_enable();
+ set_bit(RAC_ENABLED, &b15_rac_flags);
+ spin_unlock(&rac_lock);
+
+ pr_info("Broadcom Brahma-B15 readahead cache at: 0x%p\n",
+ b15_rac_base + RAC_CONFIG0_REG);
+
+ goto out;
+
+out_cpu_dead:
+ cpuhp_remove_state_nocalls(CPUHP_AP_ARM_CACHE_B15_RAC_DYING);
+out_unmap:
+ unregister_reboot_notifier(&b15_rac_reboot_nb);
+ iounmap(b15_rac_base);
+out:
+ of_node_put(dn);
+ return ret;
+}
+arch_initcall(b15_rac_init);
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index de78109d002d..215df435bfb9 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -15,6 +15,7 @@
#include <asm/assembler.h>
#include <asm/errno.h>
#include <asm/unwind.h>
+#include <asm/hardware/cache-b15-rac.h>
#include "proc-macros.S"
@@ -446,3 +447,23 @@ ENDPROC(v7_dma_unmap_area)
@ define struct cpu_cache_fns (see <asm/cacheflush.h> and proc-macros.S)
define_cache_functions v7
+
+	/* The Broadcom Brahma-B15 read-ahead cache requires some modifications
+	 * to the v7_cache_fns; we only override the ones we need.
+ */
+#ifndef CONFIG_CACHE_B15_RAC
+ globl_equ b15_flush_kern_cache_all, v7_flush_kern_cache_all
+#endif
+ globl_equ b15_flush_icache_all, v7_flush_icache_all
+ globl_equ b15_flush_kern_cache_louis, v7_flush_kern_cache_louis
+ globl_equ b15_flush_user_cache_all, v7_flush_user_cache_all
+ globl_equ b15_flush_user_cache_range, v7_flush_user_cache_range
+ globl_equ b15_coherent_kern_range, v7_coherent_kern_range
+ globl_equ b15_coherent_user_range, v7_coherent_user_range
+ globl_equ b15_flush_kern_dcache_area, v7_flush_kern_dcache_area
+
+ globl_equ b15_dma_map_area, v7_dma_map_area
+ globl_equ b15_dma_unmap_area, v7_dma_unmap_area
+ globl_equ b15_dma_flush_range, v7_dma_flush_range
+
+ define_cache_functions b15
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index fc3b44028cfb..084779c5c893 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -21,11 +21,7 @@
#include <asm/fixmap.h>
#include <asm/memory.h>
#include <asm/pgtable.h>
-
-struct addr_marker {
- unsigned long start_address;
- const char *name;
-};
+#include <asm/ptdump.h>
static struct addr_marker address_markers[] = {
{ MODULES_VADDR, "Modules" },
@@ -38,12 +34,26 @@ static struct addr_marker address_markers[] = {
{ -1, NULL },
};
+#define pt_dump_seq_printf(m, fmt, args...) \
+({ \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
+#define pt_dump_seq_puts(m, fmt) \
+({ \
+ if (m) \
+ seq_printf(m, fmt); \
+})
+
struct pg_state {
struct seq_file *seq;
const struct addr_marker *marker;
unsigned long start_address;
unsigned level;
u64 current_prot;
+ bool check_wx;
+ unsigned long wx_pages;
const char *current_domain;
};
@@ -52,6 +62,8 @@ struct prot_bits {
u64 val;
const char *set;
const char *clear;
+ bool ro_bit;
+ bool nx_bit;
};
static const struct prot_bits pte_bits[] = {
@@ -65,11 +77,13 @@ static const struct prot_bits pte_bits[] = {
.val = L_PTE_RDONLY,
.set = "ro",
.clear = "RW",
+ .ro_bit = true,
}, {
.mask = L_PTE_XN,
.val = L_PTE_XN,
.set = "NX",
.clear = "x ",
+ .nx_bit = true,
}, {
.mask = L_PTE_SHARED,
.val = L_PTE_SHARED,
@@ -133,11 +147,13 @@ static const struct prot_bits section_bits[] = {
.val = L_PMD_SECT_RDONLY | PMD_SECT_AP2,
.set = "ro",
.clear = "RW",
+ .ro_bit = true,
#elif __LINUX_ARM_ARCH__ >= 6
{
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_APX | PMD_SECT_AP_WRITE,
.set = " ro",
+ .ro_bit = true,
}, {
.mask = PMD_SECT_APX | PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
@@ -156,6 +172,7 @@ static const struct prot_bits section_bits[] = {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = 0,
.set = " ro",
+ .ro_bit = true,
}, {
.mask = PMD_SECT_AP_READ | PMD_SECT_AP_WRITE,
.val = PMD_SECT_AP_WRITE,
@@ -174,6 +191,7 @@ static const struct prot_bits section_bits[] = {
.val = PMD_SECT_XN,
.set = "NX",
.clear = "x ",
+ .nx_bit = true,
}, {
.mask = PMD_SECT_S,
.val = PMD_SECT_S,
@@ -186,6 +204,8 @@ struct pg_level {
const struct prot_bits *bits;
size_t num;
u64 mask;
+ const struct prot_bits *ro_bit;
+ const struct prot_bits *nx_bit;
};
static struct pg_level pg_level[] = {
@@ -214,10 +234,27 @@ static void dump_prot(struct pg_state *st, const struct prot_bits *bits, size_t
s = bits->clear;
if (s)
- seq_printf(st->seq, " %s", s);
+ pt_dump_seq_printf(st->seq, " %s", s);
}
}
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+ if ((st->current_prot & pg_level[st->level].ro_bit->mask) ==
+ pg_level[st->level].ro_bit->val)
+ return;
+ if ((st->current_prot & pg_level[st->level].nx_bit->mask) ==
+ pg_level[st->level].nx_bit->val)
+ return;
+
+ WARN_ONCE(1, "arm/mm: Found insecure W+X mapping at address %pS\n",
+ (void *)st->start_address);
+
+ st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
static void note_page(struct pg_state *st, unsigned long addr,
unsigned int level, u64 val, const char *domain)
{
@@ -228,7 +265,7 @@ static void note_page(struct pg_state *st, unsigned long addr,
st->level = level;
st->current_prot = prot;
st->current_domain = domain;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
} else if (prot != st->current_prot || level != st->level ||
domain != st->current_domain ||
addr >= st->marker[1].start_address) {
@@ -236,7 +273,8 @@ static void note_page(struct pg_state *st, unsigned long addr,
unsigned long delta;
if (st->current_prot) {
- seq_printf(st->seq, "0x%08lx-0x%08lx ",
+ note_prot_wx(st, addr);
+ pt_dump_seq_printf(st->seq, "0x%08lx-0x%08lx ",
st->start_address, addr);
delta = (addr - st->start_address) >> 10;
@@ -244,17 +282,19 @@ static void note_page(struct pg_state *st, unsigned long addr,
delta >>= 10;
unit++;
}
- seq_printf(st->seq, "%9lu%c", delta, *unit);
+ pt_dump_seq_printf(st->seq, "%9lu%c", delta, *unit);
if (st->current_domain)
- seq_printf(st->seq, " %s", st->current_domain);
+ pt_dump_seq_printf(st->seq, " %s",
+ st->current_domain);
if (pg_level[st->level].bits)
dump_prot(st, pg_level[st->level].bits, pg_level[st->level].num);
- seq_printf(st->seq, "\n");
+ pt_dump_seq_printf(st->seq, "\n");
}
if (addr >= st->marker[1].start_address) {
st->marker++;
- seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
+ st->marker->name);
}
st->start_address = addr;
st->current_prot = prot;
@@ -335,61 +375,82 @@ static void walk_pud(struct pg_state *st, pgd_t *pgd, unsigned long start)
}
}
-static void walk_pgd(struct seq_file *m)
+static void walk_pgd(struct pg_state *st, struct mm_struct *mm,
+ unsigned long start)
{
- pgd_t *pgd = swapper_pg_dir;
- struct pg_state st;
- unsigned long addr;
+ pgd_t *pgd = pgd_offset(mm, 0UL);
unsigned i;
-
- memset(&st, 0, sizeof(st));
- st.seq = m;
- st.marker = address_markers;
+ unsigned long addr;
for (i = 0; i < PTRS_PER_PGD; i++, pgd++) {
- addr = i * PGDIR_SIZE;
+ addr = start + i * PGDIR_SIZE;
if (!pgd_none(*pgd)) {
- walk_pud(&st, pgd, addr);
+ walk_pud(st, pgd, addr);
} else {
- note_page(&st, addr, 1, pgd_val(*pgd), NULL);
+ note_page(st, addr, 1, pgd_val(*pgd), NULL);
}
}
-
- note_page(&st, 0, 0, 0, NULL);
}
-static int ptdump_show(struct seq_file *m, void *v)
+void ptdump_walk_pgd(struct seq_file *m, struct ptdump_info *info)
{
- walk_pgd(m);
- return 0;
-}
+ struct pg_state st = {
+ .seq = m,
+ .marker = info->markers,
+ .check_wx = false,
+ };
-static int ptdump_open(struct inode *inode, struct file *file)
-{
- return single_open(file, ptdump_show, NULL);
+ walk_pgd(&st, info->mm, info->base_addr);
+ note_page(&st, 0, 0, 0, NULL);
}
-static const struct file_operations ptdump_fops = {
- .open = ptdump_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
-};
-
-static int ptdump_init(void)
+static void ptdump_initialize(void)
{
- struct dentry *pe;
unsigned i, j;
for (i = 0; i < ARRAY_SIZE(pg_level); i++)
if (pg_level[i].bits)
- for (j = 0; j < pg_level[i].num; j++)
+ for (j = 0; j < pg_level[i].num; j++) {
pg_level[i].mask |= pg_level[i].bits[j].mask;
+ if (pg_level[i].bits[j].ro_bit)
+ pg_level[i].ro_bit = &pg_level[i].bits[j];
+ if (pg_level[i].bits[j].nx_bit)
+ pg_level[i].nx_bit = &pg_level[i].bits[j];
+ }
address_markers[2].start_address = VMALLOC_START;
+}
+
+static struct ptdump_info kernel_ptdump_info = {
+ .mm = &init_mm,
+ .markers = address_markers,
+ .base_addr = 0,
+};
- pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
- &ptdump_fops);
- return pe ? 0 : -ENOMEM;
+void ptdump_check_wx(void)
+{
+ struct pg_state st = {
+ .seq = NULL,
+ .marker = (struct addr_marker[]) {
+ { 0, NULL},
+ { -1, NULL},
+ },
+ .check_wx = true,
+ };
+
+ walk_pgd(&st, &init_mm, 0);
+ note_page(&st, 0, 0, 0, NULL);
+ if (st.wx_pages)
+ pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
+ st.wx_pages);
+ else
+ pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+
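ptdump_check_wx() is what the debug_checkwx() call added to mark_rodata_ro() further down resolves to when CONFIG_DEBUG_WX is enabled. The glue lives in <asm/ptdump.h>, which is not shown in this hunk; a plausible sketch of it, following the pattern used elsewhere:

	#ifdef CONFIG_DEBUG_WX
	#define debug_checkwx()	ptdump_check_wx()
	#else
	#define debug_checkwx()	do { } while (0)
	#endif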
+static int ptdump_init(void)
+{
+ ptdump_initialize();
+ return ptdump_debugfs_register(&kernel_ptdump_info,
+ "kernel_page_tables");
}
__initcall(ptdump_init);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 42f585379e19..b75eada23d0a 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -21,7 +21,6 @@
#include <linux/highmem.h>
#include <linux/perf_event.h>
-#include <asm/exception.h>
#include <asm/pgtable.h>
#include <asm/system_misc.h>
#include <asm/system_info.h>
@@ -545,7 +544,7 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
/*
* Dispatch a data abort to the relevant handler.
*/
-asmlinkage void __exception
+asmlinkage void
do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
{
const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
@@ -578,7 +577,7 @@ hook_ifault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *
ifsr_info[nr].name = name;
}
-asmlinkage void __exception
+asmlinkage void
do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
{
const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
diff --git a/arch/arm/mm/idmap.c b/arch/arm/mm/idmap.c
index 10bfba85eb96..1d1edd064199 100644
--- a/arch/arm/mm/idmap.c
+++ b/arch/arm/mm/idmap.c
@@ -16,8 +16,8 @@
* are not supported on any CPU using the idmap tables as its current
* page tables.
*/
-pgd_t *idmap_pgd;
-long long arch_phys_to_idmap_offset;
+pgd_t *idmap_pgd __ro_after_init;
+long long arch_phys_to_idmap_offset __ro_after_init;
#ifdef CONFIG_ARM_LPAE
static void idmap_add_pmd(pud_t *pud, unsigned long addr, unsigned long end,
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index a1f11a7ee81b..bd6f4513539a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -36,6 +36,7 @@
#include <asm/system_info.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
+#include <asm/ptdump.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
@@ -738,6 +739,7 @@ static int __mark_rodata_ro(void *unused)
void mark_rodata_ro(void)
{
stop_machine(__mark_rodata_ro, NULL, NULL);
+ debug_checkwx();
}
void set_kernel_text_rw(void)
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index e4370810f4f1..7c087961b7ce 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -31,7 +31,7 @@ struct mpu_rgn_info mpu_rgn_info;
#ifdef CONFIG_CPU_CP15
#ifdef CONFIG_CPU_HIGH_VECTOR
-static unsigned long __init setup_vectors_base(void)
+unsigned long setup_vectors_base(void)
{
unsigned long reg = get_cr();
@@ -57,7 +57,7 @@ static inline bool security_extensions_enabled(void)
return 0;
}
-static unsigned long __init setup_vectors_base(void)
+unsigned long setup_vectors_base(void)
{
unsigned long base = 0, reg = get_cr();
diff --git a/arch/arm/mm/pmsa-v7.c b/arch/arm/mm/pmsa-v7.c
index 976df60ac426..e2853bfff74e 100644
--- a/arch/arm/mm/pmsa-v7.c
+++ b/arch/arm/mm/pmsa-v7.c
@@ -6,6 +6,7 @@
#include <linux/bitops.h>
#include <linux/memblock.h>
+#include <linux/string.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
@@ -296,6 +297,7 @@ void __init adjust_lowmem_bounds_mpu(void)
}
}
+ memset(mem, 0, sizeof(mem));
num = allocate_region(mem_start, specified_mem_size, mem_max_regions, mem);
for (i = 0; i < num; i++) {
@@ -433,7 +435,7 @@ void __init mpu_setup(void)
/* Background */
err |= mpu_setup_region(region++, 0, 32,
- MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0NA,
+ MPU_ACR_XN | MPU_RGN_STRONGLY_ORDERED | MPU_AP_PL1RW_PL0RW,
0, false);
#ifdef CONFIG_XIP_KERNEL
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 01d64c0b2563..d55d493f9a1e 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -567,7 +567,7 @@ __v7_setup_stack:
/*
* Standard v7 proc info content
*/
-.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions
+.macro __v7_proc name, initfunc, mm_mmuflags = 0, io_mmuflags = 0, hwcaps = 0, proc_fns = v7_processor_functions, cache_fns = v7_cache_fns
ALT_SMP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
PMD_SECT_AF | PMD_FLAGS_SMP | \mm_mmuflags)
ALT_UP(.long PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | \
@@ -583,7 +583,7 @@ __v7_setup_stack:
.long \proc_fns
.long v7wbi_tlb_fns
.long v6_user_fns
- .long v7_cache_fns
+ .long \cache_fns
.endm
#ifndef CONFIG_ARM_LPAE
@@ -678,7 +678,7 @@ __v7_ca15mp_proc_info:
__v7_b15mp_proc_info:
.long 0x420f00f0
.long 0xff0ffff0
- __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup
+ __v7_proc __v7_b15mp_proc_info, __v7_b15mp_setup, cache_fns = b15_cache_fns
.size __v7_b15mp_proc_info, . - __v7_b15mp_proc_info
/*
diff --git a/arch/arm/mm/ptdump_debugfs.c b/arch/arm/mm/ptdump_debugfs.c
new file mode 100644
index 000000000000..be8d87be4b93
--- /dev/null
+++ b/arch/arm/mm/ptdump_debugfs.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include <asm/ptdump.h>
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ struct ptdump_info *info = m->private;
+
+ ptdump_walk_pgd(m, info);
+ return 0;
+}
+
+static int ptdump_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, ptdump_show, inode->i_private);
+}
+
+static const struct file_operations ptdump_fops = {
+ .open = ptdump_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+int ptdump_debugfs_register(struct ptdump_info *info, const char *name)
+{
+ struct dentry *pe;
+
+ pe = debugfs_create_file(name, 0400, NULL, info, &ptdump_fops);
+ return pe ? 0 : -ENOMEM;
+}
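As a usage sketch, any other ARM page-table dump could now reuse this helper by filling in a ptdump_info and registering it (the file name and markers below are made up for the example):

	static struct addr_marker my_markers[] = {
		{ MODULES_VADDR, "Modules" },
		{ -1, NULL },
	};

	static struct ptdump_info my_ptdump_info = {
		.mm		= &init_mm,
		.markers	= my_markers,
		.base_addr	= 0,
	};

	/* creates a debugfs file "my_page_tables", dumped via ptdump_walk_pgd() */
	ptdump_debugfs_register(&my_ptdump_info, "my_page_tables");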
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index c199990e12b6..323a4df59a6c 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -27,14 +27,58 @@
int bpf_jit_enable __read_mostly;
+/*
+ * eBPF prog stack layout:
+ *
+ * high
+ * original ARM_SP => +-----+
+ * | | callee saved registers
+ * +-----+ <= (BPF_FP + SCRATCH_SIZE)
+ * | ... | eBPF JIT scratch space
+ * eBPF fp register => +-----+
+ * (BPF_FP) | ... | eBPF prog stack
+ * +-----+
+ * |RSVD | JIT scratchpad
+ * current ARM_SP => +-----+ <= (BPF_FP - STACK_SIZE + SCRATCH_SIZE)
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +-----+
+ * low
+ *
+ * The callee-saved registers depend on whether frame pointers are enabled.
+ * With frame pointers (to be compliant with the ABI):
+ *
+ * high
+ * original ARM_SP => +------------------+ \
+ * | pc | |
+ * current ARM_FP => +------------------+ } callee saved registers
+ * |r4-r8,r10,fp,ip,lr| |
+ * +------------------+ /
+ * low
+ *
+ * Without frame pointers:
+ *
+ * high
+ * original ARM_SP => +------------------+
+ * | r4-r8,r10,fp,lr | callee saved registers
+ * current ARM_FP => +------------------+
+ * low
+ *
+ * When popping registers off the stack at the end of a BPF function, we
+ * reference them via the current ARM_FP register.
+ */
+#define CALLEE_MASK (1 << ARM_R4 | 1 << ARM_R5 | 1 << ARM_R6 | \
+ 1 << ARM_R7 | 1 << ARM_R8 | 1 << ARM_R10 | \
+ 1 << ARM_FP)
+#define CALLEE_PUSH_MASK (CALLEE_MASK | 1 << ARM_LR)
+#define CALLEE_POP_MASK (CALLEE_MASK | 1 << ARM_PC)
+
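For reference, assuming the usual ARM core register numbering (r4..r8 = 4..8, r10 = 10, fp = 11, lr = 14, pc = 15 — not restated in this patch), the masks work out to:

	CALLEE_MASK      == 0x0df0	/* r4-r8, r10, fp        */
	CALLEE_PUSH_MASK == 0x4df0	/* ... plus lr, for push */
	CALLEE_POP_MASK  == 0x8df0	/* ... plus pc, for pop  */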
#define STACK_OFFSET(k) (k)
#define TMP_REG_1 (MAX_BPF_JIT_REG + 0) /* TEMP Register 1 */
#define TMP_REG_2 (MAX_BPF_JIT_REG + 1) /* TEMP Register 2 */
#define TCALL_CNT (MAX_BPF_JIT_REG + 2) /* Tail Call Count */
-/* Flags used for JIT optimization */
-#define SEEN_CALL (1 << 0)
-
#define FLAG_IMM_OVERFLOW (1 << 0)
/*
@@ -95,7 +139,6 @@ static const u8 bpf2a32[][2] = {
* idx : index of current last JITed instruction.
* prologue_bytes : bytes used in prologue.
* epilogue_offset : offset of epilogue starting.
- * seen : bit mask used for JIT optimization.
* offsets : array of eBPF instruction offsets in
* JITed code.
* target : final JITed code.
@@ -110,7 +153,6 @@ struct jit_ctx {
unsigned int idx;
unsigned int prologue_bytes;
unsigned int epilogue_offset;
- u32 seen;
u32 flags;
u32 *offsets;
u32 *target;
@@ -179,8 +221,13 @@ static void jit_fill_hole(void *area, unsigned int size)
*ptr++ = __opcode_to_mem_arm(ARM_INST_UDF);
}
-/* Stack must be multiples of 16 Bytes */
-#define STACK_ALIGN(sz) (((sz) + 3) & ~3)
+#if defined(CONFIG_AEABI) && (__LINUX_ARM_ARCH__ >= 5)
+/* EABI requires the stack to be aligned to 64-bit boundaries */
+#define STACK_ALIGNMENT 8
+#else
+/* Stack must be aligned to 32-bit boundaries */
+#define STACK_ALIGNMENT 4
+#endif
/* Stack space for BPF_REG_2, BPF_REG_3, BPF_REG_4,
* BPF_REG_5, BPF_REG_7, BPF_REG_8, BPF_REG_9,
@@ -194,7 +241,7 @@ static void jit_fill_hole(void *area, unsigned int size)
+ SCRATCH_SIZE + \
+ 4 /* extra for skb_copy_bits buffer */)
-#define STACK_SIZE STACK_ALIGN(_STACK_SIZE)
+#define STACK_SIZE ALIGN(_STACK_SIZE, STACK_ALIGNMENT)
/* Get the offset of eBPF REGISTERs stored on scratch space. */
#define STACK_VAR(off) (STACK_SIZE-off-4)
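A quick worked example of why the alignment change matters (numbers invented for illustration): the old STACK_ALIGN() only rounded up to a 4-byte multiple, so an odd-sized frame stayed under-aligned for EABI, whereas ALIGN(x, STACK_ALIGNMENT) rounds up to the 8-byte boundary on AEABI builds:

	/* old: (((sz) + 3) & ~3) */
	STACK_ALIGN(92)            == 92	/* still only 4-byte aligned */
	/* new, with STACK_ALIGNMENT == 8 */
	ALIGN(92, STACK_ALIGNMENT) == 96	/* 64-bit aligned as EABI requires */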
@@ -285,16 +332,19 @@ static inline void emit_mov_i(const u8 rd, u32 val, struct jit_ctx *ctx)
emit_mov_i_no8m(rd, val, ctx);
}
-static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+static void emit_bx_r(u8 tgt_reg, struct jit_ctx *ctx)
{
- ctx->seen |= SEEN_CALL;
-#if __LINUX_ARM_ARCH__ < 5
- emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
-
if (elf_hwcap & HWCAP_THUMB)
emit(ARM_BX(tgt_reg), ctx);
else
emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
+}
+
+static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
+{
+#if __LINUX_ARM_ARCH__ < 5
+ emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
+ emit_bx_r(tgt_reg, ctx);
#else
emit(ARM_BLX_R(tgt_reg), ctx);
#endif
@@ -354,7 +404,6 @@ static inline void emit_udivmod(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx, u8 op)
}
/* Call appropriate function */
- ctx->seen |= SEEN_CALL;
emit_mov_i(ARM_IP, op == BPF_DIV ?
(u32)jit_udiv32 : (u32)jit_mod32, ctx);
emit_blx_r(ARM_IP, ctx);
@@ -620,8 +669,6 @@ static inline void emit_a32_lsh_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do LSH operation */
emit(ARM_SUB_I(ARM_IP, rt, 32), ctx);
emit(ARM_RSB_I(tmp2[0], rt, 32), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_MOV_SR(ARM_LR, rm, SRTYPE_ASL, rt), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rd, SRTYPE_ASL, ARM_IP), ctx);
emit(ARM_ORR_SR(ARM_IP, ARM_LR, rd, SRTYPE_LSR, tmp2[0]), ctx);
@@ -656,8 +703,6 @@ static inline void emit_a32_arsh_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do the ARSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
_emit(ARM_COND_MI, ARM_B(0), ctx);
@@ -692,8 +737,6 @@ static inline void emit_a32_lsr_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do LSH operation */
emit(ARM_RSB_I(ARM_IP, rt, 32), ctx);
emit(ARM_SUBS_I(tmp2[0], rt, 32), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_MOV_SR(ARM_LR, rd, SRTYPE_LSR, rt), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_ASL, ARM_IP), ctx);
emit(ARM_ORR_SR(ARM_LR, ARM_LR, rm, SRTYPE_LSR, tmp2[0]), ctx);
@@ -828,8 +871,6 @@ static inline void emit_a32_mul_r64(const u8 dst[], const u8 src[], bool dstk,
/* Do Multiplication */
emit(ARM_MUL(ARM_IP, rd, rn), ctx);
emit(ARM_MUL(ARM_LR, rm, rt), ctx);
- /* As we are using ARM_LR */
- ctx->seen |= SEEN_CALL;
emit(ARM_ADD_R(ARM_LR, ARM_IP, ARM_LR), ctx);
emit(ARM_UMULL(ARM_IP, rm, rd, rt), ctx);
@@ -872,33 +913,53 @@ static inline void emit_str_r(const u8 dst, const u8 src, bool dstk,
}
/* dst = *(size*)(src + off) */
-static inline void emit_ldx_r(const u8 dst, const u8 src, bool dstk,
- const s32 off, struct jit_ctx *ctx, const u8 sz){
+static inline void emit_ldx_r(const u8 dst[], const u8 src, bool dstk,
+ s32 off, struct jit_ctx *ctx, const u8 sz){
const u8 *tmp = bpf2a32[TMP_REG_1];
- u8 rd = dstk ? tmp[1] : dst;
+ const u8 *rd = dstk ? tmp : dst;
u8 rm = src;
+ s32 off_max;
- if (off) {
+ if (sz == BPF_H)
+ off_max = 0xff;
+ else
+ off_max = 0xfff;
+
+ if (off < 0 || off > off_max) {
emit_a32_mov_i(tmp[0], off, false, ctx);
emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);
rm = tmp[0];
+ off = 0;
+ } else if (rd[1] == rm) {
+ emit(ARM_MOV_R(tmp[0], rm), ctx);
+ rm = tmp[0];
}
switch (sz) {
- case BPF_W:
- /* Load a Word */
- emit(ARM_LDR_I(rd, rm, 0), ctx);
+ case BPF_B:
+ /* Load a Byte */
+ emit(ARM_LDRB_I(rd[1], rm, off), ctx);
+ emit_a32_mov_i(dst[0], 0, dstk, ctx);
break;
case BPF_H:
/* Load a HalfWord */
- emit(ARM_LDRH_I(rd, rm, 0), ctx);
+ emit(ARM_LDRH_I(rd[1], rm, off), ctx);
+ emit_a32_mov_i(dst[0], 0, dstk, ctx);
break;
- case BPF_B:
- /* Load a Byte */
- emit(ARM_LDRB_I(rd, rm, 0), ctx);
+ case BPF_W:
+ /* Load a Word */
+ emit(ARM_LDR_I(rd[1], rm, off), ctx);
+ emit_a32_mov_i(dst[0], 0, dstk, ctx);
+ break;
+ case BPF_DW:
+ /* Load a Double Word */
+ emit(ARM_LDR_I(rd[1], rm, off), ctx);
+ emit(ARM_LDR_I(rd[0], rm, off + 4), ctx);
break;
}
if (dstk)
- emit(ARM_STR_I(rd, ARM_SP, STACK_VAR(dst)), ctx);
+ emit(ARM_STR_I(rd[1], ARM_SP, STACK_VAR(dst[1])), ctx);
+ if (dstk && sz == BPF_DW)
+ emit(ARM_STR_I(rd[0], ARM_SP, STACK_VAR(dst[0])), ctx);
}
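The off_max split is driven by the ARM load encodings (my reading of the architecture, not spelled out in the patch): LDRH only carries an 8-bit immediate offset while LDR/LDRB carry a 12-bit one, so anything negative or out of range is folded into the base register first, exactly as the fallback path above does. For a half-word load at offset 0x100, for instance, the emitted sequence is effectively:

	emit_a32_mov_i(tmp[0], 0x100, false, ctx);	/* tmp  = 0x100       */
	emit(ARM_ADD_R(tmp[0], tmp[0], src), ctx);	/* tmp += src         */
	emit(ARM_LDRH_I(rd[1], tmp[0], 0), ctx);	/* ldrh rd, [tmp, #0] */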
/* Arithmetic Operation */
@@ -906,7 +967,6 @@ static inline void emit_ar_r(const u8 rd, const u8 rt, const u8 rm,
const u8 rn, struct jit_ctx *ctx, u8 op) {
switch (op) {
case BPF_JSET:
- ctx->seen |= SEEN_CALL;
emit(ARM_AND_R(ARM_IP, rt, rn), ctx);
emit(ARM_AND_R(ARM_LR, rd, rm), ctx);
emit(ARM_ORRS_R(ARM_IP, ARM_LR, ARM_IP), ctx);
@@ -945,7 +1005,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
const u8 *tcc = bpf2a32[TCALL_CNT];
const int idx0 = ctx->idx;
#define cur_offset (ctx->idx - idx0)
-#define jmp_offset (out_offset - (cur_offset))
+#define jmp_offset (out_offset - (cur_offset) - 2)
u32 off, lo, hi;
/* if (index >= array->map.max_entries)
@@ -956,7 +1016,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit_a32_mov_i(tmp[1], off, false, ctx);
emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r2[1])), ctx);
emit(ARM_LDR_R(tmp[1], tmp2[1], tmp[1]), ctx);
- /* index (64 bit) */
+ /* index is 32-bit for arrays */
emit(ARM_LDR_I(tmp2[1], ARM_SP, STACK_VAR(r3[1])), ctx);
/* index >= array->map.max_entries */
emit(ARM_CMP_R(tmp2[1], tmp[1]), ctx);
@@ -997,7 +1057,7 @@ static int emit_bpf_tail_call(struct jit_ctx *ctx)
emit_a32_mov_i(tmp2[1], off, false, ctx);
emit(ARM_LDR_R(tmp[1], tmp[1], tmp2[1]), ctx);
emit(ARM_ADD_I(tmp[1], tmp[1], ctx->prologue_bytes), ctx);
- emit(ARM_BX(tmp[1]), ctx);
+ emit_bx_r(tmp[1], ctx);
/* out: */
if (out_offset == -1)
@@ -1070,54 +1130,22 @@ static void build_prologue(struct jit_ctx *ctx)
const u8 r2 = bpf2a32[BPF_REG_1][1];
const u8 r3 = bpf2a32[BPF_REG_1][0];
const u8 r4 = bpf2a32[BPF_REG_6][1];
- const u8 r5 = bpf2a32[BPF_REG_6][0];
- const u8 r6 = bpf2a32[TMP_REG_1][1];
- const u8 r7 = bpf2a32[TMP_REG_1][0];
- const u8 r8 = bpf2a32[TMP_REG_2][1];
- const u8 r10 = bpf2a32[TMP_REG_2][0];
const u8 fplo = bpf2a32[BPF_REG_FP][1];
const u8 fphi = bpf2a32[BPF_REG_FP][0];
- const u8 sp = ARM_SP;
const u8 *tcc = bpf2a32[TCALL_CNT];
- u16 reg_set = 0;
-
- /*
- * eBPF prog stack layout
- *
- * high
- * original ARM_SP => +-----+ eBPF prologue
- * |FP/LR|
- * current ARM_FP => +-----+
- * | ... | callee saved registers
- * eBPF fp register => +-----+ <= (BPF_FP)
- * | ... | eBPF JIT scratch space
- * | | eBPF prog stack
- * +-----+
- * |RSVD | JIT scratchpad
- * current A64_SP => +-----+ <= (BPF_FP - STACK_SIZE)
- * | |
- * | ... | Function call stack
- * | |
- * +-----+
- * low
- */
-
/* Save callee saved registers. */
- reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
#ifdef CONFIG_FRAME_POINTER
- reg_set |= (1<<ARM_FP) | (1<<ARM_IP) | (1<<ARM_LR) | (1<<ARM_PC);
- emit(ARM_MOV_R(ARM_IP, sp), ctx);
+ u16 reg_set = CALLEE_PUSH_MASK | 1 << ARM_IP | 1 << ARM_PC;
+ emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
emit(ARM_PUSH(reg_set), ctx);
emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
#else
- /* Check if call instruction exists in BPF body */
- if (ctx->seen & SEEN_CALL)
- reg_set |= (1<<ARM_LR);
- emit(ARM_PUSH(reg_set), ctx);
+ emit(ARM_PUSH(CALLEE_PUSH_MASK), ctx);
+ emit(ARM_MOV_R(ARM_FP, ARM_SP), ctx);
#endif
/* Save frame pointer for later */
- emit(ARM_SUB_I(ARM_IP, sp, SCRATCH_SIZE), ctx);
+ emit(ARM_SUB_I(ARM_IP, ARM_SP, SCRATCH_SIZE), ctx);
ctx->stack_size = imm8m(STACK_SIZE);
@@ -1140,33 +1168,19 @@ static void build_prologue(struct jit_ctx *ctx)
/* end of prologue */
}
+/* restore callee saved registers. */
static void build_epilogue(struct jit_ctx *ctx)
{
- const u8 r4 = bpf2a32[BPF_REG_6][1];
- const u8 r5 = bpf2a32[BPF_REG_6][0];
- const u8 r6 = bpf2a32[TMP_REG_1][1];
- const u8 r7 = bpf2a32[TMP_REG_1][0];
- const u8 r8 = bpf2a32[TMP_REG_2][1];
- const u8 r10 = bpf2a32[TMP_REG_2][0];
- u16 reg_set = 0;
-
- /* unwind function call stack */
- emit(ARM_ADD_I(ARM_SP, ARM_SP, ctx->stack_size), ctx);
-
- /* restore callee saved registers. */
- reg_set |= (1<<r4) | (1<<r5) | (1<<r6) | (1<<r7) | (1<<r8) | (1<<r10);
#ifdef CONFIG_FRAME_POINTER
- /* the first instruction of the prologue was: mov ip, sp */
- reg_set |= (1<<ARM_FP) | (1<<ARM_SP) | (1<<ARM_PC);
+ /* When using frame pointers, some additional registers need to
+ * be loaded. */
+ u16 reg_set = CALLEE_POP_MASK | 1 << ARM_SP;
+ emit(ARM_SUB_I(ARM_SP, ARM_FP, hweight16(reg_set) * 4), ctx);
emit(ARM_LDM(ARM_SP, reg_set), ctx);
#else
- if (ctx->seen & SEEN_CALL)
- reg_set |= (1<<ARM_PC);
/* Restore callee saved registers. */
- emit(ARM_POP(reg_set), ctx);
- /* Return back to the callee function */
- if (!(ctx->seen & SEEN_CALL))
- emit(ARM_BX(ARM_LR), ctx);
+ emit(ARM_MOV_R(ARM_SP, ARM_FP), ctx);
+ emit(ARM_POP(CALLEE_POP_MASK), ctx);
#endif
}
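To see why the frame-pointer epilogue lands on the right stack slots (my own arithmetic, not from the patch): the prologue pushes {r4-r8, r10, fp, ip, lr, pc}, ten registers, after copying the original sp into ip, and then sets fp = ip - 4, so fp points at the saved pc slot. The epilogue's reg_set is {r4-r8, r10, fp, sp, pc}, nine registers, so the SUB lands exactly on the saved r4. A compact worked check:

	/* prologue push (10 regs), stack grows down:                       */
	/*   old_sp-40: r4 ... old_sp-16: fp, -12: ip (old sp),            */
	/*   -8: lr, -4: pc;   fp = ip - 4 = old_sp - 4                    */
	/* epilogue: hweight16(CALLEE_POP_MASK | 1 << ARM_SP) == 9          */
	/*   sp = fp - 9*4 = old_sp - 40        -> points at saved r4       */
	/*   ldm sp, {r4-r8,r10,fp,sp,pc}: sp <- saved ip (old sp),         */
	/*                                 pc <- saved lr (return)          */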
@@ -1394,8 +1408,6 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
emit_rev32(rt, rt, ctx);
goto emit_bswap_uxt;
case 64:
- /* Because of the usage of ARM_LR */
- ctx->seen |= SEEN_CALL;
emit_rev32(ARM_LR, rt, ctx);
emit_rev32(rt, rd, ctx);
emit(ARM_MOV_R(rd, ARM_LR), ctx);
@@ -1448,22 +1460,7 @@ exit:
rn = sstk ? tmp2[1] : src_lo;
if (sstk)
emit(ARM_LDR_I(rn, ARM_SP, STACK_VAR(src_lo)), ctx);
- switch (BPF_SIZE(code)) {
- case BPF_W:
- /* Load a Word */
- case BPF_H:
- /* Load a Half-Word */
- case BPF_B:
- /* Load a Byte */
- emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_SIZE(code));
- emit_a32_mov_i(dst_hi, 0, dstk, ctx);
- break;
- case BPF_DW:
- /* Load a double word */
- emit_ldx_r(dst_lo, rn, dstk, off, ctx, BPF_W);
- emit_ldx_r(dst_hi, rn, dstk, off+4, ctx, BPF_W);
- break;
- }
+ emit_ldx_r(dst, rn, dstk, off, ctx, BPF_SIZE(code));
break;
/* R0 = ntohx(*(size *)(((struct sk_buff *)R6)->data + imm)) */
case BPF_LD | BPF_ABS | BPF_W:
diff --git a/arch/arm/probes/kprobes/core.c b/arch/arm/probes/kprobes/core.c
index 52d1cd14fda4..e90cc8a08186 100644
--- a/arch/arm/probes/kprobes/core.c
+++ b/arch/arm/probes/kprobes/core.c
@@ -32,6 +32,7 @@
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/patch.h>
+#include <asm/sections.h>
#include "../decode-arm.h"
#include "../decode-thumb.h"
@@ -64,9 +65,6 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
int is;
const struct decode_checker **checkers;
- if (in_exception_text(addr))
- return -EINVAL;
-
#ifdef CONFIG_THUMB2_KERNEL
thumb = true;
addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */
@@ -680,3 +678,13 @@ int __init arch_init_kprobes()
#endif
return 0;
}
+
+bool arch_within_kprobe_blacklist(unsigned long addr)
+{
+ void *a = (void *)addr;
+
+ return __in_irqentry_text(addr) ||
+ in_entry_text(addr) ||
+ in_idmap_text(addr) ||
+ memory_contains(__kprobes_text_start, __kprobes_text_end, a, 1);
+}