From 8f51965e7033441cb10ce577d1ef2d580a80af08 Mon Sep 17 00:00:00 2001 From: Dave Martin Date: Thu, 23 Jun 2011 17:10:05 +0100 Subject: ARM: assembler.h: Add string declaration macro Declaring strings in assembler source involves a certain amount of tedious boilerplate code in order to annotate the resulting symbol correctly. Encapsulating this boilerplate in a macro should help to avoid some duplication and the occasional mistake. Signed-off-by: Dave Martin Acked-by: Nicolas Pitre --- arch/arm/include/asm/assembler.h | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h index 65c3f2474f5e..29035e86a59d 100644 --- a/arch/arm/include/asm/assembler.h +++ b/arch/arm/include/asm/assembler.h @@ -293,4 +293,13 @@ .macro ldrusr, reg, ptr, inc, cond=al, rept=1, abort=9001f usracc ldr, \reg, \ptr, \inc, \cond, \rept, \abort .endm + +/* Utility macro for declaring string literals */ + .macro string name:req, string + .type \name , #object +\name: + .asciz "\string" + .size \name , . - \name + .endm + #endif /* __ASM_ASSEMBLER_H__ */ -- cgit v1.2.3 From 194a7f720f6f009867a01b760f311b68f1e81872 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 3 Jun 2011 14:09:37 +0100 Subject: ARM: hwcaps: use shifts instead of hardcoded constants The HWCAP numbers are defined as constants, each one being a power of 2. This has become slightly unwieldy now that we have reached 32k. This patch changes the HWCAP defines to use (1 << n) instead of coding the constant directly. The values remain unchanged. Signed-off-by: Will Deacon --- arch/arm/include/asm/hwcap.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index c1062c317103..81512db2b628 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -4,22 +4,22 @@ /* * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP */ -#define HWCAP_SWP 1 -#define HWCAP_HALF 2 -#define HWCAP_THUMB 4 -#define HWCAP_26BIT 8 /* Play it safe */ -#define HWCAP_FAST_MULT 16 -#define HWCAP_FPA 32 -#define HWCAP_VFP 64 -#define HWCAP_EDSP 128 -#define HWCAP_JAVA 256 -#define HWCAP_IWMMXT 512 -#define HWCAP_CRUNCH 1024 -#define HWCAP_THUMBEE 2048 -#define HWCAP_NEON 4096 -#define HWCAP_VFPv3 8192 -#define HWCAP_VFPv3D16 16384 -#define HWCAP_TLS 32768 +#define HWCAP_SWP (1 << 0) +#define HWCAP_HALF (1 << 1) +#define HWCAP_THUMB (1 << 2) +#define HWCAP_26BIT (1 << 3) /* Play it safe */ +#define HWCAP_FAST_MULT (1 << 4) +#define HWCAP_FPA (1 << 5) +#define HWCAP_VFP (1 << 6) +#define HWCAP_EDSP (1 << 7) +#define HWCAP_JAVA (1 << 8) +#define HWCAP_IWMMXT (1 << 9) +#define HWCAP_CRUNCH (1 << 10) +#define HWCAP_THUMBEE (1 << 11) +#define HWCAP_NEON (1 << 12) +#define HWCAP_VFPv3 (1 << 13) +#define HWCAP_VFPv3D16 (1 << 14) +#define HWCAP_TLS (1 << 15) #if defined(__KERNEL__) && !defined(__ASSEMBLY__) /* -- cgit v1.2.3 From 254cdf8ec39653d19cce71b6622f38a6b62ac3a8 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 3 Jun 2011 14:15:22 +0100 Subject: ARM: hwcaps: add new HWCAP defines for ARMv7-A Modern ARMv7-A cores can optionally implement these new hardware features: - VFPv4: The latest version of the ARMv7 vector floating-point extensions, including hardware support for fused multiple accumulate. D16 or D32 variants may be implemented. 
- Integer divide: The SDIV and UDIV instructions provide signed and unsigned integer division in hardware. When implemented, these instructions may be available in either both Thumb and ARM, or Thumb only. This patch adds new HWCAP defines to describe these new features. The integer divide capabilities are split into two bits for ARM and Thumb respectively. Whilst HWCAP_IDIVA should never be set if HWCAP_IDIVT is clear, separating the bits makes it easier to interpret from userspace. Signed-off-by: Will Deacon --- arch/arm/include/asm/hwcap.h | 4 ++++ arch/arm/kernel/setup.c | 4 ++++ 2 files changed, 8 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/hwcap.h b/arch/arm/include/asm/hwcap.h index 81512db2b628..c93a22a8b924 100644 --- a/arch/arm/include/asm/hwcap.h +++ b/arch/arm/include/asm/hwcap.h @@ -20,6 +20,10 @@ #define HWCAP_VFPv3 (1 << 13) #define HWCAP_VFPv3D16 (1 << 14) #define HWCAP_TLS (1 << 15) +#define HWCAP_VFPv4 (1 << 16) +#define HWCAP_IDIVA (1 << 17) +#define HWCAP_IDIVT (1 << 18) +#define HWCAP_IDIV (HWCAP_IDIVA | HWCAP_IDIVT) #if defined(__KERNEL__) && !defined(__ASSEMBLY__) /* diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ed11fb08b05a..699df68fc840 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -977,6 +977,10 @@ static const char *hwcap_str[] = { "neon", "vfpv3", "vfpv3d16", + "tls", + "vfpv4", + "idiva", + "idivt", NULL }; -- cgit v1.2.3 From 0c205cbe20654616e2f8389c0c1ff707d9dccb63 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Fri, 3 Jun 2011 17:40:15 +0100 Subject: ARM: perf: add support for the Cortex-A5 PMU This patch adds support for the Cortex-A5 PMU to the ARMv7 perf-event backend. Signed-off-by: Will Deacon --- arch/arm/include/asm/perf_event.h | 1 + arch/arm/kernel/perf_event.c | 3 + arch/arm/kernel/perf_event_v7.c | 146 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 150 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index c4aa4e8c6af9..207bd3c79ab6 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -24,6 +24,7 @@ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_V6MP, ARM_PERF_PMU_ID_CA8, ARM_PERF_PMU_ID_CA9, + ARM_PERF_PMU_ID_CA5, ARM_NUM_PMU_IDS, }; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index d53c0abc4dd3..df4e517687bf 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -660,6 +660,9 @@ init_hw_perf_events(void) case 0xC090: /* Cortex-A9 */ armpmu = armv7_a9_pmu_init(); break; + case 0xC050: /* Cortex-A5 */ + armpmu = armv7_a5_pmu_init(); + break; } /* Intel CPUs [xscale]. 
*/ } else if (0x69 == implementor) { diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index 01b1145f07e5..db1d6c4a32ac 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -153,6 +153,21 @@ enum armv7_a9_perf_types { ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 }; +/* ARMv7 Cortex-A5 specific event types */ +enum armv7_a5_perf_types { + ARMV7_PERFCTR_IRQ_TAKEN = 0x86, + ARMV7_PERFCTR_FIQ_TAKEN = 0x87, + + ARMV7_PERFCTR_EXT_MEM_RQST = 0xc0, + ARMV7_PERFCTR_NC_EXT_MEM_RQST = 0xc1, + ARMV7_PERFCTR_PREFETCH_LINEFILL = 0xc2, + ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3, + ARMV7_PERFCTR_ENTER_READ_ALLOC = 0xc4, + ARMV7_PERFCTR_READ_ALLOC = 0xc5, + + ARMV7_PERFCTR_STALL_SB_FULL = 0xc9, +}; + /* * Cortex-A8 HW events mapping * @@ -378,6 +393,122 @@ static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, }; +/* + * Cortex-A5 HW events mapping + */ +static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, +}; + +static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_DCACHE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_DCACHE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, + }, + }, + [C(L1I)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + /* + * The prefetch counters don't differentiate between the I + * side and the D side. 
+ */ + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + /* * Perf Events counters */ @@ -910,6 +1041,16 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) armv7pmu.num_events = armv7_read_num_pmnc_events(); return &armv7pmu; } + +static const struct arm_pmu *__init armv7_a5_pmu_init(void) +{ + armv7pmu.id = ARM_PERF_PMU_ID_CA5; + armv7pmu.name = "ARMv7 Cortex-A5"; + armv7pmu.cache_map = &armv7_a5_perf_cache_map; + armv7pmu.event_map = &armv7_a5_perf_map; + armv7pmu.num_events = armv7_read_num_pmnc_events(); + return &armv7pmu; +} #else static const struct arm_pmu *__init armv7_a8_pmu_init(void) { @@ -920,4 +1061,9 @@ static const struct arm_pmu *__init armv7_a9_pmu_init(void) { return NULL; } + +static const struct arm_pmu *__init armv7_a5_pmu_init(void) +{ + return NULL; +} #endif /* CONFIG_CPU_V7 */ -- cgit v1.2.3 From 14abd038a7a209193c58ee7dde01ef4bf1523a91 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Wed, 19 Jan 2011 14:24:38 +0000 Subject: ARM: perf: add support for the Cortex-A15 PMU This patch adds support for the Cortex-A15 PMU to the ARMv7 perf-event backend. 
Signed-off-by: Will Deacon --- arch/arm/include/asm/perf_event.h | 1 + arch/arm/kernel/perf_event.c | 3 + arch/arm/kernel/perf_event_v7.c | 153 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 157 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h index 207bd3c79ab6..0f8e3827a89b 100644 --- a/arch/arm/include/asm/perf_event.h +++ b/arch/arm/include/asm/perf_event.h @@ -25,6 +25,7 @@ enum arm_perf_pmu_ids { ARM_PERF_PMU_ID_CA8, ARM_PERF_PMU_ID_CA9, ARM_PERF_PMU_ID_CA5, + ARM_PERF_PMU_ID_CA15, ARM_NUM_PMU_IDS, }; diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index df4e517687bf..262ea67f60ae 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -663,6 +663,9 @@ init_hw_perf_events(void) case 0xC050: /* Cortex-A5 */ armpmu = armv7_a5_pmu_init(); break; + case 0xC0F0: /* Cortex-A15 */ + armpmu = armv7_a15_pmu_init(); + break; } /* Intel CPUs [xscale]. */ } else if (0x69 == implementor) { diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c index db1d6c4a32ac..963317896c80 100644 --- a/arch/arm/kernel/perf_event_v7.c +++ b/arch/arm/kernel/perf_event_v7.c @@ -168,6 +168,24 @@ enum armv7_a5_perf_types { ARMV7_PERFCTR_STALL_SB_FULL = 0xc9, }; +/* ARMv7 Cortex-A15 specific event types */ +enum armv7_a15_perf_types { + ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS = 0x40, + ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS = 0x41, + ARMV7_PERFCTR_L1_DCACHE_READ_REFILL = 0x42, + ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL = 0x43, + + ARMV7_PERFCTR_L1_DTLB_READ_REFILL = 0x4C, + ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL = 0x4D, + + ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS = 0x50, + ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS = 0x51, + ARMV7_PERFCTR_L2_DCACHE_READ_REFILL = 0x52, + ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL = 0x53, + + ARMV7_PERFCTR_SPEC_PC_WRITE = 0x76, +}; + /* * Cortex-A8 HW events mapping * @@ -509,6 +527,126 @@ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] }, }; +/* + * Cortex-A15 HW events mapping + */ +static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = { + [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, + [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, + [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, + [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE, + [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES, +}; + +static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] + [PERF_COUNT_HW_CACHE_OP_MAX] + [PERF_COUNT_HW_CACHE_RESULT_MAX] = { + [C(L1D)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(L1I)] = { + /* + * Not all performance counters differentiate between read + * and write accesses/misses so we're not always strictly + * correct, but it's the best we can do. Writes and reads get + * combined in these cases. 
+ */ + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS, + [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(LL)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] + = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(DTLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DTLB_READ_REFILL, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(ITLB)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, + [C(BPU)] = { + [C(OP_READ)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_WRITE)] = { + [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED, + [C(RESULT_MISS)] + = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, + }, + [C(OP_PREFETCH)] = { + [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, + [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, + }, + }, +}; + /* * Perf Events counters */ @@ -1051,6 +1189,16 @@ static const struct arm_pmu *__init armv7_a5_pmu_init(void) armv7pmu.num_events = armv7_read_num_pmnc_events(); return &armv7pmu; } + +static const struct arm_pmu *__init armv7_a15_pmu_init(void) +{ + armv7pmu.id = ARM_PERF_PMU_ID_CA15; + armv7pmu.name = "ARMv7 Cortex-A15"; + armv7pmu.cache_map = &armv7_a15_perf_cache_map; + armv7pmu.event_map = &armv7_a15_perf_map; + armv7pmu.num_events = armv7_read_num_pmnc_events(); + return &armv7pmu; +} #else static const struct arm_pmu *__init armv7_a8_pmu_init(void) { @@ -1066,4 +1214,9 @@ static const struct arm_pmu *__init armv7_a5_pmu_init(void) { return NULL; } + +static const struct arm_pmu *__init armv7_a15_pmu_init(void) +{ + return NULL; +} #endif /* CONFIG_CPU_V7 */ -- cgit v1.2.3 From 592201a9f154cdd5db59304d1369e94d8b551803 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Sat, 26 Mar 2011 19:19:07 +0000 Subject: ARM: Thumb-2: Support Thumb-2 in undefined instruction handler This patch allows undef_hook's to be specified for 32-bit Thumb instructions and also to be used for thumb kernel-side code. 32-bit Thumb instructions are specified in the form: ((first_half << 16 ) | second_half) which matches the layout used by the ARM ARM. ptrace was handling 32-bit Thumb instructions by hooking the first halfword and manually checking the second half. This method would be broken by this patch so it is migrated to make use of the new Thumb-2 support. 
Signed-off-by: Jon Medhurst Acked-by: Nicolas Pitre --- arch/arm/include/asm/ptrace.h | 8 ++++++++ arch/arm/kernel/ptrace.c | 28 +++------------------------- arch/arm/kernel/traps.c | 17 ++++++++++++++++- 3 files changed, 27 insertions(+), 26 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index 312d10877bd7..d484871698da 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -199,6 +199,14 @@ extern unsigned long profile_pc(struct pt_regs *regs); #define predicate(x) ((x) & 0xf0000000) #define PREDICATE_ALWAYS 0xe0000000 +/* + * True if instr is a 32-bit thumb instruction. This works if instr + * is the first or only half-word of a thumb instruction. It also works + * when instr holds all 32-bits of a wide thumb instruction if stored + * in the form (first_half<<16)|(second_half) + */ +#define is_wide_instruction(instr) ((unsigned)(instr) >= 0xe800) + /* * kprobe-based event tracer support */ diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c index 97260060bf26..897ade059f58 100644 --- a/arch/arm/kernel/ptrace.c +++ b/arch/arm/kernel/ptrace.c @@ -228,34 +228,12 @@ static struct undef_hook thumb_break_hook = { .fn = break_trap, }; -static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr) -{ - unsigned int instr2; - void __user *pc; - - /* Check the second half of the instruction. */ - pc = (void __user *)(instruction_pointer(regs) + 2); - - if (processor_mode(regs) == SVC_MODE) { - instr2 = *(u16 *) pc; - } else { - get_user(instr2, (u16 __user *)pc); - } - - if (instr2 == 0xa000) { - ptrace_break(current, regs); - return 0; - } else { - return 1; - } -} - static struct undef_hook thumb2_break_hook = { - .instr_mask = 0xffff, - .instr_val = 0xf7f0, + .instr_mask = 0xffffffff, + .instr_val = 0xf7f0a000, .cpsr_mask = PSR_T_BIT, .cpsr_val = PSR_T_BIT, - .fn = thumb2_break_trap, + .fn = break_trap, }; static int __init ptrace_break_init(void) diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c index 6807cb1e76dd..2d3436e9f71f 100644 --- a/arch/arm/kernel/traps.c +++ b/arch/arm/kernel/traps.c @@ -355,9 +355,24 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs) pc = (void __user *)instruction_pointer(regs); if (processor_mode(regs) == SVC_MODE) { - instr = *(u32 *) pc; +#ifdef CONFIG_THUMB2_KERNEL + if (thumb_mode(regs)) { + instr = ((u16 *)pc)[0]; + if (is_wide_instruction(instr)) { + instr <<= 16; + instr |= ((u16 *)pc)[1]; + } + } else +#endif + instr = *(u32 *) pc; } else if (thumb_mode(regs)) { get_user(instr, (u16 __user *)pc); + if (is_wide_instruction(instr)) { + unsigned int instr2; + get_user(instr2, (u16 __user *)pc+1); + instr <<= 16; + instr |= instr2; + } } else { get_user(instr, (u32 __user *)pc); } -- cgit v1.2.3 From 221bf15ffd2ad6cdc624aa4274f706499501c123 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Wed, 20 Apr 2011 10:52:38 +0100 Subject: ARM: kprobes: Split out internal parts of kprobes.h Later, we will be adding a considerable amount of internal implementation definitions to kprobe header files and it would be good to have these in local header file along side the source code, rather than pollute the existing header which is include by all users of kprobes. 
To this end, we add arch/arm/kernel/kprobes.h and move into this the existing internal defintions from arch/arm/include/asm/kprobes.h Signed-off-by: Jon Medhurst Acked-by: Nicolas Pitre --- arch/arm/include/asm/kprobes.h | 17 ----------------- arch/arm/kernel/kprobes-arm.c | 2 ++ arch/arm/kernel/kprobes.c | 2 ++ arch/arm/kernel/kprobes.h | 37 +++++++++++++++++++++++++++++++++++++ 4 files changed, 41 insertions(+), 17 deletions(-) create mode 100644 arch/arm/kernel/kprobes.h (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index e46bdd0097eb..57d37d52d71e 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -24,12 +24,6 @@ #define MAX_INSN_SIZE 2 #define MAX_STACK_SIZE 64 /* 32 would probably be OK */ -/* - * This undefined instruction must be unique and - * reserved solely for kprobes' use. - */ -#define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8 - #define regs_return_value(regs) ((regs)->ARM_r0) #define flush_insn_slot(p) do { } while (0) #define kretprobe_blacklist_size 0 @@ -62,20 +56,9 @@ struct kprobe_ctlblk { }; void arch_remove_kprobe(struct kprobe *); -void kretprobe_trampoline(void); - int kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr); int kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *data); -enum kprobe_insn { - INSN_REJECTED, - INSN_GOOD, - INSN_GOOD_NO_SLOT -}; - -enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, - struct arch_specific_insn *); -void __init arm_kprobe_decode_init(void); #endif /* _ARM_KPROBES_H */ diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c index 15eeff6aea0e..976ce14c5576 100644 --- a/arch/arm/kernel/kprobes-arm.c +++ b/arch/arm/kernel/kprobes-arm.c @@ -61,6 +61,8 @@ #include #include +#include "kprobes.h" + #define sign_extend(x, signbit) ((x) | (0 - ((x) & (1 << (signbit))))) #define branch_displacement(insn) sign_extend(((insn) & 0xffffff) << 2, 25) diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 1656c87501c0..3ba5f8d0d82c 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -28,6 +28,8 @@ #include #include +#include "kprobes.h" + #define MIN_STACK_SIZE(addr) \ min((unsigned long)MAX_STACK_SIZE, \ (unsigned long)current_thread_info() + THREAD_START_SP - (addr)) diff --git a/arch/arm/kernel/kprobes.h b/arch/arm/kernel/kprobes.h new file mode 100644 index 000000000000..87a5241b2f18 --- /dev/null +++ b/arch/arm/kernel/kprobes.h @@ -0,0 +1,37 @@ +/* + * arch/arm/kernel/kprobes.h + * + * Contents moved from arch/arm/include/asm/kprobes.h which is + * Copyright (C) 2006, 2007 Motorola Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + */ + +#ifndef _ARM_KERNEL_KPROBES_H +#define _ARM_KERNEL_KPROBES_H + +/* + * This undefined instruction must be unique and + * reserved solely for kprobes' use. 
+ */ +#define KPROBE_BREAKPOINT_INSTRUCTION 0xe7f001f8 + +enum kprobe_insn { + INSN_REJECTED, + INSN_GOOD, + INSN_GOOD_NO_SLOT +}; + +enum kprobe_insn arm_kprobe_decode_insn(kprobe_opcode_t, + struct arch_specific_insn *); + +void __init arm_kprobe_decode_init(void); + +#endif /* _ARM_KERNEL_KPROBES_H */ -- cgit v1.2.3 From c6a7d97d57ef41477a85f4c0f48ea5243132ee1f Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Thu, 9 Jun 2011 12:11:27 +0100 Subject: ARM: kprobes: Add hooks to override singlestep() When a probe fires we must single-step the instruction which was replaced by a breakpoint. As the steps to do this vary between ARM and Thumb instructions we need a way to customise single-stepping. This is done by adding a new hook called insn_singlestep to arch_specific_insn which is initialised by the instruction decoding functions. These single-step hooks must update PC and call the instruction handler. For Thumb instructions an additional step of updating ITSTATE is needed. We do this after calling the handler because some handlers will need to test if they are running in an IT block. Signed-off-by: Jon Medhurst Acked-by: Nicolas Pitre --- arch/arm/include/asm/kprobes.h | 9 +++++---- arch/arm/kernel/kprobes-arm.c | 7 +++++++ arch/arm/kernel/kprobes-thumb.c | 16 ++++++++++++++++ arch/arm/kernel/kprobes.c | 8 +++----- 4 files changed, 31 insertions(+), 9 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index 57d37d52d71e..1e9ff56d40c7 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -32,14 +32,15 @@ typedef u32 kprobe_opcode_t; struct kprobe; typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); - typedef unsigned long (kprobe_check_cc)(unsigned long); +typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *); /* Architecture specific copy of original instruction. 
*/ struct arch_specific_insn { - kprobe_opcode_t *insn; - kprobe_insn_handler_t *insn_handler; - kprobe_check_cc *insn_check_cc; + kprobe_opcode_t *insn; + kprobe_insn_handler_t *insn_handler; + kprobe_check_cc *insn_check_cc; + kprobe_insn_singlestep_t *insn_singlestep; }; struct prev_kprobe { diff --git a/arch/arm/kernel/kprobes-arm.c b/arch/arm/kernel/kprobes-arm.c index 0262b29163d1..a1143e86a09a 100644 --- a/arch/arm/kernel/kprobes-arm.c +++ b/arch/arm/kernel/kprobes-arm.c @@ -1494,6 +1494,12 @@ space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) return INSN_REJECTED; } +static void __kprobes arm_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 4; + p->ainsn.insn_handler(p, regs); +} + /* Return: * INSN_REJECTED If instruction is one not allowed to kprobe, * INSN_GOOD If instruction is supported and uses instruction slot, @@ -1509,6 +1515,7 @@ space_cccc_11xx(kprobe_opcode_t insn, struct arch_specific_insn *asi) enum kprobe_insn __kprobes arm_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + asi->insn_singlestep = arm_singlestep; asi->insn_check_cc = kprobe_condition_checks[insn>>28]; asi->insn[1] = KPROBE_RETURN_INSTRUCTION; diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c index 24a188b1601a..973c3eb1243a 100644 --- a/arch/arm/kernel/kprobes-thumb.c +++ b/arch/arm/kernel/kprobes-thumb.c @@ -33,9 +33,24 @@ static unsigned long __kprobes thumb_check_cc(unsigned long cpsr) return true; } +static void __kprobes thumb16_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 2; + p->ainsn.insn_handler(p, regs); + regs->ARM_cpsr = it_advance(regs->ARM_cpsr); +} + +static void __kprobes thumb32_singlestep(struct kprobe *p, struct pt_regs *regs) +{ + regs->ARM_pc += 4; + p->ainsn.insn_handler(p, regs); + regs->ARM_cpsr = it_advance(regs->ARM_cpsr); +} + enum kprobe_insn __kprobes thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + asi->insn_singlestep = thumb16_singlestep; asi->insn_check_cc = thumb_check_cc; return INSN_REJECTED; } @@ -43,6 +58,7 @@ thumb16_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) enum kprobe_insn __kprobes thumb32_kprobe_decode_insn(kprobe_opcode_t insn, struct arch_specific_insn *asi) { + asi->insn_singlestep = thumb32_singlestep; asi->insn_check_cc = thumb_check_cc; return INSN_REJECTED; } diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 0003dfd3b854..77b7c6974802 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -227,12 +227,10 @@ singlestep_skip(struct kprobe *p, struct pt_regs *regs) #endif } -static void __kprobes singlestep(struct kprobe *p, struct pt_regs *regs, - struct kprobe_ctlblk *kcb) +static inline void __kprobes +singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb) { - regs->ARM_pc += 4; - if (p->ainsn.insn_check_cc(regs->ARM_cpsr)) - p->ainsn.insn_handler(p, regs); + p->ainsn.insn_singlestep(p, regs); } /* -- cgit v1.2.3 From e2960317d4581689bf80dbad4d75e7a59f11a3f7 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Thu, 9 Jun 2011 14:05:51 +0100 Subject: ARM: kprobes: Extend arch_specific_insn to add pointer to emulated instruction When we come to emulating Thumb instructions then, to interwork correctly, the code on in the instruction slot must be invoked with a function pointer which has the least significant bit set. 
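
For example, doing this by hand would look roughly like the following sketch
(illustration only, not code from this patch; the helper name is made up):

  #include <stdint.h>

  typedef void (kprobe_insn_fn_t)(void);

  /* Hypothetical helper: make a slot of copied Thumb code callable */
  static kprobe_insn_fn_t *thumb_slot_fn(void *slot)
  {
          /* Bit 0 set so a BLX to this pointer executes the slot in Thumb state */
          return (kprobe_insn_fn_t *)((uintptr_t)slot | 1);
  }
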
Rather that set this by hand in every Thumb emulation function we will add a new field for this purpose to arch_specific_insn, called insn_fn. This also enables us to seamlessly share emulation functions between ARM and Thumb code. Signed-off-by: Jon Medhurst Acked-by: Nicolas Pitre --- arch/arm/include/asm/kprobes.h | 2 ++ arch/arm/kernel/kprobes.c | 5 +++++ 2 files changed, 7 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/kprobes.h b/arch/arm/include/asm/kprobes.h index 1e9ff56d40c7..feec86768f9c 100644 --- a/arch/arm/include/asm/kprobes.h +++ b/arch/arm/include/asm/kprobes.h @@ -34,6 +34,7 @@ struct kprobe; typedef void (kprobe_insn_handler_t)(struct kprobe *, struct pt_regs *); typedef unsigned long (kprobe_check_cc)(unsigned long); typedef void (kprobe_insn_singlestep_t)(struct kprobe *, struct pt_regs *); +typedef void (kprobe_insn_fn_t)(void); /* Architecture specific copy of original instruction. */ struct arch_specific_insn { @@ -41,6 +42,7 @@ struct arch_specific_insn { kprobe_insn_handler_t *insn_handler; kprobe_check_cc *insn_check_cc; kprobe_insn_singlestep_t *insn_singlestep; + kprobe_insn_fn_t *insn_fn; }; struct prev_kprobe { diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c index 77b7c6974802..129c1163248b 100644 --- a/arch/arm/kernel/kprobes.c +++ b/arch/arm/kernel/kprobes.c @@ -51,6 +51,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) kprobe_opcode_t insn; kprobe_opcode_t tmp_insn[MAX_INSN_SIZE]; unsigned long addr = (unsigned long)p->addr; + bool thumb; kprobe_decode_insn_t *decode_insn; int is; @@ -58,6 +59,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) return -EINVAL; #ifdef CONFIG_THUMB2_KERNEL + thumb = true; addr &= ~1; /* Bit 0 would normally be set to indicate Thumb code */ insn = ((u16 *)addr)[0]; if (is_wide_instruction(insn)) { @@ -67,6 +69,7 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) } else decode_insn = thumb16_kprobe_decode_insn; #else /* !CONFIG_THUMB2_KERNEL */ + thumb = false; if (addr & 0x3) return -EINVAL; insn = *p->addr; @@ -88,6 +91,8 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p) p->ainsn.insn[is] = tmp_insn[is]; flush_insns(p->ainsn.insn, sizeof(p->ainsn.insn[0]) * MAX_INSN_SIZE); + p->ainsn.insn_fn = (kprobe_insn_fn_t *) + ((uintptr_t)p->ainsn.insn | thumb); break; case INSN_GOOD_NO_SLOT: /* instruction doesn't need insn slot */ -- cgit v1.2.3 From 7460bce42323df6570c7ba5091cb5201c7af1944 Mon Sep 17 00:00:00 2001 From: Jon Medhurst Date: Fri, 3 Jun 2011 12:12:33 +0100 Subject: ARM: ptrace: Add APSR_MASK definition to ptrace.h APSR_MASK can be used to extract the APSR bits from the CPSR. The comment for these definitions is also changed because it was inaccurate as the existing defines didn't refer to any part of the APSR. 
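
As an example of intended use (not taken from this patch, helper name
illustrative), the new mask makes it easy to transplant just the flag bits
between two PSR values:

  #define APSR_MASK   0xf80f0000  /* N, Z, C, V, Q and GE[3:0] */

  /* Illustrative helper: copy only the APSR flags from 'src' into 'psr' */
  static inline unsigned long copy_apsr_flags(unsigned long psr, unsigned long src)
  {
          return (psr & ~APSR_MASK) | (src & APSR_MASK);
  }
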
Signed-off-by: Jon Medhurst Acked-by: Nicolas Pitre --- arch/arm/include/asm/ptrace.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h index d484871698da..96187ff58c24 100644 --- a/arch/arm/include/asm/ptrace.h +++ b/arch/arm/include/asm/ptrace.h @@ -69,8 +69,9 @@ #define PSR_c 0x000000ff /* Control */ /* - * ARMv7 groups of APSR bits + * ARMv7 groups of PSR bits */ +#define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */ #define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */ #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */ #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */ -- cgit v1.2.3 From 650320181a08b64d4421c65c639cf47ad8cc2cd6 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 18 Jul 2011 15:05:10 -0400 Subject: ARM: change ARM_DMA_ZONE_SIZE into a variable Having this value defined at compile time prevents multiple machines with conflicting definitions to coexist. Move it to a variable in preparation for having a per machine value selected at run time. This is relevant only when CONFIG_ZONE_DMA is selected. Signed-off-by: Nicolas Pitre --- arch/arm/include/asm/dma.h | 9 ++++++--- arch/arm/mm/init.c | 25 ++++++++++++++++--------- 2 files changed, 22 insertions(+), 12 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index 42005542932b..fcf15de8cadb 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -6,10 +6,13 @@ /* * This is the maximum virtual address which can be DMA'd from. */ -#ifndef ARM_DMA_ZONE_SIZE -#define MAX_DMA_ADDRESS 0xffffffff +#ifndef CONFIG_ZONE_DMA +#define MAX_DMA_ADDRESS 0xffffffffUL #else -#define MAX_DMA_ADDRESS (PAGE_OFFSET + ARM_DMA_ZONE_SIZE) +#define MAX_DMA_ADDRESS ({ \ + extern unsigned long arm_dma_zone_size; \ + arm_dma_zone_size ? \ + (PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; }) #endif #ifdef CONFIG_ISA_DMA_API diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 17d6cd0c57ed..4a8a01e0c3ab 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -28,6 +28,7 @@ #include #include #include +#include #include #include @@ -212,6 +213,14 @@ static void __init arm_bootmem_init(unsigned long start_pfn, } #ifdef CONFIG_ZONE_DMA + +#ifdef ARM_DMA_ZONE_SIZE +unsigned long arm_dma_zone_size = ARM_DMA_ZONE_SIZE; +#else +unsigned long arm_dma_zone_size __read_mostly; +#endif +EXPORT_SYMBOL(arm_dma_zone_size); + /* * The DMA mask corresponding to the maximum bus address allocatable * using GFP_DMA. The default here places no restriction on DMA @@ -275,19 +284,17 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, #endif } -#ifdef ARM_DMA_ZONE_SIZE -#ifndef CONFIG_ZONE_DMA -#error ARM_DMA_ZONE_SIZE set but no DMA zone to limit allocations -#endif - +#ifdef CONFIG_ZONE_DMA /* * Adjust the sizes according to any special requirements for * this machine type. 
*/ - arm_adjust_dma_zone(zone_size, zhole_size, - ARM_DMA_ZONE_SIZE >> PAGE_SHIFT); - - arm_dma_limit = PHYS_OFFSET + ARM_DMA_ZONE_SIZE - 1; + if (arm_dma_zone_size) { + arm_adjust_dma_zone(zone_size, zhole_size, + arm_dma_zone_size >> PAGE_SHIFT); + arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1; + } else + arm_dma_limit = 0xffffffff; #endif free_area_init_node(0, zone_size, min, zhole_size); -- cgit v1.2.3 From 4fddcaebb9014b4814f859420595cc419400fba6 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Tue, 5 Jul 2011 22:28:08 -0400 Subject: ARM: add dma_zone_size to the machine_desc structure Signed-off-by: Nicolas Pitre --- arch/arm/include/asm/mach/arch.h | 4 ++++ arch/arm/kernel/setup.c | 6 ++++++ 2 files changed, 10 insertions(+) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h index 946f4d778f71..3281fb4b12e3 100644 --- a/arch/arm/include/asm/mach/arch.h +++ b/arch/arm/include/asm/mach/arch.h @@ -23,6 +23,10 @@ struct machine_desc { unsigned int nr_irqs; /* number of IRQs */ +#ifdef CONFIG_ZONE_DMA + unsigned long dma_zone_size; /* size of DMA-able area */ +#endif + unsigned int video_start; /* start of video RAM */ unsigned int video_end; /* end of video RAM */ diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c index ed11fb08b05a..e0db84d7e384 100644 --- a/arch/arm/kernel/setup.c +++ b/arch/arm/kernel/setup.c @@ -916,6 +916,12 @@ void __init setup_arch(char **cmdline_p) cpu_init(); tcm_init(); +#ifdef CONFIG_ZONE_DMA + if (mdesc->dma_zone_size) { + extern unsigned long arm_dma_zone_size; + arm_dma_zone_size = mdesc->dma_zone_size; + } +#endif #ifdef CONFIG_MULTI_IRQ_HANDLER handle_arch_irq = mdesc->handle_irq; #endif -- cgit v1.2.3 From fb89fcfb151698776be6c900aec8161b01990e92 Mon Sep 17 00:00:00 2001 From: Nicolas Pitre Date: Mon, 18 Jul 2011 15:17:15 -0400 Subject: ARM: ARM_DMA_ZONE_SIZE is no more One less dependency on mach/memory.h. Signed-off-by: Nicolas Pitre --- arch/arm/include/asm/dma.h | 2 -- arch/arm/mm/init.c | 5 ----- 2 files changed, 7 deletions(-) (limited to 'arch/arm/include') diff --git a/arch/arm/include/asm/dma.h b/arch/arm/include/asm/dma.h index fcf15de8cadb..628670e9d7c9 100644 --- a/arch/arm/include/asm/dma.h +++ b/arch/arm/include/asm/dma.h @@ -1,8 +1,6 @@ #ifndef __ASM_ARM_DMA_H #define __ASM_ARM_DMA_H -#include - /* * This is the maximum virtual address which can be DMA'd from. */ diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 4a8a01e0c3ab..90a38c6baca4 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c @@ -28,7 +28,6 @@ #include #include #include -#include #include #include @@ -214,11 +213,7 @@ static void __init arm_bootmem_init(unsigned long start_pfn, #ifdef CONFIG_ZONE_DMA -#ifdef ARM_DMA_ZONE_SIZE -unsigned long arm_dma_zone_size = ARM_DMA_ZONE_SIZE; -#else unsigned long arm_dma_zone_size __read_mostly; -#endif EXPORT_SYMBOL(arm_dma_zone_size); /* -- cgit v1.2.3
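
Taken together, the last three patches replace the compile-time
ARM_DMA_ZONE_SIZE definition with a per-machine value selected at run time.
For illustration only (not part of these patches, with a made-up board name),
a platform that needs a restricted DMA zone would now express that in its
machine descriptor rather than in mach/memory.h, roughly as follows:

  /* somewhere in arch/arm/mach-xxx/board-example.c (hypothetical) */
  MACHINE_START(EXAMPLE, "Example board")
          /* Only used when CONFIG_ZONE_DMA is enabled */
          .dma_zone_size  = SZ_64M,
          /* ... existing .map_io, .init_irq, .timer, .init_machine ... */
  MACHINE_END

Machines that leave the field at zero keep the previous unrestricted
behaviour (arm_dma_limit = 0xffffffff), as handled in the mm/init.c hunk
above.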