Diffstat (limited to 'arch/arm/kernel')
-rw-r--r--  arch/arm/kernel/bios32.c      |    3
-rw-r--r--  arch/arm/kernel/dma.c         |   36
-rw-r--r--  arch/arm/kernel/entry-armv.S  |   12
-rw-r--r--  arch/arm/kernel/ftrace.c      |    8
-rw-r--r--  arch/arm/kernel/kgdb.c        |    5
-rw-r--r--  arch/arm/kernel/perf_event.c  |  928
-rw-r--r--  arch/arm/kernel/pmu.c         |  127
-rw-r--r--  arch/arm/kernel/process.c     |    8
-rw-r--r--  arch/arm/kernel/smp.c         |    8
-rw-r--r--  arch/arm/kernel/smp_twd.c     |   17
-rw-r--r--  arch/arm/kernel/time.c        |   70
11 files changed, 1046 insertions(+), 176 deletions(-)
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index bd397e0b663e..c6273a3bfc25 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -527,6 +527,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw)
if (!sys)
panic("PCI: unable to allocate sys data!");
+#ifdef CONFIG_PCI_DOMAINS
+ sys->domain = hw->domain;
+#endif
sys->hw = hw;
sys->busnr = busnr;
sys->swizzle = hw->swizzle;
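With CONFIG_PCI_DOMAINS enabled, each hw_pci descriptor now supplies the domain number that pcibios_init_hw() copies into the per-bus sysdata. A minimal sketch of a board descriptor filling it in, assuming hw_pci gained a matching domain field in the companion header change (not shown here, since this diff is limited to arch/arm/kernel); the setup/scan callbacks are hypothetical:

static struct hw_pci example_pci __initdata = {
#ifdef CONFIG_PCI_DOMAINS
	.domain		= 0,	/* assumption: a single PCI domain */
#endif
	.nr_controllers	= 1,
	.swizzle	= pci_std_swizzle,
	.setup		= example_pci_setup,	/* hypothetical */
	.scan		= example_pci_scan,	/* hypothetical */
};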
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 7d5b9fb01e71..2c4a185f92cd 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -16,6 +16,8 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/scatterlist.h>
+#include <linux/seq_file.h>
+#include <linux/proc_fs.h>
#include <asm/dma.h>
@@ -264,3 +266,37 @@ int get_dma_residue(unsigned int chan)
return ret;
}
EXPORT_SYMBOL(get_dma_residue);
+
+#ifdef CONFIG_PROC_FS
+static int proc_dma_show(struct seq_file *m, void *v)
+{
+ int i;
+
+ for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
+ dma_t *dma = dma_channel(i);
+ if (dma && dma->lock)
+ seq_printf(m, "%2d: %s\n", i, dma->device_id);
+ }
+ return 0;
+}
+
+static int proc_dma_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, proc_dma_show, NULL);
+}
+
+static const struct file_operations proc_dma_operations = {
+ .open = proc_dma_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int __init proc_dma_init(void)
+{
+ proc_create("dma", 0, NULL, &proc_dma_operations);
+ return 0;
+}
+
+__initcall(proc_dma_init);
+#endif
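Each channel claimed with request_dma() shows up in the new file as its number and device_id string. A user-space sketch that simply dumps /proc/dma (the sample output in the comment is illustrative):

#include <stdio.h>

int main(void)
{
	char line[64];
	FILE *f = fopen("/proc/dma", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);	/* e.g. " 2: floppy" */
	fclose(f);
	return 0;
}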
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 6c5cf369183b..7ee48e7f8f31 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -523,16 +523,16 @@ ENDPROC(__und_usr)
/*
* The out of line fixup for the ldrt above.
*/
- .section .fixup, "ax"
+ .pushsection .fixup, "ax"
4: mov pc, r9
- .previous
- .section __ex_table,"a"
+ .popsection
+ .pushsection __ex_table,"a"
.long 1b, 4b
#if __LINUX_ARM_ARCH__ >= 7
.long 2b, 4b
.long 3b, 4b
#endif
- .previous
+ .popsection
/*
* Check whether the instruction is a co-processor instruction.
@@ -676,10 +676,10 @@ do_fpe:
* lr = unrecognised FP instruction return address
*/
- .data
+ .pushsection .data
ENTRY(fp_enter)
.word no_fp
- .previous
+ .popsection
ENTRY(no_fp)
mov pc, lr
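Why .pushsection/.popsection instead of .section/.previous: .previous only swaps back to the single previously selected section, so nested section switches unwind to the wrong place, whereas the push/pop pair maintains a proper stack. A stand-alone illustration of the failure mode (section directives only):

	.section .text
	.section .data		@ ".previous" now remembers .text
	.section .fixup, "ax"	@ ".previous" now remembers .data
	.previous		@ lands in .data, not back in .text

With .pushsection/.popsection each switch is undone in LIFO order, so the equivalent sequence returns to .text. The same conversion is applied to the inline assembly in ftrace.c below.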
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index c63842766229..0298286ad4ad 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -62,15 +62,15 @@ int ftrace_modify_code(unsigned long pc, unsigned char *old_code,
" movne %0, #2 \n"
"3:\n"
- ".section .fixup, \"ax\"\n"
+ ".pushsection .fixup, \"ax\"\n"
"4: mov %0, #1 \n"
" b 3b \n"
- ".previous\n"
+ ".popsection\n"
- ".section __ex_table, \"a\"\n"
+ ".pushsection __ex_table, \"a\"\n"
" .long 1b, 4b \n"
" .long 2b, 4b \n"
- ".previous\n"
+ ".popsection\n"
: "=r"(err), "=r"(replaced)
: "r"(pc), "r"(new), "r"(old), "0"(err), "1"(replaced)
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a5b846b9895d..c868a8864117 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -98,6 +98,11 @@ sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
gdb_regs[_CPSR] = thread_regs->ARM_cpsr;
}
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->ARM_pc = pc;
+}
+
static int compiled_break;
int kgdb_arch_handle_exception(int exception_vector, int signo,
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 9e70f2053f9a..c45768614c8a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -16,7 +16,9 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/perf_event.h>
+#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
@@ -26,7 +28,7 @@
#include <asm/pmu.h>
#include <asm/stacktrace.h>
-static const struct pmu_irqs *pmu_irqs;
+static struct platform_device *pmu_device;
/*
* Hardware lock to serialize accesses to PMU registers. Needed for the
@@ -67,8 +69,18 @@ struct cpu_hw_events {
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+/* PMU names. */
+static const char *arm_pmu_names[] = {
+ [ARM_PERF_PMU_ID_XSCALE1] = "xscale1",
+ [ARM_PERF_PMU_ID_XSCALE2] = "xscale2",
+ [ARM_PERF_PMU_ID_V6] = "v6",
+ [ARM_PERF_PMU_ID_V6MP] = "v6mpcore",
+ [ARM_PERF_PMU_ID_CA8] = "ARMv7 Cortex-A8",
+ [ARM_PERF_PMU_ID_CA9] = "ARMv7 Cortex-A9",
+};
+
struct arm_pmu {
- char *name;
+ enum arm_perf_pmu_ids id;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct hw_perf_event *evt, int idx);
void (*disable)(struct hw_perf_event *evt, int idx);
@@ -87,6 +99,30 @@ struct arm_pmu {
/* Set at runtime when we know what CPU type we are. */
static const struct arm_pmu *armpmu;
+enum arm_perf_pmu_ids
+armpmu_get_pmu_id(void)
+{
+ int id = -ENODEV;
+
+ if (armpmu != NULL)
+ id = armpmu->id;
+
+ return id;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);
+
+int
+armpmu_get_max_events(void)
+{
+ int max_events = 0;
+
+ if (armpmu != NULL)
+ max_events = armpmu->num_events;
+
+ return max_events;
+}
+EXPORT_SYMBOL_GPL(armpmu_get_max_events);
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) \
@@ -314,38 +350,44 @@ validate_group(struct perf_event *event)
static int
armpmu_reserve_hardware(void)
{
- int i;
- int err;
+ int i, err = -ENODEV, irq;
- pmu_irqs = reserve_pmu();
- if (IS_ERR(pmu_irqs)) {
+ pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
+ if (IS_ERR(pmu_device)) {
pr_warning("unable to reserve pmu\n");
- return PTR_ERR(pmu_irqs);
+ return PTR_ERR(pmu_device);
}
- init_pmu();
+ init_pmu(ARM_PMU_DEVICE_CPU);
- if (pmu_irqs->num_irqs < 1) {
+ if (pmu_device->num_resources < 1) {
pr_err("no irqs for PMUs defined\n");
return -ENODEV;
}
- for (i = 0; i < pmu_irqs->num_irqs; ++i) {
- err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq,
+ for (i = 0; i < pmu_device->num_resources; ++i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq < 0)
+ continue;
+
+ err = request_irq(irq, armpmu->handle_irq,
IRQF_DISABLED | IRQF_NOBALANCING,
"armpmu", NULL);
if (err) {
- pr_warning("unable to request IRQ%d for ARM "
- "perf counters\n", pmu_irqs->irqs[i]);
+ pr_warning("unable to request IRQ%d for ARM perf "
+ "counters\n", irq);
break;
}
}
if (err) {
- for (i = i - 1; i >= 0; --i)
- free_irq(pmu_irqs->irqs[i], NULL);
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
+ for (i = i - 1; i >= 0; --i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
+ release_pmu(pmu_device);
+ pmu_device = NULL;
}
return err;
@@ -354,14 +396,17 @@ armpmu_reserve_hardware(void)
static void
armpmu_release_hardware(void)
{
- int i;
+ int i, irq;
- for (i = pmu_irqs->num_irqs - 1; i >= 0; --i)
- free_irq(pmu_irqs->irqs[i], NULL);
+ for (i = pmu_device->num_resources - 1; i >= 0; --i) {
+ irq = platform_get_irq(pmu_device, i);
+ if (irq >= 0)
+ free_irq(irq, NULL);
+ }
armpmu->stop();
- release_pmu(pmu_irqs);
- pmu_irqs = NULL;
+ release_pmu(pmu_device);
+ pmu_device = NULL;
}
static atomic_t active_events = ATOMIC_INIT(0);
@@ -1144,7 +1189,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
}
static const struct arm_pmu armv6pmu = {
- .name = "v6",
+ .id = ARM_PERF_PMU_ID_V6,
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6pmu_disable_event,
@@ -1167,7 +1212,7 @@ static const struct arm_pmu armv6pmu = {
* reset the period and enable the interrupt reporting.
*/
static const struct arm_pmu armv6mpcore_pmu = {
- .name = "v6mpcore",
+ .id = ARM_PERF_PMU_ID_V6MP,
.handle_irq = armv6pmu_handle_irq,
.enable = armv6pmu_enable_event,
.disable = armv6mpcore_pmu_disable_event,
@@ -1197,10 +1242,6 @@ static const struct arm_pmu armv6mpcore_pmu = {
* counter and all 4 performance counters together can be reset separately.
*/
-#define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8"
-
-#define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9"
-
/* Common ARMv7 event types */
enum armv7_perf_types {
ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
@@ -2079,6 +2120,803 @@ static u32 __init armv7_reset_read_pmnc(void)
return nb_cnt + 1;
}
+/*
+ * ARMv5 [xscale] Performance counter handling code.
+ *
+ * Based on xscale OProfile code.
+ *
+ * There are two variants of the xscale PMU that we support:
+ * - xscale1pmu: 2 event counters and a cycle counter
+ * - xscale2pmu: 4 event counters and a cycle counter
+ * The two variants share event definitions, but have different
+ * PMU structures.
+ */
+
+enum xscale_perf_types {
+ XSCALE_PERFCTR_ICACHE_MISS = 0x00,
+ XSCALE_PERFCTR_ICACHE_NO_DELIVER = 0x01,
+ XSCALE_PERFCTR_DATA_STALL = 0x02,
+ XSCALE_PERFCTR_ITLB_MISS = 0x03,
+ XSCALE_PERFCTR_DTLB_MISS = 0x04,
+ XSCALE_PERFCTR_BRANCH = 0x05,
+ XSCALE_PERFCTR_BRANCH_MISS = 0x06,
+ XSCALE_PERFCTR_INSTRUCTION = 0x07,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL = 0x08,
+ XSCALE_PERFCTR_DCACHE_FULL_STALL_CONTIG = 0x09,
+ XSCALE_PERFCTR_DCACHE_ACCESS = 0x0A,
+ XSCALE_PERFCTR_DCACHE_MISS = 0x0B,
+ XSCALE_PERFCTR_DCACHE_WRITE_BACK = 0x0C,
+ XSCALE_PERFCTR_PC_CHANGED = 0x0D,
+ XSCALE_PERFCTR_BCU_REQUEST = 0x10,
+ XSCALE_PERFCTR_BCU_FULL = 0x11,
+ XSCALE_PERFCTR_BCU_DRAIN = 0x12,
+ XSCALE_PERFCTR_BCU_ECC_NO_ELOG = 0x14,
+ XSCALE_PERFCTR_BCU_1_BIT_ERR = 0x15,
+ XSCALE_PERFCTR_RMW = 0x16,
+ /* XSCALE_PERFCTR_CCNT is not hardware defined */
+ XSCALE_PERFCTR_CCNT = 0xFE,
+ XSCALE_PERFCTR_UNUSED = 0xFF,
+};
+
+enum xscale_counters {
+ XSCALE_CYCLE_COUNTER = 1,
+ XSCALE_COUNTER0,
+ XSCALE_COUNTER1,
+ XSCALE_COUNTER2,
+ XSCALE_COUNTER3,
+};
+
+static const unsigned xscale_perf_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = XSCALE_PERFCTR_CCNT,
+ [PERF_COUNT_HW_INSTRUCTIONS] = XSCALE_PERFCTR_INSTRUCTION,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = XSCALE_PERFCTR_BRANCH,
+ [PERF_COUNT_HW_BRANCH_MISSES] = XSCALE_PERFCTR_BRANCH_MISS,
+ [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED,
+};
+
+static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = XSCALE_PERFCTR_DCACHE_ACCESS,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DCACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ICACHE_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_DTLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = XSCALE_PERFCTR_ITLB_MISS,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
+ [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED,
+ },
+ },
+};
+
+#define XSCALE_PMU_ENABLE 0x001
+#define XSCALE_PMN_RESET 0x002
+#define XSCALE_CCNT_RESET 0x004
+#define XSCALE_PMU_RESET (XSCALE_CCNT_RESET | XSCALE_PMN_RESET)
+#define XSCALE_PMU_CNT64 0x008
+
+static inline int
+xscalepmu_event_map(int config)
+{
+ int mapping = xscale_perf_map[config];
+ if (HW_OP_UNSUPPORTED == mapping)
+ mapping = -EOPNOTSUPP;
+ return mapping;
+}
+
+static u64
+xscalepmu_raw_event(u64 config)
+{
+ return config & 0xff;
+}
+
+#define XSCALE1_OVERFLOWED_MASK 0x700
+#define XSCALE1_CCOUNT_OVERFLOW 0x400
+#define XSCALE1_COUNT0_OVERFLOW 0x100
+#define XSCALE1_COUNT1_OVERFLOW 0x200
+#define XSCALE1_CCOUNT_INT_EN 0x040
+#define XSCALE1_COUNT0_INT_EN 0x010
+#define XSCALE1_COUNT1_INT_EN 0x020
+#define XSCALE1_COUNT0_EVT_SHFT 12
+#define XSCALE1_COUNT0_EVT_MASK (0xff << XSCALE1_COUNT0_EVT_SHFT)
+#define XSCALE1_COUNT1_EVT_SHFT 20
+#define XSCALE1_COUNT1_EVT_MASK (0xff << XSCALE1_COUNT1_EVT_SHFT)
+
+static inline u32
+xscale1pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c0, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale1pmu_write_pmnc(u32 val)
+{
+ /* the upper 4 bits and bits 7 and 11 are write-as-0 */
+ val &= 0xffff77f;
+ asm volatile("mcr p14, 0, %0, c0, c0, 0" : : "r" (val));
+}
+
+static inline int
+xscale1_pmnc_counter_has_overflowed(unsigned long pmnc,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = pmnc & XSCALE1_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = pmnc & XSCALE1_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = pmnc & XSCALE1_COUNT1_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale1pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /*
+ * NOTE: the A stepping has an erratum whereby, if one overflow bit
+ * is already set when another overflow occurs, the previously set
+ * bit is cleared. There is no workaround; the B stepping and later
+ * are fixed.
+ */
+ pmnc = xscale1pmu_read_pmnc();
+
+ /*
+ * Write the value back to clear the overflow flags. Overflow
+ * flags remain in pmnc for use below. We also disable the PMU
+ * while we process the interrupt.
+ */
+ xscale1pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ if (!(pmnc & XSCALE1_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ perf_event_do_pending();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale1pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void
+xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = 0;
+ evt = XSCALE1_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT0_EVT_SHFT) |
+ XSCALE1_COUNT0_INT_EN;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_EVT_MASK;
+ evt = (hwc->config_base << XSCALE1_COUNT1_EVT_SHFT) |
+ XSCALE1_COUNT1_INT_EN;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long val, mask, evt, flags;
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ mask = XSCALE1_CCOUNT_INT_EN;
+ evt = 0;
+ break;
+ case XSCALE_COUNTER0:
+ mask = XSCALE1_COUNT0_INT_EN | XSCALE1_COUNT0_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ mask = XSCALE1_COUNT1_INT_EN | XSCALE1_COUNT1_EVT_MASK;
+ evt = XSCALE_PERFCTR_UNUSED << XSCALE1_COUNT1_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~mask;
+ val |= evt;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ if (XSCALE_PERFCTR_CCNT == event->config_base) {
+ if (test_and_set_bit(XSCALE_CYCLE_COUNTER, cpuc->used_mask))
+ return -EAGAIN;
+
+ return XSCALE_CYCLE_COUNTER;
+ } else {
+ if (!test_and_set_bit(XSCALE_COUNTER1, cpuc->used_mask)) {
+ return XSCALE_COUNTER1;
+ }
+
+ if (!test_and_set_bit(XSCALE_COUNTER0, cpuc->used_mask)) {
+ return XSCALE_COUNTER0;
+ }
+
+ return -EAGAIN;
+ }
+}
+
+static void
+xscale1pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val |= XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale1pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale1pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale1pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale1pmu_read_counter(int counter)
+{
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c2, c0, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c3, c0, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void
+xscale1pmu_write_counter(int counter, u32 val)
+{
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c2, c0, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c3, c0, 0" : : "r" (val));
+ break;
+ }
+}
+
+static const struct arm_pmu xscale1pmu = {
+ .id = ARM_PERF_PMU_ID_XSCALE1,
+ .handle_irq = xscale1pmu_handle_irq,
+ .enable = xscale1pmu_enable_event,
+ .disable = xscale1pmu_disable_event,
+ .event_map = xscalepmu_event_map,
+ .raw_event = xscalepmu_raw_event,
+ .read_counter = xscale1pmu_read_counter,
+ .write_counter = xscale1pmu_write_counter,
+ .get_event_idx = xscale1pmu_get_event_idx,
+ .start = xscale1pmu_start,
+ .stop = xscale1pmu_stop,
+ .num_events = 3,
+ .max_period = (1LLU << 32) - 1,
+};
+
+#define XSCALE2_OVERFLOWED_MASK 0x01f
+#define XSCALE2_CCOUNT_OVERFLOW 0x001
+#define XSCALE2_COUNT0_OVERFLOW 0x002
+#define XSCALE2_COUNT1_OVERFLOW 0x004
+#define XSCALE2_COUNT2_OVERFLOW 0x008
+#define XSCALE2_COUNT3_OVERFLOW 0x010
+#define XSCALE2_CCOUNT_INT_EN 0x001
+#define XSCALE2_COUNT0_INT_EN 0x002
+#define XSCALE2_COUNT1_INT_EN 0x004
+#define XSCALE2_COUNT2_INT_EN 0x008
+#define XSCALE2_COUNT3_INT_EN 0x010
+#define XSCALE2_COUNT0_EVT_SHFT 0
+#define XSCALE2_COUNT0_EVT_MASK (0xff << XSCALE2_COUNT0_EVT_SHFT)
+#define XSCALE2_COUNT1_EVT_SHFT 8
+#define XSCALE2_COUNT1_EVT_MASK (0xff << XSCALE2_COUNT1_EVT_SHFT)
+#define XSCALE2_COUNT2_EVT_SHFT 16
+#define XSCALE2_COUNT2_EVT_MASK (0xff << XSCALE2_COUNT2_EVT_SHFT)
+#define XSCALE2_COUNT3_EVT_SHFT 24
+#define XSCALE2_COUNT3_EVT_MASK (0xff << XSCALE2_COUNT3_EVT_SHFT)
+
+static inline u32
+xscale2pmu_read_pmnc(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c0, c1, 0" : "=r" (val));
+ /* bits 1-2 and 4-23 are read-unpredictable */
+ return val & 0xff000009;
+}
+
+static inline void
+xscale2pmu_write_pmnc(u32 val)
+{
+ /* bits 4-23 are write-as-0, 24-31 are write ignored */
+ val &= 0xf;
+ asm volatile("mcr p14, 0, %0, c0, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_overflow_flags(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c5, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_overflow_flags(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c5, c1, 0" : : "r" (val));
+}
+
+static inline u32
+xscale2pmu_read_event_select(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c8, c1, 0" : "=r" (val));
+ return val;
+}
+
+static inline void
+xscale2pmu_write_event_select(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c8, c1, 0" : : "r"(val));
+}
+
+static inline u32
+xscale2pmu_read_int_enable(void)
+{
+ u32 val;
+ asm volatile("mrc p14, 0, %0, c4, c1, 0" : "=r" (val));
+ return val;
+}
+
+static void
+xscale2pmu_write_int_enable(u32 val)
+{
+ asm volatile("mcr p14, 0, %0, c4, c1, 0" : : "r" (val));
+}
+
+static inline int
+xscale2_pmnc_counter_has_overflowed(unsigned long of_flags,
+ enum xscale_counters counter)
+{
+ int ret = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ ret = of_flags & XSCALE2_CCOUNT_OVERFLOW;
+ break;
+ case XSCALE_COUNTER0:
+ ret = of_flags & XSCALE2_COUNT0_OVERFLOW;
+ break;
+ case XSCALE_COUNTER1:
+ ret = of_flags & XSCALE2_COUNT1_OVERFLOW;
+ break;
+ case XSCALE_COUNTER2:
+ ret = of_flags & XSCALE2_COUNT2_OVERFLOW;
+ break;
+ case XSCALE_COUNTER3:
+ ret = of_flags & XSCALE2_COUNT3_OVERFLOW;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", counter);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+xscale2pmu_handle_irq(int irq_num, void *dev)
+{
+ unsigned long pmnc, of_flags;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
+ int idx;
+
+ /* Disable the PMU. */
+ pmnc = xscale2pmu_read_pmnc();
+ xscale2pmu_write_pmnc(pmnc & ~XSCALE_PMU_ENABLE);
+
+ /* Check the overflow flag register. */
+ of_flags = xscale2pmu_read_overflow_flags();
+ if (!(of_flags & XSCALE2_OVERFLOWED_MASK))
+ return IRQ_NONE;
+
+ /* Clear the overflow bits. */
+ xscale2pmu_write_overflow_flags(of_flags);
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0);
+
+ cpuc = &__get_cpu_var(cpu_hw_events);
+ for (idx = 0; idx <= armpmu->num_events; ++idx) {
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc;
+
+ if (!test_bit(idx, cpuc->active_mask))
+ continue;
+
+ if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
+ continue;
+
+ hwc = &event->hw;
+ armpmu_event_update(event, hwc, idx);
+ data.period = event->hw.last_period;
+ if (!armpmu_event_set_period(event, hwc, idx))
+ continue;
+
+ if (perf_event_overflow(event, 0, &data, regs))
+ armpmu->disable(hwc, idx);
+ }
+
+ perf_event_do_pending();
+
+ /*
+ * Re-enable the PMU.
+ */
+ pmnc = xscale2pmu_read_pmnc() | XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(pmnc);
+
+ return IRQ_HANDLED;
+}
+
+static void
+xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags, ien, evtsel;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien |= XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien |= XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien |= XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien |= XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien |= XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= hwc->config_base << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
+{
+ unsigned long flags, ien, evtsel;
+
+ ien = xscale2pmu_read_int_enable();
+ evtsel = xscale2pmu_read_event_select();
+
+ switch (idx) {
+ case XSCALE_CYCLE_COUNTER:
+ ien &= ~XSCALE2_CCOUNT_INT_EN;
+ break;
+ case XSCALE_COUNTER0:
+ ien &= ~XSCALE2_COUNT0_INT_EN;
+ evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER1:
+ ien &= ~XSCALE2_COUNT1_INT_EN;
+ evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER2:
+ ien &= ~XSCALE2_COUNT2_INT_EN;
+ evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+ break;
+ case XSCALE_COUNTER3:
+ ien &= ~XSCALE2_COUNT3_INT_EN;
+ evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
+ evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+ break;
+ default:
+ WARN_ONCE(1, "invalid counter number (%d)\n", idx);
+ return;
+ }
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ xscale2pmu_write_event_select(evtsel);
+ xscale2pmu_write_int_enable(ien);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static int
+xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *event)
+{
+ int idx = xscale1pmu_get_event_idx(cpuc, event);
+ if (idx >= 0)
+ goto out;
+
+ if (!test_and_set_bit(XSCALE_COUNTER3, cpuc->used_mask))
+ idx = XSCALE_COUNTER3;
+ else if (!test_and_set_bit(XSCALE_COUNTER2, cpuc->used_mask))
+ idx = XSCALE_COUNTER2;
+out:
+ return idx;
+}
+
+static void
+xscale2pmu_start(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64;
+ val |= XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static void
+xscale2pmu_stop(void)
+{
+ unsigned long flags, val;
+
+ spin_lock_irqsave(&pmu_lock, flags);
+ val = xscale2pmu_read_pmnc();
+ val &= ~XSCALE_PMU_ENABLE;
+ xscale2pmu_write_pmnc(val);
+ spin_unlock_irqrestore(&pmu_lock, flags);
+}
+
+static inline u32
+xscale2pmu_read_counter(int counter)
+{
+ u32 val = 0;
+
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mrc p14, 0, %0, c1, c1, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mrc p14, 0, %0, c0, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mrc p14, 0, %0, c1, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mrc p14, 0, %0, c2, c2, 0" : "=r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mrc p14, 0, %0, c3, c2, 0" : "=r" (val));
+ break;
+ }
+
+ return val;
+}
+
+static inline void
+xscale2pmu_write_counter(int counter, u32 val)
+{
+ switch (counter) {
+ case XSCALE_CYCLE_COUNTER:
+ asm volatile("mcr p14, 0, %0, c1, c1, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER0:
+ asm volatile("mcr p14, 0, %0, c0, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER1:
+ asm volatile("mcr p14, 0, %0, c1, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER2:
+ asm volatile("mcr p14, 0, %0, c2, c2, 0" : : "r" (val));
+ break;
+ case XSCALE_COUNTER3:
+ asm volatile("mcr p14, 0, %0, c3, c2, 0" : : "r" (val));
+ break;
+ }
+}
+
+static const struct arm_pmu xscale2pmu = {
+ .id = ARM_PERF_PMU_ID_XSCALE2,
+ .handle_irq = xscale2pmu_handle_irq,
+ .enable = xscale2pmu_enable_event,
+ .disable = xscale2pmu_disable_event,
+ .event_map = xscalepmu_event_map,
+ .raw_event = xscalepmu_raw_event,
+ .read_counter = xscale2pmu_read_counter,
+ .write_counter = xscale2pmu_write_counter,
+ .get_event_idx = xscale2pmu_get_event_idx,
+ .start = xscale2pmu_start,
+ .stop = xscale2pmu_stop,
+ .num_events = 5,
+ .max_period = (1LLU << 32) - 1,
+};
+
static int __init
init_hw_perf_events(void)
{
@@ -2086,7 +2924,7 @@ init_hw_perf_events(void)
unsigned long implementor = (cpuid & 0xFF000000) >> 24;
unsigned long part_number = (cpuid & 0xFFF0);
- /* We only support ARM CPUs implemented by ARM at the moment. */
+ /* ARM Ltd CPUs. */
if (0x41 == implementor) {
switch (part_number) {
case 0xB360: /* ARM1136 */
@@ -2105,7 +2943,7 @@ init_hw_perf_events(void)
perf_max_events = armv6mpcore_pmu.num_events;
break;
case 0xC080: /* Cortex-A8 */
- armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME;
+ armv7pmu.id = ARM_PERF_PMU_ID_CA8;
memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
sizeof(armv7_a8_perf_cache_map));
armv7pmu.event_map = armv7_a8_pmu_event_map;
@@ -2117,7 +2955,7 @@ init_hw_perf_events(void)
perf_max_events = armv7pmu.num_events;
break;
case 0xC090: /* Cortex-A9 */
- armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME;
+ armv7pmu.id = ARM_PERF_PMU_ID_CA9;
memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
sizeof(armv7_a9_perf_cache_map));
armv7pmu.event_map = armv7_a9_pmu_event_map;
@@ -2128,15 +2966,33 @@ init_hw_perf_events(void)
armv7pmu.num_events = armv7_reset_read_pmnc();
perf_max_events = armv7pmu.num_events;
break;
- default:
- pr_info("no hardware support available\n");
- perf_max_events = -1;
+ }
+ /* Intel CPUs [xscale]. */
+ } else if (0x69 == implementor) {
+ part_number = (cpuid >> 13) & 0x7;
+ switch (part_number) {
+ case 1:
+ armpmu = &xscale1pmu;
+ memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+ sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale1pmu.num_events;
+ break;
+ case 2:
+ armpmu = &xscale2pmu;
+ memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
+ sizeof(xscale_perf_cache_map));
+ perf_max_events = xscale2pmu.num_events;
+ break;
}
}
- if (armpmu)
+ if (armpmu) {
pr_info("enabled with %s PMU driver, %d counters available\n",
- armpmu->name, armpmu->num_events);
+ arm_pmu_names[armpmu->id], armpmu->num_events);
+ } else {
+ pr_info("no hardware support available\n");
+ perf_max_events = -1;
+ }
return 0;
}
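The two new exports let other in-kernel users discover what the perf code detected without repeating the CPUID probing (OProfile being the obvious candidate). A hedged sketch of a module using them, assuming the declarations live in <asm/perf_event.h>:

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/perf_event.h>

static int __init pmu_query_init(void)
{
	enum arm_perf_pmu_ids id = armpmu_get_pmu_id();

	if (id < 0)
		return -ENODEV;

	pr_info("PMU id %d, %d counters\n", id, armpmu_get_max_events());
	return 0;
}
module_init(pmu_query_init);

MODULE_LICENSE("GPL");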
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index a124312e343f..b8af96ea62e6 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -2,6 +2,7 @@
* linux/arch/arm/kernel/pmu.c
*
* Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ * Copyright (C) 2010 ARM Ltd, Will Deacon
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -9,65 +10,78 @@
*
*/
+#define pr_fmt(fmt) "PMU: " fmt
+
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/platform_device.h>
#include <asm/pmu.h>
-/*
- * Define the IRQs for the system. We could use something like a platform
- * device but that seems fairly heavyweight for this. Also, the performance
- * counters can't be removed or hotplugged.
- *
- * Ordering is important: init_pmu() will use the ordering to set the affinity
- * to the corresponding core. e.g. the first interrupt will go to cpu 0, the
- * second goes to cpu 1 etc.
- */
-static const int irqs[] = {
-#if defined(CONFIG_ARCH_OMAP2)
- 3,
-#elif defined(CONFIG_ARCH_BCMRING)
- IRQ_PMUIRQ,
-#elif defined(CONFIG_MACH_REALVIEW_EB)
- IRQ_EB11MP_PMU_CPU0,
- IRQ_EB11MP_PMU_CPU1,
- IRQ_EB11MP_PMU_CPU2,
- IRQ_EB11MP_PMU_CPU3,
-#elif defined(CONFIG_ARCH_OMAP3)
- INT_34XX_BENCH_MPU_EMUL,
-#elif defined(CONFIG_ARCH_IOP32X)
- IRQ_IOP32X_CORE_PMU,
-#elif defined(CONFIG_ARCH_IOP33X)
- IRQ_IOP33X_CORE_PMU,
-#elif defined(CONFIG_ARCH_PXA)
- IRQ_PMU,
-#endif
-};
+static volatile long pmu_lock;
+
+static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
+
+static int __devinit pmu_device_probe(struct platform_device *pdev)
+{
+
+ if (pdev->id < 0 || pdev->id >= ARM_NUM_PMU_DEVICES) {
+ pr_warning("received registration request for unknown "
+ "device %d\n", pdev->id);
+ return -EINVAL;
+ }
+
+ if (pmu_devices[pdev->id])
+ pr_warning("registering new PMU device type %d overwrites "
+ "previous registration!\n", pdev->id);
+ else
+ pr_info("registered new PMU device of type %d\n",
+ pdev->id);
-static const struct pmu_irqs pmu_irqs = {
- .irqs = irqs,
- .num_irqs = ARRAY_SIZE(irqs),
+ pmu_devices[pdev->id] = pdev;
+ return 0;
+}
+
+static struct platform_driver pmu_driver = {
+ .driver = {
+ .name = "arm-pmu",
+ },
+ .probe = pmu_device_probe,
};
-static volatile long pmu_lock;
+static int __init register_pmu_driver(void)
+{
+ return platform_driver_register(&pmu_driver);
+}
+device_initcall(register_pmu_driver);
-const struct pmu_irqs *
-reserve_pmu(void)
+struct platform_device *
+reserve_pmu(enum arm_pmu_type device)
{
- return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) :
- &pmu_irqs;
+ struct platform_device *pdev;
+
+ if (test_and_set_bit_lock(device, &pmu_lock)) {
+ pdev = ERR_PTR(-EBUSY);
+ } else if (pmu_devices[device] == NULL) {
+ clear_bit_unlock(device, &pmu_lock);
+ pdev = ERR_PTR(-ENODEV);
+ } else {
+ pdev = pmu_devices[device];
+ }
+
+ return pdev;
}
EXPORT_SYMBOL_GPL(reserve_pmu);
int
-release_pmu(const struct pmu_irqs *irqs)
+release_pmu(struct platform_device *pdev)
{
- if (WARN_ON(irqs != &pmu_irqs))
+ if (WARN_ON(pdev != pmu_devices[pdev->id]))
return -EINVAL;
- clear_bit_unlock(0, &pmu_lock);
+ clear_bit_unlock(pdev->id, &pmu_lock);
return 0;
}
EXPORT_SYMBOL_GPL(release_pmu);
@@ -87,17 +101,42 @@ set_irq_affinity(int irq,
#endif
}
-int
-init_pmu(void)
+static int
+init_cpu_pmu(void)
{
int i, err = 0;
+ struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
+
+ if (!pdev) {
+ err = -ENODEV;
+ goto out;
+ }
- for (i = 0; i < pmu_irqs.num_irqs; ++i) {
- err = set_irq_affinity(pmu_irqs.irqs[i], i);
+ for (i = 0; i < pdev->num_resources; ++i) {
+ err = set_irq_affinity(platform_get_irq(pdev, i), i);
if (err)
break;
}
+out:
+ return err;
+}
+
+int
+init_pmu(enum arm_pmu_type device)
+{
+ int err = 0;
+
+ switch (device) {
+ case ARM_PMU_DEVICE_CPU:
+ err = init_cpu_pmu();
+ break;
+ default:
+ pr_warning("attempt to initialise unknown device %d\n",
+ device);
+ err = -EINVAL;
+ }
+
return err;
}
EXPORT_SYMBOL_GPL(init_pmu);
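On the platform side, the IRQs that previously lived in the static irqs[] table are now expressed as IORESOURCE_IRQ resources on an "arm-pmu" platform device whose id selects the PMU type. A board-file sketch (IRQ numbers made up; assumes <linux/platform_device.h> and <asm/pmu.h>):

static struct resource pmu_resources[] = {
	{
		.start	= 100,	/* hypothetical CPU0 PMU IRQ */
		.end	= 100,
		.flags	= IORESOURCE_IRQ,
	},
	{
		.start	= 101,	/* hypothetical CPU1 PMU IRQ */
		.end	= 101,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device pmu_device = {
	.name		= "arm-pmu",
	.id		= ARM_PMU_DEVICE_CPU,
	.resource	= pmu_resources,
	.num_resources	= ARRAY_SIZE(pmu_resources),
};

static int __init board_pmu_init(void)
{
	return platform_device_register(&pmu_device);
}
device_initcall(board_pmu_init);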
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 0e12e0acbf26..acf5e6fdb6dc 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -355,7 +355,7 @@ EXPORT_SYMBOL(dump_fpu);
* the thread function, and r3 points to the exit function.
*/
extern void kernel_thread_helper(void);
-asm( ".section .text\n"
+asm( ".pushsection .text\n"
" .align\n"
" .type kernel_thread_helper, #function\n"
"kernel_thread_helper:\n"
@@ -363,11 +363,11 @@ asm( ".section .text\n"
" mov lr, r3\n"
" mov pc, r2\n"
" .size kernel_thread_helper, . - kernel_thread_helper\n"
-" .previous");
+" .popsection");
#ifdef CONFIG_ARM_UNWIND
extern void kernel_thread_exit(long code);
-asm( ".section .text\n"
+asm( ".pushsection .text\n"
" .align\n"
" .type kernel_thread_exit, #function\n"
"kernel_thread_exit:\n"
@@ -377,7 +377,7 @@ asm( ".section .text\n"
" nop\n"
" .fnend\n"
" .size kernel_thread_exit, . - kernel_thread_exit\n"
-" .previous");
+" .popsection");
#else
#define kernel_thread_exit do_exit
#endif
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 577543f3857f..b8c3d0f689d9 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -86,6 +86,12 @@ int __cpuinit __cpu_up(unsigned int cpu)
return PTR_ERR(idle);
}
ci->idle = idle;
+ } else {
+ /*
+ * Since this idle thread is being re-used, call
+ * init_idle() to reinitialize the thread structure.
+ */
+ init_idle(idle, cpu);
}
/*
@@ -162,7 +168,7 @@ int __cpu_disable(void)
struct task_struct *p;
int ret;
- ret = mach_cpu_disable(cpu);
+ ret = platform_cpu_disable(cpu);
if (ret)
return ret;
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index ea02a7b1c244..7c5f0c024db7 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -21,23 +21,6 @@
#include <asm/smp_twd.h>
#include <asm/hardware/gic.h>
-#define TWD_TIMER_LOAD 0x00
-#define TWD_TIMER_COUNTER 0x04
-#define TWD_TIMER_CONTROL 0x08
-#define TWD_TIMER_INTSTAT 0x0C
-
-#define TWD_WDOG_LOAD 0x20
-#define TWD_WDOG_COUNTER 0x24
-#define TWD_WDOG_CONTROL 0x28
-#define TWD_WDOG_INTSTAT 0x2C
-#define TWD_WDOG_RESETSTAT 0x30
-#define TWD_WDOG_DISABLE 0x34
-
-#define TWD_TIMER_CONTROL_ENABLE (1 << 0)
-#define TWD_TIMER_CONTROL_ONESHOT (0 << 1)
-#define TWD_TIMER_CONTROL_PERIODIC (1 << 1)
-#define TWD_TIMER_CONTROL_IT_ENABLE (1 << 2)
-
/* set up by the platform code */
void __iomem *twd_base;
diff --git a/arch/arm/kernel/time.c b/arch/arm/kernel/time.c
index 28753805d2d1..38c261f9951c 100644
--- a/arch/arm/kernel/time.c
+++ b/arch/arm/kernel/time.c
@@ -72,12 +72,15 @@ unsigned long profile_pc(struct pt_regs *regs)
EXPORT_SYMBOL(profile_pc);
#endif
-#ifndef CONFIG_GENERIC_TIME
-static unsigned long dummy_gettimeoffset(void)
+#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
+u32 arch_gettimeoffset(void)
{
+ if (system_timer->offset != NULL)
+ return system_timer->offset() * 1000;
+
return 0;
}
-#endif
+#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
#ifdef CONFIG_LEDS_TIMER
static inline void do_leds(void)
@@ -93,63 +96,6 @@ static inline void do_leds(void)
#define do_leds()
#endif
-#ifndef CONFIG_GENERIC_TIME
-void do_gettimeofday(struct timeval *tv)
-{
- unsigned long flags;
- unsigned long seq;
- unsigned long usec, sec;
-
- do {
- seq = read_seqbegin_irqsave(&xtime_lock, flags);
- usec = system_timer->offset();
- sec = xtime.tv_sec;
- usec += xtime.tv_nsec / 1000;
- } while (read_seqretry_irqrestore(&xtime_lock, seq, flags));
-
- /* usec may have gone up a lot: be safe */
- while (usec >= 1000000) {
- usec -= 1000000;
- sec++;
- }
-
- tv->tv_sec = sec;
- tv->tv_usec = usec;
-}
-
-EXPORT_SYMBOL(do_gettimeofday);
-
-int do_settimeofday(struct timespec *tv)
-{
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;
-
- if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
- return -EINVAL;
-
- write_seqlock_irq(&xtime_lock);
- /*
- * This is revolting. We need to set "xtime" correctly. However, the
- * value in this location is the value at the most recent update of
- * wall time. Discover what correction gettimeofday() would have
- * done, and then undo it!
- */
- nsec -= system_timer->offset() * NSEC_PER_USEC;
-
- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
-
- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
-
- ntp_clear();
- write_sequnlock_irq(&xtime_lock);
- clock_was_set();
- return 0;
-}
-
-EXPORT_SYMBOL(do_settimeofday);
-#endif /* !CONFIG_GENERIC_TIME */
#ifndef CONFIG_GENERIC_CLOCKEVENTS
/*
@@ -214,10 +160,6 @@ device_initcall(timer_init_sysfs);
void __init time_init(void)
{
-#ifndef CONFIG_GENERIC_TIME
- if (system_timer->offset == NULL)
- system_timer->offset = dummy_gettimeoffset;
-#endif
system_timer->init();
}
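With the open-coded do_gettimeofday()/do_settimeofday() gone, a platform built with CONFIG_ARCH_USES_GETTIMEOFFSET only keeps providing the microsecond offset hook; generic timekeeping calls arch_gettimeoffset() itself, which scales the result to nanoseconds (hence the * 1000 above). A sketch of the platform side, with hypothetical timer helpers:

static unsigned long example_gettimeoffset(void)
{
	/* microseconds elapsed since the last timer interrupt */
	return example_timer_read_usec();	/* hypothetical */
}

struct sys_timer example_timer = {
	.init	= example_timer_init,	/* hypothetical */
	.offset	= example_gettimeoffset,
};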