Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--  arch/powerpc/kernel/Makefile          |    9
-rw-r--r--  arch/powerpc/kernel/align.c           |   20
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c     |   33
-rw-r--r--  arch/powerpc/kernel/cputable.c        |    6
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c     |  163
-rw-r--r--  arch/powerpc/kernel/dma.c             |    4
-rw-r--r--  arch/powerpc/kernel/entry_64.S        |    9
-rw-r--r--  arch/powerpc/kernel/exceptions-64s.S  |  978
-rw-r--r--  arch/powerpc/kernel/ftrace.c          |   51
-rw-r--r--  arch/powerpc/kernel/head_32.S         |  101
-rw-r--r--  arch/powerpc/kernel/head_64.S         | 1095
-rw-r--r--  arch/powerpc/kernel/head_booke.h      |   10
-rw-r--r--  arch/powerpc/kernel/init_task.c       |    4
-rw-r--r--  arch/powerpc/kernel/irq.c             |  129
-rw-r--r--  arch/powerpc/kernel/lparcfg.c         |   40
-rw-r--r--  arch/powerpc/kernel/misc_64.S         |   92
-rw-r--r--  arch/powerpc/kernel/module.c          |    2
-rw-r--r--  arch/powerpc/kernel/paca.c            |   14
-rw-r--r--  arch/powerpc/kernel/pci-common.c      |    3
-rw-r--r--  arch/powerpc/kernel/pci_32.c          |   19
-rw-r--r--  arch/powerpc/kernel/pci_64.c          |   17
-rw-r--r--  arch/powerpc/kernel/pci_dn.c          |   28
-rw-r--r--  arch/powerpc/kernel/perf_counter.c    | 1263
-rw-r--r--  arch/powerpc/kernel/power4-pmu.c      |  598
-rw-r--r--  arch/powerpc/kernel/power5+-pmu.c     |  671
-rw-r--r--  arch/powerpc/kernel/power5-pmu.c      |  611
-rw-r--r--  arch/powerpc/kernel/power6-pmu.c      |  532
-rw-r--r--  arch/powerpc/kernel/power7-pmu.c      |  357
-rw-r--r--  arch/powerpc/kernel/ppc970-pmu.c      |  482
-rw-r--r--  arch/powerpc/kernel/process.c         |    2
-rw-r--r--  arch/powerpc/kernel/prom.c            |    2
-rw-r--r--  arch/powerpc/kernel/prom_init.c       |    3
-rw-r--r--  arch/powerpc/kernel/ptrace.c          |   23
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c        |   10
-rw-r--r--  arch/powerpc/kernel/setup_32.c        |    6
-rw-r--r--  arch/powerpc/kernel/setup_64.c        |   15
-rw-r--r--  arch/powerpc/kernel/time.c            |   31
-rw-r--r--  arch/powerpc/kernel/traps.c           |  130
-rw-r--r--  arch/powerpc/kernel/vector.S          |  210
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S     |    1
40 files changed, 6291 insertions, 1483 deletions
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 71901fbda4a5..612b0c4dc26d 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -36,7 +36,7 @@ obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
firmware.o nvram_64.o
obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
obj-$(CONFIG_PPC64) += vdso64/
-obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
+obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o of_platform.o prom_parse.o
obj-$(CONFIG_PPC_CLOCK) += clock.o
@@ -82,6 +82,7 @@ obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_KPROBES) += kprobes.o
obj-$(CONFIG_PPC_UDBG_16550) += legacy_serial.o udbg_16550.o
obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_SWIOTLB) += dma-swiotlb.o
pci64-$(CONFIG_PPC64) += pci_dn.o isa-bridge.o
obj-$(CONFIG_PCI) += pci_$(CONFIG_WORD_SIZE).o $(pci64-y) \
@@ -94,6 +95,9 @@ obj64-$(CONFIG_AUDIT) += compat_audit.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
+obj-$(CONFIG_PERF_COUNTERS) += perf_counter.o power4-pmu.o ppc970-pmu.o \
+ power5-pmu.o power5+-pmu.o power6-pmu.o \
+ power7-pmu.o
obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
@@ -108,6 +112,7 @@ obj-y += ppc_save_regs.o
endif
extra-$(CONFIG_PPC_FPU) += fpu.o
+extra-$(CONFIG_ALTIVEC) += vector.o
extra-$(CONFIG_PPC64) += entry_64.o
extra-y += systbl_chk.i
@@ -120,6 +125,7 @@ PHONY += systbl_chk
systbl_chk: $(src)/systbl_chk.sh $(obj)/systbl_chk.i
$(call cmd,systbl_chk)
+ifeq ($(CONFIG_PPC_OF_BOOT_TRAMPOLINE),y)
$(obj)/built-in.o: prom_init_check
quiet_cmd_prom_init_check = CALL $<
@@ -128,5 +134,6 @@ quiet_cmd_prom_init_check = CALL $<
PHONY += prom_init_check
prom_init_check: $(src)/prom_init_check.sh $(obj)/prom_init.o
$(call cmd,prom_init_check)
+endif
clean-files := vmlinux.lds
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 5ffcfaa77d6a..a5b632e52fae 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -24,6 +24,7 @@
#include <asm/system.h>
#include <asm/cache.h>
#include <asm/cputable.h>
+#include <asm/emulated_ops.h>
struct aligninfo {
unsigned char len;
@@ -730,8 +731,10 @@ int fix_alignment(struct pt_regs *regs)
areg = dsisr & 0x1f; /* register to update */
#ifdef CONFIG_SPE
- if ((instr >> 26) == 0x4)
+ if ((instr >> 26) == 0x4) {
+ PPC_WARN_EMULATED(spe);
return emulate_spe(regs, reg, instr);
+ }
#endif
instr = (dsisr >> 10) & 0x7f;
@@ -783,23 +786,28 @@ int fix_alignment(struct pt_regs *regs)
flags |= SPLT;
nb = 8;
}
+ PPC_WARN_EMULATED(vsx);
return emulate_vsx(addr, reg, areg, regs, flags, nb);
}
#endif
/* A size of 0 indicates an instruction we don't support, with
* the exception of DCBZ which is handled as a special case here
*/
- if (instr == DCBZ)
+ if (instr == DCBZ) {
+ PPC_WARN_EMULATED(dcbz);
return emulate_dcbz(regs, addr);
+ }
if (unlikely(nb == 0))
return 0;
/* Load/Store Multiple instructions are handled in their own
* function
*/
- if (flags & M)
+ if (flags & M) {
+ PPC_WARN_EMULATED(multiple);
return emulate_multiple(regs, addr, reg, nb,
flags, instr, swiz);
+ }
/* Verify the address of the operand */
if (unlikely(user_mode(regs) &&
@@ -816,8 +824,12 @@ int fix_alignment(struct pt_regs *regs)
}
/* Special case for 16-byte FP loads and stores */
- if (nb == 16)
+ if (nb == 16) {
+ PPC_WARN_EMULATED(fp_pair);
return emulate_fp_pair(addr, reg, flags);
+ }
+
+ PPC_WARN_EMULATED(unaligned);
/* If we are loading, get the data from user space, else
* get it from register values
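The PPC_WARN_EMULATED() calls added above come from the newly included asm/emulated_ops.h, whose definition is not part of this diff. As a rough, hypothetical model of the bookkeeping those call sites imply (a named counter per emulated-instruction class, bumped on each emulation path), not the kernel's actual macro:

#include <stdio.h>

/* Hypothetical model only: the real counters and any warning/ratelimiting
 * live in asm/emulated_ops.h, which this diff does not show. */
static struct {
	unsigned long spe, vsx, dcbz, multiple, fp_pair, unaligned;
} ppc_emulated;

/* Count one emulated instruction of the given class, mirroring the call
 * sites added to fix_alignment() above. */
#define PPC_WARN_EMULATED(type)	(ppc_emulated.type++)

int main(void)
{
	PPC_WARN_EMULATED(dcbz);
	PPC_WARN_EMULATED(unaligned);
	printf("dcbz=%lu unaligned=%lu\n",
	       ppc_emulated.dcbz, ppc_emulated.unaligned);
	return 0;
}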
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 1e40bc053946..561b64652311 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -122,8 +122,6 @@ int main(void)
DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
- DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
- DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
@@ -131,35 +129,31 @@ int main(void)
DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled));
- DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
- DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+ DEFINE(PACAPERFPEND, offsetof(struct paca_struct, perf_counter_pending));
DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
- DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
#ifdef CONFIG_PPC_MM_SLICES
DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
context.low_slices_psize));
DEFINE(PACAHIGHSLICEPSIZE, offsetof(struct paca_struct,
context.high_slices_psize));
DEFINE(MMUPSIZEDEFSIZE, sizeof(struct mmu_psize_def));
+#endif /* CONFIG_PPC_MM_SLICES */
+#ifdef CONFIG_PPC_STD_MMU_64
+ DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
+ DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
+ DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
+ DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
+ DEFINE(PACAVMALLOCSLLP, offsetof(struct paca_struct, vmalloc_sllp));
+#ifdef CONFIG_PPC_MM_SLICES
DEFINE(MMUPSIZESLLP, offsetof(struct mmu_psize_def, sllp));
#else
DEFINE(PACACONTEXTSLLP, offsetof(struct paca_struct, context.sllp));
-
#endif /* CONFIG_PPC_MM_SLICES */
DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
- DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
DEFINE(PACALPPACAPTR, offsetof(struct paca_struct, lppaca_ptr));
- DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
- DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
- DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
- DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
- DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
- DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
- DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
-
DEFINE(SLBSHADOW_STACKVSID,
offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid));
DEFINE(SLBSHADOW_STACKESID,
@@ -169,6 +163,15 @@ int main(void)
DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
+#endif /* CONFIG_PPC_STD_MMU_64 */
+ DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
+ DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
+ DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
+ DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr));
+ DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
+ DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
+ DEFINE(PACA_DATA_OFFSET, offsetof(struct paca_struct, data_offset));
+ DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save));
#endif /* CONFIG_PPC64 */
/* RTAS */
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 3e33fb933d99..4a24a2fc4574 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -427,7 +427,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (architected)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTR_HPTE_TABLE |
+ MMU_FTR_TLBIE_206,
.icache_bsize = 128,
.dcache_bsize = 128,
.machine_check = machine_check_generic,
@@ -441,7 +442,8 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_name = "POWER7 (raw)",
.cpu_features = CPU_FTRS_POWER7,
.cpu_user_features = COMMON_USER_POWER7,
- .mmu_features = MMU_FTR_HPTE_TABLE,
+ .mmu_features = MMU_FTR_HPTE_TABLE |
+ MMU_FTR_TLBIE_206,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
new file mode 100644
index 000000000000..68ccf11e4f19
--- /dev/null
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -0,0 +1,163 @@
+/*
+ * Contains routines needed to support swiotlb for ppc.
+ *
+ * Copyright (C) 2009 Becky Bruce, Freescale Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/pfn.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pci.h>
+
+#include <asm/machdep.h>
+#include <asm/swiotlb.h>
+#include <asm/dma.h>
+#include <asm/abs_addr.h>
+
+int swiotlb __read_mostly;
+unsigned int ppc_swiotlb_enable;
+
+void *swiotlb_bus_to_virt(struct device *hwdev, dma_addr_t addr)
+{
+ unsigned long pfn = PFN_DOWN(swiotlb_bus_to_phys(hwdev, addr));
+ void *pageaddr = page_address(pfn_to_page(pfn));
+
+ if (pageaddr != NULL)
+ return pageaddr + (addr % PAGE_SIZE);
+ return NULL;
+}
+
+dma_addr_t swiotlb_phys_to_bus(struct device *hwdev, phys_addr_t paddr)
+{
+ return paddr + get_dma_direct_offset(hwdev);
+}
+
+phys_addr_t swiotlb_bus_to_phys(struct device *hwdev, dma_addr_t baddr)
+
+{
+ return baddr - get_dma_direct_offset(hwdev);
+}
+
+/*
+ * Determine if an address needs bounce buffering via swiotlb.
+ * Going forward I expect the swiotlb code to generalize on using
+ * a dma_ops->addr_needs_map, and this function will move from here to the
+ * generic swiotlb code.
+ */
+int
+swiotlb_arch_address_needs_mapping(struct device *hwdev, dma_addr_t addr,
+ size_t size)
+{
+ struct dma_mapping_ops *dma_ops = get_dma_ops(hwdev);
+
+ BUG_ON(!dma_ops);
+ return dma_ops->addr_needs_map(hwdev, addr, size);
+}
+
+/*
+ * Determine if an address is reachable by a pci device, or if we must bounce.
+ */
+static int
+swiotlb_pci_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+{
+ u64 mask = dma_get_mask(hwdev);
+ dma_addr_t max;
+ struct pci_controller *hose;
+ struct pci_dev *pdev = to_pci_dev(hwdev);
+
+ hose = pci_bus_to_host(pdev->bus);
+ max = hose->dma_window_base_cur + hose->dma_window_size;
+
+ /* check that we're within mapped pci window space */
+ if ((addr + size > max) | (addr < hose->dma_window_base_cur))
+ return 1;
+
+ return !is_buffer_dma_capable(mask, addr, size);
+}
+
+static int
+swiotlb_addr_needs_map(struct device *hwdev, dma_addr_t addr, size_t size)
+{
+ return !is_buffer_dma_capable(dma_get_mask(hwdev), addr, size);
+}
+
+
+/*
+ * At the moment, all platforms that use this code only require
+ * swiotlb to be used if we're operating on HIGHMEM. Since
+ * we don't ever call anything other than map_sg, unmap_sg,
+ * map_page, and unmap_page on highmem, use normal dma_ops
+ * for everything else.
+ */
+struct dma_mapping_ops swiotlb_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .addr_needs_map = swiotlb_addr_needs_map,
+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device
+};
+
+struct dma_mapping_ops swiotlb_pci_dma_ops = {
+ .alloc_coherent = dma_direct_alloc_coherent,
+ .free_coherent = dma_direct_free_coherent,
+ .map_sg = swiotlb_map_sg_attrs,
+ .unmap_sg = swiotlb_unmap_sg_attrs,
+ .dma_supported = swiotlb_dma_supported,
+ .map_page = swiotlb_map_page,
+ .unmap_page = swiotlb_unmap_page,
+ .addr_needs_map = swiotlb_pci_addr_needs_map,
+ .sync_single_range_for_cpu = swiotlb_sync_single_range_for_cpu,
+ .sync_single_range_for_device = swiotlb_sync_single_range_for_device,
+ .sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
+ .sync_sg_for_device = swiotlb_sync_sg_for_device
+};
+
+static int ppc_swiotlb_bus_notify(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ struct device *dev = data;
+
+ /* We are only interested in device addition */
+ if (action != BUS_NOTIFY_ADD_DEVICE)
+ return 0;
+
+ /* May need to bounce if the device can't address all of DRAM */
+ if (dma_get_mask(dev) < lmb_end_of_DRAM())
+ set_dma_ops(dev, &swiotlb_dma_ops);
+
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block ppc_swiotlb_plat_bus_notifier = {
+ .notifier_call = ppc_swiotlb_bus_notify,
+ .priority = 0,
+};
+
+static struct notifier_block ppc_swiotlb_of_bus_notifier = {
+ .notifier_call = ppc_swiotlb_bus_notify,
+ .priority = 0,
+};
+
+int __init swiotlb_setup_bus_notifier(void)
+{
+ bus_register_notifier(&platform_bus_type,
+ &ppc_swiotlb_plat_bus_notifier);
+ bus_register_notifier(&of_platform_bus_type,
+ &ppc_swiotlb_of_bus_notifier);
+
+ return 0;
+}
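A minimal, self-contained C sketch of the bounce decision that swiotlb_pci_addr_needs_map() makes above: bounce when the buffer falls outside the controller's mapped PCI window or beyond the device's DMA mask. The window struct and the dma-capable check here are simplified stand-ins, not the kernel's definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t dma_addr_t;

/* Simplified stand-in for the pci_controller window fields used above. */
struct pci_window {
	dma_addr_t base;	/* dma_window_base_cur */
	dma_addr_t size;	/* dma_window_size */
};

/* Model of is_buffer_dma_capable(): the whole buffer must be addressable
 * under the device's DMA mask. */
static int buffer_dma_capable(uint64_t mask, dma_addr_t addr, size_t size)
{
	return addr + size - 1 <= mask;
}

/* Nonzero means "bounce through swiotlb": outside the mapped window, or
 * not reachable under the mask -- the same two tests as the code above. */
static int pci_addr_needs_map(const struct pci_window *w, uint64_t mask,
			      dma_addr_t addr, size_t size)
{
	if (addr + size > w->base + w->size || addr < w->base)
		return 1;
	return !buffer_dma_capable(mask, addr, size);
}

int main(void)
{
	struct pci_window w = { .base = 0, .size = 1ULL << 32 };
	uint64_t mask32 = 0xffffffffULL;

	printf("%d\n", pci_addr_needs_map(&w, mask32, 0x10000000, 4096)); /* 0: direct */
	printf("%d\n", pci_addr_needs_map(&w, mask32, 1ULL << 33, 4096)); /* 1: bounce */
	return 0;
}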
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index 53c7788cba78..20a60d661ba8 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -19,7 +19,7 @@
* default the offset is PCI_DRAM_OFFSET.
*/
-static unsigned long get_dma_direct_offset(struct device *dev)
+unsigned long get_dma_direct_offset(struct device *dev)
{
if (dev)
return (unsigned long)dev->archdata.dma_data;
@@ -32,7 +32,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
{
void *ret;
#ifdef CONFIG_NOT_COHERENT_CACHE
- ret = __dma_alloc_coherent(size, dma_handle, flag);
+ ret = __dma_alloc_coherent(dev, size, dma_handle, flag);
if (ret == NULL)
return NULL;
*dma_handle += get_dma_direct_offset(dev);
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index abfc32330479..43e073477c34 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -526,6 +526,15 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
2:
TRACE_AND_RESTORE_IRQ(r5);
+#ifdef CONFIG_PERF_COUNTERS
+ /* check paca->perf_counter_pending if we're enabling ints */
+ lbz r3,PACAPERFPEND(r13)
+ and. r3,r3,r5
+ beq 27f
+ bl .perf_counter_do_pending
+27:
+#endif /* CONFIG_PERF_COUNTERS */
+
/* extract EE bit and use it to restore paca->hard_enabled */
ld r3,_MSR(r1)
rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
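In C terms, the exit-path hunk added above amounts to roughly the following (a sketch, not the kernel's code): deferred perf-counter work runs only when interrupts are actually being re-enabled and the PACA's pending flag is set.

#include <stdio.h>

/* Simplified model of the two PACA fields the assembly reads. */
struct paca {
	unsigned char soft_enabled;		/* value being restored (r5) */
	unsigned char perf_counter_pending;	/* PACAPERFPEND */
};

static void perf_counter_do_pending(void)
{
	printf("running deferred perf counter work\n");
}

/* Mirrors the lbz/and./beq/bl sequence: only act if both are nonzero. */
static void interrupt_return_check(struct paca *p)
{
	if (p->soft_enabled && p->perf_counter_pending)
		perf_counter_do_pending();
	/* ...the real path then restores MSR[EE] and hard_enabled... */
}

int main(void)
{
	struct paca p = { .soft_enabled = 1, .perf_counter_pending = 1 };
	interrupt_return_check(&p);
	return 0;
}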
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
new file mode 100644
index 000000000000..eb898112e577
--- /dev/null
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -0,0 +1,978 @@
+/*
+ * This file contains the 64-bit "server" PowerPC variant
+ * of the low level exception handling including exception
+ * vectors, exception return, part of the slb and stab
+ * handling and other fixed offset specific things.
+ *
+ * This file is meant to be #included from head_64.S due to
+ * position dependent assembly.
+ *
+ * Most of this originates from head_64.S and thus has the same
+ * copyright history.
+ *
+ */
+
+/*
+ * We layout physical memory as follows:
+ * 0x0000 - 0x00ff : Secondary processor spin code
+ * 0x0100 - 0x2fff : pSeries Interrupt prologs
+ * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
+ * 0x6000 - 0x6fff : Initial (CPU0) segment table
+ * 0x7000 - 0x7fff : FWNMI data area
+ * 0x8000 - : Early init and support code
+ */
+
+
+/*
+ * SPRG Usage
+ *
+ * Register Definition
+ *
+ * SPRG0 reserved for hypervisor
+ * SPRG1 temp - used to save gpr
+ * SPRG2 temp - used to save gpr
+ * SPRG3 virt addr of paca
+ */
+
+/*
+ * This is the start of the interrupt handlers for pSeries
+ * This code runs with relocation off.
+ * Code from here to __end_interrupts gets copied down to real
+ * address 0x100 when we are running a relocatable kernel.
+ * Therefore any relative branches in this section must only
+ * branch to labels in this section.
+ */
+ . = 0x100
+ .globl __start_interrupts
+__start_interrupts:
+
+ STD_EXCEPTION_PSERIES(0x100, system_reset)
+
+ . = 0x200
+_machine_check_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+ . = 0x300
+ .globl data_access_pSeries
+data_access_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13
+BEGIN_FTR_SECTION
+ mtspr SPRN_SPRG2,r12
+ mfspr r13,SPRN_DAR
+ mfspr r12,SPRN_DSISR
+ srdi r13,r13,60
+ rlwimi r13,r12,16,0x20
+ mfcr r12
+ cmpwi r13,0x2c
+ beq do_stab_bolted_pSeries
+ mtcrf 0x80,r12
+ mfspr r12,SPRN_SPRG2
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
+
+ . = 0x380
+ .globl data_access_slb_pSeries
+data_access_slb_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_DAR
+ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ /*
+ * We can't just use a direct branch to .slb_miss_realmode
+ * because the distance from here to there depends on where
+ * the kernel ends up being put.
+ */
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+
+ STD_EXCEPTION_PSERIES(0x400, instruction_access)
+
+ . = 0x480
+ .globl instruction_access_slb_pSeries
+instruction_access_slb_pSeries:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13
+ mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
+ std r3,PACA_EXSLB+EX_R3(r13)
+ mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
+ std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
+ mfcr r9
+#ifdef __DISABLED__
+ /* Keep that around for when we re-implement dynamic VSIDs */
+ cmpdi r3,0
+ bge slb_miss_user_pseries
+#endif /* __DISABLED__ */
+ std r10,PACA_EXSLB+EX_R10(r13)
+ std r11,PACA_EXSLB+EX_R11(r13)
+ std r12,PACA_EXSLB+EX_R12(r13)
+ mfspr r10,SPRN_SPRG1
+ std r10,PACA_EXSLB+EX_R13(r13)
+ mfspr r12,SPRN_SRR1 /* and SRR1 */
+#ifndef CONFIG_RELOCATABLE
+ b .slb_miss_realmode
+#else
+ mfctr r11
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10, .slb_miss_realmode)
+ mtctr r10
+ bctr
+#endif
+
+ MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
+ STD_EXCEPTION_PSERIES(0x600, alignment)
+ STD_EXCEPTION_PSERIES(0x700, program_check)
+ STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
+ MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
+ STD_EXCEPTION_PSERIES(0xa00, trap_0a)
+ STD_EXCEPTION_PSERIES(0xb00, trap_0b)
+
+ . = 0xc00
+ .globl system_call_pSeries
+system_call_pSeries:
+ HMT_MEDIUM
+BEGIN_FTR_SECTION
+ cmpdi r0,0x1ebe
+ beq- 1f
+END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
+ mr r9,r13
+ mfspr r13,SPRN_SPRG3
+ mfspr r11,SPRN_SRR0
+ ld r12,PACAKBASE(r13)
+ ld r10,PACAKMSR(r13)
+ LOAD_HANDLER(r12, system_call_entry)
+ mtspr SPRN_SRR0,r12
+ mfspr r12,SPRN_SRR1
+ mtspr SPRN_SRR1,r10
+ rfid
+ b . /* prevent speculative execution */
+
+/* Fast LE/BE switch system call */
+1: mfspr r12,SPRN_SRR1
+ xori r12,r12,MSR_LE
+ mtspr SPRN_SRR1,r12
+ rfid /* return to userspace */
+ b .
+
+ STD_EXCEPTION_PSERIES(0xd00, single_step)
+ STD_EXCEPTION_PSERIES(0xe00, trap_0e)
+
+ /* We need to deal with the Altivec unavailable exception
+ * here which is at 0xf20, thus in the middle of the
+ * prolog code of the PerformanceMonitor one. A little
+ * trickery is thus necessary
+ */
+ . = 0xf00
+ b performance_monitor_pSeries
+
+ . = 0xf20
+ b altivec_unavailable_pSeries
+
+ . = 0xf40
+ b vsx_unavailable_pSeries
+
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
+#endif /* CONFIG_CBE_RAS */
+ STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
+#endif /* CONFIG_CBE_RAS */
+ STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
+#ifdef CONFIG_CBE_RAS
+ HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
+#endif /* CONFIG_CBE_RAS */
+
+ . = 0x3000
+
+/*** pSeries interrupt support ***/
+
+ /* moved from 0xf00 */
+ STD_EXCEPTION_PSERIES(., performance_monitor)
+ STD_EXCEPTION_PSERIES(., altivec_unavailable)
+ STD_EXCEPTION_PSERIES(., vsx_unavailable)
+
+/*
+ * An interrupt came in while soft-disabled; clear EE in SRR1,
+ * clear paca->hard_enabled and return.
+ */
+masked_interrupt:
+ stb r10,PACAHARDIRQEN(r13)
+ mtcrf 0x80,r9
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ mfspr r10,SPRN_SRR1
+ rldicl r10,r10,48,1 /* clear MSR_EE */
+ rotldi r10,r10,16
+ mtspr SPRN_SRR1,r10
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ mfspr r13,SPRN_SPRG1
+ rfid
+ b .
+
+ .align 7
+do_stab_bolted_pSeries:
+ mtcrf 0x80,r12
+ mfspr r12,SPRN_SPRG2
+ EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Vectors for the FWNMI option. Share common code.
+ */
+ .globl system_reset_fwnmi
+ .align 7
+system_reset_fwnmi:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
+
+ .globl machine_check_fwnmi
+ .align 7
+machine_check_fwnmi:
+ HMT_MEDIUM
+ mtspr SPRN_SPRG1,r13 /* save r13 */
+ EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
+
+#endif /* CONFIG_PPC_PSERIES */
+
+#ifdef __DISABLED__
+/*
+ * This is used for when the SLB miss handler has to go virtual,
+ * which doesn't happen for now anymore but will once we re-implement
+ * dynamic VSIDs for shared page tables
+ */
+slb_miss_user_pseries:
+ std r10,PACA_EXGEN+EX_R10(r13)
+ std r11,PACA_EXGEN+EX_R11(r13)
+ std r12,PACA_EXGEN+EX_R12(r13)
+ mfspr r10,SPRG1
+ ld r11,PACA_EXSLB+EX_R9(r13)
+ ld r12,PACA_EXSLB+EX_R3(r13)
+ std r10,PACA_EXGEN+EX_R13(r13)
+ std r11,PACA_EXGEN+EX_R9(r13)
+ std r12,PACA_EXGEN+EX_R3(r13)
+ clrrdi r12,r13,32
+ mfmsr r10
+ mfspr r11,SRR0 /* save SRR0 */
+ ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
+ ori r10,r10,MSR_IR|MSR_DR|MSR_RI
+ mtspr SRR0,r12
+ mfspr r12,SRR1 /* and SRR1 */
+ mtspr SRR1,r10
+ rfid
+ b . /* prevent spec. execution */
+#endif /* __DISABLED__ */
+
+ .align 7
+ .globl __end_interrupts
+__end_interrupts:
+
+/*
+ * Code from here down to __end_handlers is invoked from the
+ * exception prologs above. Because the prologs assemble the
+ * addresses of these handlers using the LOAD_HANDLER macro,
+ * which uses an addi instruction, these handlers must be in
+ * the first 32k of the kernel image.
+ */
+
+/*** Common interrupt handlers ***/
+
+ STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
+
+ /*
+ * Machine check is different because we use a different
+ * save area: PACA_EXMC instead of PACA_EXGEN.
+ */
+ .align 7
+ .globl machine_check_common
+machine_check_common:
+ EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
+ FINISH_NAP
+ DISABLE_INTS
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .machine_check_exception
+ b .ret_from_except
+
+ STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
+ STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
+ STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
+ STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
+ STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
+ STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
+#ifdef CONFIG_ALTIVEC
+ STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
+#else
+ STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
+#endif
+#ifdef CONFIG_CBE_RAS
+ STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
+ STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
+ STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
+#endif /* CONFIG_CBE_RAS */
+
+ .align 7
+system_call_entry:
+ b system_call_common
+
+/*
+ * Here we have detected that the kernel stack pointer is bad.
+ * R9 contains the saved CR, r13 points to the paca,
+ * r10 contains the (bad) kernel stack pointer,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * We switch to using an emergency stack, save the registers there,
+ * and call kernel_bad_stack(), which panics.
+ */
+bad_stack:
+ ld r1,PACAEMERGSP(r13)
+ subi r1,r1,64+INT_FRAME_SIZE
+ std r9,_CCR(r1)
+ std r10,GPR1(r1)
+ std r11,_NIP(r1)
+ std r12,_MSR(r1)
+ mfspr r11,SPRN_DAR
+ mfspr r12,SPRN_DSISR
+ std r11,_DAR(r1)
+ std r12,_DSISR(r1)
+ mflr r10
+ mfctr r11
+ mfxer r12
+ std r10,_LINK(r1)
+ std r11,_CTR(r1)
+ std r12,_XER(r1)
+ SAVE_GPR(0,r1)
+ SAVE_GPR(2,r1)
+ SAVE_4GPRS(3,r1)
+ SAVE_2GPRS(7,r1)
+ SAVE_10GPRS(12,r1)
+ SAVE_10GPRS(22,r1)
+ lhz r12,PACA_TRAP_SAVE(r13)
+ std r12,_TRAP(r1)
+ addi r11,r1,INT_FRAME_SIZE
+ std r11,0(r1)
+ li r12,0
+ std r12,0(r11)
+ ld r2,PACATOC(r13)
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .kernel_bad_stack
+ b 1b
+
+/*
+ * Here r13 points to the paca, r9 contains the saved CR,
+ * SRR0 and SRR1 are saved in r11 and r12,
+ * r9 - r13 are saved in paca->exgen.
+ */
+ .align 7
+ .globl data_access_common
+data_access_common:
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ li r5,0x300
+ b .do_hash_page /* Try to handle as hpte fault */
+
+ .align 7
+ .globl instruction_access_common
+instruction_access_common:
+ EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
+ ld r3,_NIP(r1)
+ andis. r4,r12,0x5820
+ li r5,0x400
+ b .do_hash_page /* Try to handle as hpte fault */
+
+/*
+ * Here is the common SLB miss user that is used when going to virtual
+ * mode for SLB misses, that is currently not used
+ */
+#ifdef __DISABLED__
+ .align 7
+ .globl slb_miss_user_common
+slb_miss_user_common:
+ mflr r10
+ std r3,PACA_EXGEN+EX_DAR(r13)
+ stw r9,PACA_EXGEN+EX_CCR(r13)
+ std r10,PACA_EXGEN+EX_LR(r13)
+ std r11,PACA_EXGEN+EX_SRR0(r13)
+ bl .slb_allocate_user
+
+ ld r10,PACA_EXGEN+EX_LR(r13)
+ ld r3,PACA_EXGEN+EX_R3(r13)
+ lwz r9,PACA_EXGEN+EX_CCR(r13)
+ ld r11,PACA_EXGEN+EX_SRR0(r13)
+ mtlr r10
+ beq- slb_miss_fault
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- unrecov_user_slb
+ mfmsr r10
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+.machine pop
+
+ clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
+ mtmsrd r10,1
+
+ mtspr SRR0,r11
+ mtspr SRR1,r12
+
+ ld r9,PACA_EXGEN+EX_R9(r13)
+ ld r10,PACA_EXGEN+EX_R10(r13)
+ ld r11,PACA_EXGEN+EX_R11(r13)
+ ld r12,PACA_EXGEN+EX_R12(r13)
+ ld r13,PACA_EXGEN+EX_R13(r13)
+ rfid
+ b .
+
+slb_miss_fault:
+ EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
+ ld r4,PACA_EXGEN+EX_DAR(r13)
+ li r5,0
+ std r4,_DAR(r1)
+ std r5,_DSISR(r1)
+ b handle_page_fault
+
+unrecov_user_slb:
+ EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+#endif /* __DISABLED__ */
+
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r12 contain the saved SRR1, SRR0 is still ready for return
+ * r3 has the faulting address
+ * r9 - r13 are saved in paca->exslb.
+ * r3 is saved in paca->slb_r3
+ * We assume we aren't going to take any exceptions during this procedure.
+ */
+_GLOBAL(slb_miss_realmode)
+ mflr r10
+#ifdef CONFIG_RELOCATABLE
+ mtctr r11
+#endif
+
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
+
+ bl .slb_allocate_realmode
+
+ /* All done -- return from exception. */
+
+ ld r10,PACA_EXSLB+EX_LR(r13)
+ ld r3,PACA_EXSLB+EX_R3(r13)
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+ ld r11,PACALPPACAPTR(r13)
+ ld r11,LPPACASRR0(r11) /* get SRR0 value */
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+
+ mtlr r10
+
+ andi. r10,r12,MSR_RI /* check for unrecoverable exception */
+ beq- 2f
+
+.machine push
+.machine "power4"
+ mtcrf 0x80,r9
+ mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
+.machine pop
+
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+2:
+#ifdef CONFIG_PPC_ISERIES
+BEGIN_FW_FTR_SECTION
+ b unrecov_slb
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+#endif /* CONFIG_PPC_ISERIES */
+ mfspr r11,SPRN_SRR0
+ ld r10,PACAKBASE(r13)
+ LOAD_HANDLER(r10,unrecov_slb)
+ mtspr SPRN_SRR0,r10
+ ld r10,PACAKMSR(r13)
+ mtspr SPRN_SRR1,r10
+ rfid
+ b .
+
+unrecov_slb:
+ EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
+ DISABLE_INTS
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+ .align 7
+ .globl hardware_interrupt_common
+ .globl hardware_interrupt_entry
+hardware_interrupt_common:
+ EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
+ FINISH_NAP
+hardware_interrupt_entry:
+ DISABLE_INTS
+BEGIN_FTR_SECTION
+ bl .ppc64_runlatch_on
+END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_IRQ
+ b .ret_from_except_lite
+
+#ifdef CONFIG_PPC_970_NAP
+power4_fixup_nap:
+ andc r9,r9,r10
+ std r9,TI_LOCAL_FLAGS(r11)
+ ld r10,_LINK(r1) /* make idle task do the */
+ std r10,_NIP(r1) /* equivalent of a blr */
+ blr
+#endif
+
+ .align 7
+ .globl alignment_common
+alignment_common:
+ mfspr r10,SPRN_DAR
+ std r10,PACA_EXGEN+EX_DAR(r13)
+ mfspr r10,SPRN_DSISR
+ stw r10,PACA_EXGEN+EX_DSISR(r13)
+ EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
+ ld r3,PACA_EXGEN+EX_DAR(r13)
+ lwz r4,PACA_EXGEN+EX_DSISR(r13)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .alignment_exception
+ b .ret_from_except
+
+ .align 7
+ .globl program_check_common
+program_check_common:
+ EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .program_check_exception
+ b .ret_from_except
+
+ .align 7
+ .globl fp_unavailable_common
+fp_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
+ bne 1f /* if from user, just load it up */
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .kernel_fp_unavailable_exception
+ BUG_OPCODE
+1: bl .load_up_fpu
+ b fast_exception_return
+
+ .align 7
+ .globl altivec_unavailable_common
+altivec_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
+#ifdef CONFIG_ALTIVEC
+BEGIN_FTR_SECTION
+ beq 1f
+ bl .load_up_altivec
+ b fast_exception_return
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
+#endif
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .altivec_unavailable_exception
+ b .ret_from_except
+
+ .align 7
+ .globl vsx_unavailable_common
+vsx_unavailable_common:
+ EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ bne .load_up_vsx
+1:
+END_FTR_SECTION_IFSET(CPU_FTR_VSX)
+#endif
+ bl .save_nvgprs
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ENABLE_INTS
+ bl .vsx_unavailable_exception
+ b .ret_from_except
+
+ .align 7
+ .globl __end_handlers
+__end_handlers:
+
+/*
+ * Return from an exception with minimal checks.
+ * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
+ * If interrupts have been enabled, or anything has been
+ * done that might have changed the scheduling status of
+ * any task or sent any task a signal, you should use
+ * ret_from_except or ret_from_except_lite instead of this.
+ */
+fast_exc_return_irq: /* restores irq state too */
+ ld r3,SOFTE(r1)
+ TRACE_AND_RESTORE_IRQ(r3);
+ ld r12,_MSR(r1)
+ rldicl r4,r12,49,63 /* get MSR_EE to LSB */
+ stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
+ b 1f
+
+ .globl fast_exception_return
+fast_exception_return:
+ ld r12,_MSR(r1)
+1: ld r11,_NIP(r1)
+ andi. r3,r12,MSR_RI /* check if RI is set */
+ beq- unrecov_fer
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+ andi. r3,r12,MSR_PR
+ beq 2f
+ ACCOUNT_CPU_USER_EXIT(r3, r4)
+2:
+#endif
+
+ ld r3,_CCR(r1)
+ ld r4,_LINK(r1)
+ ld r5,_CTR(r1)
+ ld r6,_XER(r1)
+ mtcr r3
+ mtlr r4
+ mtctr r5
+ mtxer r6
+ REST_GPR(0, r1)
+ REST_8GPRS(2, r1)
+
+ mfmsr r10
+ rldicl r10,r10,48,1 /* clear EE */
+ rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
+ mtmsrd r10,1
+
+ mtspr SPRN_SRR1,r12
+ mtspr SPRN_SRR0,r11
+ REST_4GPRS(10, r1)
+ ld r1,GPR1(r1)
+ rfid
+ b . /* prevent speculative execution */
+
+unrecov_fer:
+ bl .save_nvgprs
+1: addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .unrecoverable_exception
+ b 1b
+
+
+/*
+ * Hash table stuff
+ */
+ .align 7
+_STATIC(do_hash_page)
+ std r3,_DAR(r1)
+ std r4,_DSISR(r1)
+
+ andis. r0,r4,0xa450 /* weird error? */
+ bne- handle_page_fault /* if not, try to insert a HPTE */
+BEGIN_FTR_SECTION
+ andis. r0,r4,0x0020 /* Is it a segment table fault? */
+ bne- do_ste_alloc /* If so handle it */
+END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
+
+ /*
+ * On iSeries, we soft-disable interrupts here, then
+ * hard-enable interrupts so that the hash_page code can spin on
+ * the hash_table_lock without problems on a shared processor.
+ */
+ DISABLE_INTS
+
+ /*
+ * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
+ * and will clobber volatile registers when irq tracing is enabled
+ * so we need to reload them. It may be possible to be smarter here
+ * and move the irq tracing elsewhere but let's keep it simple for
+ * now
+ */
+#ifdef CONFIG_TRACE_IRQFLAGS
+ ld r3,_DAR(r1)
+ ld r4,_DSISR(r1)
+ ld r5,_TRAP(r1)
+ ld r12,_MSR(r1)
+ clrrdi r5,r5,4
+#endif /* CONFIG_TRACE_IRQFLAGS */
+ /*
+ * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
+ * accessing a userspace segment (even from the kernel). We assume
+ * kernel addresses always have the high bit set.
+ */
+ rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
+ rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
+ orc r0,r12,r0 /* MSR_PR | ~high_bit */
+ rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
+ ori r4,r4,1 /* add _PAGE_PRESENT */
+ rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
+
+ /*
+ * r3 contains the faulting address
+ * r4 contains the required access permissions
+ * r5 contains the trap number
+ *
+ * at return r3 = 0 for success
+ */
+ bl .hash_page /* build HPTE if possible */
+ cmpdi r3,0 /* see if hash_page succeeded */
+
+BEGIN_FW_FTR_SECTION
+ /*
+ * If we had interrupts soft-enabled at the point where the
+ * DSI/ISI occurred, and an interrupt came in during hash_page,
+ * handle it now.
+ * We jump to ret_from_except_lite rather than fast_exception_return
+ * because ret_from_except_lite will check for and handle pending
+ * interrupts if necessary.
+ */
+ beq 13f
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
+
+BEGIN_FW_FTR_SECTION
+ /*
+ * Here we have interrupts hard-disabled, so it is sufficient
+ * to restore paca->{soft,hard}_enable and get out.
+ */
+ beq fast_exc_return_irq /* Return from exception on success */
+END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
+
+ /* For a hash failure, we don't bother re-enabling interrupts */
+ ble- 12f
+
+ /*
+ * hash_page couldn't handle it, set soft interrupt enable back
+ * to what it was before the trap. Note that .raw_local_irq_restore
+ * handles any interrupts pending at this point.
+ */
+ ld r3,SOFTE(r1)
+ TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
+ bl .raw_local_irq_restore
+ b 11f
+
+/* Here we have a page fault that hash_page can't handle. */
+handle_page_fault:
+ ENABLE_INTS
+11: ld r4,_DAR(r1)
+ ld r5,_DSISR(r1)
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ bl .do_page_fault
+ cmpdi r3,0
+ beq+ 13f
+ bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ lwz r4,_DAR(r1)
+ bl .bad_page_fault
+ b .ret_from_except
+
+13: b .ret_from_except_lite
+
+/* We have a page fault that hash_page could handle but HV refused
+ * the PTE insertion
+ */
+12: bl .save_nvgprs
+ mr r5,r3
+ addi r3,r1,STACK_FRAME_OVERHEAD
+ ld r4,_DAR(r1)
+ bl .low_hash_fault
+ b .ret_from_except
+
+ /* here we have a segment miss */
+do_ste_alloc:
+ bl .ste_allocate /* try to insert stab entry */
+ cmpdi r3,0
+ bne- handle_page_fault
+ b fast_exception_return
+
+/*
+ * r13 points to the PACA, r9 contains the saved CR,
+ * r11 and r12 contain the saved SRR0 and SRR1.
+ * r9 - r13 are saved in paca->exslb.
+ * We assume we aren't going to take any exceptions during this procedure.
+ * We assume (DAR >> 60) == 0xc.
+ */
+ .align 7
+_GLOBAL(do_stab_bolted)
+ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
+ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
+
+ /* Hash to the primary group */
+ ld r10,PACASTABVIRT(r13)
+ mfspr r11,SPRN_DAR
+ srdi r11,r11,28
+ rldimi r10,r11,7,52 /* r10 = first ste of the group */
+
+ /* Calculate VSID */
+ /* This is a kernel address, so protovsid = ESID */
+ ASM_VSID_SCRAMBLE(r11, r9, 256M)
+ rldic r9,r11,12,16 /* r9 = vsid << 12 */
+
+ /* Search the primary group for a free entry */
+1: ld r11,0(r10) /* Test valid bit of the current ste */
+ andi. r11,r11,0x80
+ beq 2f
+ addi r10,r10,16
+ andi. r11,r10,0x70
+ bne 1b
+
+ /* Stick to only searching the primary group for now. */
+ /* At least for now, we use a very simple random castout scheme */
+ /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
+ mftb r11
+ rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
+ ori r11,r11,0x10
+
+ /* r10 currently points to an ste one past the group of interest */
+ /* make it point to the randomly selected entry */
+ subi r10,r10,128
+ or r10,r10,r11 /* r10 is the entry to invalidate */
+
+ isync /* mark the entry invalid */
+ ld r11,0(r10)
+ rldicl r11,r11,56,1 /* clear the valid bit */
+ rotldi r11,r11,8
+ std r11,0(r10)
+ sync
+
+ clrrdi r11,r11,28 /* Get the esid part of the ste */
+ slbie r11
+
+2: std r9,8(r10) /* Store the vsid part of the ste */
+ eieio
+
+ mfspr r11,SPRN_DAR /* Get the new esid */
+ clrrdi r11,r11,28 /* Permits a full 32b of ESID */
+ ori r11,r11,0x90 /* Turn on valid and kp */
+ std r11,0(r10) /* Put new entry back into the stab */
+
+ sync
+
+ /* All done -- return from exception. */
+ lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
+ ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
+
+ andi. r10,r12,MSR_RI
+ beq- unrecov_slb
+
+ mtcrf 0x80,r9 /* restore CR */
+
+ mfmsr r10
+ clrrdi r10,r10,2
+ mtmsrd r10,1
+
+ mtspr SPRN_SRR0,r11
+ mtspr SPRN_SRR1,r12
+ ld r9,PACA_EXSLB+EX_R9(r13)
+ ld r10,PACA_EXSLB+EX_R10(r13)
+ ld r11,PACA_EXSLB+EX_R11(r13)
+ ld r12,PACA_EXSLB+EX_R12(r13)
+ ld r13,PACA_EXSLB+EX_R13(r13)
+ rfid
+ b . /* prevent speculative execution */
+
+/*
+ * Space for CPU0's segment table.
+ *
+ * On iSeries, the hypervisor must fill in at least one entry before
+ * we get control (with relocate on). The address is given to the hv
+ * as a page number (see xLparMap below), so this must be at a
+ * fixed address (the linker can't compute (u64)&initial_stab >>
+ * PAGE_SHIFT).
+ */
+ . = STAB0_OFFSET /* 0x6000 */
+ .globl initial_stab
+initial_stab:
+ .space 4096
+
+#ifdef CONFIG_PPC_PSERIES
+/*
+ * Data area reserved for FWNMI option.
+ * This address (0x7000) is fixed by the RPA.
+ */
+ . = 0x7000
+ .globl fwnmi_data_area
+fwnmi_data_area:
+#endif /* CONFIG_PPC_PSERIES */
+
+ /* iSeries does not use the FWNMI stuff, so it is safe to put
+ * this here, even if we later allow kernels that will boot on
+ * both pSeries and iSeries */
+#ifdef CONFIG_PPC_ISERIES
+ . = LPARMAP_PHYS
+ .globl xLparMap
+xLparMap:
+ .quad HvEsidsToMap /* xNumberEsids */
+ .quad HvRangesToMap /* xNumberRanges */
+ .quad STAB0_PAGE /* xSegmentTableOffs */
+ .zero 40 /* xRsvd */
+ /* xEsids (HvEsidsToMap entries of 2 quads) */
+ .quad PAGE_OFFSET_ESID /* xKernelEsid */
+ .quad PAGE_OFFSET_VSID /* xKernelVsid */
+ .quad VMALLOC_START_ESID /* xKernelEsid */
+ .quad VMALLOC_START_VSID /* xKernelVsid */
+ /* xRanges (HvRangesToMap entries of 3 quads) */
+ .quad HvPagesToMap /* xPages */
+ .quad 0 /* xOffset */
+ .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
+
+#endif /* CONFIG_PPC_ISERIES */
+
+#ifdef CONFIG_PPC_PSERIES
+ . = 0x8000
+#endif /* CONFIG_PPC_PSERIES */
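The rlwinm/rotldi/orc/rlwimi sequence in do_hash_page above builds the access flags passed to hash_page(). A C rendering of that computation may be easier to follow; the _PAGE_* and MSR/DSISR values below are illustrative placeholders, not the kernel's definitions.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit values only; the real definitions live in the powerpc
 * pgtable and reg headers, which are not part of this file. */
#define PAGE_PRESENT	0x001
#define PAGE_USER	0x002
#define PAGE_RW		0x004
#define PAGE_EXEC	0x008
#define MSR_PR		0x4000UL
#define DSISR_STORE	0x02000000U

/* RW for a store fault, USER if the access came from user mode or targets
 * a user address (kernel addresses have the high bit set), always PRESENT,
 * EXEC for an instruction (0x400) fault. */
static unsigned long hash_access_flags(uint64_t dar, uint32_t dsisr,
					uint64_t msr, unsigned int trap)
{
	unsigned long access = PAGE_PRESENT;

	if (dsisr & DSISR_STORE)
		access |= PAGE_RW;
	if ((msr & MSR_PR) || !(dar >> 63))
		access |= PAGE_USER;
	if (trap == 0x400)
		access |= PAGE_EXEC;
	return access;
}

int main(void)
{
	/* Kernel-mode store to a user address: PRESENT | RW | USER. */
	printf("%#lx\n", hash_access_flags(0x10000000ULL, DSISR_STORE, 0, 0x300));
	return 0;
}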
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index 70e2a736be1f..1b12696cca06 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -23,25 +23,14 @@
#include <asm/code-patching.h>
#include <asm/ftrace.h>
-#ifdef CONFIG_PPC32
-# define GET_ADDR(addr) addr
-#else
-/* PowerPC64's functions are data that points to the functions */
-# define GET_ADDR(addr) (*(unsigned long *)addr)
-#endif
#ifdef CONFIG_DYNAMIC_FTRACE
-static unsigned int ftrace_nop_replace(void)
-{
- return PPC_INST_NOP;
-}
-
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
unsigned int op;
- addr = GET_ADDR(addr);
+ addr = ppc_function_entry((void *)addr);
/* if (link) set op to 'bl' else 'b' */
op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);
@@ -49,14 +38,6 @@ ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
return op;
}
-#ifdef CONFIG_PPC64
-# define _ASM_ALIGN " .align 3 "
-# define _ASM_PTR " .llong "
-#else
-# define _ASM_ALIGN " .align 2 "
-# define _ASM_PTR " .long "
-#endif
-
static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
@@ -157,7 +138,7 @@ __ftrace_make_nop(struct module *mod,
* 0xe8, 0x4c, 0x00, 0x28, ld r2,40(r12)
*/
- pr_debug("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
+ pr_devel("ip:%lx jumps to %lx r2: %lx", ip, tramp, mod->arch.toc);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@@ -165,7 +146,7 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
- pr_debug(" %08x %08x", jmp[0], jmp[1]);
+ pr_devel(" %08x %08x", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d820000) ||
@@ -181,23 +162,23 @@ __ftrace_make_nop(struct module *mod,
offset = ((unsigned)((unsigned short)jmp[0]) << 16) +
(int)((short)jmp[1]);
- pr_debug(" %x ", offset);
+ pr_devel(" %x ", offset);
/* get the address this jumps too */
tramp = mod->arch.toc + offset + 32;
- pr_debug("toc: %lx", tramp);
+ pr_devel("toc: %lx", tramp);
if (probe_kernel_read(jmp, (void *)tramp, 8)) {
printk(KERN_ERR "Failed to read %lx\n", tramp);
return -EFAULT;
}
- pr_debug(" %08x %08x\n", jmp[0], jmp[1]);
+ pr_devel(" %08x %08x\n", jmp[0], jmp[1]);
ptr = ((unsigned long)jmp[0] << 32) + jmp[1];
/* This should match what was called */
- if (ptr != GET_ADDR(addr)) {
+ if (ptr != ppc_function_entry((void *)addr)) {
printk(KERN_ERR "addr does not match %lx\n", ptr);
return -EINVAL;
}
@@ -269,7 +250,7 @@ __ftrace_make_nop(struct module *mod,
* 0x4e, 0x80, 0x04, 0x20 bctr
*/
- pr_debug("ip:%lx jumps to %lx", ip, tramp);
+ pr_devel("ip:%lx jumps to %lx", ip, tramp);
/* Find where the trampoline jumps to */
if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
@@ -277,7 +258,7 @@ __ftrace_make_nop(struct module *mod,
return -EFAULT;
}
- pr_debug(" %08x %08x ", jmp[0], jmp[1]);
+ pr_devel(" %08x %08x ", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
@@ -293,7 +274,7 @@ __ftrace_make_nop(struct module *mod,
if (tramp & 0x8000)
tramp -= 0x10000;
- pr_debug(" %lx ", tramp);
+ pr_devel(" %lx ", tramp);
if (tramp != addr) {
printk(KERN_ERR
@@ -328,7 +309,7 @@ int ftrace_make_nop(struct module *mod,
if (test_24bit_addr(ip, addr)) {
/* within range */
old = ftrace_call_replace(ip, addr, 1);
- new = ftrace_nop_replace();
+ new = PPC_INST_NOP;
return ftrace_modify_code(ip, old, new);
}
@@ -402,7 +383,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
/* ld r2,40(r1) */
op[1] = 0xe8410028;
- pr_debug("write to %lx\n", rec->ip);
+ pr_devel("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, op, MCOUNT_INSN_SIZE * 2))
return -EPERM;
@@ -442,7 +423,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
return -EINVAL;
}
- pr_debug("write to %lx\n", rec->ip);
+ pr_devel("write to %lx\n", rec->ip);
if (probe_kernel_write((void *)ip, &op, MCOUNT_INSN_SIZE))
return -EPERM;
@@ -466,7 +447,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
*/
if (test_24bit_addr(ip, addr)) {
/* within range */
- old = ftrace_nop_replace();
+ old = PPC_INST_NOP;
new = ftrace_call_replace(ip, addr, 1);
return ftrace_modify_code(ip, old, new);
}
@@ -570,7 +551,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
return_hooker = (unsigned long)&mod_return_to_handler;
#endif
- return_hooker = GET_ADDR(return_hooker);
+ return_hooker = ppc_function_entry((void *)return_hooker);
/*
* Protect against fault, even if it shouldn't
@@ -594,7 +575,7 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
PPC_LONG "2b,4b\n"
".previous"
- : [old] "=r" (old), [faulted] "=r" (faulted)
+ : [old] "=&r" (old), [faulted] "=r" (faulted)
: [parent] "r" (parent), [return_hooker] "r" (return_hooker)
: "memory"
);
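The GET_ADDR() macro removed above existed because 64-bit PowerPC (ELFv1) function symbols point at a function descriptor rather than directly at code; ppc_function_entry() now performs that dereference. A hedged C sketch of the idea, where the descriptor layout is the conventional three-word "opd" entry and function_entry() is a stand-in, not the kernel helper:

#include <stdio.h>

/* Conventional 64-bit ELFv1 "opd" function descriptor: code entry point,
 * TOC pointer, environment pointer. Shown for illustration only. */
struct func_desc {
	unsigned long entry;
	unsigned long toc;
	unsigned long env;
};

/* On a descriptor-based ABI the address of a function symbol is the
 * descriptor; its first word is the real branch target, which is what
 * the removed GET_ADDR() open-coded with a pointer dereference. */
static unsigned long function_entry(const struct func_desc *fd)
{
	return fd->entry;
}

int main(void)
{
	/* Placeholder entry address, purely for illustration. */
	struct func_desc ftrace_caller_desc = { 0x10001000UL, 0, 0 };

	printf("branch target: %#lx\n", function_entry(&ftrace_caller_desc));
	return 0;
}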
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index c01467f952d3..48469463f89e 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -733,9 +733,11 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
AltiVecUnavailable:
EXCEPTION_PROLOG
#ifdef CONFIG_ALTIVEC
- bne load_up_altivec /* if from user, just load it up */
+ beq 1f
+ bl load_up_altivec /* if from user, just load it up */
+ b fast_exception_return
#endif /* CONFIG_ALTIVEC */
- addi r3,r1,STACK_FRAME_OVERHEAD
+1: addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception)
PerformanceMonitor:
@@ -743,101 +745,6 @@ PerformanceMonitor:
addi r3,r1,STACK_FRAME_OVERHEAD
EXC_XFER_STD(0xf00, performance_monitor_exception)
-#ifdef CONFIG_ALTIVEC
-/* Note that the AltiVec support is closely modeled after the FP
- * support. Changes to one are likely to be applicable to the
- * other! */
-load_up_altivec:
-/*
- * Disable AltiVec for the task which had AltiVec previously,
- * and save its AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- * On SMP we know the AltiVec units are free, since we give it up every
- * switch. -- Kumar
- */
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- MTMSRD(r5) /* enable use of AltiVec now */
- isync
-/*
- * For SMP, we don't do lazy AltiVec switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altivec in switch_to.
- */
-#ifndef CONFIG_SMP
- tophys(r6,0)
- addis r3,r6,last_task_used_altivec@ha
- lwz r4,last_task_used_altivec@l(r3)
- cmpwi 0,r4,0
- beq 1f
- add r4,r4,r6
- addi r4,r4,THREAD /* want THREAD of last_task_used_altivec */
- SAVE_32VRS(0,r10,r4)
- mfvscr vr0
- li r10,THREAD_VSCR
- stvx vr0,r10,r4
- lwz r5,PT_REGS(r4)
- add r5,r5,r6
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r10,MSR_VEC@h
- andc r4,r4,r10 /* disable altivec for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* enable use of AltiVec after return */
- oris r9,r9,MSR_VEC@h
- mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
- li r4,1
- li r10,THREAD_VSCR
- stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r5
- mtvscr vr0
- REST_32VRS(0,r10,r5)
-#ifndef CONFIG_SMP
- subi r4,r5,THREAD
- sub r4,r4,r6
- stw r4,last_task_used_altivec@l(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- /* we haven't used ctr or xer or lr */
- b fast_exception_return
-
-/*
- * giveup_altivec(tsk)
- * Disable AltiVec for the task given as the argument,
- * and save the AltiVec registers in its thread_struct.
- * Enables AltiVec for use in the kernel on return.
- */
-
- .globl giveup_altivec
-giveup_altivec:
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- SYNC
- MTMSRD(r5) /* enable use of AltiVec now */
- isync
- cmpwi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- lwz r5,PT_REGS(r3)
- cmpwi 0,r5,0
- SAVE_32VRS(0, r4, r3)
- mfvscr vr0
- li r4,THREAD_VSCR
- stvx vr0,r4,r3
- beq 1f
- lwz r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r3,MSR_VEC@h
- andc r4,r4,r3 /* disable AltiVec for previous task */
- stw r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- lis r4,last_task_used_altivec@ha
- stw r5,last_task_used_altivec@l(r4)
-#endif /* CONFIG_SMP */
- blr
-#endif /* CONFIG_ALTIVEC */
/*
* This code is jumped to from the startup code to copy
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 50ef505b8fb6..012505ebd9f9 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -12,8 +12,9 @@
* Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
* Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
*
- * This file contains the low-level support and setup for the
- * PowerPC-64 platform, including trap and interrupt dispatch.
+ * This file contains the entry point for the 64-bit kernel along
+ * with some early initialization code common to all 64-bit powerpc
+ * variants.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -38,36 +39,25 @@
#include <asm/exception.h>
#include <asm/irqflags.h>
-/*
- * We layout physical memory as follows:
- * 0x0000 - 0x00ff : Secondary processor spin code
- * 0x0100 - 0x2fff : pSeries Interrupt prologs
- * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
- * 0x6000 - 0x6fff : Initial (CPU0) segment table
- * 0x7000 - 0x7fff : FWNMI data area
- * 0x8000 - : Early init and support code
- */
-
-/*
- * SPRG Usage
- *
- * Register Definition
- *
- * SPRG0 reserved for hypervisor
- * SPRG1 temp - used to save gpr
- * SPRG2 temp - used to save gpr
- * SPRG3 virt addr of paca
+/* The physical memory is laid out such that the secondary processor
+ * spin code sits at 0x0000...0x00ff. On server, the vectors follow
+ * using the layout described in exceptions-64s.S
*/
/*
* Entering into this code we make the following assumptions:
- * For pSeries:
+ *
+ * For pSeries or server processors:
* 1. The MMU is off & open firmware is running in real mode.
* 2. The kernel is entered at __start
*
* For iSeries:
* 1. The MMU is on (as it always is for iSeries)
* 2. The kernel is entered at system_reset_iSeries
+ *
+ * For Book3E processors:
+ * 1. The MMU is on running in AS0 in a state defined in ePAPR
+ * 2. The kernel is entered at __start
*/
.text
@@ -166,1065 +156,14 @@ exception_marker:
.text
/*
- * This is the start of the interrupt handlers for pSeries
- * This code runs with relocation off.
- * Code from here to __end_interrupts gets copied down to real
- * address 0x100 when we are running a relocatable kernel.
- * Therefore any relative branches in this section must only
- * branch to labels in this section.
- */
- . = 0x100
- .globl __start_interrupts
-__start_interrupts:
-
- STD_EXCEPTION_PSERIES(0x100, system_reset)
-
- . = 0x200
-_machine_check_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
- . = 0x300
- .globl data_access_pSeries
-data_access_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13
-BEGIN_FTR_SECTION
- mtspr SPRN_SPRG2,r12
- mfspr r13,SPRN_DAR
- mfspr r12,SPRN_DSISR
- srdi r13,r13,60
- rlwimi r13,r12,16,0x20
- mfcr r12
- cmpwi r13,0x2c
- beq do_stab_bolted_pSeries
- mtcrf 0x80,r12
- mfspr r12,SPRN_SPRG2
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common)
-
- . = 0x380
- .globl data_access_slb_pSeries
-data_access_slb_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13
- mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_DAR
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
-#ifdef __DISABLED__
- /* Keep that around for when we re-implement dynamic VSIDs */
- cmpdi r3,0
- bge slb_miss_user_pseries
-#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG1
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
-#ifndef CONFIG_RELOCATABLE
- b .slb_miss_realmode
-#else
- /*
- * We can't just use a direct branch to .slb_miss_realmode
- * because the distance from here to there depends on where
- * the kernel ends up being put.
- */
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, .slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- STD_EXCEPTION_PSERIES(0x400, instruction_access)
-
- . = 0x480
- .globl instruction_access_slb_pSeries
-instruction_access_slb_pSeries:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13
- mfspr r13,SPRN_SPRG3 /* get paca address into r13 */
- std r3,PACA_EXSLB+EX_R3(r13)
- mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */
- std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */
- mfcr r9
-#ifdef __DISABLED__
- /* Keep that around for when we re-implement dynamic VSIDs */
- cmpdi r3,0
- bge slb_miss_user_pseries
-#endif /* __DISABLED__ */
- std r10,PACA_EXSLB+EX_R10(r13)
- std r11,PACA_EXSLB+EX_R11(r13)
- std r12,PACA_EXSLB+EX_R12(r13)
- mfspr r10,SPRN_SPRG1
- std r10,PACA_EXSLB+EX_R13(r13)
- mfspr r12,SPRN_SRR1 /* and SRR1 */
-#ifndef CONFIG_RELOCATABLE
- b .slb_miss_realmode
-#else
- mfctr r11
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10, .slb_miss_realmode)
- mtctr r10
- bctr
-#endif
-
- MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt)
- STD_EXCEPTION_PSERIES(0x600, alignment)
- STD_EXCEPTION_PSERIES(0x700, program_check)
- STD_EXCEPTION_PSERIES(0x800, fp_unavailable)
- MASKABLE_EXCEPTION_PSERIES(0x900, decrementer)
- STD_EXCEPTION_PSERIES(0xa00, trap_0a)
- STD_EXCEPTION_PSERIES(0xb00, trap_0b)
-
- . = 0xc00
- .globl system_call_pSeries
-system_call_pSeries:
- HMT_MEDIUM
-BEGIN_FTR_SECTION
- cmpdi r0,0x1ebe
- beq- 1f
-END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)
- mr r9,r13
- mfspr r13,SPRN_SPRG3
- mfspr r11,SPRN_SRR0
- ld r12,PACAKBASE(r13)
- ld r10,PACAKMSR(r13)
- LOAD_HANDLER(r12, system_call_entry)
- mtspr SPRN_SRR0,r12
- mfspr r12,SPRN_SRR1
- mtspr SPRN_SRR1,r10
- rfid
- b . /* prevent speculative execution */
-
-/* Fast LE/BE switch system call */
-1: mfspr r12,SPRN_SRR1
- xori r12,r12,MSR_LE
- mtspr SPRN_SRR1,r12
- rfid /* return to userspace */
- b .
-
- STD_EXCEPTION_PSERIES(0xd00, single_step)
- STD_EXCEPTION_PSERIES(0xe00, trap_0e)
-
- /* We need to deal with the Altivec unavailable exception
- * here which is at 0xf20, thus in the middle of the
- * prolog code of the PerformanceMonitor one. A little
- * trickery is thus necessary
- */
- . = 0xf00
- b performance_monitor_pSeries
-
- . = 0xf20
- b altivec_unavailable_pSeries
-
- . = 0xf40
- b vsx_unavailable_pSeries
-
-#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error)
-#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint)
-#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance)
-#endif /* CONFIG_CBE_RAS */
- STD_EXCEPTION_PSERIES(0x1700, altivec_assist)
-#ifdef CONFIG_CBE_RAS
- HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal)
-#endif /* CONFIG_CBE_RAS */
-
- . = 0x3000
-
-/*** pSeries interrupt support ***/
-
- /* moved from 0xf00 */
- STD_EXCEPTION_PSERIES(., performance_monitor)
- STD_EXCEPTION_PSERIES(., altivec_unavailable)
- STD_EXCEPTION_PSERIES(., vsx_unavailable)
-
-/*
- * An interrupt came in while soft-disabled; clear EE in SRR1,
- * clear paca->hard_enabled and return.
- */
-masked_interrupt:
- stb r10,PACAHARDIRQEN(r13)
- mtcrf 0x80,r9
- ld r9,PACA_EXGEN+EX_R9(r13)
- mfspr r10,SPRN_SRR1
- rldicl r10,r10,48,1 /* clear MSR_EE */
- rotldi r10,r10,16
- mtspr SPRN_SRR1,r10
- ld r10,PACA_EXGEN+EX_R10(r13)
- mfspr r13,SPRN_SPRG1
- rfid
- b .
-
- .align 7
-do_stab_bolted_pSeries:
- mtcrf 0x80,r12
- mfspr r12,SPRN_SPRG2
- EXCEPTION_PROLOG_PSERIES(PACA_EXSLB, .do_stab_bolted)
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Vectors for the FWNMI option. Share common code.
- */
- .globl system_reset_fwnmi
- .align 7
-system_reset_fwnmi:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common)
-
- .globl machine_check_fwnmi
- .align 7
-machine_check_fwnmi:
- HMT_MEDIUM
- mtspr SPRN_SPRG1,r13 /* save r13 */
- EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common)
-
-#endif /* CONFIG_PPC_PSERIES */
-
-#ifdef __DISABLED__
-/*
- * This is used for when the SLB miss handler has to go virtual,
- * which doesn't happen for now anymore but will once we re-implement
- * dynamic VSIDs for shared page tables
- */
-slb_miss_user_pseries:
- std r10,PACA_EXGEN+EX_R10(r13)
- std r11,PACA_EXGEN+EX_R11(r13)
- std r12,PACA_EXGEN+EX_R12(r13)
- mfspr r10,SPRG1
- ld r11,PACA_EXSLB+EX_R9(r13)
- ld r12,PACA_EXSLB+EX_R3(r13)
- std r10,PACA_EXGEN+EX_R13(r13)
- std r11,PACA_EXGEN+EX_R9(r13)
- std r12,PACA_EXGEN+EX_R3(r13)
- clrrdi r12,r13,32
- mfmsr r10
- mfspr r11,SRR0 /* save SRR0 */
- ori r12,r12,slb_miss_user_common@l /* virt addr of handler */
- ori r10,r10,MSR_IR|MSR_DR|MSR_RI
- mtspr SRR0,r12
- mfspr r12,SRR1 /* and SRR1 */
- mtspr SRR1,r10
- rfid
- b . /* prevent spec. execution */
-#endif /* __DISABLED__ */
-
- .align 7
- .globl __end_interrupts
-__end_interrupts:
-
-/*
- * Code from here down to __end_handlers is invoked from the
- * exception prologs above. Because the prologs assemble the
- * addresses of these handlers using the LOAD_HANDLER macro,
- * which uses an addi instruction, these handlers must be in
- * the first 32k of the kernel image.
- */
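The 32k limit described in the comment above falls out of addi's 16-bit signed immediate. A minimal C sketch of that arithmetic, assuming nothing about the real LOAD_HANDLER macro beyond what the comment states (the function name and the assert are illustrative only):

#include <assert.h>
#include <stdint.h>

/* Illustrative only: an addi-based LOAD_HANDLER adds a 16-bit immediate,
 * so a handler's offset from the kernel base must stay below 32KB. */
static uint64_t load_handler_addr(uint64_t kernel_base, uint64_t handler)
{
        int64_t off = (int64_t)(handler - kernel_base);

        assert(off >= 0 && off < 0x8000);  /* reach of a 16-bit signed immediate */
        return kernel_base + off;          /* what the addi reconstructs at run time */
}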
-
-/*** Common interrupt handlers ***/
-
- STD_EXCEPTION_COMMON(0x100, system_reset, .system_reset_exception)
-
- /*
- * Machine check is different because we use a different
- * save area: PACA_EXMC instead of PACA_EXGEN.
- */
- .align 7
- .globl machine_check_common
-machine_check_common:
- EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
- FINISH_NAP
- DISABLE_INTS
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .machine_check_exception
- b .ret_from_except
-
- STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt)
- STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
- STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
- STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
- STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
- STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception)
- STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
-#ifdef CONFIG_ALTIVEC
- STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
-#else
- STD_EXCEPTION_COMMON(0x1700, altivec_assist, .unknown_exception)
-#endif
-#ifdef CONFIG_CBE_RAS
- STD_EXCEPTION_COMMON(0x1200, cbe_system_error, .cbe_system_error_exception)
- STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, .cbe_maintenance_exception)
- STD_EXCEPTION_COMMON(0x1800, cbe_thermal, .cbe_thermal_exception)
-#endif /* CONFIG_CBE_RAS */
-
- .align 7
-system_call_entry:
- b system_call_common
-
-/*
- * Here we have detected that the kernel stack pointer is bad.
- * R9 contains the saved CR, r13 points to the paca,
- * r10 contains the (bad) kernel stack pointer,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * We switch to using an emergency stack, save the registers there,
- * and call kernel_bad_stack(), which panics.
- */
-bad_stack:
- ld r1,PACAEMERGSP(r13)
- subi r1,r1,64+INT_FRAME_SIZE
- std r9,_CCR(r1)
- std r10,GPR1(r1)
- std r11,_NIP(r1)
- std r12,_MSR(r1)
- mfspr r11,SPRN_DAR
- mfspr r12,SPRN_DSISR
- std r11,_DAR(r1)
- std r12,_DSISR(r1)
- mflr r10
- mfctr r11
- mfxer r12
- std r10,_LINK(r1)
- std r11,_CTR(r1)
- std r12,_XER(r1)
- SAVE_GPR(0,r1)
- SAVE_GPR(2,r1)
- SAVE_4GPRS(3,r1)
- SAVE_2GPRS(7,r1)
- SAVE_10GPRS(12,r1)
- SAVE_10GPRS(22,r1)
- lhz r12,PACA_TRAP_SAVE(r13)
- std r12,_TRAP(r1)
- addi r11,r1,INT_FRAME_SIZE
- std r11,0(r1)
- li r12,0
- std r12,0(r11)
- ld r2,PACATOC(r13)
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .kernel_bad_stack
- b 1b
-
-/*
- * Here r13 points to the paca, r9 contains the saved CR,
- * SRR0 and SRR1 are saved in r11 and r12,
- * r9 - r13 are saved in paca->exgen.
- */
- .align 7
- .globl data_access_common
-data_access_common:
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- li r5,0x300
- b .do_hash_page /* Try to handle as hpte fault */
-
- .align 7
- .globl instruction_access_common
-instruction_access_common:
- EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
- ld r3,_NIP(r1)
- andis. r4,r12,0x5820
- li r5,0x400
- b .do_hash_page /* Try to handle as hpte fault */
-
-/*
- * Here is the common SLB miss user that is used when going to virtual
- * mode for SLB misses, that is currently not used
- */
-#ifdef __DISABLED__
- .align 7
- .globl slb_miss_user_common
-slb_miss_user_common:
- mflr r10
- std r3,PACA_EXGEN+EX_DAR(r13)
- stw r9,PACA_EXGEN+EX_CCR(r13)
- std r10,PACA_EXGEN+EX_LR(r13)
- std r11,PACA_EXGEN+EX_SRR0(r13)
- bl .slb_allocate_user
-
- ld r10,PACA_EXGEN+EX_LR(r13)
- ld r3,PACA_EXGEN+EX_R3(r13)
- lwz r9,PACA_EXGEN+EX_CCR(r13)
- ld r11,PACA_EXGEN+EX_SRR0(r13)
- mtlr r10
- beq- slb_miss_fault
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- unrecov_user_slb
- mfmsr r10
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
-.machine pop
-
- clrrdi r10,r10,2 /* clear RI before setting SRR0/1 */
- mtmsrd r10,1
-
- mtspr SRR0,r11
- mtspr SRR1,r12
-
- ld r9,PACA_EXGEN+EX_R9(r13)
- ld r10,PACA_EXGEN+EX_R10(r13)
- ld r11,PACA_EXGEN+EX_R11(r13)
- ld r12,PACA_EXGEN+EX_R12(r13)
- ld r13,PACA_EXGEN+EX_R13(r13)
- rfid
- b .
-
-slb_miss_fault:
- EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
- ld r4,PACA_EXGEN+EX_DAR(r13)
- li r5,0
- std r4,_DAR(r1)
- std r5,_DSISR(r1)
- b handle_page_fault
-
-unrecov_user_slb:
- EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-#endif /* __DISABLED__ */
-
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r12 contain the saved SRR1, SRR0 is still ready for return
- * r3 has the faulting address
- * r9 - r13 are saved in paca->exslb.
- * r3 is saved in paca->slb_r3
- * We assume we aren't going to take any exceptions during this procedure.
- */
-_GLOBAL(slb_miss_realmode)
- mflr r10
-#ifdef CONFIG_RELOCATABLE
- mtctr r11
-#endif
-
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r10,PACA_EXSLB+EX_LR(r13) /* save LR */
-
- bl .slb_allocate_realmode
-
- /* All done -- return from exception. */
-
- ld r10,PACA_EXSLB+EX_LR(r13)
- ld r3,PACA_EXSLB+EX_R3(r13)
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- ld r11,PACALPPACAPTR(r13)
- ld r11,LPPACASRR0(r11) /* get SRR0 value */
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
-
- mtlr r10
-
- andi. r10,r12,MSR_RI /* check for unrecoverable exception */
- beq- 2f
-
-.machine push
-.machine "power4"
- mtcrf 0x80,r9
- mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
-.machine pop
-
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-2:
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
- b unrecov_slb
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
- mfspr r11,SPRN_SRR0
- ld r10,PACAKBASE(r13)
- LOAD_HANDLER(r10,unrecov_slb)
- mtspr SPRN_SRR0,r10
- ld r10,PACAKMSR(r13)
- mtspr SPRN_SRR1,r10
- rfid
- b .
-
-unrecov_slb:
- EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
- DISABLE_INTS
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
- .align 7
- .globl hardware_interrupt_common
- .globl hardware_interrupt_entry
-hardware_interrupt_common:
- EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
- FINISH_NAP
-hardware_interrupt_entry:
- DISABLE_INTS
-BEGIN_FTR_SECTION
- bl .ppc64_runlatch_on
-END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_IRQ
- b .ret_from_except_lite
-
-#ifdef CONFIG_PPC_970_NAP
-power4_fixup_nap:
- andc r9,r9,r10
- std r9,TI_LOCAL_FLAGS(r11)
- ld r10,_LINK(r1) /* make idle task do the */
- std r10,_NIP(r1) /* equivalent of a blr */
- blr
-#endif
-
- .align 7
- .globl alignment_common
-alignment_common:
- mfspr r10,SPRN_DAR
- std r10,PACA_EXGEN+EX_DAR(r13)
- mfspr r10,SPRN_DSISR
- stw r10,PACA_EXGEN+EX_DSISR(r13)
- EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
- ld r3,PACA_EXGEN+EX_DAR(r13)
- lwz r4,PACA_EXGEN+EX_DSISR(r13)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .alignment_exception
- b .ret_from_except
-
- .align 7
- .globl program_check_common
-program_check_common:
- EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .program_check_exception
- b .ret_from_except
-
- .align 7
- .globl fp_unavailable_common
-fp_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
- bne 1f /* if from user, just load it up */
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .kernel_fp_unavailable_exception
- BUG_OPCODE
-1: bl .load_up_fpu
- b fast_exception_return
-
- .align 7
- .globl altivec_unavailable_common
-altivec_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
-#ifdef CONFIG_ALTIVEC
-BEGIN_FTR_SECTION
- beq 1f
- bl .load_up_altivec
- b fast_exception_return
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
-#endif
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .altivec_unavailable_exception
- b .ret_from_except
-
- .align 7
- .globl vsx_unavailable_common
-vsx_unavailable_common:
- EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- bne .load_up_vsx
-1:
-END_FTR_SECTION_IFSET(CPU_FTR_VSX)
-#endif
- bl .save_nvgprs
- addi r3,r1,STACK_FRAME_OVERHEAD
- ENABLE_INTS
- bl .vsx_unavailable_exception
- b .ret_from_except
-
- .align 7
- .globl __end_handlers
-__end_handlers:
-
-/*
- * Return from an exception with minimal checks.
- * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
- * If interrupts have been enabled, or anything has been
- * done that might have changed the scheduling status of
- * any task or sent any task a signal, you should use
- * ret_from_except or ret_from_except_lite instead of this.
+ * On server, we include the exception vectors code here as it
+ * relies on absolute addressing which is only possible within
+ * this compilation unit
*/
-fast_exc_return_irq: /* restores irq state too */
- ld r3,SOFTE(r1)
- TRACE_AND_RESTORE_IRQ(r3);
- ld r12,_MSR(r1)
- rldicl r4,r12,49,63 /* get MSR_EE to LSB */
- stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
- b 1f
-
- .globl fast_exception_return
-fast_exception_return:
- ld r12,_MSR(r1)
-1: ld r11,_NIP(r1)
- andi. r3,r12,MSR_RI /* check if RI is set */
- beq- unrecov_fer
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
- andi. r3,r12,MSR_PR
- beq 2f
- ACCOUNT_CPU_USER_EXIT(r3, r4)
-2:
+#ifdef CONFIG_PPC_BOOK3S
+#include "exceptions-64s.S"
#endif
- ld r3,_CCR(r1)
- ld r4,_LINK(r1)
- ld r5,_CTR(r1)
- ld r6,_XER(r1)
- mtcr r3
- mtlr r4
- mtctr r5
- mtxer r6
- REST_GPR(0, r1)
- REST_8GPRS(2, r1)
-
- mfmsr r10
- rldicl r10,r10,48,1 /* clear EE */
- rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
- mtmsrd r10,1
-
- mtspr SPRN_SRR1,r12
- mtspr SPRN_SRR0,r11
- REST_4GPRS(10, r1)
- ld r1,GPR1(r1)
- rfid
- b . /* prevent speculative execution */
-
-unrecov_fer:
- bl .save_nvgprs
-1: addi r3,r1,STACK_FRAME_OVERHEAD
- bl .unrecoverable_exception
- b 1b
-
-#ifdef CONFIG_ALTIVEC
-/*
- * load_up_altivec(unused, unused, tsk)
- * Disable VMX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- * On SMP we know the VMX is free, since we give it up every
- * switch (ie, no lazy save of the vector registers).
- * On entry: r13 == 'current' && last_task_used_altivec != 'current'
- */
-_STATIC(load_up_altivec)
- mfmsr r5 /* grab the current MSR */
- oris r5,r5,MSR_VEC@h
- mtmsrd r5 /* enable use of VMX now */
- isync
-
-/*
- * For SMP, we don't do lazy VMX switching because it just gets too
- * horrendously complex, especially when a task switches from one CPU
- * to another. Instead we call giveup_altivec in switch_to.
- * VRSAVE isn't dealt with here, that is done in the normal context
- * switch code. Note that we could rely on vrsave value to eventually
- * avoid saving all of the VREGs here...
- */
-#ifndef CONFIG_SMP
- ld r3,last_task_used_altivec@got(r2)
- ld r4,0(r3)
- cmpdi 0,r4,0
- beq 1f
- /* Save VMX state to last_task_used_altivec's THREAD struct */
- addi r4,r4,THREAD
- SAVE_32VRS(0,r5,r4)
- mfvscr vr0
- li r10,THREAD_VSCR
- stvx vr0,r10,r4
- /* Disable VMX for last_task_used_altivec */
- ld r5,PT_REGS(r4)
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r6,MSR_VEC@h
- andc r4,r4,r6
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- /* Hack: if we get an altivec unavailable trap with VRSAVE
- * set to all zeros, we assume this is a broken application
- * that fails to set it properly, and thus we switch it to
- * all 1's
- */
- mfspr r4,SPRN_VRSAVE
- cmpdi 0,r4,0
- bne+ 1f
- li r4,-1
- mtspr SPRN_VRSAVE,r4
-1:
- /* enable use of VMX after return */
- ld r4,PACACURRENT(r13)
- addi r5,r4,THREAD /* Get THREAD */
- oris r12,r12,MSR_VEC@h
- std r12,_MSR(r1)
- li r4,1
- li r10,THREAD_VSCR
- stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r5
- mtvscr vr0
- REST_32VRS(0,r4,r5)
-#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
- subi r4,r5,THREAD /* Back to 'current' */
- std r4,0(r3)
-#endif /* CONFIG_SMP */
- /* restore registers and return */
- blr
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * load_up_vsx(unused, unused, tsk)
- * Disable VSX for the task which had it previously,
- * and save its vector registers in its thread_struct.
- * Reuse the fp and vsx saves, but first check to see if they have
- * been saved already.
- * On entry: r13 == 'current' && last_task_used_vsx != 'current'
- */
-_STATIC(load_up_vsx)
-/* Load FP and VSX registers if they haven't been done yet */
- andi. r5,r12,MSR_FP
- beql+ load_up_fpu /* skip if already loaded */
- andis. r5,r12,MSR_VEC@h
- beql+ load_up_altivec /* skip if already loaded */
-
-#ifndef CONFIG_SMP
- ld r3,last_task_used_vsx@got(r2)
- ld r4,0(r3)
- cmpdi 0,r4,0
- beq 1f
- /* Disable VSX for last_task_used_vsx */
- addi r4,r4,THREAD
- ld r5,PT_REGS(r4)
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r6,MSR_VSX@h
- andc r6,r4,r6
- std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#endif /* CONFIG_SMP */
- ld r4,PACACURRENT(r13)
- addi r4,r4,THREAD /* Get THREAD */
- li r6,1
- stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
- /* enable use of VSX after return */
- oris r12,r12,MSR_VSX@h
- std r12,_MSR(r1)
-#ifndef CONFIG_SMP
- /* Update last_task_used_math to 'current' */
- ld r4,PACACURRENT(r13)
- std r4,0(r3)
-#endif /* CONFIG_SMP */
- b fast_exception_return
-#endif /* CONFIG_VSX */
-
-/*
- * Hash table stuff
- */
- .align 7
-_STATIC(do_hash_page)
- std r3,_DAR(r1)
- std r4,_DSISR(r1)
-
- andis. r0,r4,0xa450 /* weird error? */
- bne- handle_page_fault /* if not, try to insert a HPTE */
-BEGIN_FTR_SECTION
- andis. r0,r4,0x0020 /* Is it a segment table fault? */
- bne- do_ste_alloc /* If so handle it */
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
-
- /*
- * On iSeries, we soft-disable interrupts here, then
- * hard-enable interrupts so that the hash_page code can spin on
- * the hash_table_lock without problems on a shared processor.
- */
- DISABLE_INTS
-
- /*
- * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
- * and will clobber volatile registers when irq tracing is enabled
- * so we need to reload them. It may be possible to be smarter here
- * and move the irq tracing elsewhere but let's keep it simple for
- * now
- */
-#ifdef CONFIG_TRACE_IRQFLAGS
- ld r3,_DAR(r1)
- ld r4,_DSISR(r1)
- ld r5,_TRAP(r1)
- ld r12,_MSR(r1)
- clrrdi r5,r5,4
-#endif /* CONFIG_TRACE_IRQFLAGS */
- /*
- * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
- * accessing a userspace segment (even from the kernel). We assume
- * kernel addresses always have the high bit set.
- */
- rlwinm r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
- rotldi r0,r3,15 /* Move high bit into MSR_PR posn */
- orc r0,r12,r0 /* MSR_PR | ~high_bit */
- rlwimi r4,r0,32-13,30,30 /* becomes _PAGE_USER access bit */
- ori r4,r4,1 /* add _PAGE_PRESENT */
- rlwimi r4,r5,22+2,31-2,31-2 /* Set _PAGE_EXEC if trap is 0x400 */
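The rotate-and-mask sequence above is dense, so here is a hedged C restatement of the rule from the comment: a store fault asks for write access, user mode or an address without the kernel high bit adds the user-access bit, and a 0x400 trap adds execute. The flag and bit constants below are illustrative placeholders, not the kernel's definitions:

#include <stdint.h>

#define XPAGE_PRESENT  0x001u          /* illustrative values only */
#define XPAGE_USER     0x002u
#define XPAGE_RW       0x004u
#define XPAGE_EXEC     0x008u
#define XDSISR_STORE   0x02000000u     /* "access was a store" bit (assumed) */
#define XMSR_PR        0x4000u         /* problem-state (user mode) bit (assumed) */

static unsigned int hash_access_flags(uint64_t addr, uint32_t dsisr,
                                      uint64_t msr, unsigned int trap)
{
        unsigned int access = XPAGE_PRESENT;

        if (dsisr & XDSISR_STORE)               /* store fault -> needs RW */
                access |= XPAGE_RW;
        if ((msr & XMSR_PR) || !(addr >> 63))   /* user mode, or no kernel high bit */
                access |= XPAGE_USER;
        if (trap == 0x400)                      /* instruction access -> exec */
                access |= XPAGE_EXEC;
        return access;
}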
-
- /*
- * r3 contains the faulting address
- * r4 contains the required access permissions
- * r5 contains the trap number
- *
- * at return r3 = 0 for success
- */
- bl .hash_page /* build HPTE if possible */
- cmpdi r3,0 /* see if hash_page succeeded */
-
-BEGIN_FW_FTR_SECTION
- /*
- * If we had interrupts soft-enabled at the point where the
- * DSI/ISI occurred, and an interrupt came in during hash_page,
- * handle it now.
- * We jump to ret_from_except_lite rather than fast_exception_return
- * because ret_from_except_lite will check for and handle pending
- * interrupts if necessary.
- */
- beq 13f
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-
-BEGIN_FW_FTR_SECTION
- /*
- * Here we have interrupts hard-disabled, so it is sufficient
- * to restore paca->{soft,hard}_enable and get out.
- */
- beq fast_exc_return_irq /* Return from exception on success */
-END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
-
- /* For a hash failure, we don't bother re-enabling interrupts */
- ble- 12f
-
- /*
- * hash_page couldn't handle it, set soft interrupt enable back
- * to what it was before the trap. Note that .raw_local_irq_restore
- * handles any interrupts pending at this point.
- */
- ld r3,SOFTE(r1)
- TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
- bl .raw_local_irq_restore
- b 11f
-
-/* Here we have a page fault that hash_page can't handle. */
-handle_page_fault:
- ENABLE_INTS
-11: ld r4,_DAR(r1)
- ld r5,_DSISR(r1)
- addi r3,r1,STACK_FRAME_OVERHEAD
- bl .do_page_fault
- cmpdi r3,0
- beq+ 13f
- bl .save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- lwz r4,_DAR(r1)
- bl .bad_page_fault
- b .ret_from_except
-
-13: b .ret_from_except_lite
-
-/* We have a page fault that hash_page could handle but HV refused
- * the PTE insertion
- */
-12: bl .save_nvgprs
- mr r5,r3
- addi r3,r1,STACK_FRAME_OVERHEAD
- ld r4,_DAR(r1)
- bl .low_hash_fault
- b .ret_from_except
-
- /* here we have a segment miss */
-do_ste_alloc:
- bl .ste_allocate /* try to insert stab entry */
- cmpdi r3,0
- bne- handle_page_fault
- b fast_exception_return
-
-/*
- * r13 points to the PACA, r9 contains the saved CR,
- * r11 and r12 contain the saved SRR0 and SRR1.
- * r9 - r13 are saved in paca->exslb.
- * We assume we aren't going to take any exceptions during this procedure.
- * We assume (DAR >> 60) == 0xc.
- */
- .align 7
-_GLOBAL(do_stab_bolted)
- stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
- std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
-
- /* Hash to the primary group */
- ld r10,PACASTABVIRT(r13)
- mfspr r11,SPRN_DAR
- srdi r11,r11,28
- rldimi r10,r11,7,52 /* r10 = first ste of the group */
-
- /* Calculate VSID */
- /* This is a kernel address, so protovsid = ESID */
- ASM_VSID_SCRAMBLE(r11, r9, 256M)
- rldic r9,r11,12,16 /* r9 = vsid << 12 */
-
- /* Search the primary group for a free entry */
-1: ld r11,0(r10) /* Test valid bit of the current ste */
- andi. r11,r11,0x80
- beq 2f
- addi r10,r10,16
- andi. r11,r10,0x70
- bne 1b
-
- /* Stick for only searching the primary group for now. */
- /* At least for now, we use a very simple random castout scheme */
- /* Use the TB as a random number ; OR in 1 to avoid entry 0 */
- mftb r11
- rldic r11,r11,4,57 /* r11 = (r11 << 4) & 0x70 */
- ori r11,r11,0x10
-
- /* r10 currently points to an ste one past the group of interest */
- /* make it point to the randomly selected entry */
- subi r10,r10,128
- or r10,r10,r11 /* r10 is the entry to invalidate */
-
- isync /* mark the entry invalid */
- ld r11,0(r10)
- rldicl r11,r11,56,1 /* clear the valid bit */
- rotldi r11,r11,8
- std r11,0(r10)
- sync
-
- clrrdi r11,r11,28 /* Get the esid part of the ste */
- slbie r11
-
-2: std r9,8(r10) /* Store the vsid part of the ste */
- eieio
-
- mfspr r11,SPRN_DAR /* Get the new esid */
- clrrdi r11,r11,28 /* Permits a full 32b of ESID */
- ori r11,r11,0x90 /* Turn on valid and kp */
- std r11,0(r10) /* Put new entry back into the stab */
-
- sync
-
- /* All done -- return from exception. */
- lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
- ld r11,PACA_EXSLB+EX_SRR0(r13) /* get saved SRR0 */
-
- andi. r10,r12,MSR_RI
- beq- unrecov_slb
-
- mtcrf 0x80,r9 /* restore CR */
-
- mfmsr r10
- clrrdi r10,r10,2
- mtmsrd r10,1
-
- mtspr SPRN_SRR0,r11
- mtspr SPRN_SRR1,r12
- ld r9,PACA_EXSLB+EX_R9(r13)
- ld r10,PACA_EXSLB+EX_R10(r13)
- ld r11,PACA_EXSLB+EX_R11(r13)
- ld r12,PACA_EXSLB+EX_R12(r13)
- ld r13,PACA_EXSLB+EX_R13(r13)
- rfid
- b . /* prevent speculative execution */
-
-/*
- * Space for CPU0's segment table.
- *
- * On iSeries, the hypervisor must fill in at least one entry before
- * we get control (with relocate on). The address is given to the hv
- * as a page number (see xLparMap below), so this must be at a
- * fixed address (the linker can't compute (u64)&initial_stab >>
- * PAGE_SHIFT).
- */
- . = STAB0_OFFSET /* 0x6000 */
- .globl initial_stab
-initial_stab:
- .space 4096
-
-#ifdef CONFIG_PPC_PSERIES
-/*
- * Data area reserved for FWNMI option.
- * This address (0x7000) is fixed by the RPA.
- */
- .= 0x7000
- .globl fwnmi_data_area
-fwnmi_data_area:
-#endif /* CONFIG_PPC_PSERIES */
-
- /* iSeries does not use the FWNMI stuff, so it is safe to put
- * this here, even if we later allow kernels that will boot on
- * both pSeries and iSeries */
-#ifdef CONFIG_PPC_ISERIES
- . = LPARMAP_PHYS
- .globl xLparMap
-xLparMap:
- .quad HvEsidsToMap /* xNumberEsids */
- .quad HvRangesToMap /* xNumberRanges */
- .quad STAB0_PAGE /* xSegmentTableOffs */
- .zero 40 /* xRsvd */
- /* xEsids (HvEsidsToMap entries of 2 quads) */
- .quad PAGE_OFFSET_ESID /* xKernelEsid */
- .quad PAGE_OFFSET_VSID /* xKernelVsid */
- .quad VMALLOC_START_ESID /* xKernelEsid */
- .quad VMALLOC_START_VSID /* xKernelVsid */
- /* xRanges (HvRangesToMap entries of 3 quads) */
- .quad HvPagesToMap /* xPages */
- .quad 0 /* xOffset */
- .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
-
-#endif /* CONFIG_PPC_ISERIES */
-
-#ifdef CONFIG_PPC_PSERIES
- . = 0x8000
-#endif /* CONFIG_PPC_PSERIES */
/*
* On pSeries and most other platforms, secondary processors spin
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 95f39f1e68d4..5f9febc8d143 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -256,7 +256,7 @@ label:
* off DE in the DSRR1 value and clearing the debug status. \
*/ \
mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
- andis. r10,r10,DBSR_IC@h; \
+ andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
lis r10,KERNELBASE@h; /* check if exception in vectors */ \
@@ -271,7 +271,7 @@ label:
\
/* here it looks like we got an inappropriate debug exception. */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CDRR1 value */ \
- lis r10,DBSR_IC@h; /* clear the IC event */ \
+ lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \
mtspr SPRN_DBSR,r10; \
/* restore state and get out */ \
lwz r10,_CCR(r11); \
@@ -309,7 +309,7 @@ label:
* off DE in the CSRR1 value and clearing the debug status. \
*/ \
mfspr r10,SPRN_DBSR; /* check single-step/branch taken */ \
- andis. r10,r10,DBSR_IC@h; \
+ andis. r10,r10,(DBSR_IC|DBSR_BT)@h; \
beq+ 2f; \
\
lis r10,KERNELBASE@h; /* check if exception in vectors */ \
@@ -317,14 +317,14 @@ label:
cmplw r12,r10; \
blt+ 2f; /* addr below exception vectors */ \
\
- lis r10,DebugCrit@h; \
+ lis r10,DebugCrit@h; \
ori r10,r10,DebugCrit@l; \
cmplw r12,r10; \
bgt+ 2f; /* addr above exception vectors */ \
\
/* here it looks like we got an inappropriate debug exception. */ \
1: rlwinm r9,r9,0,~MSR_DE; /* clear DE in the CSRR1 value */ \
- lis r10,DBSR_IC@h; /* clear the IC event */ \
+ lis r10,(DBSR_IC|DBSR_BT)@h; /* clear the IC event */ \
mtspr SPRN_DBSR,r10; \
/* restore state and get out */ \
lwz r10,_CCR(r11); \
diff --git a/arch/powerpc/kernel/init_task.c b/arch/powerpc/kernel/init_task.c
index 688b329800bd..ffc4253fef55 100644
--- a/arch/powerpc/kernel/init_task.c
+++ b/arch/powerpc/kernel/init_task.c
@@ -9,10 +9,6 @@
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
-struct mm_struct init_mm = INIT_MM(init_mm);
-
-EXPORT_SYMBOL(init_mm);
-
/*
* Initial thread structure.
*
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8c1a4966867e..f7f376ea7b17 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -53,6 +53,7 @@
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
+#include <linux/perf_counter.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -117,6 +118,7 @@ notrace void raw_local_irq_restore(unsigned long en)
if (!en)
return;
+#ifdef CONFIG_PPC_STD_MMU_64
if (firmware_has_feature(FW_FEATURE_ISERIES)) {
/*
* Do we need to disable preemption here? Not really: in the
@@ -134,6 +136,12 @@ notrace void raw_local_irq_restore(unsigned long en)
if (local_paca->lppaca_ptr->int_dword.any_int)
iseries_handle_interrupts();
}
+#endif /* CONFIG_PPC_STD_MMU_64 */
+
+ if (test_perf_counter_pending()) {
+ clear_perf_counter_pending();
+ perf_counter_do_pending();
+ }
/*
* if (get_paca()->hard_enabled) return;
@@ -248,77 +256,84 @@ void fixup_irqs(cpumask_t map)
}
#endif
-void do_IRQ(struct pt_regs *regs)
-{
- struct pt_regs *old_regs = set_irq_regs(regs);
- unsigned int irq;
#ifdef CONFIG_IRQSTACKS
+static inline void handle_one_irq(unsigned int irq)
+{
struct thread_info *curtp, *irqtp;
-#endif
+ unsigned long saved_sp_limit;
+ struct irq_desc *desc;
- irq_enter();
+ /* Switch to the irq stack to handle this */
+ curtp = current_thread_info();
+ irqtp = hardirq_ctx[smp_processor_id()];
+
+ if (curtp == irqtp) {
+ /* We're already on the irq stack, just handle it */
+ generic_handle_irq(irq);
+ return;
+ }
+
+ desc = irq_desc + irq;
+ saved_sp_limit = current->thread.ksp_limit;
+
+ irqtp->task = curtp->task;
+ irqtp->flags = 0;
+
+ /* Copy the softirq bits in preempt_count so that the
+ * softirq checks work in the hardirq context. */
+ irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
+ (curtp->preempt_count & SOFTIRQ_MASK);
+
+ current->thread.ksp_limit = (unsigned long)irqtp +
+ _ALIGN_UP(sizeof(struct thread_info), 16);
+
+ call_handle_irq(irq, desc, irqtp, desc->handle_irq);
+ current->thread.ksp_limit = saved_sp_limit;
+ irqtp->task = NULL;
+ /* Set any flag that may have been set on the
+ * alternate stack
+ */
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+}
+#else
+static inline void handle_one_irq(unsigned int irq)
+{
+ generic_handle_irq(irq);
+}
+#endif
+
+static inline void check_stack_overflow(void)
+{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
- /* Debugging check for stack overflow: is there less than 2KB free? */
- {
- long sp;
+ long sp;
- sp = __get_SP() & (THREAD_SIZE-1);
+ sp = __get_SP() & (THREAD_SIZE-1);
- if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
- printk("do_IRQ: stack overflow: %ld\n",
- sp - sizeof(struct thread_info));
- dump_stack();
- }
+ /* check for stack overflow: is there less than 2KB free? */
+ if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
}
#endif
+}
- /*
- * Every platform is required to implement ppc_md.get_irq.
- * This function will either return an irq number or NO_IRQ to
- * indicate there are no more pending.
- * The value NO_IRQ_IGNORE is for buggy hardware and means that this
- * IRQ has already been handled. -- Tom
- */
- irq = ppc_md.get_irq();
+void do_IRQ(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ unsigned int irq;
- if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) {
-#ifdef CONFIG_IRQSTACKS
- /* Switch to the irq stack to handle this */
- curtp = current_thread_info();
- irqtp = hardirq_ctx[smp_processor_id()];
- if (curtp != irqtp) {
- struct irq_desc *desc = irq_desc + irq;
- void *handler = desc->handle_irq;
- unsigned long saved_sp_limit = current->thread.ksp_limit;
- if (handler == NULL)
- handler = &__do_IRQ;
- irqtp->task = curtp->task;
- irqtp->flags = 0;
-
- /* Copy the softirq bits in preempt_count so that the
- * softirq checks work in the hardirq context.
- */
- irqtp->preempt_count =
- (irqtp->preempt_count & ~SOFTIRQ_MASK) |
- (curtp->preempt_count & SOFTIRQ_MASK);
+ irq_enter();
- current->thread.ksp_limit = (unsigned long)irqtp +
- _ALIGN_UP(sizeof(struct thread_info), 16);
- call_handle_irq(irq, desc, irqtp, handler);
- current->thread.ksp_limit = saved_sp_limit;
- irqtp->task = NULL;
+ check_stack_overflow();
+ irq = ppc_md.get_irq();
- /* Set any flag that may have been set on the
- * alternate stack
- */
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
- } else
-#endif
- generic_handle_irq(irq);
- } else if (irq != NO_IRQ_IGNORE)
+ if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
+ handle_one_irq(irq);
+ else if (irq != NO_IRQ_IGNORE)
/* That's not SMP safe ... but who cares ? */
ppc_spurious_interrupts++;
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 78b3f7840ade..2419cc706ff1 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -169,6 +169,9 @@ struct hvcall_ppp_data {
u8 unallocated_weight;
u16 active_procs_in_pool;
u16 active_system_procs;
+ u16 phys_platform_procs;
+ u32 max_proc_cap_avail;
+ u32 entitled_proc_cap_avail;
};
/*
@@ -190,13 +193,18 @@ struct hvcall_ppp_data {
* XX - Unallocated Variable Processor Capacity Weight.
* XXXX - Active processors in Physical Processor Pool.
* XXXX - Processors active on platform.
+ * R8 (QQQQRRRRRRSSSSSS). if ibm,partition-performance-parameters-level >= 1
+ * XXXX - Physical platform procs allocated to virtualization.
+ * XXXXXX - Max procs capacity % available to the partitions pool.
+ * XXXXXX - Entitled procs capacity % available to the
+ * partitions pool.
*/
static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
{
unsigned long rc;
- unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
+ unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
- rc = plpar_hcall(H_GET_PPP, retbuf);
+ rc = plpar_hcall9(H_GET_PPP, retbuf);
ppp_data->entitlement = retbuf[0];
ppp_data->unallocated_entitlement = retbuf[1];
@@ -210,6 +218,10 @@ static unsigned int h_get_ppp(struct hvcall_ppp_data *ppp_data)
ppp_data->active_procs_in_pool = (retbuf[3] >> 2 * 8) & 0xffff;
ppp_data->active_system_procs = retbuf[3] & 0xffff;
+ ppp_data->phys_platform_procs = retbuf[4] >> 6 * 8;
+ ppp_data->max_proc_cap_avail = (retbuf[4] >> 3 * 8) & 0xffffff;
+ ppp_data->entitled_proc_cap_avail = retbuf[4] & 0xffffff;
+
return rc;
}
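To make the R8 layout documented above concrete, a standalone sketch of the same unpacking with a made-up return value (the shifts mirror the code; the value and printed numbers are purely illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t r8 = 0x0010000100000002ULL;                     /* invented R8 value */
        unsigned int phys_procs   = r8 >> (6 * 8);               /* top 16 bits  */
        unsigned int max_cap      = (r8 >> (3 * 8)) & 0xffffff;  /* next 24 bits */
        unsigned int entitled_cap = r8 & 0xffffff;               /* low 24 bits  */

        printf("%u %u %u\n", phys_procs, max_cap, entitled_cap); /* 16 256 2 */
        return 0;
}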
@@ -234,6 +246,8 @@ static unsigned h_pic(unsigned long *pool_idle_time,
static void parse_ppp_data(struct seq_file *m)
{
struct hvcall_ppp_data ppp_data;
+ struct device_node *root;
+ const int *perf_level;
int rc;
rc = h_get_ppp(&ppp_data);
@@ -267,6 +281,28 @@ static void parse_ppp_data(struct seq_file *m)
seq_printf(m, "capped=%d\n", ppp_data.capped);
seq_printf(m, "unallocated_capacity=%lld\n",
ppp_data.unallocated_entitlement);
+
+ /* The last bits of information returned from h_get_ppp are only
+ * valid if the ibm,partition-performance-parameters-level
+ * property is >= 1.
+ */
+ root = of_find_node_by_path("/");
+ if (root) {
+ perf_level = of_get_property(root,
+ "ibm,partition-performance-parameters-level",
+ NULL);
+ if (perf_level && (*perf_level >= 1)) {
+ seq_printf(m,
+ "physical_procs_allocated_to_virtualization=%d\n",
+ ppp_data.phys_platform_procs);
+ seq_printf(m, "max_proc_capacity_available=%d\n",
+ ppp_data.max_proc_cap_avail);
+ seq_printf(m, "entitled_proc_capacity_available=%d\n",
+ ppp_data.entitled_proc_cap_avail);
+ }
+
+ of_node_put(root);
+ }
}
/**
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index b9530b2395a2..a5cf9c1356a6 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -457,98 +457,6 @@ _GLOBAL(disable_kernel_fp)
isync
blr
-#ifdef CONFIG_ALTIVEC
-
-#if 0 /* this has no callers for now */
-/*
- * disable_kernel_altivec()
- * Disable the VMX.
- */
-_GLOBAL(disable_kernel_altivec)
- mfmsr r3
- rldicl r0,r3,(63-MSR_VEC_LG),1
- rldicl r3,r0,(MSR_VEC_LG+1),0
- mtmsrd r3 /* disable use of VMX now */
- isync
- blr
-#endif /* 0 */
-
-/*
- * giveup_altivec(tsk)
- * Disable VMX for the task given as the argument,
- * and save the vector registers in its thread_struct.
- * Enables the VMX for use in the kernel on return.
- */
-_GLOBAL(giveup_altivec)
- mfmsr r5
- oris r5,r5,MSR_VEC@h
- mtmsrd r5 /* enable use of VMX now */
- isync
- cmpdi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- ld r5,PT_REGS(r3)
- cmpdi 0,r5,0
- SAVE_32VRS(0,r4,r3)
- mfvscr vr0
- li r4,THREAD_VSCR
- stvx vr0,r4,r3
- beq 1f
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-#ifdef CONFIG_VSX
-BEGIN_FTR_SECTION
- lis r3,(MSR_VEC|MSR_VSX)@h
-FTR_SECTION_ELSE
- lis r3,MSR_VEC@h
-ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
-#else
- lis r3,MSR_VEC@h
-#endif
- andc r4,r4,r3 /* disable FP for previous task */
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- ld r4,last_task_used_altivec@got(r2)
- std r5,0(r4)
-#endif /* CONFIG_SMP */
- blr
-
-#endif /* CONFIG_ALTIVEC */
-
-#ifdef CONFIG_VSX
-/*
- * __giveup_vsx(tsk)
- * Disable VSX for the task given as the argument.
- * Does NOT save vsx registers.
- * Enables the VSX for use in the kernel on return.
- */
-_GLOBAL(__giveup_vsx)
- mfmsr r5
- oris r5,r5,MSR_VSX@h
- mtmsrd r5 /* enable use of VSX now */
- isync
-
- cmpdi 0,r3,0
- beqlr- /* if no previous owner, done */
- addi r3,r3,THREAD /* want THREAD of task */
- ld r5,PT_REGS(r3)
- cmpdi 0,r5,0
- beq 1f
- ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
- lis r3,MSR_VSX@h
- andc r4,r4,r3 /* disable VSX for previous task */
- std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
-1:
-#ifndef CONFIG_SMP
- li r5,0
- ld r4,last_task_used_vsx@got(r2)
- std r5,0(r4)
-#endif /* CONFIG_SMP */
- blr
-
-#endif /* CONFIG_VSX */
-
/* kexec_wait(phys_cpu)
*
* wait for the flag to change, indicating this kernel is going away but
diff --git a/arch/powerpc/kernel/module.c b/arch/powerpc/kernel/module.c
index 43e7e3a7f130..477c663e0140 100644
--- a/arch/powerpc/kernel/module.c
+++ b/arch/powerpc/kernel/module.c
@@ -43,8 +43,6 @@ void *module_alloc(unsigned long size)
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
- /* FIXME: If module_region == mod->init_region, trim exception
- table entries. */
}
static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c744b327bcab..e9962c7f8a09 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -18,6 +18,8 @@
* field correctly */
extern unsigned long __toc_start;
+#ifdef CONFIG_PPC_BOOK3S
+
/*
* The structure which the hypervisor knows about - this structure
* should not cross a page boundary. The vpa_init/register_vpa call
@@ -41,6 +43,10 @@ struct lppaca lppaca[] = {
},
};
+#endif /* CONFIG_PPC_BOOK3S */
+
+#ifdef CONFIG_PPC_STD_MMU_64
+
/*
* 3 persistent SLBs are registered here. The buffer will be zero
 * initially, hence will all be invalid until we actually write them.
@@ -52,6 +58,8 @@ struct slb_shadow slb_shadow[] __cacheline_aligned = {
},
};
+#endif /* CONFIG_PPC_STD_MMU_64 */
+
/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
* hypervisor and Linux.
@@ -77,15 +85,19 @@ void __init initialise_pacas(void)
for (cpu = 0; cpu < NR_CPUS; cpu++) {
struct paca_struct *new_paca = &paca[cpu];
+#ifdef CONFIG_PPC_BOOK3S
new_paca->lppaca_ptr = &lppaca[cpu];
+#endif
new_paca->lock_token = 0x8000;
new_paca->paca_index = cpu;
new_paca->kernel_toc = kernel_toc;
new_paca->kernelbase = (unsigned long) _stext;
new_paca->kernel_msr = MSR_KERNEL;
new_paca->hw_cpu_id = 0xffff;
- new_paca->slb_shadow_ptr = &slb_shadow[cpu];
new_paca->__current = &init_task;
+#ifdef CONFIG_PPC_STD_MMU_64
+ new_paca->slb_shadow_ptr = &slb_shadow[cpu];
+#endif /* CONFIG_PPC_STD_MMU_64 */
}
}
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 4fee63cb53ff..5a56e97c5ac0 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -1505,7 +1505,7 @@ void __init pcibios_resource_survey(void)
* rest of the code later, for now, keep it as-is as our main
* resource allocation function doesn't deal with sub-trees yet.
*/
-void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
+void pcibios_claim_one_bus(struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
@@ -1533,7 +1533,6 @@ void __devinit pcibios_claim_one_bus(struct pci_bus *bus)
list_for_each_entry(child_bus, &bus->children, node)
pcibios_claim_one_bus(child_bus);
}
-EXPORT_SYMBOL_GPL(pcibios_claim_one_bus);
/* pcibios_finish_adding_to_bus
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index d473634e39e3..3ae1c666ff92 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -33,7 +33,6 @@ int pcibios_assign_bus_offset = 1;
void pcibios_make_OF_bus_map(void);
-static void fixup_broken_pcnet32(struct pci_dev* dev);
static void fixup_cpc710_pci64(struct pci_dev* dev);
#ifdef CONFIG_PPC_OF
static u8* pci_to_OF_bus_map;
@@ -72,16 +71,6 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MOTOROLA, PCI_ANY_ID, fixup_hide_host_res
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_FREESCALE, PCI_ANY_ID, fixup_hide_host_resource_fsl);
static void
-fixup_broken_pcnet32(struct pci_dev* dev)
-{
- if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
- dev->vendor = PCI_VENDOR_ID_AMD;
- pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
-
-static void
fixup_cpc710_pci64(struct pci_dev* dev)
{
/* Hide the PCI64 BARs from the kernel as their content doesn't
@@ -447,14 +436,6 @@ static int __init pcibios_init(void)
subsys_initcall(pcibios_init);
-/* the next one is stolen from the alpha port... */
-void __init
-pcibios_update_irq(struct pci_dev *dev, int irq)
-{
- pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
- /* XXX FIXME - update OF device tree node interrupt property */
-}
-
static struct pci_controller*
pci_bus_to_hose(int bus)
{
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 96edb6f8babb..9e8902fa14c7 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -43,16 +43,6 @@ unsigned long pci_probe_only = 1;
unsigned long pci_io_base = ISA_IO_BASE;
EXPORT_SYMBOL(pci_io_base);
-static void fixup_broken_pcnet32(struct pci_dev* dev)
-{
- if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
- dev->vendor = PCI_VENDOR_ID_AMD;
- pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);
-
-
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
const u32 *prop;
@@ -430,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
* so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages
+ *
+ * Note: If we ever support P2P hotplug on Book3E, we'll have
+ * to do an appropriate TLB flush here too
*/
if (bus->self) {
struct resource *res = bus->resource[0];
@@ -437,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self));
+#ifdef CONFIG_PPC_STD_MMU_64
__flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1);
+#endif
return 0;
}
@@ -511,7 +506,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus)
pr_debug("IO mapping for PHB %s\n", hose->dn->full_name);
pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n",
hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc);
- pr_debug(" size=0x%016lx (alloc=0x%016lx)\n",
+ pr_debug(" size=0x%016llx (alloc=0x%016lx)\n",
hose->pci_io_size, size_page);
/* Establish the mapping */
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 1c67de52e3ce..d5e36e5dc7c2 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -27,7 +27,6 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
-#include <asm/pSeries_reconfig.h>
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
@@ -35,7 +34,7 @@
* Traverse_func that inits the PCI fields of the device node.
* NOTE: this *must* be done before read/write config to the device.
*/
-static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
+void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
{
struct pci_controller *phb = data;
const int *type =
@@ -184,29 +183,6 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev)
}
EXPORT_SYMBOL(fetch_dev_dn);
-static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node)
-{
- struct device_node *np = node;
- struct pci_dn *pci = NULL;
- int err = NOTIFY_OK;
-
- switch (action) {
- case PSERIES_RECONFIG_ADD:
- pci = np->parent->data;
- if (pci)
- update_dn_pci_info(np, pci->phb);
- break;
- default:
- err = NOTIFY_DONE;
- break;
- }
- return err;
-}
-
-static struct notifier_block pci_dn_reconfig_nb = {
- .notifier_call = pci_dn_reconfig_notifier,
-};
-
/**
* pci_devs_phb_init - Initialize phbs and pci devs under them.
*
@@ -223,6 +199,4 @@ void __init pci_devs_phb_init(void)
/* This must be done first so the device nodes have valid pci info! */
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
pci_devs_phb_init_dynamic(phb);
-
- pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
}
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
new file mode 100644
index 000000000000..bb202388170e
--- /dev/null
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -0,0 +1,1263 @@
+/*
+ * Performance counter support - powerpc architecture code
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/perf_counter.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <asm/reg.h>
+#include <asm/pmc.h>
+#include <asm/machdep.h>
+#include <asm/firmware.h>
+#include <asm/ptrace.h>
+
+struct cpu_hw_counters {
+ int n_counters;
+ int n_percpu;
+ int disabled;
+ int n_added;
+ int n_limited;
+ u8 pmcs_enabled;
+ struct perf_counter *counter[MAX_HWCOUNTERS];
+ u64 events[MAX_HWCOUNTERS];
+ unsigned int flags[MAX_HWCOUNTERS];
+ u64 mmcr[3];
+ struct perf_counter *limited_counter[MAX_LIMITED_HWCOUNTERS];
+ u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
+};
+DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);
+
+struct power_pmu *ppmu;
+
+/*
+ * Normally, to ignore kernel events we set the FCS (freeze counters
+ * in supervisor mode) bit in MMCR0, but if the kernel runs with the
+ * hypervisor bit set in the MSR, or if we are running on a processor
+ * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
+ * then we need to use the FCHV bit to ignore kernel events.
+ */
+static unsigned int freeze_counters_kernel = MMCR0_FCS;
+
+static void perf_counter_interrupt(struct pt_regs *regs);
+
+void perf_counter_print_debug(void)
+{
+}
+
+/*
+ * Read one performance monitor counter (PMC).
+ */
+static unsigned long read_pmc(int idx)
+{
+ unsigned long val;
+
+ switch (idx) {
+ case 1:
+ val = mfspr(SPRN_PMC1);
+ break;
+ case 2:
+ val = mfspr(SPRN_PMC2);
+ break;
+ case 3:
+ val = mfspr(SPRN_PMC3);
+ break;
+ case 4:
+ val = mfspr(SPRN_PMC4);
+ break;
+ case 5:
+ val = mfspr(SPRN_PMC5);
+ break;
+ case 6:
+ val = mfspr(SPRN_PMC6);
+ break;
+ case 7:
+ val = mfspr(SPRN_PMC7);
+ break;
+ case 8:
+ val = mfspr(SPRN_PMC8);
+ break;
+ default:
+ printk(KERN_ERR "oops trying to read PMC%d\n", idx);
+ val = 0;
+ }
+ return val;
+}
+
+/*
+ * Write one PMC.
+ */
+static void write_pmc(int idx, unsigned long val)
+{
+ switch (idx) {
+ case 1:
+ mtspr(SPRN_PMC1, val);
+ break;
+ case 2:
+ mtspr(SPRN_PMC2, val);
+ break;
+ case 3:
+ mtspr(SPRN_PMC3, val);
+ break;
+ case 4:
+ mtspr(SPRN_PMC4, val);
+ break;
+ case 5:
+ mtspr(SPRN_PMC5, val);
+ break;
+ case 6:
+ mtspr(SPRN_PMC6, val);
+ break;
+ case 7:
+ mtspr(SPRN_PMC7, val);
+ break;
+ case 8:
+ mtspr(SPRN_PMC8, val);
+ break;
+ default:
+ printk(KERN_ERR "oops trying to write PMC%d\n", idx);
+ }
+}
+
+/*
+ * Check if a set of events can all go on the PMU at once.
+ * If they can't, this will look at alternative codes for the events
+ * and see if any combination of alternative codes is feasible.
+ * The feasible set is returned in event[].
+ */
+static int power_check_constraints(u64 event[], unsigned int cflags[],
+ int n_ev)
+{
+ u64 mask, value, nv;
+ u64 alternatives[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ u64 amasks[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ u64 avalues[MAX_HWCOUNTERS][MAX_EVENT_ALTERNATIVES];
+ u64 smasks[MAX_HWCOUNTERS], svalues[MAX_HWCOUNTERS];
+ int n_alt[MAX_HWCOUNTERS], choice[MAX_HWCOUNTERS];
+ int i, j;
+ u64 addf = ppmu->add_fields;
+ u64 tadd = ppmu->test_adder;
+
+ if (n_ev > ppmu->n_counter)
+ return -1;
+
+ /* First see if the events will go on as-is */
+ for (i = 0; i < n_ev; ++i) {
+ if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
+ && !ppmu->limited_pmc_event(event[i])) {
+ ppmu->get_alternatives(event[i], cflags[i],
+ alternatives[i]);
+ event[i] = alternatives[i][0];
+ }
+ if (ppmu->get_constraint(event[i], &amasks[i][0],
+ &avalues[i][0]))
+ return -1;
+ }
+ value = mask = 0;
+ for (i = 0; i < n_ev; ++i) {
+ nv = (value | avalues[i][0]) + (value & avalues[i][0] & addf);
+ if ((((nv + tadd) ^ value) & mask) != 0 ||
+ (((nv + tadd) ^ avalues[i][0]) & amasks[i][0]) != 0)
+ break;
+ value = nv;
+ mask |= amasks[i][0];
+ }
+ if (i == n_ev)
+ return 0; /* all OK */
+
+ /* doesn't work, gather alternatives... */
+ if (!ppmu->get_alternatives)
+ return -1;
+ for (i = 0; i < n_ev; ++i) {
+ choice[i] = 0;
+ n_alt[i] = ppmu->get_alternatives(event[i], cflags[i],
+ alternatives[i]);
+ for (j = 1; j < n_alt[i]; ++j)
+ ppmu->get_constraint(alternatives[i][j],
+ &amasks[i][j], &avalues[i][j]);
+ }
+
+ /* enumerate all possibilities and see if any will work */
+ i = 0;
+ j = -1;
+ value = mask = nv = 0;
+ while (i < n_ev) {
+ if (j >= 0) {
+ /* we're backtracking, restore context */
+ value = svalues[i];
+ mask = smasks[i];
+ j = choice[i];
+ }
+ /*
+ * See if any alternative k for event i,
+ * where k > j, will satisfy the constraints.
+ */
+ while (++j < n_alt[i]) {
+ nv = (value | avalues[i][j]) +
+ (value & avalues[i][j] & addf);
+ if ((((nv + tadd) ^ value) & mask) == 0 &&
+ (((nv + tadd) ^ avalues[i][j])
+ & amasks[i][j]) == 0)
+ break;
+ }
+ if (j >= n_alt[i]) {
+ /*
+ * No feasible alternative, backtrack
+ * to event i-1 and continue enumerating its
+ * alternatives from where we got up to.
+ */
+ if (--i < 0)
+ return -1;
+ } else {
+ /*
+ * Found a feasible alternative for event i,
+ * remember where we got up to with this event,
+ * go on to the next event, and start with
+ * the first alternative for it.
+ */
+ choice[i] = j;
+ svalues[i] = value;
+ smasks[i] = mask;
+ value = nv;
+ mask |= amasks[i][j];
+ ++i;
+ j = -1;
+ }
+ }
+
+ /* OK, we have a feasible combination, tell the caller the solution */
+ for (i = 0; i < n_ev; ++i)
+ event[i] = alternatives[i][choice[i]];
+ return 0;
+}
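The value/mask bookkeeping makes the enumeration above hard to read in isolation; here is a stripped-down sketch of the same backtracking control flow, with a caller-supplied feasibility test standing in for the constraint arithmetic. The function and parameter names are invented for illustration:

/* Pick one alternative per event such that feasible(choice, n) accepts
 * every prefix; returns 0 and fills choice[] on success, -1 otherwise. */
static int pick_alternatives(int n_ev, const int n_alt[], int choice[],
                             int (*feasible)(const int choice[], int upto))
{
        int i = 0;

        if (n_ev <= 0)
                return 0;

        choice[0] = 0;
        while (i < n_ev) {
                if (choice[i] >= n_alt[i]) {
                        /* every alternative for event i failed: backtrack */
                        if (--i < 0)
                                return -1;      /* no feasible combination */
                        ++choice[i];
                } else if (feasible(choice, i + 1)) {
                        /* prefix choice[0..i] is consistent: go deeper */
                        if (++i < n_ev)
                                choice[i] = 0;  /* restart the next event's alternatives */
                } else {
                        ++choice[i];            /* try event i's next alternative */
                }
        }
        return 0;       /* choice[] holds one feasible assignment */
}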
+
+/*
+ * Check if newly-added counters have consistent settings for
+ * exclude_{user,kernel,hv} with each other and any previously
+ * added counters.
+ */
+static int check_excludes(struct perf_counter **ctrs, unsigned int cflags[],
+ int n_prev, int n_new)
+{
+ int eu = 0, ek = 0, eh = 0;
+ int i, n, first;
+ struct perf_counter *counter;
+
+ n = n_prev + n_new;
+ if (n <= 1)
+ return 0;
+
+ first = 1;
+ for (i = 0; i < n; ++i) {
+ if (cflags[i] & PPMU_LIMITED_PMC_OK) {
+ cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
+ continue;
+ }
+ counter = ctrs[i];
+ if (first) {
+ eu = counter->attr.exclude_user;
+ ek = counter->attr.exclude_kernel;
+ eh = counter->attr.exclude_hv;
+ first = 0;
+ } else if (counter->attr.exclude_user != eu ||
+ counter->attr.exclude_kernel != ek ||
+ counter->attr.exclude_hv != eh) {
+ return -EAGAIN;
+ }
+ }
+
+ if (eu || ek || eh)
+ for (i = 0; i < n; ++i)
+ if (cflags[i] & PPMU_LIMITED_PMC_OK)
+ cflags[i] |= PPMU_LIMITED_PMC_REQD;
+
+ return 0;
+}
+
+static void power_pmu_read(struct perf_counter *counter)
+{
+ long val, delta, prev;
+
+ if (!counter->hw.idx)
+ return;
+ /*
+ * Performance monitor interrupts come even when interrupts
+ * are soft-disabled, as long as interrupts are hard-enabled.
+ * Therefore we treat them like NMIs.
+ */
+ do {
+ prev = atomic64_read(&counter->hw.prev_count);
+ barrier();
+ val = read_pmc(counter->hw.idx);
+ } while (atomic64_cmpxchg(&counter->hw.prev_count, prev, val) != prev);
+
+ /* The counters are only 32 bits wide */
+ delta = (val - prev) & 0xfffffffful;
+ atomic64_add(delta, &counter->count);
+ atomic64_sub(delta, &counter->hw.period_left);
+}
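Because the comment above notes the PMCs are only 32 bits wide, a tiny standalone check shows why the masked subtraction still yields the right delta across a counter wrap (the values are made up):

#include <assert.h>

int main(void)
{
        unsigned long prev = 0xfffffff0ul;      /* sampled just before the wrap */
        unsigned long val  = 0x00000010ul;      /* sampled just after the wrap */
        long delta = (val - prev) & 0xfffffffful;

        assert(delta == 0x20);                  /* 0x10 counts before + 0x10 after */
        return 0;
}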
+
+/*
+ * On some machines, PMC5 and PMC6 can't be written, don't respect
+ * the freeze conditions, and don't generate interrupts. This tells
+ * us if `counter' is using such a PMC.
+ */
+static int is_limited_pmc(int pmcnum)
+{
+ return (ppmu->flags & PPMU_LIMITED_PMC5_6)
+ && (pmcnum == 5 || pmcnum == 6);
+}
+
+static void freeze_limited_counters(struct cpu_hw_counters *cpuhw,
+ unsigned long pmc5, unsigned long pmc6)
+{
+ struct perf_counter *counter;
+ u64 val, prev, delta;
+ int i;
+
+ for (i = 0; i < cpuhw->n_limited; ++i) {
+ counter = cpuhw->limited_counter[i];
+ if (!counter->hw.idx)
+ continue;
+ val = (counter->hw.idx == 5) ? pmc5 : pmc6;
+ prev = atomic64_read(&counter->hw.prev_count);
+ counter->hw.idx = 0;
+ delta = (val - prev) & 0xfffffffful;
+ atomic64_add(delta, &counter->count);
+ }
+}
+
+static void thaw_limited_counters(struct cpu_hw_counters *cpuhw,
+ unsigned long pmc5, unsigned long pmc6)
+{
+ struct perf_counter *counter;
+ u64 val;
+ int i;
+
+ for (i = 0; i < cpuhw->n_limited; ++i) {
+ counter = cpuhw->limited_counter[i];
+ counter->hw.idx = cpuhw->limited_hwidx[i];
+ val = (counter->hw.idx == 5) ? pmc5 : pmc6;
+ atomic64_set(&counter->hw.prev_count, val);
+ perf_counter_update_userpage(counter);
+ }
+}
+
+/*
+ * Since limited counters don't respect the freeze conditions, we
+ * have to read them immediately after freezing or unfreezing the
+ * other counters. We try to keep the values from the limited
+ * counters as consistent as possible by keeping the delay (in
+ * cycles and instructions) between freezing/unfreezing and reading
+ * the limited counters as small and consistent as possible.
+ * Therefore, if any limited counters are in use, we read them
+ * both, and always in the same order, to minimize variability,
+ * and do it inside the same asm that writes MMCR0.
+ */
+static void write_mmcr0(struct cpu_hw_counters *cpuhw, unsigned long mmcr0)
+{
+ unsigned long pmc5, pmc6;
+
+ if (!cpuhw->n_limited) {
+ mtspr(SPRN_MMCR0, mmcr0);
+ return;
+ }
+
+ /*
+ * Write MMCR0, then read PMC5 and PMC6 immediately.
+ * To ensure we don't get a performance monitor interrupt
+ * between writing MMCR0 and freezing/thawing the limited
+ * counters, we first write MMCR0 with the counter overflow
+ * interrupt enable bits turned off.
+ */
+ asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
+ : "=&r" (pmc5), "=&r" (pmc6)
+ : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
+ "i" (SPRN_MMCR0),
+ "i" (SPRN_PMC5), "i" (SPRN_PMC6));
+
+ if (mmcr0 & MMCR0_FC)
+ freeze_limited_counters(cpuhw, pmc5, pmc6);
+ else
+ thaw_limited_counters(cpuhw, pmc5, pmc6);
+
+ /*
+ * Write the full MMCR0 including the counter overflow interrupt
+ * enable bits, if necessary.
+ */
+ if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
+ mtspr(SPRN_MMCR0, mmcr0);
+}
+
+/*
+ * Disable all counters to prevent PMU interrupts and to allow
+ * counters to be added or removed.
+ */
+void hw_perf_disable(void)
+{
+ struct cpu_hw_counters *cpuhw;
+ unsigned long ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ cpuhw = &__get_cpu_var(cpu_hw_counters);
+
+ ret = cpuhw->disabled;
+ if (!ret) {
+ cpuhw->disabled = 1;
+ cpuhw->n_added = 0;
+
+ /*
+ * Check if we ever enabled the PMU on this cpu.
+ */
+ if (!cpuhw->pmcs_enabled) {
+ if (ppc_md.enable_pmcs)
+ ppc_md.enable_pmcs();
+ cpuhw->pmcs_enabled = 1;
+ }
+
+ /*
+ * Disable instruction sampling if it was enabled
+ */
+ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+ mtspr(SPRN_MMCRA,
+ cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ mb();
+ }
+
+ /*
+ * Set the 'freeze counters' bit.
+ * The barrier is to make sure the mtspr has been
+ * executed and the PMU has frozen the counters
+ * before we return.
+ */
+ write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+/*
+ * Re-enable all counters if disable == 0.
+ * If we were previously disabled and counters were added, then
+ * put the new config on the PMU.
+ */
+void hw_perf_enable(void)
+{
+ struct perf_counter *counter;
+ struct cpu_hw_counters *cpuhw;
+ unsigned long flags;
+ long i;
+ unsigned long val;
+ s64 left;
+ unsigned int hwc_index[MAX_HWCOUNTERS];
+ int n_lim;
+ int idx;
+
+ local_irq_save(flags);
+ cpuhw = &__get_cpu_var(cpu_hw_counters);
+ if (!cpuhw->disabled) {
+ local_irq_restore(flags);
+ return;
+ }
+ cpuhw->disabled = 0;
+
+ /*
+ * If we didn't change anything, or only removed counters,
+ * no need to recalculate MMCR* settings and reset the PMCs.
+ * Just reenable the PMU with the current MMCR* settings
+ * (possibly updated for removal of counters).
+ */
+ if (!cpuhw->n_added) {
+ mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+ if (cpuhw->n_counters == 0)
+ get_lppaca()->pmcregs_in_use = 0;
+ goto out_enable;
+ }
+
+ /*
+ * Compute MMCR* values for the new set of counters
+ */
+ if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_counters, hwc_index,
+ cpuhw->mmcr)) {
+ /* shouldn't ever get here */
+ printk(KERN_ERR "oops compute_mmcr failed\n");
+ goto out;
+ }
+
+ /*
+ * Add in MMCR0 freeze bits corresponding to the
+ * attr.exclude_* bits for the first counter.
+ * We have already checked that all counters have the
+ * same values for these bits as the first counter.
+ */
+ counter = cpuhw->counter[0];
+ if (counter->attr.exclude_user)
+ cpuhw->mmcr[0] |= MMCR0_FCP;
+ if (counter->attr.exclude_kernel)
+ cpuhw->mmcr[0] |= freeze_counters_kernel;
+ if (counter->attr.exclude_hv)
+ cpuhw->mmcr[0] |= MMCR0_FCHV;
+
+ /*
+ * Write the new configuration to MMCR* with the freeze
+ * bit set and set the hardware counters to their initial values.
+ * Then unfreeze the counters.
+ */
+ get_lppaca()->pmcregs_in_use = 1;
+ mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
+ mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
+ mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
+ | MMCR0_FC);
+
+ /*
+ * Read off any pre-existing counters that need to move
+ * to another PMC.
+ */
+ for (i = 0; i < cpuhw->n_counters; ++i) {
+ counter = cpuhw->counter[i];
+ if (counter->hw.idx && counter->hw.idx != hwc_index[i] + 1) {
+ power_pmu_read(counter);
+ write_pmc(counter->hw.idx, 0);
+ counter->hw.idx = 0;
+ }
+ }
+
+ /*
+ * Initialize the PMCs for all the new and moved counters.
+ */
+ cpuhw->n_limited = n_lim = 0;
+ for (i = 0; i < cpuhw->n_counters; ++i) {
+ counter = cpuhw->counter[i];
+ if (counter->hw.idx)
+ continue;
+ idx = hwc_index[i] + 1;
+ if (is_limited_pmc(idx)) {
+ cpuhw->limited_counter[n_lim] = counter;
+ cpuhw->limited_hwidx[n_lim] = idx;
+ ++n_lim;
+ continue;
+ }
+ val = 0;
+ if (counter->hw.sample_period) {
+ left = atomic64_read(&counter->hw.period_left);
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ }
+ atomic64_set(&counter->hw.prev_count, val);
+ counter->hw.idx = idx;
+ write_pmc(idx, val);
+ perf_counter_update_userpage(counter);
+ }
+ cpuhw->n_limited = n_lim;
+ cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;
+
+ out_enable:
+ mb();
+ write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+
+ /*
+ * Enable instruction sampling if necessary
+ */
+ if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
+ mb();
+ mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
+ }
+
+ out:
+ local_irq_restore(flags);
+}
+
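+/*
+ * Collect the hardware counters in a group (leader plus siblings
+ * that aren't OFF) into ctrs[], events[] and flags[], up to
+ * max_count entries.  Software counters are skipped.  Returns the
+ * number collected, or -1 if there are too many.
+ */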
+static int collect_events(struct perf_counter *group, int max_count,
+ struct perf_counter *ctrs[], u64 *events,
+ unsigned int *flags)
+{
+ int n = 0;
+ struct perf_counter *counter;
+
+ if (!is_software_counter(group)) {
+ if (n >= max_count)
+ return -1;
+ ctrs[n] = group;
+ flags[n] = group->hw.counter_base;
+ events[n++] = group->hw.config;
+ }
+ list_for_each_entry(counter, &group->sibling_list, list_entry) {
+ if (!is_software_counter(counter) &&
+ counter->state != PERF_COUNTER_STATE_OFF) {
+ if (n >= max_count)
+ return -1;
+ ctrs[n] = counter;
+ flags[n] = counter->hw.counter_base;
+ events[n++] = counter->hw.config;
+ }
+ }
+ return n;
+}
+
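+/*
+ * Mark a counter as active on this cpu; software counters also get
+ * their pmu->enable method called here.
+ */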
+static void counter_sched_in(struct perf_counter *counter, int cpu)
+{
+ counter->state = PERF_COUNTER_STATE_ACTIVE;
+ counter->oncpu = cpu;
+ counter->tstamp_running += counter->ctx->time - counter->tstamp_stopped;
+ if (is_software_counter(counter))
+ counter->pmu->enable(counter);
+}
+
+/*
+ * Called to enable a whole group of counters.
+ * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
+ * Assumes the caller has disabled interrupts and has
+ * frozen the PMU with hw_perf_disable.
+ */
+int hw_perf_group_sched_in(struct perf_counter *group_leader,
+ struct perf_cpu_context *cpuctx,
+ struct perf_counter_context *ctx, int cpu)
+{
+ struct cpu_hw_counters *cpuhw;
+ long i, n, n0;
+ struct perf_counter *sub;
+
+ cpuhw = &__get_cpu_var(cpu_hw_counters);
+ n0 = cpuhw->n_counters;
+ n = collect_events(group_leader, ppmu->n_counter - n0,
+ &cpuhw->counter[n0], &cpuhw->events[n0],
+ &cpuhw->flags[n0]);
+ if (n < 0)
+ return -EAGAIN;
+ if (check_excludes(cpuhw->counter, cpuhw->flags, n0, n))
+ return -EAGAIN;
+ i = power_check_constraints(cpuhw->events, cpuhw->flags, n + n0);
+ if (i < 0)
+ return -EAGAIN;
+ cpuhw->n_counters = n0 + n;
+ cpuhw->n_added += n;
+
+ /*
+ * OK, this group can go on; update counter states etc.,
+ * and enable any software counters
+ */
+ for (i = n0; i < n0 + n; ++i)
+ cpuhw->counter[i]->hw.config = cpuhw->events[i];
+ cpuctx->active_oncpu += n;
+ n = 1;
+ counter_sched_in(group_leader, cpu);
+ list_for_each_entry(sub, &group_leader->sibling_list, list_entry) {
+ if (sub->state != PERF_COUNTER_STATE_OFF) {
+ counter_sched_in(sub, cpu);
+ ++n;
+ }
+ }
+ ctx->nr_active += n;
+
+ return 1;
+}
+
+/*
+ * Add a counter to the PMU.
+ * If all counters are not already frozen, then we disable and
+ * re-enable the PMU in order to get hw_perf_enable to do the
+ * actual work of reconfiguring the PMU.
+ */
+static int power_pmu_enable(struct perf_counter *counter)
+{
+ struct cpu_hw_counters *cpuhw;
+ unsigned long flags;
+ int n0;
+ int ret = -EAGAIN;
+
+ local_irq_save(flags);
+ perf_disable();
+
+ /*
+ * Add the counter to the list (if there is room)
+ * and check whether the total set is still feasible.
+ */
+ cpuhw = &__get_cpu_var(cpu_hw_counters);
+ n0 = cpuhw->n_counters;
+ if (n0 >= ppmu->n_counter)
+ goto out;
+ cpuhw->counter[n0] = counter;
+ cpuhw->events[n0] = counter->hw.config;
+ cpuhw->flags[n0] = counter->hw.counter_base;
+ if (check_excludes(cpuhw->counter, cpuhw->flags, n0, 1))
+ goto out;
+ if (power_check_constraints(cpuhw->events, cpuhw->flags, n0 + 1))
+ goto out;
+
+ counter->hw.config = cpuhw->events[n0];
+ ++cpuhw->n_counters;
+ ++cpuhw->n_added;
+
+ ret = 0;
+ out:
+ perf_enable();
+ local_irq_restore(flags);
+ return ret;
+}
+
+/*
+ * Remove a counter from the PMU.
+ */
+static void power_pmu_disable(struct perf_counter *counter)
+{
+ struct cpu_hw_counters *cpuhw;
+ long i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ perf_disable();
+
+ power_pmu_read(counter);
+
+ cpuhw = &__get_cpu_var(cpu_hw_counters);
+ for (i = 0; i < cpuhw->n_counters; ++i) {
+ if (counter == cpuhw->counter[i]) {
+ while (++i < cpuhw->n_counters)
+ cpuhw->counter[i-1] = cpuhw->counter[i];
+ --cpuhw->n_counters;
+ ppmu->disable_pmc(counter->hw.idx - 1, cpuhw->mmcr);
+ if (counter->hw.idx) {
+ write_pmc(counter->hw.idx, 0);
+ counter->hw.idx = 0;
+ }
+ perf_counter_update_userpage(counter);
+ break;
+ }
+ }
+ for (i = 0; i < cpuhw->n_limited; ++i)
+ if (counter == cpuhw->limited_counter[i])
+ break;
+ if (i < cpuhw->n_limited) {
+ while (++i < cpuhw->n_limited) {
+ cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
+ cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
+ }
+ --cpuhw->n_limited;
+ }
+ if (cpuhw->n_counters == 0) {
+ /* disable exceptions if no counters are running */
+ cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
+ }
+
+ perf_enable();
+ local_irq_restore(flags);
+}
+
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+ s64 val, left;
+ unsigned long flags;
+
+ if (!counter->hw.idx || !counter->hw.sample_period)
+ return;
+ local_irq_save(flags);
+ perf_disable();
+ power_pmu_read(counter);
+ left = counter->hw.sample_period;
+ counter->hw.last_period = left;
+ val = 0;
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ write_pmc(counter->hw.idx, val);
+ atomic64_set(&counter->hw.prev_count, val);
+ atomic64_set(&counter->hw.period_left, left);
+ perf_counter_update_userpage(counter);
+ perf_enable();
+ local_irq_restore(flags);
+}
+
+struct pmu power_pmu = {
+ .enable = power_pmu_enable,
+ .disable = power_pmu_disable,
+ .read = power_pmu_read,
+ .unthrottle = power_pmu_unthrottle,
+};
+
+/*
+ * Return 1 if we might be able to put counter on a limited PMC,
+ * or 0 if not.
+ * A counter can only go on a limited PMC if it counts something
+ * that a limited PMC can count, doesn't require interrupts, and
+ * doesn't exclude any processor mode.
+ */
+static int can_go_on_limited_pmc(struct perf_counter *counter, u64 ev,
+ unsigned int flags)
+{
+ int n;
+ u64 alt[MAX_EVENT_ALTERNATIVES];
+
+ if (counter->attr.exclude_user
+ || counter->attr.exclude_kernel
+ || counter->attr.exclude_hv
+ || counter->attr.sample_period)
+ return 0;
+
+ if (ppmu->limited_pmc_event(ev))
+ return 1;
+
+ /*
+ * The requested event isn't on a limited PMC already;
+ * see if any alternative code goes on a limited PMC.
+ */
+ if (!ppmu->get_alternatives)
+ return 0;
+
+ flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
+ n = ppmu->get_alternatives(ev, flags, alt);
+
+ return n > 0;
+}
+
+/*
+ * Find an alternative event that goes on a normal PMC, if possible,
+ * and return the event code, or 0 if there is no such alternative.
+ * (Note: event code 0 is "don't count" on all machines.)
+ */
+static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
+{
+ u64 alt[MAX_EVENT_ALTERNATIVES];
+ int n;
+
+ flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
+ n = ppmu->get_alternatives(ev, flags, alt);
+ if (!n)
+ return 0;
+ return alt[0];
+}
+
+/* Number of perf_counters counting hardware events */
+static atomic_t num_counters;
+/* Used to avoid races in calling reserve/release_pmc_hardware */
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+/*
+ * Release the PMU if this is the last perf_counter.
+ */
+static void hw_perf_counter_destroy(struct perf_counter *counter)
+{
+ if (!atomic_add_unless(&num_counters, -1, 1)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_dec_return(&num_counters) == 0)
+ release_pmc_hardware();
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+}
+
+/*
+ * Translate a generic cache event config to a raw event code.
+ */
+static int hw_perf_cache_event(u64 config, u64 *eventp)
+{
+ unsigned long type, op, result;
+ int ev;
+
+ if (!ppmu->cache_events)
+ return -EINVAL;
+
+ /* unpack config */
+ type = config & 0xff;
+ op = (config >> 8) & 0xff;
+ result = (config >> 16) & 0xff;
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX ||
+ op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ ev = (*ppmu->cache_events)[type][op][result];
+ if (ev == 0)
+ return -EOPNOTSUPP;
+ if (ev == -1)
+ return -EINVAL;
+ *eventp = ev;
+ return 0;
+}
+
+const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
+{
+ u64 ev;
+ unsigned long flags;
+ struct perf_counter *ctrs[MAX_HWCOUNTERS];
+ u64 events[MAX_HWCOUNTERS];
+ unsigned int cflags[MAX_HWCOUNTERS];
+ int n;
+ int err;
+
+ if (!ppmu)
+ return ERR_PTR(-ENXIO);
+ switch (counter->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ ev = counter->attr.config;
+ if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+ ev = ppmu->generic_events[ev];
+ break;
+ case PERF_TYPE_HW_CACHE:
+ err = hw_perf_cache_event(counter->attr.config, &ev);
+ if (err)
+ return ERR_PTR(err);
+ break;
+	case PERF_TYPE_RAW:
+		ev = counter->attr.config;
+		break;
+	default:
+		return ERR_PTR(-EINVAL);
+	}
+ counter->hw.config_base = ev;
+ counter->hw.idx = 0;
+
+ /*
+ * If we are not running on a hypervisor, force the
+ * exclude_hv bit to 0 so that we don't care what
+ * the user set it to.
+ */
+ if (!firmware_has_feature(FW_FEATURE_LPAR))
+ counter->attr.exclude_hv = 0;
+
+ /*
+ * If this is a per-task counter, then we can use
+ * PM_RUN_* events interchangeably with their non RUN_*
+ * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
+ * XXX we should check if the task is an idle task.
+ */
+ flags = 0;
+ if (counter->ctx->task)
+ flags |= PPMU_ONLY_COUNT_RUN;
+
+ /*
+ * If this machine has limited counters, check whether this
+ * event could go on a limited counter.
+ */
+ if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
+ if (can_go_on_limited_pmc(counter, ev, flags)) {
+ flags |= PPMU_LIMITED_PMC_OK;
+ } else if (ppmu->limited_pmc_event(ev)) {
+ /*
+ * The requested event is on a limited PMC,
+ * but we can't use a limited PMC; see if any
+ * alternative goes on a normal PMC.
+ */
+ ev = normal_pmc_alternative(ev, flags);
+ if (!ev)
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ /*
+ * If this is in a group, check if it can go on with all the
+ * other hardware counters in the group. We assume the counter
+ * hasn't been linked into its leader's sibling list at this point.
+ */
+ n = 0;
+ if (counter->group_leader != counter) {
+ n = collect_events(counter->group_leader, ppmu->n_counter - 1,
+ ctrs, events, cflags);
+ if (n < 0)
+ return ERR_PTR(-EINVAL);
+ }
+ events[n] = ev;
+ ctrs[n] = counter;
+ cflags[n] = flags;
+ if (check_excludes(ctrs, cflags, n, 1))
+ return ERR_PTR(-EINVAL);
+ if (power_check_constraints(events, cflags, n + 1))
+ return ERR_PTR(-EINVAL);
+
+ counter->hw.config = events[n];
+ counter->hw.counter_base = cflags[n];
+ counter->hw.last_period = counter->hw.sample_period;
+ atomic64_set(&counter->hw.period_left, counter->hw.last_period);
+
+ /*
+ * See if we need to reserve the PMU.
+ * If no counters are currently in use, then we have to take a
+ * mutex to ensure that we don't race with another task doing
+ * reserve_pmc_hardware or release_pmc_hardware.
+ */
+ err = 0;
+ if (!atomic_inc_not_zero(&num_counters)) {
+ mutex_lock(&pmc_reserve_mutex);
+ if (atomic_read(&num_counters) == 0 &&
+ reserve_pmc_hardware(perf_counter_interrupt))
+ err = -EBUSY;
+ else
+ atomic_inc(&num_counters);
+ mutex_unlock(&pmc_reserve_mutex);
+ }
+ counter->destroy = hw_perf_counter_destroy;
+
+ if (err)
+ return ERR_PTR(err);
+ return &power_pmu;
+}
+
+/*
+ * A counter has overflowed; update its count and record
+ * things if requested. Note that interrupts are hard-disabled
+ * here so there is no possibility of being interrupted.
+ */
+static void record_and_restart(struct perf_counter *counter, long val,
+ struct pt_regs *regs, int nmi)
+{
+ u64 period = counter->hw.sample_period;
+ s64 prev, delta, left;
+ int record = 0;
+	u64 mmcra, sdsync;
+
+ /* we don't have to worry about interrupts here */
+ prev = atomic64_read(&counter->hw.prev_count);
+ delta = (val - prev) & 0xfffffffful;
+ atomic64_add(delta, &counter->count);
+
+ /*
+ * See if the total period for this counter has expired,
+ * and update for the next period.
+ */
+ val = 0;
+ left = atomic64_read(&counter->hw.period_left) - delta;
+ if (period) {
+ if (left <= 0) {
+ left += period;
+ if (left <= 0)
+ left = period;
+ record = 1;
+ }
+ if (left < 0x80000000L)
+ val = 0x80000000L - left;
+ }
+
+ /*
+ * Finally record data if requested.
+ */
+ if (record) {
+ struct perf_sample_data data = {
+ .regs = regs,
+ .addr = 0,
+ .period = counter->hw.last_period,
+ };
+
+ if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
+ /*
+ * The user wants a data address recorded.
+ * If we're not doing instruction sampling,
+ * give them the SDAR (sampled data address).
+ * If we are doing instruction sampling, then only
+ * give them the SDAR if it corresponds to the
+ * instruction pointed to by SIAR; this is indicated
+ * by the [POWER6_]MMCRA_SDSYNC bit in MMCRA.
+ */
+ mmcra = regs->dsisr;
+ sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
+ POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;
+ if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
+ data.addr = mfspr(SPRN_SDAR);
+ }
+ if (perf_counter_overflow(counter, nmi, &data)) {
+ /*
+ * Interrupts are coming too fast - throttle them
+ * by setting the counter to 0, so it will be
+ * at least 2^30 cycles until the next interrupt
+ * (assuming each counter counts at most 2 counts
+ * per cycle).
+ */
+ val = 0;
+ left = ~0ULL >> 1;
+ }
+ }
+
+ write_pmc(counter->hw.idx, val);
+ atomic64_set(&counter->hw.prev_count, val);
+ atomic64_set(&counter->hw.period_left, left);
+ perf_counter_update_userpage(counter);
+}
+
+/*
+ * Called from generic code to get the misc flags (i.e. processor mode)
+ * for an event.
+ */
+unsigned long perf_misc_flags(struct pt_regs *regs)
+{
+ unsigned long mmcra;
+
+ if (TRAP(regs) != 0xf00) {
+ /* not a PMU interrupt */
+ return user_mode(regs) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+ }
+
+ mmcra = regs->dsisr;
+ if (ppmu->flags & PPMU_ALT_SIPR) {
+ if (mmcra & POWER6_MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & POWER6_MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+ }
+ if (mmcra & MMCRA_SIHV)
+ return PERF_EVENT_MISC_HYPERVISOR;
+ return (mmcra & MMCRA_SIPR) ? PERF_EVENT_MISC_USER :
+ PERF_EVENT_MISC_KERNEL;
+}
+
+/*
+ * Called from generic code to get the instruction pointer
+ * for an event.
+ */
+unsigned long perf_instruction_pointer(struct pt_regs *regs)
+{
+ unsigned long mmcra;
+ unsigned long ip;
+ unsigned long slot;
+
+ if (TRAP(regs) != 0xf00)
+ return regs->nip; /* not a PMU interrupt */
+
+ ip = mfspr(SPRN_SIAR);
+ mmcra = regs->dsisr;
+ if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
+ slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
+ if (slot > 1)
+ ip += 4 * (slot - 1);
+ }
+ return ip;
+}
+
+/*
+ * Performance monitor interrupt handler
+ */
+static void perf_counter_interrupt(struct pt_regs *regs)
+{
+ int i;
+ struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);
+ struct perf_counter *counter;
+ long val;
+ int found = 0;
+ int nmi;
+
+ if (cpuhw->n_limited)
+ freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
+ mfspr(SPRN_PMC6));
+
+ /*
+ * Overload regs->dsisr to store MMCRA so we only need to read it once.
+ */
+ regs->dsisr = mfspr(SPRN_MMCRA);
+
+ /*
+ * If interrupts were soft-disabled when this PMU interrupt
+ * occurred, treat it as an NMI.
+ */
+ nmi = !regs->softe;
+ if (nmi)
+ nmi_enter();
+ else
+ irq_enter();
+
+ for (i = 0; i < cpuhw->n_counters; ++i) {
+ counter = cpuhw->counter[i];
+ if (!counter->hw.idx || is_limited_pmc(counter->hw.idx))
+ continue;
+ val = read_pmc(counter->hw.idx);
+ if ((int)val < 0) {
+ /* counter has overflowed */
+ found = 1;
+ record_and_restart(counter, val, regs, nmi);
+ }
+ }
+
+ /*
+ * In case we didn't find and reset the counter that caused
+ * the interrupt, scan all counters and reset any that are
+ * negative, to avoid getting continual interrupts.
+ * Any that we processed in the previous loop will not be negative.
+ */
+ if (!found) {
+ for (i = 0; i < ppmu->n_counter; ++i) {
+ if (is_limited_pmc(i + 1))
+ continue;
+ val = read_pmc(i + 1);
+ if ((int)val < 0)
+ write_pmc(i + 1, 0);
+ }
+ }
+
+ /*
+ * Reset MMCR0 to its normal value. This will set PMXE and
+ * clear FC (freeze counters) and PMAO (perf mon alert occurred)
+ * and thus allow interrupts to occur again.
+ * XXX might want to use MSR.PM to keep the counters frozen until
+ * we get back out of this interrupt.
+ */
+ write_mmcr0(cpuhw, cpuhw->mmcr[0]);
+
+ if (nmi)
+ nmi_exit();
+ else
+ irq_exit();
+}
+
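+/*
+ * Per-cpu initialization: clear the per-cpu state and start with
+ * the counters frozen (MMCR0_FC set).
+ */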
+void hw_perf_counter_setup(int cpu)
+{
+ struct cpu_hw_counters *cpuhw = &per_cpu(cpu_hw_counters, cpu);
+
+ memset(cpuhw, 0, sizeof(*cpuhw));
+ cpuhw->mmcr[0] = MMCR0_FC;
+}
+
+extern struct power_pmu power4_pmu;
+extern struct power_pmu ppc970_pmu;
+extern struct power_pmu power5_pmu;
+extern struct power_pmu power5p_pmu;
+extern struct power_pmu power6_pmu;
+extern struct power_pmu power7_pmu;
+
+static int init_perf_counters(void)
+{
+ unsigned long pvr;
+
+ /* XXX should get this from cputable */
+ pvr = mfspr(SPRN_PVR);
+ switch (PVR_VER(pvr)) {
+ case PV_POWER4:
+ case PV_POWER4p:
+ ppmu = &power4_pmu;
+ break;
+ case PV_970:
+ case PV_970FX:
+ case PV_970MP:
+ ppmu = &ppc970_pmu;
+ break;
+ case PV_POWER5:
+ ppmu = &power5_pmu;
+ break;
+ case PV_POWER5p:
+ ppmu = &power5p_pmu;
+ break;
+	case 0x3e:		/* POWER6 */
+		ppmu = &power6_pmu;
+		break;
+	case 0x3f:		/* POWER7 */
+		ppmu = &power7_pmu;
+		break;
+ }
+
+ /*
+ * Use FCHV to ignore kernel events if MSR.HV is set.
+ */
+ if (mfmsr() & MSR_HV)
+ freeze_counters_kernel = MMCR0_FCHV;
+
+ return 0;
+}
+
+arch_initcall(init_perf_counters);
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c
new file mode 100644
index 000000000000..07bd308a5fa7
--- /dev/null
+++ b/arch/powerpc/kernel/power4-pmu.c
@@ -0,0 +1,598 @@
+/*
+ * Performance counter support for POWER4 (GP) and POWER4+ (GQ) processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER4
+ */
+#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK 0xf
+#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK 0xf
+#define PM_LOWER_SH 6
+#define PM_LOWER_MSK 1
+#define PM_LOWER_MSKS 0x40
+#define PM_BYTE_SH 4 /* Byte number of event bus to use */
+#define PM_BYTE_MSK 3
+#define PM_PMCSEL_MSK 7
+
+/*
+ * Unit code values
+ */
+#define PM_FPU 1
+#define PM_ISU1 2
+#define PM_IFU 3
+#define PM_IDU0 4
+#define PM_ISU1_ALT 6
+#define PM_ISU2 7
+#define PM_IFU_ALT 8
+#define PM_LSU0 9
+#define PM_LSU1 0xc
+#define PM_GPS 0xf
+
+/*
+ * Bits in MMCR0 for POWER4
+ */
+#define MMCR0_PMC1SEL_SH 8
+#define MMCR0_PMC2SEL_SH 1
+#define MMCR_PMCSEL_MSK 0x1f
+
+/*
+ * Bits in MMCR1 for POWER4
+ */
+#define MMCR1_TTM0SEL_SH 62
+#define MMCR1_TTC0SEL_SH 61
+#define MMCR1_TTM1SEL_SH 59
+#define MMCR1_TTC1SEL_SH 58
+#define MMCR1_TTM2SEL_SH 56
+#define MMCR1_TTC2SEL_SH 55
+#define MMCR1_TTM3SEL_SH 53
+#define MMCR1_TTC3SEL_SH 52
+#define MMCR1_TTMSEL_MSK 3
+#define MMCR1_TD_CP_DBG0SEL_SH 50
+#define MMCR1_TD_CP_DBG1SEL_SH 48
+#define MMCR1_TD_CP_DBG2SEL_SH 46
+#define MMCR1_TD_CP_DBG3SEL_SH 44
+#define MMCR1_DEBUG0SEL_SH 43
+#define MMCR1_DEBUG1SEL_SH 42
+#define MMCR1_DEBUG2SEL_SH 41
+#define MMCR1_DEBUG3SEL_SH 40
+#define MMCR1_PMC1_ADDER_SEL_SH 39
+#define MMCR1_PMC2_ADDER_SEL_SH 38
+#define MMCR1_PMC6_ADDER_SEL_SH 37
+#define MMCR1_PMC5_ADDER_SEL_SH 36
+#define MMCR1_PMC8_ADDER_SEL_SH 35
+#define MMCR1_PMC7_ADDER_SEL_SH 34
+#define MMCR1_PMC3_ADDER_SEL_SH 33
+#define MMCR1_PMC4_ADDER_SEL_SH 32
+#define MMCR1_PMC3SEL_SH 27
+#define MMCR1_PMC4SEL_SH 22
+#define MMCR1_PMC5SEL_SH 17
+#define MMCR1_PMC6SEL_SH 12
+#define MMCR1_PMC7SEL_SH 7
+#define MMCR1_PMC8SEL_SH 2 /* note bit 0 is in MMCRA for GP */
+
+static short mmcr1_adder_bits[8] = {
+ MMCR1_PMC1_ADDER_SEL_SH,
+ MMCR1_PMC2_ADDER_SEL_SH,
+ MMCR1_PMC3_ADDER_SEL_SH,
+ MMCR1_PMC4_ADDER_SEL_SH,
+ MMCR1_PMC5_ADDER_SEL_SH,
+ MMCR1_PMC6_ADDER_SEL_SH,
+ MMCR1_PMC7_ADDER_SEL_SH,
+ MMCR1_PMC8_ADDER_SEL_SH
+};
+
+/*
+ * Bits in MMCRA
+ */
+#define MMCRA_PMC8SEL0_SH 17 /* PMC8SEL bit 0 for GP */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * |[ >[ >[ >|||[ >[ >< >< >< >< ><><><><><><><><>
+ * | UC1 UC2 UC3 ||| PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8
+ * \SMPL ||\TTC3SEL
+ * |\TTC_IFU_SEL
+ * \TTM2SEL0
+ *
+ * SMPL - SAMPLE_ENABLE constraint
+ * 56: SAMPLE_ENABLE value 0x0100_0000_0000_0000
+ *
+ * UC1 - unit constraint 1: can't have all three of FPU/ISU1/IDU0|ISU2
+ * 55: UC1 error 0x0080_0000_0000_0000
+ * 54: FPU events needed 0x0040_0000_0000_0000
+ * 53: ISU1 events needed 0x0020_0000_0000_0000
+ * 52: IDU0|ISU2 events needed 0x0010_0000_0000_0000
+ *
+ * UC2 - unit constraint 2: can't have all three of FPU/IFU/LSU0
+ * 51: UC2 error 0x0008_0000_0000_0000
+ * 50: FPU events needed 0x0004_0000_0000_0000
+ * 49: IFU events needed 0x0002_0000_0000_0000
+ * 48: LSU0 events needed 0x0001_0000_0000_0000
+ *
+ * UC3 - unit constraint 3: can't have all four of LSU0/IFU/IDU0|ISU2/ISU1
+ * 47: UC3 error 0x8000_0000_0000
+ * 46: LSU0 events needed 0x4000_0000_0000
+ * 45: IFU events needed 0x2000_0000_0000
+ * 44: IDU0|ISU2 events needed 0x1000_0000_0000
+ * 43: ISU1 events needed 0x0800_0000_0000
+ *
+ * TTM2SEL0
+ * 42: 0 = IDU0 events needed
+ * 1 = ISU2 events needed 0x0400_0000_0000
+ *
+ * TTC_IFU_SEL
+ * 41: 0 = IFU.U events needed
+ * 1 = IFU.L events needed 0x0200_0000_0000
+ *
+ * TTC3SEL
+ * 40: 0 = LSU1.U events needed
+ * 1 = LSU1.L events needed 0x0100_0000_0000
+ *
+ * PS1
+ * 39: PS1 error 0x0080_0000_0000
+ * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
+ *
+ * PS2
+ * 35: PS2 error 0x0008_0000_0000
+ * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
+ *
+ * B0
+ * 28-31: Byte 0 event source 0xf000_0000
+ * 1 = FPU
+ * 2 = ISU1
+ * 3 = IFU
+ * 4 = IDU0
+ * 7 = ISU2
+ * 9 = LSU0
+ * c = LSU1
+ * f = GPS
+ *
+ * B1, B2, B3
+ * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
+ *
+ * P8
+ * 15: P8 error 0x8000
+ * 14-15: Count of events needing PMC8
+ *
+ * P1..P7
+ * 0-13: Count of events needing PMC1..PMC7
+ *
+ * Note: this doesn't allow events using IFU.U to be combined with events
+ * using IFU.L, though that is feasible (using TTM0 and TTM2). However
+ * there are no listed events for IFU.L (they are debug events not
+ * verified for performance monitoring) so this shouldn't cause a
+ * problem.
+ */
+
+static struct unitinfo {
+ u64 value, mask;
+ int unit;
+ int lowerbit;
+} p4_unitinfo[16] = {
+ [PM_FPU] = { 0x44000000000000ull, 0x88000000000000ull, PM_FPU, 0 },
+ [PM_ISU1] = { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+ [PM_ISU1_ALT] =
+ { 0x20080000000000ull, 0x88000000000000ull, PM_ISU1, 0 },
+ [PM_IFU] = { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+ [PM_IFU_ALT] =
+ { 0x02200000000000ull, 0x08820000000000ull, PM_IFU, 41 },
+ [PM_IDU0] = { 0x10100000000000ull, 0x80840000000000ull, PM_IDU0, 1 },
+ [PM_ISU2] = { 0x10140000000000ull, 0x80840000000000ull, PM_ISU2, 0 },
+ [PM_LSU0] = { 0x01400000000000ull, 0x08800000000000ull, PM_LSU0, 0 },
+ [PM_LSU1] = { 0x00000000000000ull, 0x00010000000000ull, PM_LSU1, 40 },
+ [PM_GPS] = { 0x00000000000000ull, 0x00000000000000ull, PM_GPS, 0 }
+};
+
+static unsigned char direct_marked_event[8] = {
+ (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
+ (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
+ (1<<3), /* PMC3: PM_MRK_ST_CMPL_INT */
+ (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
+ (1<<4) | (1<<5), /* PMC5: PM_MRK_GRP_TIMEO */
+ (1<<3) | (1<<4) | (1<<5),
+ /* PMC6: PM_MRK_ST_GPS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
+ (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
+ (1<<4), /* PMC8: PM_MRK_LSU_FIN */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int p4_marked_instr_event(u64 event)
+{
+ int pmc, psel, unit, byte, bit;
+ unsigned int mask;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ psel = event & PM_PMCSEL_MSK;
+ if (pmc) {
+ if (direct_marked_event[pmc - 1] & (1 << psel))
+ return 1;
+ if (psel == 0) /* add events */
+ bit = (pmc <= 4)? pmc - 1: 8 - pmc;
+ else if (psel == 6) /* decode events */
+ bit = 4;
+ else
+ return 0;
+ } else
+ bit = psel;
+
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ mask = 0;
+ switch (unit) {
+ case PM_LSU1:
+ if (event & PM_LOWER_MSKS)
+ mask = 1 << 28; /* byte 7 bit 4 */
+ else
+ mask = 6 << 24; /* byte 3 bits 1 and 2 */
+ break;
+ case PM_LSU0:
+ /* byte 3, bit 3; byte 2 bits 0,2,3,4,5; byte 1 */
+ mask = 0x083dff00;
+ }
+ return (mask >> (byte * 8 + bit)) & 1;
+}
+
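+/*
+ * Compute the constraint mask/value pair for an event, using the
+ * bit layout described above.  Returns -1 if the event is not valid.
+ */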
+static int p4_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+ int pmc, byte, unit, lower, sh;
+ u64 mask = 0, value = 0;
+ int grp = -1;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 8)
+ return -1;
+ sh = (pmc - 1) * 2;
+ mask |= 2 << sh;
+ value |= 1 << sh;
+ grp = ((pmc - 1) >> 1) & 1;
+ }
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ if (unit) {
+ lower = (event >> PM_LOWER_SH) & PM_LOWER_MSK;
+
+ /*
+ * Bus events on bytes 0 and 2 can be counted
+ * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
+ */
+ if (!pmc)
+ grp = byte & 1;
+
+ if (!p4_unitinfo[unit].unit)
+ return -1;
+ mask |= p4_unitinfo[unit].mask;
+ value |= p4_unitinfo[unit].value;
+ sh = p4_unitinfo[unit].lowerbit;
+ if (sh > 1)
+ value |= (u64)lower << sh;
+ else if (lower != sh)
+ return -1;
+ unit = p4_unitinfo[unit].unit;
+
+ /* Set byte lane select field */
+ mask |= 0xfULL << (28 - 4 * byte);
+ value |= (u64)unit << (28 - 4 * byte);
+ }
+ if (grp == 0) {
+ /* increment PMC1/2/5/6 field */
+ mask |= 0x8000000000ull;
+ value |= 0x1000000000ull;
+ } else {
+ /* increment PMC3/4/7/8 field */
+ mask |= 0x800000000ull;
+ value |= 0x100000000ull;
+ }
+
+ /* Marked instruction events need sample_enable set */
+ if (p4_marked_instr_event(event)) {
+ mask |= 1ull << 56;
+ value |= 1ull << 56;
+ }
+
+ /* PMCSEL=6 decode events on byte 2 need sample_enable clear */
+ if (pmc && (event & PM_PMCSEL_MSK) == 6 && byte == 2)
+ mask |= 1ull << 56;
+
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
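+/* Alternative event codes for PM_INST_CMPL */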
+static unsigned int ppc_inst_cmpl[] = {
+ 0x1001, 0x4001, 0x6001, 0x7001, 0x8001
+};
+
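+/*
+ * Fill alt[] with the event codes equivalent to 'event' (including
+ * 'event' itself) and return how many there are.
+ */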
+static int p4_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, na;
+
+ alt[0] = event;
+ na = 1;
+
+ /* 2 possibilities for PM_GRP_DISP_REJECT */
+ if (event == 0x8003 || event == 0x0224) {
+ alt[1] = event ^ (0x8003 ^ 0x0224);
+ return 2;
+ }
+
+ /* 2 possibilities for PM_ST_MISS_L1 */
+ if (event == 0x0c13 || event == 0x0c23) {
+ alt[1] = event ^ (0x0c13 ^ 0x0c23);
+ return 2;
+ }
+
+ /* several possibilities for PM_INST_CMPL */
+ for (i = 0; i < ARRAY_SIZE(ppc_inst_cmpl); ++i) {
+ if (event == ppc_inst_cmpl[i]) {
+ for (j = 0; j < ARRAY_SIZE(ppc_inst_cmpl); ++j)
+ if (j != i)
+ alt[na++] = ppc_inst_cmpl[j];
+ break;
+ }
+ }
+
+ return na;
+}
+
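+/*
+ * Compute MMCR0/MMCR1/MMCRA settings and PMC assignments (hwc[])
+ * for a set of n_ev events, or return -1 if they can't all be
+ * accommodated.
+ */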
+static int p4_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], u64 mmcr[])
+{
+ u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned int pmc, unit, byte, psel, lower;
+ unsigned int ttm, grp;
+ unsigned int pmc_inuse = 0;
+ unsigned int pmc_grp_use[2];
+ unsigned char busbyte[4];
+ unsigned char unituse[16];
+ unsigned int unitlower = 0;
+ int i;
+
+ if (n_ev > 8)
+ return -1;
+
+ /* First pass to count resource use */
+ pmc_grp_use[0] = pmc_grp_use[1] = 0;
+ memset(busbyte, 0, sizeof(busbyte));
+ memset(unituse, 0, sizeof(unituse));
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ pmc_inuse |= 1 << (pmc - 1);
+ /* count 1/2/5/6 vs 3/4/7/8 use */
+ ++pmc_grp_use[((pmc - 1) >> 1) & 1];
+ }
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ lower = (event[i] >> PM_LOWER_SH) & PM_LOWER_MSK;
+ if (unit) {
+ if (!pmc)
+ ++pmc_grp_use[byte & 1];
+ if (unit == 6 || unit == 8)
+ /* map alt ISU1/IFU codes: 6->2, 8->3 */
+ unit = (unit >> 1) - 1;
+ if (busbyte[byte] && busbyte[byte] != unit)
+ return -1;
+ busbyte[byte] = unit;
+ lower <<= unit;
+ if (unituse[unit] && lower != (unitlower & lower))
+ return -1;
+ unituse[unit] = 1;
+ unitlower |= lower;
+ }
+ }
+ if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
+ return -1;
+
+ /*
+ * Assign resources and set multiplexer selects.
+ *
+ * Units 1,2,3 are on TTM0, 4,6,7 on TTM1, 8,10 on TTM2.
+ * Each TTMx can only select one unit, but since
+ * units 2 and 6 are both ISU1, and 3 and 8 are both IFU,
+ * we have some choices.
+ */
+ if (unituse[2] & (unituse[1] | (unituse[3] & unituse[9]))) {
+ unituse[6] = 1; /* Move 2 to 6 */
+ unituse[2] = 0;
+ }
+ if (unituse[3] & (unituse[1] | unituse[2])) {
+ unituse[8] = 1; /* Move 3 to 8 */
+ unituse[3] = 0;
+ unitlower = (unitlower & ~8) | ((unitlower & 8) << 5);
+ }
+ /* Check only one unit per TTMx */
+ if (unituse[1] + unituse[2] + unituse[3] > 1 ||
+ unituse[4] + unituse[6] + unituse[7] > 1 ||
+ unituse[8] + unituse[9] > 1 ||
+ (unituse[5] | unituse[10] | unituse[11] |
+ unituse[13] | unituse[14]))
+ return -1;
+
+ /* Set TTMxSEL fields. Note, units 1-3 => TTM0SEL codes 0-2 */
+ mmcr1 |= (u64)(unituse[3] * 2 + unituse[2]) << MMCR1_TTM0SEL_SH;
+ mmcr1 |= (u64)(unituse[7] * 3 + unituse[6] * 2) << MMCR1_TTM1SEL_SH;
+ mmcr1 |= (u64)unituse[9] << MMCR1_TTM2SEL_SH;
+
+ /* Set TTCxSEL fields. */
+ if (unitlower & 0xe)
+ mmcr1 |= 1ull << MMCR1_TTC0SEL_SH;
+ if (unitlower & 0xf0)
+ mmcr1 |= 1ull << MMCR1_TTC1SEL_SH;
+ if (unitlower & 0xf00)
+ mmcr1 |= 1ull << MMCR1_TTC2SEL_SH;
+ if (unitlower & 0x7000)
+ mmcr1 |= 1ull << MMCR1_TTC3SEL_SH;
+
+ /* Set byte lane select fields. */
+ for (byte = 0; byte < 4; ++byte) {
+ unit = busbyte[byte];
+ if (!unit)
+ continue;
+ if (unit == 0xf) {
+ /* special case for GPS */
+ mmcr1 |= 1ull << (MMCR1_DEBUG0SEL_SH - byte);
+ } else {
+ if (!unituse[unit])
+ ttm = unit - 1; /* 2->1, 3->2 */
+ else
+ ttm = unit >> 2;
+ mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2*byte);
+ }
+ }
+
+ /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ psel = event[i] & PM_PMCSEL_MSK;
+ if (!pmc) {
+ /* Bus event or 00xxx direct event (off or cycles) */
+ if (unit)
+ psel |= 0x10 | ((byte & 2) << 2);
+ for (pmc = 0; pmc < 8; ++pmc) {
+ if (pmc_inuse & (1 << pmc))
+ continue;
+ grp = (pmc >> 1) & 1;
+ if (unit) {
+ if (grp == (byte & 1))
+ break;
+ } else if (pmc_grp_use[grp] < 4) {
+ ++pmc_grp_use[grp];
+ break;
+ }
+ }
+ pmc_inuse |= 1 << pmc;
+ } else {
+ /* Direct event */
+ --pmc;
+ if (psel == 0 && (byte & 2))
+ /* add events on higher-numbered bus */
+ mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
+ else if (psel == 6 && byte == 3)
+ /* seem to need to set sample_enable here */
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ psel |= 8;
+ }
+ if (pmc <= 1)
+ mmcr0 |= psel << (MMCR0_PMC1SEL_SH - 7 * pmc);
+ else
+ mmcr1 |= psel << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+ if (pmc == 7) /* PMC8 */
+ mmcra |= (psel & 1) << MMCRA_PMC8SEL0_SH;
+ hwc[i] = pmc;
+ if (p4_marked_instr_event(event[i]))
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ }
+
+ if (pmc_inuse & 1)
+ mmcr0 |= MMCR0_PMC1CE;
+ if (pmc_inuse & 0xfe)
+ mmcr0 |= MMCR0_PMCjCE;
+
+ mmcra |= 0x2000; /* mark only one IOP per PPC instruction */
+
+ /* Return MMCRx values */
+ mmcr[0] = mmcr0;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ return 0;
+}
+
+static void p4_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+ /*
+ * Setting the PMCxSEL field to 0 disables PMC x.
+ * (Note that pmc is 0-based here, not 1-based.)
+ */
+ if (pmc <= 1) {
+ mmcr[0] &= ~(0x1fUL << (MMCR0_PMC1SEL_SH - 7 * pmc));
+ } else {
+ mmcr[1] &= ~(0x1fUL << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2)));
+ if (pmc == 7)
+ mmcr[2] &= ~(1UL << MMCRA_PMC8SEL0_SH);
+ }
+}
+
+static int p4_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 7,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x1001,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8c10, /* PM_LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c10, /* PM_LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x330, /* PM_BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x331, /* PM_BR_MPRED_CR */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power4_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x8c10, 0x3c10 },
+ [C(OP_WRITE)] = { 0x7c10, 0xc13 },
+ [C(OP_PREFETCH)] = { 0xc35, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0xc34, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x904 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x900 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x330, 0x331 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu power4_pmu = {
+ .n_counter = 8,
+ .max_alternatives = 5,
+ .add_fields = 0x0000001100005555ull,
+ .test_adder = 0x0011083300000000ull,
+ .compute_mmcr = p4_compute_mmcr,
+ .get_constraint = p4_get_constraint,
+ .get_alternatives = p4_get_alternatives,
+ .disable_pmc = p4_disable_pmc,
+ .n_generic = ARRAY_SIZE(p4_generic_events),
+ .generic_events = p4_generic_events,
+ .cache_events = &power4_cache_events,
+};
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c
new file mode 100644
index 000000000000..41e5d2d958d4
--- /dev/null
+++ b/arch/powerpc/kernel/power5+-pmu.c
@@ -0,0 +1,671 @@
+/*
+ * Performance counter support for POWER5+/++ (not POWER5) processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER5+ (POWER5 GS) and POWER5++ (POWER5 GS DD3)
+ */
+#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK 0xf
+#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK 0xf
+#define PM_BYTE_SH 12 /* Byte number of event bus to use */
+#define PM_BYTE_MSK 7
+#define PM_GRS_SH 8 /* Storage subsystem mux select */
+#define PM_GRS_MSK 7
+#define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
+#define PM_PMCSEL_MSK 0x7f
+
+/* Values in PM_UNIT field */
+#define PM_FPU 0
+#define PM_ISU0 1
+#define PM_IFU 2
+#define PM_ISU1 3
+#define PM_IDU 4
+#define PM_ISU0_ALT 6
+#define PM_GRS 7
+#define PM_LSU0 8
+#define PM_LSU1 0xc
+#define PM_LASTUNIT 0xc
+
+/*
+ * Bits in MMCR1 for POWER5+
+ */
+#define MMCR1_TTM0SEL_SH 62
+#define MMCR1_TTM1SEL_SH 60
+#define MMCR1_TTM2SEL_SH 58
+#define MMCR1_TTM3SEL_SH 56
+#define MMCR1_TTMSEL_MSK 3
+#define MMCR1_TD_CP_DBG0SEL_SH 54
+#define MMCR1_TD_CP_DBG1SEL_SH 52
+#define MMCR1_TD_CP_DBG2SEL_SH 50
+#define MMCR1_TD_CP_DBG3SEL_SH 48
+#define MMCR1_GRS_L2SEL_SH 46
+#define MMCR1_GRS_L2SEL_MSK 3
+#define MMCR1_GRS_L3SEL_SH 44
+#define MMCR1_GRS_L3SEL_MSK 3
+#define MMCR1_GRS_MCSEL_SH 41
+#define MMCR1_GRS_MCSEL_MSK 7
+#define MMCR1_GRS_FABSEL_SH 39
+#define MMCR1_GRS_FABSEL_MSK 3
+#define MMCR1_PMC1_ADDER_SEL_SH 35
+#define MMCR1_PMC2_ADDER_SEL_SH 34
+#define MMCR1_PMC3_ADDER_SEL_SH 33
+#define MMCR1_PMC4_ADDER_SEL_SH 32
+#define MMCR1_PMC1SEL_SH 25
+#define MMCR1_PMC2SEL_SH 17
+#define MMCR1_PMC3SEL_SH 9
+#define MMCR1_PMC4SEL_SH 1
+#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK 0x7f
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * [ ><><>< ><> <><>[ > < >< >< >< ><><><><><><>
+ * NC G0G1G2 G3 T0T1 UC B0 B1 B2 B3 P6P5P4P3P2P1
+ *
+ * NC - number of counters
+ * 51: NC error 0x0008_0000_0000_0000
+ * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
+ *
+ * G0..G3 - GRS mux constraints
+ * 46-47: GRS_L2SEL value
+ * 44-45: GRS_L3SEL value
+ * 41-43: GRS_MCSEL value
+ * 39-40: GRS_FABSEL value
+ * Note that these match up with their bit positions in MMCR1
+ *
+ * T0 - TTM0 constraint
+ * 36-37: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0x30_0000_0000
+ *
+ * T1 - TTM1 constraint
+ * 34-35: TTM1SEL value (0=IDU, 3=GRS) 0x0c_0000_0000
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
+ * 33: UC3 error 0x02_0000_0000
+ * 32: FPU|IFU|ISU1 events needed 0x01_0000_0000
+ * 31: ISU0 events needed 0x00_8000_0000
+ * 30: IDU|GRS events needed 0x00_4000_0000
+ *
+ * B0
+ * 24-27: Byte 0 event source 0x0f00_0000
+ * Encoding as for the event code
+ *
+ * B1, B2, B3
+ * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
+ *
+ * P6
+ * 11: P6 error 0x800
+ * 10-11: Count of events needing PMC6
+ *
+ * P1..P5
+ * 0-9: Count of events needing PMC1..PMC5
+ */
+
+static const int grsel_shift[8] = {
+ MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
+ MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
+ MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
+};
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0x3200000000ull, 0x0100000000ull },
+ [PM_ISU0] = { 0x0200000000ull, 0x0080000000ull },
+ [PM_ISU1] = { 0x3200000000ull, 0x3100000000ull },
+ [PM_IFU] = { 0x3200000000ull, 0x2100000000ull },
+ [PM_IDU] = { 0x0e00000000ull, 0x0040000000ull },
+ [PM_GRS] = { 0x0e00000000ull, 0x0c40000000ull },
+};
+
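+/*
+ * Compute the constraint mask/value pair for an event, following
+ * the bit layout described above.  Returns -1 for invalid events.
+ */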
+static int power5p_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+ int pmc, byte, unit, sh;
+ int bit, fmask;
+ u64 mask = 0, value = 0;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+ sh = (pmc - 1) * 2;
+ mask |= 2 << sh;
+ value |= 1 << sh;
+ if (pmc >= 5 && !(event == 0x500009 || event == 0x600005))
+ return -1;
+ }
+ if (event & PM_BUSEVENT_MSK) {
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ if (unit > PM_LASTUNIT)
+ return -1;
+ if (unit == PM_ISU0_ALT)
+ unit = PM_ISU0;
+ mask |= unit_cons[unit][0];
+ value |= unit_cons[unit][1];
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ if (byte >= 4) {
+ if (unit != PM_LSU1)
+ return -1;
+ /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
+ ++unit;
+ byte &= 3;
+ }
+ if (unit == PM_GRS) {
+ bit = event & 7;
+ fmask = (bit == 6)? 7: 3;
+ sh = grsel_shift[bit];
+ mask |= (u64)fmask << sh;
+ value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ }
+ /* Set byte lane select field */
+ mask |= 0xfULL << (24 - 4 * byte);
+ value |= (u64)unit << (24 - 4 * byte);
+ }
+ if (pmc < 5) {
+ /* need a counter from PMC1-4 set */
+ mask |= 0x8000000000000ull;
+ value |= 0x1000000000000ull;
+ }
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
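+/* Events on PMC5 and PMC6 are the limited events on POWER5+ */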
+static int power5p_limited_pmc_event(u64 event)
+{
+ int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+
+ return pmc == 5 || pmc == 6;
+}
+
+#define MAX_ALT 3 /* at most 3 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x100c0, 0x40001f }, /* PM_GCT_FULL_CYC */
+ { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */
+ { 0x230e2, 0x323087 }, /* PM_BR_PRED_CR */
+ { 0x230e3, 0x223087, 0x3230a0 }, /* PM_BR_PRED_TA */
+ { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */
+ { 0x800c4, 0xc20e0 }, /* PM_DTLB_MISS */
+ { 0xc50c6, 0xc60e0 }, /* PM_MRK_DTLB_MISS */
+ { 0x100005, 0x600005 }, /* PM_RUN_CYC */
+ { 0x100009, 0x200009 }, /* PM_INST_CMPL */
+ { 0x200015, 0x300015 }, /* PM_LSU_LMQ_SRQ_EMPTY_CYC */
+ { 0x300009, 0x400009 }, /* PM_INST_DISP */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(unsigned int event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+ return -1;
+}
+
+static const unsigned char bytedecode_alternatives[4][4] = {
+ /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 },
+ /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e },
+ /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 },
+ /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e }
+};
+
+/*
+ * Some direct events for decodes of event bus byte 3 have alternative
+ * PMCSEL values on other counters. This returns the alternative
+ * event code for those that do, or -1 otherwise. This also handles
+ * alternative PMCSEL values for add events.
+ */
+static s64 find_alternative_bdecode(u64 event)
+{
+ int pmc, altpmc, pp, j;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc == 0 || pmc > 4)
+ return -1;
+ altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */
+ pp = event & PM_PMCSEL_MSK;
+ for (j = 0; j < 4; ++j) {
+ if (bytedecode_alternatives[pmc - 1][j] == pp) {
+ return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
+ (altpmc << PM_PMC_SH) |
+ bytedecode_alternatives[altpmc - 1][j];
+ }
+ }
+
+ /* new decode alternatives for power5+ */
+ if (pmc == 1 && (pp == 0x0d || pp == 0x0e))
+ return event + (2 << PM_PMC_SH) + (0x2e - 0x0d);
+ if (pmc == 3 && (pp == 0x2e || pp == 0x2f))
+ return event - (2 << PM_PMC_SH) - (0x2e - 0x0d);
+
+ /* alternative add event encodings */
+ if (pp == 0x10 || pp == 0x28)
+ return ((event ^ (0x10 ^ 0x28)) & ~PM_PMC_MSKS) |
+ (altpmc << PM_PMC_SH);
+
+ return -1;
+}
+
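+/*
+ * Fill alt[] with the alternatives for 'event', taking the
+ * PPMU_ONLY_COUNT_RUN and limited-PMC flags into account, and
+ * return how many there are.
+ */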
+static int power5p_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nalt = 1;
+ int nlim;
+ s64 ae;
+
+ alt[0] = event;
+ nalt = 1;
+ nlim = power5p_limited_pmc_event(event);
+ i = find_alternative(event);
+ if (i >= 0) {
+ for (j = 0; j < MAX_ALT; ++j) {
+ ae = event_alternatives[i][j];
+ if (ae && ae != event)
+ alt[nalt++] = ae;
+ nlim += power5p_limited_pmc_event(ae);
+ }
+ } else {
+ ae = find_alternative_bdecode(event);
+ if (ae > 0)
+ alt[nalt++] = ae;
+ }
+
+ if (flags & PPMU_ONLY_COUNT_RUN) {
+ /*
+ * We're only counting in RUN state,
+ * so PM_CYC is equivalent to PM_RUN_CYC
+ * and PM_INST_CMPL === PM_RUN_INST_CMPL.
+ * This doesn't include alternatives that don't provide
+ * any extra flexibility in assigning PMCs (e.g.
+ * 0x100005 for PM_RUN_CYC vs. 0xf for PM_CYC).
+ * Note that even with these additional alternatives
+ * we never end up with more than 3 alternatives for any event.
+ */
+ j = nalt;
+ for (i = 0; i < nalt; ++i) {
+ switch (alt[i]) {
+ case 0xf: /* PM_CYC */
+ alt[j++] = 0x600005; /* PM_RUN_CYC */
+ ++nlim;
+ break;
+ case 0x600005: /* PM_RUN_CYC */
+ alt[j++] = 0xf;
+ break;
+ case 0x100009: /* PM_INST_CMPL */
+ alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */
+ ++nlim;
+ break;
+ case 0x500009: /* PM_RUN_INST_CMPL */
+ alt[j++] = 0x100009; /* PM_INST_CMPL */
+ alt[j++] = 0x200009;
+ break;
+ }
+ }
+ nalt = j;
+ }
+
+ if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
+ /* remove the limited PMC events */
+ j = 0;
+ for (i = 0; i < nalt; ++i) {
+ if (!power5p_limited_pmc_event(alt[i])) {
+ alt[j] = alt[i];
+ ++j;
+ }
+ }
+ nalt = j;
+ } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
+ /* remove all but the limited PMC events */
+ j = 0;
+ for (i = 0; i < nalt; ++i) {
+ if (power5p_limited_pmc_event(alt[i])) {
+ alt[j] = alt[i];
+ ++j;
+ }
+ }
+ nalt = j;
+ }
+
+ return nalt;
+}
+
+/*
+ * Map of which direct events on which PMCs are marked instruction events.
+ * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
+ * Bit 0 is set if it is marked for all PMCs.
+ * The 0x80 bit indicates a byte decode PMCSEL value.
+ */
+static unsigned char direct_event_is_marked[0x28] = {
+ 0, /* 00 */
+ 0x1f, /* 01 PM_IOPS_CMPL */
+ 0x2, /* 02 PM_MRK_GRP_DISP */
+ 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
+ 0, /* 04 */
+ 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
+ 0x80, /* 06 */
+ 0x80, /* 07 */
+ 0, 0, 0,/* 08 - 0a */
+ 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
+ 0, /* 0c */
+ 0x80, /* 0d */
+ 0x80, /* 0e */
+ 0, /* 0f */
+ 0, /* 10 */
+ 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
+ 0, /* 12 */
+ 0x10, /* 13 PM_MRK_GRP_CMPL */
+ 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
+ 0x2, /* 15 PM_MRK_GRP_ISSUED */
+ 0x80, /* 16 */
+ 0x80, /* 17 */
+ 0, 0, 0, 0, 0,
+ 0x80, /* 1d */
+ 0x80, /* 1e */
+ 0, /* 1f */
+ 0x80, /* 20 */
+ 0x80, /* 21 */
+ 0x80, /* 22 */
+ 0x80, /* 23 */
+ 0x80, /* 24 */
+ 0x80, /* 25 */
+ 0x80, /* 26 */
+ 0x80, /* 27 */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power5p_marked_instr_event(u64 event)
+{
+ int pmc, psel;
+ int bit, byte, unit;
+ u32 mask;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ psel = event & PM_PMCSEL_MSK;
+ if (pmc >= 5)
+ return 0;
+
+ bit = -1;
+ if (psel < sizeof(direct_event_is_marked)) {
+ if (direct_event_is_marked[psel] & (1 << pmc))
+ return 1;
+ if (direct_event_is_marked[psel] & 0x80)
+ bit = 4;
+ else if (psel == 0x08)
+ bit = pmc - 1;
+ else if (psel == 0x10)
+ bit = 4 - pmc;
+ else if (psel == 0x1b && (pmc == 1 || pmc == 3))
+ bit = 4;
+ } else if ((psel & 0x48) == 0x40) {
+ bit = psel & 7;
+ } else if (psel == 0x28) {
+ bit = pmc - 1;
+ } else if (pmc == 3 && (psel == 0x2e || psel == 0x2f)) {
+ bit = 4;
+ }
+
+ if (!(event & PM_BUSEVENT_MSK) || bit == -1)
+ return 0;
+
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ if (unit == PM_LSU0) {
+ /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
+ mask = 0x5dff00;
+ } else if (unit == PM_LSU1 && byte >= 4) {
+ byte -= 4;
+ /* byte 5 bits 6-7, byte 6 bits 0,4, byte 7 bits 0-4,6 */
+ mask = 0x5f11c000;
+ } else
+ return 0;
+
+ return (mask >> (byte * 8 + bit)) & 1;
+}
+
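+/*
+ * Work out the MMCR settings and PMC assignments (hwc[]) for a set
+ * of n_ev events on POWER5+; returns -1 if they can't all be
+ * scheduled together.
+ */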
+static int power5p_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], u64 mmcr[])
+{
+ u64 mmcr1 = 0;
+ u64 mmcra = 0;
+ unsigned int pmc, unit, byte, psel;
+ unsigned int ttm;
+ int i, isbus, bit, grsel;
+ unsigned int pmc_inuse = 0;
+ unsigned char busbyte[4];
+ unsigned char unituse[16];
+ int ttmuse;
+
+ if (n_ev > 6)
+ return -1;
+
+ /* First pass to count resource use */
+ memset(busbyte, 0, sizeof(busbyte));
+ memset(unituse, 0, sizeof(unituse));
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ pmc_inuse |= 1 << (pmc - 1);
+ }
+ if (event[i] & PM_BUSEVENT_MSK) {
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ if (unit > PM_LASTUNIT)
+ return -1;
+ if (unit == PM_ISU0_ALT)
+ unit = PM_ISU0;
+ if (byte >= 4) {
+ if (unit != PM_LSU1)
+ return -1;
+ ++unit;
+ byte &= 3;
+ }
+ if (busbyte[byte] && busbyte[byte] != unit)
+ return -1;
+ busbyte[byte] = unit;
+ unituse[unit] = 1;
+ }
+ }
+
+ /*
+ * Assign resources and set multiplexer selects.
+ *
+ * PM_ISU0 can go either on TTM0 or TTM1, but that's the only
+ * choice we have to deal with.
+ */
+ if (unituse[PM_ISU0] &
+ (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
+ unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */
+ unituse[PM_ISU0] = 0;
+ }
+ /* Set TTM[01]SEL fields. */
+ ttmuse = 0;
+ for (i = PM_FPU; i <= PM_ISU1; ++i) {
+ if (!unituse[i])
+ continue;
+ if (ttmuse++)
+ return -1;
+ mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ }
+ ttmuse = 0;
+ for (; i <= PM_GRS; ++i) {
+ if (!unituse[i])
+ continue;
+ if (ttmuse++)
+ return -1;
+ mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ }
+ if (ttmuse > 1)
+ return -1;
+
+ /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
+ for (byte = 0; byte < 4; ++byte) {
+ unit = busbyte[byte];
+ if (!unit)
+ continue;
+ if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
+ /* get ISU0 through TTM1 rather than TTM0 */
+ unit = PM_ISU0_ALT;
+ } else if (unit == PM_LSU1 + 1) {
+ /* select lower word of LSU1 for this byte */
+ mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ }
+ ttm = unit >> 2;
+ mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ }
+
+ /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ psel = event[i] & PM_PMCSEL_MSK;
+ isbus = event[i] & PM_BUSEVENT_MSK;
+ if (!pmc) {
+ /* Bus event or any-PMC direct event */
+ for (pmc = 0; pmc < 4; ++pmc) {
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ }
+ if (pmc >= 4)
+ return -1;
+ pmc_inuse |= 1 << pmc;
+ } else if (pmc <= 4) {
+ /* Direct event */
+ --pmc;
+ if (isbus && (byte & 2) &&
+ (psel == 8 || psel == 0x10 || psel == 0x28))
+ /* add events on higher-numbered bus */
+ mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ } else {
+ /* Instructions or run cycles on PMC5/6 */
+ --pmc;
+ }
+ if (isbus && unit == PM_GRS) {
+ bit = psel & 7;
+ grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
+ mmcr1 |= (u64)grsel << grsel_shift[bit];
+ }
+ if (power5p_marked_instr_event(event[i]))
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ if ((psel & 0x58) == 0x40 && (byte & 1) != ((pmc >> 1) & 1))
+ /* select alternate byte lane */
+ psel |= 0x10;
+ if (pmc <= 3)
+ mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
+ hwc[i] = pmc;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+ if (pmc_inuse & 1)
+ mmcr[0] = MMCR0_PMC1CE;
+ if (pmc_inuse & 0x3e)
+ mmcr[0] |= MMCR0_PMCjCE;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ return 0;
+}
+
+static void power5p_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+ if (pmc <= 3)
+ mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power5p_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0xf,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x1c10a8, /* LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power5p_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x1c10a8, 0x3c1088 },
+ [C(OP_WRITE)] = { 0x2c10a8, 0xc10c3 },
+ [C(OP_PREFETCH)] = { 0xc70e7, -1 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0xc50c3, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0xc20e4, 0x800c4 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x800c0 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x230e4, 0x230e5 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu power5p_pmu = {
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000000000055ull,
+ .test_adder = 0x3000040000000ull,
+ .compute_mmcr = power5p_compute_mmcr,
+ .get_constraint = power5p_get_constraint,
+ .get_alternatives = power5p_get_alternatives,
+ .disable_pmc = power5p_disable_pmc,
+ .limited_pmc_event = power5p_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6,
+ .n_generic = ARRAY_SIZE(power5p_generic_events),
+ .generic_events = power5p_generic_events,
+ .cache_events = &power5p_cache_events,
+};
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c
new file mode 100644
index 000000000000..05600b66221a
--- /dev/null
+++ b/arch/powerpc/kernel/power5-pmu.c
@@ -0,0 +1,611 @@
+/*
+ * Performance counter support for POWER5 (not POWER5++) processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER5 (not POWER5++)
+ */
+#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK 0xf
+#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH 16 /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK 0xf
+#define PM_BYTE_SH 12 /* Byte number of event bus to use */
+#define PM_BYTE_MSK 7
+#define PM_GRS_SH 8 /* Storage subsystem mux select */
+#define PM_GRS_MSK 7
+#define PM_BUSEVENT_MSK 0x80 /* Set if event uses event bus */
+#define PM_PMCSEL_MSK 0x7f
+
+/* Values in PM_UNIT field */
+#define PM_FPU 0
+#define PM_ISU0 1
+#define PM_IFU 2
+#define PM_ISU1 3
+#define PM_IDU 4
+#define PM_ISU0_ALT 6
+#define PM_GRS 7
+#define PM_LSU0 8
+#define PM_LSU1 0xc
+#define PM_LASTUNIT 0xc
+
+/*
+ * Bits in MMCR1 for POWER5
+ */
+#define MMCR1_TTM0SEL_SH 62
+#define MMCR1_TTM1SEL_SH 60
+#define MMCR1_TTM2SEL_SH 58
+#define MMCR1_TTM3SEL_SH 56
+#define MMCR1_TTMSEL_MSK 3
+#define MMCR1_TD_CP_DBG0SEL_SH 54
+#define MMCR1_TD_CP_DBG1SEL_SH 52
+#define MMCR1_TD_CP_DBG2SEL_SH 50
+#define MMCR1_TD_CP_DBG3SEL_SH 48
+#define MMCR1_GRS_L2SEL_SH 46
+#define MMCR1_GRS_L2SEL_MSK 3
+#define MMCR1_GRS_L3SEL_SH 44
+#define MMCR1_GRS_L3SEL_MSK 3
+#define MMCR1_GRS_MCSEL_SH 41
+#define MMCR1_GRS_MCSEL_MSK 7
+#define MMCR1_GRS_FABSEL_SH 39
+#define MMCR1_GRS_FABSEL_MSK 3
+#define MMCR1_PMC1_ADDER_SEL_SH 35
+#define MMCR1_PMC2_ADDER_SEL_SH 34
+#define MMCR1_PMC3_ADDER_SEL_SH 33
+#define MMCR1_PMC4_ADDER_SEL_SH 32
+#define MMCR1_PMC1SEL_SH 25
+#define MMCR1_PMC2SEL_SH 17
+#define MMCR1_PMC3SEL_SH 9
+#define MMCR1_PMC4SEL_SH 1
+#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK 0x7f
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * <><>[ ><><>< ><> [ >[ >[ >< >< >< >< ><><><><><><>
+ * T0T1 NC G0G1G2 G3 UC PS1PS2 B0 B1 B2 B3 P6P5P4P3P2P1
+ *
+ * T0 - TTM0 constraint
+ * 54-55: TTM0SEL value (0=FPU, 2=IFU, 3=ISU1) 0xc0_0000_0000_0000
+ *
+ * T1 - TTM1 constraint
+ * 52-53: TTM1SEL value (0=IDU, 3=GRS) 0x30_0000_0000_0000
+ *
+ * NC - number of counters
+ * 51: NC error 0x0008_0000_0000_0000
+ * 48-50: number of events needing PMC1-4 0x0007_0000_0000_0000
+ *
+ * G0..G3 - GRS mux constraints
+ * 46-47: GRS_L2SEL value
+ * 44-45: GRS_L3SEL value
+ * 41-43: GRS_MCSEL value
+ * 39-40: GRS_FABSEL value
+ * Note that these match up with their bit positions in MMCR1
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|ISU1, ISU0, IDU|GRS
+ * 37: UC3 error 0x20_0000_0000
+ * 36: FPU|IFU|ISU1 events needed 0x10_0000_0000
+ * 35: ISU0 events needed 0x08_0000_0000
+ * 34: IDU|GRS events needed 0x04_0000_0000
+ *
+ * PS1
+ * 33: PS1 error 0x2_0000_0000
+ * 31-32: count of events needing PMC1/2 0x1_8000_0000
+ *
+ * PS2
+ * 30: PS2 error 0x4000_0000
+ * 28-29: count of events needing PMC3/4 0x3000_0000
+ *
+ * B0
+ * 24-27: Byte 0 event source 0x0f00_0000
+ * Encoding as for the event code
+ *
+ * B1, B2, B3
+ * 20-23, 16-19, 12-15: Byte 1, 2, 3 event sources
+ *
+ * P1..P6
+ * 0-11: Count of events needing PMC1..PMC6
+ */
+
+static const int grsel_shift[8] = {
+ MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH, MMCR1_GRS_L2SEL_SH,
+ MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH, MMCR1_GRS_L3SEL_SH,
+ MMCR1_GRS_MCSEL_SH, MMCR1_GRS_FABSEL_SH
+};
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0xc0002000000000ull, 0x00001000000000ull },
+ [PM_ISU0] = { 0x00002000000000ull, 0x00000800000000ull },
+ [PM_ISU1] = { 0xc0002000000000ull, 0xc0001000000000ull },
+ [PM_IFU] = { 0xc0002000000000ull, 0x80001000000000ull },
+ [PM_IDU] = { 0x30002000000000ull, 0x00000400000000ull },
+ [PM_GRS] = { 0x30002000000000ull, 0x30000400000000ull },
+};
+
+static int power5_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+ int pmc, byte, unit, sh;
+ int bit, fmask;
+ u64 mask = 0, value = 0;
+ int grp = -1;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+ sh = (pmc - 1) * 2;
+ mask |= 2 << sh;
+ value |= 1 << sh;
+ if (pmc <= 4)
+ grp = (pmc - 1) >> 1;
+ else if (event != 0x500009 && event != 0x600005)
+ return -1;
+ }
+ if (event & PM_BUSEVENT_MSK) {
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ if (unit > PM_LASTUNIT)
+ return -1;
+ if (unit == PM_ISU0_ALT)
+ unit = PM_ISU0;
+ mask |= unit_cons[unit][0];
+ value |= unit_cons[unit][1];
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ if (byte >= 4) {
+ if (unit != PM_LSU1)
+ return -1;
+ /* Map LSU1 low word (bytes 4-7) to unit LSU1+1 */
+ ++unit;
+ byte &= 3;
+ }
+ if (unit == PM_GRS) {
+ bit = event & 7;
+ fmask = (bit == 6)? 7: 3;
+ sh = grsel_shift[bit];
+ mask |= (u64)fmask << sh;
+ value |= (u64)((event >> PM_GRS_SH) & fmask) << sh;
+ }
+ /*
+ * Bus events on bytes 0 and 2 can be counted
+ * on PMC1/2; bytes 1 and 3 on PMC3/4.
+ */
+ if (!pmc)
+ grp = byte & 1;
+ /* Set byte lane select field */
+ mask |= 0xfULL << (24 - 4 * byte);
+ value |= (u64)unit << (24 - 4 * byte);
+ }
+ if (grp == 0) {
+ /* increment PMC1/2 field */
+ mask |= 0x200000000ull;
+ value |= 0x080000000ull;
+ } else if (grp == 1) {
+ /* increment PMC3/4 field */
+ mask |= 0x40000000ull;
+ value |= 0x10000000ull;
+ }
+ if (pmc < 5) {
+ /* need a counter from PMC1-4 set */
+ mask |= 0x8000000000000ull;
+ value |= 0x1000000000000ull;
+ }
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
+#define MAX_ALT 3 /* at most 3 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x120e4, 0x400002 }, /* PM_GRP_DISP_REJECT */
+ { 0x410c7, 0x441084 }, /* PM_THRD_L2MISS_BOTH_CYC */
+ { 0x100005, 0x600005 }, /* PM_RUN_CYC */
+ { 0x100009, 0x200009, 0x500009 }, /* PM_INST_CMPL */
+ { 0x300009, 0x400009 }, /* PM_INST_DISP */
+};
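+
+/*
+ * For instance, the PM_INST_CMPL row above means the same logical
+ * event can be requested as 0x100009, 0x200009 or 0x500009, i.e.
+ * counted on PMC1, PMC2 or PMC5 (the PMC number sits in the field
+ * at PM_PMC_SH), which gives the scheduler more freedom.
+ */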
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u64 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+ return -1;
+}
+
+static const unsigned char bytedecode_alternatives[4][4] = {
+ /* PMC 1 */ { 0x21, 0x23, 0x25, 0x27 },
+ /* PMC 2 */ { 0x07, 0x17, 0x0e, 0x1e },
+ /* PMC 3 */ { 0x20, 0x22, 0x24, 0x26 },
+ /* PMC 4 */ { 0x07, 0x17, 0x0e, 0x1e }
+};
+
+/*
+ * Some direct events for decodes of event bus byte 3 have alternative
+ * PMCSEL values on other counters. This returns the alternative
+ * event code for those that do, or -1 otherwise.
+ */
+static s64 find_alternative_bdecode(u64 event)
+{
+ int pmc, altpmc, pp, j;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc == 0 || pmc > 4)
+ return -1;
+ altpmc = 5 - pmc; /* 1 <-> 4, 2 <-> 3 */
+ pp = event & PM_PMCSEL_MSK;
+ for (j = 0; j < 4; ++j) {
+ if (bytedecode_alternatives[pmc - 1][j] == pp) {
+ return (event & ~(PM_PMC_MSKS | PM_PMCSEL_MSK)) |
+ (altpmc << PM_PMC_SH) |
+ bytedecode_alternatives[altpmc - 1][j];
+ }
+ }
+ return -1;
+}
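+
+/*
+ * For example (hypothetical event code, chosen only to illustrate
+ * the table above): an event coded with PMC 1 and PMCSEL 0x21 is
+ * rewritten to use PMC 4 with PMCSEL 0x07; the unit, byte and
+ * bus-event bits of the code are left unchanged.
+ */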
+
+static int power5_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nalt = 1;
+ s64 ae;
+
+ alt[0] = event;
+ nalt = 1;
+ i = find_alternative(event);
+ if (i >= 0) {
+ for (j = 0; j < MAX_ALT; ++j) {
+ ae = event_alternatives[i][j];
+ if (ae && ae != event)
+ alt[nalt++] = ae;
+ }
+ } else {
+ ae = find_alternative_bdecode(event);
+ if (ae > 0)
+ alt[nalt++] = ae;
+ }
+ return nalt;
+}
+
+/*
+ * Map of which direct events on which PMCs are marked instruction events.
+ * Indexed by PMCSEL value, bit i (LE) set if PMC i is a marked event.
+ * Bit 0 is set if it is marked for all PMCs.
+ * The 0x80 bit indicates a byte decode PMCSEL value.
+ */
+static unsigned char direct_event_is_marked[0x28] = {
+ 0, /* 00 */
+ 0x1f, /* 01 PM_IOPS_CMPL */
+ 0x2, /* 02 PM_MRK_GRP_DISP */
+ 0xe, /* 03 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
+ 0, /* 04 */
+ 0x1c, /* 05 PM_MRK_BRU_FIN, PM_MRK_INST_FIN, PM_MRK_CRU_FIN */
+ 0x80, /* 06 */
+ 0x80, /* 07 */
+ 0, 0, 0,/* 08 - 0a */
+ 0x18, /* 0b PM_THRESH_TIMEO, PM_MRK_GRP_TIMEO */
+ 0, /* 0c */
+ 0x80, /* 0d */
+ 0x80, /* 0e */
+ 0, /* 0f */
+ 0, /* 10 */
+ 0x14, /* 11 PM_MRK_GRP_BR_REDIR, PM_MRK_GRP_IC_MISS */
+ 0, /* 12 */
+ 0x10, /* 13 PM_MRK_GRP_CMPL */
+ 0x1f, /* 14 PM_GRP_MRK, PM_MRK_{FXU,FPU,LSU}_FIN */
+ 0x2, /* 15 PM_MRK_GRP_ISSUED */
+ 0x80, /* 16 */
+ 0x80, /* 17 */
+ 0, 0, 0, 0, 0,
+ 0x80, /* 1d */
+ 0x80, /* 1e */
+ 0, /* 1f */
+ 0x80, /* 20 */
+ 0x80, /* 21 */
+ 0x80, /* 22 */
+ 0x80, /* 23 */
+ 0x80, /* 24 */
+ 0x80, /* 25 */
+ 0x80, /* 26 */
+ 0x80, /* 27 */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power5_marked_instr_event(u64 event)
+{
+ int pmc, psel;
+ int bit, byte, unit;
+ u32 mask;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ psel = event & PM_PMCSEL_MSK;
+ if (pmc >= 5)
+ return 0;
+
+ bit = -1;
+ if (psel < sizeof(direct_event_is_marked)) {
+ if (direct_event_is_marked[psel] & (1 << pmc))
+ return 1;
+ if (direct_event_is_marked[psel] & 0x80)
+ bit = 4;
+ else if (psel == 0x08)
+ bit = pmc - 1;
+ else if (psel == 0x10)
+ bit = 4 - pmc;
+ else if (psel == 0x1b && (pmc == 1 || pmc == 3))
+ bit = 4;
+ } else if ((psel & 0x58) == 0x40)
+ bit = psel & 7;
+
+ if (!(event & PM_BUSEVENT_MSK))
+ return 0;
+
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ if (unit == PM_LSU0) {
+ /* byte 1 bits 0-7, byte 2 bits 0,2-4,6 */
+ mask = 0x5dff00;
+ } else if (unit == PM_LSU1 && byte >= 4) {
+ byte -= 4;
+ /* byte 4 bits 1,3,5,7, byte 5 bits 6-7, byte 7 bits 0-4,6 */
+ mask = 0x5f00c0aa;
+ } else
+ return 0;
+
+ return (mask >> (byte * 8 + bit)) & 1;
+}
+
+static int power5_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], u64 mmcr[])
+{
+ u64 mmcr1 = 0;
+ u64 mmcra = 0;
+ unsigned int pmc, unit, byte, psel;
+ unsigned int ttm, grp;
+ int i, isbus, bit, grsel;
+ unsigned int pmc_inuse = 0;
+ unsigned int pmc_grp_use[2];
+ unsigned char busbyte[4];
+ unsigned char unituse[16];
+ int ttmuse;
+
+ if (n_ev > 6)
+ return -1;
+
+ /* First pass to count resource use */
+ pmc_grp_use[0] = pmc_grp_use[1] = 0;
+ memset(busbyte, 0, sizeof(busbyte));
+ memset(unituse, 0, sizeof(unituse));
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ pmc_inuse |= 1 << (pmc - 1);
+ /* count 1/2 vs 3/4 use */
+ if (pmc <= 4)
+ ++pmc_grp_use[(pmc - 1) >> 1];
+ }
+ if (event[i] & PM_BUSEVENT_MSK) {
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ if (unit > PM_LASTUNIT)
+ return -1;
+ if (unit == PM_ISU0_ALT)
+ unit = PM_ISU0;
+ if (byte >= 4) {
+ if (unit != PM_LSU1)
+ return -1;
+ ++unit;
+ byte &= 3;
+ }
+ if (!pmc)
+ ++pmc_grp_use[byte & 1];
+ if (busbyte[byte] && busbyte[byte] != unit)
+ return -1;
+ busbyte[byte] = unit;
+ unituse[unit] = 1;
+ }
+ }
+ if (pmc_grp_use[0] > 2 || pmc_grp_use[1] > 2)
+ return -1;
+
+ /*
+ * Assign resources and set multiplexer selects.
+ *
+ * PM_ISU0 can go either on TTM0 or TTM1, but that's the only
+ * choice we have to deal with.
+ */
+ if (unituse[PM_ISU0] &
+ (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_ISU1])) {
+ unituse[PM_ISU0_ALT] = 1; /* move ISU to TTM1 */
+ unituse[PM_ISU0] = 0;
+ }
+ /* Set TTM[01]SEL fields. */
+ ttmuse = 0;
+ for (i = PM_FPU; i <= PM_ISU1; ++i) {
+ if (!unituse[i])
+ continue;
+ if (ttmuse++)
+ return -1;
+ mmcr1 |= (u64)i << MMCR1_TTM0SEL_SH;
+ }
+ ttmuse = 0;
+ for (; i <= PM_GRS; ++i) {
+ if (!unituse[i])
+ continue;
+ if (ttmuse++)
+ return -1;
+ mmcr1 |= (u64)(i & 3) << MMCR1_TTM1SEL_SH;
+ }
+ if (ttmuse > 1)
+ return -1;
+
+ /* Set byte lane select fields, TTM[23]SEL and GRS_*SEL. */
+ for (byte = 0; byte < 4; ++byte) {
+ unit = busbyte[byte];
+ if (!unit)
+ continue;
+ if (unit == PM_ISU0 && unituse[PM_ISU0_ALT]) {
+ /* get ISU0 through TTM1 rather than TTM0 */
+ unit = PM_ISU0_ALT;
+ } else if (unit == PM_LSU1 + 1) {
+ /* select lower word of LSU1 for this byte */
+ mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ }
+ ttm = unit >> 2;
+ mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ }
+
+ /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ psel = event[i] & PM_PMCSEL_MSK;
+ isbus = event[i] & PM_BUSEVENT_MSK;
+ if (!pmc) {
+ /* Bus event or any-PMC direct event */
+ for (pmc = 0; pmc < 4; ++pmc) {
+ if (pmc_inuse & (1 << pmc))
+ continue;
+ grp = (pmc >> 1) & 1;
+ if (isbus) {
+ if (grp == (byte & 1))
+ break;
+ } else if (pmc_grp_use[grp] < 2) {
+ ++pmc_grp_use[grp];
+ break;
+ }
+ }
+ pmc_inuse |= 1 << pmc;
+ } else if (pmc <= 4) {
+ /* Direct event */
+ --pmc;
+ if ((psel == 8 || psel == 0x10) && isbus && (byte & 2))
+ /* add events on higher-numbered bus */
+ mmcr1 |= 1ull << (MMCR1_PMC1_ADDER_SEL_SH - pmc);
+ } else {
+ /* Instructions or run cycles on PMC5/6 */
+ --pmc;
+ }
+ if (isbus && unit == PM_GRS) {
+ bit = psel & 7;
+ grsel = (event[i] >> PM_GRS_SH) & PM_GRS_MSK;
+ mmcr1 |= (u64)grsel << grsel_shift[bit];
+ }
+ if (power5_marked_instr_event(event[i]))
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ if (pmc <= 3)
+ mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
+ hwc[i] = pmc;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+ if (pmc_inuse & 1)
+ mmcr[0] = MMCR0_PMC1CE;
+ if (pmc_inuse & 0x3e)
+ mmcr[0] |= MMCR0_PMCjCE;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ return 0;
+}
+
+static void power5_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+ if (pmc <= 3)
+ mmcr[1] &= ~(0x7fUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power5_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0xf,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 0x100009,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x4c1090, /* LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3c1088, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x230e4, /* BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x230e5, /* BR_MPRED_CR */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power5_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x4c1090, 0x3c1088 },
+ [C(OP_WRITE)] = { 0x3c1090, 0xc10c3 },
+ [C(OP_PREFETCH)] = { 0xc70e7, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x3c309b },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0xc50c3, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x2c4090, 0x800c4 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x800c0 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x230e4, 0x230e5 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu power5_pmu = {
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x7000090000555ull,
+ .test_adder = 0x3000490000000ull,
+ .compute_mmcr = power5_compute_mmcr,
+ .get_constraint = power5_get_constraint,
+ .get_alternatives = power5_get_alternatives,
+ .disable_pmc = power5_disable_pmc,
+ .n_generic = ARRAY_SIZE(power5_generic_events),
+ .generic_events = power5_generic_events,
+ .cache_events = &power5_cache_events,
+};
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c
new file mode 100644
index 000000000000..46f74bebcfd9
--- /dev/null
+++ b/arch/powerpc/kernel/power6-pmu.c
@@ -0,0 +1,532 @@
+/*
+ * Performance counter support for POWER6 processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER6
+ */
+#define PM_PMC_SH 20 /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK 0x7
+#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH	16	/* Unit the event comes from (TTMxSEL encoding) */
+#define PM_UNIT_MSK 0xf
+#define PM_UNIT_MSKS (PM_UNIT_MSK << PM_UNIT_SH)
+#define PM_LLAV 0x8000 /* Load lookahead match value */
+#define PM_LLA 0x4000 /* Load lookahead match enable */
+#define PM_BYTE_SH 12 /* Byte of event bus to use */
+#define PM_BYTE_MSK 3
+#define PM_SUBUNIT_SH	8	/* Subunit the event comes from (NEST_SEL enc.) */
+#define PM_SUBUNIT_MSK 7
+#define PM_SUBUNIT_MSKS (PM_SUBUNIT_MSK << PM_SUBUNIT_SH)
+#define PM_PMCSEL_MSK 0xff /* PMCxSEL value */
+#define PM_BUSEVENT_MSK 0xf3700
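+
+/*
+ * As an illustration of this encoding, the LD_REF_L1 code 0x280030
+ * used in the generic event table below decodes to PMC 2, unit 8
+ * (one of the LSU event sets), byte 0 of the event bus and
+ * PMCSEL 0x30; 0x280030 & PM_BUSEVENT_MSK is non-zero, so it is
+ * treated as an event-bus event.
+ */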
+
+/*
+ * Bits in MMCR1 for POWER6
+ */
+#define MMCR1_TTM0SEL_SH 60
+#define MMCR1_TTMSEL_SH(n) (MMCR1_TTM0SEL_SH - (n) * 4)
+#define MMCR1_TTMSEL_MSK 0xf
+#define MMCR1_TTMSEL(m, n) (((m) >> MMCR1_TTMSEL_SH(n)) & MMCR1_TTMSEL_MSK)
+#define MMCR1_NESTSEL_SH 45
+#define MMCR1_NESTSEL_MSK 0x7
+#define MMCR1_NESTSEL(m) (((m) >> MMCR1_NESTSEL_SH) & MMCR1_NESTSEL_MSK)
+#define MMCR1_PMC1_LLA ((u64)1 << 44)
+#define MMCR1_PMC1_LLA_VALUE ((u64)1 << 39)
+#define MMCR1_PMC1_ADDR_SEL ((u64)1 << 35)
+#define MMCR1_PMC1SEL_SH 24
+#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK 0xff
+
+/*
+ * Map of which direct events on which PMCs are marked instruction events.
+ * Indexed by PMCSEL value >> 1.
+ * Bottom 4 bits are a map of which PMCs are interesting,
+ * top 4 bits say what sort of event:
+ * 0 = direct marked event,
+ * 1 = byte decode event,
+ * 4 = add/and event (PMC1 -> bits 0 & 4),
+ * 5 = add/and event (PMC1 -> bits 1 & 5),
+ * 6 = add/and event (PMC1 -> bits 2 & 6),
+ * 7 = add/and event (PMC1 -> bits 3 & 7).
+ */
+static unsigned char direct_event_is_marked[0x60 >> 1] = {
+ 0, /* 00 */
+ 0, /* 02 */
+ 0, /* 04 */
+ 0x07, /* 06 PM_MRK_ST_CMPL, PM_MRK_ST_GPS, PM_MRK_ST_CMPL_INT */
+ 0x04, /* 08 PM_MRK_DFU_FIN */
+ 0x06, /* 0a PM_MRK_IFU_FIN, PM_MRK_INST_FIN */
+ 0, /* 0c */
+ 0, /* 0e */
+ 0x02, /* 10 PM_MRK_INST_DISP */
+ 0x08, /* 12 PM_MRK_LSU_DERAT_MISS */
+ 0, /* 14 */
+ 0, /* 16 */
+ 0x0c, /* 18 PM_THRESH_TIMEO, PM_MRK_INST_FIN */
+ 0x0f, /* 1a PM_MRK_INST_DISP, PM_MRK_{FXU,FPU,LSU}_FIN */
+ 0x01, /* 1c PM_MRK_INST_ISSUED */
+ 0, /* 1e */
+ 0, /* 20 */
+ 0, /* 22 */
+ 0, /* 24 */
+ 0, /* 26 */
+ 0x15, /* 28 PM_MRK_DATA_FROM_L2MISS, PM_MRK_DATA_FROM_L3MISS */
+ 0, /* 2a */
+ 0, /* 2c */
+ 0, /* 2e */
+ 0x4f, /* 30 */
+ 0x7f, /* 32 */
+ 0x4f, /* 34 */
+ 0x5f, /* 36 */
+ 0x6f, /* 38 */
+ 0x4f, /* 3a */
+ 0, /* 3c */
+ 0x08, /* 3e PM_MRK_INST_TIMEO */
+ 0x1f, /* 40 */
+ 0x1f, /* 42 */
+ 0x1f, /* 44 */
+ 0x1f, /* 46 */
+ 0x1f, /* 48 */
+ 0x1f, /* 4a */
+ 0x1f, /* 4c */
+ 0x1f, /* 4e */
+ 0, /* 50 */
+ 0x05, /* 52 PM_MRK_BR_TAKEN, PM_MRK_BR_MPRED */
+ 0x1c, /* 54 PM_MRK_PTEG_FROM_L3MISS, PM_MRK_PTEG_FROM_L2MISS */
+ 0x02, /* 56 PM_MRK_LD_MISS_L1 */
+ 0, /* 58 */
+ 0, /* 5a */
+ 0, /* 5c */
+ 0, /* 5e */
+};
+
+/*
+ * Masks showing for each unit which bits are marked events.
+ * These masks are in LE order, i.e. 0x00000001 is byte 0, bit 0.
+ */
+static u32 marked_bus_events[16] = {
+ 0x01000000, /* direct events set 1: byte 3 bit 0 */
+ 0x00010000, /* direct events set 2: byte 2 bit 0 */
+ 0, 0, 0, 0, /* IDU, IFU, nest: nothing */
+ 0x00000088, /* VMX set 1: byte 0 bits 3, 7 */
+ 0x000000c0, /* VMX set 2: byte 0 bits 4-7 */
+ 0x04010000, /* LSU set 1: byte 2 bit 0, byte 3 bit 2 */
+ 0xff010000u, /* LSU set 2: byte 2 bit 0, all of byte 3 */
+ 0, /* LSU set 3 */
+ 0x00000010, /* VMX set 3: byte 0 bit 4 */
+ 0, /* BFP set 1 */
+ 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */
+ 0, 0
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power6_marked_instr_event(u64 event)
+{
+ int pmc, psel, ptype;
+ int bit, byte, unit;
+ u32 mask;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ psel = (event & PM_PMCSEL_MSK) >> 1; /* drop edge/level bit */
+ if (pmc >= 5)
+ return 0;
+
+ bit = -1;
+ if (psel < sizeof(direct_event_is_marked)) {
+ ptype = direct_event_is_marked[psel];
+ if (pmc == 0 || !(ptype & (1 << (pmc - 1))))
+ return 0;
+ ptype >>= 4;
+ if (ptype == 0)
+ return 1;
+ if (ptype == 1)
+ bit = 0;
+ else
+ bit = ptype ^ (pmc - 1);
+ } else if ((psel & 0x48) == 0x40)
+ bit = psel & 7;
+
+ if (!(event & PM_BUSEVENT_MSK) || bit == -1)
+ return 0;
+
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ mask = marked_bus_events[unit];
+ return (mask >> (byte * 8 + bit)) & 1;
+}
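+
+/*
+ * For example, a PMCSEL of 0x28 on PMC1 indexes entry 0x15 in
+ * direct_event_is_marked[] above: the bottom bits say PMC1 and PMC3
+ * are of interest and the top bits say it is a byte-decode event,
+ * so bit 0 of the appropriate byte of marked_bus_events[unit] then
+ * decides whether MMCRA_SAMPLE_ENABLE is needed.
+ */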
+
+/*
+ * Assign PMC numbers and compute MMCR1 value for a set of events
+ */
+static int p6_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], u64 mmcr[])
+{
+ u64 mmcr1 = 0;
+ u64 mmcra = 0;
+ int i;
+ unsigned int pmc, ev, b, u, s, psel;
+ unsigned int ttmset = 0;
+ unsigned int pmc_inuse = 0;
+
+ if (n_ev > 6)
+ return -1;
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1; /* collision! */
+ pmc_inuse |= 1 << (pmc - 1);
+ }
+ }
+ for (i = 0; i < n_ev; ++i) {
+ ev = event[i];
+ pmc = (ev >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ --pmc;
+ } else {
+ /* can go on any PMC; find a free one */
+ for (pmc = 0; pmc < 4; ++pmc)
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ if (pmc >= 4)
+ return -1;
+ pmc_inuse |= 1 << pmc;
+ }
+ hwc[i] = pmc;
+ psel = ev & PM_PMCSEL_MSK;
+ if (ev & PM_BUSEVENT_MSK) {
+ /* this event uses the event bus */
+ b = (ev >> PM_BYTE_SH) & PM_BYTE_MSK;
+ u = (ev >> PM_UNIT_SH) & PM_UNIT_MSK;
+ /* check for conflict on this byte of event bus */
+ if ((ttmset & (1 << b)) && MMCR1_TTMSEL(mmcr1, b) != u)
+ return -1;
+ mmcr1 |= (u64)u << MMCR1_TTMSEL_SH(b);
+ ttmset |= 1 << b;
+ if (u == 5) {
+ /* Nest events have a further mux */
+ s = (ev >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
+ if ((ttmset & 0x10) &&
+ MMCR1_NESTSEL(mmcr1) != s)
+ return -1;
+ ttmset |= 0x10;
+ mmcr1 |= (u64)s << MMCR1_NESTSEL_SH;
+ }
+ if (0x30 <= psel && psel <= 0x3d) {
+ /* these need the PMCx_ADDR_SEL bits */
+ if (b >= 2)
+ mmcr1 |= MMCR1_PMC1_ADDR_SEL >> pmc;
+ }
+ /* bus select values are different for PMC3/4 */
+ if (pmc >= 2 && (psel & 0x90) == 0x80)
+ psel ^= 0x20;
+ }
+ if (ev & PM_LLA) {
+ mmcr1 |= MMCR1_PMC1_LLA >> pmc;
+ if (ev & PM_LLAV)
+ mmcr1 |= MMCR1_PMC1_LLA_VALUE >> pmc;
+ }
+ if (power6_marked_instr_event(event[i]))
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ if (pmc < 4)
+ mmcr1 |= (u64)psel << MMCR1_PMCSEL_SH(pmc);
+ }
+ mmcr[0] = 0;
+ if (pmc_inuse & 1)
+ mmcr[0] = MMCR0_PMC1CE;
+ if (pmc_inuse & 0xe)
+ mmcr[0] |= MMCR0_PMCjCE;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ return 0;
+}
+
+/*
+ * Layout of constraint bits:
+ *
+ * 0-1 add field: number of uses of PMC1 (max 1)
+ * 2-3, 4-5, 6-7, 8-9, 10-11: ditto for PMC2, 3, 4, 5, 6
+ * 12-15 add field: number of uses of PMC1-4 (max 4)
+ * 16-19 select field: unit on byte 0 of event bus
+ * 20-23, 24-27, 28-31 ditto for bytes 1, 2, 3
+ * 32-34 select field: nest (subunit) event selector
+ */
+static int p6_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+ int pmc, byte, sh, subunit;
+ u64 mask = 0, value = 0;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 4 && !(event == 0x500009 || event == 0x600005))
+ return -1;
+ sh = (pmc - 1) * 2;
+ mask |= 2 << sh;
+ value |= 1 << sh;
+ }
+ if (event & PM_BUSEVENT_MSK) {
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ sh = byte * 4 + (16 - PM_UNIT_SH);
+ mask |= PM_UNIT_MSKS << sh;
+ value |= (u64)(event & PM_UNIT_MSKS) << sh;
+ if ((event & PM_UNIT_MSKS) == (5 << PM_UNIT_SH)) {
+ subunit = (event >> PM_SUBUNIT_SH) & PM_SUBUNIT_MSK;
+ mask |= (u64)PM_SUBUNIT_MSK << 32;
+ value |= (u64)subunit << 32;
+ }
+ }
+ if (pmc <= 4) {
+ mask |= 0x8000; /* add field for count of PMC1-4 uses */
+ value |= 0x1000;
+ }
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
+static int p6_limited_pmc_event(u64 event)
+{
+ int pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+
+ return pmc == 5 || pmc == 6;
+}
+
+#define MAX_ALT 4 /* at most 4 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x0130e8, 0x2000f6, 0x3000fc }, /* PM_PTEG_RELOAD_VALID */
+ { 0x080080, 0x10000d, 0x30000c, 0x4000f0 }, /* PM_LD_MISS_L1 */
+ { 0x080088, 0x200054, 0x3000f0 }, /* PM_ST_MISS_L1 */
+ { 0x10000a, 0x2000f4, 0x600005 }, /* PM_RUN_CYC */
+ { 0x10000b, 0x2000f5 }, /* PM_RUN_COUNT */
+ { 0x10000e, 0x400010 }, /* PM_PURR */
+ { 0x100010, 0x4000f8 }, /* PM_FLUSH */
+ { 0x10001a, 0x200010 }, /* PM_MRK_INST_DISP */
+ { 0x100026, 0x3000f8 }, /* PM_TB_BIT_TRANS */
+ { 0x100054, 0x2000f0 }, /* PM_ST_FIN */
+ { 0x100056, 0x2000fc }, /* PM_L1_ICACHE_MISS */
+ { 0x1000f0, 0x40000a }, /* PM_INST_IMC_MATCH_CMPL */
+ { 0x1000f8, 0x200008 }, /* PM_GCT_EMPTY_CYC */
+ { 0x1000fc, 0x400006 }, /* PM_LSU_DERAT_MISS_CYC */
+ { 0x20000e, 0x400007 }, /* PM_LSU_DERAT_MISS */
+ { 0x200012, 0x300012 }, /* PM_INST_DISP */
+ { 0x2000f2, 0x3000f2 }, /* PM_INST_DISP */
+ { 0x2000f8, 0x300010 }, /* PM_EXT_INT */
+ { 0x2000fe, 0x300056 }, /* PM_DATA_FROM_L2MISS */
+ { 0x2d0030, 0x30001a }, /* PM_MRK_FPU_FIN */
+ { 0x30000a, 0x400018 }, /* PM_MRK_INST_FIN */
+ { 0x3000f6, 0x40000e }, /* PM_L1_DCACHE_RELOAD_VALID */
+ { 0x3000fe, 0x400056 }, /* PM_DATA_FROM_L3MISS */
+};
+
+/*
+ * This could be made more efficient with a binary search on
+ * a presorted list, if necessary
+ */
+static int find_alternatives_list(u64 event)
+{
+ int i, j;
+ unsigned int alt;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ return -1;
+ for (j = 0; j < MAX_ALT; ++j) {
+ alt = event_alternatives[i][j];
+ if (!alt || event < alt)
+ break;
+ if (event == alt)
+ return i;
+ }
+ }
+ return -1;
+}
+
+static int p6_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nlim;
+ unsigned int psel, pmc;
+ unsigned int nalt = 1;
+ u64 aevent;
+
+ alt[0] = event;
+ nlim = p6_limited_pmc_event(event);
+
+ /* check the alternatives table */
+ i = find_alternatives_list(event);
+ if (i >= 0) {
+ /* copy out alternatives from list */
+ for (j = 0; j < MAX_ALT; ++j) {
+ aevent = event_alternatives[i][j];
+ if (!aevent)
+ break;
+ if (aevent != event)
+ alt[nalt++] = aevent;
+ nlim += p6_limited_pmc_event(aevent);
+ }
+
+ } else {
+ /* Check for alternative ways of computing sum events */
+ /* PMCSEL 0x32 counter N == PMCSEL 0x34 counter 5-N */
+ psel = event & (PM_PMCSEL_MSK & ~1); /* ignore edge bit */
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc && (psel == 0x32 || psel == 0x34))
+ alt[nalt++] = ((event ^ 0x6) & ~PM_PMC_MSKS) |
+ ((5 - pmc) << PM_PMC_SH);
+
+ /* PMCSEL 0x38 counter N == PMCSEL 0x3a counter N+/-2 */
+ if (pmc && (psel == 0x38 || psel == 0x3a))
+ alt[nalt++] = ((event ^ 0x2) & ~PM_PMC_MSKS) |
+ ((pmc > 2? pmc - 2: pmc + 2) << PM_PMC_SH);
+ }
+
+ if (flags & PPMU_ONLY_COUNT_RUN) {
+ /*
+ * We're only counting in RUN state,
+ * so PM_CYC is equivalent to PM_RUN_CYC,
+ * PM_INST_CMPL === PM_RUN_INST_CMPL, PM_PURR === PM_RUN_PURR.
+ * This doesn't include alternatives that don't provide
+ * any extra flexibility in assigning PMCs (e.g.
+ * 0x10000a for PM_RUN_CYC vs. 0x1e for PM_CYC).
+ * Note that even with these additional alternatives
+ * we never end up with more than 4 alternatives for any event.
+ */
+ j = nalt;
+ for (i = 0; i < nalt; ++i) {
+ switch (alt[i]) {
+ case 0x1e: /* PM_CYC */
+ alt[j++] = 0x600005; /* PM_RUN_CYC */
+ ++nlim;
+ break;
+ case 0x10000a: /* PM_RUN_CYC */
+ alt[j++] = 0x1e; /* PM_CYC */
+ break;
+ case 2: /* PM_INST_CMPL */
+ alt[j++] = 0x500009; /* PM_RUN_INST_CMPL */
+ ++nlim;
+ break;
+ case 0x500009: /* PM_RUN_INST_CMPL */
+ alt[j++] = 2; /* PM_INST_CMPL */
+ break;
+ case 0x10000e: /* PM_PURR */
+ alt[j++] = 0x4000f4; /* PM_RUN_PURR */
+ break;
+ case 0x4000f4: /* PM_RUN_PURR */
+ alt[j++] = 0x10000e; /* PM_PURR */
+ break;
+ }
+ }
+ nalt = j;
+ }
+
+ if (!(flags & PPMU_LIMITED_PMC_OK) && nlim) {
+ /* remove the limited PMC events */
+ j = 0;
+ for (i = 0; i < nalt; ++i) {
+ if (!p6_limited_pmc_event(alt[i])) {
+ alt[j] = alt[i];
+ ++j;
+ }
+ }
+ nalt = j;
+ } else if ((flags & PPMU_LIMITED_PMC_REQD) && nlim < nalt) {
+ /* remove all but the limited PMC events */
+ j = 0;
+ for (i = 0; i < nalt; ++i) {
+ if (p6_limited_pmc_event(alt[i])) {
+ alt[j] = alt[i];
+ ++j;
+ }
+ }
+ nalt = j;
+ }
+
+ return nalt;
+}
+
+static void p6_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+ /* Set PMCxSEL to 0 to disable PMCx */
+ if (pmc <= 3)
+ mmcr[1] &= ~(0xffUL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power6_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x280030, /* LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x30000c, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x410a0, /* BR_PRED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x400052, /* BR_MPRED */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ * The "DTLB" and "ITLB" events relate to the DERAT and IERAT.
+ */
+static int power6_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x80082, 0x80080 },
+ [C(OP_WRITE)] = { 0x80086, 0x80088 },
+ [C(OP_PREFETCH)] = { 0x810a4, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x100056 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0x4008c, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x150730, 0x250532 },
+ [C(OP_WRITE)] = { 0x250432, 0x150432 },
+ [C(OP_PREFETCH)] = { 0x810a6, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x20000e },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x420ce },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x430e6, 0x400052 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu power6_pmu = {
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT,
+ .add_fields = 0x1555,
+ .test_adder = 0x3000,
+ .compute_mmcr = p6_compute_mmcr,
+ .get_constraint = p6_get_constraint,
+ .get_alternatives = p6_get_alternatives,
+ .disable_pmc = p6_disable_pmc,
+ .limited_pmc_event = p6_limited_pmc_event,
+ .flags = PPMU_LIMITED_PMC5_6 | PPMU_ALT_SIPR,
+ .n_generic = ARRAY_SIZE(power6_generic_events),
+ .generic_events = power6_generic_events,
+ .cache_events = &power6_cache_events,
+};
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c
new file mode 100644
index 000000000000..b72e7a19d054
--- /dev/null
+++ b/arch/powerpc/kernel/power7-pmu.c
@@ -0,0 +1,357 @@
+/*
+ * Performance counter support for POWER7 processors.
+ *
+ * Copyright 2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/kernel.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for POWER7
+ */
+#define PM_PMC_SH 16 /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK 0xf
+#define PM_PMC_MSKS (PM_PMC_MSK << PM_PMC_SH)
+#define PM_UNIT_SH 12 /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK 0xf
+#define PM_COMBINE_SH 11 /* Combined event bit */
+#define PM_COMBINE_MSK 1
+#define PM_COMBINE_MSKS 0x800
+#define PM_L2SEL_SH 8 /* L2 event select */
+#define PM_L2SEL_MSK 7
+#define PM_PMCSEL_MSK 0xff
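+
+/*
+ * For example, the LD_MISS_L1 code 0x400f0 from the generic event
+ * table below decodes to PMC 4 with PMCSEL 0xf0 and no unit,
+ * combine or L2SEL bits, i.e. a plain direct event on PMC4.
+ */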
+
+/*
+ * Bits in MMCR1 for POWER7
+ */
+#define MMCR1_TTM0SEL_SH 60
+#define MMCR1_TTM1SEL_SH 56
+#define MMCR1_TTM2SEL_SH 52
+#define MMCR1_TTM3SEL_SH 48
+#define MMCR1_TTMSEL_MSK 0xf
+#define MMCR1_L2SEL_SH 45
+#define MMCR1_L2SEL_MSK 7
+#define MMCR1_PMC1_COMBINE_SH 35
+#define MMCR1_PMC2_COMBINE_SH 34
+#define MMCR1_PMC3_COMBINE_SH 33
+#define MMCR1_PMC4_COMBINE_SH 32
+#define MMCR1_PMC1SEL_SH 24
+#define MMCR1_PMC2SEL_SH 16
+#define MMCR1_PMC3SEL_SH 8
+#define MMCR1_PMC4SEL_SH 0
+#define MMCR1_PMCSEL_SH(n) (MMCR1_PMC1SEL_SH - (n) * 8)
+#define MMCR1_PMCSEL_MSK 0xff
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * [ ><><><><><><>
+ * NC P6P5P4P3P2P1
+ *
+ * NC - number of counters
+ * 15: NC error 0x8000
+ * 12-14: number of events needing PMC1-4 0x7000
+ *
+ * P6
+ * 11: P6 error 0x800
+ * 10-11: Count of events needing PMC6
+ *
+ * P1..P5
+ * 0-9: Count of events needing PMC1..PMC5
+ */
+
+static int power7_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+ int pmc, sh;
+ u64 mask = 0, value = 0;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+ sh = (pmc - 1) * 2;
+ mask |= 2 << sh;
+ value |= 1 << sh;
+ if (pmc >= 5 && !(event == 0x500fa || event == 0x600f4))
+ return -1;
+ }
+ if (pmc < 5) {
+ /* need a counter from PMC1-4 set */
+ mask |= 0x8000;
+ value |= 0x1000;
+ }
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
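+
+/*
+ * For example, PM_RUN_CYC (0x600f4) has PMC 6 in its code, so the
+ * constraint comes out as mask 0x800, value 0x400: one use of PMC6,
+ * with bit 11 acting as the overflow/error bit described above.
+ */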
+
+#define MAX_ALT 2 /* at most 2 alternatives for any event */
+
+static const unsigned int event_alternatives[][MAX_ALT] = {
+ { 0x200f2, 0x300f2 }, /* PM_INST_DISP */
+ { 0x200f4, 0x600f4 }, /* PM_RUN_CYC */
+ { 0x400fa, 0x500fa }, /* PM_RUN_INST_CMPL */
+};
+
+/*
+ * Scan the alternatives table for a match and return the
+ * index into the alternatives table if found, else -1.
+ */
+static int find_alternative(u64 event)
+{
+ int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
+ if (event < event_alternatives[i][0])
+ break;
+ for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
+ if (event == event_alternatives[i][j])
+ return i;
+ }
+ return -1;
+}
+
+static s64 find_alternative_decode(u64 event)
+{
+ int pmc, psel;
+
+ /* this only handles the 4x decode events */
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ psel = event & PM_PMCSEL_MSK;
+ if ((pmc == 2 || pmc == 4) && (psel & ~7) == 0x40)
+ return event - (1 << PM_PMC_SH) + 8;
+ if ((pmc == 1 || pmc == 3) && (psel & ~7) == 0x48)
+ return event + (1 << PM_PMC_SH) - 8;
+ return -1;
+}
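+
+/*
+ * For instance (hypothetical code, purely to illustrate the mapping
+ * above): 0x20042, i.e. PMC2 with PMCSEL 0x42, would be rewritten
+ * as 0x1004a, i.e. PMC1 with PMCSEL 0x4a, and vice versa.
+ */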
+
+static int power7_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ int i, j, nalt = 1;
+ s64 ae;
+
+ alt[0] = event;
+ nalt = 1;
+ i = find_alternative(event);
+ if (i >= 0) {
+ for (j = 0; j < MAX_ALT; ++j) {
+ ae = event_alternatives[i][j];
+ if (ae && ae != event)
+ alt[nalt++] = ae;
+ }
+ } else {
+ ae = find_alternative_decode(event);
+ if (ae > 0)
+ alt[nalt++] = ae;
+ }
+
+ if (flags & PPMU_ONLY_COUNT_RUN) {
+ /*
+ * We're only counting in RUN state,
+ * so PM_CYC is equivalent to PM_RUN_CYC
+ * and PM_INST_CMPL === PM_RUN_INST_CMPL.
+ * This doesn't include alternatives that don't provide
+ * any extra flexibility in assigning PMCs.
+ */
+ j = nalt;
+ for (i = 0; i < nalt; ++i) {
+ switch (alt[i]) {
+ case 0x1e: /* PM_CYC */
+ alt[j++] = 0x600f4; /* PM_RUN_CYC */
+ break;
+ case 0x600f4: /* PM_RUN_CYC */
+ alt[j++] = 0x1e;
+ break;
+ case 0x2: /* PM_PPC_CMPL */
+ alt[j++] = 0x500fa; /* PM_RUN_INST_CMPL */
+ break;
+ case 0x500fa: /* PM_RUN_INST_CMPL */
+ alt[j++] = 0x2; /* PM_PPC_CMPL */
+ break;
+ }
+ }
+ nalt = j;
+ }
+
+ return nalt;
+}
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int power7_marked_instr_event(u64 event)
+{
+ int pmc, psel;
+ int unit;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ psel = event & PM_PMCSEL_MSK & ~1; /* trim off edge/level bit */
+ if (pmc >= 5)
+ return 0;
+
+ switch (psel >> 4) {
+ case 2:
+ return pmc == 2 || pmc == 4;
+ case 3:
+ if (psel == 0x3c)
+ return pmc == 1;
+ if (psel == 0x3e)
+ return pmc != 2;
+ return 1;
+ case 4:
+ case 5:
+ return unit == 0xd;
+ case 6:
+ if (psel == 0x64)
+ return pmc >= 3;
+		/* fall through */
+	case 8:
+ return unit == 0xd;
+ }
+ return 0;
+}
+
+static int power7_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], u64 mmcr[])
+{
+ u64 mmcr1 = 0;
+ u64 mmcra = 0;
+ unsigned int pmc, unit, combine, l2sel, psel;
+ unsigned int pmc_inuse = 0;
+ int i;
+
+ /* First pass to count resource use */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 6)
+ return -1;
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ pmc_inuse |= 1 << (pmc - 1);
+ }
+ }
+
+ /* Second pass: assign PMCs, set all MMCR1 fields */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ combine = (event[i] >> PM_COMBINE_SH) & PM_COMBINE_MSK;
+ l2sel = (event[i] >> PM_L2SEL_SH) & PM_L2SEL_MSK;
+ psel = event[i] & PM_PMCSEL_MSK;
+ if (!pmc) {
+ /* Bus event or any-PMC direct event */
+ for (pmc = 0; pmc < 4; ++pmc) {
+ if (!(pmc_inuse & (1 << pmc)))
+ break;
+ }
+ if (pmc >= 4)
+ return -1;
+ pmc_inuse |= 1 << pmc;
+ } else {
+ /* Direct or decoded event */
+ --pmc;
+ }
+ if (pmc <= 3) {
+ mmcr1 |= (u64) unit << (MMCR1_TTM0SEL_SH - 4 * pmc);
+ mmcr1 |= (u64) combine << (MMCR1_PMC1_COMBINE_SH - pmc);
+ mmcr1 |= psel << MMCR1_PMCSEL_SH(pmc);
+ if (unit == 6) /* L2 events */
+ mmcr1 |= (u64) l2sel << MMCR1_L2SEL_SH;
+ }
+ if (power7_marked_instr_event(event[i]))
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ hwc[i] = pmc;
+ }
+
+ /* Return MMCRx values */
+ mmcr[0] = 0;
+ if (pmc_inuse & 1)
+ mmcr[0] = MMCR0_PMC1CE;
+ if (pmc_inuse & 0x3e)
+ mmcr[0] |= MMCR0_PMCjCE;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ return 0;
+}
+
+static void power7_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+ if (pmc <= 3)
+ mmcr[1] &= ~(0xffULL << MMCR1_PMCSEL_SH(pmc));
+}
+
+static int power7_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 0x1e,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 2,
+	[PERF_COUNT_HW_CACHE_REFERENCES] = 0xc880, /* LD_REF_L1_LSU */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x400f0, /* LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x10068, /* BRU_FIN */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x400f6, /* BR_MPRED */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int power7_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x400f0, 0xc880 },
+ [C(OP_WRITE)] = { 0, 0x300f0 },
+ [C(OP_PREFETCH)] = { 0xd8b8, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x200fc },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0x408a, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x6080, 0x6084 },
+ [C(OP_WRITE)] = { 0x6082, 0x6086 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x300fc },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x400fc },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x10068, 0x400f6 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu power7_pmu = {
+ .n_counter = 6,
+ .max_alternatives = MAX_ALT + 1,
+ .add_fields = 0x1555ull,
+ .test_adder = 0x3000ull,
+ .compute_mmcr = power7_compute_mmcr,
+ .get_constraint = power7_get_constraint,
+ .get_alternatives = power7_get_alternatives,
+ .disable_pmc = power7_disable_pmc,
+ .n_generic = ARRAY_SIZE(power7_generic_events),
+ .generic_events = power7_generic_events,
+ .cache_events = &power7_cache_events,
+};
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c
new file mode 100644
index 000000000000..ba0a357a89f4
--- /dev/null
+++ b/arch/powerpc/kernel/ppc970-pmu.c
@@ -0,0 +1,482 @@
+/*
+ * Performance counter support for PPC970-family processors.
+ *
+ * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <linux/perf_counter.h>
+#include <asm/reg.h>
+
+/*
+ * Bits in event code for PPC970
+ */
+#define PM_PMC_SH 12 /* PMC number (1-based) for direct events */
+#define PM_PMC_MSK 0xf
+#define PM_UNIT_SH 8 /* TTMMUX number and setting - unit select */
+#define PM_UNIT_MSK 0xf
+#define PM_SPCSEL_SH 6
+#define PM_SPCSEL_MSK 3
+#define PM_BYTE_SH 4 /* Byte number of event bus to use */
+#define PM_BYTE_MSK 3
+#define PM_PMCSEL_MSK 0xf
+
+/* Values in PM_UNIT field */
+#define PM_NONE 0
+#define PM_FPU 1
+#define PM_VPU 2
+#define PM_ISU 3
+#define PM_IFU 4
+#define PM_IDU 5
+#define PM_STS 6
+#define PM_LSU0 7
+#define PM_LSU1U 8
+#define PM_LSU1L 9
+#define PM_LASTUNIT 9
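+
+/*
+ * As an example of this encoding, the PM_BR_ISSUED code 0x431 from
+ * the generic event table below has PMC field 0 (any PMC), unit 4
+ * (PM_IFU), SPCSEL 0, byte 3 of the event bus and PMCSEL 1.
+ */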
+
+/*
+ * Bits in MMCR0 for PPC970
+ */
+#define MMCR0_PMC1SEL_SH 8
+#define MMCR0_PMC2SEL_SH 1
+#define MMCR_PMCSEL_MSK 0x1f
+
+/*
+ * Bits in MMCR1 for PPC970
+ */
+#define MMCR1_TTM0SEL_SH 62
+#define MMCR1_TTM1SEL_SH 59
+#define MMCR1_TTM3SEL_SH 53
+#define MMCR1_TTMSEL_MSK 3
+#define MMCR1_TD_CP_DBG0SEL_SH 50
+#define MMCR1_TD_CP_DBG1SEL_SH 48
+#define MMCR1_TD_CP_DBG2SEL_SH 46
+#define MMCR1_TD_CP_DBG3SEL_SH 44
+#define MMCR1_PMC1_ADDER_SEL_SH 39
+#define MMCR1_PMC2_ADDER_SEL_SH 38
+#define MMCR1_PMC6_ADDER_SEL_SH 37
+#define MMCR1_PMC5_ADDER_SEL_SH 36
+#define MMCR1_PMC8_ADDER_SEL_SH 35
+#define MMCR1_PMC7_ADDER_SEL_SH 34
+#define MMCR1_PMC3_ADDER_SEL_SH 33
+#define MMCR1_PMC4_ADDER_SEL_SH 32
+#define MMCR1_PMC3SEL_SH 27
+#define MMCR1_PMC4SEL_SH 22
+#define MMCR1_PMC5SEL_SH 17
+#define MMCR1_PMC6SEL_SH 12
+#define MMCR1_PMC7SEL_SH 7
+#define MMCR1_PMC8SEL_SH 2
+
+static short mmcr1_adder_bits[8] = {
+ MMCR1_PMC1_ADDER_SEL_SH,
+ MMCR1_PMC2_ADDER_SEL_SH,
+ MMCR1_PMC3_ADDER_SEL_SH,
+ MMCR1_PMC4_ADDER_SEL_SH,
+ MMCR1_PMC5_ADDER_SEL_SH,
+ MMCR1_PMC6_ADDER_SEL_SH,
+ MMCR1_PMC7_ADDER_SEL_SH,
+ MMCR1_PMC8_ADDER_SEL_SH
+};
+
+/*
+ * Bits in MMCRA
+ */
+
+/*
+ * Layout of constraint bits:
+ * 6666555555555544444444443333333333222222222211111111110000000000
+ * 3210987654321098765432109876543210987654321098765432109876543210
+ * <><><>[ >[ >[ >< >< >< >< ><><><><><><><><>
+ * SPT0T1 UC PS1 PS2 B0 B1 B2 B3 P1P2P3P4P5P6P7P8
+ *
+ * SP - SPCSEL constraint
+ * 48-49: SPCSEL value 0x3_0000_0000_0000
+ *
+ * T0 - TTM0 constraint
+ * 46-47: TTM0SEL value (0=FPU, 2=IFU, 3=VPU) 0xC000_0000_0000
+ *
+ * T1 - TTM1 constraint
+ * 44-45: TTM1SEL value (0=IDU, 3=STS) 0x3000_0000_0000
+ *
+ * UC - unit constraint: can't have all three of FPU|IFU|VPU, ISU, IDU|STS
+ * 43: UC3 error 0x0800_0000_0000
+ * 42: FPU|IFU|VPU events needed 0x0400_0000_0000
+ * 41: ISU events needed 0x0200_0000_0000
+ * 40: IDU|STS events needed 0x0100_0000_0000
+ *
+ * PS1
+ * 39: PS1 error 0x0080_0000_0000
+ * 36-38: count of events needing PMC1/2/5/6 0x0070_0000_0000
+ *
+ * PS2
+ * 35: PS2 error 0x0008_0000_0000
+ * 32-34: count of events needing PMC3/4/7/8 0x0007_0000_0000
+ *
+ * B0
+ * 28-31: Byte 0 event source 0xf000_0000
+ * Encoding as for the event code
+ *
+ * B1, B2, B3
+ * 24-27, 20-23, 16-19: Byte 1, 2, 3 event sources
+ *
+ * P1
+ * 15: P1 error 0x8000
+ * 14-15: Count of events needing PMC1
+ *
+ * P2..P8
+ * 0-13: Count of events needing PMC2..PMC8
+ */
+
+static unsigned char direct_marked_event[8] = {
+ (1<<2) | (1<<3), /* PMC1: PM_MRK_GRP_DISP, PM_MRK_ST_CMPL */
+ (1<<3) | (1<<5), /* PMC2: PM_THRESH_TIMEO, PM_MRK_BRU_FIN */
+ (1<<3) | (1<<5), /* PMC3: PM_MRK_ST_CMPL_INT, PM_MRK_VMX_FIN */
+ (1<<4) | (1<<5), /* PMC4: PM_MRK_GRP_CMPL, PM_MRK_CRU_FIN */
+ (1<<4) | (1<<5), /* PMC5: PM_GRP_MRK, PM_MRK_GRP_TIMEO */
+ (1<<3) | (1<<4) | (1<<5),
+ /* PMC6: PM_MRK_ST_STS, PM_MRK_FXU_FIN, PM_MRK_GRP_ISSUED */
+ (1<<4) | (1<<5), /* PMC7: PM_MRK_FPU_FIN, PM_MRK_INST_FIN */
+ (1<<4) /* PMC8: PM_MRK_LSU_FIN */
+};
+
+/*
+ * Returns 1 if event counts things relating to marked instructions
+ * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
+ */
+static int p970_marked_instr_event(u64 event)
+{
+ int pmc, psel, unit, byte, bit;
+ unsigned int mask;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ psel = event & PM_PMCSEL_MSK;
+ if (pmc) {
+ if (direct_marked_event[pmc - 1] & (1 << psel))
+ return 1;
+ if (psel == 0) /* add events */
+ bit = (pmc <= 4)? pmc - 1: 8 - pmc;
+ else if (psel == 7 || psel == 13) /* decode events */
+ bit = 4;
+ else
+ return 0;
+ } else
+ bit = psel;
+
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ mask = 0;
+ switch (unit) {
+ case PM_VPU:
+		mask = 0x4c;	/* byte 0 bits 2,3,6 */
+		break;
+ case PM_LSU0:
+ /* byte 2 bits 0,2,3,4,6; all of byte 1 */
+		mask = 0x085dff00;
+		break;
+ case PM_LSU1L:
+ mask = 0x50 << 24; /* byte 3 bits 4,6 */
+ break;
+ }
+ return (mask >> (byte * 8 + bit)) & 1;
+}
+
+/* Masks and values for using events from the various units */
+static u64 unit_cons[PM_LASTUNIT+1][2] = {
+ [PM_FPU] = { 0xc80000000000ull, 0x040000000000ull },
+ [PM_VPU] = { 0xc80000000000ull, 0xc40000000000ull },
+ [PM_ISU] = { 0x080000000000ull, 0x020000000000ull },
+ [PM_IFU] = { 0xc80000000000ull, 0x840000000000ull },
+ [PM_IDU] = { 0x380000000000ull, 0x010000000000ull },
+ [PM_STS] = { 0x380000000000ull, 0x310000000000ull },
+};
+
+static int p970_get_constraint(u64 event, u64 *maskp, u64 *valp)
+{
+ int pmc, byte, unit, sh, spcsel;
+ u64 mask = 0, value = 0;
+ int grp = -1;
+
+ pmc = (event >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc > 8)
+ return -1;
+ sh = (pmc - 1) * 2;
+ mask |= 2 << sh;
+ value |= 1 << sh;
+ grp = ((pmc - 1) >> 1) & 1;
+ }
+ unit = (event >> PM_UNIT_SH) & PM_UNIT_MSK;
+ if (unit) {
+ if (unit > PM_LASTUNIT)
+ return -1;
+ mask |= unit_cons[unit][0];
+ value |= unit_cons[unit][1];
+ byte = (event >> PM_BYTE_SH) & PM_BYTE_MSK;
+ /*
+ * Bus events on bytes 0 and 2 can be counted
+ * on PMC1/2/5/6; bytes 1 and 3 on PMC3/4/7/8.
+ */
+ if (!pmc)
+ grp = byte & 1;
+ /* Set byte lane select field */
+ mask |= 0xfULL << (28 - 4 * byte);
+ value |= (u64)unit << (28 - 4 * byte);
+ }
+ if (grp == 0) {
+ /* increment PMC1/2/5/6 field */
+ mask |= 0x8000000000ull;
+ value |= 0x1000000000ull;
+ } else if (grp == 1) {
+ /* increment PMC3/4/7/8 field */
+ mask |= 0x800000000ull;
+ value |= 0x100000000ull;
+ }
+ spcsel = (event >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
+ if (spcsel) {
+ mask |= 3ull << 48;
+ value |= (u64)spcsel << 48;
+ }
+ *maskp = mask;
+ *valp = value;
+ return 0;
+}
+
+static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
+{
+ alt[0] = event;
+
+ /* 2 alternatives for LSU empty */
+ if (event == 0x2002 || event == 0x3002) {
+ alt[1] = event ^ 0x1000;
+ return 2;
+ }
+
+ return 1;
+}
+
+static int p970_compute_mmcr(u64 event[], int n_ev,
+ unsigned int hwc[], u64 mmcr[])
+{
+ u64 mmcr0 = 0, mmcr1 = 0, mmcra = 0;
+ unsigned int pmc, unit, byte, psel;
+ unsigned int ttm, grp;
+ unsigned int pmc_inuse = 0;
+ unsigned int pmc_grp_use[2];
+ unsigned char busbyte[4];
+ unsigned char unituse[16];
+ unsigned char unitmap[] = { 0, 0<<3, 3<<3, 1<<3, 2<<3, 0|4, 3|4 };
+ unsigned char ttmuse[2];
+ unsigned char pmcsel[8];
+ int i;
+ int spcsel;
+
+ if (n_ev > 8)
+ return -1;
+
+ /* First pass to count resource use */
+ pmc_grp_use[0] = pmc_grp_use[1] = 0;
+ memset(busbyte, 0, sizeof(busbyte));
+ memset(unituse, 0, sizeof(unituse));
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ if (pmc) {
+ if (pmc_inuse & (1 << (pmc - 1)))
+ return -1;
+ pmc_inuse |= 1 << (pmc - 1);
+ /* count 1/2/5/6 vs 3/4/7/8 use */
+ ++pmc_grp_use[((pmc - 1) >> 1) & 1];
+ }
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ if (unit) {
+ if (unit > PM_LASTUNIT)
+ return -1;
+ if (!pmc)
+ ++pmc_grp_use[byte & 1];
+ if (busbyte[byte] && busbyte[byte] != unit)
+ return -1;
+ busbyte[byte] = unit;
+ unituse[unit] = 1;
+ }
+ }
+ if (pmc_grp_use[0] > 4 || pmc_grp_use[1] > 4)
+ return -1;
+
+ /*
+ * Assign resources and set multiplexer selects.
+ *
+ * PM_ISU can go either on TTM0 or TTM1, but that's the only
+ * choice we have to deal with.
+ */
+ if (unituse[PM_ISU] &
+ (unituse[PM_FPU] | unituse[PM_IFU] | unituse[PM_VPU]))
+ unitmap[PM_ISU] = 2 | 4; /* move ISU to TTM1 */
+ /* Set TTM[01]SEL fields. */
+ ttmuse[0] = ttmuse[1] = 0;
+ for (i = PM_FPU; i <= PM_STS; ++i) {
+ if (!unituse[i])
+ continue;
+ ttm = unitmap[i];
+ ++ttmuse[(ttm >> 2) & 1];
+ mmcr1 |= (u64)(ttm & ~4) << MMCR1_TTM1SEL_SH;
+ }
+ /* Check only one unit per TTMx */
+ if (ttmuse[0] > 1 || ttmuse[1] > 1)
+ return -1;
+
+ /* Set byte lane select fields and TTM3SEL. */
+ for (byte = 0; byte < 4; ++byte) {
+ unit = busbyte[byte];
+ if (!unit)
+ continue;
+ if (unit <= PM_STS)
+ ttm = (unitmap[unit] >> 2) & 1;
+ else if (unit == PM_LSU0)
+ ttm = 2;
+ else {
+ ttm = 3;
+ if (unit == PM_LSU1L && byte >= 2)
+ mmcr1 |= 1ull << (MMCR1_TTM3SEL_SH + 3 - byte);
+ }
+ mmcr1 |= (u64)ttm << (MMCR1_TD_CP_DBG0SEL_SH - 2 * byte);
+ }
+
+ /* Second pass: assign PMCs, set PMCxSEL and PMCx_ADDER_SEL fields */
+ memset(pmcsel, 0x8, sizeof(pmcsel)); /* 8 means don't count */
+ for (i = 0; i < n_ev; ++i) {
+ pmc = (event[i] >> PM_PMC_SH) & PM_PMC_MSK;
+ unit = (event[i] >> PM_UNIT_SH) & PM_UNIT_MSK;
+ byte = (event[i] >> PM_BYTE_SH) & PM_BYTE_MSK;
+ psel = event[i] & PM_PMCSEL_MSK;
+ if (!pmc) {
+ /* Bus event or any-PMC direct event */
+ if (unit)
+ psel |= 0x10 | ((byte & 2) << 2);
+ else
+ psel |= 8;
+ for (pmc = 0; pmc < 8; ++pmc) {
+ if (pmc_inuse & (1 << pmc))
+ continue;
+ grp = (pmc >> 1) & 1;
+ if (unit) {
+ if (grp == (byte & 1))
+ break;
+ } else if (pmc_grp_use[grp] < 4) {
+ ++pmc_grp_use[grp];
+ break;
+ }
+ }
+ pmc_inuse |= 1 << pmc;
+ } else {
+ /* Direct event */
+ --pmc;
+ if (psel == 0 && (byte & 2))
+ /* add events on higher-numbered bus */
+ mmcr1 |= 1ull << mmcr1_adder_bits[pmc];
+ }
+ pmcsel[pmc] = psel;
+ hwc[i] = pmc;
+ spcsel = (event[i] >> PM_SPCSEL_SH) & PM_SPCSEL_MSK;
+ mmcr1 |= spcsel;
+ if (p970_marked_instr_event(event[i]))
+ mmcra |= MMCRA_SAMPLE_ENABLE;
+ }
+ for (pmc = 0; pmc < 2; ++pmc)
+ mmcr0 |= pmcsel[pmc] << (MMCR0_PMC1SEL_SH - 7 * pmc);
+ for (; pmc < 8; ++pmc)
+ mmcr1 |= (u64)pmcsel[pmc] << (MMCR1_PMC3SEL_SH - 5 * (pmc - 2));
+ if (pmc_inuse & 1)
+ mmcr0 |= MMCR0_PMC1CE;
+ if (pmc_inuse & 0xfe)
+ mmcr0 |= MMCR0_PMCjCE;
+
+ mmcra |= 0x2000; /* mark only one IOP per PPC instruction */
+
+ /* Return MMCRx values */
+ mmcr[0] = mmcr0;
+ mmcr[1] = mmcr1;
+ mmcr[2] = mmcra;
+ return 0;
+}
+
+static void p970_disable_pmc(unsigned int pmc, u64 mmcr[])
+{
+ int shift, i;
+
+ if (pmc <= 1) {
+ shift = MMCR0_PMC1SEL_SH - 7 * pmc;
+ i = 0;
+ } else {
+ shift = MMCR1_PMC3SEL_SH - 5 * (pmc - 2);
+ i = 1;
+ }
+ /*
+ * Setting the PMCxSEL field to 0x08 disables PMC x.
+ */
+ mmcr[i] = (mmcr[i] & ~(0x1fUL << shift)) | (0x08UL << shift);
+}
+
+static int ppc970_generic_events[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = 7,
+ [PERF_COUNT_HW_INSTRUCTIONS] = 1,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = 0x8810, /* PM_LD_REF_L1 */
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x3810, /* PM_LD_MISS_L1 */
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x431, /* PM_BR_ISSUED */
+ [PERF_COUNT_HW_BRANCH_MISSES] = 0x327, /* PM_GRP_BR_MPRED */
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+/*
+ * Table of generalized cache-related events.
+ * 0 means not supported, -1 means nonsensical, other values
+ * are event codes.
+ */
+static int ppc970_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
+ [C(L1D)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x8810, 0x3810 },
+ [C(OP_WRITE)] = { 0x7810, 0x813 },
+ [C(OP_PREFETCH)] = { 0x731, 0 },
+ },
+ [C(L1I)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { 0, 0 },
+ },
+ [C(LL)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0 },
+ [C(OP_WRITE)] = { 0, 0 },
+ [C(OP_PREFETCH)] = { 0x733, 0 },
+ },
+ [C(DTLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x704 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(ITLB)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0, 0x700 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+ [C(BPU)] = { /* RESULT_ACCESS RESULT_MISS */
+ [C(OP_READ)] = { 0x431, 0x327 },
+ [C(OP_WRITE)] = { -1, -1 },
+ [C(OP_PREFETCH)] = { -1, -1 },
+ },
+};
+
+struct power_pmu ppc970_pmu = {
+ .n_counter = 8,
+ .max_alternatives = 2,
+ .add_fields = 0x001100005555ull,
+ .test_adder = 0x013300000000ull,
+ .compute_mmcr = p970_compute_mmcr,
+ .get_constraint = p970_get_constraint,
+ .get_alternatives = p970_get_alternatives,
+ .disable_pmc = p970_disable_pmc,
+ .n_generic = ARRAY_SIZE(ppc970_generic_events),
+ .generic_events = ppc970_generic_events,
+ .cache_events = &ppc970_cache_events,
+};
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 7b44a33f03c2..3e7135bbe40f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16);
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index ce01ff2474da..d4405b95bfaa 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node)
ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
}
-#ifdef CONFIG_PPC64
+#ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node)
{
u32 *slb_size_ptr;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 2f0e64b53642..ef6f64950e9b 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -44,10 +44,7 @@
#include <asm/sections.h>
#include <asm/machdep.h>
-#ifdef CONFIG_LOGO_LINUX_CLUT224
#include <linux/linux_logo.h>
-extern const struct linux_logo logo_linux_clut224;
-#endif
/*
* Properties whose value is longer than this get excluded from our
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 3635be61f899..9fa2c7dcd05a 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -704,15 +704,34 @@ void user_enable_single_step(struct task_struct *task)
if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ task->thread.dbcr0 &= ~DBCR0_BT;
task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
regs->msr |= MSR_DE;
#else
+ regs->msr &= ~MSR_BE;
regs->msr |= MSR_SE;
#endif
}
set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
+void user_enable_block_step(struct task_struct *task)
+{
+ struct pt_regs *regs = task->thread.regs;
+
+ if (regs != NULL) {
+#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
+ task->thread.dbcr0 &= ~DBCR0_IC;
+ task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
+ regs->msr |= MSR_DE;
+#else
+ regs->msr &= ~MSR_SE;
+ regs->msr |= MSR_BE;
+#endif
+ }
+ set_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+
void user_disable_single_step(struct task_struct *task)
{
struct pt_regs *regs = task->thread.regs;
@@ -726,10 +745,10 @@ void user_disable_single_step(struct task_struct *task)
if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
- task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_IDM);
+ task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
regs->msr &= ~MSR_DE;
#else
- regs->msr &= ~MSR_SE;
+ regs->msr &= ~(MSR_SE | MSR_BE);
#endif
}
clear_tsk_thread_flag(task, TIF_SINGLESTEP);
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 8869001ab5d7..54e66da8f743 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -93,10 +93,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
{
struct device_node *busdn, *dn;
- if (bus->self)
- busdn = pci_device_to_OF_node(bus->self);
- else
- busdn = bus->sysdata; /* must be a phb */
+ busdn = pci_bus_to_OF_node(bus);
/* Search only direct children of the bus */
for (dn = busdn->child; dn; dn = dn->sibling) {
@@ -140,10 +137,7 @@ static int rtas_pci_write_config(struct pci_bus *bus,
{
struct device_node *busdn, *dn;
- if (bus->self)
- busdn = pci_device_to_OF_node(bus->self);
- else
- busdn = bus->sysdata; /* must be a phb */
+ busdn = pci_bus_to_OF_node(bus);
/* Search only direct children of the bus */
for (dn = busdn->child; dn; dn = dn->sibling) {
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 9e1ca745d8f0..1d154248cf40 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -39,6 +39,7 @@
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/mmu_context.h>
+#include <asm/swiotlb.h>
#include "setup.h"
@@ -332,6 +333,11 @@ void __init setup_arch(char **cmdline_p)
ppc_md.setup_arch();
if ( ppc_md.progress ) ppc_md.progress("arch: exit", 0x3eab);
+#ifdef CONFIG_SWIOTLB
+ if (ppc_swiotlb_enable)
+ swiotlb_init();
+#endif
+
paging_init();
/* Initialize the MMU context management stuff */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index c410c606955d..1f6816003ebe 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,6 +61,7 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
+#include <asm/swiotlb.h>
#include "setup.h"
@@ -417,12 +418,14 @@ void __init setup_system(void)
if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size);
+#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address)
printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
+#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0)
- printk("physical_start = 0x%lx\n",
- PHYSICAL_START);
+ printk("physical_start = 0x%llx\n",
+ (unsigned long long)PHYSICAL_START);
printk("-----------------------------------------------------\n");
DBG(" <- setup_system()\n");
@@ -511,8 +514,9 @@ void __init setup_arch(char **cmdline_p)
irqstack_early_init();
emergency_stack_init();
+#ifdef CONFIG_PPC_STD_MMU_64
stabs_alloc();
-
+#endif
/* set up the bootmem stuff with available memory */
do_init_bootmem();
sparse_init();
@@ -524,6 +528,11 @@ void __init setup_arch(char **cmdline_p)
if (ppc_md.setup_arch)
ppc_md.setup_arch();
+#ifdef CONFIG_SWIOTLB
+ if (ppc_swiotlb_enable)
+ swiotlb_init();
+#endif
+
paging_init();
ppc64_boot_msg(0x15, "Setup Done");
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 48571ac56fb7..15391c2ab013 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -52,6 +52,7 @@
#include <linux/jiffies.h>
#include <linux/posix-timers.h>
#include <linux/irq.h>
+#include <linux/delay.h>
#include <asm/io.h>
#include <asm/processor.h>
@@ -109,7 +110,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
static struct clock_event_device decrementer_clockevent = {
.name = "decrementer",
.rating = 200,
- .shift = 16,
+ .shift = 0, /* To be filled in */
.mult = 0, /* To be filled in */
.irq = 0,
.set_next_event = decrementer_set_next_event,
@@ -843,6 +844,22 @@ static void decrementer_set_mode(enum clock_event_mode mode,
decrementer_set_next_event(DECREMENTER_MAX, dev);
}
+static void __init setup_clockevent_multiplier(unsigned long hz)
+{
+ u64 mult, shift = 32;
+
+ while (1) {
+ mult = div_sc(hz, NSEC_PER_SEC, shift);
+ if (mult && (mult >> 32UL) == 0UL)
+ break;
+
+ shift--;
+ }
+
+ decrementer_clockevent.shift = shift;
+ decrementer_clockevent.mult = mult;
+}
+
static void register_decrementer_clockevent(int cpu)
{
struct clock_event_device *dec = &per_cpu(decrementers, cpu).event;
@@ -860,8 +877,7 @@ static void __init init_decrementer_clockevent(void)
{
int cpu = smp_processor_id();
- decrementer_clockevent.mult = div_sc(ppc_tb_freq, NSEC_PER_SEC,
- decrementer_clockevent.shift);
+ setup_clockevent_multiplier(ppc_tb_freq);
decrementer_clockevent.max_delta_ns =
clockevent_delta2ns(DECREMENTER_MAX, &decrementer_clockevent);
decrementer_clockevent.min_delta_ns =
@@ -1128,6 +1144,15 @@ void div128_by_32(u64 dividend_high, u64 dividend_low,
}
+/* We don't need to calibrate delay, we use the CPU timebase for that */
+void calibrate_delay(void)
+{
+ /* Some generic code (such as spinlock debug) uses loops_per_jiffy
+ * as the number of __delay(1) calls in a jiffy, so make it so
+ */
+ loops_per_jiffy = tb_ticks_per_jiffy;
+}
+
static int __init rtc_init(void)
{
struct platform_device *pdev;
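
setup_clockevent_multiplier() above walks the shift down from 32 until the corresponding mult fits in 32 bits, which keeps the ns-to-ticks conversion (ticks ~= ns * mult >> shift) as precise as possible without overflowing. A standalone sketch of the same search, with the kernel's div_sc() written out explicitly:

/* Standalone model of the shift/mult search above; div_sc() in the
 * kernel computes ((u64)ticks_per_sec << shift) / NSEC_PER_SEC. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

static void pick_mult_shift(uint32_t hz, uint32_t *mult, uint32_t *shift)
{
	uint64_t m;
	uint32_t s = 32;

	while (1) {
		m = ((uint64_t)hz << s) / NSEC_PER_SEC;	/* div_sc() */
		if (m && (m >> 32) == 0)
			break;
		s--;
	}
	*mult = (uint32_t)m;
	*shift = s;
}

int main(void)
{
	uint32_t mult, shift;

	pick_mult_shift(512000000, &mult, &shift);	/* e.g. a 512 MHz timebase */
	printf("mult=%u shift=%u\n", mult, shift);
	/* Programming an event then uses:
	 * ticks = ((uint64_t)ns * mult) >> shift; */
	return 0;
}
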
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 678fbff0d206..6f0ae1a9bfae 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -33,7 +33,9 @@
#include <linux/backlight.h>
#include <linux/bug.h>
#include <linux/kdebug.h>
+#include <linux/debugfs.h>
+#include <asm/emulated_ops.h>
#include <asm/pgtable.h>
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -757,36 +759,44 @@ static int emulate_instruction(struct pt_regs *regs)
/* Emulate the mfspr rD, PVR. */
if ((instword & PPC_INST_MFSPR_PVR_MASK) == PPC_INST_MFSPR_PVR) {
+ PPC_WARN_EMULATED(mfpvr);
rd = (instword >> 21) & 0x1f;
regs->gpr[rd] = mfspr(SPRN_PVR);
return 0;
}
/* Emulating the dcba insn is just a no-op. */
- if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA)
+ if ((instword & PPC_INST_DCBA_MASK) == PPC_INST_DCBA) {
+ PPC_WARN_EMULATED(dcba);
return 0;
+ }
/* Emulate the mcrxr insn. */
if ((instword & PPC_INST_MCRXR_MASK) == PPC_INST_MCRXR) {
int shift = (instword >> 21) & 0x1c;
unsigned long msk = 0xf0000000UL >> shift;
+ PPC_WARN_EMULATED(mcrxr);
regs->ccr = (regs->ccr & ~msk) | ((regs->xer >> shift) & msk);
regs->xer &= ~0xf0000000UL;
return 0;
}
/* Emulate load/store string insn. */
- if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING)
+ if ((instword & PPC_INST_STRING_GEN_MASK) == PPC_INST_STRING) {
+ PPC_WARN_EMULATED(string);
return emulate_string_inst(regs, instword);
+ }
/* Emulate the popcntb (Population Count Bytes) instruction. */
if ((instword & PPC_INST_POPCNTB_MASK) == PPC_INST_POPCNTB) {
+ PPC_WARN_EMULATED(popcntb);
return emulate_popcntb_inst(regs, instword);
}
/* Emulate isel (Integer Select) instruction */
if ((instword & PPC_INST_ISEL_MASK) == PPC_INST_ISEL) {
+ PPC_WARN_EMULATED(isel);
return emulate_isel(regs, instword);
}
@@ -984,6 +994,8 @@ void SoftwareEmulation(struct pt_regs *regs)
#ifdef CONFIG_MATH_EMULATION
errcode = do_mathemu(regs);
+ if (errcode >= 0)
+ PPC_WARN_EMULATED(math);
switch (errcode) {
case 0:
@@ -1005,6 +1017,9 @@ void SoftwareEmulation(struct pt_regs *regs)
#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
errcode = Soft_emulate_8xx(regs);
+ if (errcode >= 0)
+ PPC_WARN_EMULATED(8xx);
+
switch (errcode) {
case 0:
emulate_single_step(regs);
@@ -1026,7 +1041,34 @@ void SoftwareEmulation(struct pt_regs *regs)
void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
{
- if (debug_status & DBSR_IC) { /* instruction completion */
+ /* Hack alert: On BookE, Branch Taken stops on the branch itself, while
+ * on server, it stops on the target of the branch. In order to simulate
+ * the server behaviour, we thus restart right away with a single step
+ * instead of stopping here when hitting a BT
+ */
+ if (debug_status & DBSR_BT) {
+ regs->msr &= ~MSR_DE;
+
+ /* Disable BT */
+ mtspr(SPRN_DBCR0, mfspr(SPRN_DBCR0) & ~DBCR0_BT);
+ /* Clear the BT event */
+ mtspr(SPRN_DBSR, DBSR_BT);
+
+ /* Do the single step trick only when coming from userspace */
+ if (user_mode(regs)) {
+ current->thread.dbcr0 &= ~DBCR0_BT;
+ current->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
+ regs->msr |= MSR_DE;
+ return;
+ }
+
+ if (notify_die(DIE_SSTEP, "block_step", regs, 5,
+ 5, SIGTRAP) == NOTIFY_STOP) {
+ return;
+ }
+ if (debugger_sstep(regs))
+ return;
+ } else if (debug_status & DBSR_IC) { /* Instruction complete */
regs->msr &= ~MSR_DE;
/* Disable instruction completion */
@@ -1042,9 +1084,8 @@ void __kprobes DebugException(struct pt_regs *regs, unsigned long debug_status)
if (debugger_sstep(regs))
return;
- if (user_mode(regs)) {
- current->thread.dbcr0 &= ~DBCR0_IC;
- }
+ if (user_mode(regs))
+ current->thread.dbcr0 &= ~(DBCR0_IC);
_exception(SIGTRAP, regs, TRAP_TRACE, regs->nip);
} else if (debug_status & (DBSR_DAC1R | DBSR_DAC1W)) {
@@ -1088,6 +1129,7 @@ void altivec_assist_exception(struct pt_regs *regs)
flush_altivec_to_thread(current);
+ PPC_WARN_EMULATED(altivec);
err = emulate_altivec(regs);
if (err == 0) {
regs->nip += 4; /* skip emulated instruction */
@@ -1286,3 +1328,79 @@ void kernel_bad_stack(struct pt_regs *regs)
void __init trap_init(void)
{
}
+
+
+#ifdef CONFIG_PPC_EMULATED_STATS
+
+#define WARN_EMULATED_SETUP(type) .type = { .name = #type }
+
+struct ppc_emulated ppc_emulated = {
+#ifdef CONFIG_ALTIVEC
+ WARN_EMULATED_SETUP(altivec),
+#endif
+ WARN_EMULATED_SETUP(dcba),
+ WARN_EMULATED_SETUP(dcbz),
+ WARN_EMULATED_SETUP(fp_pair),
+ WARN_EMULATED_SETUP(isel),
+ WARN_EMULATED_SETUP(mcrxr),
+ WARN_EMULATED_SETUP(mfpvr),
+ WARN_EMULATED_SETUP(multiple),
+ WARN_EMULATED_SETUP(popcntb),
+ WARN_EMULATED_SETUP(spe),
+ WARN_EMULATED_SETUP(string),
+ WARN_EMULATED_SETUP(unaligned),
+#ifdef CONFIG_MATH_EMULATION
+ WARN_EMULATED_SETUP(math),
+#elif defined(CONFIG_8XX_MINIMAL_FPEMU)
+ WARN_EMULATED_SETUP(8xx),
+#endif
+#ifdef CONFIG_VSX
+ WARN_EMULATED_SETUP(vsx),
+#endif
+};
+
+u32 ppc_warn_emulated;
+
+void ppc_warn_emulated_print(const char *type)
+{
+ if (printk_ratelimit())
+ pr_warning("%s used emulated %s instruction\n", current->comm,
+ type);
+}
+
+static int __init ppc_warn_emulated_init(void)
+{
+ struct dentry *dir, *d;
+ unsigned int i;
+ struct ppc_emulated_entry *entries = (void *)&ppc_emulated;
+
+ if (!powerpc_debugfs_root)
+ return -ENODEV;
+
+ dir = debugfs_create_dir("emulated_instructions",
+ powerpc_debugfs_root);
+ if (!dir)
+ return -ENOMEM;
+
+ d = debugfs_create_u32("do_warn", S_IRUGO | S_IWUSR, dir,
+ &ppc_warn_emulated);
+ if (!d)
+ goto fail;
+
+ for (i = 0; i < sizeof(ppc_emulated)/sizeof(*entries); i++) {
+ d = debugfs_create_u32(entries[i].name, S_IRUGO | S_IWUSR, dir,
+ (u32 *)&entries[i].val.counter);
+ if (!d)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ debugfs_remove_recursive(dir);
+ return -ENOMEM;
+}
+
+device_initcall(ppc_warn_emulated_init);
+
+#endif /* CONFIG_PPC_EMULATED_STATS */
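
PPC_WARN_EMULATED() itself comes from the newly included <asm/emulated_ops.h>, which is not shown in this hunk. A plausible sketch of what such a helper does when CONFIG_PPC_EMULATED_STATS is set (the exact macro body is an assumption): bump the per-instruction counter that ppc_warn_emulated_init() exports through debugfs, and print the rate-limited warning only when the do_warn switch is set. With debugfs mounted in the usual place, the counters would then appear under /sys/kernel/debug/powerpc/emulated_instructions/.

/* Hedged, kernel-context sketch of the counting macro; the real
 * definition lives in <asm/emulated_ops.h> and may differ in detail. */
struct ppc_emulated_entry {
	const char	*name;
	atomic_t	val;
};

#define PPC_WARN_EMULATED(type)						\
	do {								\
		atomic_inc(&ppc_emulated.type.val);			\
		if (ppc_warn_emulated)					\
			ppc_warn_emulated_print(ppc_emulated.type.name);\
	} while (0)
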
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 49ac3d6e1399..ef36cbbc5882 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -1,5 +1,215 @@
+#include <asm/processor.h>
#include <asm/ppc_asm.h>
#include <asm/reg.h>
+#include <asm/asm-offsets.h>
+#include <asm/cputable.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+
+/*
+ * load_up_altivec(unused, unused, tsk)
+ * Disable VMX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ * On SMP we know the VMX is free, since we give it up every
+ * switch (ie, no lazy save of the vector registers).
+ */
+_GLOBAL(load_up_altivec)
+ mfmsr r5 /* grab the current MSR */
+ oris r5,r5,MSR_VEC@h
+ MTMSRD(r5) /* enable use of AltiVec now */
+ isync
+
+/*
+ * For SMP, we don't do lazy VMX switching because it just gets too
+ * horrendously complex, especially when a task switches from one CPU
+ * to another. Instead we call giveup_altivec in switch_to.
+ * VRSAVE isn't dealt with here, that is done in the normal context
+ * switch code. Note that we could rely on vrsave value to eventually
+ * avoid saving all of the VREGs here...
+ */
+#ifndef CONFIG_SMP
+ LOAD_REG_ADDRBASE(r3, last_task_used_altivec)
+ toreal(r3)
+ PPC_LL r4,ADDROFF(last_task_used_altivec)(r3)
+ PPC_LCMPI 0,r4,0
+ beq 1f
+
+ /* Save VMX state to last_task_used_altivec's THREAD struct */
+ toreal(r4)
+ addi r4,r4,THREAD
+ SAVE_32VRS(0,r5,r4)
+ mfvscr vr0
+ li r10,THREAD_VSCR
+ stvx vr0,r10,r4
+ /* Disable VMX for last_task_used_altivec */
+ PPC_LL r5,PT_REGS(r4)
+ toreal(r5)
+ PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r10,MSR_VEC@h
+ andc r4,r4,r10
+ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+
+ /* Hack: if we get an altivec unavailable trap with VRSAVE
+ * set to all zeros, we assume this is a broken application
+ * that fails to set it properly, and thus we switch it to
+ * all 1's
+ */
+ mfspr r4,SPRN_VRSAVE
+ cmpdi 0,r4,0
+ bne+ 1f
+ li r4,-1
+ mtspr SPRN_VRSAVE,r4
+1:
+ /* enable use of VMX after return */
+#ifdef CONFIG_PPC32
+ mfspr r5,SPRN_SPRG3 /* current task's THREAD (phys) */
+ oris r9,r9,MSR_VEC@h
+#else
+ ld r4,PACACURRENT(r13)
+ addi r5,r4,THREAD /* Get THREAD */
+ oris r12,r12,MSR_VEC@h
+ std r12,_MSR(r1)
+#endif
+ li r4,1
+ li r10,THREAD_VSCR
+ stw r4,THREAD_USED_VR(r5)
+ lvx vr0,r10,r5
+ mtvscr vr0
+ REST_32VRS(0,r4,r5)
+#ifndef CONFIG_SMP
+ /* Update last_task_used_altivec to 'current' */
+ subi r4,r5,THREAD /* Back to 'current' */
+ fromreal(r4)
+ PPC_STL r4,ADDROFF(last_task_used_altivec)(r3)
+#endif /* CONFIG_SMP */
+ /* restore registers and return */
+ blr
+
+/*
+ * giveup_altivec(tsk)
+ * Disable VMX for the task given as the argument,
+ * and save the vector registers in its thread_struct.
+ * Enables the VMX for use in the kernel on return.
+ */
+_GLOBAL(giveup_altivec)
+ mfmsr r5
+ oris r5,r5,MSR_VEC@h
+ SYNC
+ MTMSRD(r5) /* enable use of VMX now */
+ isync
+ PPC_LCMPI 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ PPC_LL r5,PT_REGS(r3)
+ PPC_LCMPI 0,r5,0
+ SAVE_32VRS(0,r4,r3)
+ mfvscr vr0
+ li r4,THREAD_VSCR
+ stvx vr0,r4,r3
+ beq 1f
+ PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+#ifdef CONFIG_VSX
+BEGIN_FTR_SECTION
+ lis r3,(MSR_VEC|MSR_VSX)@h
+FTR_SECTION_ELSE
+ lis r3,MSR_VEC@h
+ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
+#else
+ lis r3,MSR_VEC@h
+#endif
+ andc r4,r4,r3 /* disable VMX for previous task */
+ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ LOAD_REG_ADDRBASE(r4,last_task_used_altivec)
+ PPC_STL r5,ADDROFF(last_task_used_altivec)(r4)
+#endif /* CONFIG_SMP */
+ blr
+
+#ifdef CONFIG_VSX
+
+#ifdef CONFIG_PPC32
+#error This asm code isn't ready for 32-bit kernels
+#endif
+
+/*
+ * load_up_vsx(unused, unused, tsk)
+ * Disable VSX for the task which had it previously,
+ * and save its vector registers in its thread_struct.
+ * Reuse the fp and vsx saves, but first check to see if they have
+ * been saved already.
+ */
+_GLOBAL(load_up_vsx)
+/* Load FP and VSX registers if they haven't been done yet */
+ andi. r5,r12,MSR_FP
+ beql+ load_up_fpu /* skip if already loaded */
+ andis. r5,r12,MSR_VEC@h
+ beql+ load_up_altivec /* skip if already loaded */
+
+#ifndef CONFIG_SMP
+ ld r3,last_task_used_vsx@got(r2)
+ ld r4,0(r3)
+ cmpdi 0,r4,0
+ beq 1f
+ /* Disable VSX for last_task_used_vsx */
+ addi r4,r4,THREAD
+ ld r5,PT_REGS(r4)
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r6,MSR_VSX@h
+ andc r6,r4,r6
+ std r6,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#endif /* CONFIG_SMP */
+ ld r4,PACACURRENT(r13)
+ addi r4,r4,THREAD /* Get THREAD */
+ li r6,1
+ stw r6,THREAD_USED_VSR(r4) /* ... also set thread used vsr */
+ /* enable use of VSX after return */
+ oris r12,r12,MSR_VSX@h
+ std r12,_MSR(r1)
+#ifndef CONFIG_SMP
+ /* Update last_task_used_vsx to 'current' */
+ ld r4,PACACURRENT(r13)
+ std r4,0(r3)
+#endif /* CONFIG_SMP */
+ b fast_exception_return
+
+/*
+ * __giveup_vsx(tsk)
+ * Disable VSX for the task given as the argument.
+ * Does NOT save vsx registers.
+ * Enables the VSX for use in the kernel on return.
+ */
+_GLOBAL(__giveup_vsx)
+ mfmsr r5
+ oris r5,r5,MSR_VSX@h
+ mtmsrd r5 /* enable use of VSX now */
+ isync
+
+ cmpdi 0,r3,0
+ beqlr- /* if no previous owner, done */
+ addi r3,r3,THREAD /* want THREAD of task */
+ ld r5,PT_REGS(r3)
+ cmpdi 0,r5,0
+ beq 1f
+ ld r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ lis r3,MSR_VSX@h
+ andc r4,r4,r3 /* disable VSX for previous task */
+ std r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+1:
+#ifndef CONFIG_SMP
+ li r5,0
+ ld r4,last_task_used_vsx@got(r2)
+ std r5,0(r4)
+#endif /* CONFIG_SMP */
+ blr
+
+#endif /* CONFIG_VSX */
+
/*
* The routines below are in assembler so we can closely control the
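
For reference, the !CONFIG_SMP paths above implement classic lazy context switching: vector state stays live in the registers until some other task takes an "AltiVec unavailable" exception, at which point the previous owner's registers are flushed to its thread_struct and ownership moves to current. A C-level model of that ownership dance (the real work is the assembly above; the save/restore helpers here are illustrative only):

/* Illustrative kernel-context model of load_up_altivec's UP path;
 * save_vmx_to_thread()/restore_vmx_from_thread() stand in for the
 * SAVE_32VRS/REST_32VRS and VSCR handling done in assembly. */
static struct task_struct *last_task_used_altivec;	/* UP only */

void altivec_unavailable(struct pt_regs *regs)
{
	struct task_struct *prev = last_task_used_altivec;

	if (prev && prev != current) {
		save_vmx_to_thread(prev);
		if (prev->thread.regs)
			prev->thread.regs->msr &= ~MSR_VEC; /* prev traps again */
	}
	restore_vmx_from_thread(current);
	current->thread.used_vr = 1;
	regs->msr |= MSR_VEC;			/* let current use VMX */
	last_task_used_altivec = current;
}
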
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index a047a6cfca4d..8ef8a14abc95 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -264,6 +264,7 @@ SECTIONS
*(.data.page_aligned)
}
+ . = ALIGN(L1_CACHE_BYTES);
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
*(.data.cacheline_aligned)
}
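
The extra ALIGN(L1_CACHE_BYTES) makes the .data.cacheline_aligned output section itself begin on a cache-line boundary, whatever size .data.page_aligned ended up being. Data lands in that section through annotations along these lines (a sketch of the usual <linux/cache.h> pattern, not the literal macro):

/* Sketch: variables marked like this are emitted into
 * .data.cacheline_aligned and rely on the section start being aligned. */
static struct {
	unsigned long	hits;
	unsigned long	misses;
} hot_counters __attribute__((__aligned__(L1_CACHE_BYTES),
			      __section__(".data.cacheline_aligned"))) = { 0, 0 };
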