author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 20:53:32 +0200
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-16 20:53:32 +0200
commit     d19d5efd8c8840aa4f38a6dfbfe500d8cc27de46 (patch)
tree       2e2f4f57de790c7de2ccd6d1afbec8695b2c7a46 /arch/powerpc
parent     crypto: fix broken crypto_register_instance() module handling (diff)
parent     powerpc/powermac: Fix build error seen with powermac smp builds (diff)
Merge tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux
Pull powerpc updates from Michael Ellerman:

 - Numerous minor fixes, cleanups etc.
 - More EEH work from Gavin to remove its dependency on device_nodes.
 - Memory hotplug implemented entirely in the kernel from Nathan Fontenot.
 - Removal of redundant CONFIG_PPC_OF by Kevin Hao.
 - Rewrite of VPHN parsing logic & tests from Greg Kurz.
 - A fix from Nish Aravamudan to reduce memory usage by clamping nodes_possible_map.
 - Support for pstore on powernv from Hari Bathini.
 - Removal of old powerpc specific byte swap routines by David Gibson.
 - Fix from Vasant Hegde to prevent the flash driver telling you it was flashing your firmware when it wasn't.
 - Patch from Ben Herrenschmidt to add an OPAL heartbeat driver.
 - Fix for an oops causing get/put_cpu_var() imbalance in perf by Jan Stancek.
 - Some fixes for migration from Tyrel Datwyler.
 - A new syscall to switch the cpu endian by Michael Ellerman.
 - Large series from Wei Yang to implement SRIOV, reviewed and acked by Bjorn.
 - A fix for the OPAL sensor driver from Cédric Le Goater.
 - Fixes to get STRICT_MM_TYPECHECKS building again by Michael Ellerman.
 - Large series from Daniel Axtens to make our PCI hooks per PHB rather than per machine.
 - Small patch from Sam Bobroff to explicitly abort non-suspended transactions on syscalls, plus a test to exercise it.
 - Numerous reworks and fixes for the 24x7 PMU from Sukadev Bhattiprolu.
 - Small patch to enable the hard lockup detector from Anton Blanchard.
 - Fix from Dave Olson for missing L2 cache information on some CPUs.
 - Some fixes from Michael Ellerman to get Cell machines booting again.
 - Freescale updates from Scott: Highlights include BMan device tree nodes, an MSI erratum workaround, a couple of minor performance improvements, config updates, and misc fixes/cleanup.

* tag 'powerpc-4.1-1' of git://git.kernel.org/pub/scm/linux/kernel/git/mpe/linux: (196 commits)
  powerpc/powermac: Fix build error seen with powermac smp builds
  powerpc/pseries: Fix compile of memory hotplug without CONFIG_MEMORY_HOTREMOVE
  powerpc: Remove PPC32 code from pseries specific find_and_init_phbs()
  powerpc/cell: Fix iommu breakage caused by controller_ops change
  powerpc/eeh: Fix crash in eeh_add_device_early() on Cell
  powerpc/perf: Cap 64bit userspace backtraces to PERF_MAX_STACK_DEPTH
  powerpc/perf/hv-24x7: Fail 24x7 initcall if create_events_from_catalog() fails
  powerpc/pseries: Correct memory hotplug locking
  powerpc: Fix missing L2 cache size in /sys/devices/system/cpu
  powerpc: Add ppc64 hard lockup detector support
  oprofile: Disable oprofile NMI timer on ppc64
  powerpc/perf/hv-24x7: Add missing put_cpu_var()
  powerpc/perf/hv-24x7: Break up single_24x7_request
  powerpc/perf/hv-24x7: Define update_event_count()
  powerpc/perf/hv-24x7: Whitespace cleanup
  powerpc/perf/hv-24x7: Define add_event_to_24x7_request()
  powerpc/perf/hv-24x7: Rename hv_24x7_event_update
  powerpc/perf/hv-24x7: Move debug prints to separate function
  powerpc/perf/hv-24x7: Drop event_24x7_request()
  powerpc/perf/hv-24x7: Use pr_devel() to log message
  ...

Conflicts:
	tools/testing/selftests/powerpc/Makefile
	tools/testing/selftests/powerpc/tm/Makefile
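A quick way to sanity-check the newly enabled ppc64 hard lockup detector mentioned above on a running kernel is to read back the generic watchdog sysctl. This is a minimal, hedged sketch only: /proc/sys/kernel/nmi_watchdog is the long-standing generic kernel knob, not an interface introduced by this merge, which simply makes it usable on ppc64 via HAVE_PERF_EVENTS_NMI.

#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch: report whether the NMI/hard lockup watchdog is active.
 * The sysctl path is the generic kernel watchdog interface; this merge
 * only enables the backing perf-NMI support on ppc64.
 */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/nmi_watchdog", "r");
	int enabled = -1;

	if (!f) {
		perror("open /proc/sys/kernel/nmi_watchdog");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%d", &enabled) != 1)
		enabled = -1;
	fclose(f);

	printf("hard lockup detector: %s\n",
	       enabled == 1 ? "enabled" : "disabled or unknown");
	return EXIT_SUCCESS;
}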
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/Kconfig | 4
-rw-r--r--  arch/powerpc/Kconfig.debug | 9
-rw-r--r--  arch/powerpc/Makefile | 4
-rw-r--r--  arch/powerpc/boot/Makefile | 4
-rw-r--r--  arch/powerpc/boot/crt0.S | 26
-rw-r--r--  arch/powerpc/boot/dts/b4860emu.dts | 223
-rw-r--r--  arch/powerpc/boot/dts/b4qds.dtsi | 17
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4860si-post.dtsi | 60
-rw-r--r--  arch/powerpc/boot/dts/fsl/b4si-post.dtsi | 89
-rw-r--r--  arch/powerpc/boot/dts/fsl/p1023si-post.dtsi | 37
-rw-r--r--  arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 11
-rw-r--r--  arch/powerpc/boot/dts/fsl/p3041si-post.dtsi | 11
-rw-r--r--  arch/powerpc/boot/dts/fsl/p4080si-post.dtsi | 11
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5020si-post.dtsi | 11
-rw-r--r--  arch/powerpc/boot/dts/fsl/p5040si-post.dtsi | 11
-rw-r--r--  arch/powerpc/boot/dts/fsl/t1040si-post.dtsi | 65
-rw-r--r--  arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 105
-rw-r--r--  arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 265
-rw-r--r--  arch/powerpc/boot/dts/kmcoge4.dts | 15
-rw-r--r--  arch/powerpc/boot/dts/oca4080.dts | 15
-rw-r--r--  arch/powerpc/boot/dts/p1023rdb.dts | 18
-rw-r--r--  arch/powerpc/boot/dts/p2041rdb.dts | 17
-rw-r--r--  arch/powerpc/boot/dts/p3041ds.dts | 17
-rw-r--r--  arch/powerpc/boot/dts/p4080ds.dts | 17
-rw-r--r--  arch/powerpc/boot/dts/p5020ds.dts | 17
-rw-r--r--  arch/powerpc/boot/dts/p5040ds.dts | 17
-rw-r--r--  arch/powerpc/boot/dts/t104xqds.dtsi | 17
-rw-r--r--  arch/powerpc/boot/dts/t104xrdb.dtsi | 14
-rw-r--r--  arch/powerpc/boot/dts/t208xqds.dtsi | 19
-rw-r--r--  arch/powerpc/boot/dts/t208xrdb.dtsi | 15
-rw-r--r--  arch/powerpc/boot/dts/t4240qds.dts | 17
-rw-r--r--  arch/powerpc/boot/dts/t4240rdb.dts | 15
-rw-r--r--  arch/powerpc/boot/libfdt-wrapper.c | 6
-rw-r--r--  arch/powerpc/boot/libfdt_env.h | 14
-rw-r--r--  arch/powerpc/boot/of.h | 8
-rw-r--r--  arch/powerpc/boot/planetcore.c | 33
-rw-r--r--  arch/powerpc/boot/planetcore.h | 3
-rwxr-xr-x  arch/powerpc/boot/wrapper | 2
-rw-r--r--  arch/powerpc/configs/cell_defconfig | 3
-rw-r--r--  arch/powerpc/configs/celleb_defconfig | 152
-rw-r--r--  arch/powerpc/configs/corenet32_smp_defconfig | 7
-rw-r--r--  arch/powerpc/configs/corenet64_smp_defconfig | 15
-rw-r--r--  arch/powerpc/configs/mpc85xx_defconfig | 3
-rw-r--r--  arch/powerpc/configs/mpc85xx_smp_defconfig | 2
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 3
-rw-r--r--  arch/powerpc/include/asm/Kbuild | 4
-rw-r--r--  arch/powerpc/include/asm/cache.h | 3
-rw-r--r--  arch/powerpc/include/asm/cputable.h | 8
-rw-r--r--  arch/powerpc/include/asm/dbdma.h | 12
-rw-r--r--  arch/powerpc/include/asm/dcr-native.h | 2
-rw-r--r--  arch/powerpc/include/asm/device.h | 6
-rw-r--r--  arch/powerpc/include/asm/div64.h | 1
-rw-r--r--  arch/powerpc/include/asm/dma-mapping.h | 4
-rw-r--r--  arch/powerpc/include/asm/eeh.h | 29
-rw-r--r--  arch/powerpc/include/asm/firmware.h | 10
-rw-r--r--  arch/powerpc/include/asm/iommu.h | 6
-rw-r--r--  arch/powerpc/include/asm/irq_regs.h | 2
-rw-r--r--  arch/powerpc/include/asm/kvm_book3s_64.h | 16
-rw-r--r--  arch/powerpc/include/asm/kvm_host.h | 2
-rw-r--r--  arch/powerpc/include/asm/local64.h | 1
-rw-r--r--  arch/powerpc/include/asm/machdep.h | 19
-rw-r--r--  arch/powerpc/include/asm/mmu-hash64.h | 1
-rw-r--r--  arch/powerpc/include/asm/mpc85xx.h | 1
-rw-r--r--  arch/powerpc/include/asm/mpic.h | 17
-rw-r--r--  arch/powerpc/include/asm/nmi.h | 4
-rw-r--r--  arch/powerpc/include/asm/nvram.h | 50
-rw-r--r--  arch/powerpc/include/asm/opal-api.h | 735
-rw-r--r--  arch/powerpc/include/asm/opal.h | 770
-rw-r--r--  arch/powerpc/include/asm/paca.h | 4
-rw-r--r--  arch/powerpc/include/asm/pci-bridge.h | 60
-rw-r--r--  arch/powerpc/include/asm/pci.h | 2
-rw-r--r--  arch/powerpc/include/asm/ppc-pci.h | 8
-rw-r--r--  arch/powerpc/include/asm/ppc_asm.h | 192
-rw-r--r--  arch/powerpc/include/asm/rtas.h | 33
-rw-r--r--  arch/powerpc/include/asm/setup.h | 1
-rw-r--r--  arch/powerpc/include/asm/smp.h | 5
-rw-r--r--  arch/powerpc/include/asm/swab.h | 26
-rw-r--r--  arch/powerpc/include/asm/systbl.h | 1
-rw-r--r--  arch/powerpc/include/asm/ucc_slow.h | 13
-rw-r--r--  arch/powerpc/include/asm/unistd.h | 2
-rw-r--r--  arch/powerpc/include/asm/vga.h | 4
-rw-r--r--  arch/powerpc/include/asm/xics.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/ptrace.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/tm.h | 2
-rw-r--r--  arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r--  arch/powerpc/kernel/Makefile | 4
-rw-r--r--  arch/powerpc/kernel/cacheinfo.c | 44
-rw-r--r--  arch/powerpc/kernel/cpu_setup_power.S | 10
-rw-r--r--  arch/powerpc/kernel/cputable.c | 4
-rw-r--r--  arch/powerpc/kernel/dma-swiotlb.c | 11
-rw-r--r--  arch/powerpc/kernel/eeh.c | 176
-rw-r--r--  arch/powerpc/kernel/eeh_cache.c | 25
-rw-r--r--  arch/powerpc/kernel/eeh_dev.c | 14
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 22
-rw-r--r--  arch/powerpc/kernel/eeh_pe.c | 129
-rw-r--r--  arch/powerpc/kernel/entry_64.S | 24
-rw-r--r--  arch/powerpc/kernel/idle_power7.S | 1
-rw-r--r--  arch/powerpc/kernel/mce_power.c | 53
-rw-r--r--  arch/powerpc/kernel/nvram_64.c | 677
-rw-r--r--  arch/powerpc/kernel/of_platform.c | 2
-rw-r--r--  arch/powerpc/kernel/pci-common.c | 57
-rw-r--r--  arch/powerpc/kernel/pci-hotplug.c | 9
-rw-r--r--  arch/powerpc/kernel/pci_dn.c | 309
-rw-r--r--  arch/powerpc/kernel/pci_of_scan.c | 9
-rw-r--r--  arch/powerpc/kernel/process.c | 9
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 4
-rw-r--r--  arch/powerpc/kernel/rtas.c | 30
-rw-r--r--  arch/powerpc/kernel/rtas_pci.c | 49
-rw-r--r--  arch/powerpc/kernel/setup_64.c | 20
-rw-r--r--  arch/powerpc/kernel/syscalls.c | 17
-rw-r--r--  arch/powerpc/kernel/systbl.S | 2
-rw-r--r--  arch/powerpc/kernel/systbl_chk.c | 2
-rw-r--r--  arch/powerpc/kernel/tm.S | 8
-rw-r--r--  arch/powerpc/kernel/udbg.c | 2
-rw-r--r--  arch/powerpc/kernel/vector.S | 24
-rw-r--r--  arch/powerpc/kvm/book3s_hv_ras.c | 4
-rw-r--r--  arch/powerpc/kvm/powerpc.c | 38
-rw-r--r--  arch/powerpc/lib/alloc.c | 2
-rw-r--r--  arch/powerpc/lib/copy_32.S | 127
-rw-r--r--  arch/powerpc/lib/copypage_power7.S | 32
-rw-r--r--  arch/powerpc/lib/copyuser_power7.S | 226
-rw-r--r--  arch/powerpc/lib/crtsavres.S | 96
-rw-r--r--  arch/powerpc/lib/ldstfp.S | 32
-rw-r--r--  arch/powerpc/lib/locks.c | 1
-rw-r--r--  arch/powerpc/lib/memcpy_power7.S | 226
-rw-r--r--  arch/powerpc/lib/ppc_ksyms.c | 4
-rw-r--r--  arch/powerpc/lib/rheap.c | 2
-rw-r--r--  arch/powerpc/mm/Makefile | 1
-rw-r--r--  arch/powerpc/mm/dma-noncoherent.c | 2
-rw-r--r--  arch/powerpc/mm/fsl_booke_mmu.c | 2
-rw-r--r--  arch/powerpc/mm/hugepage-hash64.c | 2
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 4
-rw-r--r--  arch/powerpc/mm/init_64.c | 1
-rw-r--r--  arch/powerpc/mm/mem.c | 3
-rw-r--r--  arch/powerpc/mm/mmu_decl.h | 2
-rw-r--r--  arch/powerpc/mm/numa.c | 62
-rw-r--r--  arch/powerpc/mm/pgtable_32.c | 18
-rw-r--r--  arch/powerpc/mm/pgtable_64.c | 6
-rw-r--r--  arch/powerpc/mm/ppc_mmu_32.c | 5
-rw-r--r--  arch/powerpc/mm/tlb_hash64.c | 2
-rw-r--r--  arch/powerpc/mm/vphn.c | 70
-rw-r--r--  arch/powerpc/mm/vphn.h | 16
-rw-r--r--  arch/powerpc/perf/callchain.c | 2
-rw-r--r--  arch/powerpc/perf/core-book3s.c | 4
-rw-r--r--  arch/powerpc/perf/hv-24x7.c | 251
-rw-r--r--  arch/powerpc/perf/hv-24x7.h | 8
-rw-r--r--  arch/powerpc/platforms/85xx/common.c | 1
-rw-r--r--  arch/powerpc/platforms/85xx/corenet_generic.c | 12
-rw-r--r--  arch/powerpc/platforms/85xx/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/Kconfig | 5
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 3
-rw-r--r--  arch/powerpc/platforms/cell/Kconfig | 11
-rw-r--r--  arch/powerpc/platforms/cell/Makefile | 15
-rw-r--r--  arch/powerpc/platforms/cell/beat.c | 264
-rw-r--r--  arch/powerpc/platforms/cell/beat.h | 39
-rw-r--r--  arch/powerpc/platforms/cell/beat_htab.c | 445
-rw-r--r--  arch/powerpc/platforms/cell/beat_hvCall.S | 285
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.c | 253
-rw-r--r--  arch/powerpc/platforms/cell/beat_interrupt.h | 30
-rw-r--r--  arch/powerpc/platforms/cell/beat_iommu.c | 115
-rw-r--r--  arch/powerpc/platforms/cell/beat_spu_priv1.c | 205
-rw-r--r--  arch/powerpc/platforms/cell/beat_syscall.h | 164
-rw-r--r--  arch/powerpc/platforms/cell/beat_udbg.c | 98
-rw-r--r--  arch/powerpc/platforms/cell/beat_wrapper.h | 290
-rw-r--r--  arch/powerpc/platforms/cell/cell.h | 24
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.c | 500
-rw-r--r--  arch/powerpc/platforms/cell/celleb_pci.h | 46
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc.h | 232
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_epci.c | 428
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_pciex.c | 538
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_sio.c | 99
-rw-r--r--  arch/powerpc/platforms/cell/celleb_scc_uhc.c | 95
-rw-r--r--  arch/powerpc/platforms/cell/celleb_setup.c | 243
-rw-r--r--  arch/powerpc/platforms/cell/interrupt.c | 2
-rw-r--r--  arch/powerpc/platforms/cell/iommu.c | 11
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 5
-rw-r--r--  arch/powerpc/platforms/cell/smp.c | 9
-rw-r--r--  arch/powerpc/platforms/cell/spu_callbacks.c | 1
-rw-r--r--  arch/powerpc/platforms/chrp/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/maple/maple.h | 2
-rw-r--r--  arch/powerpc/platforms/maple/pci.c | 4
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/pasemi/iommu.c | 6
-rw-r--r--  arch/powerpc/platforms/pasemi/pasemi.h | 1
-rw-r--r--  arch/powerpc/platforms/pasemi/pci.c | 5
-rw-r--r--  arch/powerpc/platforms/powermac/bootx_init.c | 2
-rw-r--r--  arch/powerpc/platforms/powermac/pci.c | 38
-rw-r--r--  arch/powerpc/platforms/powermac/pic.c | 3
-rw-r--r--  arch/powerpc/platforms/powermac/pmac.h | 3
-rw-r--r--  arch/powerpc/platforms/powermac/setup.c | 22
-rw-r--r--  arch/powerpc/platforms/powermac/smp.c | 18
-rw-r--r--  arch/powerpc/platforms/powernv/Kconfig | 7
-rw-r--r--  arch/powerpc/platforms/powernv/Makefile | 2
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-ioda.c | 1149
-rw-r--r--  arch/powerpc/platforms/powernv/eeh-powernv.c | 1300
-rw-r--r--  arch/powerpc/platforms/powernv/opal-dump.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-elog.c | 3
-rw-r--r--  arch/powerpc/platforms/powernv/opal-flash.c | 8
-rw-r--r--  arch/powerpc/platforms/powernv/opal-nvram.c | 10
-rw-r--r--  arch/powerpc/platforms/powernv/opal-sensor.c | 30
-rw-r--r--  arch/powerpc/platforms/powernv/opal-wrappers.S | 5
-rw-r--r--  arch/powerpc/platforms/powernv/opal.c | 92
-rw-r--r--  arch/powerpc/platforms/powernv/pci-ioda.c | 797
-rw-r--r--  arch/powerpc/platforms/powernv/pci-p5ioc2.c | 1
-rw-r--r--  arch/powerpc/platforms/powernv/pci.c | 190
-rw-r--r--  arch/powerpc/platforms/powernv/pci.h | 38
-rw-r--r--  arch/powerpc/platforms/powernv/powernv.h | 2
-rw-r--r--  arch/powerpc/platforms/powernv/setup.c | 54
-rw-r--r--  arch/powerpc/platforms/powernv/smp.c | 13
-rw-r--r--  arch/powerpc/platforms/ps3/smp.c | 4
-rw-r--r--  arch/powerpc/platforms/pseries/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/pseries/dlpar.c | 118
-rw-r--r--  arch/powerpc/platforms/pseries/eeh_pseries.c | 98
-rw-r--r--  arch/powerpc/platforms/pseries/hotplug-memory.c | 489
-rw-r--r--  arch/powerpc/platforms/pseries/iommu.c | 9
-rw-r--r--  arch/powerpc/platforms/pseries/mobility.c | 26
-rw-r--r--  arch/powerpc/platforms/pseries/msi.c | 6
-rw-r--r--  arch/powerpc/platforms/pseries/nvram.c | 674
-rw-r--r--  arch/powerpc/platforms/pseries/pci_dlpar.c | 5
-rw-r--r--  arch/powerpc/platforms/pseries/pseries.h | 14
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 48
-rw-r--r--  arch/powerpc/platforms/pseries/smp.c | 6
-rwxr-xr-x  arch/powerpc/relocs_check.pl | 66
-rwxr-xr-x  arch/powerpc/relocs_check.sh | 59
-rw-r--r--  arch/powerpc/sysdev/dart_iommu.c | 10
-rw-r--r--  arch/powerpc/sysdev/dcr.c | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.c | 29
-rw-r--r--  arch/powerpc/sysdev/fsl_msi.h | 2
-rw-r--r--  arch/powerpc/sysdev/fsl_pci.c | 15
-rw-r--r--  arch/powerpc/sysdev/mpic.c | 30
-rw-r--r--  arch/powerpc/sysdev/qe_lib/qe_io.c | 25
-rw-r--r--  arch/powerpc/sysdev/qe_lib/ucc_slow.c | 5
-rw-r--r--  arch/powerpc/sysdev/xics/xics-common.c | 4
233 files changed, 7188 insertions, 9344 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 9b780e0d2c18..190cc48abc0c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -152,6 +152,7 @@ config PPC
select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
select NO_BOOTMEM
select HAVE_GENERIC_RCU_GUP
+ select HAVE_PERF_EVENTS_NMI if PPC64
config GENERIC_CSUM
def_bool CPU_LITTLE_ENDIAN
@@ -189,9 +190,6 @@ config ARCH_MAY_HAVE_PC_FDC
bool
default PCI
-config PPC_OF
- def_bool y
-
config PPC_UDBG_16550
bool
default n
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index ec2e40f2cc11..0efa8f90a8f1 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -117,7 +117,7 @@ config BDI_SWITCH
config BOOTX_TEXT
bool "Support for early boot text console (BootX or OpenFirmware only)"
- depends on PPC_OF && PPC_BOOK3S
+ depends on PPC_BOOK3S
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires either BootX or Open Firmware.
@@ -193,13 +193,6 @@ config PPC_EARLY_DEBUG_PAS_REALMODE
Select this to enable early debugging for PA Semi.
Output will be on UART0.
-config PPC_EARLY_DEBUG_BEAT
- bool "Beat HV Console"
- depends on PPC_CELLEB
- select PPC_UDBG_BEAT
- help
- Select this to enable early debugging for Celleb with Beat.
-
config PPC_EARLY_DEBUG_44x
bool "Early serial debugging for IBM/AMCC 44x CPUs"
depends on 44x
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index fc502e042438..07a480861f78 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -248,10 +248,10 @@ boot := arch/$(ARCH)/boot
ifeq ($(CONFIG_RELOCATABLE),y)
quiet_cmd_relocs_check = CALL $<
- cmd_relocs_check = perl $< "$(OBJDUMP)" "$(obj)/vmlinux"
+ cmd_relocs_check = $(CONFIG_SHELL) $< "$(OBJDUMP)" "$(obj)/vmlinux"
PHONY += relocs_check
-relocs_check: arch/powerpc/relocs_check.pl vmlinux
+relocs_check: arch/powerpc/relocs_check.sh vmlinux
$(call cmd,relocs_check)
zImage: relocs_check
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8a5bc1cfc6aa..73eddda53b8e 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -110,7 +110,6 @@ src-plat-$(CONFIG_EPAPR_BOOT) += epapr.c epapr-wrapper.c
src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S
src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S
src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S
-src-plat-$(CONFIG_PPC_CELLEB) += pseries-head.S
src-plat-$(CONFIG_PPC_CELL_QPACE) += pseries-head.S
src-wlib := $(sort $(src-wlib-y))
@@ -215,7 +214,6 @@ image-$(CONFIG_PPC_POWERNV) += zImage.pseries
image-$(CONFIG_PPC_MAPLE) += zImage.maple
image-$(CONFIG_PPC_IBM_CELL_BLADE) += zImage.pseries
image-$(CONFIG_PPC_PS3) += dtbImage.ps3
-image-$(CONFIG_PPC_CELLEB) += zImage.pseries
image-$(CONFIG_PPC_CELL_QPACE) += zImage.pseries
image-$(CONFIG_PPC_CHRP) += zImage.chrp
image-$(CONFIG_PPC_EFIKA) += zImage.chrp
@@ -317,7 +315,7 @@ endif
# Allow extra targets to be added to the defconfig
image-y += $(subst ",,$(CONFIG_EXTRA_TARGETS))
-initrd- := $(patsubst zImage%, zImage.initrd%, $(image-n) $(image-))
+initrd- := $(patsubst zImage%, zImage.initrd%, $(image-))
initrd-y := $(patsubst zImage%, zImage.initrd%, \
$(patsubst dtbImage%, dtbImage.initrd%, \
$(patsubst simpleImage%, simpleImage.initrd%, \
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index 14de4f8778a7..12866ccb5694 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -155,29 +155,29 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
ld r9,(p_rela-p_base)(r10)
add r9,r9,r10
- li r7,0
+ li r13,0
li r8,0
-9: ld r6,0(r11) /* get tag */
- cmpdi r6,0
+9: ld r12,0(r11) /* get tag */
+ cmpdi r12,0
beq 12f /* end of list */
- cmpdi r6,RELA
+ cmpdi r12,RELA
bne 10f
- ld r7,8(r11) /* get RELA pointer in r7 */
+ ld r13,8(r11) /* get RELA pointer in r13 */
b 11f
-10: addis r6,r6,(-RELACOUNT)@ha
- cmpdi r6,RELACOUNT@l
+10: addis r12,r12,(-RELACOUNT)@ha
+ cmpdi r12,RELACOUNT@l
bne 11f
ld r8,8(r11) /* get RELACOUNT value in r8 */
11: addi r11,r11,16
b 9b
12:
- cmpdi r7,0 /* check we have both RELA and RELACOUNT */
+ cmpdi r13,0 /* check we have both RELA and RELACOUNT */
cmpdi cr1,r8,0
beq 3f
beq cr1,3f
/* Calcuate the runtime offset. */
- subf r7,r7,r9
+ subf r13,r13,r9
/* Run through the list of relocations and process the
* R_PPC64_RELATIVE ones. */
@@ -185,10 +185,10 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
13: ld r0,8(r9) /* ELF64_R_TYPE(reloc->r_info) */
cmpdi r0,22 /* R_PPC64_RELATIVE */
bne 3f
- ld r6,0(r9) /* reloc->r_offset */
+ ld r12,0(r9) /* reloc->r_offset */
ld r0,16(r9) /* reloc->r_addend */
- add r0,r0,r7
- stdx r0,r7,r6
+ add r0,r0,r13
+ stdx r0,r13,r12
addi r9,r9,24
bdnz 13b
@@ -218,7 +218,7 @@ p_base: mflr r10 /* r10 now points to runtime addr of p_base */
beq 6f
ld r1,0(r8)
li r0,0
- stdu r0,-16(r1) /* establish a stack frame */
+ stdu r0,-112(r1) /* establish a stack frame */
6:
#endif /* __powerpc64__ */
/* Call platform_init() */
diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts
deleted file mode 100644
index 2aa5cd318ce8..000000000000
--- a/arch/powerpc/boot/dts/b4860emu.dts
+++ /dev/null
@@ -1,223 +0,0 @@
-/*
- * B4860 emulator Device Tree Source
- *
- * Copyright 2013 Freescale Semiconductor Inc.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are met:
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * * Neither the name of Freescale Semiconductor nor the
- * names of its contributors may be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- *
- * ALTERNATIVELY, this software may be distributed under the terms of the
- * GNU General Public License ("GPL") as published by the Free Software
- * Foundation, either version 2 of that License or (at your option) any
- * later version.
- *
- * This software is provided by Freescale Semiconductor "as is" and any
- * express or implied warranties, including, but not limited to, the implied
- * warranties of merchantability and fitness for a particular purpose are
- * disclaimed. In no event shall Freescale Semiconductor be liable for any
- * direct, indirect, incidental, special, exemplary, or consequential damages
- * (including, but not limited to, procurement of substitute goods or services;
- * loss of use, data, or profits; or business interruption) however caused and
- * on any theory of liability, whether in contract, strict liability, or tort
- * (including negligence or otherwise) arising in any way out of the use of
- * this software, even if advised of the possibility of such damage.
- */
-
-/dts-v1/;
-
-/include/ "fsl/e6500_power_isa.dtsi"
-
-/ {
- compatible = "fsl,B4860";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- aliases {
- ccsr = &soc;
-
- serial0 = &serial0;
- serial1 = &serial1;
- serial2 = &serial2;
- serial3 = &serial3;
- dma0 = &dma0;
- dma1 = &dma1;
- };
-
- cpus {
- #address-cells = <1>;
- #size-cells = <0>;
-
- cpu0: PowerPC,e6500@0 {
- device_type = "cpu";
- reg = <0 1>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu1: PowerPC,e6500@2 {
- device_type = "cpu";
- reg = <2 3>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu2: PowerPC,e6500@4 {
- device_type = "cpu";
- reg = <4 5>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- cpu3: PowerPC,e6500@6 {
- device_type = "cpu";
- reg = <6 7>;
- next-level-cache = <&L2>;
- fsl,portid-mapping = <0x80000000>;
- };
- };
-};
-
-/ {
- model = "fsl,B4860QDS";
- compatible = "fsl,B4860EMU", "fsl,B4860QDS";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&mpic>;
-
- ifc: localbus@ffe124000 {
- reg = <0xf 0xfe124000 0 0x2000>;
- ranges = <0 0 0xf 0xe8000000 0x08000000
- 2 0 0xf 0xff800000 0x00010000
- 3 0 0xf 0xffdf0000 0x00008000>;
-
- nor@0,0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "cfi-flash";
- reg = <0x0 0x0 0x8000000>;
- bank-width = <2>;
- device-width = <1>;
- };
- };
-
- memory {
- device_type = "memory";
- };
-
- soc: soc@ffe000000 {
- ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
- reg = <0xf 0xfe000000 0 0x00001000>;
- };
-};
-
-&ifc {
- #address-cells = <2>;
- #size-cells = <1>;
- compatible = "fsl,ifc", "simple-bus";
- interrupts = <25 2 0 0>;
-};
-
-&soc {
- #address-cells = <1>;
- #size-cells = <1>;
- device_type = "soc";
- compatible = "simple-bus";
-
- soc-sram-error {
- compatible = "fsl,soc-sram-error";
- interrupts = <16 2 1 2>;
- };
-
- corenet-law@0 {
- compatible = "fsl,corenet-law";
- reg = <0x0 0x1000>;
- fsl,num-laws = <32>;
- };
-
- ddr1: memory-controller@8000 {
- compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
- reg = <0x8000 0x1000>;
- interrupts = <16 2 1 8>;
- };
-
- ddr2: memory-controller@9000 {
- compatible = "fsl,qoriq-memory-controller-v4.5","fsl,qoriq-memory-controller";
- reg = <0x9000 0x1000>;
- interrupts = <16 2 1 9>;
- };
-
- cpc: l3-cache-controller@10000 {
- compatible = "fsl,b4-l3-cache-controller", "cache";
- reg = <0x10000 0x1000
- 0x11000 0x1000>;
- interrupts = <16 2 1 4>;
- };
-
- corenet-cf@18000 {
- compatible = "fsl,corenet2-cf", "fsl,corenet-cf";
- reg = <0x18000 0x1000>;
- interrupts = <16 2 1 0>;
- fsl,ccf-num-csdids = <32>;
- fsl,ccf-num-snoopids = <32>;
- };
-
- iommu@20000 {
- compatible = "fsl,pamu-v1.0", "fsl,pamu";
- reg = <0x20000 0x4000>;
- fsl,portid-mapping = <0x8000>;
- #address-cells = <1>;
- #size-cells = <1>;
- interrupts = <
- 24 2 0 0
- 16 2 1 1>;
- pamu0: pamu@0 {
- reg = <0 0x1000>;
- fsl,primary-cache-geometry = <8 1>;
- fsl,secondary-cache-geometry = <32 2>;
- };
- };
-
-/include/ "fsl/qoriq-mpic.dtsi"
-
- guts: global-utilities@e0000 {
- compatible = "fsl,b4-device-config";
- reg = <0xe0000 0xe00>;
- fsl,has-rstcr;
- fsl,liodn-bits = <12>;
- };
-
-/include/ "fsl/qoriq-clockgen2.dtsi"
- global-utilities@e1000 {
- compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
- };
-
-/include/ "fsl/qoriq-dma-0.dtsi"
- dma@100300 {
- fsl,iommu-parent = <&pamu0>;
- fsl,liodn-reg = <&guts 0x580>; /* DMA1LIODNR */
- };
-
-/include/ "fsl/qoriq-dma-1.dtsi"
- dma@101300 {
- fsl,iommu-parent = <&pamu0>;
- fsl,liodn-reg = <&guts 0x584>; /* DMA2LIODNR */
- };
-
-/include/ "fsl/qoriq-i2c-0.dtsi"
-/include/ "fsl/qoriq-i2c-1.dtsi"
-/include/ "fsl/qoriq-duart-0.dtsi"
-/include/ "fsl/qoriq-duart-1.dtsi"
-
- L2: l2-cache-controller@c20000 {
- compatible = "fsl,b4-l2-cache-controller";
- reg = <0xc20000 0x1000>;
- next-level-cache = <&cpc>;
- };
-};
diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi
index e5bde0b85135..24ed80dc2120 100644
--- a/arch/powerpc/boot/dts/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
@@ -1,7 +1,7 @@
/*
* B4420DS Device Tree Source
*
- * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -97,10 +97,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01052000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index 65100b9636b7..f35e9e0a5445 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -1,7 +1,7 @@
/*
* B4860 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -109,6 +109,64 @@
};
};
+&bportals {
+ bman-portal@38000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x38000 0x4000>, <0x100e000 0x1000>;
+ interrupts = <133 2 0 0>;
+ };
+ bman-portal@3c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
+ interrupts = <135 2 0 0>;
+ };
+ bman-portal@40000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x40000 0x4000>, <0x1010000 0x1000>;
+ interrupts = <137 2 0 0>;
+ };
+ bman-portal@44000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x44000 0x4000>, <0x1011000 0x1000>;
+ interrupts = <139 2 0 0>;
+ };
+ bman-portal@48000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x48000 0x4000>, <0x1012000 0x1000>;
+ interrupts = <141 2 0 0>;
+ };
+ bman-portal@4c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4c000 0x4000>, <0x1013000 0x1000>;
+ interrupts = <143 2 0 0>;
+ };
+ bman-portal@50000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x50000 0x4000>, <0x1014000 0x1000>;
+ interrupts = <145 2 0 0>;
+ };
+ bman-portal@54000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x54000 0x4000>, <0x1015000 0x1000>;
+ interrupts = <147 2 0 0>;
+ };
+ bman-portal@58000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x58000 0x4000>, <0x1016000 0x1000>;
+ interrupts = <149 2 0 0>;
+ };
+ bman-portal@5c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x5c000 0x4000>, <0x1017000 0x1000>;
+ interrupts = <151 2 0 0>;
+ };
+ bman-portal@60000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x60000 0x4000>, <0x1018000 0x1000>;
+ interrupts = <153 2 0 0>;
+ };
+};
+
&soc {
ddr2: memory-controller@9000 {
compatible = "fsl,qoriq-memory-controller-v4.5", "fsl,qoriq-memory-controller";
diff --git a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
index 1a54ba71f685..73136c0029d2 100644
--- a/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4si-post.dtsi
@@ -1,7 +1,7 @@
/*
* B4420 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor, Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* this software, even if advised of the possibility of such damage.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -128,6 +133,83 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+ bman-portal@28000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x28000 0x4000>, <0x100a000 0x1000>;
+ interrupts = <125 2 0 0>;
+ };
+ bman-portal@2c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
+ interrupts = <127 2 0 0>;
+ };
+ bman-portal@30000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x30000 0x4000>, <0x100c000 0x1000>;
+ interrupts = <129 2 0 0>;
+ };
+ bman-portal@34000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x34000 0x4000>, <0x100d000 0x1000>;
+ interrupts = <131 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -261,6 +343,11 @@
/include/ "qoriq-duart-1.dtsi"
/include/ "qoriq-sec5.3-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
+ bman: bman@31a000 {
+ interrupts = <16 2 1 29>;
+ };
+
L2: l2-cache-controller@c20000 {
compatible = "fsl,b4-l2-cache-controller";
reg = <0xc20000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index 81437fdf1db4..7780f21430cb 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P1023/P1017 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
#address-cells = <2>;
#size-cells = <1>;
@@ -97,6 +102,28 @@
};
};
+&bportals {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x100000 0x1000>;
+ interrupts = <30 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x101000 0x1000>;
+ interrupts = <32 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x102000 0x1000>;
+ interrupts = <34 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -221,6 +248,14 @@
/include/ "pq3-mpic.dtsi"
/include/ "pq3-mpic-timer-B.dtsi"
+ bman: bman@8a000 {
+ compatible = "fsl,bman";
+ reg = <0x8a000 0x1000>;
+ interrupts = <16 2 0 0>;
+ fsl,bman-portals = <&bportals>;
+ memory-region = <&bman_fbpr>;
+ };
+
global-utilities@e0000 {
compatible = "fsl,p1023-guts";
reg = <0xe0000 0x1000>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index efd74db4f9b0..f2feacfd9a25 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P2041/P2040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
compatible = "fsl,p2041-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -216,6 +221,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -407,4 +414,6 @@
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index d7425ef1ae41..d6fea37395ad 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P3041 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
compatible = "fsl,p3041-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -243,6 +248,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -434,4 +441,6 @@
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 7005a4a4cef0..89482c9b2301 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P4080/P4040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10 0>;
+};
+
&lbc {
compatible = "fsl,p4080-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -243,6 +248,8 @@
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -490,4 +497,6 @@
crypto: crypto@300000 {
fsl,iommu-parent = <&pamu1>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 55834211bd28..6e04851e2fc9 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P5020/5010 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&lbc {
compatible = "fsl,p5020-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -240,6 +245,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -421,6 +428,8 @@
fsl,iommu-parent = <&pamu1>;
};
+/include/ "qoriq-bman1.dtsi"
+
/include/ "qoriq-raid1.0-0.dtsi"
raideng@320000 {
fsl,iommu-parent = <&pamu1>;
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 6e4cd6ce363c..5e44dfa1e1a5 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -1,7 +1,7 @@
/*
* P5040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* software, even if advised of the possibility of such damage.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&lbc {
compatible = "fsl,p5040-elbc", "fsl,elbc", "simple-bus";
interrupts = <25 2 0 0>;
@@ -195,6 +200,8 @@
};
};
+/include/ "qoriq-bman1-portals.dtsi"
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -399,4 +406,6 @@
crypto@300000 {
fsl,iommu-parent = <&pamu4>;
};
+
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 15ae462e758f..5cc01be5b152 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -1,7 +1,7 @@
/*
* T1040 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -218,6 +223,63 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -401,4 +463,5 @@
fsl,liodn-reg = <&guts 0x554>; /* SATA2LIODNR */
};
/include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
};
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index 1ce91e3485a9..86bdaf6cbd14 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -1,7 +1,7 @@
/*
* T2081 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -224,6 +229,103 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+ bman-portal@28000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x28000 0x4000>, <0x100a000 0x1000>;
+ interrupts = <125 2 0 0>;
+ };
+ bman-portal@2c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
+ interrupts = <127 2 0 0>;
+ };
+ bman-portal@30000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x30000 0x4000>, <0x100c000 0x1000>;
+ interrupts = <129 2 0 0>;
+ };
+ bman-portal@34000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x34000 0x4000>, <0x100d000 0x1000>;
+ interrupts = <131 2 0 0>;
+ };
+ bman-portal@38000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x38000 0x4000>, <0x100e000 0x1000>;
+ interrupts = <133 2 0 0>;
+ };
+ bman-portal@3c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
+ interrupts = <135 2 0 0>;
+ };
+ bman-portal@40000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x40000 0x4000>, <0x1010000 0x1000>;
+ interrupts = <137 2 0 0>;
+ };
+ bman-portal@44000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x44000 0x4000>, <0x1011000 0x1000>;
+ interrupts = <139 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -400,6 +502,7 @@
phy_type = "utmi";
};
/include/ "qoriq-sec5.2-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
L2_1: l2-cache-controller@c20000 {
/* Cluster 0 L2 cache */
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 0e96fcabe812..4d4f25895d8c 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -1,7 +1,7 @@
/*
* T4240 Silicon/SoC Device Tree Source (post include)
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -32,6 +32,11 @@
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+&bman_fbpr {
+ compatible = "fsl,bman-fbpr";
+ alloc-ranges = <0 0 0x10000 0>;
+};
+
&ifc {
#address-cells = <2>;
#size-cells = <1>;
@@ -294,6 +299,263 @@
};
};
+&bportals {
+ #address-cells = <0x1>;
+ #size-cells = <0x1>;
+ compatible = "simple-bus";
+
+ bman-portal@0 {
+ compatible = "fsl,bman-portal";
+ reg = <0x0 0x4000>, <0x1000000 0x1000>;
+ interrupts = <105 2 0 0>;
+ };
+ bman-portal@4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4000 0x4000>, <0x1001000 0x1000>;
+ interrupts = <107 2 0 0>;
+ };
+ bman-portal@8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8000 0x4000>, <0x1002000 0x1000>;
+ interrupts = <109 2 0 0>;
+ };
+ bman-portal@c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc000 0x4000>, <0x1003000 0x1000>;
+ interrupts = <111 2 0 0>;
+ };
+ bman-portal@10000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x10000 0x4000>, <0x1004000 0x1000>;
+ interrupts = <113 2 0 0>;
+ };
+ bman-portal@14000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x14000 0x4000>, <0x1005000 0x1000>;
+ interrupts = <115 2 0 0>;
+ };
+ bman-portal@18000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x18000 0x4000>, <0x1006000 0x1000>;
+ interrupts = <117 2 0 0>;
+ };
+ bman-portal@1c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x1c000 0x4000>, <0x1007000 0x1000>;
+ interrupts = <119 2 0 0>;
+ };
+ bman-portal@20000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x20000 0x4000>, <0x1008000 0x1000>;
+ interrupts = <121 2 0 0>;
+ };
+ bman-portal@24000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x24000 0x4000>, <0x1009000 0x1000>;
+ interrupts = <123 2 0 0>;
+ };
+ bman-portal@28000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x28000 0x4000>, <0x100a000 0x1000>;
+ interrupts = <125 2 0 0>;
+ };
+ bman-portal@2c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x2c000 0x4000>, <0x100b000 0x1000>;
+ interrupts = <127 2 0 0>;
+ };
+ bman-portal@30000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x30000 0x4000>, <0x100c000 0x1000>;
+ interrupts = <129 2 0 0>;
+ };
+ bman-portal@34000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x34000 0x4000>, <0x100d000 0x1000>;
+ interrupts = <131 2 0 0>;
+ };
+ bman-portal@38000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x38000 0x4000>, <0x100e000 0x1000>;
+ interrupts = <133 2 0 0>;
+ };
+ bman-portal@3c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x3c000 0x4000>, <0x100f000 0x1000>;
+ interrupts = <135 2 0 0>;
+ };
+ bman-portal@40000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x40000 0x4000>, <0x1010000 0x1000>;
+ interrupts = <137 2 0 0>;
+ };
+ bman-portal@44000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x44000 0x4000>, <0x1011000 0x1000>;
+ interrupts = <139 2 0 0>;
+ };
+ bman-portal@48000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x48000 0x4000>, <0x1012000 0x1000>;
+ interrupts = <141 2 0 0>;
+ };
+ bman-portal@4c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x4c000 0x4000>, <0x1013000 0x1000>;
+ interrupts = <143 2 0 0>;
+ };
+ bman-portal@50000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x50000 0x4000>, <0x1014000 0x1000>;
+ interrupts = <145 2 0 0>;
+ };
+ bman-portal@54000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x54000 0x4000>, <0x1015000 0x1000>;
+ interrupts = <147 2 0 0>;
+ };
+ bman-portal@58000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x58000 0x4000>, <0x1016000 0x1000>;
+ interrupts = <149 2 0 0>;
+ };
+ bman-portal@5c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x5c000 0x4000>, <0x1017000 0x1000>;
+ interrupts = <151 2 0 0>;
+ };
+ bman-portal@60000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x60000 0x4000>, <0x1018000 0x1000>;
+ interrupts = <153 2 0 0>;
+ };
+ bman-portal@64000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x64000 0x4000>, <0x1019000 0x1000>;
+ interrupts = <155 2 0 0>;
+ };
+ bman-portal@68000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x68000 0x4000>, <0x101a000 0x1000>;
+ interrupts = <157 2 0 0>;
+ };
+ bman-portal@6c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x6c000 0x4000>, <0x101b000 0x1000>;
+ interrupts = <159 2 0 0>;
+ };
+ bman-portal@70000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x70000 0x4000>, <0x101c000 0x1000>;
+ interrupts = <161 2 0 0>;
+ };
+ bman-portal@74000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x74000 0x4000>, <0x101d000 0x1000>;
+ interrupts = <163 2 0 0>;
+ };
+ bman-portal@78000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x78000 0x4000>, <0x101e000 0x1000>;
+ interrupts = <165 2 0 0>;
+ };
+ bman-portal@7c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x7c000 0x4000>, <0x101f000 0x1000>;
+ interrupts = <167 2 0 0>;
+ };
+ bman-portal@80000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x80000 0x4000>, <0x1020000 0x1000>;
+ interrupts = <169 2 0 0>;
+ };
+ bman-portal@84000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x84000 0x4000>, <0x1021000 0x1000>;
+ interrupts = <171 2 0 0>;
+ };
+ bman-portal@88000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x88000 0x4000>, <0x1022000 0x1000>;
+ interrupts = <173 2 0 0>;
+ };
+ bman-portal@8c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x8c000 0x4000>, <0x1023000 0x1000>;
+ interrupts = <175 2 0 0>;
+ };
+ bman-portal@90000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x90000 0x4000>, <0x1024000 0x1000>;
+ interrupts = <385 2 0 0>;
+ };
+ bman-portal@94000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x94000 0x4000>, <0x1025000 0x1000>;
+ interrupts = <387 2 0 0>;
+ };
+ bman-portal@98000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x98000 0x4000>, <0x1026000 0x1000>;
+ interrupts = <389 2 0 0>;
+ };
+ bman-portal@9c000 {
+ compatible = "fsl,bman-portal";
+ reg = <0x9c000 0x4000>, <0x1027000 0x1000>;
+ interrupts = <391 2 0 0>;
+ };
+ bman-portal@a0000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xa0000 0x4000>, <0x1028000 0x1000>;
+ interrupts = <393 2 0 0>;
+ };
+ bman-portal@a4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xa4000 0x4000>, <0x1029000 0x1000>;
+ interrupts = <395 2 0 0>;
+ };
+ bman-portal@a8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xa8000 0x4000>, <0x102a000 0x1000>;
+ interrupts = <397 2 0 0>;
+ };
+ bman-portal@ac000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xac000 0x4000>, <0x102b000 0x1000>;
+ interrupts = <399 2 0 0>;
+ };
+ bman-portal@b0000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xb0000 0x4000>, <0x102c000 0x1000>;
+ interrupts = <401 2 0 0>;
+ };
+ bman-portal@b4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xb4000 0x4000>, <0x102d000 0x1000>;
+ interrupts = <403 2 0 0>;
+ };
+ bman-portal@b8000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xb8000 0x4000>, <0x102e000 0x1000>;
+ interrupts = <405 2 0 0>;
+ };
+ bman-portal@bc000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xbc000 0x4000>, <0x102f000 0x1000>;
+ interrupts = <407 2 0 0>;
+ };
+ bman-portal@c0000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc0000 0x4000>, <0x1030000 0x1000>;
+ interrupts = <409 2 0 0>;
+ };
+ bman-portal@c4000 {
+ compatible = "fsl,bman-portal";
+ reg = <0xc4000 0x4000>, <0x1031000 0x1000>;
+ interrupts = <411 2 0 0>;
+ };
+};
+
&soc {
#address-cells = <1>;
#size-cells = <1>;
@@ -486,6 +748,7 @@
/include/ "qoriq-sata2-0.dtsi"
/include/ "qoriq-sata2-1.dtsi"
/include/ "qoriq-sec5.0-0.dtsi"
+/include/ "qoriq-bman1.dtsi"
L2_1: l2-cache-controller@c20000 {
compatible = "fsl,t4240-l2-cache-controller";
diff --git a/arch/powerpc/boot/dts/kmcoge4.dts b/arch/powerpc/boot/dts/kmcoge4.dts
index 89b4119f3b19..97e6d11d1e6d 100644
--- a/arch/powerpc/boot/dts/kmcoge4.dts
+++ b/arch/powerpc/boot/dts/kmcoge4.dts
@@ -25,10 +25,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/oca4080.dts b/arch/powerpc/boot/dts/oca4080.dts
index 3d4c751d1608..eb76caae11d9 100644
--- a/arch/powerpc/boot/dts/oca4080.dts
+++ b/arch/powerpc/boot/dts/oca4080.dts
@@ -49,10 +49,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p1023rdb.dts b/arch/powerpc/boot/dts/p1023rdb.dts
index 0a06a88ddbd5..9236e3742a23 100644
--- a/arch/powerpc/boot/dts/p1023rdb.dts
+++ b/arch/powerpc/boot/dts/p1023rdb.dts
@@ -1,7 +1,7 @@
/*
* P1023 RDB Device Tree Source
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Author: Chunhe Lan <Chunhe.Lan@freescale.com>
*
@@ -47,6 +47,21 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
+ bportals: bman-portals@ff200000 {
+ ranges = <0x0 0xf 0xff200000 0x200000>;
+ };
+
soc: soc@ff600000 {
ranges = <0x0 0x0 0xff600000 0x200000>;
@@ -228,7 +243,6 @@
0x0 0x100000>;
};
};
-
};
/include/ "fsl/p1023si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2041rdb.dts b/arch/powerpc/boot/dts/p2041rdb.dts
index d97ad74c7279..c1e69dc7188e 100644
--- a/arch/powerpc/boot/dts/p2041rdb.dts
+++ b/arch/powerpc/boot/dts/p2041rdb.dts
@@ -1,7 +1,7 @@
/*
* P2041RDB Device Tree Source
*
- * Copyright 2011 Freescale Semiconductor Inc.
+ * Copyright 2011 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 394ea9c943c9..2192fe94866d 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -1,7 +1,7 @@
/*
* P3041DS Device Tree Source
*
- * Copyright 2010-2011 Freescale Semiconductor Inc.
+ * Copyright 2010 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts
index 1cf6148b8b05..fad441654642 100644
--- a/arch/powerpc/boot/dts/p4080ds.dts
+++ b/arch/powerpc/boot/dts/p4080ds.dts
@@ -1,7 +1,7 @@
/*
* P4080DS Device Tree Source
*
- * Copyright 2009-2011 Freescale Semiconductor Inc.
+ * Copyright 2009 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index b7f3057cd894..7382636dc560 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -1,7 +1,7 @@
/*
* P5020DS Device Tree Source
*
- * Copyright 2010-2011 Freescale Semiconductor Inc.
+ * Copyright 2010 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts
index 7e04bf487c04..35dabf5b6098 100644
--- a/arch/powerpc/boot/dts/p5040ds.dts
+++ b/arch/powerpc/boot/dts/p5040ds.dts
@@ -1,7 +1,7 @@
/*
* P5040DS Device Tree Source
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -45,10 +45,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01008000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x200000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t104xqds.dtsi b/arch/powerpc/boot/dts/t104xqds.dtsi
index 234f4b596c5b..f7e9bfbeefc7 100644
--- a/arch/powerpc/boot/dts/t104xqds.dtsi
+++ b/arch/powerpc/boot/dts/t104xqds.dtsi
@@ -1,7 +1,7 @@
/*
* T104xQDS Device Tree Source
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -38,6 +38,17 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -77,6 +88,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi
index 187add885cae..76e07a3f2ca8 100644
--- a/arch/powerpc/boot/dts/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t104xrdb.dtsi
@@ -33,6 +33,16 @@
*/
/ {
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
@@ -69,6 +79,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t208xqds.dtsi b/arch/powerpc/boot/dts/t208xqds.dtsi
index 59061834d54e..c42e07f4f648 100644
--- a/arch/powerpc/boot/dts/t208xqds.dtsi
+++ b/arch/powerpc/boot/dts/t208xqds.dtsi
@@ -1,7 +1,7 @@
/*
* T2080/T2081 QDS Device Tree Source
*
- * Copyright 2013 Freescale Semiconductor Inc.
+ * Copyright 2013 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -39,6 +39,17 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -78,6 +89,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
@@ -137,7 +152,7 @@
rtc@68 {
compatible = "dallas,ds3232";
reg = <0x68>;
- interrupts = <0x1 0x1 0 0>;
+ interrupts = <0xb 0x1 0 0>;
};
};
diff --git a/arch/powerpc/boot/dts/t208xrdb.dtsi b/arch/powerpc/boot/dts/t208xrdb.dtsi
index 1481e192e783..e1463b165d0e 100644
--- a/arch/powerpc/boot/dts/t208xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t208xrdb.dtsi
@@ -39,6 +39,17 @@
#size-cells = <2>;
interrupt-parent = <&mpic>;
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
ifc: localbus@ffe124000 {
reg = <0xf 0xfe124000 0 0x2000>;
ranges = <0 0 0xf 0xe8000000 0x08000000
@@ -79,6 +90,10 @@
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t4240qds.dts b/arch/powerpc/boot/dts/t4240qds.dts
index 97683f6a2936..6df77766410b 100644
--- a/arch/powerpc/boot/dts/t4240qds.dts
+++ b/arch/powerpc/boot/dts/t4240qds.dts
@@ -1,7 +1,7 @@
/*
* T4240QDS Device Tree Source
*
- * Copyright 2012 Freescale Semiconductor Inc.
+ * Copyright 2012 - 2014 Freescale Semiconductor Inc.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
@@ -100,10 +100,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/dts/t4240rdb.dts b/arch/powerpc/boot/dts/t4240rdb.dts
index 53761d4e8c51..46049cf37f02 100644
--- a/arch/powerpc/boot/dts/t4240rdb.dts
+++ b/arch/powerpc/boot/dts/t4240rdb.dts
@@ -69,10 +69,25 @@
device_type = "memory";
};
+ reserved-memory {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges;
+
+ bman_fbpr: bman-fbpr {
+ size = <0 0x1000000>;
+ alignment = <0 0x1000000>;
+ };
+ };
+
dcsr: dcsr@f00000000 {
ranges = <0x00000000 0xf 0x00000000 0x01072000>;
};
+ bportals: bman-portals@ff4000000 {
+ ranges = <0x0 0xf 0xf4000000 0x2000000>;
+ };
+
soc: soc@ffe000000 {
ranges = <0x00000000 0xf 0xfe000000 0x1000000>;
reg = <0xf 0xfe000000 0 0x00001000>;
diff --git a/arch/powerpc/boot/libfdt-wrapper.c b/arch/powerpc/boot/libfdt-wrapper.c
index bb8b9b3505ee..535e8fd8900d 100644
--- a/arch/powerpc/boot/libfdt-wrapper.c
+++ b/arch/powerpc/boot/libfdt-wrapper.c
@@ -44,12 +44,12 @@
#define offset_devp(off) \
({ \
- int _offset = (off); \
+ unsigned long _offset = (off); \
check_err(_offset) ? NULL : (void *)(_offset+1); \
})
-#define devp_offset_find(devp) (((int)(devp))-1)
-#define devp_offset(devp) (devp ? ((int)(devp))-1 : 0)
+#define devp_offset_find(devp) (((unsigned long)(devp))-1)
+#define devp_offset(devp) (devp ? ((unsigned long)(devp))-1 : 0)
static void *fdt;
static void *buf; /* = NULL */
diff --git a/arch/powerpc/boot/libfdt_env.h b/arch/powerpc/boot/libfdt_env.h
index c89fdb1b80e1..8dcd744e5728 100644
--- a/arch/powerpc/boot/libfdt_env.h
+++ b/arch/powerpc/boot/libfdt_env.h
@@ -4,15 +4,17 @@
#include <types.h>
#include <string.h>
+#include "of.h"
+
typedef u32 uint32_t;
typedef u64 uint64_t;
typedef unsigned long uintptr_t;
-#define fdt16_to_cpu(x) (x)
-#define cpu_to_fdt16(x) (x)
-#define fdt32_to_cpu(x) (x)
-#define cpu_to_fdt32(x) (x)
-#define fdt64_to_cpu(x) (x)
-#define cpu_to_fdt64(x) (x)
+#define fdt16_to_cpu(x) be16_to_cpu(x)
+#define cpu_to_fdt16(x) cpu_to_be16(x)
+#define fdt32_to_cpu(x) be32_to_cpu(x)
+#define cpu_to_fdt32(x) cpu_to_be32(x)
+#define fdt64_to_cpu(x) be64_to_cpu(x)
+#define cpu_to_fdt64(x) cpu_to_be64(x)
#endif /* _ARCH_POWERPC_BOOT_LIBFDT_ENV_H */
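[Editor's sketch, not part of the patch: with the boot wrapper built little-endian, fdt32_to_cpu() now byte-swaps the big-endian FDT words instead of passing them through unchanged. A minimal user-space illustration follows; the local swab32() and the sample value stand in for the helpers the wrapper gets from of.h and types.h.]

/* Sketch only: FDT data is stored big-endian; on a little-endian host
 * the old identity macros returned byte-reversed values. */
#include <stdint.h>
#include <stdio.h>

static uint32_t swab32(uint32_t x)
{
	return ((x & 0x000000ffU) << 24) |
	       ((x & 0x0000ff00U) <<  8) |
	       ((x & 0x00ff0000U) >>  8) |
	       ((x & 0xff000000U) >> 24);
}

/* Little-endian host: be32_to_cpu() is swab32(), mirroring of.h below */
#define be32_to_cpu(x)	swab32(x)
#define fdt32_to_cpu(x)	be32_to_cpu(x)

int main(void)
{
	/* FDT magic 0xd00dfeed as it appears when read raw on a LE CPU */
	uint32_t magic_raw = 0xedfe0dd0;

	printf("fdt32_to_cpu -> 0x%08x\n", fdt32_to_cpu(magic_raw)); /* 0xd00dfeed */
	return 0;
}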
diff --git a/arch/powerpc/boot/of.h b/arch/powerpc/boot/of.h
index c8c1750aba0c..5603320dce07 100644
--- a/arch/powerpc/boot/of.h
+++ b/arch/powerpc/boot/of.h
@@ -24,11 +24,19 @@ void of_console_init(void);
typedef u32 __be32;
#ifdef __LITTLE_ENDIAN__
+#define cpu_to_be16(x) swab16(x)
+#define be16_to_cpu(x) swab16(x)
#define cpu_to_be32(x) swab32(x)
#define be32_to_cpu(x) swab32(x)
+#define cpu_to_be64(x) swab64(x)
+#define be64_to_cpu(x) swab64(x)
#else
+#define cpu_to_be16(x) (x)
+#define be16_to_cpu(x) (x)
#define cpu_to_be32(x) (x)
#define be32_to_cpu(x) (x)
+#define cpu_to_be64(x) (x)
+#define be64_to_cpu(x) (x)
#endif
#define PROM_ERROR (-1u)
diff --git a/arch/powerpc/boot/planetcore.c b/arch/powerpc/boot/planetcore.c
index 0d8558a475bb..75117e63e6db 100644
--- a/arch/powerpc/boot/planetcore.c
+++ b/arch/powerpc/boot/planetcore.c
@@ -131,36 +131,3 @@ void planetcore_set_stdout_path(const char *table)
setprop_str(chosen, "linux,stdout-path", path);
}
-
-void planetcore_set_serial_speed(const char *table)
-{
- void *chosen, *stdout;
- u64 baud;
- u32 baud32;
- int len;
-
- chosen = finddevice("/chosen");
- if (!chosen)
- return;
-
- len = getprop(chosen, "linux,stdout-path", prop_buf, MAX_PROP_LEN);
- if (len <= 0)
- return;
-
- stdout = finddevice(prop_buf);
- if (!stdout) {
- printf("planetcore_set_serial_speed: "
- "Bad /chosen/linux,stdout-path.\r\n");
-
- return;
- }
-
- if (!planetcore_get_decimal(table, PLANETCORE_KEY_SERIAL_BAUD,
- &baud)) {
- printf("planetcore_set_serial_speed: No SB tag.\r\n");
- return;
- }
-
- baud32 = baud;
- setprop(stdout, "current-speed", &baud32, 4);
-}
diff --git a/arch/powerpc/boot/planetcore.h b/arch/powerpc/boot/planetcore.h
index 0d4094f1771c..d53c733cc463 100644
--- a/arch/powerpc/boot/planetcore.h
+++ b/arch/powerpc/boot/planetcore.h
@@ -43,7 +43,4 @@ void planetcore_set_mac_addrs(const char *table);
*/
void planetcore_set_stdout_path(const char *table);
-/* Sets the current-speed property in the serial node. */
-void planetcore_set_serial_speed(const char *table);
-
#endif
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index ae0f88ec4a32..3f50c27ed8f8 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -277,7 +277,7 @@ treeboot-iss4xx-mpic)
platformo="$object/treeboot-iss4xx.o"
;;
epapr)
- platformo="$object/epapr.o $object/epapr-wrapper.o"
+ platformo="$object/pseries-head.o $object/epapr.o $object/epapr-wrapper.o"
link_address='0x20000000'
pie=-pie
;;
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 9788b3c2d563..9227b517560a 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -28,7 +28,6 @@ CONFIG_PS3_ROM=m
CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELLEB=y
CONFIG_RTAS_FLASH=y
CONFIG_CPU_FREQ=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
@@ -113,7 +112,6 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_AEC62XX=y
CONFIG_BLK_DEV_SIIMAGE=y
-CONFIG_BLK_DEV_CELLEB=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=m
CONFIG_CHR_DEV_SG=y
@@ -156,7 +154,6 @@ CONFIG_SERIAL_TXX9_NR_UARTS=2
CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_HVC_RTAS=y
-CONFIG_HVC_BEAT=y
CONFIG_IPMI_HANDLER=m
CONFIG_IPMI_DEVICE_INTERFACE=m
CONFIG_IPMI_SI=m
diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig
deleted file mode 100644
index ff454dcd2dd3..000000000000
--- a/arch/powerpc/configs/celleb_defconfig
+++ /dev/null
@@ -1,152 +0,0 @@
-CONFIG_PPC64=y
-CONFIG_TUNE_CELL=y
-CONFIG_ALTIVEC=y
-CONFIG_SMP=y
-CONFIG_NR_CPUS=4
-CONFIG_SYSVIPC=y
-CONFIG_FHANDLE=y
-CONFIG_NO_HZ=y
-CONFIG_HIGH_RES_TIMERS=y
-CONFIG_IKCONFIG=y
-CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=15
-CONFIG_BLK_DEV_INITRD=y
-# CONFIG_COMPAT_BRK is not set
-CONFIG_MODULES=y
-CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
-CONFIG_MODULE_SRCVERSION_ALL=y
-CONFIG_PARTITION_ADVANCED=y
-# CONFIG_PPC_POWERNV is not set
-# CONFIG_PPC_PSERIES is not set
-# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_CELLEB=y
-CONFIG_SPU_FS=y
-# CONFIG_CBE_THERM is not set
-CONFIG_UDBG_RTAS_CONSOLE=y
-# CONFIG_RTAS_PROC is not set
-CONFIG_BINFMT_MISC=m
-CONFIG_KEXEC=y
-CONFIG_NUMA=y
-CONFIG_NET=y
-CONFIG_PACKET=y
-CONFIG_UNIX=y
-CONFIG_INET=y
-CONFIG_IP_MULTICAST=y
-CONFIG_SYN_COOKIES=y
-CONFIG_IPV6=y
-CONFIG_INET6_AH=m
-CONFIG_INET6_ESP=m
-CONFIG_INET6_IPCOMP=m
-CONFIG_IPV6_TUNNEL=m
-CONFIG_NETFILTER=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_BLK_DEV_LOOP=y
-CONFIG_BLK_DEV_RAM=y
-CONFIG_BLK_DEV_RAM_SIZE=131072
-CONFIG_IDE=y
-CONFIG_BLK_DEV_IDECD=m
-CONFIG_BLK_DEV_GENERIC=y
-CONFIG_BLK_DEV_CELLEB=y
-CONFIG_SCSI=m
-# CONFIG_SCSI_PROC_FS is not set
-CONFIG_BLK_DEV_SD=m
-CONFIG_BLK_DEV_SR=m
-CONFIG_CHR_DEV_SG=m
-CONFIG_MD=y
-CONFIG_BLK_DEV_MD=m
-CONFIG_MD_LINEAR=m
-CONFIG_MD_RAID0=m
-CONFIG_MD_RAID1=m
-CONFIG_BLK_DEV_DM=m
-CONFIG_DM_CRYPT=m
-CONFIG_DM_SNAPSHOT=m
-CONFIG_DM_MIRROR=m
-CONFIG_DM_ZERO=m
-CONFIG_DM_MULTIPATH=m
-CONFIG_NETDEVICES=y
-CONFIG_SPIDER_NET=y
-# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
-# CONFIG_INPUT_KEYBOARD is not set
-# CONFIG_INPUT_MOUSE is not set
-# CONFIG_SERIO_I8042 is not set
-# CONFIG_LEGACY_PTYS is not set
-CONFIG_SERIAL_NONSTANDARD=y
-CONFIG_SERIAL_TXX9_NR_UARTS=3
-CONFIG_SERIAL_TXX9_CONSOLE=y
-CONFIG_HVC_RTAS=y
-CONFIG_HVC_BEAT=y
-# CONFIG_HW_RANDOM is not set
-CONFIG_GEN_RTC=y
-CONFIG_I2C=y
-# CONFIG_HWMON is not set
-CONFIG_WATCHDOG=y
-# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB_HIDDEV=y
-CONFIG_USB=y
-CONFIG_USB_MON=y
-CONFIG_USB_EHCI_HCD=m
-# CONFIG_USB_EHCI_HCD_PPC_OF is not set
-CONFIG_USB_OHCI_HCD=m
-CONFIG_USB_STORAGE=m
-CONFIG_EXT2_FS=y
-CONFIG_EXT2_FS_XATTR=y
-CONFIG_EXT2_FS_POSIX_ACL=y
-CONFIG_EXT2_FS_SECURITY=y
-CONFIG_EXT2_FS_XIP=y
-CONFIG_EXT3_FS=y
-CONFIG_EXT3_FS_POSIX_ACL=y
-CONFIG_EXT3_FS_SECURITY=y
-CONFIG_ISO9660_FS=m
-CONFIG_JOLIET=y
-CONFIG_UDF_FS=m
-CONFIG_MSDOS_FS=m
-CONFIG_VFAT_FS=m
-CONFIG_PROC_KCORE=y
-CONFIG_TMPFS=y
-CONFIG_HUGETLBFS=y
-CONFIG_NFS_FS=m
-CONFIG_NFS_V3_ACL=y
-CONFIG_NFSD=m
-CONFIG_NFSD_V3=y
-CONFIG_NFSD_V3_ACL=y
-CONFIG_NLS_ISO8859_1=m
-CONFIG_NLS_ISO8859_2=m
-CONFIG_NLS_ISO8859_3=m
-CONFIG_NLS_ISO8859_4=m
-CONFIG_NLS_ISO8859_5=m
-CONFIG_NLS_ISO8859_6=m
-CONFIG_NLS_ISO8859_7=m
-CONFIG_NLS_ISO8859_9=m
-CONFIG_NLS_ISO8859_13=m
-CONFIG_NLS_ISO8859_14=m
-CONFIG_NLS_ISO8859_15=m
-CONFIG_LIBCRC32C=m
-CONFIG_DEBUG_FS=y
-CONFIG_MAGIC_SYSRQ=y
-CONFIG_DEBUG_KERNEL=y
-CONFIG_DEBUG_MUTEXES=y
-CONFIG_XMON=y
-CONFIG_XMON_DEFAULT=y
-CONFIG_CRYPTO_NULL=m
-CONFIG_CRYPTO_TEST=m
-CONFIG_CRYPTO_ECB=m
-CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD4=m
-CONFIG_CRYPTO_MD5=y
-CONFIG_CRYPTO_MICHAEL_MIC=m
-CONFIG_CRYPTO_SHA256=m
-CONFIG_CRYPTO_SHA512=m
-CONFIG_CRYPTO_TGR192=m
-CONFIG_CRYPTO_WP512=m
-CONFIG_CRYPTO_ANUBIS=m
-CONFIG_CRYPTO_ARC4=m
-CONFIG_CRYPTO_BLOWFISH=m
-CONFIG_CRYPTO_CAST5=m
-CONFIG_CRYPTO_CAST6=m
-CONFIG_CRYPTO_KHAZAD=m
-CONFIG_CRYPTO_SERPENT=m
-CONFIG_CRYPTO_TEA=m
-CONFIG_CRYPTO_TWOFISH=m
-# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index ca7957b09a3c..37659937bd12 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -99,6 +99,8 @@ CONFIG_E1000E=y
CONFIG_AT803X_PHY=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
# CONFIG_INPUT_MOUSE is not set
@@ -114,11 +116,14 @@ CONFIG_NVRAM=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_INA2XX=y
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_MON=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 04737aaa8b6b..33cd1df818ad 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -12,6 +12,10 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
+CONFIG_CGROUPS=y
+CONFIG_CPUSETS=y
+CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHED=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_KALLSYMS_ALL=y
@@ -75,6 +79,10 @@ CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=131072
CONFIG_EEPROM_LEGACY=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_BLK_DEV_SR_VENDOR=y
+CONFIG_CHR_DEV_SG=y
CONFIG_ATA=y
CONFIG_SATA_FSL=y
CONFIG_SATA_SIL24=y
@@ -85,6 +93,8 @@ CONFIG_FSL_XGMAC_MDIO=y
CONFIG_E1000E=y
CONFIG_VITESSE_PHY=y
CONFIG_FIXED_PHY=y
+CONFIG_MDIO_BUS_MUX_GPIO=y
+CONFIG_MDIO_BUS_MUX_MMIOREG=y
CONFIG_INPUT_FF_MEMLESS=m
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_KEYBOARD is not set
@@ -99,11 +109,14 @@ CONFIG_SERIAL_8250_RSA=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
+CONFIG_I2C_MUX=y
+CONFIG_I2C_MUX_PCA954x=y
CONFIG_SPI=y
CONFIG_SPI_GPIO=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=y
+CONFIG_SENSORS_INA2XX=y
CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_MON=y
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index 8535c343dd57..6ecf7bdbc2f9 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -150,8 +150,7 @@ CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
-CONFIG_HWMON=m
-CONFIG_SENSORS_LM90=m
+CONFIG_SENSORS_LM90=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index c45ad2e01b0c..b6c7111ea913 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -143,7 +143,7 @@ CONFIG_SPI=y
CONFIG_SPI_FSL_SPI=y
CONFIG_SPI_FSL_ESPI=y
CONFIG_GPIO_MPC8XXX=y
-# CONFIG_HWMON is not set
+CONFIG_SENSORS_LM90=y
CONFIG_FB=y
CONFIG_FB_FSL_DIU=y
# CONFIG_VGA_CONSOLE is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 3315c9f0828a..aad501ae3834 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -36,7 +36,6 @@ CONFIG_PS3_ROM=m
CONFIG_PS3_FLASH=m
CONFIG_PS3_LPM=m
CONFIG_PPC_IBM_CELL_BLADE=y
-CONFIG_PPC_CELLEB=y
CONFIG_PPC_CELL_QPACE=y
CONFIG_RTAS_FLASH=m
CONFIG_IBMEBUS=y
@@ -89,7 +88,6 @@ CONFIG_IDE=y
CONFIG_BLK_DEV_IDECD=y
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_AMD74XX=y
-CONFIG_BLK_DEV_CELLEB=y
CONFIG_BLK_DEV_IDE_PMAC=y
CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y
CONFIG_BLK_DEV_SD=y
@@ -196,7 +194,6 @@ CONFIG_SERIAL_TXX9_CONSOLE=y
CONFIG_SERIAL_JSM=m
CONFIG_HVC_CONSOLE=y
CONFIG_HVC_RTAS=y
-CONFIG_HVC_BEAT=y
CONFIG_HVCS=m
CONFIG_VIRTIO_CONSOLE=m
CONFIG_IBM_BSR=m
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 382b28e364dc..4b87205c230c 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -1,6 +1,8 @@
-
generic-y += clkdev.h
+generic-y += div64.h
+generic-y += irq_regs.h
generic-y += irq_work.h
+generic-y += local64.h
generic-y += mcs_spinlock.h
generic-y += preempt.h
generic-y += rwsem.h
diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
index 34a05a1a990b..0dc42c5082b7 100644
--- a/arch/powerpc/include/asm/cache.h
+++ b/arch/powerpc/include/asm/cache.h
@@ -76,9 +76,6 @@ extern void _set_L3CR(unsigned long);
#define _set_L3CR(val) do { } while(0)
#endif
-extern void cacheable_memzero(void *p, unsigned int nb);
-extern void *cacheable_memcpy(void *, const void *, unsigned int);
-
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index 5cf5a6d10685..6367b8347dad 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -100,7 +100,7 @@ struct cpu_spec {
/*
* Processor specific routine to flush tlbs.
*/
- void (*flush_tlb)(unsigned long inval_selector);
+ void (*flush_tlb)(unsigned int action);
};
@@ -114,6 +114,12 @@ extern void do_feature_fixups(unsigned long value, void *fixup_start,
extern const char *powerpc_base_platform;
+/* TLB flush actions. Used as argument to cpu_spec.flush_tlb() hook */
+enum {
+ TLB_INVAL_SCOPE_GLOBAL = 0, /* invalidate all TLBs */
+ TLB_INVAL_SCOPE_LPID = 1, /* invalidate TLBs for current LPID */
+};
+
#endif /* __ASSEMBLY__ */
/* CPU kernel features */
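[Editor's sketch, not part of the patch: how a caller might use the new action argument to the flush_tlb() hook, assuming the usual cur_cpu_spec pointer declared in cputable.h; the wrapper function name is illustrative.]

#include <asm/cputable.h>

/* Illustrative wrapper: ask the CPU-specific hook to invalidate all TLBs. */
static void example_flush_all_tlbs(void)
{
	if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
		cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
}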
diff --git a/arch/powerpc/include/asm/dbdma.h b/arch/powerpc/include/asm/dbdma.h
index e23f07e73cb3..6c69836b4ec2 100644
--- a/arch/powerpc/include/asm/dbdma.h
+++ b/arch/powerpc/include/asm/dbdma.h
@@ -42,12 +42,12 @@ struct dbdma_regs {
* DBDMA command structure. These fields are all little-endian!
*/
struct dbdma_cmd {
- unsigned short req_count; /* requested byte transfer count */
- unsigned short command; /* command word (has bit-fields) */
- unsigned int phy_addr; /* physical data address */
- unsigned int cmd_dep; /* command-dependent field */
- unsigned short res_count; /* residual count after completion */
- unsigned short xfer_status; /* transfer status */
+ __le16 req_count; /* requested byte transfer count */
+ __le16 command; /* command word (has bit-fields) */
+ __le32 phy_addr; /* physical data address */
+ __le32 cmd_dep; /* command-dependent field */
+ __le16 res_count; /* residual count after completion */
+ __le16 xfer_status; /* transfer status */
};
/* DBDMA command values in command field */
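[Editor's sketch, not part of the patch: the __le16/__le32 annotations mean the descriptor fields stay little-endian in memory and must be converted explicitly at the access site. The helper names below are made up for illustration; the conversion macros are the kernel's usual le16_to_cpu()/le32_to_cpu().]

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/dbdma.h>

/* Read back a completed descriptor with explicit endian conversion. */
static u16 example_dbdma_status(const struct dbdma_cmd *cmd)
{
	/* fields are little-endian regardless of CPU endianness */
	return le16_to_cpu(cmd->xfer_status);
}

static u32 example_dbdma_addr(const struct dbdma_cmd *cmd)
{
	return le32_to_cpu(cmd->phy_addr);
}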
diff --git a/arch/powerpc/include/asm/dcr-native.h b/arch/powerpc/include/asm/dcr-native.h
index 7d2e6235726d..4efc11dacb98 100644
--- a/arch/powerpc/include/asm/dcr-native.h
+++ b/arch/powerpc/include/asm/dcr-native.h
@@ -31,7 +31,7 @@ typedef struct {
static inline bool dcr_map_ok_native(dcr_host_native_t host)
{
- return 1;
+ return true;
}
#define dcr_map_native(dev, dcr_n, dcr_c) \
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index 38faeded7d59..9f1371bab5fc 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -8,6 +8,9 @@
struct dma_map_ops;
struct device_node;
+#ifdef CONFIG_PPC64
+struct pci_dn;
+#endif
/*
* Arch extensions to struct device.
@@ -34,6 +37,9 @@ struct dev_archdata {
#ifdef CONFIG_SWIOTLB
dma_addr_t max_direct_dma_addr;
#endif
+#ifdef CONFIG_PPC64
+ struct pci_dn *pci_data;
+#endif
#ifdef CONFIG_EEH
struct eeh_dev *edev;
#endif
diff --git a/arch/powerpc/include/asm/div64.h b/arch/powerpc/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/powerpc/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/div64.h>
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h
index 894d538f3567..9103687b0436 100644
--- a/arch/powerpc/include/asm/dma-mapping.h
+++ b/arch/powerpc/include/asm/dma-mapping.h
@@ -191,11 +191,11 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
struct dev_archdata *sd = &dev->archdata;
if (sd->max_direct_dma_addr && addr + size > sd->max_direct_dma_addr)
- return 0;
+ return false;
#endif
if (!dev->dma_mask)
- return 0;
+ return false;
return addr + size - 1 <= *dev->dma_mask;
}
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 55abfd09e47f..a52db28ecc1e 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -29,7 +29,7 @@
struct pci_dev;
struct pci_bus;
-struct device_node;
+struct pci_dn;
#ifdef CONFIG_EEH
@@ -136,14 +136,14 @@ struct eeh_dev {
struct eeh_pe *pe; /* Associated PE */
struct list_head list; /* Form link list in the PE */
struct pci_controller *phb; /* Associated PHB */
- struct device_node *dn; /* Associated device node */
+ struct pci_dn *pdn; /* Associated PCI device node */
struct pci_dev *pdev; /* Associated PCI device */
struct pci_bus *bus; /* PCI bus for partial hotplug */
};
-static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
+static inline struct pci_dn *eeh_dev_to_pdn(struct eeh_dev *edev)
{
- return edev ? edev->dn : NULL;
+ return edev ? edev->pdn : NULL;
}
static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
@@ -200,8 +200,7 @@ struct eeh_ops {
char *name;
int (*init)(void);
int (*post_init)(void);
- void* (*of_probe)(struct device_node *dn, void *flag);
- int (*dev_probe)(struct pci_dev *dev, void *flag);
+ void* (*probe)(struct pci_dn *pdn, void *data);
int (*set_option)(struct eeh_pe *pe, int option);
int (*get_pe_addr)(struct eeh_pe *pe);
int (*get_state)(struct eeh_pe *pe, int *state);
@@ -211,10 +210,10 @@ struct eeh_ops {
int (*configure_bridge)(struct eeh_pe *pe);
int (*err_inject)(struct eeh_pe *pe, int type, int func,
unsigned long addr, unsigned long mask);
- int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
- int (*write_config)(struct device_node *dn, int where, int size, u32 val);
+ int (*read_config)(struct pci_dn *pdn, int where, int size, u32 *val);
+ int (*write_config)(struct pci_dn *pdn, int where, int size, u32 val);
int (*next_error)(struct eeh_pe **pe);
- int (*restore_config)(struct device_node *dn);
+ int (*restore_config)(struct pci_dn *pdn);
};
extern int eeh_subsystem_flags;
@@ -272,7 +271,7 @@ void eeh_pe_restore_bars(struct eeh_pe *pe);
const char *eeh_pe_loc_get(struct eeh_pe *pe);
struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
-void *eeh_dev_init(struct device_node *dn, void *data);
+void *eeh_dev_init(struct pci_dn *pdn, void *data);
void eeh_dev_phb_init_dynamic(struct pci_controller *phb);
int eeh_init(void);
int __init eeh_ops_register(struct eeh_ops *ops);
@@ -280,8 +279,8 @@ int __exit eeh_ops_unregister(const char *name);
int eeh_check_failure(const volatile void __iomem *token);
int eeh_dev_check_failure(struct eeh_dev *edev);
void eeh_addr_cache_build(void);
-void eeh_add_device_early(struct device_node *);
-void eeh_add_device_tree_early(struct device_node *);
+void eeh_add_device_early(struct pci_dn *);
+void eeh_add_device_tree_early(struct pci_dn *);
void eeh_add_device_late(struct pci_dev *);
void eeh_add_device_tree_late(struct pci_bus *);
void eeh_add_sysfs_files(struct pci_bus *);
@@ -323,7 +322,7 @@ static inline int eeh_init(void)
return 0;
}
-static inline void *eeh_dev_init(struct device_node *dn, void *data)
+static inline void *eeh_dev_init(struct pci_dn *pdn, void *data)
{
return NULL;
}
@@ -339,9 +338,9 @@ static inline int eeh_check_failure(const volatile void __iomem *token)
static inline void eeh_addr_cache_build(void) { }
-static inline void eeh_add_device_early(struct device_node *dn) { }
+static inline void eeh_add_device_early(struct pci_dn *pdn) { }
-static inline void eeh_add_device_tree_early(struct device_node *dn) { }
+static inline void eeh_add_device_tree_early(struct pci_dn *pdn) { }
static inline void eeh_add_device_late(struct pci_dev *dev) { }
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 681bc0314b6b..e05808a328db 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -42,7 +42,7 @@
#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
-#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
+/* Free ASM_CONST(0x0000000001000000) */
#define FW_FEATURE_CMO ASM_CONST(0x0000000002000000)
#define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000)
#define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000)
@@ -75,8 +75,6 @@ enum {
FW_FEATURE_POWERNV_ALWAYS = 0,
FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
FW_FEATURE_PS3_ALWAYS = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
- FW_FEATURE_CELLEB_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_BEAT,
- FW_FEATURE_CELLEB_ALWAYS = 0,
FW_FEATURE_NATIVE_POSSIBLE = 0,
FW_FEATURE_NATIVE_ALWAYS = 0,
FW_FEATURE_POSSIBLE =
@@ -89,9 +87,6 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_POSSIBLE |
#endif
-#ifdef CONFIG_PPC_CELLEB
- FW_FEATURE_CELLEB_POSSIBLE |
-#endif
#ifdef CONFIG_PPC_NATIVE
FW_FEATURE_NATIVE_ALWAYS |
#endif
@@ -106,9 +101,6 @@ enum {
#ifdef CONFIG_PPC_PS3
FW_FEATURE_PS3_ALWAYS &
#endif
-#ifdef CONFIG_PPC_CELLEB
- FW_FEATURE_CELLEB_ALWAYS &
-#endif
#ifdef CONFIG_PPC_NATIVE
FW_FEATURE_NATIVE_ALWAYS &
#endif
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index f1ea5972f6ec..1e27d6338565 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -29,6 +29,7 @@
#include <linux/bitops.h>
#include <asm/machdep.h>
#include <asm/types.h>
+#include <asm/pci-bridge.h>
#define IOMMU_PAGE_SHIFT_4K 12
#define IOMMU_PAGE_SIZE_4K (ASM_CONST(1) << IOMMU_PAGE_SHIFT_4K)
@@ -78,6 +79,9 @@ struct iommu_table {
struct iommu_group *it_group;
#endif
void (*set_bypass)(struct iommu_table *tbl, bool enable);
+#ifdef CONFIG_PPC_POWERNV
+ void *data;
+#endif
};
/* Pure 2^n version of get_order */
@@ -169,7 +173,7 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
struct dma_attrs *attrs);
extern void iommu_init_early_pSeries(void);
-extern void iommu_init_early_dart(void);
+extern void iommu_init_early_dart(struct pci_controller_ops *controller_ops);
extern void iommu_init_early_pasemi(void);
extern void alloc_dart_table(void);
diff --git a/arch/powerpc/include/asm/irq_regs.h b/arch/powerpc/include/asm/irq_regs.h
deleted file mode 100644
index ba94b51a0a70..000000000000
--- a/arch/powerpc/include/asm/irq_regs.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#include <asm-generic/irq_regs.h>
-
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2d81e202bdcc..14619a59ec09 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -290,11 +290,11 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
pte_t old_pte, new_pte = __pte(0);
while (1) {
- old_pte = pte_val(*ptep);
+ old_pte = *ptep;
/*
* wait until _PAGE_BUSY is clear then set it atomically
*/
- if (unlikely(old_pte & _PAGE_BUSY)) {
+ if (unlikely(pte_val(old_pte) & _PAGE_BUSY)) {
cpu_relax();
continue;
}
@@ -305,16 +305,18 @@ static inline pte_t kvmppc_read_update_linux_pte(pte_t *ptep, int writing,
return __pte(0);
#endif
/* If pte is not present return None */
- if (unlikely(!(old_pte & _PAGE_PRESENT)))
+ if (unlikely(!(pte_val(old_pte) & _PAGE_PRESENT)))
return __pte(0);
new_pte = pte_mkyoung(old_pte);
if (writing && pte_write(old_pte))
new_pte = pte_mkdirty(new_pte);
- if (old_pte == __cmpxchg_u64((unsigned long *)ptep, old_pte,
- new_pte))
+ if (pte_val(old_pte) == __cmpxchg_u64((unsigned long *)ptep,
+ pte_val(old_pte),
+ pte_val(new_pte))) {
break;
+ }
}
return new_pte;
}
@@ -335,7 +337,7 @@ static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
{
if (key)
return PP_RWRX <= pp && pp <= PP_RXRX;
- return 1;
+ return true;
}
static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
@@ -373,7 +375,7 @@ static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
if (pagesize <= PAGE_SIZE)
- return 1;
+ return true;
return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
}
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 8ef05121d3cd..c610961720c7 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -585,7 +585,7 @@ struct kvm_vcpu_arch {
pgd_t *pgdir;
u8 io_gpr; /* GPR used as IO source/target */
- u8 mmio_is_bigendian;
+ u8 mmio_host_swabbed;
u8 mmio_sign_extend;
u8 osi_needed;
u8 osi_enabled;
diff --git a/arch/powerpc/include/asm/local64.h b/arch/powerpc/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/powerpc/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
-#include <asm-generic/local64.h>
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index c8175a3fe560..ef8899432ae7 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -103,9 +103,6 @@ struct machdep_calls {
#endif
#endif /* CONFIG_PPC64 */
- void (*pci_dma_dev_setup)(struct pci_dev *dev);
- void (*pci_dma_bus_setup)(struct pci_bus *bus);
-
/* Platform set_dma_mask and dma_get_required_mask overrides */
int (*dma_set_mask)(struct device *dev, u64 dma_mask);
u64 (*dma_get_required_mask)(struct device *dev);
@@ -125,9 +122,8 @@ struct machdep_calls {
unsigned int (*get_irq)(void);
/* PCI stuff */
- /* Called after scanning the bus, before allocating resources */
+ /* Called after allocating resources */
void (*pcibios_fixup)(void);
- int (*pci_probe_mode)(struct pci_bus *);
void (*pci_irq_fixup)(struct pci_dev *dev);
int (*pcibios_root_bridge_prepare)(struct pci_host_bridge
*bridge);
@@ -237,18 +233,13 @@ struct machdep_calls {
/* Called for each PCI bus in the system when it's probed */
void (*pcibios_fixup_bus)(struct pci_bus *);
- /* Called when pci_enable_device() is called. Returns 0 to
- * allow assignment/enabling of the device. */
- int (*pcibios_enable_device_hook)(struct pci_dev *);
-
/* Called after scan and before resource survey */
void (*pcibios_fixup_phb)(struct pci_controller *hose);
- /* Called during PCI resource reassignment */
- resource_size_t (*pcibios_window_alignment)(struct pci_bus *, unsigned long type);
-
- /* Reset the secondary bus of bridge */
- void (*pcibios_reset_secondary_bus)(struct pci_dev *dev);
+#ifdef CONFIG_PCI_IOV
+ void (*pcibios_fixup_sriov)(struct pci_dev *pdev);
+ resource_size_t (*pcibios_iov_resource_alignment)(struct pci_dev *, int resno);
+#endif /* CONFIG_PCI_IOV */
/* Called to shutdown machine specific hardware not already controlled
* by other drivers.
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 4f13c3ed7acf..1da6a81ce541 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -112,6 +112,7 @@
#define TLBIEL_INVAL_SET_SHIFT 12
#define POWER7_TLB_SETS 128 /* # sets in POWER7 TLB */
+#define POWER8_TLB_SETS 512 /* # sets in POWER8 TLB */
#ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h
index 3bef74a9914b..213f3a81593d 100644
--- a/arch/powerpc/include/asm/mpc85xx.h
+++ b/arch/powerpc/include/asm/mpc85xx.h
@@ -61,6 +61,7 @@
#define SVR_T4240 0x824000
#define SVR_T4120 0x824001
#define SVR_T4160 0x824100
+#define SVR_T4080 0x824102
#define SVR_C291 0x850000
#define SVR_C292 0x850020
#define SVR_C293 0x850030
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 754f93d208fa..98697611e7b3 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -34,10 +34,6 @@
#define MPIC_GREG_GCONF_BASE_MASK 0x000fffff
#define MPIC_GREG_GCONF_MCK 0x08000000
#define MPIC_GREG_GLOBAL_CONF_1 0x00030
-#define MPIC_GREG_GLOBAL_CONF_1_SIE 0x08000000
-#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK 0x70000000
-#define MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(r) \
- (((r) << 28) & MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK)
#define MPIC_GREG_VENDOR_0 0x00040
#define MPIC_GREG_VENDOR_1 0x00050
#define MPIC_GREG_VENDOR_2 0x00060
@@ -396,14 +392,7 @@ extern struct bus_type mpic_subsys;
#define MPIC_REGSET_TSI108 MPIC_REGSET(1) /* Tsi108/109 PIC */
/* Get the version of primary MPIC */
-#ifdef CONFIG_MPIC
extern u32 fsl_mpic_primary_get_version(void);
-#else
-static inline u32 fsl_mpic_primary_get_version(void)
-{
- return 0;
-}
-#endif
/* Allocate the controller structure and setup the linux irq descs
* for the range if interrupts passed in. No HW initialization is
@@ -496,11 +485,5 @@ extern unsigned int mpic_get_coreint_irq(void);
/* Fetch Machine Check interrupt from primary mpic */
extern unsigned int mpic_get_mcirq(void);
-/* Set the EPIC clock ratio */
-void mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio);
-
-/* Enable/Disable EPIC serial interrupt mode */
-void mpic_set_serial_int(struct mpic *mpic, int enable);
-
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_MPIC_H */
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
new file mode 100644
index 000000000000..ff1ccb375e60
--- /dev/null
+++ b/arch/powerpc/include/asm/nmi.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_NMI_H
+#define _ASM_NMI_H
+
+#endif /* _ASM_NMI_H */
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h
index b0fe0fe4e626..09a518bb7c03 100644
--- a/arch/powerpc/include/asm/nvram.h
+++ b/arch/powerpc/include/asm/nvram.h
@@ -9,12 +9,43 @@
#ifndef _ASM_POWERPC_NVRAM_H
#define _ASM_POWERPC_NVRAM_H
-
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <uapi/asm/nvram.h>
+/*
+ * Set oops header version to distinguish between old and new format header.
+ * lnx,oops-log partition max size is 4000, header version > 4000 will
+ * help in identifying new header.
+ */
+#define OOPS_HDR_VERSION 5000
+
+struct err_log_info {
+ __be32 error_type;
+ __be32 seq_num;
+};
+
+struct nvram_os_partition {
+ const char *name;
+ int req_size; /* desired size, in bytes */
+ int min_size; /* minimum acceptable size (0 means req_size) */
+ long size; /* size of data portion (excluding err_log_info) */
+ long index; /* offset of data portion of partition */
+ bool os_partition; /* partition initialized by OS, not FW */
+};
+
+struct oops_log_info {
+ __be16 version;
+ __be16 report_length;
+ __be64 timestamp;
+} __attribute__((packed));
+
+extern struct nvram_os_partition oops_log_partition;
+
#ifdef CONFIG_PPC_PSERIES
+extern struct nvram_os_partition rtas_log_partition;
+
extern int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int err_seq);
extern int nvram_read_error_log(char * buff, int length,
@@ -50,6 +81,23 @@ extern void pmac_xpram_write(int xpaddr, u8 data);
/* Synchronize NVRAM */
extern void nvram_sync(void);
+/* Initialize NVRAM OS partition */
+extern int __init nvram_init_os_partition(struct nvram_os_partition *part);
+
+/* Initialize NVRAM oops partition */
+extern void __init nvram_init_oops_partition(int rtas_partition_exists);
+
+/* Read a NVRAM partition */
+extern int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt);
+
+/* Write to NVRAM OS partition */
+extern int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt);
+
/* Determine NVRAM size */
extern ssize_t nvram_get_size(void);
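[Editor's sketch, not part of the patch: filling the packed, big-endian oops_log_info header defined above before handing a buffer to nvram_write_os_partition(); the helper and its timestamp parameter are illustrative.]

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/nvram.h>

/* The on-NVRAM oops header is stored big-endian. */
static void example_fill_oops_header(struct oops_log_info *hdr,
				     u16 text_len, u64 timestamp)
{
	hdr->version       = cpu_to_be16(OOPS_HDR_VERSION); /* 5000: new format */
	hdr->report_length = cpu_to_be16(text_len);
	hdr->timestamp     = cpu_to_be64(timestamp);
}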
diff --git a/arch/powerpc/include/asm/opal-api.h b/arch/powerpc/include/asm/opal-api.h
new file mode 100644
index 000000000000..0321a909e663
--- /dev/null
+++ b/arch/powerpc/include/asm/opal-api.h
@@ -0,0 +1,735 @@
+/*
+ * OPAL API definitions.
+ *
+ * Copyright 2011-2015 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef __OPAL_API_H
+#define __OPAL_API_H
+
+/****** OPAL APIs ******/
+
+/* Return codes */
+#define OPAL_SUCCESS 0
+#define OPAL_PARAMETER -1
+#define OPAL_BUSY -2
+#define OPAL_PARTIAL -3
+#define OPAL_CONSTRAINED -4
+#define OPAL_CLOSED -5
+#define OPAL_HARDWARE -6
+#define OPAL_UNSUPPORTED -7
+#define OPAL_PERMISSION -8
+#define OPAL_NO_MEM -9
+#define OPAL_RESOURCE -10
+#define OPAL_INTERNAL_ERROR -11
+#define OPAL_BUSY_EVENT -12
+#define OPAL_HARDWARE_FROZEN -13
+#define OPAL_WRONG_STATE -14
+#define OPAL_ASYNC_COMPLETION -15
+#define OPAL_EMPTY -16
+#define OPAL_I2C_TIMEOUT -17
+#define OPAL_I2C_INVALID_CMD -18
+#define OPAL_I2C_LBUS_PARITY -19
+#define OPAL_I2C_BKEND_OVERRUN -20
+#define OPAL_I2C_BKEND_ACCESS -21
+#define OPAL_I2C_ARBT_LOST -22
+#define OPAL_I2C_NACK_RCVD -23
+#define OPAL_I2C_STOP_ERR -24
+
+/* API Tokens (in r0) */
+#define OPAL_INVALID_CALL -1
+#define OPAL_TEST 0
+#define OPAL_CONSOLE_WRITE 1
+#define OPAL_CONSOLE_READ 2
+#define OPAL_RTC_READ 3
+#define OPAL_RTC_WRITE 4
+#define OPAL_CEC_POWER_DOWN 5
+#define OPAL_CEC_REBOOT 6
+#define OPAL_READ_NVRAM 7
+#define OPAL_WRITE_NVRAM 8
+#define OPAL_HANDLE_INTERRUPT 9
+#define OPAL_POLL_EVENTS 10
+#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
+#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
+#define OPAL_PCI_CONFIG_READ_BYTE 13
+#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
+#define OPAL_PCI_CONFIG_READ_WORD 15
+#define OPAL_PCI_CONFIG_WRITE_BYTE 16
+#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
+#define OPAL_PCI_CONFIG_WRITE_WORD 18
+#define OPAL_SET_XIVE 19
+#define OPAL_GET_XIVE 20
+#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
+#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
+#define OPAL_PCI_EEH_FREEZE_STATUS 23
+#define OPAL_PCI_SHPC 24
+#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
+#define OPAL_PCI_EEH_FREEZE_CLEAR 26
+#define OPAL_PCI_PHB_MMIO_ENABLE 27
+#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
+#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
+#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
+#define OPAL_PCI_SET_PE 31
+#define OPAL_PCI_SET_PELTV 32
+#define OPAL_PCI_SET_MVE 33
+#define OPAL_PCI_SET_MVE_ENABLE 34
+#define OPAL_PCI_GET_XIVE_REISSUE 35
+#define OPAL_PCI_SET_XIVE_REISSUE 36
+#define OPAL_PCI_SET_XIVE_PE 37
+#define OPAL_GET_XIVE_SOURCE 38
+#define OPAL_GET_MSI_32 39
+#define OPAL_GET_MSI_64 40
+#define OPAL_START_CPU 41
+#define OPAL_QUERY_CPU_STATUS 42
+#define OPAL_WRITE_OPPANEL 43 /* unimplemented */
+#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
+#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
+#define OPAL_PCI_RESET 49
+#define OPAL_PCI_GET_HUB_DIAG_DATA 50
+#define OPAL_PCI_GET_PHB_DIAG_DATA 51
+#define OPAL_PCI_FENCE_PHB 52
+#define OPAL_PCI_REINIT 53
+#define OPAL_PCI_MASK_PE_ERROR 54
+#define OPAL_SET_SLOT_LED_STATUS 55
+#define OPAL_GET_EPOW_STATUS 56
+#define OPAL_SET_SYSTEM_ATTENTION_LED 57
+#define OPAL_RESERVED1 58
+#define OPAL_RESERVED2 59
+#define OPAL_PCI_NEXT_ERROR 60
+#define OPAL_PCI_EEH_FREEZE_STATUS2 61
+#define OPAL_PCI_POLL 62
+#define OPAL_PCI_MSI_EOI 63
+#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
+#define OPAL_XSCOM_READ 65
+#define OPAL_XSCOM_WRITE 66
+#define OPAL_LPC_READ 67
+#define OPAL_LPC_WRITE 68
+#define OPAL_RETURN_CPU 69
+#define OPAL_REINIT_CPUS 70
+#define OPAL_ELOG_READ 71
+#define OPAL_ELOG_WRITE 72
+#define OPAL_ELOG_ACK 73
+#define OPAL_ELOG_RESEND 74
+#define OPAL_ELOG_SIZE 75
+#define OPAL_FLASH_VALIDATE 76
+#define OPAL_FLASH_MANAGE 77
+#define OPAL_FLASH_UPDATE 78
+#define OPAL_RESYNC_TIMEBASE 79
+#define OPAL_CHECK_TOKEN 80
+#define OPAL_DUMP_INIT 81
+#define OPAL_DUMP_INFO 82
+#define OPAL_DUMP_READ 83
+#define OPAL_DUMP_ACK 84
+#define OPAL_GET_MSG 85
+#define OPAL_CHECK_ASYNC_COMPLETION 86
+#define OPAL_SYNC_HOST_REBOOT 87
+#define OPAL_SENSOR_READ 88
+#define OPAL_GET_PARAM 89
+#define OPAL_SET_PARAM 90
+#define OPAL_DUMP_RESEND 91
+#define OPAL_ELOG_SEND 92 /* Deprecated */
+#define OPAL_PCI_SET_PHB_CAPI_MODE 93
+#define OPAL_DUMP_INFO2 94
+#define OPAL_WRITE_OPPANEL_ASYNC 95
+#define OPAL_PCI_ERR_INJECT 96
+#define OPAL_PCI_EEH_FREEZE_SET 97
+#define OPAL_HANDLE_HMI 98
+#define OPAL_CONFIG_CPU_IDLE_STATE 99
+#define OPAL_SLW_SET_REG 100
+#define OPAL_REGISTER_DUMP_REGION 101
+#define OPAL_UNREGISTER_DUMP_REGION 102
+#define OPAL_WRITE_TPO 103
+#define OPAL_READ_TPO 104
+#define OPAL_GET_DPO_STATUS 105
+#define OPAL_OLD_I2C_REQUEST 106 /* Deprecated */
+#define OPAL_IPMI_SEND 107
+#define OPAL_IPMI_RECV 108
+#define OPAL_I2C_REQUEST 109
+#define OPAL_FLASH_READ 110
+#define OPAL_FLASH_WRITE 111
+#define OPAL_FLASH_ERASE 112
+#define OPAL_LAST 112
+
+/* Device tree flags */
+
+/* Flags set in power-mgmt nodes in device tree if
+ * respective idle states are supported in the platform.
+ */
+#define OPAL_PM_NAP_ENABLED 0x00010000
+#define OPAL_PM_SLEEP_ENABLED 0x00020000
+#define OPAL_PM_WINKLE_ENABLED 0x00040000
+#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000 /* with workaround */
+
+#ifndef __ASSEMBLY__
+
+/* Other enums */
+enum OpalFreezeState {
+ OPAL_EEH_STOPPED_NOT_FROZEN = 0,
+ OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
+ OPAL_EEH_STOPPED_DMA_FREEZE = 2,
+ OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
+ OPAL_EEH_STOPPED_RESET = 4,
+ OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
+ OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
+};
+
+enum OpalEehFreezeActionToken {
+ OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
+
+ OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
+ OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
+ OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
+};
+
+enum OpalPciStatusToken {
+ OPAL_EEH_NO_ERROR = 0,
+ OPAL_EEH_IOC_ERROR = 1,
+ OPAL_EEH_PHB_ERROR = 2,
+ OPAL_EEH_PE_ERROR = 3,
+ OPAL_EEH_PE_MMIO_ERROR = 4,
+ OPAL_EEH_PE_DMA_ERROR = 5
+};
+
+enum OpalPciErrorSeverity {
+ OPAL_EEH_SEV_NO_ERROR = 0,
+ OPAL_EEH_SEV_IOC_DEAD = 1,
+ OPAL_EEH_SEV_PHB_DEAD = 2,
+ OPAL_EEH_SEV_PHB_FENCED = 3,
+ OPAL_EEH_SEV_PE_ER = 4,
+ OPAL_EEH_SEV_INF = 5
+};
+
+enum OpalErrinjectType {
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
+ OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
+};
+
+enum OpalErrinjectFunc {
+ /* IOA bus specific errors */
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
+ OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
+ OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
+ OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
+};
+
+enum OpalMmioWindowType {
+ OPAL_M32_WINDOW_TYPE = 1,
+ OPAL_M64_WINDOW_TYPE = 2,
+ OPAL_IO_WINDOW_TYPE = 3
+};
+
+enum OpalExceptionHandler {
+ OPAL_MACHINE_CHECK_HANDLER = 1,
+ OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
+ OPAL_SOFTPATCH_HANDLER = 3
+};
+
+enum OpalPendingState {
+ OPAL_EVENT_OPAL_INTERNAL = 0x1,
+ OPAL_EVENT_NVRAM = 0x2,
+ OPAL_EVENT_RTC = 0x4,
+ OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
+ OPAL_EVENT_CONSOLE_INPUT = 0x10,
+ OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
+ OPAL_EVENT_ERROR_LOG = 0x40,
+ OPAL_EVENT_EPOW = 0x80,
+ OPAL_EVENT_LED_STATUS = 0x100,
+ OPAL_EVENT_PCI_ERROR = 0x200,
+ OPAL_EVENT_DUMP_AVAIL = 0x400,
+ OPAL_EVENT_MSG_PENDING = 0x800,
+};
+
+enum OpalThreadStatus {
+ OPAL_THREAD_INACTIVE = 0x0,
+ OPAL_THREAD_STARTED = 0x1,
+ OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
+};
+
+enum OpalPciBusCompare {
+ OpalPciBusAny = 0, /* Any bus number match */
+ OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
+ OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
+ OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
+ OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
+ OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
+ OpalPciBusAll = 7, /* Match bus number exactly */
+};
+
+enum OpalDeviceCompare {
+ OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
+ OPAL_COMPARE_RID_DEVICE_NUMBER = 1
+};
+
+enum OpalFuncCompare {
+ OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
+ OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
+};
+
+enum OpalPeAction {
+ OPAL_UNMAP_PE = 0,
+ OPAL_MAP_PE = 1
+};
+
+enum OpalPeltvAction {
+ OPAL_REMOVE_PE_FROM_DOMAIN = 0,
+ OPAL_ADD_PE_TO_DOMAIN = 1
+};
+
+enum OpalMveEnableAction {
+ OPAL_DISABLE_MVE = 0,
+ OPAL_ENABLE_MVE = 1
+};
+
+enum OpalM64Action {
+ OPAL_DISABLE_M64 = 0,
+ OPAL_ENABLE_M64_SPLIT = 1,
+ OPAL_ENABLE_M64_NON_SPLIT = 2
+};
+
+enum OpalPciResetScope {
+ OPAL_RESET_PHB_COMPLETE = 1,
+ OPAL_RESET_PCI_LINK = 2,
+ OPAL_RESET_PHB_ERROR = 3,
+ OPAL_RESET_PCI_HOT = 4,
+ OPAL_RESET_PCI_FUNDAMENTAL = 5,
+ OPAL_RESET_PCI_IODA_TABLE = 6
+};
+
+enum OpalPciReinitScope {
+ /*
+ * Note: we chose values that do not overlap
+ * OpalPciResetScope as OPAL v2 used the same
+ * enum for both
+ */
+ OPAL_REINIT_PCI_DEV = 1000
+};
+
+enum OpalPciResetState {
+ OPAL_DEASSERT_RESET = 0,
+ OPAL_ASSERT_RESET = 1
+};
+
+/*
+ * Address cycle types for LPC accesses. These also correspond
+ * to the content of the first cell of the "reg" property for
+ * device nodes on the LPC bus
+ */
+enum OpalLPCAddressType {
+ OPAL_LPC_MEM = 0,
+ OPAL_LPC_IO = 1,
+ OPAL_LPC_FW = 2,
+};
+
+enum opal_msg_type {
+ OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
+ * additional params function-specific
+ */
+ OPAL_MSG_MEM_ERR,
+ OPAL_MSG_EPOW,
+ OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
+ OPAL_MSG_HMI_EVT,
+ OPAL_MSG_DPO,
+ OPAL_MSG_TYPE_MAX,
+};
+
+struct opal_msg {
+ __be32 msg_type;
+ __be32 reserved;
+ __be64 params[8];
+};
+
+/* System parameter permission */
+enum OpalSysparamPerm {
+ OPAL_SYSPARAM_READ = 0x1,
+ OPAL_SYSPARAM_WRITE = 0x2,
+ OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
+};
+
+enum {
+ OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
+};
+
+struct opal_ipmi_msg {
+ uint8_t version;
+ uint8_t netfn;
+ uint8_t cmd;
+ uint8_t data[];
+};
+
+/* FSP memory errors handling */
+enum OpalMemErr_Version {
+ OpalMemErr_V1 = 1,
+};
+
+enum OpalMemErrType {
+ OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
+ OPAL_MEM_ERR_TYPE_DYN_DALLOC,
+};
+
+/* Memory Resilience error type */
+enum OpalMemErr_ResilErrType {
+ OPAL_MEM_RESILIENCE_CE = 0,
+ OPAL_MEM_RESILIENCE_UE,
+ OPAL_MEM_RESILIENCE_UE_SCRUB,
+};
+
+/* Dynamic Memory Deallocation type */
+enum OpalMemErr_DynErrType {
+ OPAL_MEM_DYNAMIC_DEALLOC = 0,
+};
+
+struct OpalMemoryErrorData {
+ enum OpalMemErr_Version version:8; /* 0x00 */
+ enum OpalMemErrType type:8; /* 0x01 */
+ __be16 flags; /* 0x02 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ union {
+ /* Memory Resilience corrected/uncorrected error info */
+ struct {
+ enum OpalMemErr_ResilErrType resil_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } resilience;
+ /* Dynamic memory deallocation error info */
+ struct {
+ enum OpalMemErr_DynErrType dyn_err_type:8;
+ uint8_t reserved_1[7];
+ __be64 physical_address_start;
+ __be64 physical_address_end;
+ } dyn_dealloc;
+ } u;
+};
+
+/* HMI interrupt event */
+enum OpalHMI_Version {
+ OpalHMIEvt_V1 = 1,
+};
+
+enum OpalHMI_Severity {
+ OpalHMI_SEV_NO_ERROR = 0,
+ OpalHMI_SEV_WARNING = 1,
+ OpalHMI_SEV_ERROR_SYNC = 2,
+ OpalHMI_SEV_FATAL = 3,
+};
+
+enum OpalHMI_Disposition {
+ OpalHMI_DISPOSITION_RECOVERED = 0,
+ OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
+};
+
+enum OpalHMI_ErrType {
+ OpalHMI_ERROR_MALFUNC_ALERT = 0,
+ OpalHMI_ERROR_PROC_RECOV_DONE,
+ OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
+ OpalHMI_ERROR_PROC_RECOV_MASKED,
+ OpalHMI_ERROR_TFAC,
+ OpalHMI_ERROR_TFMR_PARITY,
+ OpalHMI_ERROR_HA_OVERFLOW_WARN,
+ OpalHMI_ERROR_XSCOM_FAIL,
+ OpalHMI_ERROR_XSCOM_DONE,
+ OpalHMI_ERROR_SCOM_FIR,
+ OpalHMI_ERROR_DEBUG_TRIG_FIR,
+ OpalHMI_ERROR_HYP_RESOURCE,
+ OpalHMI_ERROR_CAPP_RECOVERY,
+};
+
+struct OpalHMIEvent {
+ uint8_t version; /* 0x00 */
+ uint8_t severity; /* 0x01 */
+ uint8_t type; /* 0x02 */
+ uint8_t disposition; /* 0x03 */
+ uint8_t reserved_1[4]; /* 0x04 */
+
+ __be64 hmer;
+ /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
+ __be64 tfmr;
+};
+
+enum {
+ OPAL_P7IOC_DIAG_TYPE_NONE = 0,
+ OPAL_P7IOC_DIAG_TYPE_RGC = 1,
+ OPAL_P7IOC_DIAG_TYPE_BI = 2,
+ OPAL_P7IOC_DIAG_TYPE_CI = 3,
+ OPAL_P7IOC_DIAG_TYPE_MISC = 4,
+ OPAL_P7IOC_DIAG_TYPE_I2C = 5,
+ OPAL_P7IOC_DIAG_TYPE_LAST = 6
+};
+
+struct OpalIoP7IOCErrorData {
+ __be16 type;
+
+ /* GEM */
+ __be64 gemXfir;
+ __be64 gemRfir;
+ __be64 gemRirqfir;
+ __be64 gemMask;
+ __be64 gemRwof;
+
+ /* LEM */
+ __be64 lemFir;
+ __be64 lemErrMask;
+ __be64 lemAction0;
+ __be64 lemAction1;
+ __be64 lemWof;
+
+ union {
+ struct OpalIoP7IOCRgcErrorData {
+ __be64 rgcStatus; /* 3E1C10 */
+ __be64 rgcLdcp; /* 3E1C18 */
+ }rgc;
+ struct OpalIoP7IOCBiErrorData {
+ __be64 biLdcp0; /* 3C0100, 3C0118 */
+ __be64 biLdcp1; /* 3C0108, 3C0120 */
+ __be64 biLdcp2; /* 3C0110, 3C0128 */
+ __be64 biFenceStatus; /* 3C0130, 3C0130 */
+
+ uint8_t biDownbound; /* BI Downbound or Upbound */
+ }bi;
+ struct OpalIoP7IOCCiErrorData {
+ __be64 ciPortStatus; /* 3Dn008 */
+ __be64 ciPortLdcp; /* 3Dn010 */
+
+ uint8_t ciPort; /* Index of CI port: 0/1 */
+ }ci;
+ };
+};
+
+/**
+ * This structure defines the overlay which will be used to store PHB error
+ * data upon request.
+ */
+enum {
+ OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
+};
+
+enum {
+ OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+ OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
+};
+
+enum {
+ OPAL_P7IOC_NUM_PEST_REGS = 128,
+ OPAL_PHB3_NUM_PEST_REGS = 256
+};
+
+struct OpalIoPhbErrorCommon {
+ __be32 version;
+ __be32 ioType;
+ __be32 len;
+};
+
+struct OpalIoP7IOCPhbErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ // P7IOC utl regs
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ // P7IOC cfg regs
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ // cfg AER regs
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ // Record data about the call to allocate a buffer.
+ __be64 errorClass;
+ __be64 correlator;
+
+ //P7IOC MMIO Error Regs
+ __be64 p7iocPlssr; // n120
+ __be64 p7iocCsr; // n110
+ __be64 lemFir; // nC00
+ __be64 lemErrorMask; // nC18
+ __be64 lemWOF; // nC40
+ __be64 phbErrorStatus; // nC80
+ __be64 phbFirstErrorStatus; // nC88
+ __be64 phbErrorLog0; // nCC0
+ __be64 phbErrorLog1; // nCC8
+ __be64 mmioErrorStatus; // nD00
+ __be64 mmioFirstErrorStatus; // nD08
+ __be64 mmioErrorLog0; // nD40
+ __be64 mmioErrorLog1; // nD48
+ __be64 dma0ErrorStatus; // nD80
+ __be64 dma0FirstErrorStatus; // nD88
+ __be64 dma0ErrorLog0; // nDC0
+ __be64 dma0ErrorLog1; // nDC8
+ __be64 dma1ErrorStatus; // nE00
+ __be64 dma1FirstErrorStatus; // nE08
+ __be64 dma1ErrorLog0; // nE40
+ __be64 dma1ErrorLog1; // nE48
+ __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
+ __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
+};
+
+struct OpalIoPhb3ErrorData {
+ struct OpalIoPhbErrorCommon common;
+
+ __be32 brdgCtl;
+
+ /* PHB3 UTL regs */
+ __be32 portStatusReg;
+ __be32 rootCmplxStatus;
+ __be32 busAgentStatus;
+
+ /* PHB3 cfg regs */
+ __be32 deviceStatus;
+ __be32 slotStatus;
+ __be32 linkStatus;
+ __be32 devCmdStatus;
+ __be32 devSecStatus;
+
+ /* cfg AER regs */
+ __be32 rootErrorStatus;
+ __be32 uncorrErrorStatus;
+ __be32 corrErrorStatus;
+ __be32 tlpHdr1;
+ __be32 tlpHdr2;
+ __be32 tlpHdr3;
+ __be32 tlpHdr4;
+ __be32 sourceId;
+
+ __be32 rsv3;
+
+ /* Record data about the call to allocate a buffer */
+ __be64 errorClass;
+ __be64 correlator;
+
+ /* PHB3 MMIO Error Regs */
+ __be64 nFir; /* 000 */
+ __be64 nFirMask; /* 003 */
+ __be64 nFirWOF; /* 008 */
+ __be64 phbPlssr; /* 120 */
+ __be64 phbCsr; /* 110 */
+ __be64 lemFir; /* C00 */
+ __be64 lemErrorMask; /* C18 */
+ __be64 lemWOF; /* C40 */
+ __be64 phbErrorStatus; /* C80 */
+ __be64 phbFirstErrorStatus; /* C88 */
+ __be64 phbErrorLog0; /* CC0 */
+ __be64 phbErrorLog1; /* CC8 */
+ __be64 mmioErrorStatus; /* D00 */
+ __be64 mmioFirstErrorStatus; /* D08 */
+ __be64 mmioErrorLog0; /* D40 */
+ __be64 mmioErrorLog1; /* D48 */
+ __be64 dma0ErrorStatus; /* D80 */
+ __be64 dma0FirstErrorStatus; /* D88 */
+ __be64 dma0ErrorLog0; /* DC0 */
+ __be64 dma0ErrorLog1; /* DC8 */
+ __be64 dma1ErrorStatus; /* E00 */
+ __be64 dma1FirstErrorStatus; /* E08 */
+ __be64 dma1ErrorLog0; /* E40 */
+ __be64 dma1ErrorLog1; /* E48 */
+ __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
+ __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
+enum {
+ OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
+ OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
+};
+
+typedef struct oppanel_line {
+ __be64 line;
+ __be64 line_len;
+} oppanel_line_t;
+
+/*
+ * SG entries
+ *
+ * WARNING: The current implementation requires each entry
+ * to represent a block that is 4k aligned *and* each block's
+ * size, except the last one in the list, to be a multiple of 4k.
+ */
+struct opal_sg_entry {
+ __be64 data;
+ __be64 length;
+};
+
+/*
+ * Candidate image SG list.
+ *
+ * length = VER | length
+ */
+struct opal_sg_list {
+ __be64 length;
+ __be64 next;
+ struct opal_sg_entry entry[];
+};
+
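To make the alignment warning above concrete, the sketch below fills a single SG node describing one 4k-aligned, linearly-mapped buffer; the function name is invented, and in-tree callers normally build these lists through opal_vmalloc_to_sg_list() instead:

/*
 * Hedged sketch: describe one 4k-aligned kernel buffer with a single
 * opal_sg_list node.  'buf' is assumed to be in the linear mapping so
 * that __pa() is valid.
 */
static void example_fill_sg_node(struct opal_sg_list *sg,
				 void *buf, unsigned long len)
{
	/* Each entry must describe a 4k-aligned block (see above). */
	WARN_ON((unsigned long)buf & 0xfffUL);

	sg->entry[0].data   = cpu_to_be64(__pa(buf));
	sg->entry[0].length = cpu_to_be64(len);

	/*
	 * Byte length of this node.  As the comment above notes, the
	 * consuming OPAL call also folds an interface version into this
	 * field; that encoding is call-specific and not shown here.
	 */
	sg->length = cpu_to_be64(sizeof(*sg) + sizeof(sg->entry[0]));
	sg->next   = cpu_to_be64(0);	/* last node in the chain */
}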
+/*
+ * Dump region ID range usable by the OS
+ */
+#define OPAL_DUMP_REGION_HOST_START 0x80
+#define OPAL_DUMP_REGION_LOG_BUF 0x80
+#define OPAL_DUMP_REGION_HOST_END 0xFF
+
+/* CAPI modes for PHB */
+enum {
+ OPAL_PHB_CAPI_MODE_PCIE = 0,
+ OPAL_PHB_CAPI_MODE_CAPI = 1,
+ OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
+ OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
+};
+
+/* OPAL I2C request */
+struct opal_i2c_request {
+ uint8_t type;
+#define OPAL_I2C_RAW_READ 0
+#define OPAL_I2C_RAW_WRITE 1
+#define OPAL_I2C_SM_READ 2
+#define OPAL_I2C_SM_WRITE 3
+ uint8_t flags;
+#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
+ uint8_t subaddr_sz; /* Max 4 */
+ uint8_t reserved;
+ __be16 addr; /* 7 or 10 bit address */
+ __be16 reserved2;
+ __be32 subaddr; /* Sub-address if any */
+ __be32 size; /* Data size */
+ __be64 buffer_ra; /* Buffer real address */
+};
+
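As a worked example of the layout above, here is a hedged sketch that fills the request for a small sub-addressed read; the bus id, device address and token handling are illustrative assumptions, and the opal_i2c_request() wrapper it calls is the one declared in asm/opal.h below:

/*
 * Hedged sketch: read two bytes from a hypothetical EEPROM-style
 * device at 7-bit address 0x50 on OPAL I2C bus 0.  Real callers get
 * the async token from the OPAL async-completion infrastructure.
 */
static int64_t example_i2c_read(u8 *buf, uint64_t async_token)
{
	struct opal_i2c_request req = {
		.type		= OPAL_I2C_SM_READ,
		.flags		= 0,
		.subaddr_sz	= 1,			/* one sub-address byte */
		.addr		= cpu_to_be16(0x50),	/* 7-bit device address */
		.subaddr	= cpu_to_be32(0x00),
		.size		= cpu_to_be32(2),
		.buffer_ra	= cpu_to_be64(__pa(buf)),
	};

	return opal_i2c_request(async_token, 0 /* bus_id */, &req);
}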
+#endif /* __ASSEMBLY__ */
+
+#endif /* __OPAL_API_H */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9ee0a30a02ce..042af1abfc4d 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -9,755 +9,17 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef __OPAL_H
-#define __OPAL_H
+#ifndef _ASM_POWERPC_OPAL_H
+#define _ASM_POWERPC_OPAL_H
-#ifndef __ASSEMBLY__
-/*
- * SG entry
- *
- * WARNING: The current implementation requires each entry
- * to represent a block that is 4k aligned *and* each block
- * size except the last one in the list to be as well.
- */
-struct opal_sg_entry {
- __be64 data;
- __be64 length;
-};
-
-/* SG list */
-struct opal_sg_list {
- __be64 length;
- __be64 next;
- struct opal_sg_entry entry[];
-};
-
-/* We calculate number of sg entries based on PAGE_SIZE */
-#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
-
-#endif /* __ASSEMBLY__ */
-
-/****** OPAL APIs ******/
-
-/* Return codes */
-#define OPAL_SUCCESS 0
-#define OPAL_PARAMETER -1
-#define OPAL_BUSY -2
-#define OPAL_PARTIAL -3
-#define OPAL_CONSTRAINED -4
-#define OPAL_CLOSED -5
-#define OPAL_HARDWARE -6
-#define OPAL_UNSUPPORTED -7
-#define OPAL_PERMISSION -8
-#define OPAL_NO_MEM -9
-#define OPAL_RESOURCE -10
-#define OPAL_INTERNAL_ERROR -11
-#define OPAL_BUSY_EVENT -12
-#define OPAL_HARDWARE_FROZEN -13
-#define OPAL_WRONG_STATE -14
-#define OPAL_ASYNC_COMPLETION -15
-#define OPAL_I2C_TIMEOUT -17
-#define OPAL_I2C_INVALID_CMD -18
-#define OPAL_I2C_LBUS_PARITY -19
-#define OPAL_I2C_BKEND_OVERRUN -20
-#define OPAL_I2C_BKEND_ACCESS -21
-#define OPAL_I2C_ARBT_LOST -22
-#define OPAL_I2C_NACK_RCVD -23
-#define OPAL_I2C_STOP_ERR -24
-
-/* API Tokens (in r0) */
-#define OPAL_INVALID_CALL -1
-#define OPAL_CONSOLE_WRITE 1
-#define OPAL_CONSOLE_READ 2
-#define OPAL_RTC_READ 3
-#define OPAL_RTC_WRITE 4
-#define OPAL_CEC_POWER_DOWN 5
-#define OPAL_CEC_REBOOT 6
-#define OPAL_READ_NVRAM 7
-#define OPAL_WRITE_NVRAM 8
-#define OPAL_HANDLE_INTERRUPT 9
-#define OPAL_POLL_EVENTS 10
-#define OPAL_PCI_SET_HUB_TCE_MEMORY 11
-#define OPAL_PCI_SET_PHB_TCE_MEMORY 12
-#define OPAL_PCI_CONFIG_READ_BYTE 13
-#define OPAL_PCI_CONFIG_READ_HALF_WORD 14
-#define OPAL_PCI_CONFIG_READ_WORD 15
-#define OPAL_PCI_CONFIG_WRITE_BYTE 16
-#define OPAL_PCI_CONFIG_WRITE_HALF_WORD 17
-#define OPAL_PCI_CONFIG_WRITE_WORD 18
-#define OPAL_SET_XIVE 19
-#define OPAL_GET_XIVE 20
-#define OPAL_GET_COMPLETION_TOKEN_STATUS 21 /* obsolete */
-#define OPAL_REGISTER_OPAL_EXCEPTION_HANDLER 22
-#define OPAL_PCI_EEH_FREEZE_STATUS 23
-#define OPAL_PCI_SHPC 24
-#define OPAL_CONSOLE_WRITE_BUFFER_SPACE 25
-#define OPAL_PCI_EEH_FREEZE_CLEAR 26
-#define OPAL_PCI_PHB_MMIO_ENABLE 27
-#define OPAL_PCI_SET_PHB_MEM_WINDOW 28
-#define OPAL_PCI_MAP_PE_MMIO_WINDOW 29
-#define OPAL_PCI_SET_PHB_TABLE_MEMORY 30
-#define OPAL_PCI_SET_PE 31
-#define OPAL_PCI_SET_PELTV 32
-#define OPAL_PCI_SET_MVE 33
-#define OPAL_PCI_SET_MVE_ENABLE 34
-#define OPAL_PCI_GET_XIVE_REISSUE 35
-#define OPAL_PCI_SET_XIVE_REISSUE 36
-#define OPAL_PCI_SET_XIVE_PE 37
-#define OPAL_GET_XIVE_SOURCE 38
-#define OPAL_GET_MSI_32 39
-#define OPAL_GET_MSI_64 40
-#define OPAL_START_CPU 41
-#define OPAL_QUERY_CPU_STATUS 42
-#define OPAL_WRITE_OPPANEL 43
-#define OPAL_PCI_MAP_PE_DMA_WINDOW 44
-#define OPAL_PCI_MAP_PE_DMA_WINDOW_REAL 45
-#define OPAL_PCI_RESET 49
-#define OPAL_PCI_GET_HUB_DIAG_DATA 50
-#define OPAL_PCI_GET_PHB_DIAG_DATA 51
-#define OPAL_PCI_FENCE_PHB 52
-#define OPAL_PCI_REINIT 53
-#define OPAL_PCI_MASK_PE_ERROR 54
-#define OPAL_SET_SLOT_LED_STATUS 55
-#define OPAL_GET_EPOW_STATUS 56
-#define OPAL_SET_SYSTEM_ATTENTION_LED 57
-#define OPAL_RESERVED1 58
-#define OPAL_RESERVED2 59
-#define OPAL_PCI_NEXT_ERROR 60
-#define OPAL_PCI_EEH_FREEZE_STATUS2 61
-#define OPAL_PCI_POLL 62
-#define OPAL_PCI_MSI_EOI 63
-#define OPAL_PCI_GET_PHB_DIAG_DATA2 64
-#define OPAL_XSCOM_READ 65
-#define OPAL_XSCOM_WRITE 66
-#define OPAL_LPC_READ 67
-#define OPAL_LPC_WRITE 68
-#define OPAL_RETURN_CPU 69
-#define OPAL_REINIT_CPUS 70
-#define OPAL_ELOG_READ 71
-#define OPAL_ELOG_WRITE 72
-#define OPAL_ELOG_ACK 73
-#define OPAL_ELOG_RESEND 74
-#define OPAL_ELOG_SIZE 75
-#define OPAL_FLASH_VALIDATE 76
-#define OPAL_FLASH_MANAGE 77
-#define OPAL_FLASH_UPDATE 78
-#define OPAL_RESYNC_TIMEBASE 79
-#define OPAL_CHECK_TOKEN 80
-#define OPAL_DUMP_INIT 81
-#define OPAL_DUMP_INFO 82
-#define OPAL_DUMP_READ 83
-#define OPAL_DUMP_ACK 84
-#define OPAL_GET_MSG 85
-#define OPAL_CHECK_ASYNC_COMPLETION 86
-#define OPAL_SYNC_HOST_REBOOT 87
-#define OPAL_SENSOR_READ 88
-#define OPAL_GET_PARAM 89
-#define OPAL_SET_PARAM 90
-#define OPAL_DUMP_RESEND 91
-#define OPAL_PCI_SET_PHB_CXL_MODE 93
-#define OPAL_DUMP_INFO2 94
-#define OPAL_PCI_ERR_INJECT 96
-#define OPAL_PCI_EEH_FREEZE_SET 97
-#define OPAL_HANDLE_HMI 98
-#define OPAL_CONFIG_CPU_IDLE_STATE 99
-#define OPAL_SLW_SET_REG 100
-#define OPAL_REGISTER_DUMP_REGION 101
-#define OPAL_UNREGISTER_DUMP_REGION 102
-#define OPAL_WRITE_TPO 103
-#define OPAL_READ_TPO 104
-#define OPAL_IPMI_SEND 107
-#define OPAL_IPMI_RECV 108
-#define OPAL_I2C_REQUEST 109
-
-/* Device tree flags */
-
-/* Flags set in power-mgmt nodes in device tree if
- * respective idle states are supported in the platform.
- */
-#define OPAL_PM_NAP_ENABLED 0x00010000
-#define OPAL_PM_SLEEP_ENABLED 0x00020000
-#define OPAL_PM_WINKLE_ENABLED 0x00040000
-#define OPAL_PM_SLEEP_ENABLED_ER1 0x00080000
+#include <asm/opal-api.h>
#ifndef __ASSEMBLY__
#include <linux/notifier.h>
-/* Other enums */
-enum OpalVendorApiTokens {
- OPAL_START_VENDOR_API_RANGE = 1000, OPAL_END_VENDOR_API_RANGE = 1999
-};
-
-enum OpalFreezeState {
- OPAL_EEH_STOPPED_NOT_FROZEN = 0,
- OPAL_EEH_STOPPED_MMIO_FREEZE = 1,
- OPAL_EEH_STOPPED_DMA_FREEZE = 2,
- OPAL_EEH_STOPPED_MMIO_DMA_FREEZE = 3,
- OPAL_EEH_STOPPED_RESET = 4,
- OPAL_EEH_STOPPED_TEMP_UNAVAIL = 5,
- OPAL_EEH_STOPPED_PERM_UNAVAIL = 6
-};
-
-enum OpalEehFreezeActionToken {
- OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO = 1,
- OPAL_EEH_ACTION_CLEAR_FREEZE_DMA = 2,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL = 3,
-
- OPAL_EEH_ACTION_SET_FREEZE_MMIO = 1,
- OPAL_EEH_ACTION_SET_FREEZE_DMA = 2,
- OPAL_EEH_ACTION_SET_FREEZE_ALL = 3
-};
-
-enum OpalPciStatusToken {
- OPAL_EEH_NO_ERROR = 0,
- OPAL_EEH_IOC_ERROR = 1,
- OPAL_EEH_PHB_ERROR = 2,
- OPAL_EEH_PE_ERROR = 3,
- OPAL_EEH_PE_MMIO_ERROR = 4,
- OPAL_EEH_PE_DMA_ERROR = 5
-};
-
-enum OpalPciErrorSeverity {
- OPAL_EEH_SEV_NO_ERROR = 0,
- OPAL_EEH_SEV_IOC_DEAD = 1,
- OPAL_EEH_SEV_PHB_DEAD = 2,
- OPAL_EEH_SEV_PHB_FENCED = 3,
- OPAL_EEH_SEV_PE_ER = 4,
- OPAL_EEH_SEV_INF = 5
-};
-
-enum OpalErrinjectType {
- OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR = 0,
- OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64 = 1,
-};
-
-enum OpalErrinjectFunc {
- /* IOA bus specific errors */
- OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR = 0,
- OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_DATA = 1,
- OPAL_ERR_INJECT_FUNC_IOA_LD_IO_ADDR = 2,
- OPAL_ERR_INJECT_FUNC_IOA_LD_IO_DATA = 3,
- OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_ADDR = 4,
- OPAL_ERR_INJECT_FUNC_IOA_LD_CFG_DATA = 5,
- OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_ADDR = 6,
- OPAL_ERR_INJECT_FUNC_IOA_ST_MEM_DATA = 7,
- OPAL_ERR_INJECT_FUNC_IOA_ST_IO_ADDR = 8,
- OPAL_ERR_INJECT_FUNC_IOA_ST_IO_DATA = 9,
- OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_ADDR = 10,
- OPAL_ERR_INJECT_FUNC_IOA_ST_CFG_DATA = 11,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_ADDR = 12,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_DATA = 13,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_MASTER = 14,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_RD_TARGET = 15,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_ADDR = 16,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_DATA = 17,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_MASTER = 18,
- OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET = 19,
-};
-
-enum OpalShpcAction {
- OPAL_SHPC_GET_LINK_STATE = 0,
- OPAL_SHPC_GET_SLOT_STATE = 1
-};
-
-enum OpalShpcLinkState {
- OPAL_SHPC_LINK_DOWN = 0,
- OPAL_SHPC_LINK_UP = 1
-};
-
-enum OpalMmioWindowType {
- OPAL_M32_WINDOW_TYPE = 1,
- OPAL_M64_WINDOW_TYPE = 2,
- OPAL_IO_WINDOW_TYPE = 3
-};
-
-enum OpalShpcSlotState {
- OPAL_SHPC_DEV_NOT_PRESENT = 0,
- OPAL_SHPC_DEV_PRESENT = 1
-};
-
-enum OpalExceptionHandler {
- OPAL_MACHINE_CHECK_HANDLER = 1,
- OPAL_HYPERVISOR_MAINTENANCE_HANDLER = 2,
- OPAL_SOFTPATCH_HANDLER = 3
-};
-
-enum OpalPendingState {
- OPAL_EVENT_OPAL_INTERNAL = 0x1,
- OPAL_EVENT_NVRAM = 0x2,
- OPAL_EVENT_RTC = 0x4,
- OPAL_EVENT_CONSOLE_OUTPUT = 0x8,
- OPAL_EVENT_CONSOLE_INPUT = 0x10,
- OPAL_EVENT_ERROR_LOG_AVAIL = 0x20,
- OPAL_EVENT_ERROR_LOG = 0x40,
- OPAL_EVENT_EPOW = 0x80,
- OPAL_EVENT_LED_STATUS = 0x100,
- OPAL_EVENT_PCI_ERROR = 0x200,
- OPAL_EVENT_DUMP_AVAIL = 0x400,
- OPAL_EVENT_MSG_PENDING = 0x800,
-};
-
-enum OpalMessageType {
- OPAL_MSG_ASYNC_COMP = 0, /* params[0] = token, params[1] = rc,
- * additional params function-specific
- */
- OPAL_MSG_MEM_ERR,
- OPAL_MSG_EPOW,
- OPAL_MSG_SHUTDOWN, /* params[0] = 1 reboot, 0 shutdown */
- OPAL_MSG_HMI_EVT,
- OPAL_MSG_TYPE_MAX,
-};
-
-enum OpalThreadStatus {
- OPAL_THREAD_INACTIVE = 0x0,
- OPAL_THREAD_STARTED = 0x1,
- OPAL_THREAD_UNAVAILABLE = 0x2 /* opal-v3 */
-};
-
-enum OpalPciBusCompare {
- OpalPciBusAny = 0, /* Any bus number match */
- OpalPciBus3Bits = 2, /* Match top 3 bits of bus number */
- OpalPciBus4Bits = 3, /* Match top 4 bits of bus number */
- OpalPciBus5Bits = 4, /* Match top 5 bits of bus number */
- OpalPciBus6Bits = 5, /* Match top 6 bits of bus number */
- OpalPciBus7Bits = 6, /* Match top 7 bits of bus number */
- OpalPciBusAll = 7, /* Match bus number exactly */
-};
-
-enum OpalDeviceCompare {
- OPAL_IGNORE_RID_DEVICE_NUMBER = 0,
- OPAL_COMPARE_RID_DEVICE_NUMBER = 1
-};
-
-enum OpalFuncCompare {
- OPAL_IGNORE_RID_FUNCTION_NUMBER = 0,
- OPAL_COMPARE_RID_FUNCTION_NUMBER = 1
-};
-
-enum OpalPeAction {
- OPAL_UNMAP_PE = 0,
- OPAL_MAP_PE = 1
-};
-
-enum OpalPeltvAction {
- OPAL_REMOVE_PE_FROM_DOMAIN = 0,
- OPAL_ADD_PE_TO_DOMAIN = 1
-};
-
-enum OpalMveEnableAction {
- OPAL_DISABLE_MVE = 0,
- OPAL_ENABLE_MVE = 1
-};
-
-enum OpalM64EnableAction {
- OPAL_DISABLE_M64 = 0,
- OPAL_ENABLE_M64_SPLIT = 1,
- OPAL_ENABLE_M64_NON_SPLIT = 2
-};
-
-enum OpalPciResetScope {
- OPAL_RESET_PHB_COMPLETE = 1,
- OPAL_RESET_PCI_LINK = 2,
- OPAL_RESET_PHB_ERROR = 3,
- OPAL_RESET_PCI_HOT = 4,
- OPAL_RESET_PCI_FUNDAMENTAL = 5,
- OPAL_RESET_PCI_IODA_TABLE = 6
-};
-
-enum OpalPciReinitScope {
- OPAL_REINIT_PCI_DEV = 1000
-};
-
-enum OpalPciResetState {
- OPAL_DEASSERT_RESET = 0,
- OPAL_ASSERT_RESET = 1
-};
-
-enum OpalPciMaskAction {
- OPAL_UNMASK_ERROR_TYPE = 0,
- OPAL_MASK_ERROR_TYPE = 1
-};
-
-enum OpalSlotLedType {
- OPAL_SLOT_LED_ID_TYPE = 0,
- OPAL_SLOT_LED_FAULT_TYPE = 1
-};
-
-enum OpalLedAction {
- OPAL_TURN_OFF_LED = 0,
- OPAL_TURN_ON_LED = 1,
- OPAL_QUERY_LED_STATE_AFTER_BUSY = 2
-};
-
-enum OpalEpowStatus {
- OPAL_EPOW_NONE = 0,
- OPAL_EPOW_UPS = 1,
- OPAL_EPOW_OVER_AMBIENT_TEMP = 2,
- OPAL_EPOW_OVER_INTERNAL_TEMP = 3
-};
-
-/*
- * Address cycle types for LPC accesses. These also correspond
- * to the content of the first cell of the "reg" property for
- * device nodes on the LPC bus
- */
-enum OpalLPCAddressType {
- OPAL_LPC_MEM = 0,
- OPAL_LPC_IO = 1,
- OPAL_LPC_FW = 2,
-};
-
-/* System parameter permission */
-enum OpalSysparamPerm {
- OPAL_SYSPARAM_READ = 0x1,
- OPAL_SYSPARAM_WRITE = 0x2,
- OPAL_SYSPARAM_RW = (OPAL_SYSPARAM_READ | OPAL_SYSPARAM_WRITE),
-};
-
-struct opal_msg {
- __be32 msg_type;
- __be32 reserved;
- __be64 params[8];
-};
-
-enum {
- OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
-};
-
-struct opal_ipmi_msg {
- uint8_t version;
- uint8_t netfn;
- uint8_t cmd;
- uint8_t data[];
-};
-
-/* FSP memory errors handling */
-enum OpalMemErr_Version {
- OpalMemErr_V1 = 1,
-};
-
-enum OpalMemErrType {
- OPAL_MEM_ERR_TYPE_RESILIENCE = 0,
- OPAL_MEM_ERR_TYPE_DYN_DALLOC,
- OPAL_MEM_ERR_TYPE_SCRUB,
-};
-
-/* Memory Resilience error type */
-enum OpalMemErr_ResilErrType {
- OPAL_MEM_RESILIENCE_CE = 0,
- OPAL_MEM_RESILIENCE_UE,
- OPAL_MEM_RESILIENCE_UE_SCRUB,
-};
-
-/* Dynamic Memory Deallocation type */
-enum OpalMemErr_DynErrType {
- OPAL_MEM_DYNAMIC_DEALLOC = 0,
-};
-
-/* OpalMemoryErrorData->flags */
-#define OPAL_MEM_CORRECTED_ERROR 0x0001
-#define OPAL_MEM_THRESHOLD_EXCEEDED 0x0002
-#define OPAL_MEM_ACK_REQUIRED 0x8000
-
-struct OpalMemoryErrorData {
- enum OpalMemErr_Version version:8; /* 0x00 */
- enum OpalMemErrType type:8; /* 0x01 */
- __be16 flags; /* 0x02 */
- uint8_t reserved_1[4]; /* 0x04 */
-
- union {
- /* Memory Resilience corrected/uncorrected error info */
- struct {
- enum OpalMemErr_ResilErrType resil_err_type:8;
- uint8_t reserved_1[7];
- __be64 physical_address_start;
- __be64 physical_address_end;
- } resilience;
- /* Dynamic memory deallocation error info */
- struct {
- enum OpalMemErr_DynErrType dyn_err_type:8;
- uint8_t reserved_1[7];
- __be64 physical_address_start;
- __be64 physical_address_end;
- } dyn_dealloc;
- } u;
-};
-
-/* HMI interrupt event */
-enum OpalHMI_Version {
- OpalHMIEvt_V1 = 1,
-};
-
-enum OpalHMI_Severity {
- OpalHMI_SEV_NO_ERROR = 0,
- OpalHMI_SEV_WARNING = 1,
- OpalHMI_SEV_ERROR_SYNC = 2,
- OpalHMI_SEV_FATAL = 3,
-};
-
-enum OpalHMI_Disposition {
- OpalHMI_DISPOSITION_RECOVERED = 0,
- OpalHMI_DISPOSITION_NOT_RECOVERED = 1,
-};
-
-enum OpalHMI_ErrType {
- OpalHMI_ERROR_MALFUNC_ALERT = 0,
- OpalHMI_ERROR_PROC_RECOV_DONE,
- OpalHMI_ERROR_PROC_RECOV_DONE_AGAIN,
- OpalHMI_ERROR_PROC_RECOV_MASKED,
- OpalHMI_ERROR_TFAC,
- OpalHMI_ERROR_TFMR_PARITY,
- OpalHMI_ERROR_HA_OVERFLOW_WARN,
- OpalHMI_ERROR_XSCOM_FAIL,
- OpalHMI_ERROR_XSCOM_DONE,
- OpalHMI_ERROR_SCOM_FIR,
- OpalHMI_ERROR_DEBUG_TRIG_FIR,
- OpalHMI_ERROR_HYP_RESOURCE,
-};
-
-struct OpalHMIEvent {
- uint8_t version; /* 0x00 */
- uint8_t severity; /* 0x01 */
- uint8_t type; /* 0x02 */
- uint8_t disposition; /* 0x03 */
- uint8_t reserved_1[4]; /* 0x04 */
-
- __be64 hmer;
- /* TFMR register. Valid only for TFAC and TFMR_PARITY error type. */
- __be64 tfmr;
-};
-
-enum {
- OPAL_P7IOC_DIAG_TYPE_NONE = 0,
- OPAL_P7IOC_DIAG_TYPE_RGC = 1,
- OPAL_P7IOC_DIAG_TYPE_BI = 2,
- OPAL_P7IOC_DIAG_TYPE_CI = 3,
- OPAL_P7IOC_DIAG_TYPE_MISC = 4,
- OPAL_P7IOC_DIAG_TYPE_I2C = 5,
- OPAL_P7IOC_DIAG_TYPE_LAST = 6
-};
-
-struct OpalIoP7IOCErrorData {
- __be16 type;
-
- /* GEM */
- __be64 gemXfir;
- __be64 gemRfir;
- __be64 gemRirqfir;
- __be64 gemMask;
- __be64 gemRwof;
-
- /* LEM */
- __be64 lemFir;
- __be64 lemErrMask;
- __be64 lemAction0;
- __be64 lemAction1;
- __be64 lemWof;
-
- union {
- struct OpalIoP7IOCRgcErrorData {
- __be64 rgcStatus; /* 3E1C10 */
- __be64 rgcLdcp; /* 3E1C18 */
- }rgc;
- struct OpalIoP7IOCBiErrorData {
- __be64 biLdcp0; /* 3C0100, 3C0118 */
- __be64 biLdcp1; /* 3C0108, 3C0120 */
- __be64 biLdcp2; /* 3C0110, 3C0128 */
- __be64 biFenceStatus; /* 3C0130, 3C0130 */
-
- u8 biDownbound; /* BI Downbound or Upbound */
- }bi;
- struct OpalIoP7IOCCiErrorData {
- __be64 ciPortStatus; /* 3Dn008 */
- __be64 ciPortLdcp; /* 3Dn010 */
-
- u8 ciPort; /* Index of CI port: 0/1 */
- }ci;
- };
-};
-
-/**
- * This structure defines the overlay which will be used to store PHB error
- * data upon request.
- */
-enum {
- OPAL_PHB_ERROR_DATA_VERSION_1 = 1,
-};
-
-enum {
- OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
- OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
-};
-
-enum {
- OPAL_P7IOC_NUM_PEST_REGS = 128,
- OPAL_PHB3_NUM_PEST_REGS = 256
-};
-
-/* CAPI modes for PHB */
-enum {
- OPAL_PHB_CAPI_MODE_PCIE = 0,
- OPAL_PHB_CAPI_MODE_CAPI = 1,
- OPAL_PHB_CAPI_MODE_SNOOP_OFF = 2,
- OPAL_PHB_CAPI_MODE_SNOOP_ON = 3,
-};
-
-struct OpalIoPhbErrorCommon {
- __be32 version;
- __be32 ioType;
- __be32 len;
-};
-
-struct OpalIoP7IOCPhbErrorData {
- struct OpalIoPhbErrorCommon common;
-
- __be32 brdgCtl;
-
- // P7IOC utl regs
- __be32 portStatusReg;
- __be32 rootCmplxStatus;
- __be32 busAgentStatus;
-
- // P7IOC cfg regs
- __be32 deviceStatus;
- __be32 slotStatus;
- __be32 linkStatus;
- __be32 devCmdStatus;
- __be32 devSecStatus;
-
- // cfg AER regs
- __be32 rootErrorStatus;
- __be32 uncorrErrorStatus;
- __be32 corrErrorStatus;
- __be32 tlpHdr1;
- __be32 tlpHdr2;
- __be32 tlpHdr3;
- __be32 tlpHdr4;
- __be32 sourceId;
-
- __be32 rsv3;
-
- // Record data about the call to allocate a buffer.
- __be64 errorClass;
- __be64 correlator;
-
- //P7IOC MMIO Error Regs
- __be64 p7iocPlssr; // n120
- __be64 p7iocCsr; // n110
- __be64 lemFir; // nC00
- __be64 lemErrorMask; // nC18
- __be64 lemWOF; // nC40
- __be64 phbErrorStatus; // nC80
- __be64 phbFirstErrorStatus; // nC88
- __be64 phbErrorLog0; // nCC0
- __be64 phbErrorLog1; // nCC8
- __be64 mmioErrorStatus; // nD00
- __be64 mmioFirstErrorStatus; // nD08
- __be64 mmioErrorLog0; // nD40
- __be64 mmioErrorLog1; // nD48
- __be64 dma0ErrorStatus; // nD80
- __be64 dma0FirstErrorStatus; // nD88
- __be64 dma0ErrorLog0; // nDC0
- __be64 dma0ErrorLog1; // nDC8
- __be64 dma1ErrorStatus; // nE00
- __be64 dma1FirstErrorStatus; // nE08
- __be64 dma1ErrorLog0; // nE40
- __be64 dma1ErrorLog1; // nE48
- __be64 pestA[OPAL_P7IOC_NUM_PEST_REGS];
- __be64 pestB[OPAL_P7IOC_NUM_PEST_REGS];
-};
-
-struct OpalIoPhb3ErrorData {
- struct OpalIoPhbErrorCommon common;
-
- __be32 brdgCtl;
-
- /* PHB3 UTL regs */
- __be32 portStatusReg;
- __be32 rootCmplxStatus;
- __be32 busAgentStatus;
-
- /* PHB3 cfg regs */
- __be32 deviceStatus;
- __be32 slotStatus;
- __be32 linkStatus;
- __be32 devCmdStatus;
- __be32 devSecStatus;
-
- /* cfg AER regs */
- __be32 rootErrorStatus;
- __be32 uncorrErrorStatus;
- __be32 corrErrorStatus;
- __be32 tlpHdr1;
- __be32 tlpHdr2;
- __be32 tlpHdr3;
- __be32 tlpHdr4;
- __be32 sourceId;
-
- __be32 rsv3;
-
- /* Record data about the call to allocate a buffer */
- __be64 errorClass;
- __be64 correlator;
-
- __be64 nFir; /* 000 */
- __be64 nFirMask; /* 003 */
- __be64 nFirWOF; /* 008 */
-
- /* PHB3 MMIO Error Regs */
- __be64 phbPlssr; /* 120 */
- __be64 phbCsr; /* 110 */
- __be64 lemFir; /* C00 */
- __be64 lemErrorMask; /* C18 */
- __be64 lemWOF; /* C40 */
- __be64 phbErrorStatus; /* C80 */
- __be64 phbFirstErrorStatus; /* C88 */
- __be64 phbErrorLog0; /* CC0 */
- __be64 phbErrorLog1; /* CC8 */
- __be64 mmioErrorStatus; /* D00 */
- __be64 mmioFirstErrorStatus; /* D08 */
- __be64 mmioErrorLog0; /* D40 */
- __be64 mmioErrorLog1; /* D48 */
- __be64 dma0ErrorStatus; /* D80 */
- __be64 dma0FirstErrorStatus; /* D88 */
- __be64 dma0ErrorLog0; /* DC0 */
- __be64 dma0ErrorLog1; /* DC8 */
- __be64 dma1ErrorStatus; /* E00 */
- __be64 dma1FirstErrorStatus; /* E08 */
- __be64 dma1ErrorLog0; /* E40 */
- __be64 dma1ErrorLog1; /* E48 */
- __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
- __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
-};
-
-enum {
- OPAL_REINIT_CPUS_HILE_BE = (1 << 0),
- OPAL_REINIT_CPUS_HILE_LE = (1 << 1),
-};
-
-typedef struct oppanel_line {
- const char * line;
- uint64_t line_len;
-} oppanel_line_t;
-
-/* OPAL I2C request */
-struct opal_i2c_request {
- uint8_t type;
-#define OPAL_I2C_RAW_READ 0
-#define OPAL_I2C_RAW_WRITE 1
-#define OPAL_I2C_SM_READ 2
-#define OPAL_I2C_SM_WRITE 3
- uint8_t flags;
-#define OPAL_I2C_ADDR_10 0x01 /* Not supported yet */
- uint8_t subaddr_sz; /* Max 4 */
- uint8_t reserved;
- __be16 addr; /* 7 or 10 bit address */
- __be16 reserved2;
- __be32 subaddr; /* Sub-address if any */
- __be32 size; /* Data size */
- __be64 buffer_ra; /* Buffer real address */
-};
+/* We calculate number of sg entries based on PAGE_SIZE */
+#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
/* /sys/firmware/opal */
extern struct kobject *opal_kobj;
@@ -932,6 +194,13 @@ int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
int64_t opal_i2c_request(uint64_t async_token, uint32_t bus_id,
struct opal_i2c_request *oreq);
+int64_t opal_flash_read(uint64_t id, uint64_t offset, uint64_t buf,
+ uint64_t size, uint64_t token);
+int64_t opal_flash_write(uint64_t id, uint64_t offset, uint64_t buf,
+ uint64_t size, uint64_t token);
+int64_t opal_flash_erase(uint64_t id, uint64_t offset, uint64_t size,
+ uint64_t token);
+
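These new flash wrappers complete asynchronously, which the hedged sketch below glosses over; the function name is made up, and real users obtain and wait on tokens through the OPAL async-completion machinery rather than mapping the return code directly:

/*
 * Hedged sketch: kick off a read of 'size' bytes from flash resource
 * 'id' into a linearly-mapped kernel buffer.
 */
static int64_t example_flash_read(uint64_t id, uint64_t offset,
				  void *buf, uint64_t size, uint64_t token)
{
	int64_t rc;

	rc = opal_flash_read(id, offset, __pa(buf), size, token);
	if (rc == OPAL_ASYNC_COMPLETION)
		rc = OPAL_SUCCESS;	/* result arrives later via opal_msg */

	return rc;
}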
/* Internal functions */
extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
int depth, void *data);
@@ -946,8 +215,10 @@ extern void hvc_opal_init_early(void);
extern int opal_notifier_register(struct notifier_block *nb);
extern int opal_notifier_unregister(struct notifier_block *nb);
-extern int opal_message_notifier_register(enum OpalMessageType msg_type,
+extern int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb);
+extern int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+ struct notifier_block *nb);
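The message-notifier interface above follows the usual notifier_block pattern; a hedged sketch of a subscriber (names invented, payload being the struct opal_msg defined in opal-api.h) might look like this:

/* Hedged sketch: log incoming OPAL HMI messages. */
static int example_opal_msg_event(struct notifier_block *nb,
				  unsigned long msg_type, void *msg)
{
	struct opal_msg *m = msg;

	pr_info("OPAL msg type %lu, param[0] 0x%llx\n", msg_type,
		(unsigned long long)be64_to_cpu(m->params[0]));
	return 0;
}

static struct notifier_block example_opal_msg_nb = {
	.notifier_call = example_opal_msg_event,
};

static int __init example_register_opal_msg(void)
{
	return opal_message_notifier_register(OPAL_MSG_HMI_EVT,
					      &example_opal_msg_nb);
}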
extern void opal_notifier_enable(void);
extern void opal_notifier_disable(void);
extern void opal_notifier_update_evt(uint64_t evt_mask, uint64_t evt_val);
@@ -962,7 +233,7 @@ extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
struct rtc_time;
extern unsigned long opal_get_boot_time(void);
extern void opal_nvram_init(void);
-extern void opal_flash_init(void);
+extern void opal_flash_update_init(void);
extern void opal_flash_term_callback(void);
extern int opal_elog_init(void);
extern void opal_platform_dump_init(void);
@@ -983,13 +254,8 @@ struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
unsigned long vmalloc_size);
void opal_free_sg_list(struct opal_sg_list *sg);
-/*
- * Dump region ID range usable by the OS
- */
-#define OPAL_DUMP_REGION_HOST_START 0x80
-#define OPAL_DUMP_REGION_LOG_BUF 0x80
-#define OPAL_DUMP_REGION_HOST_END 0xFF
+extern int opal_error_code(int rc);
#endif /* __ASSEMBLY__ */
-#endif /* __OPAL_H */
+#endif /* _ASM_POWERPC_OPAL_H */
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index e5f22c6c4bf9..70bd4381f8e6 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -106,9 +106,9 @@ struct paca_struct {
#endif /* CONFIG_PPC_STD_MMU_64 */
#ifdef CONFIG_PPC_BOOK3E
- u64 exgen[8] __attribute__((aligned(0x80)));
+ u64 exgen[8] __aligned(0x40);
/* Keep pgd in the same cacheline as the start of extlb */
- pgd_t *pgd __attribute__((aligned(0x80))); /* Current PGD */
+ pgd_t *pgd __aligned(0x40); /* Current PGD */
pgd_t *kernel_pgd; /* Kernel PGD */
/* Shared by all threads of a core -- points to tcd of first thread */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 546d036fe925..1811c44bf34b 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -15,6 +15,24 @@
struct device_node;
/*
+ * PCI controller operations
+ */
+struct pci_controller_ops {
+ void (*dma_dev_setup)(struct pci_dev *dev);
+ void (*dma_bus_setup)(struct pci_bus *bus);
+
+ int (*probe_mode)(struct pci_bus *);
+
+ /* Called when pci_enable_device() is called. Returns true to
+ * allow assignment/enabling of the device. */
+ bool (*enable_device_hook)(struct pci_dev *);
+
+ /* Called during PCI resource reassignment */
+ resource_size_t (*window_alignment)(struct pci_bus *, unsigned long type);
+ void (*reset_secondary_bus)(struct pci_dev *dev);
+};
+
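Because pci_controller_ops is embedded in struct pci_controller (see below), a platform fills in only the hooks it needs; the following is a hedged sketch for an imaginary platform, with invented names and trivial callback bodies, not the conversion actually done later in this series:

/* Hedged sketch: minimal per-PHB ops for a hypothetical platform. */
static bool example_enable_device_hook(struct pci_dev *dev)
{
	/* e.g. refuse devices that are not yet assigned to a PE */
	return true;
}

static const struct pci_controller_ops example_pci_controller_ops = {
	.enable_device_hook	= example_enable_device_hook,
};

static void example_setup_phb(struct pci_controller *hose)
{
	/* the ops table is copied by value into the controller */
	hose->controller_ops = example_pci_controller_ops;
}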
+/*
* Structure of a PCI controller (host bridge)
*/
struct pci_controller {
@@ -46,6 +64,7 @@ struct pci_controller {
resource_size_t isa_mem_phys;
resource_size_t isa_mem_size;
+ struct pci_controller_ops controller_ops;
struct pci_ops *ops;
unsigned int __iomem *cfg_addr;
void __iomem *cfg_data;
@@ -89,6 +108,7 @@ struct pci_controller {
#ifdef CONFIG_PPC64
unsigned long buid;
+ struct pci_dn *pci_data;
#endif /* CONFIG_PPC64 */
void *private_data;
@@ -154,31 +174,51 @@ static inline int isa_vaddr_is_ioport(void __iomem *address)
struct iommu_table;
struct pci_dn {
+ int flags;
+#define PCI_DN_FLAG_IOV_VF 0x01
+
int busno; /* pci bus number */
int devfn; /* pci device and function number */
+ int vendor_id; /* Vendor ID */
+ int device_id; /* Device ID */
+ int class_code; /* Device class code */
+ struct pci_dn *parent;
struct pci_controller *phb; /* for pci devices */
struct iommu_table *iommu_table; /* for phb's or bridges */
struct device_node *node; /* back-pointer to the device_node */
int pci_ext_config_space; /* for pci devices */
- struct pci_dev *pcidev; /* back-pointer to the pci device */
#ifdef CONFIG_EEH
struct eeh_dev *edev; /* eeh device */
#endif
#define IODA_INVALID_PE (-1)
#ifdef CONFIG_PPC_POWERNV
int pe_number;
+#ifdef CONFIG_PCI_IOV
+ u16 vfs_expanded; /* number of VFs IOV BAR expanded */
+	u16     num_vfs;		/* number of VFs enabled */
+ int offset; /* PE# for the first VF PE */
+#define M64_PER_IOV 4
+ int m64_per_iov;
+#define IODA_INVALID_M64 (-1)
+ int m64_wins[PCI_SRIOV_NUM_BARS][M64_PER_IOV];
+#endif /* CONFIG_PCI_IOV */
#endif
+ struct list_head child_list;
+ struct list_head list;
};
/* Get the pointer to a device_node's pci_dn */
#define PCI_DN(dn) ((struct pci_dn *) (dn)->data)
+extern struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+ int devfn);
extern struct pci_dn *pci_get_pdn(struct pci_dev *pdev);
-
-extern void * update_dn_pci_info(struct device_node *dn, void *data);
+extern struct pci_dn *add_dev_pci_data(struct pci_dev *pdev);
+extern void remove_dev_pci_data(struct pci_dev *pdev);
+extern void *update_dn_pci_info(struct device_node *dn, void *data);
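The helpers above, together with pdn_to_eeh_dev() further down in this header, replace the old device_node based lookups; a hedged, purely illustrative chain from pci_dev to eeh_dev (names invented):

/* Hedged sketch: pci_dev -> pci_dn -> eeh_dev. */
static struct eeh_dev *example_dev_to_edev(struct pci_dev *pdev)
{
	struct pci_dn *pdn = pci_get_pdn_by_devfn(pdev->bus, pdev->devfn);

	return pdn ? pdn_to_eeh_dev(pdn) : NULL;
}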
static inline int pci_device_from_OF_node(struct device_node *np,
u8 *bus, u8 *devfn)
@@ -191,20 +231,12 @@ static inline int pci_device_from_OF_node(struct device_node *np,
}
#if defined(CONFIG_EEH)
-static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
+static inline struct eeh_dev *pdn_to_eeh_dev(struct pci_dn *pdn)
{
- /*
- * For those OF nodes whose parent isn't PCI bridge, they
- * don't have PCI_DN actually. So we have to skip them for
- * any EEH operations.
- */
- if (!dn || !PCI_DN(dn))
- return NULL;
-
- return PCI_DN(dn)->edev;
+ return pdn ? pdn->edev : NULL;
}
#else
-#define of_node_to_eeh_dev(x) (NULL)
+#define pdn_to_eeh_dev(x) (NULL)
#endif
/** Find the bus corresponding to the indicated device node */
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index 1b0739bc14b5..4aef8d660999 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -22,7 +22,7 @@
#include <asm-generic/pci-dma-compat.h>
-/* Return values for ppc_md.pci_probe_mode function */
+/* Return values for pci_controller_ops.probe_mode function */
#define PCI_PROBE_NONE -1 /* Don't look at this bus at all */
#define PCI_PROBE_NORMAL 0 /* Do normal PCI probing */
#define PCI_PROBE_DEVTREE 1 /* Instantiate from device tree */
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index db1e2b8eff3c..4122a86d6858 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -23,8 +23,6 @@ extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
extern struct list_head hose_list;
-extern void find_and_init_phbs(void);
-
extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
@@ -33,9 +31,14 @@ extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */
/* PCI device_node operations */
struct device_node;
+struct pci_dn;
+
typedef void *(*traverse_func)(struct device_node *me, void *data);
void *traverse_pci_devices(struct device_node *start, traverse_func pre,
void *data);
+void *traverse_pci_dn(struct pci_dn *root,
+ void *(*fn)(struct pci_dn *, void *),
+ void *data);
extern void pci_devs_phb_init(void);
extern void pci_devs_phb_init_dynamic(struct pci_controller *phb);
@@ -76,7 +79,6 @@ static inline const char *eeh_driver_name(struct pci_dev *pdev)
#endif /* CONFIG_EEH */
#else /* CONFIG_PCI */
-static inline void find_and_init_phbs(void) { }
static inline void init_pci_config_tokens(void) { }
#endif /* !CONFIG_PCI */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 7e4612528546..dd0fc18d8103 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -637,105 +637,105 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
/* AltiVec Registers (VPRs) */
-#define vr0 0
-#define vr1 1
-#define vr2 2
-#define vr3 3
-#define vr4 4
-#define vr5 5
-#define vr6 6
-#define vr7 7
-#define vr8 8
-#define vr9 9
-#define vr10 10
-#define vr11 11
-#define vr12 12
-#define vr13 13
-#define vr14 14
-#define vr15 15
-#define vr16 16
-#define vr17 17
-#define vr18 18
-#define vr19 19
-#define vr20 20
-#define vr21 21
-#define vr22 22
-#define vr23 23
-#define vr24 24
-#define vr25 25
-#define vr26 26
-#define vr27 27
-#define vr28 28
-#define vr29 29
-#define vr30 30
-#define vr31 31
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
/* VSX Registers (VSRs) */
-#define vsr0 0
-#define vsr1 1
-#define vsr2 2
-#define vsr3 3
-#define vsr4 4
-#define vsr5 5
-#define vsr6 6
-#define vsr7 7
-#define vsr8 8
-#define vsr9 9
-#define vsr10 10
-#define vsr11 11
-#define vsr12 12
-#define vsr13 13
-#define vsr14 14
-#define vsr15 15
-#define vsr16 16
-#define vsr17 17
-#define vsr18 18
-#define vsr19 19
-#define vsr20 20
-#define vsr21 21
-#define vsr22 22
-#define vsr23 23
-#define vsr24 24
-#define vsr25 25
-#define vsr26 26
-#define vsr27 27
-#define vsr28 28
-#define vsr29 29
-#define vsr30 30
-#define vsr31 31
-#define vsr32 32
-#define vsr33 33
-#define vsr34 34
-#define vsr35 35
-#define vsr36 36
-#define vsr37 37
-#define vsr38 38
-#define vsr39 39
-#define vsr40 40
-#define vsr41 41
-#define vsr42 42
-#define vsr43 43
-#define vsr44 44
-#define vsr45 45
-#define vsr46 46
-#define vsr47 47
-#define vsr48 48
-#define vsr49 49
-#define vsr50 50
-#define vsr51 51
-#define vsr52 52
-#define vsr53 53
-#define vsr54 54
-#define vsr55 55
-#define vsr56 56
-#define vsr57 57
-#define vsr58 58
-#define vsr59 59
-#define vsr60 60
-#define vsr61 61
-#define vsr62 62
-#define vsr63 63
+#define vs0 0
+#define vs1 1
+#define vs2 2
+#define vs3 3
+#define vs4 4
+#define vs5 5
+#define vs6 6
+#define vs7 7
+#define vs8 8
+#define vs9 9
+#define vs10 10
+#define vs11 11
+#define vs12 12
+#define vs13 13
+#define vs14 14
+#define vs15 15
+#define vs16 16
+#define vs17 17
+#define vs18 18
+#define vs19 19
+#define vs20 20
+#define vs21 21
+#define vs22 22
+#define vs23 23
+#define vs24 24
+#define vs25 25
+#define vs26 26
+#define vs27 27
+#define vs28 28
+#define vs29 29
+#define vs30 30
+#define vs31 31
+#define vs32 32
+#define vs33 33
+#define vs34 34
+#define vs35 35
+#define vs36 36
+#define vs37 37
+#define vs38 38
+#define vs39 39
+#define vs40 40
+#define vs41 41
+#define vs42 42
+#define vs43 43
+#define vs44 44
+#define vs45 45
+#define vs46 46
+#define vs47 47
+#define vs48 48
+#define vs49 49
+#define vs50 50
+#define vs51 51
+#define vs52 52
+#define vs53 53
+#define vs54 54
+#define vs55 55
+#define vs56 56
+#define vs57 57
+#define vs58 58
+#define vs59 59
+#define vs60 60
+#define vs61 61
+#define vs62 62
+#define vs63 63
/* SPE Registers (EVPRs) */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 2e23e92a4372..7a4ede16b283 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -4,6 +4,7 @@
#include <linux/spinlock.h>
#include <asm/page.h>
+#include <linux/time.h>
/*
* Definitions for talking to the RTAS on CHRP machines.
@@ -273,6 +274,7 @@ inline uint32_t rtas_ext_event_company_id(struct rtas_ext_event_log_v6 *ext_log)
#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
+#define PSERIES_ELOG_SECT_ID_HOTPLUG (('H' << 8) | 'P')
/* Vendor specific Platform Event Log Format, Version 6, section header */
struct pseries_errorlog {
@@ -296,6 +298,31 @@ inline uint16_t pseries_errorlog_length(struct pseries_errorlog *sect)
return be16_to_cpu(sect->length);
}
+/* RTAS pseries hotplug errorlog section */
+struct pseries_hp_errorlog {
+ u8 resource;
+ u8 action;
+ u8 id_type;
+ u8 reserved;
+ union {
+ __be32 drc_index;
+ __be32 drc_count;
+ char drc_name[1];
+ } _drc_u;
+};
+
+#define PSERIES_HP_ELOG_RESOURCE_CPU 1
+#define PSERIES_HP_ELOG_RESOURCE_MEM 2
+#define PSERIES_HP_ELOG_RESOURCE_SLOT 3
+#define PSERIES_HP_ELOG_RESOURCE_PHB 4
+
+#define PSERIES_HP_ELOG_ACTION_ADD 1
+#define PSERIES_HP_ELOG_ACTION_REMOVE 2
+
+#define PSERIES_HP_ELOG_ID_DRC_NAME 1
+#define PSERIES_HP_ELOG_ID_DRC_INDEX 2
+#define PSERIES_HP_ELOG_ID_DRC_COUNT 3
+
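For illustration only, here is a hedged sketch of filling this section to request that two LMBs of memory be added by count; the function name is made up, and how the structure then reaches the hotplug code added elsewhere in this series is not shown:

/* Hedged sketch: request a memory add of two LMBs by DRC count. */
static void example_fill_hp_elog(struct pseries_hp_errorlog *hp_elog)
{
	hp_elog->resource	= PSERIES_HP_ELOG_RESOURCE_MEM;
	hp_elog->action		= PSERIES_HP_ELOG_ACTION_ADD;
	hp_elog->id_type	= PSERIES_HP_ELOG_ID_DRC_COUNT;
	hp_elog->_drc_u.drc_count = cpu_to_be32(2);
}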
struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
uint16_t section_id);
@@ -327,7 +354,7 @@ extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data);
extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data);
extern int rtas_online_cpus_mask(cpumask_var_t cpus);
extern int rtas_offline_cpus_mask(cpumask_var_t cpus);
-extern int rtas_ibm_suspend_me(u64 handle, int *vasi_return);
+extern int rtas_ibm_suspend_me(u64 handle);
struct rtc_time;
extern unsigned long rtas_get_boot_time(void);
@@ -343,8 +370,12 @@ extern int early_init_dt_scan_rtas(unsigned long node,
extern void pSeries_log_error(char *buf, unsigned int err_type, int fatal);
#ifdef CONFIG_PPC_PSERIES
+extern time64_t last_rtas_event;
+extern int clobbering_unread_rtas_event(void);
extern int pseries_devicetree_update(s32 scope);
extern void post_mobility_fixup(void);
+#else
+static inline int clobbering_unread_rtas_event(void) { return 0; }
#endif
#ifdef CONFIG_PPC_RTAS_DAEMON
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index fbdf18cf954c..e9d384cbd021 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -7,7 +7,6 @@
extern void ppc_printk_progress(char *s, unsigned short hex);
extern unsigned int rtas_data;
-extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long long memory_limit;
extern unsigned long klimit;
extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index d607df5081a7..825663c30945 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -42,7 +42,7 @@ struct smp_ops_t {
#ifdef CONFIG_PPC_SMP_MUXED_IPI
void (*cause_ipi)(int cpu, unsigned long data);
#endif
- int (*probe)(void);
+ void (*probe)(void);
int (*kick_cpu)(int nr);
void (*setup_cpu)(int nr);
void (*bringup_done)(void);
@@ -125,7 +125,6 @@ extern irqreturn_t smp_ipi_demux(void);
void smp_init_pSeries(void);
void smp_init_cell(void);
-void smp_init_celleb(void);
void smp_setup_cpu_maps(void);
extern int __cpu_disable(void);
@@ -175,7 +174,7 @@ static inline void set_hard_smp_processor_id(int cpu, int phys)
extern int smt_enabled_at_boot;
-extern int smp_mpic_probe(void);
+extern void smp_mpic_probe(void);
extern void smp_mpic_setup_cpu(int cpu);
extern int smp_generic_kick_cpu(int nr);
extern int smp_generic_cpu_bootable(unsigned int nr);
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
index 96f59de61855..487e09077a3e 100644
--- a/arch/powerpc/include/asm/swab.h
+++ b/arch/powerpc/include/asm/swab.h
@@ -9,30 +9,4 @@
#include <uapi/asm/swab.h>
-static __inline__ __u16 ld_le16(const volatile __u16 *addr)
-{
- __u16 val;
-
- __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
- return val;
-}
-
-static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
-{
- __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
-static __inline__ __u32 ld_le32(const volatile __u32 *addr)
-{
- __u32 val;
-
- __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
- return val;
-}
-
-static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
-{
- __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
-}
-
#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 91062eef582f..f1863a138b4a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -367,3 +367,4 @@ SYSCALL_SPU(getrandom)
SYSCALL_SPU(memfd_create)
SYSCALL_SPU(bpf)
COMPAT_SYS(execveat)
+PPC64ONLY(switch_endian)
diff --git a/arch/powerpc/include/asm/ucc_slow.h b/arch/powerpc/include/asm/ucc_slow.h
index c44131e68e11..233ef5fe5fde 100644
--- a/arch/powerpc/include/asm/ucc_slow.h
+++ b/arch/powerpc/include/asm/ucc_slow.h
@@ -251,19 +251,6 @@ void ucc_slow_enable(struct ucc_slow_private * uccs, enum comm_dir mode);
*/
void ucc_slow_disable(struct ucc_slow_private * uccs, enum comm_dir mode);
-/* ucc_slow_poll_transmitter_now
- * Immediately forces a poll of the transmitter for data to be sent.
- * Typically, the hardware performs a periodic poll for data that the
- * transmit routine has set up to be transmitted. In cases where
- * this polling cycle is not soon enough, this optional routine can
- * be invoked to force a poll right away, instead. Proper use for
- * each transmission for which this functionality is desired is to
- * call the transmit routine and then this routine right after.
- *
- * uccs - (In) pointer to the slow UCC structure.
- */
-void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs);
-
/* ucc_slow_graceful_stop_tx
* Smoothly stops transmission on a specified slow UCC.
*
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 36b79c31eedd..f4f8b667d75b 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 363
+#define __NR_syscalls 364
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
index e5f8dd366212..ab3acd2f2786 100644
--- a/arch/powerpc/include/asm/vga.h
+++ b/arch/powerpc/include/asm/vga.h
@@ -25,12 +25,12 @@
static inline void scr_writew(u16 val, volatile u16 *addr)
{
- st_le16(addr, val);
+ *addr = cpu_to_le16(val);
}
static inline u16 scr_readw(volatile const u16 *addr)
{
- return ld_le16(addr);
+ return le16_to_cpu(*addr);
}
#define VT_BUF_HAVE_MEMCPYW
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 6997f4a271df..0e25bdb190bb 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -146,7 +146,7 @@ extern void xics_update_irq_servers(void);
extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join);
extern void xics_mask_unknown_vec(unsigned int vec);
extern irqreturn_t xics_ipi_dispatch(int cpu);
-extern int xics_smp_probe(void);
+extern void xics_smp_probe(void);
extern void xics_register_ics(struct ics *ics);
extern void xics_teardown_cpu(void);
extern void xics_kexec_teardown_cpu(int secondary);
diff --git a/arch/powerpc/include/uapi/asm/ptrace.h b/arch/powerpc/include/uapi/asm/ptrace.h
index 77d2ed35b111..8036b385417d 100644
--- a/arch/powerpc/include/uapi/asm/ptrace.h
+++ b/arch/powerpc/include/uapi/asm/ptrace.h
@@ -136,7 +136,7 @@ struct pt_regs {
#endif /* __powerpc64__ */
/*
- * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
+ * Get/set all the altivec registers v0..v31, vscr, vrsave, in one go.
* The transfer totals 34 quadwords. Quadwords 0-31 contain the
* corresponding vector registers. Quadword 32 contains the vscr as the
* last word (offset 12) within that quadword. Quadword 33 contains the
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 5d836b7c1176..5047659815a5 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -11,7 +11,7 @@
#define TM_CAUSE_RESCHED 0xde
#define TM_CAUSE_TLBI 0xdc
#define TM_CAUSE_FAC_UNAV 0xda
-#define TM_CAUSE_SYSCALL 0xd8 /* future use */
+#define TM_CAUSE_SYSCALL 0xd8
#define TM_CAUSE_MISC 0xd6 /* future use */
#define TM_CAUSE_SIGNAL 0xd4
#define TM_CAUSE_ALIGNMENT 0xd2
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index ef5b5b1f3123..e4aa173dae62 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -385,5 +385,6 @@
#define __NR_memfd_create 360
#define __NR_bpf 361
#define __NR_execveat 362
+#define __NR_switch_endian 363
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index 502cf69b6c89..c1ebbdaac28f 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -33,7 +33,8 @@ obj-y := cputable.o ptrace.o syscalls.o \
signal.o sysfs.o cacheinfo.o time.o \
prom.o traps.o setup-common.o \
udbg.o misc.o io.o dma.o \
- misc_$(CONFIG_WORD_SIZE).o vdso32/
+ misc_$(CONFIG_WORD_SIZE).o vdso32/ \
+ of_platform.o prom_parse.o
obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \
signal_64.o ptrace32.o \
paca.o nvram_64.o firmware.o
@@ -47,7 +48,6 @@ obj-$(CONFIG_PPC64) += vdso64/
obj-$(CONFIG_ALTIVEC) += vecemu.o
obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o
-obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o
procfs-y := proc_powerpc.o
obj-$(CONFIG_PROC_FS) += $(procfs-y)
rtaspci-$(CONFIG_PPC64)-$(CONFIG_PCI) := rtas_pci.o
diff --git a/arch/powerpc/kernel/cacheinfo.c b/arch/powerpc/kernel/cacheinfo.c
index ae77b7e59889..c641983bbdd6 100644
--- a/arch/powerpc/kernel/cacheinfo.c
+++ b/arch/powerpc/kernel/cacheinfo.c
@@ -61,12 +61,22 @@ struct cache_type_info {
};
/* These are used to index the cache_type_info array. */
-#define CACHE_TYPE_UNIFIED 0
-#define CACHE_TYPE_INSTRUCTION 1
-#define CACHE_TYPE_DATA 2
+#define CACHE_TYPE_UNIFIED 0 /* cache-size, cache-block-size, etc. */
+#define CACHE_TYPE_UNIFIED_D 1 /* d-cache-size, d-cache-block-size, etc */
+#define CACHE_TYPE_INSTRUCTION 2
+#define CACHE_TYPE_DATA 3
static const struct cache_type_info cache_type_info[] = {
{
+ /* Embedded systems that use cache-size, cache-block-size,
+ * etc. for the Unified (typically L2) cache. */
+ .name = "Unified",
+ .size_prop = "cache-size",
+ .line_size_props = { "cache-line-size",
+ "cache-block-size", },
+ .nr_sets_prop = "cache-sets",
+ },
+ {
/* PowerPC Processor binding says the [di]-cache-*
* must be equal on unified caches, so just use
* d-cache properties. */
@@ -293,7 +303,8 @@ static struct cache *cache_find_first_sibling(struct cache *cache)
{
struct cache *iter;
- if (cache->type == CACHE_TYPE_UNIFIED)
+ if (cache->type == CACHE_TYPE_UNIFIED ||
+ cache->type == CACHE_TYPE_UNIFIED_D)
return cache;
list_for_each_entry(iter, &cache_list, list)
@@ -324,16 +335,29 @@ static bool cache_node_is_unified(const struct device_node *np)
return of_get_property(np, "cache-unified", NULL);
}
-static struct cache *cache_do_one_devnode_unified(struct device_node *node,
- int level)
+/*
+ * Unified caches can have two different sets of tags. Most embedded
+ * use cache-size, etc. for the unified cache size, but open firmware systems
+ * use d-cache-size, etc. Check on initialization for which type we have, and
+ * return the appropriate structure type. Assume it's embedded if it isn't
+ * open firmware. If it's yet a 3rd type, then there will be missing entries
+ * in /sys/devices/system/cpu/cpu0/cache/index2/, and this code will need
+ * to be extended further.
+ */
+static int cache_is_unified_d(const struct device_node *np)
{
- struct cache *cache;
+ return of_get_property(np,
+ cache_type_info[CACHE_TYPE_UNIFIED_D].size_prop, NULL) ?
+ CACHE_TYPE_UNIFIED_D : CACHE_TYPE_UNIFIED;
+}
+/*
+ */
+static struct cache *cache_do_one_devnode_unified(struct device_node *node, int level)
+{
pr_debug("creating L%d ucache for %s\n", level, node->full_name);
- cache = new_cache(CACHE_TYPE_UNIFIED, level, node);
-
- return cache;
+ return new_cache(cache_is_unified_d(node), level, node);
}
static struct cache *cache_do_one_devnode_split(struct device_node *node,
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 46733535cc0b..9c9b7411b28b 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -137,15 +137,11 @@ __init_HFSCR:
/*
* Clear the TLB using the specified IS form of tlbiel instruction
* (invalidate by congruence class). P7 has 128 CCs., P8 has 512.
- *
- * r3 = IS field
*/
__init_tlb_power7:
- li r3,0xc00 /* IS field = 0b11 */
-_GLOBAL(__flush_tlb_power7)
li r6,128
mtctr r6
- mr r7,r3 /* IS field */
+ li r7,0xc00 /* IS field = 0b11 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
@@ -154,11 +150,9 @@ _GLOBAL(__flush_tlb_power7)
1: blr
__init_tlb_power8:
- li r3,0xc00 /* IS field = 0b11 */
-_GLOBAL(__flush_tlb_power8)
li r6,512
mtctr r6
- mr r7,r3 /* IS field */
+ li r7,0xc00 /* IS field = 0b11 */
ptesync
2: tlbiel r7
addi r7,r7,0x1000
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index f83046878336..60262fdf35ba 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -71,8 +71,8 @@ extern void __restore_cpu_power7(void);
extern void __setup_cpu_power8(unsigned long offset, struct cpu_spec* spec);
extern void __restore_cpu_power8(void);
extern void __restore_cpu_a2(void);
-extern void __flush_tlb_power7(unsigned long inval_selector);
-extern void __flush_tlb_power8(unsigned long inval_selector);
+extern void __flush_tlb_power7(unsigned int action);
+extern void __flush_tlb_power8(unsigned int action);
extern long __machine_check_early_realmode_p7(struct pt_regs *regs);
extern long __machine_check_early_realmode_p8(struct pt_regs *regs);
#endif /* CONFIG_PPC64 */
diff --git a/arch/powerpc/kernel/dma-swiotlb.c b/arch/powerpc/kernel/dma-swiotlb.c
index 735979764cd4..6e8d764ce47b 100644
--- a/arch/powerpc/kernel/dma-swiotlb.c
+++ b/arch/powerpc/kernel/dma-swiotlb.c
@@ -116,16 +116,13 @@ void __init swiotlb_detect_4g(void)
}
}
-static int __init swiotlb_late_init(void)
+static int __init check_swiotlb_enabled(void)
{
- if (ppc_swiotlb_enable) {
+ if (ppc_swiotlb_enable)
swiotlb_print_info();
- set_pci_dma_ops(&swiotlb_dma_ops);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
- } else {
+ else
swiotlb_free();
- }
return 0;
}
-subsys_initcall(swiotlb_late_init);
+subsys_initcall(check_swiotlb_enabled);
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 3b2252e7731b..a4c62eb0ee48 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -164,30 +164,34 @@ __setup("eeh=", eeh_setup);
*/
static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
{
- struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
u32 cfg;
int cap, i;
int n = 0, l = 0;
char buffer[128];
- n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
- pr_warn("EEH: of node=%s\n", dn->full_name);
+ n += scnprintf(buf+n, len-n, "%04x:%02x:%02x:%01x\n",
+ edev->phb->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
+ pr_warn("EEH: of node=%04x:%02x:%02x:%01x\n",
+ edev->phb->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn));
- eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
+ eeh_ops->read_config(pdn, PCI_VENDOR_ID, 4, &cfg);
n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
pr_warn("EEH: PCI device/vendor: %08x\n", cfg);
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
+ eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cfg);
n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
pr_warn("EEH: PCI cmd/status register: %08x\n", cfg);
/* Gather bridge-specific registers */
if (edev->mode & EEH_DEV_BRIDGE) {
- eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
+ eeh_ops->read_config(pdn, PCI_SEC_STATUS, 2, &cfg);
n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
pr_warn("EEH: Bridge secondary status: %04x\n", cfg);
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg);
n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
pr_warn("EEH: Bridge control: %04x\n", cfg);
}
@@ -195,11 +199,11 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
/* Dump out the PCI-X command and status regs */
cap = edev->pcix_cap;
if (cap) {
- eeh_ops->read_config(dn, cap, 4, &cfg);
+ eeh_ops->read_config(pdn, cap, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
pr_warn("EEH: PCI-X cmd: %08x\n", cfg);
- eeh_ops->read_config(dn, cap+4, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4, 4, &cfg);
n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
pr_warn("EEH: PCI-X status: %08x\n", cfg);
}
@@ -211,7 +215,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E capabilities and status follow:\n");
for (i=0; i<=8; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -238,7 +242,7 @@ static size_t eeh_dump_dev_log(struct eeh_dev *edev, char *buf, size_t len)
pr_warn("EEH: PCI-E AER capability register set follows:\n");
for (i=0; i<=13; i++) {
- eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
+ eeh_ops->read_config(pdn, cap+4*i, 4, &cfg);
n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
if ((i % 4) == 0) {
@@ -414,11 +418,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
int ret;
int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
unsigned long flags;
- struct device_node *dn;
+ struct pci_dn *pdn;
struct pci_dev *dev;
struct eeh_pe *pe, *parent_pe, *phb_pe;
int rc = 0;
- const char *location;
+ const char *location = NULL;
eeh_stats.total_mmio_ffs++;
@@ -429,15 +433,14 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
eeh_stats.no_dn++;
return 0;
}
- dn = eeh_dev_to_of_node(edev);
dev = eeh_dev_to_pci_dev(edev);
pe = eeh_dev_to_pe(edev);
/* Access to IO BARs might get this far and still not want checking. */
if (!pe) {
eeh_stats.ignored_check++;
- pr_debug("EEH: Ignored check for %s %s\n",
- eeh_pci_name(dev), dn->full_name);
+ pr_debug("EEH: Ignored check for %s\n",
+ eeh_pci_name(dev));
return 0;
}
@@ -473,10 +476,13 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
if (pe->state & EEH_PE_ISOLATED) {
pe->check_count++;
if (pe->check_count % EEH_MAX_FAILS == 0) {
- location = of_get_property(dn, "ibm,loc-code", NULL);
+ pdn = eeh_dev_to_pdn(edev);
+ if (pdn->node)
+ location = of_get_property(pdn->node, "ibm,loc-code", NULL);
printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
"location=%s driver=%s pci addr=%s\n",
- pe->check_count, location,
+ pe->check_count,
+ location ? location : "unknown",
eeh_driver_name(dev), eeh_pci_name(dev));
printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
eeh_driver_name(dev));
@@ -667,6 +673,55 @@ int eeh_pci_enable(struct eeh_pe *pe, int function)
return rc;
}
+static void *eeh_disable_and_save_dev_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
+ struct pci_dev *dev = userdata;
+
+ /*
+ * The caller should have disabled and saved the
+ * state for the specified device
+ */
+ if (!pdev || pdev == dev)
+ return NULL;
+
+ /* Ensure we have D0 power state */
+ pci_set_power_state(pdev, PCI_D0);
+
+ /* Save device state */
+ pci_save_state(pdev);
+
+ /*
+ * Disable device to avoid any DMA traffic and
+ * interrupt from the device
+ */
+ pci_write_config_word(pdev, PCI_COMMAND, PCI_COMMAND_INTX_DISABLE);
+
+ return NULL;
+}
+
+static void *eeh_restore_dev_state(void *data, void *userdata)
+{
+ struct eeh_dev *edev = data;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
+ struct pci_dev *pdev = eeh_dev_to_pci_dev(edev);
+ struct pci_dev *dev = userdata;
+
+ if (!pdev)
+ return NULL;
+
+ /* Apply customization from firmware */
+ if (pdn && eeh_ops->restore_config)
+ eeh_ops->restore_config(pdn);
+
+ /* The caller should restore state for the specified device */
+ if (pdev != dev)
+ pci_save_state(pdev);
+
+ return NULL;
+}
+
/**
* pcibios_set_pcie_slot_reset - Set PCI-E reset state
* @dev: pci device struct
@@ -689,13 +744,19 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
switch (state) {
case pcie_deassert_reset:
eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
+ eeh_unfreeze_pe(pe, false);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+ eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
break;
case pcie_hot_reset:
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+ eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
+ eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
+ eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
@@ -815,15 +876,15 @@ out:
*/
void eeh_save_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn;
int i;
- struct device_node *dn;
- if (!edev)
+ pdn = eeh_dev_to_pdn(edev);
+ if (!pdn)
return;
- dn = eeh_dev_to_of_node(edev);
for (i = 0; i < 16; i++)
- eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
+ eeh_ops->read_config(pdn, i * 4, 4, &edev->config_space[i]);
/*
* For PCI bridges including root port, we need enable bus
@@ -914,7 +975,7 @@ static struct notifier_block eeh_reboot_nb = {
int eeh_init(void)
{
struct pci_controller *hose, *tmp;
- struct device_node *phb;
+ struct pci_dn *pdn;
static int cnt = 0;
int ret = 0;
@@ -949,20 +1010,9 @@ int eeh_init(void)
return ret;
/* Enable EEH for all adapters */
- if (eeh_has_flag(EEH_PROBE_MODE_DEVTREE)) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node) {
- phb = hose->dn;
- traverse_pci_devices(phb, eeh_ops->of_probe, NULL);
- }
- } else if (eeh_has_flag(EEH_PROBE_MODE_DEV)) {
- list_for_each_entry_safe(hose, tmp,
- &hose_list, list_node)
- pci_walk_bus(hose->bus, eeh_ops->dev_probe, NULL);
- } else {
- pr_warn("%s: Invalid probe mode %x",
- __func__, eeh_subsystem_flags);
- return -EINVAL;
+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
+ pdn = hose->pci_data;
+ traverse_pci_dn(pdn, eeh_ops->probe, NULL);
}
/*
@@ -987,8 +1037,8 @@ int eeh_init(void)
core_initcall_sync(eeh_init);
/**
- * eeh_add_device_early - Enable EEH for the indicated device_node
- * @dn: device node for which to set up EEH
+ * eeh_add_device_early - Enable EEH for the indicated device node
+ * @pdn: PCI device node for which to set up EEH
*
* This routine must be used to perform EEH initialization for PCI
* devices that were added after system boot (e.g. hotplug, dlpar).
@@ -998,44 +1048,41 @@ core_initcall_sync(eeh_init);
* on the CEC architecture, the type of the device, on earlier boot
* command-line arguments, etc.
*/
-void eeh_add_device_early(struct device_node *dn)
+void eeh_add_device_early(struct pci_dn *pdn)
{
struct pci_controller *phb;
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
- /*
- * If we're doing EEH probe based on PCI device, we
- * would delay the probe until late stage because
- * the PCI device isn't available this moment.
- */
- if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+ if (!edev || !eeh_enabled())
return;
- if (!of_node_to_eeh_dev(dn))
- return;
- phb = of_node_to_eeh_dev(dn)->phb;
-
/* USB Bus children of PCI devices will not have BUID's */
- if (NULL == phb || 0 == phb->buid)
+ phb = edev->phb;
+ if (NULL == phb ||
+ (eeh_has_flag(EEH_PROBE_MODE_DEVTREE) && 0 == phb->buid))
return;
- eeh_ops->of_probe(dn, NULL);
+ eeh_ops->probe(pdn, NULL);
}
/**
* eeh_add_device_tree_early - Enable EEH for the indicated device
- * @dn: device node
+ * @pdn: PCI device node
*
* This routine must be used to perform EEH initialization for the
* indicated PCI device that was added after system boot (e.g.
* hotplug, dlpar).
*/
-void eeh_add_device_tree_early(struct device_node *dn)
+void eeh_add_device_tree_early(struct pci_dn *pdn)
{
- struct device_node *sib;
+ struct pci_dn *n;
- for_each_child_of_node(dn, sib)
- eeh_add_device_tree_early(sib);
- eeh_add_device_early(dn);
+ if (!pdn)
+ return;
+
+ list_for_each_entry(n, &pdn->child_list, list)
+ eeh_add_device_tree_early(n);
+ eeh_add_device_early(pdn);
}
EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
@@ -1048,7 +1095,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
*/
void eeh_add_device_late(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
if (!dev || !eeh_enabled())
@@ -1056,8 +1103,8 @@ void eeh_add_device_late(struct pci_dev *dev)
pr_debug("EEH: Adding device %s\n", pci_name(dev));
- dn = pci_device_to_OF_node(dev);
- edev = of_node_to_eeh_dev(dn);
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ edev = pdn_to_eeh_dev(pdn);
if (edev->pdev == dev) {
pr_debug("EEH: Already referenced !\n");
return;
@@ -1089,13 +1136,6 @@ void eeh_add_device_late(struct pci_dev *dev)
edev->pdev = dev;
dev->dev.archdata.edev = edev;
- /*
- * We have to do the EEH probe here because the PCI device
- * hasn't been created yet in the early stage.
- */
- if (eeh_has_flag(EEH_PROBE_MODE_DEV))
- eeh_ops->dev_probe(dev, NULL);
-
eeh_addr_cache_insert_dev(dev);
}
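
The pci_dev to pci_dn to eeh_dev lookup introduced above (and repeated in eeh_cache.c below) could be captured in one small helper. This is a hypothetical consolidation rather than part of the patch, and the header locations are assumed:

#include <asm/eeh.h>
#include <asm/ppc-pci.h>

/* Hypothetical helper: resolve the EEH device for a PCI device */
static struct eeh_dev *example_pci_dev_to_eeh_dev(struct pci_dev *dev)
{
        struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);

        return pdn ? pdn_to_eeh_dev(pdn) : NULL;
}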
diff --git a/arch/powerpc/kernel/eeh_cache.c b/arch/powerpc/kernel/eeh_cache.c
index 07d8a2423a61..eeabeabea49c 100644
--- a/arch/powerpc/kernel/eeh_cache.c
+++ b/arch/powerpc/kernel/eeh_cache.c
@@ -171,30 +171,27 @@ eeh_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
static void __eeh_addr_cache_insert_dev(struct pci_dev *dev)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
int i;
- dn = pci_device_to_OF_node(dev);
- if (!dn) {
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ if (!pdn) {
pr_warn("PCI: no pci dn found for dev=%s\n",
pci_name(dev));
return;
}
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(pdn);
if (!edev) {
- pr_warn("PCI: no EEH dev found for dn=%s\n",
- dn->full_name);
+ pr_warn("PCI: no EEH dev found for %s\n",
+ pci_name(dev));
return;
}
/* Skip any devices for which EEH is not enabled. */
if (!edev->pe) {
-#ifdef DEBUG
- pr_info("PCI: skip building address cache for=%s - %s\n",
- pci_name(dev), dn->full_name);
-#endif
+ dev_dbg(&dev->dev, "EEH: Skip building address cache\n");
return;
}
@@ -282,18 +279,18 @@ void eeh_addr_cache_rmv_dev(struct pci_dev *dev)
*/
void eeh_addr_cache_build(void)
{
- struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
struct pci_dev *dev = NULL;
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
for_each_pci_dev(dev) {
- dn = pci_device_to_OF_node(dev);
- if (!dn)
+ pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ if (!pdn)
continue;
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(pdn);
if (!edev)
continue;
diff --git a/arch/powerpc/kernel/eeh_dev.c b/arch/powerpc/kernel/eeh_dev.c
index e5274ee9a75f..aabba94ff9cb 100644
--- a/arch/powerpc/kernel/eeh_dev.c
+++ b/arch/powerpc/kernel/eeh_dev.c
@@ -43,13 +43,13 @@
/**
* eeh_dev_init - Create EEH device according to OF node
- * @dn: device node
+ * @pdn: PCI device node
* @data: PHB
*
* It will create EEH device according to the given OF node. The function
* might be called by PCI emulation, DR, PHB hotplug.
*/
-void *eeh_dev_init(struct device_node *dn, void *data)
+void *eeh_dev_init(struct pci_dn *pdn, void *data)
{
struct pci_controller *phb = data;
struct eeh_dev *edev;
@@ -63,8 +63,8 @@ void *eeh_dev_init(struct device_node *dn, void *data)
}
/* Associate EEH device with OF node */
- PCI_DN(dn)->edev = edev;
- edev->dn = dn;
+ pdn->edev = edev;
+ edev->pdn = pdn;
edev->phb = phb;
INIT_LIST_HEAD(&edev->list);
@@ -80,16 +80,16 @@ void *eeh_dev_init(struct device_node *dn, void *data)
*/
void eeh_dev_phb_init_dynamic(struct pci_controller *phb)
{
- struct device_node *dn = phb->dn;
+ struct pci_dn *root = phb->pci_data;
/* EEH PE for PHB */
eeh_phb_pe_create(phb);
/* EEH device for PHB */
- eeh_dev_init(dn, phb);
+ eeh_dev_init(root, phb);
/* EEH devices for children OF nodes */
- traverse_pci_devices(dn, eeh_dev_init, phb);
+ traverse_pci_dn(root, eeh_dev_init, phb);
}
/**
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index d099540c0f56..24768ff3cb73 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -83,28 +83,6 @@ static inline void eeh_pcid_put(struct pci_dev *pdev)
module_put(pdev->driver->driver.owner);
}
-#if 0
-static void print_device_node_tree(struct pci_dn *pdn, int dent)
-{
- int i;
- struct device_node *pc;
-
- if (!pdn)
- return;
- for (i = 0; i < dent; i++)
- printk(" ");
- printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
- pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
- pdn->eeh_pe_config_addr, pdn->node->full_name);
- dent += 3;
- pc = pdn->node->child;
- while (pc) {
- print_device_node_tree(PCI_DN(pc), dent);
- pc = pc->sibling;
- }
-}
-#endif
-
/**
* eeh_disable_irq - Disable interrupt for the recovering device
* @dev: PCI device
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 1e4946c36f9e..35f0b62259bb 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -291,27 +291,25 @@ struct eeh_pe *eeh_pe_get(struct eeh_dev *edev)
*/
static struct eeh_pe *eeh_pe_get_parent(struct eeh_dev *edev)
{
- struct device_node *dn;
struct eeh_dev *parent;
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/*
* It is possible that the indirect parent EEH device
* already has an associated PE, but the direct parent
* EEH device doesn't have one yet.
*/
- dn = edev->dn->parent;
- while (dn) {
+ pdn = pdn ? pdn->parent : NULL;
+ while (pdn) {
/* We're poking out of PCI territory */
- if (!PCI_DN(dn)) return NULL;
-
- parent = of_node_to_eeh_dev(dn);
- /* We're poking out of PCI territory */
- if (!parent) return NULL;
+ parent = pdn_to_eeh_dev(pdn);
+ if (!parent)
+ return NULL;
if (parent->pe)
return parent->pe;
- dn = dn->parent;
+ pdn = pdn->parent;
}
return NULL;
@@ -330,6 +328,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
{
struct eeh_pe *pe, *parent;
+ /* Check if the PE number is valid */
+ if (!eeh_has_flag(EEH_VALID_PE_ZERO) && !edev->pe_config_addr) {
+ pr_err("%s: Invalid PE#0 for edev 0x%x on PHB#%d\n",
+ __func__, edev->config_addr, edev->phb->global_number);
+ return -EINVAL;
+ }
+
/*
* Check whether the PE already exists according
* to the PE address. If it already exists, the
@@ -338,21 +343,18 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
*/
pe = eeh_pe_get(edev);
if (pe && !(pe->type & EEH_PE_INVALID)) {
- if (!edev->pe_config_addr) {
- pr_err("%s: PE with addr 0x%x already exists\n",
- __func__, edev->config_addr);
- return -EEXIST;
- }
-
/* Mark the PE as type of PCI bus */
pe->type = EEH_PE_BUS;
edev->pe = pe;
/* Put the edev to PE */
list_add_tail(&edev->list, &pe->edevs);
- pr_debug("EEH: Add %s to Bus PE#%x\n",
- edev->dn->full_name, pe->addr);
-
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to Bus PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr);
return 0;
} else if (pe && (pe->type & EEH_PE_INVALID)) {
list_add_tail(&edev->list, &pe->edevs);
@@ -368,9 +370,14 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
parent->type &= ~(EEH_PE_INVALID | EEH_PE_KEEP);
parent = parent->parent;
}
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to Device "
+ "PE#%x, Parent PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr, pe->parent->addr);
return 0;
}
@@ -409,8 +416,13 @@ int eeh_add_to_parent_pe(struct eeh_dev *edev)
list_add_tail(&pe->child, &parent->child_list);
list_add_tail(&edev->list, &pe->edevs);
edev->pe = pe;
- pr_debug("EEH: Add %s to Device PE#%x, Parent PE#%x\n",
- edev->dn->full_name, pe->addr, pe->parent->addr);
+ pr_debug("EEH: Add %04x:%02x:%02x.%01x to "
+ "Device PE#%x, Parent PE#%x\n",
+ edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF),
+ pe->addr, pe->parent->addr);
return 0;
}
@@ -430,8 +442,11 @@ int eeh_rmv_from_parent_pe(struct eeh_dev *edev)
int cnt;
if (!edev->pe) {
- pr_debug("%s: No PE found for EEH device %s\n",
- __func__, edev->dn->full_name);
+ pr_debug("%s: No PE found for device %04x:%02x:%02x.%01x\n",
+ __func__, edev->phb->global_number,
+ edev->config_addr >> 8,
+ PCI_SLOT(edev->config_addr & 0xFF),
+ PCI_FUNC(edev->config_addr & 0xFF));
return -EEXIST;
}
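
The B/D/F formatting repeated in the pr_debug() calls above could be factored into a helper along these lines; the name is hypothetical and this is only a sketch of the pattern already visible in the hunks:

#include <linux/kernel.h>
#include <linux/pci.h>
#include <asm/eeh.h>

/* Hypothetical helper: render an eeh_dev's address as dddd:bb:ss.f */
static void example_eeh_edev_bdf(struct eeh_dev *edev, char *buf, size_t len)
{
        snprintf(buf, len, "%04x:%02x:%02x.%01x",
                 edev->phb->global_number,
                 edev->config_addr >> 8,
                 PCI_SLOT(edev->config_addr & 0xFF),
                 PCI_FUNC(edev->config_addr & 0xFF));
}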
@@ -653,9 +668,9 @@ void eeh_pe_state_clear(struct eeh_pe *pe, int state)
* blocked on the normal path during this stage. So we need to utilize
* eeh operations, which is always permitted.
*/
-static void eeh_bridge_check_link(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_bridge_check_link(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int cap;
uint32_t val;
int timeout = 0;
@@ -675,32 +690,32 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
/* Check slot status */
cap = edev->pcie_cap;
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTSTA, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTSTA, 2, &val);
if (!(val & PCI_EXP_SLTSTA_PDS)) {
pr_debug(" No card in the slot (0x%04x) !\n", val);
return;
}
/* Check power status if we have the capability */
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTCAP, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCAP, 2, &val);
if (val & PCI_EXP_SLTCAP_PCP) {
- eeh_ops->read_config(dn, cap + PCI_EXP_SLTCTL, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_SLTCTL, 2, &val);
if (val & PCI_EXP_SLTCTL_PCC) {
pr_debug(" In power-off state, power it on ...\n");
val &= ~(PCI_EXP_SLTCTL_PCC | PCI_EXP_SLTCTL_PIC);
val |= (0x0100 & PCI_EXP_SLTCTL_PIC);
- eeh_ops->write_config(dn, cap + PCI_EXP_SLTCTL, 2, val);
+ eeh_ops->write_config(pdn, cap + PCI_EXP_SLTCTL, 2, val);
msleep(2 * 1000);
}
}
/* Enable link */
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKCTL, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCTL, 2, &val);
val &= ~PCI_EXP_LNKCTL_LD;
- eeh_ops->write_config(dn, cap + PCI_EXP_LNKCTL, 2, val);
+ eeh_ops->write_config(pdn, cap + PCI_EXP_LNKCTL, 2, val);
/* Check link */
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKCAP, 4, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKCAP, 4, &val);
if (!(val & PCI_EXP_LNKCAP_DLLLARC)) {
pr_debug(" No link reporting capability (0x%08x) \n", val);
msleep(1000);
@@ -713,7 +728,7 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
msleep(20);
timeout += 20;
- eeh_ops->read_config(dn, cap + PCI_EXP_LNKSTA, 2, &val);
+ eeh_ops->read_config(pdn, cap + PCI_EXP_LNKSTA, 2, &val);
if (val & PCI_EXP_LNKSTA_DLLLA)
break;
}
@@ -728,9 +743,9 @@ static void eeh_bridge_check_link(struct eeh_dev *edev,
#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
-static void eeh_restore_bridge_bars(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_restore_bridge_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
/*
@@ -738,49 +753,49 @@ static void eeh_restore_bridge_bars(struct eeh_dev *edev,
* Bus numbers and windows: 0x18 - 0x30
*/
for (i = 4; i < 13; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
/* Rom: 0x38 */
- eeh_ops->write_config(dn, 14*4, 4, edev->config_space[14]);
+ eeh_ops->write_config(pdn, 14*4, 4, edev->config_space[14]);
/* Cache line & Latency timer: 0xC 0xD */
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* Max latency, min grant, interrupt pin and line: 0x3C */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/* PCI Command: 0x4 */
- eeh_ops->write_config(dn, PCI_COMMAND, 4, edev->config_space[1]);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, edev->config_space[1]);
/* Check the PCIe link is ready */
- eeh_bridge_check_link(edev, dn);
+ eeh_bridge_check_link(edev);
}
-static void eeh_restore_device_bars(struct eeh_dev *edev,
- struct device_node *dn)
+static void eeh_restore_device_bars(struct eeh_dev *edev)
{
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
int i;
u32 cmd;
for (i = 4; i < 10; i++)
- eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
+ eeh_ops->write_config(pdn, i*4, 4, edev->config_space[i]);
/* 12 == Expansion ROM Address */
- eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
+ eeh_ops->write_config(pdn, 12*4, 4, edev->config_space[12]);
- eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
+ eeh_ops->write_config(pdn, PCI_CACHE_LINE_SIZE, 1,
SAVED_BYTE(PCI_CACHE_LINE_SIZE));
- eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
+ eeh_ops->write_config(pdn, PCI_LATENCY_TIMER, 1,
SAVED_BYTE(PCI_LATENCY_TIMER));
/* max latency, min grant, interrupt pin and line */
- eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
+ eeh_ops->write_config(pdn, 15*4, 4, edev->config_space[15]);
/*
* Restore PERR & SERR bits, some devices require it,
* don't touch the other command bits
*/
- eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
+ eeh_ops->read_config(pdn, PCI_COMMAND, 4, &cmd);
if (edev->config_space[1] & PCI_COMMAND_PARITY)
cmd |= PCI_COMMAND_PARITY;
else
@@ -789,7 +804,7 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
cmd |= PCI_COMMAND_SERR;
else
cmd &= ~PCI_COMMAND_SERR;
- eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
+ eeh_ops->write_config(pdn, PCI_COMMAND, 4, cmd);
}
/**
@@ -804,16 +819,16 @@ static void eeh_restore_device_bars(struct eeh_dev *edev,
static void *eeh_restore_one_device_bars(void *data, void *flag)
{
struct eeh_dev *edev = (struct eeh_dev *)data;
- struct device_node *dn = eeh_dev_to_of_node(edev);
+ struct pci_dn *pdn = eeh_dev_to_pdn(edev);
/* Do special restore for bridges */
if (edev->mode & EEH_DEV_BRIDGE)
- eeh_restore_bridge_bars(edev, dn);
+ eeh_restore_bridge_bars(edev);
else
- eeh_restore_device_bars(edev, dn);
+ eeh_restore_device_bars(edev);
- if (eeh_ops->restore_config)
- eeh_ops->restore_config(dn);
+ if (eeh_ops->restore_config && pdn)
+ eeh_ops->restore_config(pdn);
return NULL;
}
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d180caf2d6de..8ca9434c40e6 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -34,6 +34,7 @@
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
+#include <asm/tm.h>
/*
* System calls.
@@ -145,6 +146,24 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
andi. r11,r10,_TIF_SYSCALL_DOTRACE
bne syscall_dotrace
.Lsyscall_dotrace_cont:
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+BEGIN_FTR_SECTION
+ b 1f
+END_FTR_SECTION_IFCLR(CPU_FTR_TM)
+ extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
+ beq+ 1f
+
+ /* Doom the transaction and don't perform the syscall: */
+ mfmsr r11
+ li r12, 1
+ rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG
+ mtmsrd r11, 0
+ li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
+ TABORT(R11)
+
+ b .Lsyscall_exit
+1:
+#endif
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
@@ -356,6 +375,11 @@ _GLOBAL(ppc64_swapcontext)
bl sys_swapcontext
b .Lsyscall_exit
+_GLOBAL(ppc_switch_endian)
+ bl save_nvgprs
+ bl sys_switch_endian
+ b .Lsyscall_exit
+
_GLOBAL(ret_from_fork)
bl schedule_tail
REST_NVGPRS(r1)
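
The new TM block above dooms an active transaction instead of running the syscall, so userspace sees the transaction fail and falls back to its non-transactional path. A hedged userspace sketch, assuming GCC's -mhtm builtins where __builtin_tbegin() returns nonzero when the transaction starts:

#include <htmintrin.h>
#include <unistd.h>

/* Sketch: a syscall inside a transaction aborts it with TM_CAUSE_SYSCALL */
pid_t tm_getpid(void)
{
        if (__builtin_tbegin(0)) {
                pid_t pid = getpid();   /* dooms the transaction; never commits */
                __builtin_tend(0);
                return pid;
        }

        /* Abort handler path: run the syscall non-transactionally */
        return getpid();
}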
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 05adc8bbdef8..eeaa0d5f69d5 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -94,6 +94,7 @@ _GLOBAL(power7_powersave_common)
beq 1f
addi r1,r1,INT_FRAME_SIZE
ld r0,16(r1)
+ li r3,0 /* Return 0 (no nap) */
mtlr r0
blr
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index b6f123ab90ed..2c647b1e62e4 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -28,6 +28,55 @@
#include <asm/mce.h>
#include <asm/machdep.h>
+static void flush_tlb_206(unsigned int num_sets, unsigned int action)
+{
+ unsigned long rb;
+ unsigned int i;
+
+ switch (action) {
+ case TLB_INVAL_SCOPE_GLOBAL:
+ rb = TLBIEL_INVAL_SET;
+ break;
+ case TLB_INVAL_SCOPE_LPID:
+ rb = TLBIEL_INVAL_SET_LPID;
+ break;
+ default:
+ BUG();
+ break;
+ }
+
+ asm volatile("ptesync" : : : "memory");
+ for (i = 0; i < num_sets; i++) {
+ asm volatile("tlbiel %0" : : "r" (rb));
+ rb += 1 << TLBIEL_INVAL_SET_SHIFT;
+ }
+ asm volatile("ptesync" : : : "memory");
+}
+
+/*
+ * Generic routine to flush TLB on power7. This routine is used as
+ * flush_tlb hook in cpu_spec for Power7 processor.
+ *
+ * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
+ * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
+ */
+void __flush_tlb_power7(unsigned int action)
+{
+ flush_tlb_206(POWER7_TLB_SETS, action);
+}
+
+/*
+ * Generic routine to flush TLB on power8. This routine is used as
+ * flush_tlb hook in cpu_spec for power8 processor.
+ *
+ * action => TLB_INVAL_SCOPE_GLOBAL: Invalidate all TLBs.
+ * TLB_INVAL_SCOPE_LPID: Invalidate TLB for current LPID.
+ */
+void __flush_tlb_power8(unsigned int action)
+{
+ flush_tlb_206(POWER8_TLB_SETS, action);
+}
+
/* flush SLBs and reload */
static void flush_and_reload_slb(void)
{
@@ -79,7 +128,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
}
if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
/* reset error bits */
dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
}
@@ -110,7 +159,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
break;
case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL);
handled = 1;
}
break;
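
The new flush_tlb hook takes an invalidation scope rather than a raw tlbiel set argument; besides the machine-check paths shown above, a hypervisor-side caller could use the LPID scope. A small hedged sketch (the surrounding function is an assumption, and the scope constants and cur_cpu_spec are taken to come from asm/cputable.h):

#include <asm/cputable.h>

/* Sketch: drop all TLB entries for the current LPID, if the CPU provides
 * a flush_tlb hook (wired to __flush_tlb_power7/8 in the cpu_spec tables).
 */
static void example_flush_lpid_tlb(void)
{
        if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
                cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
}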
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c
index 34f7c9b7cd96..1e703f8ebad4 100644
--- a/arch/powerpc/kernel/nvram_64.c
+++ b/arch/powerpc/kernel/nvram_64.c
@@ -26,6 +26,9 @@
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/kmsg_dump.h>
+#include <linux/pstore.h>
+#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -54,6 +57,680 @@ struct nvram_partition {
static LIST_HEAD(nvram_partitions);
+#ifdef CONFIG_PPC_PSERIES
+struct nvram_os_partition rtas_log_partition = {
+ .name = "ibm,rtas-log",
+ .req_size = 2079,
+ .min_size = 1055,
+ .index = -1,
+ .os_partition = true
+};
+#endif
+
+struct nvram_os_partition oops_log_partition = {
+ .name = "lnx,oops-log",
+ .req_size = 4000,
+ .min_size = 2000,
+ .index = -1,
+ .os_partition = true
+};
+
+static const char *nvram_os_partitions[] = {
+#ifdef CONFIG_PPC_PSERIES
+ "ibm,rtas-log",
+#endif
+ "lnx,oops-log",
+ NULL
+};
+
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason);
+
+static struct kmsg_dumper nvram_kmsg_dumper = {
+ .dump = oops_to_nvram
+};
+
+/*
+ * For capturing and compressing an oops or panic report...
+
+ * big_oops_buf[] holds the uncompressed text we're capturing.
+ *
+ * oops_buf[] holds the compressed text, preceded by an oops header.
+ * The oops header has a u16 holding the header version (to differentiate
+ * between the old and new format headers), followed by a u16 holding the
+ * length of the compressed* text (*or uncompressed, if compression fails) and
+ * a u64 holding the timestamp. oops_buf[] gets written to NVRAM.
+ *
+ * oops_log_info points to the header. oops_data points to the compressed text.
+ *
+ * +- oops_buf
+ * |                                   +- oops_data
+ * v                                   v
+ * +-----------+-----------+-----------+------------------------+
+ * | version   | length    | timestamp | text                   |
+ * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes)   |
+ * +-----------+-----------+-----------+------------------------+
+ * ^
+ * +- oops_log_info
+ *
+ * We preallocate these buffers during init to avoid kmalloc during oops/panic.
+ */
+static size_t big_oops_buf_sz;
+static char *big_oops_buf, *oops_buf;
+static char *oops_data;
+static size_t oops_data_sz;
+
+/* Compression parameters */
+#define COMPR_LEVEL 6
+#define WINDOW_BITS 12
+#define MEM_LEVEL 4
+static struct z_stream_s stream;
+
+#ifdef CONFIG_PSTORE
+#ifdef CONFIG_PPC_POWERNV
+static struct nvram_os_partition skiboot_partition = {
+ .name = "ibm,skiboot",
+ .index = -1,
+ .os_partition = false
+};
+#endif
+
+#ifdef CONFIG_PPC_PSERIES
+static struct nvram_os_partition of_config_partition = {
+ .name = "of-config",
+ .index = -1,
+ .os_partition = false
+};
+#endif
+
+static struct nvram_os_partition common_partition = {
+ .name = "common",
+ .index = -1,
+ .os_partition = false
+};
+
+static enum pstore_type_id nvram_type_ids[] = {
+ PSTORE_TYPE_DMESG,
+ PSTORE_TYPE_PPC_COMMON,
+ -1,
+ -1,
+ -1
+};
+static int read_type;
+#endif
+
+/* nvram_write_os_partition
+ *
+ * We need to buffer the error logs into nvram to ensure that we have
+ * the failure information to decode. If we have a severe error there
+ * is no way to guarantee that the OS or the machine is in a state to
+ * get back to user land and write the error to disk. For example if
+ * the SCSI device driver causes a Machine Check by writing to a bad
+ * IO address, there is no way of guaranteeing that the device driver
+ * is in any state that is would also be able to write the error data
+ * captured to disk, thus we buffer it in NVRAM for analysis on the
+ * next boot.
+ *
+ * In NVRAM the partition containing the error log buffer will look like:
+ * Header (in bytes):
+ * +-----------+----------+--------+------------+------------------+
+ * | signature | checksum | length | name | data |
+ * |0 |1 |2 3|4 15|16 length-1|
+ * +-----------+----------+--------+------------+------------------+
+ *
+ * The 'data' section would look like (in bytes):
+ * +--------------+------------+-----------------------------------+
+ * | event_logged | sequence # | error log                         |
+ * |0            3|4          7|8                  error_log_size-1|
+ * +--------------+------------+-----------------------------------+
+ *
+ * event_logged: 0 if event has not been logged to syslog, 1 if it has
+ * sequence #: The unique sequence # for each event. (until it wraps)
+ * error log: The error log from event_scan
+ */
+int nvram_write_os_partition(struct nvram_os_partition *part,
+ char *buff, int length,
+ unsigned int err_type,
+ unsigned int error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (part->index == -1)
+ return -ESPIPE;
+
+ if (length > part->size)
+ length = part->size;
+
+ info.error_type = cpu_to_be32(err_type);
+ info.seq_num = cpu_to_be32(error_log_cnt);
+
+ tmp_index = part->index;
+
+ rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ rc = ppc_md.nvram_write(buff, length, &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+/* nvram_read_partition
+ *
+ * Reads an nvram partition, up to 'length' bytes
+ */
+int nvram_read_partition(struct nvram_os_partition *part, char *buff,
+ int length, unsigned int *err_type,
+ unsigned int *error_log_cnt)
+{
+ int rc;
+ loff_t tmp_index;
+ struct err_log_info info;
+
+ if (part->index == -1)
+ return -1;
+
+ if (length > part->size)
+ length = part->size;
+
+ tmp_index = part->index;
+
+ if (part->os_partition) {
+ rc = ppc_md.nvram_read((char *)&info,
+ sizeof(struct err_log_info),
+ &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
+ }
+
+ rc = ppc_md.nvram_read(buff, length, &tmp_index);
+ if (rc <= 0) {
+ pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
+ return rc;
+ }
+
+ if (part->os_partition) {
+ *error_log_cnt = be32_to_cpu(info.seq_num);
+ *err_type = be32_to_cpu(info.error_type);
+ }
+
+ return 0;
+}
+
+/* nvram_init_os_partition
+ *
+ * This sets up a partition with an "OS" signature.
+ *
+ * The general strategy is the following:
+ * 1.) If a partition with the indicated name already exists...
+ * - If it's large enough, use it.
+ * - Otherwise, recycle it and keep going.
+ * 2.) Search for a free partition that is large enough.
+ * 3.) If there's not a free partition large enough, recycle any obsolete
+ * OS partitions and try again.
+ * 4.) First try to get a chunk that satisfies the requested size.
+ * 5.) If a chunk of the requested size cannot be allocated, then try to find
+ *     a chunk that satisfies the minimum needed.
+ *
+ * Returns 0 on success, else -1.
+ */
+int __init nvram_init_os_partition(struct nvram_os_partition *part)
+{
+ loff_t p;
+ int size;
+
+ /* Look for ours */
+ p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
+
+ /* Found one but too small, remove it */
+ if (p && size < part->min_size) {
+ pr_info("nvram: Found too small %s partition,"
+ " removing it...\n", part->name);
+ nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
+ p = 0;
+ }
+
+ /* Create one if we didn't find */
+ if (!p) {
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
+ if (p == -ENOSPC) {
+ pr_info("nvram: No room to create %s partition, "
+ "deleting any obsolete OS partitions...\n",
+ part->name);
+ nvram_remove_partition(NULL, NVRAM_SIG_OS,
+ nvram_os_partitions);
+ p = nvram_create_partition(part->name, NVRAM_SIG_OS,
+ part->req_size, part->min_size);
+ }
+ }
+
+ if (p <= 0) {
+ pr_err("nvram: Failed to find or create %s"
+ " partition, err %d\n", part->name, (int)p);
+ return -1;
+ }
+
+ part->index = p;
+ part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
+
+ return 0;
+}
+
+/* Derived from logfs_compress() */
+static int nvram_compress(const void *in, void *out, size_t inlen,
+ size_t outlen)
+{
+ int err, ret;
+
+ ret = -EIO;
+ err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
+ MEM_LEVEL, Z_DEFAULT_STRATEGY);
+ if (err != Z_OK)
+ goto error;
+
+ stream.next_in = in;
+ stream.avail_in = inlen;
+ stream.total_in = 0;
+ stream.next_out = out;
+ stream.avail_out = outlen;
+ stream.total_out = 0;
+
+ err = zlib_deflate(&stream, Z_FINISH);
+ if (err != Z_STREAM_END)
+ goto error;
+
+ err = zlib_deflateEnd(&stream);
+ if (err != Z_OK)
+ goto error;
+
+ if (stream.total_out >= stream.total_in)
+ goto error;
+
+ ret = stream.total_out;
+error:
+ return ret;
+}
+
+/* Compress the text from big_oops_buf into oops_buf. */
+static int zip_oops(size_t text_len)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
+ oops_data_sz);
+ if (zipped_len < 0) {
+ pr_err("nvram: compression failed; returned %d\n", zipped_len);
+ pr_err("nvram: logging uncompressed oops/panic report\n");
+ return -1;
+ }
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(zipped_len);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+ return 0;
+}
+
+#ifdef CONFIG_PSTORE
+static int nvram_pstore_open(struct pstore_info *psi)
+{
+ /* Reset the iterator to start reading partitions again */
+ read_type = -1;
+ return 0;
+}
+
+/**
+ * nvram_pstore_write - pstore write callback for nvram
+ * @type: Type of message logged
+ * @reason: reason behind dump (oops/panic)
+ * @id: identifier to indicate the write performed
+ * @part: pstore writes data to the registered buffer in parts;
+ * this number identifies which part is being written.
+ * @count: Indicates oops count
+ * @compressed: Flag to indicate the log is compressed
+ * @size: number of bytes written to the registered buffer
+ * @psi: registered pstore_info structure
+ *
+ * Called by pstore_dump() when an oops or panic report is logged in the
+ * printk buffer.
+ * Returns 0 on successful write.
+ */
+static int nvram_pstore_write(enum pstore_type_id type,
+ enum kmsg_dump_reason reason,
+ u64 *id, unsigned int part, int count,
+ bool compressed, size_t size,
+ struct pstore_info *psi)
+{
+ int rc;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
+ struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
+
+ /* part 1 has the recent messages from printk buffer */
+ if (part > 1 || (type != PSTORE_TYPE_DMESG))
+ return -1;
+
+ if (clobbering_unread_rtas_event())
+ return -1;
+
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(size);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+
+ if (compressed)
+ err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+
+ rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + size), err_type, count);
+
+ if (rc != 0)
+ return rc;
+
+ *id = part;
+ return 0;
+}
+
+/*
+ * Reads the oops/panic report, rtas, of-config and common partition.
+ * Returns the length of the data we read from each partition.
+ * Returns 0 if we've been called before.
+ */
+static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
+ int *count, struct timespec *time, char **buf,
+ bool *compressed, struct pstore_info *psi)
+{
+ struct oops_log_info *oops_hdr;
+ unsigned int err_type, id_no, size = 0;
+ struct nvram_os_partition *part = NULL;
+ char *buff = NULL;
+ int sig = 0;
+ loff_t p;
+
+ read_type++;
+
+ switch (nvram_type_ids[read_type]) {
+ case PSTORE_TYPE_DMESG:
+ part = &oops_log_partition;
+ *type = PSTORE_TYPE_DMESG;
+ break;
+ case PSTORE_TYPE_PPC_COMMON:
+ sig = NVRAM_SIG_SYS;
+ part = &common_partition;
+ *type = PSTORE_TYPE_PPC_COMMON;
+ *id = PSTORE_TYPE_PPC_COMMON;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#ifdef CONFIG_PPC_PSERIES
+ case PSTORE_TYPE_PPC_RTAS:
+ part = &rtas_log_partition;
+ *type = PSTORE_TYPE_PPC_RTAS;
+ time->tv_sec = last_rtas_event;
+ time->tv_nsec = 0;
+ break;
+ case PSTORE_TYPE_PPC_OF:
+ sig = NVRAM_SIG_OF;
+ part = &of_config_partition;
+ *type = PSTORE_TYPE_PPC_OF;
+ *id = PSTORE_TYPE_PPC_OF;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#endif
+#ifdef CONFIG_PPC_POWERNV
+ case PSTORE_TYPE_PPC_OPAL:
+ sig = NVRAM_SIG_FW;
+ part = &skiboot_partition;
+ *type = PSTORE_TYPE_PPC_OPAL;
+ *id = PSTORE_TYPE_PPC_OPAL;
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ break;
+#endif
+ default:
+ return 0;
+ }
+
+ if (!part->os_partition) {
+ p = nvram_find_partition(part->name, sig, &size);
+ if (p <= 0) {
+ pr_err("nvram: Failed to find partition %s, "
+ "err %d\n", part->name, (int)p);
+ return 0;
+ }
+ part->index = p;
+ part->size = size;
+ }
+
+ buff = kmalloc(part->size, GFP_KERNEL);
+
+ if (!buff)
+ return -ENOMEM;
+
+ if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
+ kfree(buff);
+ return 0;
+ }
+
+ *count = 0;
+
+ if (part->os_partition)
+ *id = id_no;
+
+ if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
+ size_t length, hdr_size;
+
+ oops_hdr = (struct oops_log_info *)buff;
+ if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
+ /* Old format oops header had 2-byte record size */
+ hdr_size = sizeof(u16);
+ length = be16_to_cpu(oops_hdr->version);
+ time->tv_sec = 0;
+ time->tv_nsec = 0;
+ } else {
+ hdr_size = sizeof(*oops_hdr);
+ length = be16_to_cpu(oops_hdr->report_length);
+ time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
+ time->tv_nsec = 0;
+ }
+ *buf = kmalloc(length, GFP_KERNEL);
+ if (*buf == NULL)
+ return -ENOMEM;
+ memcpy(*buf, buff + hdr_size, length);
+ kfree(buff);
+
+ if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
+ *compressed = true;
+ else
+ *compressed = false;
+ return length;
+ }
+
+ *buf = buff;
+ return part->size;
+}
+
+static struct pstore_info nvram_pstore_info = {
+ .owner = THIS_MODULE,
+ .name = "nvram",
+ .open = nvram_pstore_open,
+ .read = nvram_pstore_read,
+ .write = nvram_pstore_write,
+};
+
+static int nvram_pstore_init(void)
+{
+ int rc = 0;
+
+ if (machine_is(pseries)) {
+ nvram_type_ids[2] = PSTORE_TYPE_PPC_RTAS;
+ nvram_type_ids[3] = PSTORE_TYPE_PPC_OF;
+ } else
+ nvram_type_ids[2] = PSTORE_TYPE_PPC_OPAL;
+
+ nvram_pstore_info.buf = oops_data;
+ nvram_pstore_info.bufsize = oops_data_sz;
+
+ spin_lock_init(&nvram_pstore_info.buf_lock);
+
+ rc = pstore_register(&nvram_pstore_info);
+ if (rc != 0)
+ pr_err("nvram: pstore_register() failed, defaults to "
+ "kmsg_dump; returned %d\n", rc);
+
+ return rc;
+}
+#else
+static int nvram_pstore_init(void)
+{
+ return -1;
+}
+#endif
+
+void __init nvram_init_oops_partition(int rtas_partition_exists)
+{
+ int rc;
+
+ rc = nvram_init_os_partition(&oops_log_partition);
+ if (rc != 0) {
+#ifdef CONFIG_PPC_PSERIES
+ if (!rtas_partition_exists) {
+ pr_err("nvram: Failed to initialize oops partition!");
+ return;
+ }
+ pr_notice("nvram: Using %s partition to log both"
+ " RTAS errors and oops/panic reports\n",
+ rtas_log_partition.name);
+ memcpy(&oops_log_partition, &rtas_log_partition,
+ sizeof(rtas_log_partition));
+#else
+ pr_err("nvram: Failed to initialize oops partition!");
+ return;
+#endif
+ }
+ oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
+ if (!oops_buf) {
+ pr_err("nvram: No memory for %s partition\n",
+ oops_log_partition.name);
+ return;
+ }
+ oops_data = oops_buf + sizeof(struct oops_log_info);
+ oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
+
+ rc = nvram_pstore_init();
+
+ if (!rc)
+ return;
+
+ /*
+ * Figure compression (preceded by elimination of each line's <n>
+ * severity prefix) will reduce the oops/panic report to at most
+ * 45% of its original size.
+ */
+ big_oops_buf_sz = (oops_data_sz * 100) / 45;
+ big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
+ if (big_oops_buf) {
+ stream.workspace = kmalloc(zlib_deflate_workspacesize(
+ WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
+ if (!stream.workspace) {
+ pr_err("nvram: No memory for compression workspace; "
+ "skipping compression of %s partition data\n",
+ oops_log_partition.name);
+ kfree(big_oops_buf);
+ big_oops_buf = NULL;
+ }
+ } else {
+ pr_err("No memory for uncompressed %s data; "
+ "skipping compression\n", oops_log_partition.name);
+ stream.workspace = NULL;
+ }
+
+ rc = kmsg_dump_register(&nvram_kmsg_dumper);
+ if (rc != 0) {
+ pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
+ kfree(oops_buf);
+ kfree(big_oops_buf);
+ kfree(stream.workspace);
+ }
+}
+
+/*
+ * This is our kmsg_dump callback, called after an oops or panic report
+ * has been written to the printk buffer. We want to capture as much
+ * of the printk buffer as possible. First, capture as much as we can
+ * that we think will compress sufficiently to fit in the lnx,oops-log
+ * partition. If that's too much, go back and capture uncompressed text.
+ */
+static void oops_to_nvram(struct kmsg_dumper *dumper,
+ enum kmsg_dump_reason reason)
+{
+ struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
+ static unsigned int oops_count = 0;
+ static bool panicking = false;
+ static DEFINE_SPINLOCK(lock);
+ unsigned long flags;
+ size_t text_len;
+ unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
+ int rc = -1;
+
+ switch (reason) {
+ case KMSG_DUMP_RESTART:
+ case KMSG_DUMP_HALT:
+ case KMSG_DUMP_POWEROFF:
+ /* These are almost always orderly shutdowns. */
+ return;
+ case KMSG_DUMP_OOPS:
+ break;
+ case KMSG_DUMP_PANIC:
+ panicking = true;
+ break;
+ case KMSG_DUMP_EMERG:
+ if (panicking)
+ /* Panic report already captured. */
+ return;
+ break;
+ default:
+ pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
+ __func__, (int) reason);
+ return;
+ }
+
+ if (clobbering_unread_rtas_event())
+ return;
+
+ if (!spin_trylock_irqsave(&lock, flags))
+ return;
+
+ if (big_oops_buf) {
+ kmsg_dump_get_buffer(dumper, false,
+ big_oops_buf, big_oops_buf_sz, &text_len);
+ rc = zip_oops(text_len);
+ }
+ if (rc != 0) {
+ kmsg_dump_rewind(dumper);
+ kmsg_dump_get_buffer(dumper, false,
+ oops_data, oops_data_sz, &text_len);
+ err_type = ERR_TYPE_KERNEL_PANIC;
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(text_len);
+ oops_hdr->timestamp = cpu_to_be64(ktime_get_real_seconds());
+ }
+
+ (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
+ (int) (sizeof(*oops_hdr) + text_len), err_type,
+ ++oops_count);
+
+ spin_unlock_irqrestore(&lock, flags);
+}
+
static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin)
{
int size;
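
For reference, the header layout sketched in the comment block earlier in this file corresponds to a structure of this shape. The authoritative definition lives in the powerpc nvram header, so treat this as a restatement of the diagram rather than the canonical type:

#include <linux/types.h>

/* version | report_length | timestamp, all stored big-endian in NVRAM */
struct example_oops_log_info {
        __be16 version;         /* OOPS_HDR_VERSION, or old-format length */
        __be16 report_length;   /* compressed (or raw) text length in bytes */
        __be64 timestamp;       /* ktime_get_real_seconds() at dump time */
} __attribute__((packed));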
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index 2f35a72642c6..b60a67d92ebd 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -72,7 +72,7 @@ static int of_pci_phb_probe(struct platform_device *dev)
/* Register devices with EEH */
if (dev->dev.of_node->child)
- eeh_add_device_tree_early(dev->dev.of_node);
+ eeh_add_device_tree_early(PCI_DN(dev->dev.of_node));
/* Scan the bus */
pcibios_scan_phb(phb);
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index 2a525c938158..0d054068a21d 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -76,7 +76,7 @@ struct pci_controller *pcibios_alloc_controller(struct device_node *dev)
list_add_tail(&phb->list_node, &hose_list);
spin_unlock(&hose_spinlock);
phb->dn = dev;
- phb->is_dynamic = mem_init_done;
+ phb->is_dynamic = slab_is_available();
#ifdef CONFIG_PPC64
if (dev) {
int nid = of_node_to_nid(dev);
@@ -109,8 +109,10 @@ void pcibios_free_controller(struct pci_controller *phb)
resource_size_t pcibios_window_alignment(struct pci_bus *bus,
unsigned long type)
{
- if (ppc_md.pcibios_window_alignment)
- return ppc_md.pcibios_window_alignment(bus, type);
+ struct pci_controller *phb = pci_bus_to_host(bus);
+
+ if (phb->controller_ops.window_alignment)
+ return phb->controller_ops.window_alignment(bus, type);
/*
* PCI core will figure out the default
@@ -122,14 +124,26 @@ resource_size_t pcibios_window_alignment(struct pci_bus *bus,
void pcibios_reset_secondary_bus(struct pci_dev *dev)
{
- if (ppc_md.pcibios_reset_secondary_bus) {
- ppc_md.pcibios_reset_secondary_bus(dev);
+ struct pci_controller *phb = pci_bus_to_host(dev->bus);
+
+ if (phb->controller_ops.reset_secondary_bus) {
+ phb->controller_ops.reset_secondary_bus(dev);
return;
}
pci_reset_secondary_bus(dev);
}
+#ifdef CONFIG_PCI_IOV
+resource_size_t pcibios_iov_resource_alignment(struct pci_dev *pdev, int resno)
+{
+ if (ppc_md.pcibios_iov_resource_alignment)
+ return ppc_md.pcibios_iov_resource_alignment(pdev, resno);
+
+ return pci_iov_resource_size(pdev, resno);
+}
+#endif /* CONFIG_PCI_IOV */
+
static resource_size_t pcibios_io_size(const struct pci_controller *hose)
{
#ifdef CONFIG_PPC64
@@ -788,6 +802,10 @@ static void pcibios_fixup_resources(struct pci_dev *dev)
pci_name(dev));
return;
}
+
+ if (dev->is_virtfn)
+ return;
+
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
struct resource *res = dev->resource + i;
struct pci_bus_region reg;
@@ -942,6 +960,8 @@ static void pcibios_fixup_bridge(struct pci_bus *bus)
void pcibios_setup_bus_self(struct pci_bus *bus)
{
+ struct pci_controller *phb;
+
/* Fix up the bus resources for P2P bridges */
if (bus->self != NULL)
pcibios_fixup_bridge(bus);
@@ -953,12 +973,14 @@ void pcibios_setup_bus_self(struct pci_bus *bus)
ppc_md.pcibios_fixup_bus(bus);
/* Setup bus DMA mappings */
- if (ppc_md.pci_dma_bus_setup)
- ppc_md.pci_dma_bus_setup(bus);
+ phb = pci_bus_to_host(bus);
+ if (phb->controller_ops.dma_bus_setup)
+ phb->controller_ops.dma_bus_setup(bus);
}
static void pcibios_setup_device(struct pci_dev *dev)
{
+ struct pci_controller *phb;
/* Fixup NUMA node as it may not be setup yet by the generic
* code and is needed by the DMA init
*/
@@ -969,8 +991,9 @@ static void pcibios_setup_device(struct pci_dev *dev)
set_dma_offset(&dev->dev, PCI_DRAM_OFFSET);
/* Additional platform DMA/iommu setup */
- if (ppc_md.pci_dma_dev_setup)
- ppc_md.pci_dma_dev_setup(dev);
+ phb = pci_bus_to_host(dev->bus);
+ if (phb->controller_ops.dma_dev_setup)
+ phb->controller_ops.dma_dev_setup(dev);
/* Read default IRQs and fixup if necessary */
pci_read_irq_line(dev);
@@ -986,6 +1009,12 @@ int pcibios_add_device(struct pci_dev *dev)
*/
if (dev->bus->is_added)
pcibios_setup_device(dev);
+
+#ifdef CONFIG_PCI_IOV
+ if (ppc_md.pcibios_fixup_sriov)
+ ppc_md.pcibios_fixup_sriov(dev);
+#endif /* CONFIG_PCI_IOV */
+
return 0;
}
@@ -1450,8 +1479,10 @@ EXPORT_SYMBOL_GPL(pcibios_finish_adding_to_bus);
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- if (ppc_md.pcibios_enable_device_hook)
- if (ppc_md.pcibios_enable_device_hook(dev))
+ struct pci_controller *phb = pci_bus_to_host(dev->bus);
+
+ if (phb->controller_ops.enable_device_hook)
+ if (!phb->controller_ops.enable_device_hook(dev))
return -EINVAL;
return pci_enable_resources(dev, mask);
@@ -1624,8 +1655,8 @@ void pcibios_scan_phb(struct pci_controller *hose)
/* Get probe mode and perform scan */
mode = PCI_PROBE_NORMAL;
- if (node && ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (node && hose->controller_ops.probe_mode)
+ mode = hose->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
of_scan_bus(node, bus);
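
The hunks above replace global ppc_md callbacks with per-PHB hooks looked up through pci_bus_to_host(). A hedged sketch of how a platform might populate the new structure; the hook names are taken from the calls above, while the example functions and return values are assumptions:

#include <asm/pci-bridge.h>

static int example_probe_mode(struct pci_bus *bus)
{
        return PCI_PROBE_DEVTREE;       /* scan from the device tree */
}

static bool example_enable_device_hook(struct pci_dev *dev)
{
        return true;    /* false would make pcibios_enable_device() fail */
}

static struct pci_controller_ops example_controller_ops = {
        .probe_mode             = example_probe_mode,
        .enable_device_hook     = example_enable_device_hook,
};

static void example_phb_setup(struct pci_controller *hose)
{
        hose->controller_ops = example_controller_ops;
}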
diff --git a/arch/powerpc/kernel/pci-hotplug.c b/arch/powerpc/kernel/pci-hotplug.c
index 5b789177aa29..7ed85a69a9c2 100644
--- a/arch/powerpc/kernel/pci-hotplug.c
+++ b/arch/powerpc/kernel/pci-hotplug.c
@@ -73,13 +73,16 @@ void pcibios_add_pci_devices(struct pci_bus * bus)
{
int slotno, mode, pass, max;
struct pci_dev *dev;
+ struct pci_controller *phb;
struct device_node *dn = pci_bus_to_OF_node(bus);
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(PCI_DN(dn));
+
+ phb = pci_bus_to_host(bus);
mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (phb->controller_ops.probe_mode)
+ mode = phb->controller_ops.probe_mode(bus);
if (mode == PCI_PROBE_DEVTREE) {
/* use ofdt-based probe */
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c
index 83df3075d3df..b3b4df91b792 100644
--- a/arch/powerpc/kernel/pci_dn.c
+++ b/arch/powerpc/kernel/pci_dn.c
@@ -32,12 +32,237 @@
#include <asm/ppc-pci.h>
#include <asm/firmware.h>
+/*
+ * The function is used to find the firmware data of one
+ * specific PCI device, which is attached to the indicated
+ * PCI bus. For VFs, their firmware data is linked to that
+ * of the PF's bridge. For other devices, their firmware
+ * data is linked to that of their bridge.
+ */
+static struct pci_dn *pci_bus_to_pdn(struct pci_bus *bus)
+{
+ struct pci_bus *pbus;
+ struct device_node *dn;
+ struct pci_dn *pdn;
+
+ /*
+ * We probably have a virtual bus which doesn't
+ * have an associated bridge.
+ */
+ pbus = bus;
+ while (pbus) {
+ if (pci_is_root_bus(pbus) || pbus->self)
+ break;
+
+ pbus = pbus->parent;
+ }
+
+ /*
+ * Except for virtual buses, all PCI buses should
+ * have device nodes.
+ */
+ dn = pci_bus_to_OF_node(pbus);
+ pdn = dn ? PCI_DN(dn) : NULL;
+
+ return pdn;
+}
+
+struct pci_dn *pci_get_pdn_by_devfn(struct pci_bus *bus,
+ int devfn)
+{
+ struct device_node *dn = NULL;
+ struct pci_dn *parent, *pdn;
+ struct pci_dev *pdev = NULL;
+
+ /* Fast path: fetch from PCI device */
+ list_for_each_entry(pdev, &bus->devices, bus_list) {
+ if (pdev->devfn == devfn) {
+ if (pdev->dev.archdata.pci_data)
+ return pdev->dev.archdata.pci_data;
+
+ dn = pci_device_to_OF_node(pdev);
+ break;
+ }
+ }
+
+ /* Fast path: fetch from device node */
+ pdn = dn ? PCI_DN(dn) : NULL;
+ if (pdn)
+ return pdn;
+
+ /* Slow path: fetch from firmware data hierarchy */
+ parent = pci_bus_to_pdn(bus);
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(pdn, &parent->child_list, list) {
+ if (pdn->busno == bus->number &&
+ pdn->devfn == devfn)
+ return pdn;
+ }
+
+ return NULL;
+}
+
struct pci_dn *pci_get_pdn(struct pci_dev *pdev)
{
- struct device_node *dn = pci_device_to_OF_node(pdev);
- if (!dn)
+ struct device_node *dn;
+ struct pci_dn *parent, *pdn;
+
+ /* Search device directly */
+ if (pdev->dev.archdata.pci_data)
+ return pdev->dev.archdata.pci_data;
+
+ /* Check device node */
+ dn = pci_device_to_OF_node(pdev);
+ pdn = dn ? PCI_DN(dn) : NULL;
+ if (pdn)
+ return pdn;
+
+ /*
+ * VFs don't have device nodes. We hook their
+ * firmware data to the PF's bridge.
+ */
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return NULL;
+
+ list_for_each_entry(pdn, &parent->child_list, list) {
+ if (pdn->busno == pdev->bus->number &&
+ pdn->devfn == pdev->devfn)
+ return pdn;
+ }
+
+ return NULL;
+}
+
+#ifdef CONFIG_PCI_IOV
+static struct pci_dn *add_one_dev_pci_data(struct pci_dn *parent,
+ struct pci_dev *pdev,
+ int busno, int devfn)
+{
+ struct pci_dn *pdn;
+
+ /* Except for the PHB, we always have a parent */
+ if (!parent)
+ return NULL;
+
+ pdn = kzalloc(sizeof(*pdn), GFP_KERNEL);
+ if (!pdn) {
+ dev_warn(&pdev->dev, "%s: Out of memory!\n", __func__);
return NULL;
- return PCI_DN(dn);
+ }
+
+ pdn->phb = parent->phb;
+ pdn->parent = parent;
+ pdn->busno = busno;
+ pdn->devfn = devfn;
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
+ INIT_LIST_HEAD(&pdn->child_list);
+ INIT_LIST_HEAD(&pdn->list);
+ list_add_tail(&pdn->list, &parent->child_list);
+
+ /*
+ * If we already have a PCI device instance, let's
+ * bind them.
+ */
+ if (pdev)
+ pdev->dev.archdata.pci_data = pdn;
+
+ return pdn;
+}
+#endif
+
+struct pci_dn *add_dev_pci_data(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dn *parent, *pdn;
+ int i;
+
+ /* Only support IOV for now */
+ if (!pdev->is_physfn)
+ return pci_get_pdn(pdev);
+
+ /* Check if VFs have been populated */
+ pdn = pci_get_pdn(pdev);
+ if (!pdn || (pdn->flags & PCI_DN_FLAG_IOV_VF))
+ return NULL;
+
+ pdn->flags |= PCI_DN_FLAG_IOV_VF;
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return NULL;
+
+ for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ pdn = add_one_dev_pci_data(parent, NULL,
+ pci_iov_virtfn_bus(pdev, i),
+ pci_iov_virtfn_devfn(pdev, i));
+ if (!pdn) {
+ dev_warn(&pdev->dev, "%s: Cannot create firmware data for VF#%d\n",
+ __func__, i);
+ return NULL;
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
+
+ return pci_get_pdn(pdev);
+}
+
+void remove_dev_pci_data(struct pci_dev *pdev)
+{
+#ifdef CONFIG_PCI_IOV
+ struct pci_dn *parent;
+ struct pci_dn *pdn, *tmp;
+ int i;
+
+ /*
+ * VF and VF PE are created/released dynamically, so we need to
+ * bind/unbind them. Otherwise the VF and VF PE would be mismatched
+ * when re-enabling SR-IOV.
+ */
+ if (pdev->is_virtfn) {
+ pdn = pci_get_pdn(pdev);
+#ifdef CONFIG_PPC_POWERNV
+ pdn->pe_number = IODA_INVALID_PE;
+#endif
+ return;
+ }
+
+ /* Only support IOV PF for now */
+ if (!pdev->is_physfn)
+ return;
+
+ /* Check if VFs have been populated */
+ pdn = pci_get_pdn(pdev);
+ if (!pdn || !(pdn->flags & PCI_DN_FLAG_IOV_VF))
+ return;
+
+ pdn->flags &= ~PCI_DN_FLAG_IOV_VF;
+ parent = pci_bus_to_pdn(pdev->bus);
+ if (!parent)
+ return;
+
+ /*
+ * We might introduce a flag to pci_dn in the future
+ * so that we can release the VFs' firmware data in
+ * batch mode.
+ */
+ for (i = 0; i < pci_sriov_get_totalvfs(pdev); i++) {
+ list_for_each_entry_safe(pdn, tmp,
+ &parent->child_list, list) {
+ if (pdn->busno != pci_iov_virtfn_bus(pdev, i) ||
+ pdn->devfn != pci_iov_virtfn_devfn(pdev, i))
+ continue;
+
+ if (!list_empty(&pdn->list))
+ list_del(&pdn->list);
+
+ kfree(pdn);
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
}
/*
@@ -49,6 +274,7 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
struct pci_controller *phb = data;
const __be32 *type = of_get_property(dn, "ibm,pci-config-space-type", NULL);
const __be32 *regs;
+ struct device_node *parent;
struct pci_dn *pdn;
pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL);
@@ -69,7 +295,25 @@ void *update_dn_pci_info(struct device_node *dn, void *data)
pdn->devfn = (addr >> 8) & 0xff;
}
+ /* vendor/device IDs and class code */
+ regs = of_get_property(dn, "vendor-id", NULL);
+ pdn->vendor_id = regs ? of_read_number(regs, 1) : 0;
+ regs = of_get_property(dn, "device-id", NULL);
+ pdn->device_id = regs ? of_read_number(regs, 1) : 0;
+ regs = of_get_property(dn, "class-code", NULL);
+ pdn->class_code = regs ? of_read_number(regs, 1) : 0;
+
+ /* Extended config space */
pdn->pci_ext_config_space = (type && of_read_number(type, 1) == 1);
+
+ /* Attach to parent node */
+ INIT_LIST_HEAD(&pdn->child_list);
+ INIT_LIST_HEAD(&pdn->list);
+ parent = of_get_parent(dn);
+ pdn->parent = parent ? PCI_DN(parent) : NULL;
+ if (pdn->parent)
+ list_add_tail(&pdn->list, &pdn->parent->child_list);
+
return NULL;
}
@@ -131,6 +375,46 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
return NULL;
}
+static struct pci_dn *pci_dn_next_one(struct pci_dn *root,
+ struct pci_dn *pdn)
+{
+ struct list_head *next = pdn->child_list.next;
+
+ if (next != &pdn->child_list)
+ return list_entry(next, struct pci_dn, list);
+
+ while (1) {
+ if (pdn == root)
+ return NULL;
+
+ next = pdn->list.next;
+ if (next != &pdn->parent->child_list)
+ break;
+
+ pdn = pdn->parent;
+ }
+
+ return list_entry(next, struct pci_dn, list);
+}
+
+void *traverse_pci_dn(struct pci_dn *root,
+ void *(*fn)(struct pci_dn *, void *),
+ void *data)
+{
+ struct pci_dn *pdn = root;
+ void *ret;
+
+ /* Only scan the child nodes */
+ for (pdn = pci_dn_next_one(root, pdn); pdn;
+ pdn = pci_dn_next_one(root, pdn)) {
+ ret = fn(pdn, data);
+ if (ret)
+ return ret;
+ }
+
+ return NULL;
+}
+
/**
* pci_devs_phb_init_dynamic - setup pci devices under this PHB
* phb: pci-to-host bridge (top-level bridge connecting to cpu)
@@ -147,8 +431,12 @@ void pci_devs_phb_init_dynamic(struct pci_controller *phb)
/* PHB nodes themselves must not match */
update_dn_pci_info(dn, phb);
pdn = dn->data;
- if (pdn)
+ if (pdn) {
pdn->devfn = pdn->busno = -1;
+ pdn->vendor_id = pdn->device_id = pdn->class_code = 0;
+ pdn->phb = phb;
+ phb->pci_data = pdn;
+ }
/* Update dn->phb ptrs for new phb and children devices */
traverse_pci_devices(dn, update_dn_pci_info, phb);
@@ -171,3 +459,16 @@ void __init pci_devs_phb_init(void)
list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
pci_devs_phb_init_dynamic(phb);
}
+
+static void pci_dev_pdn_setup(struct pci_dev *pdev)
+{
+ struct pci_dn *pdn;
+
+ if (pdev->dev.archdata.pci_data)
+ return;
+
+ /* Setup the fast path */
+ pdn = pci_get_pdn(pdev);
+ pdev->dev.archdata.pci_data = pdn;
+}
+DECLARE_PCI_FIXUP_EARLY(PCI_ANY_ID, PCI_ANY_ID, pci_dev_pdn_setup);
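
A short usage sketch of the helpers added above: resolve a device's pci_dn (fast path via archdata, slow path via the parent's child_list) and walk its children. The function name and debug output are illustrative only:

#include <linux/pci.h>
#include <asm/pci-bridge.h>

static void example_walk_pdn(struct pci_dev *pdev)
{
        struct pci_dn *pdn = pci_get_pdn(pdev);
        struct pci_dn *child;

        if (!pdn)
                return;

        list_for_each_entry(child, &pdn->child_list, list)
                pr_debug("child pdn %02x:%02x.%d\n", child->busno,
                         PCI_SLOT(child->devfn), PCI_FUNC(child->devfn));
}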
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index e6245e9c7d8d..42e02a2d570b 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -207,6 +207,7 @@ void of_scan_pci_bridge(struct pci_dev *dev)
{
struct device_node *node = dev->dev.of_node;
struct pci_bus *bus;
+ struct pci_controller *phb;
const __be32 *busrange, *ranges;
int len, i, mode;
struct pci_bus_region region;
@@ -286,9 +287,11 @@ void of_scan_pci_bridge(struct pci_dev *dev)
bus->number);
pr_debug(" bus name: %s\n", bus->name);
+ phb = pci_bus_to_host(bus);
+
mode = PCI_PROBE_NORMAL;
- if (ppc_md.pci_probe_mode)
- mode = ppc_md.pci_probe_mode(bus);
+ if (phb->controller_ops.probe_mode)
+ mode = phb->controller_ops.probe_mode(bus);
pr_debug(" probe mode: %d\n", mode);
if (mode == PCI_PROBE_DEVTREE)
@@ -305,7 +308,7 @@ static struct pci_dev *of_scan_pci_dev(struct pci_bus *bus,
const __be32 *reg;
int reglen, devfn;
#ifdef CONFIG_EEH
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(PCI_DN(dn));
#endif
pr_debug(" * %s\n", dn->full_name);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index b4cc7bef6b16..febb50dd5328 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1114,8 +1114,11 @@ static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
*/
extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */
+/*
+ * Copy architecture-specific thread state
+ */
int copy_thread(unsigned long clone_flags, unsigned long usp,
- unsigned long arg, struct task_struct *p)
+ unsigned long kthread_arg, struct task_struct *p)
{
struct pt_regs *childregs, *kregs;
extern void ret_from_fork(void);
@@ -1127,6 +1130,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
sp -= sizeof(struct pt_regs);
childregs = (struct pt_regs *) sp;
if (unlikely(p->flags & PF_KTHREAD)) {
+ /* kernel thread */
struct thread_info *ti = (void *)task_stack_page(p);
memset(childregs, 0, sizeof(struct pt_regs));
childregs->gpr[1] = sp + sizeof(struct pt_regs);
@@ -1137,11 +1141,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
clear_tsk_thread_flag(p, TIF_32BIT);
childregs->softe = 1;
#endif
- childregs->gpr[15] = arg;
+ childregs->gpr[15] = kthread_arg;
p->thread.regs = NULL; /* no user register state */
ti->flags |= _TIF_RESTOREALL;
f = ret_from_kernel_thread;
} else {
+ /* user thread */
struct pt_regs *regs = current_pt_regs();
CHECK_FULL_REGS(regs);
*childregs = *regs;
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 1a85d8f96739..fd1fe4c37599 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -2898,7 +2898,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Call OF "quiesce" method to shut down pending DMA's from
* devices etc...
*/
- prom_printf("Calling quiesce...\n");
+ prom_printf("Quiescing Open Firmware ...\n");
call_prom("quiesce", 0, 0);
/*
@@ -2910,7 +2910,7 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
/* Don't print anything after quiesce under OPAL, it crashes OFW */
if (of_platform != PLATFORM_OPAL) {
- prom_printf("returning from prom_init\n");
+ prom_printf("Booting Linux via __start() ...\n");
prom_debug("->dt_header_start=0x%x\n", hdr);
}
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 21c45a2d0706..7a488c108410 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -401,7 +401,7 @@ static char *__fetch_rtas_last_error(char *altbuf)
buf = altbuf;
} else {
buf = rtas_err_buf;
- if (mem_init_done)
+ if (slab_is_available())
buf = kmalloc(RTAS_ERROR_LOG_MAX, GFP_ATOMIC);
}
if (buf)
@@ -461,7 +461,7 @@ int rtas_call(int token, int nargs, int nret, int *outputs, ...)
if (buff_copy) {
log_error(buff_copy, ERR_TYPE_RTAS_LOG, 0);
- if (mem_init_done)
+ if (slab_is_available())
kfree(buff_copy);
}
return ret;
@@ -897,7 +897,7 @@ int rtas_offline_cpus_mask(cpumask_var_t cpus)
}
EXPORT_SYMBOL(rtas_offline_cpus_mask);
-int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
+int rtas_ibm_suspend_me(u64 handle)
{
long state;
long rc;
@@ -919,13 +919,11 @@ int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned %ld\n",rc);
return rc;
} else if (state == H_VASI_ENABLED) {
- *vasi_return = RTAS_NOT_SUSPENDABLE;
- return 0;
+ return -EAGAIN;
} else if (state != H_VASI_SUSPENDING) {
printk(KERN_ERR "rtas_ibm_suspend_me: vasi_state returned state %ld\n",
state);
- *vasi_return = -1;
- return 0;
+ return -EIO;
}
if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY))
@@ -972,7 +970,7 @@ out:
return atomic_read(&data.error);
}
#else /* CONFIG_PPC_PSERIES */
-int rtas_ibm_suspend_me(u64 handle, int *vasi_return)
+int rtas_ibm_suspend_me(u64 handle)
{
return -ENOSYS;
}
@@ -1022,7 +1020,6 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
unsigned long flags;
char *buff_copy, *errbuf = NULL;
int nargs, nret, token;
- int rc;
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
@@ -1054,15 +1051,18 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
if (token == ibm_suspend_me_token) {
/*
- * rtas_ibm_suspend_me assumes args are in cpu endian, or at least the
- * hcall within it requires it.
+ * rtas_ibm_suspend_me assumes the streamid handle is in cpu
+ * endian, or at least the hcall within it requires it.
*/
- int vasi_rc = 0;
+ int rc = 0;
u64 handle = ((u64)be32_to_cpu(args.args[0]) << 32)
| be32_to_cpu(args.args[1]);
- rc = rtas_ibm_suspend_me(handle, &vasi_rc);
- args.rets[0] = cpu_to_be32(vasi_rc);
- if (rc)
+ rc = rtas_ibm_suspend_me(handle);
+ if (rc == -EAGAIN)
+ args.rets[0] = cpu_to_be32(RTAS_NOT_SUSPENDABLE);
+ else if (rc == -EIO)
+ args.rets[0] = cpu_to_be32(-1);
+ else if (rc)
return rc;
goto copy_return;
}
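
Note on the hunk above: rtas_ibm_suspend_me() now reports problems as ordinary errno values, and ppc_rtas() translates them back into the status codes the RTAS user interface expects in rets[0]. Below is a minimal user-space sketch of that translation; the _DEMO macro and function names are invented for illustration, RTAS_NOT_SUSPENDABLE_DEMO stands in for the real constant from asm/rtas.h (not shown in this diff), and the real code additionally stores the value with cpu_to_be32().

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Placeholder: the real RTAS_NOT_SUSPENDABLE is defined in asm/rtas.h. */
#define RTAS_NOT_SUSPENDABLE_DEMO (-9004)

/* Models the translation in the ppc_rtas() hunk above: the errno coming
 * back from rtas_ibm_suspend_me() is folded into rets[0]. */
static int map_suspend_rc(int rc, int32_t *ret0)
{
        if (rc == -EAGAIN) {            /* VASI state was H_VASI_ENABLED */
                *ret0 = RTAS_NOT_SUSPENDABLE_DEMO;
                return 0;
        }
        if (rc == -EIO) {               /* unexpected VASI state */
                *ret0 = -1;
                return 0;
        }
        return rc;                      /* anything else is passed straight up */
}

int main(void)
{
        int32_t ret0 = 0;
        int rc;

        rc = map_suspend_rc(-EAGAIN, &ret0);
        printf("EAGAIN -> rc=%d rets[0]=%d\n", rc, ret0);
        rc = map_suspend_rc(-EIO, &ret0);
        printf("EIO    -> rc=%d rets[0]=%d\n", rc, ret0);
        return 0;
}
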
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index ce230da2c015..73f1934582c2 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -113,7 +113,7 @@ static int rtas_pci_read_config(struct pci_bus *bus,
ret = rtas_read_config(pdn, where, size, val);
if (*val == EEH_IO_ERROR_VALUE(size) &&
- eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+ eeh_dev_check_failure(pdn_to_eeh_dev(pdn)))
return PCIBIOS_DEVICE_NOT_FOUND;
return ret;
@@ -277,50 +277,3 @@ int rtas_setup_phb(struct pci_controller *phb)
return 0;
}
-
-void __init find_and_init_phbs(void)
-{
- struct device_node *node;
- struct pci_controller *phb;
- struct device_node *root = of_find_node_by_path("/");
-
- for_each_child_of_node(root, node) {
- if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
- strcmp(node->type, "pciex") != 0))
- continue;
-
- phb = pcibios_alloc_controller(node);
- if (!phb)
- continue;
- rtas_setup_phb(phb);
- pci_process_bridge_OF_ranges(phb, node, 0);
- isa_bridge_find_early(phb);
- }
-
- of_node_put(root);
- pci_devs_phb_init();
-
- /*
- * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
- * in chosen.
- */
- if (of_chosen) {
- const int *prop;
-
- prop = of_get_property(of_chosen,
- "linux,pci-probe-only", NULL);
- if (prop) {
- if (*prop)
- pci_add_flags(PCI_PROBE_ONLY);
- else
- pci_clear_flags(PCI_PROBE_ONLY);
- }
-
-#ifdef CONFIG_PPC32 /* Will be made generic soon */
- prop = of_get_property(of_chosen,
- "linux,pci-assign-all-buses", NULL);
- if (prop && *prop)
- pci_add_flags(PCI_REASSIGN_ALL_BUS);
-#endif /* CONFIG_PPC32 */
- }
-}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 49f553bbb360..c69671c03c3b 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -37,6 +37,7 @@
#include <linux/memblock.h>
#include <linux/hugetlb.h>
#include <linux/memory.h>
+#include <linux/nmi.h>
#include <asm/io.h>
#include <asm/kdump.h>
@@ -779,3 +780,22 @@ unsigned long memory_block_size_bytes(void)
struct ppc_pci_io ppc_pci_io;
EXPORT_SYMBOL(ppc_pci_io);
#endif
+
+#ifdef CONFIG_HARDLOCKUP_DETECTOR
+u64 hw_nmi_get_sample_period(int watchdog_thresh)
+{
+ return ppc_proc_freq * watchdog_thresh;
+}
+
+/*
+ * The hardlockup detector breaks PMU event based branches and is likely
+ * to get false positives in KVM guests, so disable it by default.
+ */
+static int __init disable_hardlockup_detector(void)
+{
+ hardlockup_detector_disable();
+
+ return 0;
+}
+early_initcall(disable_hardlockup_detector);
+#endif
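
The setup_64.c addition above wires the generic hard lockup detector to the PMU: hw_nmi_get_sample_period() converts the watchdog threshold in seconds into a cycle count, and the early_initcall() then turns the detector off by default for the reasons given in its comment. A rough stand-alone sketch of the period calculation follows; the 3.8 GHz figure is purely illustrative, since the kernel takes ppc_proc_freq from firmware.

#include <stdint.h>
#include <stdio.h>

/* Illustrative value only; the kernel reads ppc_proc_freq from firmware. */
static uint64_t ppc_proc_freq = 3800000000ULL;          /* 3.8 GHz */

/* Mirrors hw_nmi_get_sample_period() above: the detector arms a cycles
 * event sized so that it only overflows after 'watchdog_thresh' seconds
 * of uninterrupted execution. */
static uint64_t sample_period(int watchdog_thresh)
{
        return ppc_proc_freq * watchdog_thresh;
}

int main(void)
{
        printf("10s threshold -> %llu cycles\n",
               (unsigned long long)sample_period(10));
        return 0;
}
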
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c
index b2702e87db0d..5fa92706444b 100644
--- a/arch/powerpc/kernel/syscalls.c
+++ b/arch/powerpc/kernel/syscalls.c
@@ -121,3 +121,20 @@ long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low,
return sys_fadvise64(fd, (u64)offset_high << 32 | offset_low,
(u64)len_high << 32 | len_low, advice);
}
+
+long sys_switch_endian(void)
+{
+ struct thread_info *ti;
+
+ current->thread.regs->msr ^= MSR_LE;
+
+ /*
+ * Set TIF_RESTOREALL so that r3 isn't clobbered on return to
+ * userspace. That also has the effect of restoring the non-volatile
+ * GPRs, so we saved them on the way in here.
+ */
+ ti = current_thread_info();
+ ti->flags |= _TIF_RESTOREALL;
+
+ return 0;
+}
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S
index 7ab5d434e2ee..4d6b1d3a747f 100644
--- a/arch/powerpc/kernel/systbl.S
+++ b/arch/powerpc/kernel/systbl.S
@@ -22,6 +22,7 @@
#define PPC_SYS(func) .llong DOTSYM(ppc_##func),DOTSYM(ppc_##func)
#define OLDSYS(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(sys_ni_syscall)
#define SYS32ONLY(func) .llong DOTSYM(sys_ni_syscall),DOTSYM(compat_sys_##func)
+#define PPC64ONLY(func) .llong DOTSYM(ppc_##func),DOTSYM(sys_ni_syscall)
#define SYSX(f, f3264, f32) .llong DOTSYM(f),DOTSYM(f3264)
#else
#define SYSCALL(func) .long sys_##func
@@ -29,6 +30,7 @@
#define PPC_SYS(func) .long ppc_##func
#define OLDSYS(func) .long sys_##func
#define SYS32ONLY(func) .long sys_##func
+#define PPC64ONLY(func) .long sys_ni_syscall
#define SYSX(f, f3264, f32) .long f32
#endif
#define SYSCALL_SPU(func) SYSCALL(func)
diff --git a/arch/powerpc/kernel/systbl_chk.c b/arch/powerpc/kernel/systbl_chk.c
index 238aa63ced8f..2384129f5893 100644
--- a/arch/powerpc/kernel/systbl_chk.c
+++ b/arch/powerpc/kernel/systbl_chk.c
@@ -21,9 +21,11 @@
#ifdef CONFIG_PPC64
#define OLDSYS(func) -1
#define SYS32ONLY(func) -1
+#define PPC64ONLY(func) __NR_##func
#else
#define OLDSYS(func) __NR_old##func
#define SYS32ONLY(func) __NR_##func
+#define PPC64ONLY(func) -1
#endif
#define SYSX(f, f3264, f32) -1
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 2a324f4cb1b9..5754b226da7e 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -152,9 +152,9 @@ _GLOBAL(tm_reclaim)
addi r7, r3, THREAD_TRANSACT_VRSTATE
SAVE_32VRS(0, r6, r7) /* r6 scratch, r7 transact vr state */
- mfvscr vr0
+ mfvscr v0
li r6, VRSTATE_VSCR
- stvx vr0, r7, r6
+ stvx v0, r7, r6
dont_backup_vec:
mfspr r0, SPRN_VRSAVE
std r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -359,8 +359,8 @@ _GLOBAL(__tm_recheckpoint)
addi r8, r3, THREAD_VRSTATE
li r5, VRSTATE_VSCR
- lvx vr0, r8, r5
- mtvscr vr0
+ lvx v0, r8, r5
+ mtvscr v0
REST_32VRS(0, r5, r8) /* r5 scratch, r8 ptr */
dont_restore_vec:
ld r5, THREAD_VRSAVE(r3)
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index b7aa07279a63..7cc38b5b58bc 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -46,8 +46,6 @@ void __init udbg_early_init(void)
#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
/* Maple real mode debug */
udbg_init_maple_realmode();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT)
- udbg_init_debug_beat();
#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
udbg_init_pas_realmode();
#elif defined(CONFIG_PPC_EARLY_DEBUG_BOOTX)
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S
index 74f8050518d6..f5c80d567d8d 100644
--- a/arch/powerpc/kernel/vector.S
+++ b/arch/powerpc/kernel/vector.S
@@ -24,8 +24,8 @@ _GLOBAL(do_load_up_transact_altivec)
stw r4,THREAD_USED_VR(r3)
li r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
- lvx vr0,r10,r3
- mtvscr vr0
+ lvx v0,r10,r3
+ mtvscr v0
addi r10,r3,THREAD_TRANSACT_VRSTATE
REST_32VRS(0,r4,r10)
@@ -52,8 +52,8 @@ _GLOBAL(vec_enable)
*/
_GLOBAL(load_vr_state)
li r4,VRSTATE_VSCR
- lvx vr0,r4,r3
- mtvscr vr0
+ lvx v0,r4,r3
+ mtvscr v0
REST_32VRS(0,r4,r3)
blr
@@ -63,9 +63,9 @@ _GLOBAL(load_vr_state)
*/
_GLOBAL(store_vr_state)
SAVE_32VRS(0, r4, r3)
- mfvscr vr0
+ mfvscr v0
li r4, VRSTATE_VSCR
- stvx vr0, r4, r3
+ stvx v0, r4, r3
blr
/*
@@ -104,9 +104,9 @@ _GLOBAL(load_up_altivec)
addi r4,r4,THREAD
addi r6,r4,THREAD_VRSTATE
SAVE_32VRS(0,r5,r6)
- mfvscr vr0
+ mfvscr v0
li r10,VRSTATE_VSCR
- stvx vr0,r10,r6
+ stvx v0,r10,r6
/* Disable VMX for last_task_used_altivec */
PPC_LL r5,PT_REGS(r4)
toreal(r5)
@@ -142,8 +142,8 @@ _GLOBAL(load_up_altivec)
li r4,1
li r10,VRSTATE_VSCR
stw r4,THREAD_USED_VR(r5)
- lvx vr0,r10,r6
- mtvscr vr0
+ lvx v0,r10,r6
+ mtvscr v0
REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
/* Update last_task_used_altivec to 'current' */
@@ -186,9 +186,9 @@ _GLOBAL(giveup_altivec)
addi r7,r3,THREAD_VRSTATE
2: PPC_LCMPI 0,r5,0
SAVE_32VRS(0,r4,r7)
- mfvscr vr0
+ mfvscr v0
li r4,VRSTATE_VSCR
- stvx vr0,r4,r7
+ stvx v0,r4,r7
beq 1f
PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 60081bd75847..93b5f5c9b445 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -84,7 +84,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
}
if (dsisr & DSISR_MC_TLB_MULTI) {
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
dsisr &= ~DSISR_MC_TLB_MULTI;
}
/* Any other errors we don't understand? */
@@ -102,7 +102,7 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
break;
case SRR1_MC_IFETCH_TLBMULTI:
if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
- cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET_LPID);
+ cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_LPID);
break;
default:
handled = 0;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 24bfe401373e..91bbc845ac66 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -720,7 +720,7 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
return;
}
- if (vcpu->arch.mmio_is_bigendian) {
+ if (!vcpu->arch.mmio_host_swabbed) {
switch (run->mmio.len) {
case 8: gpr = *(u64 *)run->mmio.data; break;
case 4: gpr = *(u32 *)run->mmio.data; break;
@@ -728,10 +728,10 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
case 1: gpr = *(u8 *)run->mmio.data; break;
}
} else {
- /* Convert BE data from userland back to LE. */
switch (run->mmio.len) {
- case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
- case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
+ case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
+ case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
+ case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
case 1: gpr = *(u8 *)run->mmio.data; break;
}
}
@@ -780,14 +780,13 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
int is_default_endian)
{
int idx, ret;
- int is_bigendian;
+ bool host_swabbed;
+ /* Pity C doesn't have a logical XOR operator */
if (kvmppc_need_byteswap(vcpu)) {
- /* Default endianness is "little endian". */
- is_bigendian = !is_default_endian;
+ host_swabbed = is_default_endian;
} else {
- /* Default endianness is "big endian". */
- is_bigendian = is_default_endian;
+ host_swabbed = !is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) {
@@ -800,7 +799,7 @@ int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
run->mmio.is_write = 0;
vcpu->arch.io_gpr = rt;
- vcpu->arch.mmio_is_bigendian = is_bigendian;
+ vcpu->arch.mmio_host_swabbed = host_swabbed;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 0;
vcpu->arch.mmio_sign_extend = 0;
@@ -840,14 +839,13 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
{
void *data = run->mmio.data;
int idx, ret;
- int is_bigendian;
+ bool host_swabbed;
+ /* Pity C doesn't have a logical XOR operator */
if (kvmppc_need_byteswap(vcpu)) {
- /* Default endianness is "little endian". */
- is_bigendian = !is_default_endian;
+ host_swabbed = is_default_endian;
} else {
- /* Default endianness is "big endian". */
- is_bigendian = is_default_endian;
+ host_swabbed = !is_default_endian;
}
if (bytes > sizeof(run->mmio.data)) {
@@ -862,7 +860,7 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
vcpu->mmio_is_write = 1;
/* Store the value at the lowest bytes in 'data'. */
- if (is_bigendian) {
+ if (!host_swabbed) {
switch (bytes) {
case 8: *(u64 *)data = val; break;
case 4: *(u32 *)data = val; break;
@@ -870,11 +868,11 @@ int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
case 1: *(u8 *)data = val; break;
}
} else {
- /* Store LE value into 'data'. */
switch (bytes) {
- case 4: st_le32(data, val); break;
- case 2: st_le16(data, val); break;
- case 1: *(u8 *)data = val; break;
+ case 8: *(u64 *)data = swab64(val); break;
+ case 4: *(u32 *)data = swab32(val); break;
+ case 2: *(u16 *)data = swab16(val); break;
+ case 1: *(u8 *)data = val; break;
}
}
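
The kvm/powerpc.c changes above drop the old ld_le*/st_le* helpers in favour of plain swab*() calls and rephrase the test as "does the host need to swap?", which, as the added comment notes, is the logical XOR of kvmppc_need_byteswap() and is_default_endian. A small user-space sketch of that decision plus the 32-bit store path; the *_demo names are invented, and kvmppc_need_byteswap_demo() is a hard-coded stub standing in for the real vcpu state.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static inline uint32_t swab32_demo(uint32_t v)
{
        return __builtin_bswap32(v);    /* stands in for the kernel's swab32() */
}

/* Stub: in KVM this asks whether the guest's current endianness differs
 * from the host's. Hard-coded here purely for illustration. */
static bool kvmppc_need_byteswap_demo(void) { return true; }

/* Mirrors the rewritten logic: host_swabbed is true when exactly one of
 * "guest needs byteswap" and "access uses default endianness" holds. */
static bool host_swabbed(bool is_default_endian)
{
        return kvmppc_need_byteswap_demo() ? is_default_endian
                                           : !is_default_endian;
}

static void store32(void *data, uint32_t val, bool is_default_endian)
{
        *(uint32_t *)data = host_swabbed(is_default_endian) ?
                            swab32_demo(val) : val;
}

int main(void)
{
        uint32_t buf;

        store32(&buf, 0x11223344, true);
        printf("stored 0x%08x\n", buf);
        return 0;
}
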
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index 4a6c2cf890d9..60b0b3fc8fc1 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -10,7 +10,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
void *p;
- if (mem_init_done)
+ if (slab_is_available())
p = kzalloc(size, mask);
else {
p = memblock_virt_alloc(size, 0);
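
zalloc_maybe_bootmem() above is one of several places in this series where the home-grown mem_init_done flag gives way to the generic slab_is_available() check; the question is the same either way: can the slab allocator be used yet, or must the early memblock allocator be used instead? A sketch of the pattern with stubbed-out allocators and a fake availability flag; all *_demo names are invented for illustration.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for the two allocators; in the kernel these are kzalloc()
 * (slab) and memblock_virt_alloc() (early boot). */
static void *slab_zalloc(size_t size)    { return calloc(1, size); }
static void *bootmem_zalloc(size_t size) { return calloc(1, size); }

/* Stub for slab_is_available(); flips to true once "boot" has finished. */
static bool slab_up;
static bool slab_is_available_demo(void) { return slab_up; }

/* Same shape as zalloc_maybe_bootmem() in the hunk above: one helper
 * that works both before and after the slab allocator comes up. */
static void *zalloc_maybe_bootmem_demo(size_t size)
{
        if (slab_is_available_demo())
                return slab_zalloc(size);
        return bootmem_zalloc(size);
}

int main(void)
{
        void *early = zalloc_maybe_bootmem_demo(64);    /* memblock path */

        slab_up = true;
        void *late = zalloc_maybe_bootmem_demo(64);     /* slab path */

        printf("early=%p late=%p\n", early, late);
        free(early);
        free(late);
        return 0;
}
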
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S
index 55f19f9fd708..6813f80d1eec 100644
--- a/arch/powerpc/lib/copy_32.S
+++ b/arch/powerpc/lib/copy_32.S
@@ -69,54 +69,6 @@ CACHELINE_BYTES = L1_CACHE_BYTES
LG_CACHELINE_BYTES = L1_CACHE_SHIFT
CACHELINE_MASK = (L1_CACHE_BYTES-1)
-/*
- * Use dcbz on the complete cache lines in the destination
- * to set them to zero. This requires that the destination
- * area is cacheable. -- paulus
- */
-_GLOBAL(cacheable_memzero)
- mr r5,r4
- li r4,0
- addi r6,r3,-4
- cmplwi 0,r5,4
- blt 7f
- stwu r4,4(r6)
- beqlr
- andi. r0,r6,3
- add r5,r0,r5
- subf r6,r0,r6
- clrlwi r7,r6,32-LG_CACHELINE_BYTES
- add r8,r7,r5
- srwi r9,r8,LG_CACHELINE_BYTES
- addic. r9,r9,-1 /* total number of complete cachelines */
- ble 2f
- xori r0,r7,CACHELINE_MASK & ~3
- srwi. r0,r0,2
- beq 3f
- mtctr r0
-4: stwu r4,4(r6)
- bdnz 4b
-3: mtctr r9
- li r7,4
-10: dcbz r7,r6
- addi r6,r6,CACHELINE_BYTES
- bdnz 10b
- clrlwi r5,r8,32-LG_CACHELINE_BYTES
- addi r5,r5,4
-2: srwi r0,r5,2
- mtctr r0
- bdz 6f
-1: stwu r4,4(r6)
- bdnz 1b
-6: andi. r5,r5,3
-7: cmpwi 0,r5,0
- beqlr
- mtctr r5
- addi r6,r6,3
-8: stbu r4,1(r6)
- bdnz 8b
- blr
-
_GLOBAL(memset)
rlwimi r4,r4,8,16,23
rlwimi r4,r4,16,0,15
@@ -142,85 +94,6 @@ _GLOBAL(memset)
bdnz 8b
blr
-/*
- * This version uses dcbz on the complete cache lines in the
- * destination area to reduce memory traffic. This requires that
- * the destination area is cacheable.
- * We only use this version if the source and dest don't overlap.
- * -- paulus.
- */
-_GLOBAL(cacheable_memcpy)
- add r7,r3,r5 /* test if the src & dst overlap */
- add r8,r4,r5
- cmplw 0,r4,r7
- cmplw 1,r3,r8
- crand 0,0,4 /* cr0.lt &= cr1.lt */
- blt memcpy /* if regions overlap */
-
- addi r4,r4,-4
- addi r6,r3,-4
- neg r0,r3
- andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
- beq 58f
-
- cmplw 0,r5,r0 /* is this more than total to do? */
- blt 63f /* if not much to do */
- andi. r8,r0,3 /* get it word-aligned first */
- subf r5,r0,r5
- mtctr r8
- beq+ 61f
-70: lbz r9,4(r4) /* do some bytes */
- stb r9,4(r6)
- addi r4,r4,1
- addi r6,r6,1
- bdnz 70b
-61: srwi. r0,r0,2
- mtctr r0
- beq 58f
-72: lwzu r9,4(r4) /* do some words */
- stwu r9,4(r6)
- bdnz 72b
-
-58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
- clrlwi r5,r5,32-LG_CACHELINE_BYTES
- li r11,4
- mtctr r0
- beq 63f
-53:
- dcbz r11,r6
- COPY_16_BYTES
-#if L1_CACHE_BYTES >= 32
- COPY_16_BYTES
-#if L1_CACHE_BYTES >= 64
- COPY_16_BYTES
- COPY_16_BYTES
-#if L1_CACHE_BYTES >= 128
- COPY_16_BYTES
- COPY_16_BYTES
- COPY_16_BYTES
- COPY_16_BYTES
-#endif
-#endif
-#endif
- bdnz 53b
-
-63: srwi. r0,r5,2
- mtctr r0
- beq 64f
-30: lwzu r0,4(r4)
- stwu r0,4(r6)
- bdnz 30b
-
-64: andi. r0,r5,3
- mtctr r0
- beq+ 65f
-40: lbz r0,4(r4)
- stb r0,4(r6)
- addi r4,r4,1
- addi r6,r6,1
- bdnz 40b
-65: blr
-
_GLOBAL(memmove)
cmplw 0,r3,r4
bgt backwards_memcpy
diff --git a/arch/powerpc/lib/copypage_power7.S b/arch/powerpc/lib/copypage_power7.S
index d7dafb3777ac..a84d333ecb09 100644
--- a/arch/powerpc/lib/copypage_power7.S
+++ b/arch/powerpc/lib/copypage_power7.S
@@ -83,23 +83,23 @@ _GLOBAL(copypage_power7)
li r12,112
.align 5
-1: lvx vr7,r0,r4
- lvx vr6,r4,r6
- lvx vr5,r4,r7
- lvx vr4,r4,r8
- lvx vr3,r4,r9
- lvx vr2,r4,r10
- lvx vr1,r4,r11
- lvx vr0,r4,r12
+1: lvx v7,r0,r4
+ lvx v6,r4,r6
+ lvx v5,r4,r7
+ lvx v4,r4,r8
+ lvx v3,r4,r9
+ lvx v2,r4,r10
+ lvx v1,r4,r11
+ lvx v0,r4,r12
addi r4,r4,128
- stvx vr7,r0,r3
- stvx vr6,r3,r6
- stvx vr5,r3,r7
- stvx vr4,r3,r8
- stvx vr3,r3,r9
- stvx vr2,r3,r10
- stvx vr1,r3,r11
- stvx vr0,r3,r12
+ stvx v7,r0,r3
+ stvx v6,r3,r6
+ stvx v5,r3,r7
+ stvx v4,r3,r8
+ stvx v3,r3,r9
+ stvx v2,r3,r10
+ stvx v1,r3,r11
+ stvx v0,r3,r12
addi r3,r3,128
bdnz 1b
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index 92ee840529bc..da0c568d18c4 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -388,29 +388,29 @@ err3; std r0,0(r3)
li r11,48
bf cr7*4+3,5f
-err3; lvx vr1,r0,r4
+err3; lvx v1,r0,r4
addi r4,r4,16
-err3; stvx vr1,r0,r3
+err3; stvx v1,r0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
-err3; lvx vr1,r0,r4
-err3; lvx vr0,r4,r9
+err3; lvx v1,r0,r4
+err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx vr1,r0,r3
-err3; stvx vr0,r3,r9
+err3; stvx v1,r0,r3
+err3; stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx vr3,r0,r4
-err3; lvx vr2,r4,r9
-err3; lvx vr1,r4,r10
-err3; lvx vr0,r4,r11
+err3; lvx v3,r0,r4
+err3; lvx v2,r4,r9
+err3; lvx v1,r4,r10
+err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx vr3,r0,r3
-err3; stvx vr2,r3,r9
-err3; stvx vr1,r3,r10
-err3; stvx vr0,r3,r11
+err3; stvx v3,r0,r3
+err3; stvx v2,r3,r9
+err3; stvx v1,r3,r10
+err3; stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -433,23 +433,23 @@ err3; stvx vr0,r3,r11
*/
.align 5
8:
-err4; lvx vr7,r0,r4
-err4; lvx vr6,r4,r9
-err4; lvx vr5,r4,r10
-err4; lvx vr4,r4,r11
-err4; lvx vr3,r4,r12
-err4; lvx vr2,r4,r14
-err4; lvx vr1,r4,r15
-err4; lvx vr0,r4,r16
+err4; lvx v7,r0,r4
+err4; lvx v6,r4,r9
+err4; lvx v5,r4,r10
+err4; lvx v4,r4,r11
+err4; lvx v3,r4,r12
+err4; lvx v2,r4,r14
+err4; lvx v1,r4,r15
+err4; lvx v0,r4,r16
addi r4,r4,128
-err4; stvx vr7,r0,r3
-err4; stvx vr6,r3,r9
-err4; stvx vr5,r3,r10
-err4; stvx vr4,r3,r11
-err4; stvx vr3,r3,r12
-err4; stvx vr2,r3,r14
-err4; stvx vr1,r3,r15
-err4; stvx vr0,r3,r16
+err4; stvx v7,r0,r3
+err4; stvx v6,r3,r9
+err4; stvx v5,r3,r10
+err4; stvx v4,r3,r11
+err4; stvx v3,r3,r12
+err4; stvx v2,r3,r14
+err4; stvx v1,r3,r15
+err4; stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
@@ -463,29 +463,29 @@ err4; stvx vr0,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx vr3,r0,r4
-err3; lvx vr2,r4,r9
-err3; lvx vr1,r4,r10
-err3; lvx vr0,r4,r11
+err3; lvx v3,r0,r4
+err3; lvx v2,r4,r9
+err3; lvx v1,r4,r10
+err3; lvx v0,r4,r11
addi r4,r4,64
-err3; stvx vr3,r0,r3
-err3; stvx vr2,r3,r9
-err3; stvx vr1,r3,r10
-err3; stvx vr0,r3,r11
+err3; stvx v3,r0,r3
+err3; stvx v2,r3,r9
+err3; stvx v1,r3,r10
+err3; stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx vr1,r0,r4
-err3; lvx vr0,r4,r9
+err3; lvx v1,r0,r4
+err3; lvx v0,r4,r9
addi r4,r4,32
-err3; stvx vr1,r0,r3
-err3; stvx vr0,r3,r9
+err3; stvx v1,r0,r3
+err3; stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx vr1,r0,r4
+err3; lvx v1,r0,r4
addi r4,r4,16
-err3; stvx vr1,r0,r3
+err3; stvx v1,r0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -560,42 +560,42 @@ err3; stw r7,4(r3)
li r10,32
li r11,48
- LVS(vr16,0,r4) /* Setup permute control vector */
-err3; lvx vr0,0,r4
+ LVS(v16,0,r4) /* Setup permute control vector */
+err3; lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx vr8,r0,r3
+err3; stvx v8,r0,r3
addi r3,r3,16
- vor vr0,vr1,vr1
+ vor v0,v1,v1
5: bf cr7*4+2,6f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
-err3; lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+err3; lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
-err3; lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
-err3; lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
-err3; lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
-err3; lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+err3; lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+err3; lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+err3; lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+err3; lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
-err3; stvx vr10,r3,r10
-err3; stvx vr11,r3,r11
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
+err3; stvx v10,r3,r10
+err3; stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -618,31 +618,31 @@ err3; stvx vr11,r3,r11
*/
.align 5
8:
-err4; lvx vr7,r0,r4
- VPERM(vr8,vr0,vr7,vr16)
-err4; lvx vr6,r4,r9
- VPERM(vr9,vr7,vr6,vr16)
-err4; lvx vr5,r4,r10
- VPERM(vr10,vr6,vr5,vr16)
-err4; lvx vr4,r4,r11
- VPERM(vr11,vr5,vr4,vr16)
-err4; lvx vr3,r4,r12
- VPERM(vr12,vr4,vr3,vr16)
-err4; lvx vr2,r4,r14
- VPERM(vr13,vr3,vr2,vr16)
-err4; lvx vr1,r4,r15
- VPERM(vr14,vr2,vr1,vr16)
-err4; lvx vr0,r4,r16
- VPERM(vr15,vr1,vr0,vr16)
+err4; lvx v7,r0,r4
+ VPERM(v8,v0,v7,v16)
+err4; lvx v6,r4,r9
+ VPERM(v9,v7,v6,v16)
+err4; lvx v5,r4,r10
+ VPERM(v10,v6,v5,v16)
+err4; lvx v4,r4,r11
+ VPERM(v11,v5,v4,v16)
+err4; lvx v3,r4,r12
+ VPERM(v12,v4,v3,v16)
+err4; lvx v2,r4,r14
+ VPERM(v13,v3,v2,v16)
+err4; lvx v1,r4,r15
+ VPERM(v14,v2,v1,v16)
+err4; lvx v0,r4,r16
+ VPERM(v15,v1,v0,v16)
addi r4,r4,128
-err4; stvx vr8,r0,r3
-err4; stvx vr9,r3,r9
-err4; stvx vr10,r3,r10
-err4; stvx vr11,r3,r11
-err4; stvx vr12,r3,r12
-err4; stvx vr13,r3,r14
-err4; stvx vr14,r3,r15
-err4; stvx vr15,r3,r16
+err4; stvx v8,r0,r3
+err4; stvx v9,r3,r9
+err4; stvx v10,r3,r10
+err4; stvx v11,r3,r11
+err4; stvx v12,r3,r12
+err4; stvx v13,r3,r14
+err4; stvx v14,r3,r15
+err4; stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
@@ -656,36 +656,36 @@ err4; stvx vr15,r3,r16
mtocrf 0x01,r6
bf cr7*4+1,9f
-err3; lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
-err3; lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
-err3; lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
-err3; lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+err3; lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+err3; lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+err3; lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+err3; lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
-err3; stvx vr10,r3,r10
-err3; stvx vr11,r3,r11
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
+err3; stvx v10,r3,r10
+err3; stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
-err3; lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+err3; lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
-err3; stvx vr8,r0,r3
-err3; stvx vr9,r3,r9
+err3; stvx v8,r0,r3
+err3; stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
-err3; lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+err3; lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
-err3; stvx vr8,r0,r3
+err3; stvx v8,r0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/crtsavres.S b/arch/powerpc/lib/crtsavres.S
index a5b30c71a8d3..18af0b3d3eb2 100644
--- a/arch/powerpc/lib/crtsavres.S
+++ b/arch/powerpc/lib/crtsavres.S
@@ -236,78 +236,78 @@ _GLOBAL(_rest32gpr_31_x)
_GLOBAL(_savevr_20)
li r11,-192
- stvx vr20,r11,r0
+ stvx v20,r11,r0
_GLOBAL(_savevr_21)
li r11,-176
- stvx vr21,r11,r0
+ stvx v21,r11,r0
_GLOBAL(_savevr_22)
li r11,-160
- stvx vr22,r11,r0
+ stvx v22,r11,r0
_GLOBAL(_savevr_23)
li r11,-144
- stvx vr23,r11,r0
+ stvx v23,r11,r0
_GLOBAL(_savevr_24)
li r11,-128
- stvx vr24,r11,r0
+ stvx v24,r11,r0
_GLOBAL(_savevr_25)
li r11,-112
- stvx vr25,r11,r0
+ stvx v25,r11,r0
_GLOBAL(_savevr_26)
li r11,-96
- stvx vr26,r11,r0
+ stvx v26,r11,r0
_GLOBAL(_savevr_27)
li r11,-80
- stvx vr27,r11,r0
+ stvx v27,r11,r0
_GLOBAL(_savevr_28)
li r11,-64
- stvx vr28,r11,r0
+ stvx v28,r11,r0
_GLOBAL(_savevr_29)
li r11,-48
- stvx vr29,r11,r0
+ stvx v29,r11,r0
_GLOBAL(_savevr_30)
li r11,-32
- stvx vr30,r11,r0
+ stvx v30,r11,r0
_GLOBAL(_savevr_31)
li r11,-16
- stvx vr31,r11,r0
+ stvx v31,r11,r0
blr
_GLOBAL(_restvr_20)
li r11,-192
- lvx vr20,r11,r0
+ lvx v20,r11,r0
_GLOBAL(_restvr_21)
li r11,-176
- lvx vr21,r11,r0
+ lvx v21,r11,r0
_GLOBAL(_restvr_22)
li r11,-160
- lvx vr22,r11,r0
+ lvx v22,r11,r0
_GLOBAL(_restvr_23)
li r11,-144
- lvx vr23,r11,r0
+ lvx v23,r11,r0
_GLOBAL(_restvr_24)
li r11,-128
- lvx vr24,r11,r0
+ lvx v24,r11,r0
_GLOBAL(_restvr_25)
li r11,-112
- lvx vr25,r11,r0
+ lvx v25,r11,r0
_GLOBAL(_restvr_26)
li r11,-96
- lvx vr26,r11,r0
+ lvx v26,r11,r0
_GLOBAL(_restvr_27)
li r11,-80
- lvx vr27,r11,r0
+ lvx v27,r11,r0
_GLOBAL(_restvr_28)
li r11,-64
- lvx vr28,r11,r0
+ lvx v28,r11,r0
_GLOBAL(_restvr_29)
li r11,-48
- lvx vr29,r11,r0
+ lvx v29,r11,r0
_GLOBAL(_restvr_30)
li r11,-32
- lvx vr30,r11,r0
+ lvx v30,r11,r0
_GLOBAL(_restvr_31)
li r11,-16
- lvx vr31,r11,r0
+ lvx v31,r11,r0
blr
#endif /* CONFIG_ALTIVEC */
@@ -443,101 +443,101 @@ _restgpr0_31:
.globl _savevr_20
_savevr_20:
li r12,-192
- stvx vr20,r12,r0
+ stvx v20,r12,r0
.globl _savevr_21
_savevr_21:
li r12,-176
- stvx vr21,r12,r0
+ stvx v21,r12,r0
.globl _savevr_22
_savevr_22:
li r12,-160
- stvx vr22,r12,r0
+ stvx v22,r12,r0
.globl _savevr_23
_savevr_23:
li r12,-144
- stvx vr23,r12,r0
+ stvx v23,r12,r0
.globl _savevr_24
_savevr_24:
li r12,-128
- stvx vr24,r12,r0
+ stvx v24,r12,r0
.globl _savevr_25
_savevr_25:
li r12,-112
- stvx vr25,r12,r0
+ stvx v25,r12,r0
.globl _savevr_26
_savevr_26:
li r12,-96
- stvx vr26,r12,r0
+ stvx v26,r12,r0
.globl _savevr_27
_savevr_27:
li r12,-80
- stvx vr27,r12,r0
+ stvx v27,r12,r0
.globl _savevr_28
_savevr_28:
li r12,-64
- stvx vr28,r12,r0
+ stvx v28,r12,r0
.globl _savevr_29
_savevr_29:
li r12,-48
- stvx vr29,r12,r0
+ stvx v29,r12,r0
.globl _savevr_30
_savevr_30:
li r12,-32
- stvx vr30,r12,r0
+ stvx v30,r12,r0
.globl _savevr_31
_savevr_31:
li r12,-16
- stvx vr31,r12,r0
+ stvx v31,r12,r0
blr
.globl _restvr_20
_restvr_20:
li r12,-192
- lvx vr20,r12,r0
+ lvx v20,r12,r0
.globl _restvr_21
_restvr_21:
li r12,-176
- lvx vr21,r12,r0
+ lvx v21,r12,r0
.globl _restvr_22
_restvr_22:
li r12,-160
- lvx vr22,r12,r0
+ lvx v22,r12,r0
.globl _restvr_23
_restvr_23:
li r12,-144
- lvx vr23,r12,r0
+ lvx v23,r12,r0
.globl _restvr_24
_restvr_24:
li r12,-128
- lvx vr24,r12,r0
+ lvx v24,r12,r0
.globl _restvr_25
_restvr_25:
li r12,-112
- lvx vr25,r12,r0
+ lvx v25,r12,r0
.globl _restvr_26
_restvr_26:
li r12,-96
- lvx vr26,r12,r0
+ lvx v26,r12,r0
.globl _restvr_27
_restvr_27:
li r12,-80
- lvx vr27,r12,r0
+ lvx v27,r12,r0
.globl _restvr_28
_restvr_28:
li r12,-64
- lvx vr28,r12,r0
+ lvx v28,r12,r0
.globl _restvr_29
_restvr_29:
li r12,-48
- lvx vr29,r12,r0
+ lvx v29,r12,r0
.globl _restvr_30
_restvr_30:
li r12,-32
- lvx vr30,r12,r0
+ lvx v30,r12,r0
.globl _restvr_31
_restvr_31:
li r12,-16
- lvx vr31,r12,r0
+ lvx v31,r12,r0
blr
#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S
index 85aec08ab234..5d0cdbfbe3f2 100644
--- a/arch/powerpc/lib/ldstfp.S
+++ b/arch/powerpc/lib/ldstfp.S
@@ -184,16 +184,16 @@ _GLOBAL(do_stfd)
extab 2b,3b
#ifdef CONFIG_ALTIVEC
-/* Get the contents of vrN into vr0; N is in r3. */
+/* Get the contents of vrN into v0; N is in r3. */
_GLOBAL(get_vr)
mflr r0
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* vr0 is already in vr0 */
+ blr /* v0 is already in v0 */
nop
reg = 1
.rept 31
- vor vr0,reg,reg /* assembler doesn't know vmr? */
+ vor v0,reg,reg /* assembler doesn't know vmr? */
blr
reg = reg + 1
.endr
@@ -203,16 +203,16 @@ reg = reg + 1
mtlr r0
bctr
-/* Put the contents of vr0 into vrN; N is in r3. */
+/* Put the contents of v0 into vrN; N is in r3. */
_GLOBAL(put_vr)
mflr r0
rlwinm r3,r3,3,0xf8
bcl 20,31,1f
- blr /* vr0 is already in vr0 */
+ blr /* v0 is already in v0 */
nop
reg = 1
.rept 31
- vor reg,vr0,vr0
+ vor reg,v0,v0
blr
reg = reg + 1
.endr
@@ -234,13 +234,13 @@ _GLOBAL(do_lvx)
MTMSRD(r7)
isync
beq cr7,1f
- stvx vr0,r1,r8
+ stvx v0,r1,r8
1: li r9,-EFAULT
-2: lvx vr0,0,r4
+2: lvx v0,0,r4
li r9,0
3: beq cr7,4f
bl put_vr
- lvx vr0,r1,r8
+ lvx v0,r1,r8
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
@@ -262,13 +262,13 @@ _GLOBAL(do_stvx)
MTMSRD(r7)
isync
beq cr7,1f
- stvx vr0,r1,r8
+ stvx v0,r1,r8
bl get_vr
1: li r9,-EFAULT
-2: stvx vr0,0,r4
+2: stvx v0,0,r4
li r9,0
3: beq cr7,4f
- lvx vr0,r1,r8
+ lvx v0,r1,r8
4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1)
mtlr r0
MTMSRD(r6)
@@ -280,12 +280,12 @@ _GLOBAL(do_stvx)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
-/* Get the contents of vsrN into vsr0; N is in r3. */
+/* Get the contents of vsN into vs0; N is in r3. */
_GLOBAL(get_vsr)
mflr r0
rlwinm r3,r3,3,0x1f8
bcl 20,31,1f
- blr /* vsr0 is already in vsr0 */
+ blr /* vs0 is already in vs0 */
nop
reg = 1
.rept 63
@@ -299,12 +299,12 @@ reg = reg + 1
mtlr r0
bctr
-/* Put the contents of vsr0 into vsrN; N is in r3. */
+/* Put the contents of vs0 into vsN; N is in r3. */
_GLOBAL(put_vsr)
mflr r0
rlwinm r3,r3,3,0x1f8
bcl 20,31,1f
- blr /* vr0 is already in vr0 */
+ blr /* v0 is already in v0 */
nop
reg = 1
.rept 63
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index 170a0346f756..f7deebdf3365 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -41,6 +41,7 @@ void __spin_yield(arch_spinlock_t *lock)
plpar_hcall_norets(H_CONFER,
get_hard_smp_processor_id(holder_cpu), yield_count);
}
+EXPORT_SYMBOL_GPL(__spin_yield);
/*
* Waiting for a read lock or a write lock on a rwlock...
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 0830587df16e..786234fd4e91 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -321,29 +321,29 @@ _GLOBAL(memcpy_power7)
li r11,48
bf cr7*4+3,5f
- lvx vr1,r0,r4
+ lvx v1,r0,r4
addi r4,r4,16
- stvx vr1,r0,r3
+ stvx v1,r0,r3
addi r3,r3,16
5: bf cr7*4+2,6f
- lvx vr1,r0,r4
- lvx vr0,r4,r9
+ lvx v1,r0,r4
+ lvx v0,r4,r9
addi r4,r4,32
- stvx vr1,r0,r3
- stvx vr0,r3,r9
+ stvx v1,r0,r3
+ stvx v0,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx vr3,r0,r4
- lvx vr2,r4,r9
- lvx vr1,r4,r10
- lvx vr0,r4,r11
+ lvx v3,r0,r4
+ lvx v2,r4,r9
+ lvx v1,r4,r10
+ lvx v0,r4,r11
addi r4,r4,64
- stvx vr3,r0,r3
- stvx vr2,r3,r9
- stvx vr1,r3,r10
- stvx vr0,r3,r11
+ stvx v3,r0,r3
+ stvx v2,r3,r9
+ stvx v1,r3,r10
+ stvx v0,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -366,23 +366,23 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx vr7,r0,r4
- lvx vr6,r4,r9
- lvx vr5,r4,r10
- lvx vr4,r4,r11
- lvx vr3,r4,r12
- lvx vr2,r4,r14
- lvx vr1,r4,r15
- lvx vr0,r4,r16
+ lvx v7,r0,r4
+ lvx v6,r4,r9
+ lvx v5,r4,r10
+ lvx v4,r4,r11
+ lvx v3,r4,r12
+ lvx v2,r4,r14
+ lvx v1,r4,r15
+ lvx v0,r4,r16
addi r4,r4,128
- stvx vr7,r0,r3
- stvx vr6,r3,r9
- stvx vr5,r3,r10
- stvx vr4,r3,r11
- stvx vr3,r3,r12
- stvx vr2,r3,r14
- stvx vr1,r3,r15
- stvx vr0,r3,r16
+ stvx v7,r0,r3
+ stvx v6,r3,r9
+ stvx v5,r3,r10
+ stvx v4,r3,r11
+ stvx v3,r3,r12
+ stvx v2,r3,r14
+ stvx v1,r3,r15
+ stvx v0,r3,r16
addi r3,r3,128
bdnz 8b
@@ -396,29 +396,29 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx vr3,r0,r4
- lvx vr2,r4,r9
- lvx vr1,r4,r10
- lvx vr0,r4,r11
+ lvx v3,r0,r4
+ lvx v2,r4,r9
+ lvx v1,r4,r10
+ lvx v0,r4,r11
addi r4,r4,64
- stvx vr3,r0,r3
- stvx vr2,r3,r9
- stvx vr1,r3,r10
- stvx vr0,r3,r11
+ stvx v3,r0,r3
+ stvx v2,r3,r9
+ stvx v1,r3,r10
+ stvx v0,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx vr1,r0,r4
- lvx vr0,r4,r9
+ lvx v1,r0,r4
+ lvx v0,r4,r9
addi r4,r4,32
- stvx vr1,r0,r3
- stvx vr0,r3,r9
+ stvx v1,r0,r3
+ stvx v0,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx vr1,r0,r4
+ lvx v1,r0,r4
addi r4,r4,16
- stvx vr1,r0,r3
+ stvx v1,r0,r3
addi r3,r3,16
/* Up to 15B to go */
@@ -494,42 +494,42 @@ _GLOBAL(memcpy_power7)
li r10,32
li r11,48
- LVS(vr16,0,r4) /* Setup permute control vector */
- lvx vr0,0,r4
+ LVS(v16,0,r4) /* Setup permute control vector */
+ lvx v0,0,r4
addi r4,r4,16
bf cr7*4+3,5f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx vr8,r0,r3
+ stvx v8,r0,r3
addi r3,r3,16
- vor vr0,vr1,vr1
+ vor v0,v1,v1
5: bf cr7*4+2,6f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
- lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+ lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx vr8,r0,r3
- stvx vr9,r3,r9
+ stvx v8,r0,r3
+ stvx v9,r3,r9
addi r3,r3,32
6: bf cr7*4+1,7f
- lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
- lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
- lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
- lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+ lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+ lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+ lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx vr8,r0,r3
- stvx vr9,r3,r9
- stvx vr10,r3,r10
- stvx vr11,r3,r11
+ stvx v8,r0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
addi r3,r3,64
7: sub r5,r5,r6
@@ -552,31 +552,31 @@ _GLOBAL(memcpy_power7)
*/
.align 5
8:
- lvx vr7,r0,r4
- VPERM(vr8,vr0,vr7,vr16)
- lvx vr6,r4,r9
- VPERM(vr9,vr7,vr6,vr16)
- lvx vr5,r4,r10
- VPERM(vr10,vr6,vr5,vr16)
- lvx vr4,r4,r11
- VPERM(vr11,vr5,vr4,vr16)
- lvx vr3,r4,r12
- VPERM(vr12,vr4,vr3,vr16)
- lvx vr2,r4,r14
- VPERM(vr13,vr3,vr2,vr16)
- lvx vr1,r4,r15
- VPERM(vr14,vr2,vr1,vr16)
- lvx vr0,r4,r16
- VPERM(vr15,vr1,vr0,vr16)
+ lvx v7,r0,r4
+ VPERM(v8,v0,v7,v16)
+ lvx v6,r4,r9
+ VPERM(v9,v7,v6,v16)
+ lvx v5,r4,r10
+ VPERM(v10,v6,v5,v16)
+ lvx v4,r4,r11
+ VPERM(v11,v5,v4,v16)
+ lvx v3,r4,r12
+ VPERM(v12,v4,v3,v16)
+ lvx v2,r4,r14
+ VPERM(v13,v3,v2,v16)
+ lvx v1,r4,r15
+ VPERM(v14,v2,v1,v16)
+ lvx v0,r4,r16
+ VPERM(v15,v1,v0,v16)
addi r4,r4,128
- stvx vr8,r0,r3
- stvx vr9,r3,r9
- stvx vr10,r3,r10
- stvx vr11,r3,r11
- stvx vr12,r3,r12
- stvx vr13,r3,r14
- stvx vr14,r3,r15
- stvx vr15,r3,r16
+ stvx v8,r0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
+ stvx v12,r3,r12
+ stvx v13,r3,r14
+ stvx v14,r3,r15
+ stvx v15,r3,r16
addi r3,r3,128
bdnz 8b
@@ -590,36 +590,36 @@ _GLOBAL(memcpy_power7)
mtocrf 0x01,r6
bf cr7*4+1,9f
- lvx vr3,r0,r4
- VPERM(vr8,vr0,vr3,vr16)
- lvx vr2,r4,r9
- VPERM(vr9,vr3,vr2,vr16)
- lvx vr1,r4,r10
- VPERM(vr10,vr2,vr1,vr16)
- lvx vr0,r4,r11
- VPERM(vr11,vr1,vr0,vr16)
+ lvx v3,r0,r4
+ VPERM(v8,v0,v3,v16)
+ lvx v2,r4,r9
+ VPERM(v9,v3,v2,v16)
+ lvx v1,r4,r10
+ VPERM(v10,v2,v1,v16)
+ lvx v0,r4,r11
+ VPERM(v11,v1,v0,v16)
addi r4,r4,64
- stvx vr8,r0,r3
- stvx vr9,r3,r9
- stvx vr10,r3,r10
- stvx vr11,r3,r11
+ stvx v8,r0,r3
+ stvx v9,r3,r9
+ stvx v10,r3,r10
+ stvx v11,r3,r11
addi r3,r3,64
9: bf cr7*4+2,10f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
- lvx vr0,r4,r9
- VPERM(vr9,vr1,vr0,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
+ lvx v0,r4,r9
+ VPERM(v9,v1,v0,v16)
addi r4,r4,32
- stvx vr8,r0,r3
- stvx vr9,r3,r9
+ stvx v8,r0,r3
+ stvx v9,r3,r9
addi r3,r3,32
10: bf cr7*4+3,11f
- lvx vr1,r0,r4
- VPERM(vr8,vr0,vr1,vr16)
+ lvx v1,r0,r4
+ VPERM(v8,v0,v1,v16)
addi r4,r4,16
- stvx vr8,r0,r3
+ stvx v8,r0,r3
addi r3,r3,16
/* Up to 15B to go */
diff --git a/arch/powerpc/lib/ppc_ksyms.c b/arch/powerpc/lib/ppc_ksyms.c
index f993959647b5..c7f8e9586316 100644
--- a/arch/powerpc/lib/ppc_ksyms.c
+++ b/arch/powerpc/lib/ppc_ksyms.c
@@ -8,10 +8,6 @@ EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);
-#ifdef CONFIG_PPC32
-EXPORT_SYMBOL(cacheable_memcpy);
-EXPORT_SYMBOL(cacheable_memzero);
-#endif
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
diff --git a/arch/powerpc/lib/rheap.c b/arch/powerpc/lib/rheap.c
index a1060a868e69..69abf844c2c3 100644
--- a/arch/powerpc/lib/rheap.c
+++ b/arch/powerpc/lib/rheap.c
@@ -284,7 +284,7 @@ EXPORT_SYMBOL_GPL(rh_create);
*/
void rh_destroy(rh_info_t * info)
{
- if ((info->flags & RHIF_STATIC_BLOCK) == 0 && info->block != NULL)
+ if ((info->flags & RHIF_STATIC_BLOCK) == 0)
kfree(info->block);
if ((info->flags & RHIF_STATIC_INFO) == 0)
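
The rh_destroy() hunk above drops the explicit "block != NULL" test because kfree(NULL) is defined to be a no-op, so the guard was redundant. The same idiom holds for free() in user space, as in this small sketch (struct and names invented for illustration):

#include <stdlib.h>

struct demo_info {
        int flags;
        void *block;            /* may legitimately be NULL */
};

/* Mirrors the simplified rh_destroy(): no NULL guard is needed because
 * free(NULL), like kfree(NULL), is a defined no-op. */
static void demo_destroy(struct demo_info *info)
{
        free(info->block);
        free(info);
}

int main(void)
{
        struct demo_info *info = calloc(1, sizeof(*info));

        demo_destroy(info);     /* info->block is NULL and that is fine */
        return 0;
}
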
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 438dcd3fd0d1..9c8770b5f96f 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_40x) += 40x_mmu.o
obj-$(CONFIG_44x) += 44x_mmu.o
obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o
obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o
+obj-$(CONFIG_PPC_SPLPAR) += vphn.o
obj-$(CONFIG_PPC_MM_SLICES) += slice.o
obj-y += hugetlbpage.o
ifeq ($(CONFIG_HUGETLB_PAGE),y)
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index d85e86aac7fb..169aba446a74 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -228,7 +228,7 @@ __dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t
do {
SetPageReserved(page);
map_page(vaddr, page_to_phys(page),
- pgprot_noncached(PAGE_KERNEL));
+ pgprot_val(pgprot_noncached(PAGE_KERNEL)));
page++;
vaddr += PAGE_SIZE;
} while (size -= PAGE_SIZE);
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index b46912fee7cd..9c90e66cffb6 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -181,7 +181,7 @@ static unsigned long map_mem_in_cams_addr(phys_addr_t phys, unsigned long virt,
unsigned long cam_sz;
cam_sz = calc_cam_sz(ram, virt, phys);
- settlbcam(i, virt, phys, cam_sz, PAGE_KERNEL_X, 0);
+ settlbcam(i, virt, phys, cam_sz, pgprot_val(PAGE_KERNEL_X), 0);
ram -= cam_sz;
amount_mapped += cam_sz;
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 86686514ae13..43dafb9d6a46 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -33,7 +33,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
* atomically mark the linux large page PMD busy and dirty
*/
do {
- pmd_t pmd = ACCESS_ONCE(*pmdp);
+ pmd_t pmd = READ_ONCE(*pmdp);
old_pmd = pmd_val(pmd);
/* If PMD busy, retry the access */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7e408bfc7948..fa9d5c238d22 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -964,7 +964,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
*shift = 0;
pgdp = pgdir + pgd_index(ea);
- pgd = ACCESS_ONCE(*pgdp);
+ pgd = READ_ONCE(*pgdp);
/*
* Always operate on the local stack value. This make sure the
* value don't get updated by a parallel THP split/collapse,
@@ -1045,7 +1045,7 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
if (pte_end < end)
end = pte_end;
- pte = ACCESS_ONCE(*ptep);
+ pte = READ_ONCE(*ptep);
mask = _PAGE_PRESENT | _PAGE_USER;
if (write)
mask |= _PAGE_RW;
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 10471f9bb63f..d747dd7bc90b 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -132,6 +132,7 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
+ kfree(name);
pgtable_cache[shift - 1] = new;
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b7285a5870f8..45fda71feb27 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -61,7 +61,6 @@
#define CPU_FTR_NOEXECUTE 0
#endif
-int mem_init_done;
unsigned long long memory_limit;
#ifdef CONFIG_HIGHMEM
@@ -377,8 +376,6 @@ void __init mem_init(void)
pr_info(" * 0x%08lx..0x%08lx : vmalloc & ioremap\n",
VMALLOC_START, VMALLOC_END);
#endif /* CONFIG_PPC32 */
-
- mem_init_done = 1;
}
void free_initmem(void)
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 78c45f392f5b..085b66b10891 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -96,7 +96,7 @@ extern void _tlbia(void);
extern void mapin_ram(void);
extern int map_page(unsigned long va, phys_addr_t pa, int flags);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags);
+ unsigned int size, pgprot_t prot);
extern int __map_without_bats;
extern int __allow_ioremap_reserved;
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 0257a7d659ef..5e80621d9324 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -958,6 +958,13 @@ void __init initmem_init(void)
memblock_dump_all();
+ /*
+ * Reduce the possible NUMA nodes to the online NUMA nodes,
+ * since we do not support node hotplug. This ensures that we
+ * lower the maximum NUMA node ID to what is actually present.
+ */
+ nodes_and(node_possible_map, node_possible_map, node_online_map);
+
for_each_online_node(nid) {
unsigned long start_pfn, end_pfn;
@@ -1177,6 +1184,9 @@ u64 memory_hotplug_max(void)
/* Virtual Processor Home Node (VPHN) support */
#ifdef CONFIG_PPC_SPLPAR
+
+#include "vphn.h"
+
struct topology_update_data {
struct topology_update_data *next;
unsigned int cpu;
@@ -1248,55 +1258,6 @@ static int update_cpu_associativity_changes_mask(void)
}
/*
- * 6 64-bit registers unpacked into 12 32-bit associativity values. To form
- * the complete property we have to add the length in the first cell.
- */
-#define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1)
-
-/*
- * Convert the associativity domain numbers returned from the hypervisor
- * to the sequence they would appear in the ibm,associativity property.
- */
-static int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
-{
- int i, nr_assoc_doms = 0;
- const __be16 *field = (const __be16 *) packed;
-
-#define VPHN_FIELD_UNUSED (0xffff)
-#define VPHN_FIELD_MSB (0x8000)
-#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
-
- for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
- if (be16_to_cpup(field) == VPHN_FIELD_UNUSED) {
- /* All significant fields processed, and remaining
- * fields contain the reserved value of all 1's.
- * Just store them.
- */
- unpacked[i] = *((__be32 *)field);
- field += 2;
- } else if (be16_to_cpup(field) & VPHN_FIELD_MSB) {
- /* Data is in the lower 15 bits of this field */
- unpacked[i] = cpu_to_be32(
- be16_to_cpup(field) & VPHN_FIELD_MASK);
- field++;
- nr_assoc_doms++;
- } else {
- /* Data is in the lower 15 bits of this field
- * concatenated with the next 16 bit field
- */
- unpacked[i] = *((__be32 *)field);
- field += 2;
- nr_assoc_doms++;
- }
- }
-
- /* The first cell contains the length of the property */
- unpacked[0] = cpu_to_be32(nr_assoc_doms);
-
- return nr_assoc_doms;
-}
-
-/*
* Retrieve the new associativity information for a virtual processor's
* home node.
*/
@@ -1306,11 +1267,8 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity)
long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
u64 flags = 1;
int hwcpu = get_hard_smp_processor_id(cpu);
- int i;
rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
- for (i = 0; i < 6; i++)
- retbuf[i] = cpu_to_be64(retbuf[i]);
vphn_unpack_associativity(retbuf, associativity);
return rc;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 03b1a3b0fbd5..7692d1bb1bc6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -54,9 +54,6 @@ extern char etext[], _stext[];
#ifdef HAVE_BATS
extern phys_addr_t v_mapped_by_bats(unsigned long va);
extern unsigned long p_mapped_by_bats(phys_addr_t pa);
-void setbat(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags);
-
#else /* !HAVE_BATS */
#define v_mapped_by_bats(x) (0UL)
#define p_mapped_by_bats(x) (0UL)
@@ -110,9 +107,8 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
__init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
pte_t *pte;
- extern int mem_init_done;
- if (mem_init_done) {
+ if (slab_is_available()) {
pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
} else {
pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
@@ -192,7 +188,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
/* Make sure we have the base flags */
if ((flags & _PAGE_PRESENT) == 0)
- flags |= PAGE_KERNEL;
+ flags |= pgprot_val(PAGE_KERNEL);
/* Non-cacheable page cannot be coherent */
if (flags & _PAGE_NO_CACHE)
@@ -219,9 +215,9 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
* Don't allow anybody to remap normal RAM that we're using.
* mem_init() sets high_memory so only do the check after that.
*/
- if (mem_init_done && (p < virt_to_phys(high_memory)) &&
+ if (slab_is_available() && (p < virt_to_phys(high_memory)) &&
!(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
- printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
+ printk("__ioremap(): phys addr 0x%llx is RAM lr %ps\n",
(unsigned long long)p, __builtin_return_address(0));
return NULL;
}
@@ -247,7 +243,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
if ((v = p_mapped_by_tlbcam(p)))
goto out;
- if (mem_init_done) {
+ if (slab_is_available()) {
struct vm_struct *area;
area = get_vm_area_caller(size, VM_IOREMAP, caller);
if (area == 0)
@@ -266,7 +262,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
for (i = 0; i < size && err == 0; i += PAGE_SIZE)
err = map_page(v+i, p+i, flags);
if (err) {
- if (mem_init_done)
+ if (slab_is_available())
vunmap((void *)v);
return NULL;
}
@@ -327,7 +323,7 @@ void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
p = memstart_addr + s;
for (; s < top; s += PAGE_SIZE) {
ktext = ((char *) v >= _stext && (char *) v < etext);
- f = ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL;
+ f = ktext ? pgprot_val(PAGE_KERNEL_TEXT) : pgprot_val(PAGE_KERNEL);
map_page(v, p, f);
#ifdef CONFIG_PPC_STD_MMU_32
if (ktext)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 6957cc1ca0a7..59daa5eeec25 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -231,7 +231,7 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size,
if ((size == 0) || (paligned == 0))
return NULL;
- if (mem_init_done) {
+ if (slab_is_available()) {
struct vm_struct *area;
area = __get_vm_area_caller(size, VM_IOREMAP,
@@ -315,7 +315,7 @@ void __iounmap(volatile void __iomem *token)
{
void *addr;
- if (!mem_init_done)
+ if (!slab_is_available())
return;
addr = (void *) ((unsigned long __force)
@@ -723,7 +723,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
assert_spin_locked(&mm->page_table_lock);
WARN_ON(!pmd_trans_huge(pmd));
#endif
- trace_hugepage_set_pmd(addr, pmd);
+ trace_hugepage_set_pmd(addr, pmd_val(pmd));
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c
index 5029dc19b517..6b2f3e457171 100644
--- a/arch/powerpc/mm/ppc_mmu_32.c
+++ b/arch/powerpc/mm/ppc_mmu_32.c
@@ -113,11 +113,12 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
* of 2 between 128k and 256M.
*/
void __init setbat(int index, unsigned long virt, phys_addr_t phys,
- unsigned int size, int flags)
+ unsigned int size, pgprot_t prot)
{
unsigned int bl;
int wimgxpp;
struct ppc_bat *bat = BATS[index];
+ unsigned long flags = pgprot_val(prot);
if ((flags & _PAGE_NO_CACHE) ||
(cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
@@ -224,7 +225,7 @@ void __init MMU_init_hw(void)
*/
if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
Hash = __va(memblock_alloc(Hash_size, Hash_size));
- cacheable_memzero(Hash, Hash_size);
+ memset(Hash, 0, Hash_size);
_SDR1 = __pa(Hash) | SDR1_LOW_BITS;
Hash_end = (struct hash_pte *) ((unsigned long)Hash + Hash_size);
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c
index d2a94b85dbc2..c522969f012d 100644
--- a/arch/powerpc/mm/tlb_hash64.c
+++ b/arch/powerpc/mm/tlb_hash64.c
@@ -216,7 +216,7 @@ void __flush_hash_table_range(struct mm_struct *mm, unsigned long start,
continue;
pte = pte_val(*ptep);
if (hugepage_shift)
- trace_hugepage_invalidate(start, pte_val(pte));
+ trace_hugepage_invalidate(start, pte);
if (!(pte & _PAGE_HASHPTE))
continue;
if (unlikely(hugepage_shift && pmd_trans_huge(*(pmd_t *)pte)))
diff --git a/arch/powerpc/mm/vphn.c b/arch/powerpc/mm/vphn.c
new file mode 100644
index 000000000000..5f8ef50e5c66
--- /dev/null
+++ b/arch/powerpc/mm/vphn.c
@@ -0,0 +1,70 @@
+#include <asm/byteorder.h>
+#include "vphn.h"
+
+/*
+ * The associativity domain numbers are returned from the hypervisor as a
+ * stream of mixed 16-bit and 32-bit fields. The stream is terminated by the
+ * special value of "all ones" (aka. 0xffff) and its size may not exceed 48
+ * bytes.
+ *
+ * --- 16-bit fields -->
+ * _________________________
+ * | 0 | 1 | 2 | 3 | be_packed[0]
+ * ------+-----+-----+------
+ * _________________________
+ * | 4 | 5 | 6 | 7 | be_packed[1]
+ * -------------------------
+ * ...
+ * _________________________
+ * | 20 | 21 | 22 | 23 | be_packed[5]
+ * -------------------------
+ *
+ * Convert to the sequence they would appear in the ibm,associativity property.
+ */
+int vphn_unpack_associativity(const long *packed, __be32 *unpacked)
+{
+ __be64 be_packed[VPHN_REGISTER_COUNT];
+ int i, nr_assoc_doms = 0;
+ const __be16 *field = (const __be16 *) be_packed;
+ u16 last = 0;
+ bool is_32bit = false;
+
+#define VPHN_FIELD_UNUSED (0xffff)
+#define VPHN_FIELD_MSB (0x8000)
+#define VPHN_FIELD_MASK (~VPHN_FIELD_MSB)
+
+ /* Let's fix the values returned by plpar_hcall9() */
+ for (i = 0; i < VPHN_REGISTER_COUNT; i++)
+ be_packed[i] = cpu_to_be64(packed[i]);
+
+ for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) {
+ u16 new = be16_to_cpup(field++);
+
+ if (is_32bit) {
+ /* Let's concatenate the 16 bits of this field to the
+ * 15 lower bits of the previous field
+ */
+ unpacked[++nr_assoc_doms] =
+ cpu_to_be32(last << 16 | new);
+ is_32bit = false;
+ } else if (new == VPHN_FIELD_UNUSED)
+ /* This is the list terminator */
+ break;
+ else if (new & VPHN_FIELD_MSB) {
+ /* Data is in the lower 15 bits of this field */
+ unpacked[++nr_assoc_doms] =
+ cpu_to_be32(new & VPHN_FIELD_MASK);
+ } else {
+ /* Data is in the lower 15 bits of this field
+ * concatenated with the next 16 bit field
+ */
+ last = new;
+ is_32bit = true;
+ }
+ }
+
+ /* The first cell contains the length of the property */
+ unpacked[0] = cpu_to_be32(nr_assoc_doms);
+
+ return nr_assoc_doms;
+}
diff --git a/arch/powerpc/mm/vphn.h b/arch/powerpc/mm/vphn.h
new file mode 100644
index 000000000000..fe8b7805b78f
--- /dev/null
+++ b/arch/powerpc/mm/vphn.h
@@ -0,0 +1,16 @@
+#ifndef _ARCH_POWERPC_MM_VPHN_H_
+#define _ARCH_POWERPC_MM_VPHN_H_
+
+/* The H_HOME_NODE_ASSOCIATIVITY h_call returns 6 64-bit registers.
+ */
+#define VPHN_REGISTER_COUNT 6
+
+/*
+ * 6 64-bit registers unpacked into up to 24 be32 associativity values. To
+ * form the complete property we have to add the length in the first cell.
+ */
+#define VPHN_ASSOC_BUFSIZE (VPHN_REGISTER_COUNT*sizeof(u64)/sizeof(u16) + 1)
+
+extern int vphn_unpack_associativity(const long *packed, __be32 *unpacked);
+
+#endif
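
The new vphn.c/vphn.h above move the unpacking of H_HOME_NODE_ASSOCIATIVITY results out of numa.c into their own file. Per the comments in the hunk, the data is a stream of 16-bit fields: a field with its top bit set carries one domain number in its low 15 bits, a field with the top bit clear is the high half of a 32-bit domain whose low half is the next field, and 0xffff terminates the list. Below is a simplified, host-endian sketch of that state machine over a hand-built sample buffer; the kernel version additionally byte-swaps the six 64-bit hcall registers first, and the _demo name and sample values are invented.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VPHN_FIELD_UNUSED 0xffff
#define VPHN_FIELD_MSB    0x8000
#define VPHN_FIELD_MASK   (~VPHN_FIELD_MSB & 0xffff)

/* Simplified version of vphn_unpack_associativity() above: emits one
 * 32-bit domain per MSB-set field, pairs up two fields when the MSB is
 * clear, stops at 0xffff, and stores the count in unpacked[0]. */
static int vphn_unpack_demo(const uint16_t *field, int nfields,
                            uint32_t *unpacked)
{
        int i, nr = 0;
        uint16_t last = 0;
        bool is_32bit = false;

        for (i = 0; i < nfields; i++) {
                uint16_t new = field[i];

                if (is_32bit) {
                        unpacked[++nr] = (uint32_t)last << 16 | new;
                        is_32bit = false;
                } else if (new == VPHN_FIELD_UNUSED) {
                        break;                  /* list terminator */
                } else if (new & VPHN_FIELD_MSB) {
                        unpacked[++nr] = new & VPHN_FIELD_MASK;
                } else {
                        last = new;             /* upper half of a 32-bit value */
                        is_32bit = true;
                }
        }
        unpacked[0] = nr;
        return nr;
}

int main(void)
{
        /* Two 16-bit domains, one 32-bit domain, then the terminator. */
        const uint16_t sample[] = { 0x8001, 0x8002, 0x0001, 0x0002, 0xffff };
        uint32_t out[16];
        int i, n;

        n = vphn_unpack_demo(sample, (int)(sizeof(sample) / sizeof(sample[0])), out);
        for (i = 1; i <= n; i++)
                printf("domain[%d] = 0x%x\n", i, out[i]);
        return 0;
}
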
diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c
index 2396dda282cd..ead55351b254 100644
--- a/arch/powerpc/perf/callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -243,7 +243,7 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
sp = regs->gpr[1];
perf_callchain_store(entry, next_ip);
- for (;;) {
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
fp = (unsigned long __user *) sp;
if (!valid_user_sp(sp, 1) || read_user_stack_64(fp, &next_sp))
return;
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 7fd60dcb2cb0..12b638425bb9 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -1837,8 +1837,10 @@ static int power_pmu_event_init(struct perf_event *event)
cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
event->attr.branch_sample_type);
- if(cpuhw->bhrb_filter == -1)
+ if (cpuhw->bhrb_filter == -1) {
+ put_cpu_var(cpu_hw_events);
return -EOPNOTSUPP;
+ }
}
put_cpu_var(cpu_hw_events);
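
The core-book3s.c fix above closes a get_cpu_var()/put_cpu_var() imbalance: the early -EOPNOTSUPP return used to leave preemption disabled. The rule is that every get_cpu_var() needs a matching put_cpu_var() on every exit path, error paths included. A stand-alone sketch of the shape of the fix, with an invented counter standing in for the preemption bookkeeping and *_demo names used throughout:

#include <errno.h>
#include <stdio.h>

/* Stand-ins for get_cpu_var()/put_cpu_var(); in the kernel these disable
 * and re-enable preemption around access to a per-CPU variable. */
static int preempt_depth;
static void get_cpu_var_demo(void) { preempt_depth++; }
static void put_cpu_var_demo(void) { preempt_depth--; }

/* Same shape as the fixed power_pmu_event_init() path: the early
 * error return now releases the per-CPU reference first. */
static int event_init_demo(int bhrb_filter)
{
        get_cpu_var_demo();
        if (bhrb_filter == -1) {
                put_cpu_var_demo();             /* the fix in the hunk above */
                return -EOPNOTSUPP;
        }
        put_cpu_var_demo();
        return 0;
}

int main(void)
{
        event_init_demo(-1);
        event_init_demo(0);
        printf("preempt depth after both calls: %d (must be 0)\n",
               preempt_depth);
        return 0;
}
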
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index abeb9ec0d117..ec2eb20631d1 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -142,6 +142,15 @@ static struct attribute_group event_long_desc_group = {
static struct kmem_cache *hv_page_cache;
+/*
+ * request_buffer and result_buffer are not required to be 4k aligned,
+ * but are not allowed to cross any 4k boundary. Aligning them to 4k is
+ * the simplest way to ensure that.
+ */
+#define H24x7_DATA_BUFFER_SIZE 4096
+DEFINE_PER_CPU(char, hv_24x7_reqb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+DEFINE_PER_CPU(char, hv_24x7_resb[H24x7_DATA_BUFFER_SIZE]) __aligned(4096);
+
static char *event_name(struct hv_24x7_event_data *ev, int *len)
{
*len = be16_to_cpu(ev->event_name_len) - 2;
@@ -152,6 +161,7 @@ static char *event_desc(struct hv_24x7_event_data *ev, int *len)
{
unsigned nl = be16_to_cpu(ev->event_name_len);
__be16 *desc_len = (__be16 *)(ev->remainder + nl - 2);
+
*len = be16_to_cpu(*desc_len) - 2;
return (char *)ev->remainder + nl;
}
@@ -162,6 +172,7 @@ static char *event_long_desc(struct hv_24x7_event_data *ev, int *len)
__be16 *desc_len_ = (__be16 *)(ev->remainder + nl - 2);
unsigned desc_len = be16_to_cpu(*desc_len_);
__be16 *long_desc_len = (__be16 *)(ev->remainder + nl + desc_len - 2);
+
*len = be16_to_cpu(*long_desc_len) - 2;
return (char *)ev->remainder + nl + desc_len;
}
@@ -239,14 +250,12 @@ static unsigned long h_get_24x7_catalog_page_(unsigned long phys_4096,
unsigned long index)
{
pr_devel("h_get_24x7_catalog_page(0x%lx, %lu, %lu)",
- phys_4096,
- version,
- index);
+ phys_4096, version, index);
+
WARN_ON(!IS_ALIGNED(phys_4096, 4096));
+
return plpar_hcall_norets(H_GET_24X7_CATALOG_PAGE,
- phys_4096,
- version,
- index);
+ phys_4096, version, index);
}
static unsigned long h_get_24x7_catalog_page(char page[],
@@ -300,6 +309,7 @@ static ssize_t device_show_string(struct device *dev,
struct dev_ext_attribute *d;
d = container_of(attr, struct dev_ext_attribute, attr);
+
return sprintf(buf, "%s\n", (char *)d->var);
}
@@ -314,6 +324,7 @@ static struct attribute *device_str_attr_create_(char *name, char *str)
attr->attr.attr.name = name;
attr->attr.attr.mode = 0444;
attr->attr.show = device_show_string;
+
return &attr->attr.attr;
}
@@ -387,7 +398,6 @@ static struct attribute *event_to_attr(unsigned ix,
a_ev_name = kasprintf(GFP_KERNEL, "%.*s%s__%d",
(int)event_name_len, ev_name, ev_suffix, nonce);
-
if (!a_ev_name)
goto out_val;
@@ -637,7 +647,7 @@ static ssize_t catalog_event_len_validate(struct hv_24x7_event_data *event,
#define MAX_4K (SIZE_MAX / 4096)
-static void create_events_from_catalog(struct attribute ***events_,
+static int create_events_from_catalog(struct attribute ***events_,
struct attribute ***event_descs_,
struct attribute ***event_long_descs_)
{
@@ -655,19 +665,25 @@ static void create_events_from_catalog(struct attribute ***events_,
void *event_data, *end;
struct hv_24x7_event_data *event;
struct rb_root ev_uniq = RB_ROOT;
+ int ret = 0;
- if (!page)
+ if (!page) {
+ ret = -ENOMEM;
goto e_out;
+ }
hret = h_get_24x7_catalog_page(page, 0, 0);
- if (hret)
+ if (hret) {
+ ret = -EIO;
goto e_free;
+ }
catalog_version_num = be64_to_cpu(page_0->version);
catalog_page_len = be32_to_cpu(page_0->length);
if (MAX_4K < catalog_page_len) {
pr_err("invalid page count: %zu\n", catalog_page_len);
+ ret = -EIO;
goto e_free;
}
@@ -686,6 +702,7 @@ static void create_events_from_catalog(struct attribute ***events_,
|| (MAX_4K - event_data_offs < event_data_len)) {
pr_err("invalid event data offs %zu and/or len %zu\n",
event_data_offs, event_data_len);
+ ret = -EIO;
goto e_free;
}
@@ -694,12 +711,14 @@ static void create_events_from_catalog(struct attribute ***events_,
event_data_offs,
event_data_offs + event_data_len,
catalog_page_len);
+ ret = -EIO;
goto e_free;
}
if (SIZE_MAX / MAX_EVENTS_PER_EVENT_DATA - 1 < event_entry_count) {
pr_err("event_entry_count %zu is invalid\n",
event_entry_count);
+ ret = -EIO;
goto e_free;
}
@@ -712,6 +731,7 @@ static void create_events_from_catalog(struct attribute ***events_,
event_data = vmalloc(event_data_bytes);
if (!event_data) {
pr_err("could not allocate event data\n");
+ ret = -ENOMEM;
goto e_free;
}
@@ -731,6 +751,7 @@ static void create_events_from_catalog(struct attribute ***events_,
if (hret) {
pr_err("failed to get event data in page %zu\n",
i + event_data_offs);
+ ret = -EIO;
goto e_event_data;
}
}
@@ -778,18 +799,24 @@ static void create_events_from_catalog(struct attribute ***events_,
event_idx_last, event_entry_count, junk_events);
events = kmalloc_array(attr_max + 1, sizeof(*events), GFP_KERNEL);
- if (!events)
+ if (!events) {
+ ret = -ENOMEM;
goto e_event_data;
+ }
event_descs = kmalloc_array(event_idx + 1, sizeof(*event_descs),
GFP_KERNEL);
- if (!event_descs)
+ if (!event_descs) {
+ ret = -ENOMEM;
goto e_event_attrs;
+ }
event_long_descs = kmalloc_array(event_idx + 1,
sizeof(*event_long_descs), GFP_KERNEL);
- if (!event_long_descs)
+ if (!event_long_descs) {
+ ret = -ENOMEM;
goto e_event_descs;
+ }
/* Iterate over the catalog filling in the attribute vector */
for (junk_events = 0, event_attr_ct = 0, desc_ct = 0, long_desc_ct = 0,
@@ -843,7 +870,7 @@ static void create_events_from_catalog(struct attribute ***events_,
*events_ = events;
*event_descs_ = event_descs;
*event_long_descs_ = event_long_descs;
- return;
+ return 0;
e_event_descs:
kfree(event_descs);
@@ -857,6 +884,7 @@ e_out:
*events_ = NULL;
*event_descs_ = NULL;
*event_long_descs_ = NULL;
+ return ret;
}
static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
@@ -872,6 +900,7 @@ static ssize_t catalog_read(struct file *filp, struct kobject *kobj,
uint64_t catalog_version_num = 0;
void *page = kmem_cache_alloc(hv_page_cache, GFP_USER);
struct hv_24x7_catalog_page_0 *page_0 = page;
+
if (!page)
return -ENOMEM;
@@ -976,31 +1005,104 @@ static const struct attribute_group *attr_groups[] = {
NULL,
};
-DEFINE_PER_CPU(char, hv_24x7_reqb[4096]) __aligned(4096);
-DEFINE_PER_CPU(char, hv_24x7_resb[4096]) __aligned(4096);
+static void log_24x7_hcall(struct hv_24x7_request_buffer *request_buffer,
+ struct hv_24x7_data_result_buffer *result_buffer,
+ unsigned long ret)
+{
+ struct hv_24x7_request *req;
+
+ req = &request_buffer->requests[0];
+ pr_notice_ratelimited("hcall failed: [%d %#x %#x %d] => "
+ "ret 0x%lx (%ld) detail=0x%x failing ix=%x\n",
+ req->performance_domain, req->data_offset,
+ req->starting_ix, req->starting_lpar_ix, ret, ret,
+ result_buffer->detailed_rc,
+ result_buffer->failing_request_ix);
+}
+
+/*
+ * Start the process for a new H_GET_24x7_DATA hcall.
+ */
+static void init_24x7_request(struct hv_24x7_request_buffer *request_buffer,
+ struct hv_24x7_data_result_buffer *result_buffer)
+{
+
+ memset(request_buffer, 0, 4096);
+ memset(result_buffer, 0, 4096);
+
+ request_buffer->interface_version = HV_24X7_IF_VERSION_CURRENT;
+ /* memset above set request_buffer->num_requests to 0 */
+}
-static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
- u16 lpar, u64 *res,
- bool success_expected)
+/*
+ * Commit (i.e perform) the H_GET_24x7_DATA hcall using the data collected
+ * by 'init_24x7_request()' and 'add_event_to_24x7_request()'.
+ */
+static int make_24x7_request(struct hv_24x7_request_buffer *request_buffer,
+ struct hv_24x7_data_result_buffer *result_buffer)
{
unsigned long ret;
/*
- * request_buffer and result_buffer are not required to be 4k aligned,
- * but are not allowed to cross any 4k boundary. Aligning them to 4k is
- * the simplest way to ensure that.
+ * NOTE: Due to variable number of array elements in request and
+ * result buffer(s), sizeof() is not reliable. Use the actual
+ * allocated buffer size, H24x7_DATA_BUFFER_SIZE.
*/
- struct reqb {
- struct hv_24x7_request_buffer buf;
- struct hv_24x7_request req;
- } __packed *request_buffer;
-
- struct {
- struct hv_24x7_data_result_buffer buf;
- struct hv_24x7_result res;
- struct hv_24x7_result_element elem;
- __be64 result;
- } __packed *result_buffer;
+ ret = plpar_hcall_norets(H_GET_24X7_DATA,
+ virt_to_phys(request_buffer), H24x7_DATA_BUFFER_SIZE,
+ virt_to_phys(result_buffer), H24x7_DATA_BUFFER_SIZE);
+
+ if (ret)
+ log_24x7_hcall(request_buffer, result_buffer, ret);
+
+ return ret;
+}
+
+/*
+ * Add the given @event to the next slot in the 24x7 request_buffer.
+ *
+ * Note that H_GET_24X7_DATA hcall allows reading several counters'
+ * values in a single HCALL. We expect the caller to add events to the
+ * request buffer one by one, make the HCALL and process the results.
+ */
+static int add_event_to_24x7_request(struct perf_event *event,
+ struct hv_24x7_request_buffer *request_buffer)
+{
+ u16 idx;
+ int i;
+ struct hv_24x7_request *req;
+
+ if (request_buffer->num_requests > 254) {
+ pr_devel("Too many requests for 24x7 HCALL %d\n",
+ request_buffer->num_requests);
+ return -EINVAL;
+ }
+
+ if (is_physical_domain(event_get_domain(event)))
+ idx = event_get_core(event);
+ else
+ idx = event_get_vcpu(event);
+
+ i = request_buffer->num_requests++;
+ req = &request_buffer->requests[i];
+
+ req->performance_domain = event_get_domain(event);
+ req->data_size = cpu_to_be16(8);
+ req->data_offset = cpu_to_be32(event_get_offset(event));
+ req->starting_lpar_ix = cpu_to_be16(event_get_lpar(event)),
+ req->max_num_lpars = cpu_to_be16(1);
+ req->starting_ix = cpu_to_be16(idx);
+ req->max_ix = cpu_to_be16(1);
+
+ return 0;
+}
+
+static unsigned long single_24x7_request(struct perf_event *event, u64 *count)
+{
+ unsigned long ret;
+ struct hv_24x7_request_buffer *request_buffer;
+ struct hv_24x7_data_result_buffer *result_buffer;
+ struct hv_24x7_result *resb;
BUILD_BUG_ON(sizeof(*request_buffer) > 4096);
BUILD_BUG_ON(sizeof(*result_buffer) > 4096);
@@ -1008,63 +1110,28 @@ static unsigned long single_24x7_request(u8 domain, u32 offset, u16 ix,
request_buffer = (void *)get_cpu_var(hv_24x7_reqb);
result_buffer = (void *)get_cpu_var(hv_24x7_resb);
- memset(request_buffer, 0, 4096);
- memset(result_buffer, 0, 4096);
-
- *request_buffer = (struct reqb) {
- .buf = {
- .interface_version = HV_24X7_IF_VERSION_CURRENT,
- .num_requests = 1,
- },
- .req = {
- .performance_domain = domain,
- .data_size = cpu_to_be16(8),
- .data_offset = cpu_to_be32(offset),
- .starting_lpar_ix = cpu_to_be16(lpar),
- .max_num_lpars = cpu_to_be16(1),
- .starting_ix = cpu_to_be16(ix),
- .max_ix = cpu_to_be16(1),
- }
- };
+ init_24x7_request(request_buffer, result_buffer);
- ret = plpar_hcall_norets(H_GET_24X7_DATA,
- virt_to_phys(request_buffer), sizeof(*request_buffer),
- virt_to_phys(result_buffer), sizeof(*result_buffer));
+ ret = add_event_to_24x7_request(event, request_buffer);
+ if (ret)
+ goto out;
+ ret = make_24x7_request(request_buffer, result_buffer);
if (ret) {
- if (success_expected)
- pr_err_ratelimited("hcall failed: %d %#x %#x %d => "
- "0x%lx (%ld) detail=0x%x failing ix=%x\n",
- domain, offset, ix, lpar, ret, ret,
- result_buffer->buf.detailed_rc,
- result_buffer->buf.failing_request_ix);
+ log_24x7_hcall(request_buffer, result_buffer, ret);
goto out;
}
- *res = be64_to_cpu(result_buffer->result);
+ /* process result from hcall */
+ resb = &result_buffer->results[0];
+ *count = be64_to_cpu(resb->elements[0].element_data[0]);
out:
+ put_cpu_var(hv_24x7_reqb);
+ put_cpu_var(hv_24x7_resb);
return ret;
}
-static unsigned long event_24x7_request(struct perf_event *event, u64 *res,
- bool success_expected)
-{
- u16 idx;
- unsigned domain = event_get_domain(event);
-
- if (is_physical_domain(domain))
- idx = event_get_core(event);
- else
- idx = event_get_vcpu(event);
-
- return single_24x7_request(event_get_domain(event),
- event_get_offset(event),
- idx,
- event_get_lpar(event),
- res,
- success_expected);
-}
static int h_24x7_event_init(struct perf_event *event)
{
@@ -1133,7 +1200,7 @@ static int h_24x7_event_init(struct perf_event *event)
}
/* see if the event complains */
- if (event_24x7_request(event, &ct, false)) {
+ if (single_24x7_request(event, &ct)) {
pr_devel("test hcall failed\n");
return -EIO;
}
@@ -1145,7 +1212,7 @@ static u64 h_24x7_get_value(struct perf_event *event)
{
unsigned long ret;
u64 ct;
- ret = event_24x7_request(event, &ct, true);
+ ret = single_24x7_request(event, &ct);
if (ret)
/* We checked this in event init, shouldn't fail here... */
return 0;
@@ -1153,15 +1220,22 @@ static u64 h_24x7_get_value(struct perf_event *event)
return ct;
}
-static void h_24x7_event_update(struct perf_event *event)
+static void update_event_count(struct perf_event *event, u64 now)
{
s64 prev;
- u64 now;
- now = h_24x7_get_value(event);
+
prev = local64_xchg(&event->hw.prev_count, now);
local64_add(now - prev, &event->count);
}
+static void h_24x7_event_read(struct perf_event *event)
+{
+ u64 now;
+
+ now = h_24x7_get_value(event);
+ update_event_count(event, now);
+}
+
static void h_24x7_event_start(struct perf_event *event, int flags)
{
if (flags & PERF_EF_RELOAD)
@@ -1170,7 +1244,7 @@ static void h_24x7_event_start(struct perf_event *event, int flags)
static void h_24x7_event_stop(struct perf_event *event, int flags)
{
- h_24x7_event_update(event);
+ h_24x7_event_read(event);
}
static int h_24x7_event_add(struct perf_event *event, int flags)
@@ -1191,7 +1265,7 @@ static struct pmu h_24x7_pmu = {
.del = h_24x7_event_stop,
.start = h_24x7_event_start,
.stop = h_24x7_event_stop,
- .read = h_24x7_event_update,
+ .read = h_24x7_event_read,
};
static int hv_24x7_init(void)
@@ -1219,10 +1293,13 @@ static int hv_24x7_init(void)
/* sampling not supported */
h_24x7_pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
- create_events_from_catalog(&event_group.attrs,
+ r = create_events_from_catalog(&event_group.attrs,
&event_desc_group.attrs,
&event_long_desc_group.attrs);
+ if (r)
+ return r;
+
r = perf_pmu_register(&h_24x7_pmu, h_24x7_pmu.name, -1);
if (r)
return r;
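
A minimal sketch (not part of the patch) of the get_cpu_var()/put_cpu_var() pairing that single_24x7_request() above now relies on: get_cpu_var() disables preemption while the per-cpu hcall buffers are in use, so every exit path, including the new error paths, must reach the matching put_cpu_var(). The buffer and helper names below are hypothetical.

#include <linux/percpu.h>

/* Hypothetical per-cpu buffer, mirroring hv_24x7_reqb/hv_24x7_resb. */
static DEFINE_PER_CPU(char, example_buf[4096]) __aligned(4096);

static int use_example_buf(void)
{
	char *buf = (char *)get_cpu_var(example_buf);	/* preemption disabled here */
	int ret;

	ret = buf[0];			/* stand-in for filling the request buffer */
	put_cpu_var(example_buf);	/* must balance get_cpu_var() on every path */

	return ret;
}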
diff --git a/arch/powerpc/perf/hv-24x7.h b/arch/powerpc/perf/hv-24x7.h
index 69cd4e690f58..0f9fa21a29f2 100644
--- a/arch/powerpc/perf/hv-24x7.h
+++ b/arch/powerpc/perf/hv-24x7.h
@@ -50,7 +50,7 @@ struct hv_24x7_request_buffer {
__u8 interface_version;
__u8 num_requests;
__u8 reserved[0xE];
- struct hv_24x7_request requests[];
+ struct hv_24x7_request requests[1];
} __packed;
struct hv_24x7_result_element {
@@ -66,7 +66,7 @@ struct hv_24x7_result_element {
__be32 lpar_cfg_instance_id;
/* size = @result_element_data_size of cointaining result. */
- __u8 element_data[];
+ __u64 element_data[1];
} __packed;
struct hv_24x7_result {
@@ -87,7 +87,7 @@ struct hv_24x7_result {
/* WARNING: only valid for first result element due to variable sizes
* of result elements */
/* struct hv_24x7_result_element[@num_elements_returned] */
- struct hv_24x7_result_element elements[];
+ struct hv_24x7_result_element elements[1];
} __packed;
struct hv_24x7_data_result_buffer {
@@ -103,7 +103,7 @@ struct hv_24x7_data_result_buffer {
__u8 reserved2[0x8];
/* WARNING: only valid for the first result due to variable sizes of
* results */
- struct hv_24x7_result results[]; /* [@num_results] */
+ struct hv_24x7_result results[1]; /* [@num_results] */
} __packed;
#endif
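
The header change above swaps the trailing flexible arrays for one-element arrays so the first request/result can be addressed directly through the buffer pointer, but, as the NOTE in make_24x7_request() warns, sizeof() on these structs no longer describes the real 4K buffers. A standalone illustration with hypothetical struct names (ordinary userspace C, not kernel code):

#include <stdio.h>

struct req { unsigned int a; };

struct buf_flex { unsigned char hdr[16]; struct req requests[]; };	/* flexible array member */
struct buf_one  { unsigned char hdr[16]; struct req requests[1]; };	/* one-element array */

int main(void)
{
	/* Prints "16 20" on typical ABIs: neither value is the 4096-byte
	 * allocation actually handed to the hypervisor, which is why the
	 * hcall passes H24x7_DATA_BUFFER_SIZE explicitly instead of sizeof(). */
	printf("%zu %zu\n", sizeof(struct buf_flex), sizeof(struct buf_one));
	return 0;
}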
diff --git a/arch/powerpc/platforms/85xx/common.c b/arch/powerpc/platforms/85xx/common.c
index 4a9ad871a168..7bfb9b184dd4 100644
--- a/arch/powerpc/platforms/85xx/common.c
+++ b/arch/powerpc/platforms/85xx/common.c
@@ -40,6 +40,7 @@ static const struct of_device_id mpc85xx_common_ids[] __initconst = {
{ .compatible = "fsl,qoriq-pcie-v2.4", },
{ .compatible = "fsl,qoriq-pcie-v2.3", },
{ .compatible = "fsl,qoriq-pcie-v2.2", },
+ { .compatible = "fsl,fman", },
{},
};
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index 1f309ccb096e..9824d2cf79bd 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -88,6 +88,15 @@ static const struct of_device_id of_device_ids[] = {
.compatible = "simple-bus"
},
{
+ .compatible = "mdio-mux-gpio"
+ },
+ {
+ .compatible = "fsl,fpga-ngpixis"
+ },
+ {
+ .compatible = "fsl,fpga-qixis"
+ },
+ {
.compatible = "fsl,srio",
},
{
@@ -108,6 +117,9 @@ static const struct of_device_id of_device_ids[] = {
{
.compatible = "fsl,qe",
},
+ {
+ .compatible = "fsl,fman",
+ },
/* The following two are for the Freescale hypervisor */
{
.name = "hypervisor",
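
The new compatible strings above (FMan, the GPIO MDIO mux, the board-control FPGAs) extend the match table so those device-tree nodes get platform devices created for them. A sketch of how such a table is typically consumed; the names below are illustrative, and corenet_generic.c feeds its of_device_ids to of_platform_bus_probe() in much the same way.

#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_platform.h>

static const struct of_device_id example_ids[] __initconst = {
	{ .compatible = "fsl,fman", },
	{ .compatible = "mdio-mux-gpio", },
	{},	/* the table must stay NULL-terminated */
};

static int __init example_publish_devices(void)
{
	/* Walks the tree from the root ("/") and registers a platform
	 * device for every node matching one of the entries above. */
	return of_platform_bus_probe(NULL, example_ids, NULL);
}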
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c
index d7c1e69f3070..8631ac5f0e57 100644
--- a/arch/powerpc/platforms/85xx/smp.c
+++ b/arch/powerpc/platforms/85xx/smp.c
@@ -360,10 +360,10 @@ static void mpc85xx_smp_kexec_down(void *arg)
static void map_and_flush(unsigned long paddr)
{
struct page *page = pfn_to_page(paddr >> PAGE_SHIFT);
- unsigned long kaddr = (unsigned long)kmap(page);
+ unsigned long kaddr = (unsigned long)kmap_atomic(page);
flush_dcache_range(kaddr, kaddr + PAGE_SIZE);
- kunmap(page);
+ kunmap_atomic((void *)kaddr);
}
/**
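
The kexec flush path above moves from kmap()/kunmap() to the atomic variants, which are legal in contexts that must not sleep; note that kunmap_atomic() takes the mapped address rather than the struct page. A minimal illustration of the pairing, with a hypothetical helper name:

#include <linux/highmem.h>
#include <linux/mm.h>

static void wipe_page_atomically(struct page *page)
{
	void *kaddr = kmap_atomic(page);	/* never sleeps, unlike kmap() */

	clear_page(kaddr);			/* any access that must not sleep */
	kunmap_atomic(kaddr);			/* takes the address, not the page */
}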
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 391b3f6b54a3..b7f9c408bf24 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -72,11 +72,6 @@ config PPC_SMP_MUXED_IPI
cpu. This will enable the generic code to multiplex the 4
messages on to one ipi.
-config PPC_UDBG_BEAT
- bool "BEAT based debug console"
- depends on PPC_CELLEB
- default n
-
config IPIC
bool
default n
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 76483e3acd60..7264e91190be 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -2,6 +2,7 @@ config PPC64
bool "64-bit kernel"
default n
select HAVE_VIRT_CPU_ACCOUNTING
+ select ZLIB_DEFLATE
help
This option selects whether a 32-bit or a 64-bit kernel
will be built.
@@ -15,7 +16,7 @@ choice
The most common ones are the desktop and server CPUs (601, 603,
604, 740, 750, 74xx) CPUs from Freescale and IBM, with their
embedded 512x/52xx/82xx/83xx/86xx counterparts.
- The other embeeded parts, namely 4xx, 8xx, e200 (55xx) and e500
+ The other embedded parts, namely 4xx, 8xx, e200 (55xx) and e500
(85xx) each form a family of their own that is not compatible
with the others.
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig
index 870b6dbd4d18..2f23133ab3d1 100644
--- a/arch/powerpc/platforms/cell/Kconfig
+++ b/arch/powerpc/platforms/cell/Kconfig
@@ -33,17 +33,6 @@ config PPC_IBM_CELL_BLADE
select PPC_UDBG_16550
select UDBG_RTAS_CONSOLE
-config PPC_CELLEB
- bool "Toshiba's Cell Reference Set 'Celleb' Architecture"
- depends on PPC64 && PPC_BOOK3S
- select PPC_CELL_NATIVE
- select PPC_OF_PLATFORM_PCI
- select PCI
- select HAS_TXX9_SERIAL
- select PPC_UDBG_BEAT
- select USB_OHCI_BIG_ENDIAN_MMIO
- select USB_EHCI_BIG_ENDIAN_MMIO
-
config PPC_CELL_QPACE
bool "IBM Cell - QPACE"
depends on PPC64 && PPC_BOOK3S
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile
index 2d16884f67b9..34699bddfddd 100644
--- a/arch/powerpc/platforms/cell/Makefile
+++ b/arch/powerpc/platforms/cell/Makefile
@@ -29,18 +29,3 @@ obj-$(CONFIG_AXON_MSI) += axon_msi.o
# qpace setup
obj-$(CONFIG_PPC_CELL_QPACE) += qpace_setup.o
-
-# celleb stuff
-ifeq ($(CONFIG_PPC_CELLEB),y)
-obj-y += celleb_setup.o \
- celleb_pci.o celleb_scc_epci.o \
- celleb_scc_pciex.o \
- celleb_scc_uhc.o \
- spider-pci.o beat.o beat_htab.o \
- beat_hvCall.o beat_interrupt.o \
- beat_iommu.o
-
-obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o
-obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o
-obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o
-endif
diff --git a/arch/powerpc/platforms/cell/beat.c b/arch/powerpc/platforms/cell/beat.c
deleted file mode 100644
index affcf566d460..000000000000
--- a/arch/powerpc/platforms/cell/beat.c
+++ /dev/null
@@ -1,264 +0,0 @@
-/*
- * Simple routines for Celleb/Beat
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/export.h>
-#include <linux/init.h>
-#include <linux/err.h>
-#include <linux/rtc.h>
-#include <linux/interrupt.h>
-#include <linux/irqreturn.h>
-#include <linux/reboot.h>
-
-#include <asm/hvconsole.h>
-#include <asm/time.h>
-#include <asm/machdep.h>
-#include <asm/firmware.h>
-
-#include "beat_wrapper.h"
-#include "beat.h"
-#include "beat_interrupt.h"
-
-static int beat_pm_poweroff_flag;
-
-void beat_restart(char *cmd)
-{
- beat_shutdown_logical_partition(!beat_pm_poweroff_flag);
-}
-
-void beat_power_off(void)
-{
- beat_shutdown_logical_partition(0);
-}
-
-u64 beat_halt_code = 0x1000000000000000UL;
-EXPORT_SYMBOL(beat_halt_code);
-
-void beat_halt(void)
-{
- beat_shutdown_logical_partition(beat_halt_code);
-}
-
-int beat_set_rtc_time(struct rtc_time *rtc_time)
-{
- u64 tim;
- tim = mktime(rtc_time->tm_year+1900,
- rtc_time->tm_mon+1, rtc_time->tm_mday,
- rtc_time->tm_hour, rtc_time->tm_min, rtc_time->tm_sec);
- if (beat_rtc_write(tim))
- return -1;
- return 0;
-}
-
-void beat_get_rtc_time(struct rtc_time *rtc_time)
-{
- u64 tim;
-
- if (beat_rtc_read(&tim))
- tim = 0;
- to_tm(tim, rtc_time);
- rtc_time->tm_year -= 1900;
- rtc_time->tm_mon -= 1;
-}
-
-#define BEAT_NVRAM_SIZE 4096
-
-ssize_t beat_nvram_read(char *buf, size_t count, loff_t *index)
-{
- unsigned int i;
- unsigned long len;
- char *p = buf;
-
- if (*index >= BEAT_NVRAM_SIZE)
- return -ENODEV;
- i = *index;
- if (i + count > BEAT_NVRAM_SIZE)
- count = BEAT_NVRAM_SIZE - i;
-
- for (; count != 0; count -= len) {
- len = count;
- if (len > BEAT_NVRW_CNT)
- len = BEAT_NVRW_CNT;
- if (beat_eeprom_read(i, len, p))
- return -EIO;
-
- p += len;
- i += len;
- }
- *index = i;
- return p - buf;
-}
-
-ssize_t beat_nvram_write(char *buf, size_t count, loff_t *index)
-{
- unsigned int i;
- unsigned long len;
- char *p = buf;
-
- if (*index >= BEAT_NVRAM_SIZE)
- return -ENODEV;
- i = *index;
- if (i + count > BEAT_NVRAM_SIZE)
- count = BEAT_NVRAM_SIZE - i;
-
- for (; count != 0; count -= len) {
- len = count;
- if (len > BEAT_NVRW_CNT)
- len = BEAT_NVRW_CNT;
- if (beat_eeprom_write(i, len, p))
- return -EIO;
-
- p += len;
- i += len;
- }
- *index = i;
- return p - buf;
-}
-
-ssize_t beat_nvram_get_size(void)
-{
- return BEAT_NVRAM_SIZE;
-}
-
-int beat_set_xdabr(unsigned long dabr, unsigned long dabrx)
-{
- if (beat_set_dabr(dabr, dabrx))
- return -1;
- return 0;
-}
-
-int64_t beat_get_term_char(u64 vterm, u64 *len, u64 *t1, u64 *t2)
-{
- u64 db[2];
- s64 ret;
-
- ret = beat_get_characters_from_console(vterm, len, (u8 *)db);
- if (ret == 0) {
- *t1 = db[0];
- *t2 = db[1];
- }
- return ret;
-}
-EXPORT_SYMBOL(beat_get_term_char);
-
-int64_t beat_put_term_char(u64 vterm, u64 len, u64 t1, u64 t2)
-{
- u64 db[2];
-
- db[0] = t1;
- db[1] = t2;
- return beat_put_characters_to_console(vterm, len, (u8 *)db);
-}
-EXPORT_SYMBOL(beat_put_term_char);
-
-void beat_power_save(void)
-{
- beat_pause(0);
-}
-
-#ifdef CONFIG_KEXEC
-void beat_kexec_cpu_down(int crash, int secondary)
-{
- beatic_deinit_IRQ();
-}
-#endif
-
-static irqreturn_t beat_power_event(int virq, void *arg)
-{
- printk(KERN_DEBUG "Beat: power button pressed\n");
- beat_pm_poweroff_flag = 1;
- ctrl_alt_del();
- return IRQ_HANDLED;
-}
-
-static irqreturn_t beat_reset_event(int virq, void *arg)
-{
- printk(KERN_DEBUG "Beat: reset button pressed\n");
- beat_pm_poweroff_flag = 0;
- ctrl_alt_del();
- return IRQ_HANDLED;
-}
-
-static struct beat_event_list {
- const char *typecode;
- irq_handler_t handler;
- unsigned int virq;
-} beat_event_list[] = {
- { "power", beat_power_event, 0 },
- { "reset", beat_reset_event, 0 },
-};
-
-static int __init beat_register_event(void)
-{
- u64 path[4], data[2];
- int rc, i;
- unsigned int virq;
-
- for (i = 0; i < ARRAY_SIZE(beat_event_list); i++) {
- struct beat_event_list *ev = &beat_event_list[i];
-
- if (beat_construct_event_receive_port(data) != 0) {
- printk(KERN_ERR "Beat: "
- "cannot construct event receive port for %s\n",
- ev->typecode);
- return -EINVAL;
- }
-
- virq = irq_create_mapping(NULL, data[0]);
- if (virq == NO_IRQ) {
- printk(KERN_ERR "Beat: failed to get virtual IRQ"
- " for event receive port for %s\n",
- ev->typecode);
- beat_destruct_event_receive_port(data[0]);
- return -EIO;
- }
- ev->virq = virq;
-
- rc = request_irq(virq, ev->handler, 0,
- ev->typecode, NULL);
- if (rc != 0) {
- printk(KERN_ERR "Beat: failed to request virtual IRQ"
- " for event receive port for %s\n",
- ev->typecode);
- beat_destruct_event_receive_port(data[0]);
- return rc;
- }
-
- path[0] = 0x1000000065780000ul; /* 1,ex */
- path[1] = 0x627574746f6e0000ul; /* button */
- path[2] = 0;
- strncpy((char *)&path[2], ev->typecode, 8);
- path[3] = 0;
- data[1] = 0;
-
- beat_create_repository_node(path, data);
- }
- return 0;
-}
-
-static int __init beat_event_init(void)
-{
- if (!firmware_has_feature(FW_FEATURE_BEAT))
- return -EINVAL;
-
- beat_pm_poweroff_flag = 0;
- return beat_register_event();
-}
-
-device_initcall(beat_event_init);
diff --git a/arch/powerpc/platforms/cell/beat.h b/arch/powerpc/platforms/cell/beat.h
deleted file mode 100644
index bfcb8e351ae5..000000000000
--- a/arch/powerpc/platforms/cell/beat.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/*
- * Guest OS Interfaces.
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_BEAT_H
-#define _CELLEB_BEAT_H
-
-int64_t beat_get_term_char(uint64_t, uint64_t *, uint64_t *, uint64_t *);
-int64_t beat_put_term_char(uint64_t, uint64_t, uint64_t, uint64_t);
-int64_t beat_repository_encode(int, const char *, uint64_t[4]);
-void beat_restart(char *);
-void beat_power_off(void);
-void beat_halt(void);
-int beat_set_rtc_time(struct rtc_time *);
-void beat_get_rtc_time(struct rtc_time *);
-ssize_t beat_nvram_get_size(void);
-ssize_t beat_nvram_read(char *, size_t, loff_t *);
-ssize_t beat_nvram_write(char *, size_t, loff_t *);
-int beat_set_xdabr(unsigned long, unsigned long);
-void beat_power_save(void);
-void beat_kexec_cpu_down(int, int);
-
-#endif /* _CELLEB_BEAT_H */
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
deleted file mode 100644
index bee9232fe619..000000000000
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ /dev/null
@@ -1,445 +0,0 @@
-/*
- * "Cell Reference Set" HTAB support.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/lpar.c:
- * Copyright (C) 2001 Todd Inglett, IBM Corporation
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG_LOW
-
-#include <linux/kernel.h>
-#include <linux/spinlock.h>
-
-#include <asm/mmu.h>
-#include <asm/page.h>
-#include <asm/pgtable.h>
-#include <asm/machdep.h>
-#include <asm/udbg.h>
-
-#include "beat_wrapper.h"
-
-#ifdef DEBUG_LOW
-#define DBG_LOW(fmt...) do { udbg_printf(fmt); } while (0)
-#else
-#define DBG_LOW(fmt...) do { } while (0)
-#endif
-
-static DEFINE_RAW_SPINLOCK(beat_htab_lock);
-
-static inline unsigned int beat_read_mask(unsigned hpte_group)
-{
- unsigned long rmask = 0;
- u64 hpte_v[5];
-
- beat_read_htab_entries(0, hpte_group + 0, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x8000;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x4000;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x2000;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x1000;
- beat_read_htab_entries(0, hpte_group + 4, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x0800;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x0400;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x0200;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x0100;
- hpte_group = ~hpte_group & (htab_hash_mask * HPTES_PER_GROUP);
- beat_read_htab_entries(0, hpte_group + 0, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x80;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x40;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x20;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x10;
- beat_read_htab_entries(0, hpte_group + 4, hpte_v);
- if (!(hpte_v[0] & HPTE_V_BOLTED))
- rmask |= 0x08;
- if (!(hpte_v[1] & HPTE_V_BOLTED))
- rmask |= 0x04;
- if (!(hpte_v[2] & HPTE_V_BOLTED))
- rmask |= 0x02;
- if (!(hpte_v[3] & HPTE_V_BOLTED))
- rmask |= 0x01;
- return rmask;
-}
-
-static long beat_lpar_hpte_insert(unsigned long hpte_group,
- unsigned long vpn, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int apsize, int ssize)
-{
- unsigned long lpar_rc;
- u64 hpte_v, hpte_r, slot;
-
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, va=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, va, pa, rflags, vflags, psize);
-
- hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-
- if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~HPTE_R_M;
-
- raw_spin_lock(&beat_htab_lock);
- lpar_rc = beat_read_mask(hpte_group);
- if (lpar_rc == 0) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" full\n");
- raw_spin_unlock(&beat_htab_lock);
- return -1;
- }
-
- lpar_rc = beat_insert_htab_entry(0, hpte_group, lpar_rc << 48,
- hpte_v, hpte_r, &slot);
- raw_spin_unlock(&beat_htab_lock);
-
- /*
- * Since we try and ioremap PHBs we don't own, the pte insert
- * will fail. However we must catch the failure in hash_page
- * or we will loop forever, so return -2 in this case.
- */
- if (unlikely(lpar_rc != 0)) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %lx\n", lpar_rc);
- return -2;
- }
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %lx\n", slot);
-
- /* We have to pass down the secondary bucket bit here as well */
- return (slot ^ hpte_group) & 15;
-}
-
-static long beat_lpar_hpte_remove(unsigned long hpte_group)
-{
- DBG_LOW("hpte_remove(group=%lx)\n", hpte_group);
- return -1;
-}
-
-static unsigned long beat_lpar_hpte_getword0(unsigned long slot)
-{
- unsigned long dword0;
- unsigned long lpar_rc;
- u64 dword[5];
-
- lpar_rc = beat_read_htab_entries(0, slot & ~3UL, dword);
-
- dword0 = dword[slot&3];
-
- BUG_ON(lpar_rc != 0);
-
- return dword0;
-}
-
-static void beat_lpar_hptab_clear(void)
-{
- unsigned long size_bytes = 1UL << ppc64_pft_size;
- unsigned long hpte_count = size_bytes >> 4;
- int i;
- u64 dummy0, dummy1;
-
- /* TODO: Use bulk call */
- for (i = 0; i < hpte_count; i++)
- beat_write_htab_entry(0, i, 0, 0, -1UL, -1UL, &dummy0, &dummy1);
-}
-
-/*
- * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
- * the low 3 bits of flags happen to line up. So no transform is needed.
- * We can probably optimize here and assume the high bits of newpp are
- * already zero. For now I am paranoid.
- */
-static long beat_lpar_hpte_updatepp(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int psize, int apsize,
- int ssize, unsigned long flags)
-{
- unsigned long lpar_rc;
- u64 dummy0, dummy1;
- unsigned long want_v;
-
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- DBG_LOW(" update: "
- "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
- want_v & HPTE_V_AVPN, slot, psize, newpp);
-
- raw_spin_lock(&beat_htab_lock);
- dummy0 = beat_lpar_hpte_getword0(slot);
- if ((dummy0 & ~0x7FUL) != (want_v & ~0x7FUL)) {
- DBG_LOW("not found !\n");
- raw_spin_unlock(&beat_htab_lock);
- return -1;
- }
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7, &dummy0,
- &dummy1);
- raw_spin_unlock(&beat_htab_lock);
- if (lpar_rc != 0 || dummy0 == 0) {
- DBG_LOW("not found !\n");
- return -1;
- }
-
- DBG_LOW("ok %lx %lx\n", dummy0, dummy1);
-
- BUG_ON(lpar_rc != 0);
-
- return 0;
-}
-
-static long beat_lpar_hpte_find(unsigned long vpn, int psize)
-{
- unsigned long hash;
- unsigned long i, j;
- long slot;
- unsigned long want_v, hpte_v;
-
- hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, MMU_SEGSIZE_256M);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- for (j = 0; j < 2; j++) {
- slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
- for (i = 0; i < HPTES_PER_GROUP; i++) {
- hpte_v = beat_lpar_hpte_getword0(slot);
-
- if (HPTE_V_COMPARE(hpte_v, want_v)
- && (hpte_v & HPTE_V_VALID)
- && (!!(hpte_v & HPTE_V_SECONDARY) == j)) {
- /* HPTE matches */
- if (j)
- slot = -slot;
- return slot;
- }
- ++slot;
- }
- hash = ~hash;
- }
-
- return -1;
-}
-
-static void beat_lpar_hpte_updateboltedpp(unsigned long newpp,
- unsigned long ea,
- int psize, int ssize)
-{
- unsigned long vpn;
- unsigned long lpar_rc, slot, vsid;
- u64 dummy0, dummy1;
-
- vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
- vpn = hpt_vpn(ea, vsid, MMU_SEGSIZE_256M);
-
- raw_spin_lock(&beat_htab_lock);
- slot = beat_lpar_hpte_find(vpn, psize);
- BUG_ON(slot == -1);
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, newpp, 0, 7,
- &dummy0, &dummy1);
- raw_spin_unlock(&beat_htab_lock);
-
- BUG_ON(lpar_rc != 0);
-}
-
-static void beat_lpar_hpte_invalidate(unsigned long slot, unsigned long vpn,
- int psize, int apsize,
- int ssize, int local)
-{
- unsigned long want_v;
- unsigned long lpar_rc;
- u64 dummy1, dummy2;
- unsigned long flags;
-
- DBG_LOW(" inval : slot=%lx, va=%016lx, psize: %d, local: %d\n",
- slot, va, psize, local);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
-
- raw_spin_lock_irqsave(&beat_htab_lock, flags);
- dummy1 = beat_lpar_hpte_getword0(slot);
-
- if ((dummy1 & ~0x7FUL) != (want_v & ~0x7FUL)) {
- DBG_LOW("not found !\n");
- raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
- return;
- }
-
- lpar_rc = beat_write_htab_entry(0, slot, 0, 0, HPTE_V_VALID, 0,
- &dummy1, &dummy2);
- raw_spin_unlock_irqrestore(&beat_htab_lock, flags);
-
- BUG_ON(lpar_rc != 0);
-}
-
-void __init hpte_init_beat(void)
-{
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
-}
-
-static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
- unsigned long vpn, unsigned long pa,
- unsigned long rflags, unsigned long vflags,
- int psize, int apsize, int ssize)
-{
- unsigned long lpar_rc;
- u64 hpte_v, hpte_r, slot;
-
- if (vflags & HPTE_V_SECONDARY)
- return -1;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW("hpte_insert(group=%lx, vpn=%016lx, pa=%016lx, "
- "rflags=%lx, vflags=%lx, psize=%d)\n",
- hpte_group, vpn, pa, rflags, vflags, psize);
-
- hpte_v = hpte_encode_v(vpn, psize, apsize, MMU_SEGSIZE_256M) |
- vflags | HPTE_V_VALID;
- hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;
-
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);
-
- if (rflags & _PAGE_NO_CACHE)
- hpte_r &= ~HPTE_R_M;
-
- /* insert into not-volted entry */
- lpar_rc = beat_insert_htab_entry3(0, hpte_group, hpte_v, hpte_r,
- HPTE_V_BOLTED, 0, &slot);
- /*
- * Since we try and ioremap PHBs we don't own, the pte insert
- * will fail. However we must catch the failure in hash_page
- * or we will loop forever, so return -2 in this case.
- */
- if (unlikely(lpar_rc != 0)) {
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" lpar err %lx\n", lpar_rc);
- return -2;
- }
- if (!(vflags & HPTE_V_BOLTED))
- DBG_LOW(" -> slot: %lx\n", slot);
-
- /* We have to pass down the secondary bucket bit here as well */
- return (slot ^ hpte_group) & 15;
-}
-
-/*
- * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and
- * the low 3 bits of flags happen to line up. So no transform is needed.
- * We can probably optimize here and assume the high bits of newpp are
- * already zero. For now I am paranoid.
- */
-static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
- unsigned long newpp,
- unsigned long vpn,
- int psize, int apsize,
- int ssize, unsigned long flags)
-{
- unsigned long lpar_rc;
- unsigned long want_v;
- unsigned long pss;
-
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
-
- DBG_LOW(" update: "
- "avpnv=%016lx, slot=%016lx, psize: %d, newpp %016lx ... ",
- want_v & HPTE_V_AVPN, slot, psize, newpp);
-
- lpar_rc = beat_update_htab_permission3(0, slot, want_v, pss, 7, newpp);
-
- if (lpar_rc == 0xfffffff7) {
- DBG_LOW("not found !\n");
- return -1;
- }
-
- DBG_LOW("ok\n");
-
- BUG_ON(lpar_rc != 0);
-
- return 0;
-}
-
-static void beat_lpar_hpte_invalidate_v3(unsigned long slot, unsigned long vpn,
- int psize, int apsize,
- int ssize, int local)
-{
- unsigned long want_v;
- unsigned long lpar_rc;
- unsigned long pss;
-
- DBG_LOW(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n",
- slot, vpn, psize, local);
- want_v = hpte_encode_avpn(vpn, psize, MMU_SEGSIZE_256M);
- pss = (psize == MMU_PAGE_4K) ? -1UL : mmu_psize_defs[psize].penc[psize];
-
- lpar_rc = beat_invalidate_htab_entry3(0, slot, want_v, pss);
-
- /* E_busy can be valid output: page may be already replaced */
- BUG_ON(lpar_rc != 0 && lpar_rc != 0xfffffff7);
-}
-
-static int64_t _beat_lpar_hptab_clear_v3(void)
-{
- return beat_clear_htab3(0);
-}
-
-static void beat_lpar_hptab_clear_v3(void)
-{
- _beat_lpar_hptab_clear_v3();
-}
-
-void __init hpte_init_beat_v3(void)
-{
- if (_beat_lpar_hptab_clear_v3() == 0) {
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate_v3;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp_v3;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert_v3;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear_v3;
- } else {
- ppc_md.hpte_invalidate = beat_lpar_hpte_invalidate;
- ppc_md.hpte_updatepp = beat_lpar_hpte_updatepp;
- ppc_md.hpte_updateboltedpp = beat_lpar_hpte_updateboltedpp;
- ppc_md.hpte_insert = beat_lpar_hpte_insert;
- ppc_md.hpte_remove = beat_lpar_hpte_remove;
- ppc_md.hpte_clear_all = beat_lpar_hptab_clear;
- }
-}
diff --git a/arch/powerpc/platforms/cell/beat_hvCall.S b/arch/powerpc/platforms/cell/beat_hvCall.S
deleted file mode 100644
index 96c801907126..000000000000
--- a/arch/powerpc/platforms/cell/beat_hvCall.S
+++ /dev/null
@@ -1,285 +0,0 @@
-/*
- * Beat hypervisor call I/F
- *
- * (C) Copyright 2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/hvCall.S.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <asm/ppc_asm.h>
-
-/* Not implemented on Beat, now */
-#define HCALL_INST_PRECALL
-#define HCALL_INST_POSTCALL
-
- .text
-
-#define HVSC .long 0x44000022
-
-/* Note: takes only 7 input parameters at maximum */
-_GLOBAL(beat_hcall_norets)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- mr r11,r3
- mr r3,r4
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes 8 input parameters at maximum */
-_GLOBAL(beat_hcall_norets8)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- mr r11,r3
- mr r3,r4
- mr r4,r5
- mr r5,r6
- mr r6,r7
- mr r7,r8
- mr r8,r9
- ld r10,STK_PARAM(R10)(r1)
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 1 output parameters at maximum */
-_GLOBAL(beat_hcall1)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 2 output parameters at maximum */
-_GLOBAL(beat_hcall2)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 3 output parameters at maximum */
-_GLOBAL(beat_hcall3)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 4 output parameters at maximum */
-_GLOBAL(beat_hcall4)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 5 output parameters at maximum */
-_GLOBAL(beat_hcall5)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
-
-/* Note: takes only 6 input parameters, 6 output parameters at maximum */
-_GLOBAL(beat_hcall6)
- HMT_MEDIUM
-
- mfcr r0
- stw r0,8(r1)
-
- HCALL_INST_PRECALL
-
- std r4,STK_PARAM(R4)(r1) /* save ret buffer */
-
- mr r11,r3
- mr r3,r5
- mr r4,r6
- mr r5,r7
- mr r6,r8
- mr r7,r9
- mr r8,r10
-
- HVSC /* invoke the hypervisor */
-
- HCALL_INST_POSTCALL
-
- ld r12,STK_PARAM(R4)(r1)
- std r4, 0(r12)
- std r5, 8(r12)
- std r6, 16(r12)
- std r7, 24(r12)
- std r8, 32(r12)
- std r9, 40(r12)
-
- lwz r0,8(r1)
- mtcrf 0xff,r0
-
- blr /* return r3 = status */
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
deleted file mode 100644
index 9e5dfbcc00af..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * Celleb/Beat Interrupt controller
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/init.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/percpu.h>
-#include <linux/types.h>
-
-#include <asm/machdep.h>
-
-#include "beat_interrupt.h"
-#include "beat_wrapper.h"
-
-#define MAX_IRQS NR_IRQS
-static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
-static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
-static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
-
-static struct irq_domain *beatic_host;
-
-/*
- * In this implementation, "virq" == "IRQ plug number",
- * "(irq_hw_number_t)hwirq" == "IRQ outlet number".
- */
-
-/* assumption: locked */
-static inline void beatic_update_irq_mask(unsigned int irq_plug)
-{
- int off;
- unsigned long masks[4];
-
- off = (irq_plug / 256) * 4;
- masks[0] = beatic_irq_mask_enable[off + 0]
- & beatic_irq_mask_ack[off + 0];
- masks[1] = beatic_irq_mask_enable[off + 1]
- & beatic_irq_mask_ack[off + 1];
- masks[2] = beatic_irq_mask_enable[off + 2]
- & beatic_irq_mask_ack[off + 2];
- masks[3] = beatic_irq_mask_enable[off + 3]
- & beatic_irq_mask_ack[off + 3];
- if (beat_set_interrupt_mask(irq_plug&~255UL,
- masks[0], masks[1], masks[2], masks[3]) != 0)
- panic("Failed to set mask IRQ!");
-}
-
-static void beatic_mask_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_unmask_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_ack_irq(struct irq_data *d)
-{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64)));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static void beatic_end_irq(struct irq_data *d)
-{
- s64 err;
- unsigned long flags;
-
- err = beat_downcount_of_interrupt(d->irq);
- if (err != 0) {
- if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */
- panic("Failed to downcount IRQ! Error = %16llx", err);
-
- printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq);
- }
- raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags);
- beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64));
- beatic_update_irq_mask(d->irq);
- raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags);
-}
-
-static struct irq_chip beatic_pic = {
- .name = "CELL-BEAT",
- .irq_unmask = beatic_unmask_irq,
- .irq_mask = beatic_mask_irq,
- .irq_eoi = beatic_end_irq,
-};
-
-/*
- * Dispose binding hardware IRQ number (hw) and Virtuql IRQ number (virq),
- * update flags.
- *
- * Note that the number (virq) is already assigned at upper layer.
- */
-static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
-{
- beat_destruct_irq_plug(virq);
-}
-
-/*
- * Create or update binding hardware IRQ number (hw) and Virtuql
- * IRQ number (virq). This is called only once for a given mapping.
- *
- * Note that the number (virq) is already assigned at upper layer.
- */
-static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
- irq_hw_number_t hw)
-{
- int64_t err;
-
- err = beat_construct_and_connect_irq_plug(virq, hw);
- if (err < 0)
- return -EIO;
-
- irq_set_status_flags(virq, IRQ_LEVEL);
- irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq);
- return 0;
-}
-
-/*
- * Translate device-tree interrupt spec to irq_hw_number_t style (ulong),
- * to pass away to irq_create_mapping().
- *
- * Called from irq_create_of_mapping() only.
- * Note: We have only 1 entry to translate.
- */
-static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
- const u32 *intspec, unsigned int intsize,
- irq_hw_number_t *out_hwirq,
- unsigned int *out_flags)
-{
- const u64 *intspec2 = (const u64 *)intspec;
-
- *out_hwirq = *intspec2;
- *out_flags |= IRQ_TYPE_LEVEL_LOW;
- return 0;
-}
-
-static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
-{
- /* Match all */
- return 1;
-}
-
-static const struct irq_domain_ops beatic_pic_host_ops = {
- .map = beatic_pic_host_map,
- .unmap = beatic_pic_host_unmap,
- .xlate = beatic_pic_host_xlate,
- .match = beatic_pic_host_match,
-};
-
-/*
- * Get an IRQ number
- * Note: returns VIRQ
- */
-static inline unsigned int beatic_get_irq_plug(void)
-{
- int i;
- uint64_t pending[4], ub;
-
- for (i = 0; i < MAX_IRQS; i += 256) {
- beat_detect_pending_interrupts(i, pending);
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[0] & beatic_irq_mask_enable[i/64+0]
- & beatic_irq_mask_ack[i/64+0]));
- if (ub != 64)
- return i + ub + 0;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[1] & beatic_irq_mask_enable[i/64+1]
- & beatic_irq_mask_ack[i/64+1]));
- if (ub != 64)
- return i + ub + 64;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[2] & beatic_irq_mask_enable[i/64+2]
- & beatic_irq_mask_ack[i/64+2]));
- if (ub != 64)
- return i + ub + 128;
- __asm__ ("cntlzd %0,%1":"=r"(ub):
- "r"(pending[3] & beatic_irq_mask_enable[i/64+3]
- & beatic_irq_mask_ack[i/64+3]));
- if (ub != 64)
- return i + ub + 192;
- }
-
- return NO_IRQ;
-}
-unsigned int beatic_get_irq(void)
-{
- unsigned int ret;
-
- ret = beatic_get_irq_plug();
- if (ret != NO_IRQ)
- beatic_ack_irq(irq_get_irq_data(ret));
- return ret;
-}
-
-/*
- */
-void __init beatic_init_IRQ(void)
-{
- int i;
-
- memset(beatic_irq_mask_enable, 0, sizeof(beatic_irq_mask_enable));
- memset(beatic_irq_mask_ack, 255, sizeof(beatic_irq_mask_ack));
- for (i = 0; i < MAX_IRQS; i += 256)
- beat_set_interrupt_mask(i, 0L, 0L, 0L, 0L);
-
- /* Set out get_irq function */
- ppc_md.get_irq = beatic_get_irq;
-
- /* Allocate an irq host */
- beatic_host = irq_domain_add_nomap(NULL, ~0, &beatic_pic_host_ops, NULL);
- BUG_ON(beatic_host == NULL);
- irq_set_default_host(beatic_host);
-}
-
-void beatic_deinit_IRQ(void)
-{
- int i;
-
- for (i = 1; i < nr_irqs; i++)
- beat_destruct_irq_plug(i);
-}
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h
deleted file mode 100644
index a7e52f91a078..000000000000
--- a/arch/powerpc/platforms/cell/beat_interrupt.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Celleb/Beat Interrupt controller
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef ASM_BEAT_PIC_H
-#define ASM_BEAT_PIC_H
-#ifdef __KERNEL__
-
-extern void beatic_init_IRQ(void);
-extern unsigned int beatic_get_irq(void);
-extern void beatic_deinit_IRQ(void);
-
-#endif
-#endif /* ASM_BEAT_PIC_H */
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c
deleted file mode 100644
index 3ce685568935..000000000000
--- a/arch/powerpc/platforms/cell/beat_iommu.c
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Support for IOMMU on Celleb platform.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/dma-mapping.h>
-#include <linux/pci.h>
-#include <linux/of_platform.h>
-
-#include <asm/machdep.h>
-
-#include "beat_wrapper.h"
-
-#define DMA_FLAGS 0xf800000000000000UL /* r/w permitted, coherency required,
- strongest order */
-
-static int __init find_dma_window(u64 *io_space_id, u64 *ioid,
- u64 *base, u64 *size, u64 *io_page_size)
-{
- struct device_node *dn;
- const unsigned long *dma_window;
-
- for_each_node_by_type(dn, "ioif") {
- dma_window = of_get_property(dn, "toshiba,dma-window", NULL);
- if (dma_window) {
- *io_space_id = (dma_window[0] >> 32) & 0xffffffffUL;
- *ioid = dma_window[0] & 0x7ffUL;
- *base = dma_window[1];
- *size = dma_window[2];
- *io_page_size = 1 << dma_window[3];
- of_node_put(dn);
- return 1;
- }
- }
- return 0;
-}
-
-static unsigned long celleb_dma_direct_offset;
-
-static void __init celleb_init_direct_mapping(void)
-{
- u64 lpar_addr, io_addr;
- u64 io_space_id, ioid, dma_base, dma_size, io_page_size;
-
- if (!find_dma_window(&io_space_id, &ioid, &dma_base, &dma_size,
- &io_page_size)) {
- pr_info("No dma window found !\n");
- return;
- }
-
- for (lpar_addr = 0; lpar_addr < dma_size; lpar_addr += io_page_size) {
- io_addr = lpar_addr + dma_base;
- (void)beat_put_iopte(io_space_id, io_addr, lpar_addr,
- ioid, DMA_FLAGS);
- }
-
- celleb_dma_direct_offset = dma_base;
-}
-
-static void celleb_dma_dev_setup(struct device *dev)
-{
- set_dma_ops(dev, &dma_direct_ops);
- set_dma_offset(dev, celleb_dma_direct_offset);
-}
-
-static void celleb_pci_dma_dev_setup(struct pci_dev *pdev)
-{
- celleb_dma_dev_setup(&pdev->dev);
-}
-
-static int celleb_of_bus_notify(struct notifier_block *nb,
- unsigned long action, void *data)
-{
- struct device *dev = data;
-
- /* We are only intereted in device addition */
- if (action != BUS_NOTIFY_ADD_DEVICE)
- return 0;
-
- celleb_dma_dev_setup(dev);
-
- return 0;
-}
-
-static struct notifier_block celleb_of_bus_notifier = {
- .notifier_call = celleb_of_bus_notify
-};
-
-static int __init celleb_init_iommu(void)
-{
- celleb_init_direct_mapping();
- ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup;
- bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier);
-
- return 0;
-}
-
-machine_arch_initcall(celleb_beat, celleb_init_iommu);
diff --git a/arch/powerpc/platforms/cell/beat_spu_priv1.c b/arch/powerpc/platforms/cell/beat_spu_priv1.c
deleted file mode 100644
index 13f52589d3a9..000000000000
--- a/arch/powerpc/platforms/cell/beat_spu_priv1.c
+++ /dev/null
@@ -1,205 +0,0 @@
-/*
- * spu hypervisor abstraction for Beat
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <asm/types.h>
-#include <asm/spu.h>
-#include <asm/spu_priv1.h>
-
-#include "beat_wrapper.h"
-
-static inline void _int_mask_set(struct spu *spu, int class, u64 mask)
-{
- spu->shadow_int_mask_RW[class] = mask;
- beat_set_irq_mask_for_spe(spu->spe_id, class, mask);
-}
-
-static inline u64 _int_mask_get(struct spu *spu, int class)
-{
- return spu->shadow_int_mask_RW[class];
-}
-
-static void int_mask_set(struct spu *spu, int class, u64 mask)
-{
- _int_mask_set(spu, class, mask);
-}
-
-static u64 int_mask_get(struct spu *spu, int class)
-{
- return _int_mask_get(spu, class);
-}
-
-static void int_mask_and(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
- old_mask = _int_mask_get(spu, class);
- _int_mask_set(spu, class, old_mask & mask);
-}
-
-static void int_mask_or(struct spu *spu, int class, u64 mask)
-{
- u64 old_mask;
- old_mask = _int_mask_get(spu, class);
- _int_mask_set(spu, class, old_mask | mask);
-}
-
-static void int_stat_clear(struct spu *spu, int class, u64 stat)
-{
- beat_clear_interrupt_status_of_spe(spu->spe_id, class, stat);
-}
-
-static u64 int_stat_get(struct spu *spu, int class)
-{
- u64 int_stat;
- beat_get_interrupt_status_of_spe(spu->spe_id, class, &int_stat);
- return int_stat;
-}
-
-static void cpu_affinity_set(struct spu *spu, int cpu)
-{
- return;
-}
-
-static u64 mfc_dar_get(struct spu *spu)
-{
- u64 dar;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dar_RW), &dar);
- return dar;
-}
-
-static u64 mfc_dsisr_get(struct spu *spu)
-{
- u64 dsisr;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dsisr_RW), &dsisr);
- return dsisr;
-}
-
-static void mfc_dsisr_set(struct spu *spu, u64 dsisr)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_dsisr_RW), dsisr);
-}
-
-static void mfc_sdr_setup(struct spu *spu)
-{
- return;
-}
-
-static void mfc_sr1_set(struct spu *spu, u64 sr1)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_sr1_RW), sr1);
-}
-
-static u64 mfc_sr1_get(struct spu *spu)
-{
- u64 sr1;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_sr1_RW), &sr1);
- return sr1;
-}
-
-static void mfc_tclass_id_set(struct spu *spu, u64 tclass_id)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_tclass_id_RW), tclass_id);
-}
-
-static u64 mfc_tclass_id_get(struct spu *spu)
-{
- u64 tclass_id;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, mfc_tclass_id_RW), &tclass_id);
- return tclass_id;
-}
-
-static void tlb_invalidate(struct spu *spu)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, tlb_invalidate_entry_W), 0ul);
-}
-
-static void resource_allocation_groupID_set(struct spu *spu, u64 id)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_groupID_RW),
- id);
-}
-
-static u64 resource_allocation_groupID_get(struct spu *spu)
-{
- u64 id;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_groupID_RW),
- &id);
- return id;
-}
-
-static void resource_allocation_enable_set(struct spu *spu, u64 enable)
-{
- beat_set_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_enable_RW),
- enable);
-}
-
-static u64 resource_allocation_enable_get(struct spu *spu)
-{
- u64 enable;
- beat_get_spe_privileged_state_1_registers(
- spu->spe_id,
- offsetof(struct spu_priv1, resource_allocation_enable_RW),
- &enable);
- return enable;
-}
-
-const struct spu_priv1_ops spu_priv1_beat_ops = {
- .int_mask_and = int_mask_and,
- .int_mask_or = int_mask_or,
- .int_mask_set = int_mask_set,
- .int_mask_get = int_mask_get,
- .int_stat_clear = int_stat_clear,
- .int_stat_get = int_stat_get,
- .cpu_affinity_set = cpu_affinity_set,
- .mfc_dar_get = mfc_dar_get,
- .mfc_dsisr_get = mfc_dsisr_get,
- .mfc_dsisr_set = mfc_dsisr_set,
- .mfc_sdr_setup = mfc_sdr_setup,
- .mfc_sr1_set = mfc_sr1_set,
- .mfc_sr1_get = mfc_sr1_get,
- .mfc_tclass_id_set = mfc_tclass_id_set,
- .mfc_tclass_id_get = mfc_tclass_id_get,
- .tlb_invalidate = tlb_invalidate,
- .resource_allocation_groupID_set = resource_allocation_groupID_set,
- .resource_allocation_groupID_get = resource_allocation_groupID_get,
- .resource_allocation_enable_set = resource_allocation_enable_set,
- .resource_allocation_enable_get = resource_allocation_enable_get,
-};
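
The deleted file above is the Beat-specific implementation of the SPU privileged-register accessors: every operation is routed through a table of function pointers (spu_priv1_beat_ops) so that common SPU management code never needs to know whether it is talking to bare hardware or to the Beat hypervisor. The following is a minimal userspace sketch of that ops-table pattern; struct spu_like, struct priv_ops and the beat_like_* backend are invented stand-ins, not the kernel API.

#include <stdio.h>
#include <stdint.h>

/* Invented stand-ins for the kernel's struct spu / struct spu_priv1_ops. */
struct spu_like {
	uint64_t shadow_mask[3];	/* cached interrupt masks, like shadow_int_mask_RW */
	int spe_id;
};

struct priv_ops {
	void     (*int_mask_set)(struct spu_like *s, int class, uint64_t mask);
	uint64_t (*int_mask_get)(struct spu_like *s, int class);
};

/* "Hypervisor" backend: remember the mask locally and pretend to tell the HV. */
static void beat_like_set(struct spu_like *s, int class, uint64_t mask)
{
	s->shadow_mask[class] = mask;
	printf("hcall: spe %d class %d mask set to %#llx\n",
	       s->spe_id, class, (unsigned long long)mask);
}

static uint64_t beat_like_get(struct spu_like *s, int class)
{
	return s->shadow_mask[class];	/* reads come from the shadow, no hcall */
}

static const struct priv_ops beat_like_ops = {
	.int_mask_set = beat_like_set,
	.int_mask_get = beat_like_get,
};

int main(void)
{
	struct spu_like spu = { .spe_id = 7 };
	const struct priv_ops *ops = &beat_like_ops;	/* chosen once at platform setup */

	ops->int_mask_set(&spu, 0, 0x3);
	/* int_mask_or() style read-modify-write through the same table: */
	ops->int_mask_set(&spu, 0, ops->int_mask_get(&spu, 0) | 0x8);
	printf("class 0 mask is now %#llx\n",
	       (unsigned long long)ops->int_mask_get(&spu, 0));
	return 0;
}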
diff --git a/arch/powerpc/platforms/cell/beat_syscall.h b/arch/powerpc/platforms/cell/beat_syscall.h
deleted file mode 100644
index 8580dc7e1798..000000000000
--- a/arch/powerpc/platforms/cell/beat_syscall.h
+++ /dev/null
@@ -1,164 +0,0 @@
-/*
- * Beat hypervisor call numbers
- *
- * (C) Copyright 2004-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef BEAT_BEAT_syscall_H
-#define BEAT_BEAT_syscall_H
-
-#ifdef __ASSEMBLY__
-#define __BEAT_ADD_VENDOR_ID(__x, __v) ((__v)<<60|(__x))
-#else
-#define __BEAT_ADD_VENDOR_ID(__x, __v) ((u64)(__v)<<60|(__x))
-#endif
-#define HV_allocate_memory __BEAT_ADD_VENDOR_ID(0, 0)
-#define HV_construct_virtual_address_space __BEAT_ADD_VENDOR_ID(2, 0)
-#define HV_destruct_virtual_address_space __BEAT_ADD_VENDOR_ID(10, 0)
-#define HV_get_virtual_address_space_id_of_ppe __BEAT_ADD_VENDOR_ID(4, 0)
-#define HV_query_logical_partition_address_region_info \
- __BEAT_ADD_VENDOR_ID(6, 0)
-#define HV_release_memory __BEAT_ADD_VENDOR_ID(13, 0)
-#define HV_select_virtual_address_space __BEAT_ADD_VENDOR_ID(7, 0)
-#define HV_load_range_registers __BEAT_ADD_VENDOR_ID(68, 0)
-#define HV_set_ppe_l2cache_rmt_entry __BEAT_ADD_VENDOR_ID(70, 0)
-#define HV_set_ppe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(71, 0)
-#define HV_set_spe_tlb_rmt_entry __BEAT_ADD_VENDOR_ID(72, 0)
-#define HV_get_io_address_translation_fault_info __BEAT_ADD_VENDOR_ID(14, 0)
-#define HV_get_iopte __BEAT_ADD_VENDOR_ID(16, 0)
-#define HV_preload_iopt_cache __BEAT_ADD_VENDOR_ID(17, 0)
-#define HV_put_iopte __BEAT_ADD_VENDOR_ID(15, 0)
-#define HV_connect_event_ports __BEAT_ADD_VENDOR_ID(21, 0)
-#define HV_construct_event_receive_port __BEAT_ADD_VENDOR_ID(18, 0)
-#define HV_destruct_event_receive_port __BEAT_ADD_VENDOR_ID(19, 0)
-#define HV_destruct_event_send_port __BEAT_ADD_VENDOR_ID(22, 0)
-#define HV_get_state_of_event_send_port __BEAT_ADD_VENDOR_ID(25, 0)
-#define HV_request_to_connect_event_ports __BEAT_ADD_VENDOR_ID(20, 0)
-#define HV_send_event_externally __BEAT_ADD_VENDOR_ID(23, 0)
-#define HV_send_event_locally __BEAT_ADD_VENDOR_ID(24, 0)
-#define HV_construct_and_connect_irq_plug __BEAT_ADD_VENDOR_ID(28, 0)
-#define HV_destruct_irq_plug __BEAT_ADD_VENDOR_ID(29, 0)
-#define HV_detect_pending_interrupts __BEAT_ADD_VENDOR_ID(26, 0)
-#define HV_end_of_interrupt __BEAT_ADD_VENDOR_ID(27, 0)
-#define HV_assign_control_signal_notification_port __BEAT_ADD_VENDOR_ID(45, 0)
-#define HV_end_of_control_signal_processing __BEAT_ADD_VENDOR_ID(48, 0)
-#define HV_get_control_signal __BEAT_ADD_VENDOR_ID(46, 0)
-#define HV_set_irq_mask_for_spe __BEAT_ADD_VENDOR_ID(61, 0)
-#define HV_shutdown_logical_partition __BEAT_ADD_VENDOR_ID(44, 0)
-#define HV_connect_message_ports __BEAT_ADD_VENDOR_ID(35, 0)
-#define HV_destruct_message_port __BEAT_ADD_VENDOR_ID(36, 0)
-#define HV_receive_message __BEAT_ADD_VENDOR_ID(37, 0)
-#define HV_get_message_port_info __BEAT_ADD_VENDOR_ID(34, 0)
-#define HV_request_to_connect_message_ports __BEAT_ADD_VENDOR_ID(33, 0)
-#define HV_send_message __BEAT_ADD_VENDOR_ID(32, 0)
-#define HV_get_logical_ppe_id __BEAT_ADD_VENDOR_ID(69, 0)
-#define HV_pause __BEAT_ADD_VENDOR_ID(9, 0)
-#define HV_destruct_shared_memory_handle __BEAT_ADD_VENDOR_ID(51, 0)
-#define HV_get_shared_memory_info __BEAT_ADD_VENDOR_ID(52, 0)
-#define HV_permit_sharing_memory __BEAT_ADD_VENDOR_ID(50, 0)
-#define HV_request_to_attach_shared_memory __BEAT_ADD_VENDOR_ID(49, 0)
-#define HV_enable_logical_spe_execution __BEAT_ADD_VENDOR_ID(55, 0)
-#define HV_construct_logical_spe __BEAT_ADD_VENDOR_ID(53, 0)
-#define HV_disable_logical_spe_execution __BEAT_ADD_VENDOR_ID(56, 0)
-#define HV_destruct_logical_spe __BEAT_ADD_VENDOR_ID(54, 0)
-#define HV_sense_spe_execution_status __BEAT_ADD_VENDOR_ID(58, 0)
-#define HV_insert_htab_entry __BEAT_ADD_VENDOR_ID(101, 0)
-#define HV_read_htab_entries __BEAT_ADD_VENDOR_ID(95, 0)
-#define HV_write_htab_entry __BEAT_ADD_VENDOR_ID(94, 0)
-#define HV_assign_io_address_translation_fault_port \
- __BEAT_ADD_VENDOR_ID(100, 0)
-#define HV_set_interrupt_mask __BEAT_ADD_VENDOR_ID(73, 0)
-#define HV_get_logical_partition_id __BEAT_ADD_VENDOR_ID(74, 0)
-#define HV_create_repository_node2 __BEAT_ADD_VENDOR_ID(90, 0)
-#define HV_create_repository_node __BEAT_ADD_VENDOR_ID(90, 0) /* alias */
-#define HV_get_repository_node_value2 __BEAT_ADD_VENDOR_ID(91, 0)
-#define HV_get_repository_node_value __BEAT_ADD_VENDOR_ID(91, 0) /* alias */
-#define HV_modify_repository_node_value2 __BEAT_ADD_VENDOR_ID(92, 0)
-#define HV_modify_repository_node_value __BEAT_ADD_VENDOR_ID(92, 0) /* alias */
-#define HV_remove_repository_node2 __BEAT_ADD_VENDOR_ID(93, 0)
-#define HV_remove_repository_node __BEAT_ADD_VENDOR_ID(93, 0) /* alias */
-#define HV_cancel_shared_memory __BEAT_ADD_VENDOR_ID(104, 0)
-#define HV_clear_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(206, 0)
-#define HV_construct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(80, 0)
-#define HV_destruct_spe_irq_outlet __BEAT_ADD_VENDOR_ID(81, 0)
-#define HV_disconnect_ipspc_service __BEAT_ADD_VENDOR_ID(88, 0)
-#define HV_execute_ipspc_command __BEAT_ADD_VENDOR_ID(86, 0)
-#define HV_get_interrupt_status_of_spe __BEAT_ADD_VENDOR_ID(205, 0)
-#define HV_get_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(208, 0)
-#define HV_permit_use_of_ipspc_service __BEAT_ADD_VENDOR_ID(85, 0)
-#define HV_reinitialize_logical_spe __BEAT_ADD_VENDOR_ID(82, 0)
-#define HV_request_ipspc_service __BEAT_ADD_VENDOR_ID(84, 0)
-#define HV_stop_ipspc_command __BEAT_ADD_VENDOR_ID(87, 0)
-#define HV_set_spe_privileged_state_1_registers __BEAT_ADD_VENDOR_ID(204, 0)
-#define HV_get_status_of_ipspc_service __BEAT_ADD_VENDOR_ID(203, 0)
-#define HV_put_characters_to_console __BEAT_ADD_VENDOR_ID(0x101, 1)
-#define HV_get_characters_from_console __BEAT_ADD_VENDOR_ID(0x102, 1)
-#define HV_get_base_clock __BEAT_ADD_VENDOR_ID(0x111, 1)
-#define HV_set_base_clock __BEAT_ADD_VENDOR_ID(0x112, 1)
-#define HV_get_frame_cycle __BEAT_ADD_VENDOR_ID(0x114, 1)
-#define HV_disable_console __BEAT_ADD_VENDOR_ID(0x115, 1)
-#define HV_disable_all_console __BEAT_ADD_VENDOR_ID(0x116, 1)
-#define HV_oneshot_timer __BEAT_ADD_VENDOR_ID(0x117, 1)
-#define HV_set_dabr __BEAT_ADD_VENDOR_ID(0x118, 1)
-#define HV_get_dabr __BEAT_ADD_VENDOR_ID(0x119, 1)
-#define HV_start_hv_stats __BEAT_ADD_VENDOR_ID(0x21c, 1)
-#define HV_stop_hv_stats __BEAT_ADD_VENDOR_ID(0x21d, 1)
-#define HV_get_hv_stats __BEAT_ADD_VENDOR_ID(0x21e, 1)
-#define HV_get_hv_error_stats __BEAT_ADD_VENDOR_ID(0x221, 1)
-#define HV_get_stats __BEAT_ADD_VENDOR_ID(0x224, 1)
-#define HV_get_heap_stats __BEAT_ADD_VENDOR_ID(0x225, 1)
-#define HV_get_memory_stats __BEAT_ADD_VENDOR_ID(0x227, 1)
-#define HV_get_memory_detail __BEAT_ADD_VENDOR_ID(0x228, 1)
-#define HV_set_priority_of_irq_outlet __BEAT_ADD_VENDOR_ID(0x122, 1)
-#define HV_get_physical_spe_by_reservation_id __BEAT_ADD_VENDOR_ID(0x128, 1)
-#define HV_get_spe_context __BEAT_ADD_VENDOR_ID(0x129, 1)
-#define HV_set_spe_context __BEAT_ADD_VENDOR_ID(0x12a, 1)
-#define HV_downcount_of_interrupt __BEAT_ADD_VENDOR_ID(0x12e, 1)
-#define HV_peek_spe_context __BEAT_ADD_VENDOR_ID(0x12f, 1)
-#define HV_read_bpa_register __BEAT_ADD_VENDOR_ID(0x131, 1)
-#define HV_write_bpa_register __BEAT_ADD_VENDOR_ID(0x132, 1)
-#define HV_map_context_table_of_spe __BEAT_ADD_VENDOR_ID(0x137, 1)
-#define HV_get_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x138, 1)
-#define HV_set_slb_for_logical_spe __BEAT_ADD_VENDOR_ID(0x139, 1)
-#define HV_init_pm __BEAT_ADD_VENDOR_ID(0x150, 1)
-#define HV_set_pm_signal __BEAT_ADD_VENDOR_ID(0x151, 1)
-#define HV_get_pm_signal __BEAT_ADD_VENDOR_ID(0x152, 1)
-#define HV_set_pm_config __BEAT_ADD_VENDOR_ID(0x153, 1)
-#define HV_get_pm_config __BEAT_ADD_VENDOR_ID(0x154, 1)
-#define HV_get_inner_trace_data __BEAT_ADD_VENDOR_ID(0x155, 1)
-#define HV_set_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x156, 1)
-#define HV_get_ext_trace_buffer __BEAT_ADD_VENDOR_ID(0x157, 1)
-#define HV_set_pm_interrupt __BEAT_ADD_VENDOR_ID(0x158, 1)
-#define HV_get_pm_interrupt __BEAT_ADD_VENDOR_ID(0x159, 1)
-#define HV_kick_pm __BEAT_ADD_VENDOR_ID(0x160, 1)
-#define HV_construct_pm_context __BEAT_ADD_VENDOR_ID(0x164, 1)
-#define HV_destruct_pm_context __BEAT_ADD_VENDOR_ID(0x165, 1)
-#define HV_be_slow __BEAT_ADD_VENDOR_ID(0x170, 1)
-#define HV_assign_ipspc_server_connection_status_notification_port \
- __BEAT_ADD_VENDOR_ID(0x173, 1)
-#define HV_get_raid_of_physical_spe __BEAT_ADD_VENDOR_ID(0x174, 1)
-#define HV_set_physical_spe_to_rag __BEAT_ADD_VENDOR_ID(0x175, 1)
-#define HV_release_physical_spe_from_rag __BEAT_ADD_VENDOR_ID(0x176, 1)
-#define HV_rtc_read __BEAT_ADD_VENDOR_ID(0x190, 1)
-#define HV_rtc_write __BEAT_ADD_VENDOR_ID(0x191, 1)
-#define HV_eeprom_read __BEAT_ADD_VENDOR_ID(0x192, 1)
-#define HV_eeprom_write __BEAT_ADD_VENDOR_ID(0x193, 1)
-#define HV_insert_htab_entry3 __BEAT_ADD_VENDOR_ID(0x104, 1)
-#define HV_invalidate_htab_entry3 __BEAT_ADD_VENDOR_ID(0x105, 1)
-#define HV_update_htab_permission3 __BEAT_ADD_VENDOR_ID(0x106, 1)
-#define HV_clear_htab3 __BEAT_ADD_VENDOR_ID(0x107, 1)
-#endif
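
Every call number in the header above is built with __BEAT_ADD_VENDOR_ID, which places a 4-bit vendor identifier in the top bits of a 64-bit opcode (vendor 0 for the base calls, vendor 1 for the Toshiba-specific ones). The short standalone program below checks that encoding; the decode macros BEAT_VENDOR and BEAT_NUMBER are invented here purely for illustration.

#include <stdio.h>
#include <stdint.h>

/* Same shape as the header's non-assembly definition. */
#define BEAT_ADD_VENDOR_ID(x, v)	(((uint64_t)(v) << 60) | (x))

/* Invented inverse helpers, just to show how the fields separate. */
#define BEAT_VENDOR(op)		((unsigned)((op) >> 60))
#define BEAT_NUMBER(op)		((op) & ((1ULL << 60) - 1))

int main(void)
{
	uint64_t generic = BEAT_ADD_VENDOR_ID(0x5e, 0);		/* a vendor-0 call number */
	uint64_t vendor1 = BEAT_ADD_VENDOR_ID(0x101, 1);	/* vendor-1, cf. console put */

	printf("generic: vendor=%u number=%#llx\n",
	       BEAT_VENDOR(generic), (unsigned long long)BEAT_NUMBER(generic));
	printf("vendor1: vendor=%u number=%#llx\n",
	       BEAT_VENDOR(vendor1), (unsigned long long)BEAT_NUMBER(vendor1));
	return 0;
}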
diff --git a/arch/powerpc/platforms/cell/beat_udbg.c b/arch/powerpc/platforms/cell/beat_udbg.c
deleted file mode 100644
index 350735bc8888..000000000000
--- a/arch/powerpc/platforms/cell/beat_udbg.c
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * udbg function for Beat
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/console.h>
-
-#include <asm/machdep.h>
-#include <asm/prom.h>
-#include <asm/udbg.h>
-
-#include "beat.h"
-
-#define celleb_vtermno 0
-
-static void udbg_putc_beat(char c)
-{
- unsigned long rc;
-
- if (c == '\n')
- udbg_putc_beat('\r');
-
- rc = beat_put_term_char(celleb_vtermno, 1, (uint64_t)c << 56, 0);
-}
-
-/* Buffered chars getc */
-static u64 inbuflen;
-static u64 inbuf[2]; /* must be 2 u64s */
-
-static int udbg_getc_poll_beat(void)
-{
- /* The interface is tricky because it may return up to 16 chars.
- * We save them statically for future calls to udbg_getc().
- */
- char ch, *buf = (char *)inbuf;
- int i;
- long rc;
- if (inbuflen == 0) {
- /* get some more chars. */
- inbuflen = 0;
- rc = beat_get_term_char(celleb_vtermno, &inbuflen,
- inbuf+0, inbuf+1);
- if (rc != 0)
- inbuflen = 0; /* otherwise inbuflen is garbage */
- }
- if (inbuflen <= 0 || inbuflen > 16) {
- /* Catch error case as well as other oddities (corruption) */
- inbuflen = 0;
- return -1;
- }
- ch = buf[0];
- for (i = 1; i < inbuflen; i++) /* shuffle them down. */
- buf[i-1] = buf[i];
- inbuflen--;
- return ch;
-}
-
-static int udbg_getc_beat(void)
-{
- int ch;
- for (;;) {
- ch = udbg_getc_poll_beat();
- if (ch == -1) {
- /* This shouldn't be needed...but... */
- volatile unsigned long delay;
- for (delay = 0; delay < 2000000; delay++)
- ;
- } else {
- return ch;
- }
- }
-}
-
-/* call this from early_init() for a working debug console on
- * vterm capable LPAR machines
- */
-void __init udbg_init_debug_beat(void)
-{
- udbg_putc = udbg_putc_beat;
- udbg_getc = udbg_getc_beat;
- udbg_getc_poll = udbg_getc_poll_beat;
-}
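
The interesting part of the udbg code above is udbg_getc_poll_beat(): the hypervisor can return up to 16 characters per call, so the routine drains one character per invocation and keeps the remainder in a small static buffer. The sketch below reproduces that buffering logic as a standalone program; fake_hv_get_chars() is an invented stand-in for the real beat_get_term_char() hypervisor call.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Fake "hypervisor": hands back up to 16 bytes per call from a canned string. */
static int fake_hv_get_chars(char *buf, uint64_t *len)
{
	static const char *src = "hello, debug console\n";
	static size_t pos;
	size_t n = strlen(src) - pos;

	if (n == 0)
		return -1;		/* nothing left */
	if (n > 16)
		n = 16;
	memcpy(buf, src + pos, n);
	pos += n;
	*len = n;
	return 0;
}

/* One character per call, buffering the rest, as udbg_getc_poll_beat() does. */
static int getc_poll(void)
{
	static char inbuf[16];
	static uint64_t inbuflen;
	int ch, i;

	if (inbuflen == 0 && fake_hv_get_chars(inbuf, &inbuflen) != 0)
		inbuflen = 0;
	if (inbuflen == 0 || inbuflen > 16)
		return -1;		/* error, corruption, or simply no input */

	ch = inbuf[0];
	for (i = 1; i < (int)inbuflen; i++)	/* shuffle the remainder down */
		inbuf[i - 1] = inbuf[i];
	inbuflen--;
	return ch;
}

int main(void)
{
	int c;

	while ((c = getc_poll()) != -1)
		putchar(c);
	return 0;
}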
diff --git a/arch/powerpc/platforms/cell/beat_wrapper.h b/arch/powerpc/platforms/cell/beat_wrapper.h
deleted file mode 100644
index c1109969f242..000000000000
--- a/arch/powerpc/platforms/cell/beat_wrapper.h
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Beat hypervisor call I/F
- *
- * (C) Copyright 2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/pseries/plpar_wrapper.h.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-#ifndef BEAT_HCALL
-#include <linux/string.h>
-#include "beat_syscall.h"
-
-/* defined in hvCall.S */
-extern s64 beat_hcall_norets(u64 opcode, ...);
-extern s64 beat_hcall_norets8(u64 opcode, u64 arg1, u64 arg2, u64 arg3,
- u64 arg4, u64 arg5, u64 arg6, u64 arg7, u64 arg8);
-extern s64 beat_hcall1(u64 opcode, u64 retbuf[1], ...);
-extern s64 beat_hcall2(u64 opcode, u64 retbuf[2], ...);
-extern s64 beat_hcall3(u64 opcode, u64 retbuf[3], ...);
-extern s64 beat_hcall4(u64 opcode, u64 retbuf[4], ...);
-extern s64 beat_hcall5(u64 opcode, u64 retbuf[5], ...);
-extern s64 beat_hcall6(u64 opcode, u64 retbuf[6], ...);
-
-static inline s64 beat_downcount_of_interrupt(u64 plug_id)
-{
- return beat_hcall_norets(HV_downcount_of_interrupt, plug_id);
-}
-
-static inline s64 beat_set_interrupt_mask(u64 index,
- u64 val0, u64 val1, u64 val2, u64 val3)
-{
- return beat_hcall_norets(HV_set_interrupt_mask, index,
- val0, val1, val2, val3);
-}
-
-static inline s64 beat_destruct_irq_plug(u64 plug_id)
-{
- return beat_hcall_norets(HV_destruct_irq_plug, plug_id);
-}
-
-static inline s64 beat_construct_and_connect_irq_plug(u64 plug_id,
- u64 outlet_id)
-{
- return beat_hcall_norets(HV_construct_and_connect_irq_plug, plug_id,
- outlet_id);
-}
-
-static inline s64 beat_detect_pending_interrupts(u64 index, u64 *retbuf)
-{
- return beat_hcall4(HV_detect_pending_interrupts, retbuf, index);
-}
-
-static inline s64 beat_pause(u64 style)
-{
- return beat_hcall_norets(HV_pause, style);
-}
-
-static inline s64 beat_read_htab_entries(u64 htab_id, u64 index, u64 *retbuf)
-{
- return beat_hcall5(HV_read_htab_entries, retbuf, htab_id, index);
-}
-
-static inline s64 beat_insert_htab_entry(u64 htab_id, u64 group,
- u64 bitmask, u64 hpte_v, u64 hpte_r, u64 *slot)
-{
- u64 dummy[3];
- s64 ret;
-
- ret = beat_hcall3(HV_insert_htab_entry, dummy, htab_id, group,
- bitmask, hpte_v, hpte_r);
- *slot = dummy[0];
- return ret;
-}
-
-static inline s64 beat_write_htab_entry(u64 htab_id, u64 slot,
- u64 hpte_v, u64 hpte_r, u64 mask_v, u64 mask_r,
- u64 *ret_v, u64 *ret_r)
-{
- u64 dummy[2];
- s64 ret;
-
- ret = beat_hcall2(HV_write_htab_entry, dummy, htab_id, slot,
- hpte_v, hpte_r, mask_v, mask_r);
- *ret_v = dummy[0];
- *ret_r = dummy[1];
- return ret;
-}
-
-static inline s64 beat_insert_htab_entry3(u64 htab_id, u64 group,
- u64 hpte_v, u64 hpte_r, u64 mask_v, u64 value_v, u64 *slot)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_insert_htab_entry3, dummy, htab_id, group,
- hpte_v, hpte_r, mask_v, value_v);
- *slot = dummy[0];
- return ret;
-}
-
-static inline s64 beat_invalidate_htab_entry3(u64 htab_id, u64 group,
- u64 va, u64 pss)
-{
- return beat_hcall_norets(HV_invalidate_htab_entry3,
- htab_id, group, va, pss);
-}
-
-static inline s64 beat_update_htab_permission3(u64 htab_id, u64 group,
- u64 va, u64 pss, u64 ptel_mask, u64 ptel_value)
-{
- return beat_hcall_norets(HV_update_htab_permission3,
- htab_id, group, va, pss, ptel_mask, ptel_value);
-}
-
-static inline s64 beat_clear_htab3(u64 htab_id)
-{
- return beat_hcall_norets(HV_clear_htab3, htab_id);
-}
-
-static inline void beat_shutdown_logical_partition(u64 code)
-{
- (void)beat_hcall_norets(HV_shutdown_logical_partition, code);
-}
-
-static inline s64 beat_rtc_write(u64 time_from_epoch)
-{
- return beat_hcall_norets(HV_rtc_write, time_from_epoch);
-}
-
-static inline s64 beat_rtc_read(u64 *time_from_epoch)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_rtc_read, dummy);
- *time_from_epoch = dummy[0];
- return ret;
-}
-
-#define BEAT_NVRW_CNT (sizeof(u64) * 6)
-
-static inline s64 beat_eeprom_write(u64 index, u64 length, u8 *buffer)
-{
- u64 b[6];
-
- if (length > BEAT_NVRW_CNT)
- return -1;
- memcpy(b, buffer, sizeof(b));
- return beat_hcall_norets8(HV_eeprom_write, index, length,
- b[0], b[1], b[2], b[3], b[4], b[5]);
-}
-
-static inline s64 beat_eeprom_read(u64 index, u64 length, u8 *buffer)
-{
- u64 b[6];
- s64 ret;
-
- if (length > BEAT_NVRW_CNT)
- return -1;
- ret = beat_hcall6(HV_eeprom_read, b, index, length);
- memcpy(buffer, b, length);
- return ret;
-}
-
-static inline s64 beat_set_dabr(u64 value, u64 style)
-{
- return beat_hcall_norets(HV_set_dabr, value, style);
-}
-
-static inline s64 beat_get_characters_from_console(u64 termno, u64 *len,
- u8 *buffer)
-{
- u64 dummy[3];
- s64 ret;
-
- ret = beat_hcall3(HV_get_characters_from_console, dummy, termno, len);
- *len = dummy[0];
- memcpy(buffer, dummy + 1, *len);
- return ret;
-}
-
-static inline s64 beat_put_characters_to_console(u64 termno, u64 len,
- u8 *buffer)
-{
- u64 b[2];
-
- memcpy(b, buffer, len);
- return beat_hcall_norets(HV_put_characters_to_console, termno, len,
- b[0], b[1]);
-}
-
-static inline s64 beat_get_spe_privileged_state_1_registers(
- u64 id, u64 offsetof, u64 *value)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_get_spe_privileged_state_1_registers, dummy, id,
- offsetof);
- *value = dummy[0];
- return ret;
-}
-
-static inline s64 beat_set_irq_mask_for_spe(u64 id, u64 class, u64 mask)
-{
- return beat_hcall_norets(HV_set_irq_mask_for_spe, id, class, mask);
-}
-
-static inline s64 beat_clear_interrupt_status_of_spe(u64 id, u64 class,
- u64 mask)
-{
- return beat_hcall_norets(HV_clear_interrupt_status_of_spe,
- id, class, mask);
-}
-
-static inline s64 beat_set_spe_privileged_state_1_registers(
- u64 id, u64 offsetof, u64 value)
-{
- return beat_hcall_norets(HV_set_spe_privileged_state_1_registers,
- id, offsetof, value);
-}
-
-static inline s64 beat_get_interrupt_status_of_spe(u64 id, u64 class, u64 *val)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_get_interrupt_status_of_spe, dummy, id, class);
- *val = dummy[0];
- return ret;
-}
-
-static inline s64 beat_put_iopte(u64 ioas_id, u64 io_addr, u64 real_addr,
- u64 ioid, u64 flags)
-{
- return beat_hcall_norets(HV_put_iopte, ioas_id, io_addr, real_addr,
- ioid, flags);
-}
-
-static inline s64 beat_construct_event_receive_port(u64 *port)
-{
- u64 dummy[1];
- s64 ret;
-
- ret = beat_hcall1(HV_construct_event_receive_port, dummy);
- *port = dummy[0];
- return ret;
-}
-
-static inline s64 beat_destruct_event_receive_port(u64 port)
-{
- s64 ret;
-
- ret = beat_hcall_norets(HV_destruct_event_receive_port, port);
- return ret;
-}
-
-static inline s64 beat_create_repository_node(u64 path[4], u64 data[2])
-{
- s64 ret;
-
- ret = beat_hcall_norets(HV_create_repository_node2,
- path[0], path[1], path[2], path[3], data[0], data[1]);
- return ret;
-}
-
-static inline s64 beat_get_repository_node_value(u64 lpid, u64 path[4],
- u64 data[2])
-{
- s64 ret;
-
- ret = beat_hcall2(HV_get_repository_node_value2, data,
- lpid, path[0], path[1], path[2], path[3]);
- return ret;
-}
-
-#endif
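
All of the wrappers in beat_wrapper.h follow one pattern: a variadic hypervisor-call primitive (implemented in assembly) returns a status code and writes any output values into a caller-supplied array, and a thin static inline per call copies those slots into named out-parameters. The compilable sketch below models that pattern with a stubbed primitive; fake_hcall1(), fake_rtc_read() and FAKE_HV_RTC_READ are illustrative names, and the stub obviously performs no real hypervisor call.

#include <stdio.h>
#include <stdint.h>

/* Stub for the assembly primitive: status in the return value,
 * one output value written into retbuf[0]. */
static int64_t fake_hcall1(uint64_t opcode, uint64_t retbuf[1], uint64_t arg)
{
	retbuf[0] = opcode ^ arg;	/* made-up "result" so the data flow is visible */
	return 0;			/* 0 means success */
}

#define FAKE_HV_RTC_READ	0x190	/* low bits borrowed from HV_rtc_read above */

/* Wrapper in the style of beat_rtc_read(): hide the retbuf from callers. */
static inline int64_t fake_rtc_read(uint64_t *time_from_epoch)
{
	uint64_t dummy[1];
	int64_t ret;

	ret = fake_hcall1(FAKE_HV_RTC_READ, dummy, 0);
	*time_from_epoch = dummy[0];
	return ret;
}

int main(void)
{
	uint64_t t;

	if (fake_rtc_read(&t) == 0)
		printf("rtc value: %#llx\n", (unsigned long long)t);
	return 0;
}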
diff --git a/arch/powerpc/platforms/cell/cell.h b/arch/powerpc/platforms/cell/cell.h
new file mode 100644
index 000000000000..ef143dfee068
--- /dev/null
+++ b/arch/powerpc/platforms/cell/cell.h
@@ -0,0 +1,24 @@
+/*
+ * Cell Platform common data structures
+ *
+ * Copyright 2015, Daniel Axtens, IBM Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef CELL_H
+#define CELL_H
+
+#include <asm/pci-bridge.h>
+
+extern struct pci_controller_ops cell_pci_controller_ops;
+
+#endif
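
The newly added cell.h is small on purpose: it only exports cell_pci_controller_ops so that each PCI host bridge on the platform can be handed its own table of controller hooks instead of relying on machine-wide callbacks. The following is a rough standalone illustration of that per-controller hook idea; struct controller, struct controller_ops and cell_like_enable() are invented miniature types, not the kernel's pci_controller_ops interface.

#include <stdio.h>
#include <stdbool.h>

/* Invented miniature of a per-host-bridge hook table. */
struct controller_ops {
	bool (*enable_device)(const char *devname);
};

struct controller {
	const char *name;
	const struct controller_ops *ops;	/* each bridge carries its own hooks */
};

static bool cell_like_enable(const char *devname)
{
	printf("cell-style enable for %s\n", devname);
	return true;
}

static const struct controller_ops cell_like_ops = {
	.enable_device = cell_like_enable,
};

int main(void)
{
	struct controller phb = { .name = "phb0", .ops = &cell_like_ops };

	/* Generic code only needs the controller pointer, not the platform. */
	if (phb.ops->enable_device && phb.ops->enable_device("0000:00:01.0"))
		printf("device enabled behind %s\n", phb.name);
	return 0;
}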
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
deleted file mode 100644
index 3ce70ded2d6a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*
- * Support for PCI on Celleb platform.
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/kernel/rtas_pci.c:
- * Copyright (C) 2001 Dave Engebretsen, IBM Corporation
- * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/init.h>
-#include <linux/memblock.h>
-#include <linux/pci_regs.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/slab.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "celleb_pci.h"
-
-#define MAX_PCI_DEVICES 32
-#define MAX_PCI_FUNCTIONS 8
-#define MAX_PCI_BASE_ADDRS 3 /* use 64 bit address */
-
-/* definition for the fake pci configuration area for GbE, etc. */
-
-struct celleb_pci_resource {
- struct resource r[MAX_PCI_BASE_ADDRS];
-};
-
-struct celleb_pci_private {
- unsigned char *fake_config[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
- struct celleb_pci_resource *res[MAX_PCI_DEVICES][MAX_PCI_FUNCTIONS];
-};
-
-static inline u8 celleb_fake_config_readb(void *addr)
-{
- u8 *p = addr;
- return *p;
-}
-
-static inline u16 celleb_fake_config_readw(void *addr)
-{
- __le16 *p = addr;
- return le16_to_cpu(*p);
-}
-
-static inline u32 celleb_fake_config_readl(void *addr)
-{
- __le32 *p = addr;
- return le32_to_cpu(*p);
-}
-
-static inline void celleb_fake_config_writeb(u32 val, void *addr)
-{
- u8 *p = addr;
- *p = val;
-}
-
-static inline void celleb_fake_config_writew(u32 val, void *addr)
-{
- __le16 val16;
- __le16 *p = addr;
- val16 = cpu_to_le16(val);
- *p = val16;
-}
-
-static inline void celleb_fake_config_writel(u32 val, void *addr)
-{
- __le32 val32;
- __le32 *p = addr;
- val32 = cpu_to_le32(val);
- *p = val32;
-}
-
-static unsigned char *get_fake_config_start(struct pci_controller *hose,
- int devno, int fn)
-{
- struct celleb_pci_private *private = hose->private_data;
-
- if (private == NULL)
- return NULL;
-
- return private->fake_config[devno][fn];
-}
-
-static struct celleb_pci_resource *get_resource_start(
- struct pci_controller *hose,
- int devno, int fn)
-{
- struct celleb_pci_private *private = hose->private_data;
-
- if (private == NULL)
- return NULL;
-
- return private->res[devno][fn];
-}
-
-
-static void celleb_config_read_fake(unsigned char *config, int where,
- int size, u32 *val)
-{
- char *p = config + where;
-
- switch (size) {
- case 1:
- *val = celleb_fake_config_readb(p);
- break;
- case 2:
- *val = celleb_fake_config_readw(p);
- break;
- case 4:
- *val = celleb_fake_config_readl(p);
- break;
- }
-}
-
-static void celleb_config_write_fake(unsigned char *config, int where,
- int size, u32 val)
-{
- char *p = config + where;
-
- switch (size) {
- case 1:
- celleb_fake_config_writeb(val, p);
- break;
- case 2:
- celleb_fake_config_writew(val, p);
- break;
- case 4:
- celleb_fake_config_writel(val, p);
- break;
- }
-}
-
-static int celleb_fake_pci_read_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- char *config;
- struct pci_controller *hose = pci_bus_to_host(bus);
- unsigned int devno = devfn >> 3;
- unsigned int fn = devfn & 0x7;
-
-	/* alignment check */
- BUG_ON(where % size);
-
- pr_debug(" fake read: bus=0x%x, ", bus->number);
- config = get_fake_config_start(hose, devno, fn);
-
- pr_debug("devno=0x%x, where=0x%x, size=0x%x, ", devno, where, size);
- if (!config) {
- pr_debug("failed\n");
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- celleb_config_read_fake(config, where, size, val);
- pr_debug("val=0x%x\n", *val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-
-static int celleb_fake_pci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- char *config;
- struct pci_controller *hose = pci_bus_to_host(bus);
- struct celleb_pci_resource *res;
- unsigned int devno = devfn >> 3;
- unsigned int fn = devfn & 0x7;
-
-	/* alignment check */
- BUG_ON(where % size);
-
- config = get_fake_config_start(hose, devno, fn);
-
- if (!config)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (val == ~0) {
- int i = (where - PCI_BASE_ADDRESS_0) >> 3;
-
- switch (where) {
- case PCI_BASE_ADDRESS_0:
- case PCI_BASE_ADDRESS_2:
- if (size != 4)
- return PCIBIOS_DEVICE_NOT_FOUND;
- res = get_resource_start(hose, devno, fn);
- if (!res)
- return PCIBIOS_DEVICE_NOT_FOUND;
- celleb_config_write_fake(config, where, size,
- (res->r[i].end - res->r[i].start));
- return PCIBIOS_SUCCESSFUL;
- case PCI_BASE_ADDRESS_1:
- case PCI_BASE_ADDRESS_3:
- case PCI_BASE_ADDRESS_4:
- case PCI_BASE_ADDRESS_5:
- break;
- default:
- break;
- }
- }
-
- celleb_config_write_fake(config, where, size, val);
- pr_debug(" fake write: where=%x, size=%d, val=%x\n",
- where, size, val);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops celleb_fake_pci_ops = {
- .read = celleb_fake_pci_read_config,
- .write = celleb_fake_pci_write_config,
-};
-
-static inline void celleb_setup_pci_base_addrs(struct pci_controller *hose,
- unsigned int devno, unsigned int fn,
- unsigned int num_base_addr)
-{
- u32 val;
- unsigned char *config;
- struct celleb_pci_resource *res;
-
- config = get_fake_config_start(hose, devno, fn);
- res = get_resource_start(hose, devno, fn);
-
- if (!config || !res)
- return;
-
- switch (num_base_addr) {
- case 3:
- val = (res->r[2].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_4, 4, val);
- val = res->r[2].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_5, 4, val);
- /* FALLTHROUGH */
- case 2:
- val = (res->r[1].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_2, 4, val);
- val = res->r[1].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_3, 4, val);
- /* FALLTHROUGH */
- case 1:
- val = (res->r[0].start & 0xfffffff0)
- | PCI_BASE_ADDRESS_MEM_TYPE_64;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_0, 4, val);
- val = res->r[0].start >> 32;
- celleb_config_write_fake(config, PCI_BASE_ADDRESS_1, 4, val);
- break;
- }
-
- val = PCI_COMMAND_IO | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- celleb_config_write_fake(config, PCI_COMMAND, 2, val);
-}
-
-static int __init celleb_setup_fake_pci_device(struct device_node *node,
- struct pci_controller *hose)
-{
- unsigned int rlen;
- int num_base_addr = 0;
- u32 val;
- const u32 *wi0, *wi1, *wi2, *wi3, *wi4;
- unsigned int devno, fn;
- struct celleb_pci_private *private = hose->private_data;
- unsigned char **config = NULL;
- struct celleb_pci_resource **res = NULL;
- const char *name;
- const unsigned long *li;
- int size, result;
-
- if (private == NULL) {
- printk(KERN_ERR "PCI: "
- "memory space for pci controller is not assigned\n");
- goto error;
- }
-
- name = of_get_property(node, "model", &rlen);
- if (!name) {
- printk(KERN_ERR "PCI: model property not found.\n");
- goto error;
- }
-
- wi4 = of_get_property(node, "reg", &rlen);
- if (wi4 == NULL)
- goto error;
-
- devno = ((wi4[0] >> 8) & 0xff) >> 3;
- fn = (wi4[0] >> 8) & 0x7;
-
- pr_debug("PCI: celleb_setup_fake_pci() %s devno=%x fn=%x\n", name,
- devno, fn);
-
- size = 256;
- config = &private->fake_config[devno][fn];
- *config = zalloc_maybe_bootmem(size, GFP_KERNEL);
- if (*config == NULL) {
- printk(KERN_ERR "PCI: "
- "not enough memory for fake configuration space\n");
- goto error;
- }
- pr_debug("PCI: fake config area assigned 0x%016lx\n",
- (unsigned long)*config);
-
- size = sizeof(struct celleb_pci_resource);
- res = &private->res[devno][fn];
- *res = zalloc_maybe_bootmem(size, GFP_KERNEL);
- if (*res == NULL) {
- printk(KERN_ERR
- "PCI: not enough memory for resource data space\n");
- goto error;
- }
- pr_debug("PCI: res assigned 0x%016lx\n", (unsigned long)*res);
-
- wi0 = of_get_property(node, "device-id", NULL);
- wi1 = of_get_property(node, "vendor-id", NULL);
- wi2 = of_get_property(node, "class-code", NULL);
- wi3 = of_get_property(node, "revision-id", NULL);
- if (!wi0 || !wi1 || !wi2 || !wi3) {
- printk(KERN_ERR "PCI: Missing device tree properties.\n");
- goto error;
- }
-
- celleb_config_write_fake(*config, PCI_DEVICE_ID, 2, wi0[0] & 0xffff);
- celleb_config_write_fake(*config, PCI_VENDOR_ID, 2, wi1[0] & 0xffff);
- pr_debug("class-code = 0x%08x\n", wi2[0]);
-
- celleb_config_write_fake(*config, PCI_CLASS_PROG, 1, wi2[0] & 0xff);
- celleb_config_write_fake(*config, PCI_CLASS_DEVICE, 2,
- (wi2[0] >> 8) & 0xffff);
- celleb_config_write_fake(*config, PCI_REVISION_ID, 1, wi3[0]);
-
- while (num_base_addr < MAX_PCI_BASE_ADDRS) {
- result = of_address_to_resource(node,
- num_base_addr, &(*res)->r[num_base_addr]);
- if (result)
- break;
- num_base_addr++;
- }
-
- celleb_setup_pci_base_addrs(hose, devno, fn, num_base_addr);
-
- li = of_get_property(node, "interrupts", &rlen);
- if (!li) {
- printk(KERN_ERR "PCI: interrupts not found.\n");
- goto error;
- }
- val = li[0];
- celleb_config_write_fake(*config, PCI_INTERRUPT_PIN, 1, 1);
- celleb_config_write_fake(*config, PCI_INTERRUPT_LINE, 1, val);
-
-#ifdef DEBUG
- pr_debug("PCI: %s irq=%ld\n", name, li[0]);
- for (i = 0; i < 6; i++) {
- celleb_config_read_fake(*config,
- PCI_BASE_ADDRESS_0 + 0x4 * i, 4,
- &val);
- pr_debug("PCI: %s fn=%d base_address_%d=0x%x\n",
- name, fn, i, val);
- }
-#endif
-
- celleb_config_write_fake(*config, PCI_HEADER_TYPE, 1,
- PCI_HEADER_TYPE_NORMAL);
-
- return 0;
-
-error:
- if (mem_init_done) {
- if (config && *config)
- kfree(*config);
- if (res && *res)
- kfree(*res);
-
- } else {
- if (config && *config) {
- size = 256;
- memblock_free(__pa(*config), size);
- }
- if (res && *res) {
- size = sizeof(struct celleb_pci_resource);
- memblock_free(__pa(*res), size);
- }
- }
-
- return 1;
-}
-
-static int __init phb_set_bus_ranges(struct device_node *dev,
- struct pci_controller *phb)
-{
- const int *bus_range;
- unsigned int len;
-
- bus_range = of_get_property(dev, "bus-range", &len);
- if (bus_range == NULL || len < 2 * sizeof(int))
- return 1;
-
- phb->first_busno = bus_range[0];
- phb->last_busno = bus_range[1];
-
- return 0;
-}
-
-static void __init celleb_alloc_private_mem(struct pci_controller *hose)
-{
- hose->private_data =
- zalloc_maybe_bootmem(sizeof(struct celleb_pci_private),
- GFP_KERNEL);
-}
-
-static int __init celleb_setup_fake_pci(struct device_node *dev,
- struct pci_controller *phb)
-{
- struct device_node *node;
-
- phb->ops = &celleb_fake_pci_ops;
- celleb_alloc_private_mem(phb);
-
- for (node = of_get_next_child(dev, NULL);
- node != NULL; node = of_get_next_child(dev, node))
- celleb_setup_fake_pci_device(node, phb);
-
- return 0;
-}
-
-static struct celleb_phb_spec celleb_fake_pci_spec __initdata = {
- .setup = celleb_setup_fake_pci,
-};
-
-static const struct of_device_id celleb_phb_match[] __initconst = {
- {
- .name = "pci-pseudo",
- .data = &celleb_fake_pci_spec,
- }, {
- .name = "epci",
- .data = &celleb_epci_spec,
- }, {
- .name = "pcie",
- .data = &celleb_pciex_spec,
- }, {
- },
-};
-
-int __init celleb_setup_phb(struct pci_controller *phb)
-{
- struct device_node *dev = phb->dn;
- const struct of_device_id *match;
- const struct celleb_phb_spec *phb_spec;
- int rc;
-
- match = of_match_node(celleb_phb_match, dev);
- if (!match)
- return 1;
-
- phb_set_bus_ranges(dev, phb);
- phb->buid = 1;
-
- phb_spec = match->data;
- rc = (*phb_spec->setup)(dev, phb);
- if (rc)
- return 1;
-
- if (phb_spec->ops)
- iowa_register_bus(phb, phb_spec->ops,
- phb_spec->iowa_init,
- phb_spec->iowa_data);
- return 0;
-}
-
-int celleb_pci_probe_mode(struct pci_bus *bus)
-{
- return PCI_PROBE_DEVTREE;
-}
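
celleb_fake_pci_read_config() and celleb_fake_pci_write_config() above serve configuration accesses for pseudo devices entirely out of a 256-byte buffer per device/function. The one special case is a 32-bit write of all ones to a BAR, which is answered with the span of the backing resource so that the standard PCI BAR-sizing probe still works. The sketch below models just that behaviour with host-endian accesses for simplicity (the kernel helpers fix the data up to little-endian); fake_cfg_read(), fake_cfg_write() and bar0_span are invented names.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define CFG_SIZE	256
#define BAR0_OFFSET	0x10		/* PCI_BASE_ADDRESS_0 */

static uint8_t cfg[CFG_SIZE];		/* one fake device's config space */
static uint32_t bar0_span = 0x0fff;	/* pretend BAR0 decodes 4 KiB (end - start) */

static uint32_t fake_cfg_read(int where)
{
	uint32_t val;

	memcpy(&val, cfg + where, sizeof(val));
	return val;
}

static void fake_cfg_write(int where, uint32_t val)
{
	/* BAR sizing probe: an all-ones write is answered with the span. */
	if (where == BAR0_OFFSET && val == ~0u)
		val = bar0_span;
	memcpy(cfg + where, &val, sizeof(val));
}

int main(void)
{
	fake_cfg_write(BAR0_OFFSET, 0x80000000u);	/* "assign" the BAR */
	printf("bar0 = %#x\n", fake_cfg_read(BAR0_OFFSET));

	fake_cfg_write(BAR0_OFFSET, ~0u);		/* sizing probe */
	printf("bar0 after ~0 write = %#x\n", fake_cfg_read(BAR0_OFFSET));
	return 0;
}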
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h
deleted file mode 100644
index a801fcc5f389..000000000000
--- a/arch/powerpc/platforms/cell/celleb_pci.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * pci prototypes for Celleb platform
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_PCI_H
-#define _CELLEB_PCI_H
-
-#include <linux/pci.h>
-
-#include <asm/pci-bridge.h>
-#include <asm/prom.h>
-#include <asm/ppc-pci.h>
-#include <asm/io-workarounds.h>
-
-struct iowa_bus;
-
-struct celleb_phb_spec {
- int (*setup)(struct device_node *, struct pci_controller *);
- struct ppc_pci_io *ops;
- int (*iowa_init)(struct iowa_bus *, void *);
- void *iowa_data;
-};
-
-extern int celleb_setup_phb(struct pci_controller *);
-extern int celleb_pci_probe_mode(struct pci_bus *);
-
-extern struct celleb_phb_spec celleb_epci_spec;
-extern struct celleb_phb_spec celleb_pciex_spec;
-
-#endif /* _CELLEB_PCI_H */
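
celleb_pci.h is the glue between the bridge flavours: each one supplies a celleb_phb_spec, and celleb_setup_phb() in the file above selects a spec by matching the device-tree node name ("pci-pseudo", "epci" or "pcie") before calling its setup hook. A plain-C sketch of that name-to-spec dispatch is shown below; struct fake_node, struct fake_spec and setup_phb() are invented types standing in for the of_device_id machinery.

#include <stdio.h>
#include <string.h>

struct fake_node { const char *name; };
struct fake_spec { int (*setup)(struct fake_node *node); };

static int setup_epci(struct fake_node *n)
{
	printf("epci setup for %s\n", n->name);
	return 0;
}

static int setup_pciex(struct fake_node *n)
{
	printf("pciex setup for %s\n", n->name);
	return 0;
}

static const struct fake_spec epci_spec  = { .setup = setup_epci };
static const struct fake_spec pciex_spec = { .setup = setup_pciex };

/* Match table in the spirit of celleb_phb_match[]: node name -> spec. */
static const struct {
	const char *name;
	const struct fake_spec *spec;
} match[] = {
	{ "epci", &epci_spec },
	{ "pcie", &pciex_spec },
	{ NULL,   NULL },
};

static int setup_phb(struct fake_node *node)
{
	int i;

	for (i = 0; match[i].name; i++)
		if (strcmp(node->name, match[i].name) == 0)
			return match[i].spec->setup(node);
	return 1;	/* no match: same failure convention as celleb_setup_phb() */
}

int main(void)
{
	struct fake_node n = { .name = "pcie" };

	return setup_phb(&n);
}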
diff --git a/arch/powerpc/platforms/cell/celleb_scc.h b/arch/powerpc/platforms/cell/celleb_scc.h
deleted file mode 100644
index b596a711c348..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc.h
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * SCC (Super Companion Chip) definitions
- *
- * (C) Copyright 2004-2006 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#ifndef _CELLEB_SCC_H
-#define _CELLEB_SCC_H
-
-#define PCI_VENDOR_ID_TOSHIBA_2 0x102f
-#define PCI_DEVICE_ID_TOSHIBA_SCC_PCIEXC_BRIDGE 0x01b0
-#define PCI_DEVICE_ID_TOSHIBA_SCC_EPCI_BRIDGE 0x01b1
-#define PCI_DEVICE_ID_TOSHIBA_SCC_BRIDGE 0x01b2
-#define PCI_DEVICE_ID_TOSHIBA_SCC_GBE 0x01b3
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
-#define PCI_DEVICE_ID_TOSHIBA_SCC_USB2 0x01b5
-#define PCI_DEVICE_ID_TOSHIBA_SCC_USB 0x01b6
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ENCDEC 0x01b7
-
-#define SCC_EPCI_REG 0x0000d000
-
-/* EPCI registers */
-#define SCC_EPCI_CNF10_REG 0x010
-#define SCC_EPCI_CNF14_REG 0x014
-#define SCC_EPCI_CNF18_REG 0x018
-#define SCC_EPCI_PVBAT 0x100
-#define SCC_EPCI_VPMBAT 0x104
-#define SCC_EPCI_VPIBAT 0x108
-#define SCC_EPCI_VCSR 0x110
-#define SCC_EPCI_VIENAB 0x114
-#define SCC_EPCI_VISTAT 0x118
-#define SCC_EPCI_VRDCOUNT 0x124
-#define SCC_EPCI_BAM0 0x12c
-#define SCC_EPCI_BAM1 0x134
-#define SCC_EPCI_BAM2 0x13c
-#define SCC_EPCI_IADR 0x164
-#define SCC_EPCI_CLKRST 0x800
-#define SCC_EPCI_INTSET 0x804
-#define SCC_EPCI_STATUS 0x808
-#define SCC_EPCI_ABTSET 0x80c
-#define SCC_EPCI_WATRP 0x810
-#define SCC_EPCI_DUMYRADR 0x814
-#define SCC_EPCI_SWRESP 0x818
-#define SCC_EPCI_CNTOPT 0x81c
-#define SCC_EPCI_ECMODE 0xf00
-#define SCC_EPCI_IOM_AC_NUM 5
-#define SCC_EPCI_IOM_ACTE(n) (0xf10 + (n) * 4)
-#define SCC_EPCI_IOT_AC_NUM 4
-#define SCC_EPCI_IOT_ACTE(n) (0xf30 + (n) * 4)
-#define SCC_EPCI_MAEA 0xf50
-#define SCC_EPCI_MAEC 0xf54
-#define SCC_EPCI_CKCTRL 0xff0
-
-/* bits for SCC_EPCI_VCSR */
-#define SCC_EPCI_VCSR_FRE 0x00020000
-#define SCC_EPCI_VCSR_FWE 0x00010000
-#define SCC_EPCI_VCSR_DR 0x00000400
-#define SCC_EPCI_VCSR_SR 0x00000008
-#define SCC_EPCI_VCSR_AT 0x00000004
-
-/* bits for SCC_EPCI_VIENAB/SCC_EPCI_VISTAT */
-#define SCC_EPCI_VISTAT_PMPE 0x00000008
-#define SCC_EPCI_VISTAT_PMFE 0x00000004
-#define SCC_EPCI_VISTAT_PRA 0x00000002
-#define SCC_EPCI_VISTAT_PRD 0x00000001
-#define SCC_EPCI_VISTAT_ALL 0x0000000f
-
-#define SCC_EPCI_VIENAB_PMPEE 0x00000008
-#define SCC_EPCI_VIENAB_PMFEE 0x00000004
-#define SCC_EPCI_VIENAB_PRA 0x00000002
-#define SCC_EPCI_VIENAB_PRD 0x00000001
-#define SCC_EPCI_VIENAB_ALL 0x0000000f
-
-/* bits for SCC_EPCI_CLKRST */
-#define SCC_EPCI_CLKRST_CKS_MASK 0x00030000
-#define SCC_EPCI_CLKRST_CKS_2 0x00000000
-#define SCC_EPCI_CLKRST_CKS_4 0x00010000
-#define SCC_EPCI_CLKRST_CKS_8 0x00020000
-#define SCC_EPCI_CLKRST_PCICRST 0x00000400
-#define SCC_EPCI_CLKRST_BC 0x00000200
-#define SCC_EPCI_CLKRST_PCIRST 0x00000100
-#define SCC_EPCI_CLKRST_PCKEN 0x00000001
-
-/* bits for SCC_EPCI_INTSET/SCC_EPCI_STATUS */
-#define SCC_EPCI_INT_2M 0x01000000
-#define SCC_EPCI_INT_RERR 0x00200000
-#define SCC_EPCI_INT_SERR 0x00100000
-#define SCC_EPCI_INT_PRTER 0x00080000
-#define SCC_EPCI_INT_SER 0x00040000
-#define SCC_EPCI_INT_PER 0x00020000
-#define SCC_EPCI_INT_PAI 0x00010000
-#define SCC_EPCI_INT_1M 0x00000100
-#define SCC_EPCI_INT_PME 0x00000010
-#define SCC_EPCI_INT_INTD 0x00000008
-#define SCC_EPCI_INT_INTC 0x00000004
-#define SCC_EPCI_INT_INTB 0x00000002
-#define SCC_EPCI_INT_INTA 0x00000001
-#define SCC_EPCI_INT_DEVINT 0x0000000f
-#define SCC_EPCI_INT_ALL 0x003f001f
-#define SCC_EPCI_INT_ALLERR 0x003f0000
-
-/* bits for SCC_EPCI_CKCTRL */
-#define SCC_EPCI_CKCTRL_CRST0 0x00010000
-#define SCC_EPCI_CKCTRL_CRST1 0x00020000
-#define SCC_EPCI_CKCTRL_OCLKEN 0x00000100
-#define SCC_EPCI_CKCTRL_LCLKEN 0x00000001
-
-#define SCC_EPCI_IDSEL_AD_TO_SLOT(ad) ((ad) - 10)
-#define SCC_EPCI_MAX_DEVNU SCC_EPCI_IDSEL_AD_TO_SLOT(32)
-
-/* bits for SCC_EPCI_CNTOPT */
-#define SCC_EPCI_CNTOPT_O2PMB 0x00000002
-
-/* SCC PCIEXC SMMIO registers */
-#define PEXCADRS 0x000
-#define PEXCWDATA 0x004
-#define PEXCRDATA 0x008
-#define PEXDADRS 0x010
-#define PEXDCMND 0x014
-#define PEXDWDATA 0x018
-#define PEXDRDATA 0x01c
-#define PEXREQID 0x020
-#define PEXTIDMAP 0x024
-#define PEXINTMASK 0x028
-#define PEXINTSTS 0x02c
-#define PEXAERRMASK 0x030
-#define PEXAERRSTS 0x034
-#define PEXPRERRMASK 0x040
-#define PEXPRERRSTS 0x044
-#define PEXPRERRID01 0x048
-#define PEXPRERRID23 0x04c
-#define PEXVDMASK 0x050
-#define PEXVDSTS 0x054
-#define PEXRCVCPLIDA 0x060
-#define PEXLENERRIDA 0x068
-#define PEXPHYPLLST 0x070
-#define PEXDMRDEN0 0x100
-#define PEXDMRDADR0 0x104
-#define PEXDMRDENX 0x110
-#define PEXDMRDADRX 0x114
-#define PEXECMODE 0xf00
-#define PEXMAEA(n) (0xf50 + (8 * n))
-#define PEXMAEC(n) (0xf54 + (8 * n))
-#define PEXCCRCTRL 0xff0
-
-/* SCC PCIEXC bits and shifts for PEXCADRS */
-#define PEXCADRS_BYTE_EN_SHIFT 20
-#define PEXCADRS_CMD_SHIFT 16
-#define PEXCADRS_CMD_READ (0xa << PEXCADRS_CMD_SHIFT)
-#define PEXCADRS_CMD_WRITE (0xb << PEXCADRS_CMD_SHIFT)
-
-/* SCC PCIEXC shifts for PEXDADRS */
-#define PEXDADRS_BUSNO_SHIFT 20
-#define PEXDADRS_DEVNO_SHIFT 15
-#define PEXDADRS_FUNCNO_SHIFT 12
-
-/* SCC PCIEXC bits and shifts for PEXDCMND */
-#define PEXDCMND_BYTE_EN_SHIFT 4
-#define PEXDCMND_IO_READ 0x2
-#define PEXDCMND_IO_WRITE 0x3
-#define PEXDCMND_CONFIG_READ 0xa
-#define PEXDCMND_CONFIG_WRITE 0xb
-
-/* SCC PCIEXC bits for PEXPHYPLLST */
-#define PEXPHYPLLST_PEXPHYAPLLST 0x00000001
-
-/* SCC PCIEXC bits for PEXECMODE */
-#define PEXECMODE_ALL_THROUGH 0x00000000
-#define PEXECMODE_ALL_8BIT 0x00550155
-#define PEXECMODE_ALL_16BIT 0x00aa02aa
-
-/* SCC PCIEXC bits for PEXCCRCTRL */
-#define PEXCCRCTRL_PEXIPCOREEN 0x00040000
-#define PEXCCRCTRL_PEXIPCONTEN 0x00020000
-#define PEXCCRCTRL_PEXPHYPLLEN 0x00010000
-#define PEXCCRCTRL_PCIEXCAOCKEN 0x00000100
-
-/* SCC PCIEXC port configuration registers */
-#define PEXTCERRCHK 0x21c
-#define PEXTAMAPB0 0x220
-#define PEXTAMAPL0 0x224
-#define PEXTAMAPB(n) (PEXTAMAPB0 + 8 * (n))
-#define PEXTAMAPL(n) (PEXTAMAPL0 + 8 * (n))
-#define PEXCHVC0P 0x500
-#define PEXCHVC0NP 0x504
-#define PEXCHVC0C 0x508
-#define PEXCDVC0P 0x50c
-#define PEXCDVC0NP 0x510
-#define PEXCDVC0C 0x514
-#define PEXCHVCXP 0x518
-#define PEXCHVCXNP 0x51c
-#define PEXCHVCXC 0x520
-#define PEXCDVCXP 0x524
-#define PEXCDVCXNP 0x528
-#define PEXCDVCXC 0x52c
-#define PEXCTTRG 0x530
-#define PEXTSCTRL 0x700
-#define PEXTSSTS 0x704
-#define PEXSKPCTRL 0x708
-
-/* UHC registers */
-#define SCC_UHC_CKRCTRL 0xff0
-#define SCC_UHC_ECMODE 0xf00
-
-/* bits for SCC_UHC_CKRCTRL */
-#define SCC_UHC_F48MCKLEN 0x00000001
-#define SCC_UHC_P_SUSPEND 0x00000002
-#define SCC_UHC_PHY_SUSPEND_SEL 0x00000004
-#define SCC_UHC_HCLKEN 0x00000100
-#define SCC_UHC_USBEN 0x00010000
-#define SCC_UHC_USBCEN 0x00020000
-#define SCC_UHC_PHYEN 0x00040000
-
-/* bits for SCC_UHC_ECMODE */
-#define SCC_UHC_ECMODE_BY_BYTE 0x00000555
-#define SCC_UHC_ECMODE_BY_WORD 0x00000aaa
-
-#endif /* _CELLEB_SCC_H */
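
The SCC header above is almost entirely register offsets and bit masks; the EPCI init code that follows drives the clock/reset register (SCC_EPCI_CLKRST) by read-modify-write of those fields. A tiny model of that masking pattern, with the memory-mapped register replaced by a plain variable (the real code goes through in_be32()/out_be32()), looks like this:

#include <stdio.h>
#include <stdint.h>

/* A few bit definitions copied from the header above. */
#define SCC_EPCI_CLKRST_CKS_MASK	0x00030000u
#define SCC_EPCI_CLKRST_CKS_2		0x00000000u
#define SCC_EPCI_CLKRST_BC		0x00000200u
#define SCC_EPCI_CLKRST_PCKEN		0x00000001u

int main(void)
{
	/* Stand-in for the memory-mapped register. */
	uint32_t clkrst = 0x00020000u;	/* arbitrary starting value */

	clkrst &= ~SCC_EPCI_CLKRST_CKS_MASK;	/* clear the clock-select field */
	clkrst |= SCC_EPCI_CLKRST_CKS_2;	/* select the divide-by-2 clock */
	clkrst |= SCC_EPCI_CLKRST_BC | SCC_EPCI_CLKRST_PCKEN;	/* buffer + clock on */

	printf("CLKRST would be written as %#010x\n", clkrst);
	return 0;
}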
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
deleted file mode 100644
index 9438bbed402f..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ /dev/null
@@ -1,428 +0,0 @@
-/*
- * Support for SCC external PCI
- *
- * (C) Copyright 2004-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/threads.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/pci_regs.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/prom.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-
-#include "celleb_scc.h"
-#include "celleb_pci.h"
-
-#define MAX_PCI_DEVICES 32
-#define MAX_PCI_FUNCTIONS 8
-
-#define iob() __asm__ __volatile__("eieio; sync":::"memory")
-
-static inline PCI_IO_ADDR celleb_epci_get_epci_base(
- struct pci_controller *hose)
-{
- /*
- * Note:
- * Celleb epci uses cfg_addr as a base address for
- * epci control registers.
- */
-
- return hose->cfg_addr;
-}
-
-static inline PCI_IO_ADDR celleb_epci_get_epci_cfg(
- struct pci_controller *hose)
-{
- /*
- * Note:
- * Celleb epci uses cfg_data as a base address for
- * configuration area for epci devices.
- */
-
- return hose->cfg_data;
-}
-
-static inline void clear_and_disable_master_abort_interrupt(
- struct pci_controller *hose)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR reg;
- epci_base = celleb_epci_get_epci_base(hose);
- reg = epci_base + PCI_COMMAND;
- out_be32(reg, in_be32(reg) | (PCI_STATUS_REC_MASTER_ABORT << 16));
-}
-
-static int celleb_epci_check_abort(struct pci_controller *hose,
- PCI_IO_ADDR addr)
-{
- PCI_IO_ADDR reg;
- PCI_IO_ADDR epci_base;
- u32 val;
-
- iob();
- epci_base = celleb_epci_get_epci_base(hose);
-
- reg = epci_base + PCI_COMMAND;
- val = in_be32(reg);
-
- if (val & (PCI_STATUS_REC_MASTER_ABORT << 16)) {
- out_be32(reg,
- (val & 0xffff) | (PCI_STATUS_REC_MASTER_ABORT << 16));
-
- /* clear PCI Controller error, FRE, PMFE */
- reg = epci_base + SCC_EPCI_STATUS;
- out_be32(reg, SCC_EPCI_INT_PAI);
-
- reg = epci_base + SCC_EPCI_VCSR;
- val = in_be32(reg) & 0xffff;
- val |= SCC_EPCI_VCSR_FRE;
- out_be32(reg, val);
-
- reg = epci_base + SCC_EPCI_VISTAT;
- out_be32(reg, SCC_EPCI_VISTAT_PMFE);
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static PCI_IO_ADDR celleb_epci_make_config_addr(struct pci_bus *bus,
- struct pci_controller *hose, unsigned int devfn, int where)
-{
- PCI_IO_ADDR addr;
-
- if (bus != hose->bus)
- addr = celleb_epci_get_epci_cfg(hose) +
- (((bus->number & 0xff) << 16)
- | ((devfn & 0xff) << 8)
- | (where & 0xff)
- | 0x01000000);
- else
- addr = celleb_epci_get_epci_cfg(hose) +
- (((devfn & 0xff) << 8) | (where & 0xff));
-
- pr_debug("EPCI: config_addr = 0x%p\n", addr);
-
- return addr;
-}
-
-static int celleb_epci_read_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 *val)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR addr;
- struct pci_controller *hose = pci_bus_to_host(bus);
-
-	/* alignment check */
- BUG_ON(where % size);
-
- if (!celleb_epci_get_epci_cfg(hose))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == hose->first_busno && devfn == 0) {
- /* EPCI controller self */
-
- epci_base = celleb_epci_get_epci_base(hose);
- addr = epci_base + where;
-
- switch (size) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_be16(addr);
- break;
- case 4:
- *val = in_be32(addr);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- } else {
-
- clear_and_disable_master_abort_interrupt(hose);
- addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
-
- switch (size) {
- case 1:
- *val = in_8(addr);
- break;
- case 2:
- *val = in_le16(addr);
- break;
- case 4:
- *val = in_le32(addr);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- }
-
- pr_debug("EPCI: "
- "addr=0x%p, devfn=0x%x, where=0x%x, size=0x%x, val=0x%x\n",
- addr, devfn, where, size, *val);
-
- return celleb_epci_check_abort(hose, NULL);
-}
-
-static int celleb_epci_write_config(struct pci_bus *bus,
- unsigned int devfn, int where, int size, u32 val)
-{
- PCI_IO_ADDR epci_base;
- PCI_IO_ADDR addr;
- struct pci_controller *hose = pci_bus_to_host(bus);
-
-	/* alignment check */
- BUG_ON(where % size);
-
- if (!celleb_epci_get_epci_cfg(hose))
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == hose->first_busno && devfn == 0) {
- /* EPCI controller self */
-
- epci_base = celleb_epci_get_epci_base(hose);
- addr = epci_base + where;
-
- switch (size) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_be16(addr, val);
- break;
- case 4:
- out_be32(addr, val);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- } else {
-
- clear_and_disable_master_abort_interrupt(hose);
- addr = celleb_epci_make_config_addr(bus, hose, devfn, where);
-
- switch (size) {
- case 1:
- out_8(addr, val);
- break;
- case 2:
- out_le16(addr, val);
- break;
- case 4:
- out_le32(addr, val);
- break;
- default:
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
- }
-
- return celleb_epci_check_abort(hose, addr);
-}
-
-struct pci_ops celleb_epci_ops = {
- .read = celleb_epci_read_config,
- .write = celleb_epci_write_config,
-};
-
-/* to be moved in FW */
-static int __init celleb_epci_init(struct pci_controller *hose)
-{
- u32 val;
- PCI_IO_ADDR reg;
- PCI_IO_ADDR epci_base;
- int hwres = 0;
-
- epci_base = celleb_epci_get_epci_base(hose);
-
- /* PCI core reset(Internal bus and PCI clock) */
- reg = epci_base + SCC_EPCI_CKCTRL;
- val = in_be32(reg);
- if (val == 0x00030101)
- hwres = 1;
- else {
- val &= ~(SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
- out_be32(reg, val);
-
- /* set PCI core clock */
- val = in_be32(reg);
- val |= (SCC_EPCI_CKCTRL_OCLKEN | SCC_EPCI_CKCTRL_LCLKEN);
- out_be32(reg, val);
-
- /* release PCI core reset (internal bus) */
- val = in_be32(reg);
- val |= SCC_EPCI_CKCTRL_CRST0;
- out_be32(reg, val);
-
- /* set PCI clock select */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val &= ~SCC_EPCI_CLKRST_CKS_MASK;
- val |= SCC_EPCI_CLKRST_CKS_2;
- out_be32(reg, val);
-
- /* set arbiter */
- reg = epci_base + SCC_EPCI_ABTSET;
- out_be32(reg, 0x0f1f001f); /* temporary value */
-
- /* buffer on */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_BC;
- out_be32(reg, val);
-
- /* PCI clock enable */
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_PCKEN;
- out_be32(reg, val);
-
- /* release PCI core reset (all) */
- reg = epci_base + SCC_EPCI_CKCTRL;
- val = in_be32(reg);
- val |= (SCC_EPCI_CKCTRL_CRST0 | SCC_EPCI_CKCTRL_CRST1);
- out_be32(reg, val);
-
- /* set base translation registers. (already set by Beat) */
-
- /* set base address masks. (already set by Beat) */
- }
-
- /* release interrupt masks and clear all interrupts */
- reg = epci_base + SCC_EPCI_INTSET;
- out_be32(reg, 0x013f011f); /* all interrupts enable */
- reg = epci_base + SCC_EPCI_VIENAB;
- val = SCC_EPCI_VIENAB_PMPEE | SCC_EPCI_VIENAB_PMFEE;
- out_be32(reg, val);
- reg = epci_base + SCC_EPCI_STATUS;
- out_be32(reg, 0xffffffff);
- reg = epci_base + SCC_EPCI_VISTAT;
- out_be32(reg, 0xffffffff);
-
- /* disable PCI->IB address translation */
- reg = epci_base + SCC_EPCI_VCSR;
- val = in_be32(reg);
- val &= ~(SCC_EPCI_VCSR_DR | SCC_EPCI_VCSR_AT);
- out_be32(reg, val);
-
- /* set base addresses. (no need to set?) */
-
- /* memory space, bus master enable */
- reg = epci_base + PCI_COMMAND;
- val = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER;
- out_be32(reg, val);
-
- /* endian mode setup */
- reg = epci_base + SCC_EPCI_ECMODE;
- val = 0x00550155;
- out_be32(reg, val);
-
- /* set control option */
- reg = epci_base + SCC_EPCI_CNTOPT;
- val = in_be32(reg);
- val |= SCC_EPCI_CNTOPT_O2PMB;
- out_be32(reg, val);
-
-	/* XXX: temporary: set registers for address conversion setup */
- reg = epci_base + SCC_EPCI_CNF10_REG;
- out_be32(reg, 0x80000008);
- reg = epci_base + SCC_EPCI_CNF14_REG;
- out_be32(reg, 0x40000008);
-
- reg = epci_base + SCC_EPCI_BAM0;
- out_be32(reg, 0x80000000);
- reg = epci_base + SCC_EPCI_BAM1;
- out_be32(reg, 0xe0000000);
-
- reg = epci_base + SCC_EPCI_PVBAT;
- out_be32(reg, 0x80000000);
-
- if (!hwres) {
- /* release external PCI reset */
- reg = epci_base + SCC_EPCI_CLKRST;
- val = in_be32(reg);
- val |= SCC_EPCI_CLKRST_PCIRST;
- out_be32(reg, val);
- }
-
- return 0;
-}
-
-static int __init celleb_setup_epci(struct device_node *node,
- struct pci_controller *hose)
-{
- struct resource r;
-
- pr_debug("PCI: celleb_setup_epci()\n");
-
- /*
- * Note:
- * Celleb epci uses cfg_addr and cfg_data member of
- * pci_controller structure in irregular way.
- *
- * cfg_addr is used to map for control registers of
- * celleb epci.
- *
- * cfg_data is used for configuration area of devices
- * on Celleb epci buses.
- */
-
- if (of_address_to_resource(node, 0, &r))
- goto error;
- hose->cfg_addr = ioremap(r.start, resource_size(&r));
- if (!hose->cfg_addr)
- goto error;
- pr_debug("EPCI: cfg_addr map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_addr, resource_size(&r));
-
- if (of_address_to_resource(node, 2, &r))
- goto error;
- hose->cfg_data = ioremap(r.start, resource_size(&r));
- if (!hose->cfg_data)
- goto error;
- pr_debug("EPCI: cfg_data map 0x%016llx->0x%016lx + 0x%016llx\n",
- r.start, (unsigned long)hose->cfg_data, resource_size(&r));
-
- hose->ops = &celleb_epci_ops;
- celleb_epci_init(hose);
-
- return 0;
-
-error:
- if (hose->cfg_addr)
- iounmap(hose->cfg_addr);
-
- if (hose->cfg_data)
- iounmap(hose->cfg_data);
- return 1;
-}
-
-struct celleb_phb_spec celleb_epci_spec __initdata = {
- .setup = celleb_setup_epci,
- .ops = &spiderpci_ops,
- .iowa_init = &spiderpci_iowa_init,
- .iowa_data = (void *)0,
-};
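
In the EPCI driver above, celleb_epci_make_config_addr() turns a (bus, devfn, register) triple into an offset in the mapped configuration window: devfn in bits 8-15, the register in the low byte, and, for buses behind the root, the bus number in bits 16-23 plus a 0x01000000 flag. The small program below checks that offset arithmetic with the pointer base replaced by a plain integer offset; epci_cfg_offset() is an invented helper name.

#include <stdio.h>
#include <stdint.h>

/* Offset into the cfg_data window, as built by celleb_epci_make_config_addr(). */
static uint32_t epci_cfg_offset(unsigned bus, unsigned devfn, unsigned where,
				int behind_bridge)
{
	uint32_t off = ((devfn & 0xff) << 8) | (where & 0xff);

	if (behind_bridge)
		off |= ((bus & 0xff) << 16) | 0x01000000u;	/* non-root bus */
	return off;
}

int main(void)
{
	/* devfn 0x08 is device 1, function 0; register 0x04 is PCI_COMMAND. */
	printf("root bus:      %#010x\n", epci_cfg_offset(0, 0x08, 0x04, 0));
	printf("behind bridge: %#010x\n", epci_cfg_offset(2, 0x08, 0x04, 1));
	return 0;
}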
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
deleted file mode 100644
index 94170e4f2ce7..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ /dev/null
@@ -1,538 +0,0 @@
-/*
- * Support for Celleb PCI-Express.
- *
- * (C) Copyright 2007-2008 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-
-#include <asm/io.h>
-#include <asm/irq.h>
-#include <asm/iommu.h>
-#include <asm/byteorder.h>
-
-#include "celleb_scc.h"
-#include "celleb_pci.h"
-
-#define PEX_IN(base, off) in_be32((void __iomem *)(base) + (off))
-#define PEX_OUT(base, off, data) out_be32((void __iomem *)(base) + (off), (data))
-
-static void scc_pciex_io_flush(struct iowa_bus *bus)
-{
- (void)PEX_IN(bus->phb->cfg_addr, PEXDMRDEN0);
-}
-
-/*
- * Memory space access to device on PCIEX
- */
-#define PCIEX_MMIO_READ(name, ret) \
-static ret scc_pciex_##name(const PCI_IO_ADDR addr) \
-{ \
- ret val = __do_##name(addr); \
- scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
- return val; \
-}
-
-#define PCIEX_MMIO_READ_STR(name) \
-static void scc_pciex_##name(const PCI_IO_ADDR addr, void *buf, \
- unsigned long count) \
-{ \
- __do_##name(addr, buf, count); \
- scc_pciex_io_flush(iowa_mem_find_bus(addr)); \
-}
-
-PCIEX_MMIO_READ(readb, u8)
-PCIEX_MMIO_READ(readw, u16)
-PCIEX_MMIO_READ(readl, u32)
-PCIEX_MMIO_READ(readq, u64)
-PCIEX_MMIO_READ(readw_be, u16)
-PCIEX_MMIO_READ(readl_be, u32)
-PCIEX_MMIO_READ(readq_be, u64)
-PCIEX_MMIO_READ_STR(readsb)
-PCIEX_MMIO_READ_STR(readsw)
-PCIEX_MMIO_READ_STR(readsl)
-
-static void scc_pciex_memcpy_fromio(void *dest, const PCI_IO_ADDR src,
- unsigned long n)
-{
- __do_memcpy_fromio(dest, src, n);
- scc_pciex_io_flush(iowa_mem_find_bus(src));
-}
-
-/*
- * I/O port access to devices on PCIEX.
- */
-
-static inline unsigned long get_bus_address(struct pci_controller *phb,
- unsigned long port)
-{
- return port - ((unsigned long)(phb->io_base_virt) - _IO_BASE);
-}
-
-static u32 scc_pciex_read_port(struct pci_controller *phb,
- unsigned long port, int size)
-{
- unsigned int byte_enable;
- unsigned int cmd, shift;
- unsigned long addr;
- u32 data, ret;
-
- BUG_ON(((port & 0x3ul) + size) > 4);
-
- addr = get_bus_address(phb, port);
- shift = addr & 0x3ul;
- byte_enable = ((1 << size) - 1) << shift;
- cmd = PEXDCMND_IO_READ | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
- PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
- PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
- data = PEX_IN(phb->cfg_addr, PEXDRDATA);
- ret = (data >> (shift * 8)) & (0xFFFFFFFF >> ((4 - size) * 8));
-
- pr_debug("PCIEX:PIO READ:port=0x%lx, addr=0x%lx, size=%d, be=%x,"
- " cmd=%x, data=%x, ret=%x\n", port, addr, size, byte_enable,
- cmd, data, ret);
-
- return ret;
-}
-
-static void scc_pciex_write_port(struct pci_controller *phb,
- unsigned long port, int size, u32 val)
-{
- unsigned int byte_enable;
- unsigned int cmd, shift;
- unsigned long addr;
- u32 data;
-
- BUG_ON(((port & 0x3ul) + size) > 4);
-
- addr = get_bus_address(phb, port);
- shift = addr & 0x3ul;
- byte_enable = ((1 << size) - 1) << shift;
- cmd = PEXDCMND_IO_WRITE | (byte_enable << PEXDCMND_BYTE_EN_SHIFT);
- data = (val & (0xFFFFFFFF >> (4 - size) * 8)) << (shift * 8);
- PEX_OUT(phb->cfg_addr, PEXDADRS, (addr & ~0x3ul));
- PEX_OUT(phb->cfg_addr, PEXDCMND, cmd);
- PEX_OUT(phb->cfg_addr, PEXDWDATA, data);
-
- pr_debug("PCIEX:PIO WRITE:port=0x%lx, addr=%lx, size=%d, val=%x,"
- " be=%x, cmd=%x, data=%x\n", port, addr, size, val,
- byte_enable, cmd, data);
-}
-
-static u8 __scc_pciex_inb(struct pci_controller *phb, unsigned long port)
-{
- return (u8)scc_pciex_read_port(phb, port, 1);
-}
-
-static u16 __scc_pciex_inw(struct pci_controller *phb, unsigned long port)
-{
- u32 data;
- if ((port & 0x3ul) < 3)
- data = scc_pciex_read_port(phb, port, 2);
- else {
- u32 d1 = scc_pciex_read_port(phb, port, 1);
- u32 d2 = scc_pciex_read_port(phb, port + 1, 1);
- data = d1 | (d2 << 8);
- }
- return (u16)data;
-}
-
-static u32 __scc_pciex_inl(struct pci_controller *phb, unsigned long port)
-{
- unsigned int mod = port & 0x3ul;
- u32 data;
- if (mod == 0)
- data = scc_pciex_read_port(phb, port, 4);
- else {
- u32 d1 = scc_pciex_read_port(phb, port, 4 - mod);
- u32 d2 = scc_pciex_read_port(phb, port + 1, mod);
- data = d1 | (d2 << (mod * 8));
- }
- return data;
-}
-
-static void __scc_pciex_outb(struct pci_controller *phb,
- u8 val, unsigned long port)
-{
- scc_pciex_write_port(phb, port, 1, (u32)val);
-}
-
-static void __scc_pciex_outw(struct pci_controller *phb,
- u16 val, unsigned long port)
-{
- if ((port & 0x3ul) < 3)
- scc_pciex_write_port(phb, port, 2, (u32)val);
- else {
- u32 d1 = val & 0x000000FF;
- u32 d2 = (val & 0x0000FF00) >> 8;
- scc_pciex_write_port(phb, port, 1, d1);
- scc_pciex_write_port(phb, port + 1, 1, d2);
- }
-}
-
-static void __scc_pciex_outl(struct pci_controller *phb,
- u32 val, unsigned long port)
-{
- unsigned int mod = port & 0x3ul;
- if (mod == 0)
- scc_pciex_write_port(phb, port, 4, val);
- else {
- u32 d1 = val & (0xFFFFFFFFul >> (mod * 8));
- u32 d2 = val >> ((4 - mod) * 8);
- scc_pciex_write_port(phb, port, 4 - mod, d1);
- scc_pciex_write_port(phb, port + 1, mod, d2);
- }
-}
-
-#define PCIEX_PIO_FUNC(size, name) \
-static u##size scc_pciex_in##name(unsigned long port) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(port); \
- u##size data = __scc_pciex_in##name(bus->phb, port); \
- scc_pciex_io_flush(bus); \
- return data; \
-} \
-static void scc_pciex_ins##name(unsigned long p, void *b, unsigned long c) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(p); \
- __le##size *dst = b; \
- for (; c != 0; c--, dst++) \
- *dst = cpu_to_le##size(__scc_pciex_in##name(bus->phb, p)); \
- scc_pciex_io_flush(bus); \
-} \
-static void scc_pciex_out##name(u##size val, unsigned long port) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(port); \
- __scc_pciex_out##name(bus->phb, val, port); \
-} \
-static void scc_pciex_outs##name(unsigned long p, const void *b, \
- unsigned long c) \
-{ \
- struct iowa_bus *bus = iowa_pio_find_bus(p); \
- const __le##size *src = b; \
- for (; c != 0; c--, src++) \
- __scc_pciex_out##name(bus->phb, le##size##_to_cpu(*src), p); \
-}
-#define __le8 u8
-#define cpu_to_le8(x) (x)
-#define le8_to_cpu(x) (x)
-PCIEX_PIO_FUNC(8, b)
-PCIEX_PIO_FUNC(16, w)
-PCIEX_PIO_FUNC(32, l)
-
-static struct ppc_pci_io scc_pciex_ops = {
- .readb = scc_pciex_readb,
- .readw = scc_pciex_readw,
- .readl = scc_pciex_readl,
- .readq = scc_pciex_readq,
- .readw_be = scc_pciex_readw_be,
- .readl_be = scc_pciex_readl_be,
- .readq_be = scc_pciex_readq_be,
- .readsb = scc_pciex_readsb,
- .readsw = scc_pciex_readsw,
- .readsl = scc_pciex_readsl,
- .memcpy_fromio = scc_pciex_memcpy_fromio,
- .inb = scc_pciex_inb,
- .inw = scc_pciex_inw,
- .inl = scc_pciex_inl,
- .outb = scc_pciex_outb,
- .outw = scc_pciex_outw,
- .outl = scc_pciex_outl,
- .insb = scc_pciex_insb,
- .insw = scc_pciex_insw,
- .insl = scc_pciex_insl,
- .outsb = scc_pciex_outsb,
- .outsw = scc_pciex_outsw,
- .outsl = scc_pciex_outsl,
-};
-
-static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
-{
- dma_addr_t dummy_page_da;
- void *dummy_page_va;
-
- dummy_page_va = kmalloc(PAGE_SIZE, GFP_KERNEL);
- if (!dummy_page_va) {
- pr_err("PCIEX:Alloc dummy_page_va failed\n");
- return -1;
- }
-
- dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
- PAGE_SIZE, DMA_FROM_DEVICE);
- if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
- pr_err("PCIEX:Map dummy page failed.\n");
- kfree(dummy_page_va);
- return -1;
- }
-
- PEX_OUT(bus->phb->cfg_addr, PEXDMRDADR0, dummy_page_da);
-
- return 0;
-}
-
-/*
- * config space access
- */
-#define MK_PEXDADRS(bus_no, dev_no, func_no, addr) \
- ((uint32_t)(((addr) & ~0x3UL) | \
- ((bus_no) << PEXDADRS_BUSNO_SHIFT) | \
- ((dev_no) << PEXDADRS_DEVNO_SHIFT) | \
- ((func_no) << PEXDADRS_FUNCNO_SHIFT)))
-
-#define MK_PEXDCMND_BYTE_EN(addr, size) \
- ((((0x1 << (size))-1) << ((addr) & 0x3)) << PEXDCMND_BYTE_EN_SHIFT)
-#define MK_PEXDCMND(cmd, addr, size) ((cmd) | MK_PEXDCMND_BYTE_EN(addr, size))
-
-static uint32_t config_read_pciex_dev(unsigned int __iomem *base,
- uint64_t bus_no, uint64_t dev_no, uint64_t func_no,
- uint64_t off, uint64_t size)
-{
- uint32_t ret;
- uint32_t addr, cmd;
-
- addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
- cmd = MK_PEXDCMND(PEXDCMND_CONFIG_READ, off, size);
- PEX_OUT(base, PEXDADRS, addr);
- PEX_OUT(base, PEXDCMND, cmd);
- ret = (PEX_IN(base, PEXDRDATA)
- >> ((off & (4-size)) * 8)) & ((0x1 << (size * 8)) - 1);
- return ret;
-}
-
-static void config_write_pciex_dev(unsigned int __iomem *base, uint64_t bus_no,
- uint64_t dev_no, uint64_t func_no, uint64_t off, uint64_t size,
- uint32_t data)
-{
- uint32_t addr, cmd;
-
- addr = MK_PEXDADRS(bus_no, dev_no, func_no, off);
- cmd = MK_PEXDCMND(PEXDCMND_CONFIG_WRITE, off, size);
- PEX_OUT(base, PEXDADRS, addr);
- PEX_OUT(base, PEXDCMND, cmd);
- PEX_OUT(base, PEXDWDATA,
- (data & ((0x1 << (size * 8)) - 1)) << ((off & (4-size)) * 8));
-}
-
-#define MK_PEXCADRS_BYTE_EN(off, len) \
- ((((0x1 << (len)) - 1) << ((off) & 0x3)) << PEXCADRS_BYTE_EN_SHIFT)
-#define MK_PEXCADRS(cmd, addr, size) \
- ((cmd) | MK_PEXCADRS_BYTE_EN(addr, size) | ((addr) & ~0x3))
-static uint32_t config_read_pciex_rc(unsigned int __iomem *base,
- uint32_t where, uint32_t size)
-{
- PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_READ, where, size));
- return (PEX_IN(base, PEXCRDATA)
- >> ((where & (4 - size)) * 8)) & ((0x1 << (size * 8)) - 1);
-}
-
-static void config_write_pciex_rc(unsigned int __iomem *base, uint32_t where,
- uint32_t size, uint32_t val)
-{
- uint32_t data;
-
- data = (val & ((0x1 << (size * 8)) - 1)) << ((where & (4 - size)) * 8);
- PEX_OUT(base, PEXCADRS, MK_PEXCADRS(PEXCADRS_CMD_WRITE, where, size));
- PEX_OUT(base, PEXCWDATA, data);
-}
-
-/* Interfaces */
-/* Note: Work-around
- * On SCC PCIEXC, one device is seen on all 32 dev_no.
- * As SCC PCIEXC can have only one device on the bus, we look only one dev_no.
- * (dev_no = 1)
- */
-static int scc_pciex_read_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, unsigned int *val)
-{
- struct pci_controller *phb = pci_bus_to_host(bus);
-
- if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1) {
- *val = ~0;
- return PCIBIOS_DEVICE_NOT_FOUND;
- }
-
- if (bus->number == 0 && PCI_SLOT(devfn) == 0)
- *val = config_read_pciex_rc(phb->cfg_addr, where, size);
- else
- *val = config_read_pciex_dev(phb->cfg_addr, bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size);
-
- return PCIBIOS_SUCCESSFUL;
-}
-
-static int scc_pciex_write_config(struct pci_bus *bus, unsigned int devfn,
- int where, int size, unsigned int val)
-{
- struct pci_controller *phb = pci_bus_to_host(bus);
-
- if (bus->number == phb->first_busno && PCI_SLOT(devfn) != 1)
- return PCIBIOS_DEVICE_NOT_FOUND;
-
- if (bus->number == 0 && PCI_SLOT(devfn) == 0)
- config_write_pciex_rc(phb->cfg_addr, where, size, val);
- else
- config_write_pciex_dev(phb->cfg_addr, bus->number,
- PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
- return PCIBIOS_SUCCESSFUL;
-}
-
-static struct pci_ops scc_pciex_pci_ops = {
- .read = scc_pciex_read_config,
- .write = scc_pciex_write_config,
-};
-
-static void pciex_clear_intr_all(unsigned int __iomem *base)
-{
- PEX_OUT(base, PEXAERRSTS, 0xffffffff);
- PEX_OUT(base, PEXPRERRSTS, 0xffffffff);
- PEX_OUT(base, PEXINTSTS, 0xffffffff);
-}
-
-#if 0
-static void pciex_disable_intr_all(unsigned int *base)
-{
- PEX_OUT(base, PEXINTMASK, 0x0);
- PEX_OUT(base, PEXAERRMASK, 0x0);
- PEX_OUT(base, PEXPRERRMASK, 0x0);
- PEX_OUT(base, PEXVDMASK, 0x0);
-}
-#endif
-
-static void pciex_enable_intr_all(unsigned int __iomem *base)
-{
- PEX_OUT(base, PEXINTMASK, 0x0000e7f1);
- PEX_OUT(base, PEXAERRMASK, 0x03ff01ff);
- PEX_OUT(base, PEXPRERRMASK, 0x0001010f);
- PEX_OUT(base, PEXVDMASK, 0x00000001);
-}
-
-static void pciex_check_status(unsigned int __iomem *base)
-{
- uint32_t err = 0;
- uint32_t intsts, aerr, prerr, rcvcp, lenerr;
- uint32_t maea, maec;
-
- intsts = PEX_IN(base, PEXINTSTS);
- aerr = PEX_IN(base, PEXAERRSTS);
- prerr = PEX_IN(base, PEXPRERRSTS);
- rcvcp = PEX_IN(base, PEXRCVCPLIDA);
- lenerr = PEX_IN(base, PEXLENERRIDA);
-
- if (intsts || aerr || prerr || rcvcp || lenerr)
- err = 1;
-
- pr_info("PCEXC interrupt!!\n");
- pr_info("PEXINTSTS :0x%08x\n", intsts);
- pr_info("PEXAERRSTS :0x%08x\n", aerr);
- pr_info("PEXPRERRSTS :0x%08x\n", prerr);
- pr_info("PEXRCVCPLIDA :0x%08x\n", rcvcp);
- pr_info("PEXLENERRIDA :0x%08x\n", lenerr);
-
- /* print detail of Protection Error */
- if (intsts & 0x00004000) {
- uint32_t i, n;
- for (i = 0; i < 4; i++) {
- n = 1 << i;
- if (prerr & n) {
- maea = PEX_IN(base, PEXMAEA(i));
- maec = PEX_IN(base, PEXMAEC(i));
- pr_info("PEXMAEC%d :0x%08x\n", i, maec);
- pr_info("PEXMAEA%d :0x%08x\n", i, maea);
- }
- }
- }
-
- if (err)
- pciex_clear_intr_all(base);
-}
-
-static irqreturn_t pciex_handle_internal_irq(int irq, void *dev_id)
-{
- struct pci_controller *phb = dev_id;
-
- pr_debug("PCIEX:pciex_handle_internal_irq(irq=%d)\n", irq);
-
- BUG_ON(phb->cfg_addr == NULL);
-
- pciex_check_status(phb->cfg_addr);
-
- return IRQ_HANDLED;
-}
-
-static __init int celleb_setup_pciex(struct device_node *node,
- struct pci_controller *phb)
-{
- struct resource r;
- int virq;
-
- /* SMMIO registers; used inside this file */
- if (of_address_to_resource(node, 0, &r)) {
- pr_err("PCIEXC:Failed to get config resource.\n");
- return 1;
- }
- phb->cfg_addr = ioremap(r.start, resource_size(&r));
- if (!phb->cfg_addr) {
- pr_err("PCIEXC:Failed to remap SMMIO region.\n");
- return 1;
- }
-
- /* Not use cfg_data, cmd and data regs are near address reg */
- phb->cfg_data = NULL;
-
- /* set pci_ops */
- phb->ops = &scc_pciex_pci_ops;
-
- /* internal interrupt handler */
- virq = irq_of_parse_and_map(node, 1);
- if (!virq) {
- pr_err("PCIEXC:Failed to map irq\n");
- goto error;
- }
- if (request_irq(virq, pciex_handle_internal_irq,
- 0, "pciex", (void *)phb)) {
- pr_err("PCIEXC:Failed to request irq\n");
- goto error;
- }
-
- /* enable all interrupts */
- pciex_clear_intr_all(phb->cfg_addr);
- pciex_enable_intr_all(phb->cfg_addr);
- /* MSI: TBD */
-
- return 0;
-
-error:
- phb->cfg_data = NULL;
- if (phb->cfg_addr)
- iounmap(phb->cfg_addr);
- phb->cfg_addr = NULL;
- return 1;
-}
-
-struct celleb_phb_spec celleb_pciex_spec __initdata = {
- .setup = celleb_setup_pciex,
- .ops = &scc_pciex_ops,
- .iowa_init = &scc_pciex_iowa_init,
-};
diff --git a/arch/powerpc/platforms/cell/celleb_scc_sio.c b/arch/powerpc/platforms/cell/celleb_scc_sio.c
deleted file mode 100644
index c8eb57193826..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_sio.c
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * setup serial port in SCC
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/tty.h>
-#include <linux/serial.h>
-#include <linux/serial_core.h>
-#include <linux/console.h>
-
-#include <asm/io.h>
-#include <asm/prom.h>
-
-/* sio irq0=0xb00010022 irq1=0xb00010023 irq2=0xb00010024
- mmio=0xfff000-0x1000,0xff2000-0x1000 */
-static int txx9_serial_bitmap __initdata;
-
-static struct {
- uint32_t offset;
- uint32_t index;
-} txx9_scc_tab[3] __initdata = {
- { 0x300, 0 }, /* 0xFFF300 */
- { 0x400, 0 }, /* 0xFFF400 */
- { 0x800, 1 } /* 0xFF2800 */
-};
-
-static int __init txx9_serial_init(void)
-{
- extern int early_serial_txx9_setup(struct uart_port *port);
- struct device_node *node;
- int i;
- struct uart_port req;
- struct of_phandle_args irq;
- struct resource res;
-
- for_each_compatible_node(node, "serial", "toshiba,sio-scc") {
- for (i = 0; i < ARRAY_SIZE(txx9_scc_tab); i++) {
- if (!(txx9_serial_bitmap & (1<<i)))
- continue;
-
- if (of_irq_parse_one(node, i, &irq))
- continue;
- if (of_address_to_resource(node,
- txx9_scc_tab[i].index, &res))
- continue;
-
- memset(&req, 0, sizeof(req));
- req.line = i;
- req.iotype = UPIO_MEM;
- req.mapbase = res.start + txx9_scc_tab[i].offset;
-#ifdef CONFIG_SERIAL_TXX9_CONSOLE
- req.membase = ioremap(req.mapbase, 0x24);
-#endif
- req.irq = irq_create_of_mapping(&irq);
- req.flags |= UPF_IOREMAP | UPF_BUGGY_UART
- /*HAVE_CTS_LINE*/;
- req.uartclk = 83300000;
- early_serial_txx9_setup(&req);
- }
- }
-
- return 0;
-}
-
-static int __init txx9_serial_config(char *ptr)
-{
- int i;
-
- for (;;) {
- switch (get_option(&ptr, &i)) {
- default:
- return 0;
- case 2:
- txx9_serial_bitmap |= 1 << i;
- break;
- case 1:
- txx9_serial_bitmap |= 1 << i;
- return 0;
- }
- }
-}
-__setup("txx9_serial=", txx9_serial_config);
-
-console_initcall(txx9_serial_init);
diff --git a/arch/powerpc/platforms/cell/celleb_scc_uhc.c b/arch/powerpc/platforms/cell/celleb_scc_uhc.c
deleted file mode 100644
index d63b720bfe3a..000000000000
--- a/arch/powerpc/platforms/cell/celleb_scc_uhc.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * SCC (Super Companion Chip) UHC setup
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-
-#include <asm/delay.h>
-#include <asm/io.h>
-#include <asm/machdep.h>
-
-#include "celleb_scc.h"
-
-#define UHC_RESET_WAIT_MAX 10000
-
-static inline int uhc_clkctrl_ready(u32 val)
-{
-	const u32 mask = SCC_UHC_USBCEN | SCC_UHC_USBEN;
- return((val & mask) == mask);
-}
-
-/*
- * UHC (USB host controller) enable function.
- * Affects both the OHCI and EHCI core modules.
- */
-static void enable_scc_uhc(struct pci_dev *dev)
-{
- void __iomem *uhc_base;
- u32 __iomem *uhc_clkctrl;
- u32 __iomem *uhc_ecmode;
- u32 val = 0;
- int i;
-
- if (!machine_is(celleb_beat) &&
- !machine_is(celleb_native))
- return;
-
- uhc_base = ioremap(pci_resource_start(dev, 0),
- pci_resource_len(dev, 0));
- if (!uhc_base) {
- printk(KERN_ERR "failed to map UHC register base.\n");
- return;
- }
- uhc_clkctrl = uhc_base + SCC_UHC_CKRCTRL;
- uhc_ecmode = uhc_base + SCC_UHC_ECMODE;
-
- /* setup for normal mode */
- val |= SCC_UHC_F48MCKLEN;
- out_be32(uhc_clkctrl, val);
- val |= SCC_UHC_PHY_SUSPEND_SEL;
- out_be32(uhc_clkctrl, val);
- udelay(10);
- val |= SCC_UHC_PHYEN;
- out_be32(uhc_clkctrl, val);
- udelay(50);
-
- /* disable reset */
- val |= SCC_UHC_HCLKEN;
- out_be32(uhc_clkctrl, val);
- val |= (SCC_UHC_USBCEN | SCC_UHC_USBEN);
- out_be32(uhc_clkctrl, val);
- i = 0;
- while (!uhc_clkctrl_ready(in_be32(uhc_clkctrl))) {
- udelay(10);
- if (i++ > UHC_RESET_WAIT_MAX) {
- printk(KERN_ERR "Failed to disable UHC reset %x\n",
- in_be32(uhc_clkctrl));
- break;
- }
- }
-
- /* Endian Conversion Mode for Master ALL area */
- out_be32(uhc_ecmode, SCC_UHC_ECMODE_BY_BYTE);
-
- iounmap(uhc_base);
-}
-
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TOSHIBA_2,
- PCI_DEVICE_ID_TOSHIBA_SCC_USB, enable_scc_uhc);
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
deleted file mode 100644
index 90be8ec51686..000000000000
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Celleb setup code
- *
- * (C) Copyright 2006-2007 TOSHIBA CORPORATION
- *
- * This code is based on arch/powerpc/platforms/cell/setup.c:
- * Copyright (C) 1995 Linus Torvalds
- * Adapted from 'alpha' version by Gary Thomas
- * Modified by Cort Dougan (cort@cs.nmt.edu)
- * Modified by PPC64 Team, IBM Corp
- * Modified by Cell Team, IBM Deutschland Entwicklung GmbH
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#undef DEBUG
-
-#include <linux/cpu.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/export.h>
-#include <linux/mm.h>
-#include <linux/stddef.h>
-#include <linux/unistd.h>
-#include <linux/reboot.h>
-#include <linux/init.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/seq_file.h>
-#include <linux/root_dev.h>
-#include <linux/console.h>
-#include <linux/of_platform.h>
-
-#include <asm/mmu.h>
-#include <asm/processor.h>
-#include <asm/io.h>
-#include <asm/prom.h>
-#include <asm/machdep.h>
-#include <asm/cputable.h>
-#include <asm/irq.h>
-#include <asm/time.h>
-#include <asm/spu_priv1.h>
-#include <asm/firmware.h>
-#include <asm/rtas.h>
-#include <asm/cell-regs.h>
-
-#include "beat_interrupt.h"
-#include "beat_wrapper.h"
-#include "beat.h"
-#include "celleb_pci.h"
-#include "interrupt.h"
-#include "pervasive.h"
-#include "ras.h"
-
-static char celleb_machine_type[128] = "Celleb";
-
-static void celleb_show_cpuinfo(struct seq_file *m)
-{
- struct device_node *root;
- const char *model = "";
-
- root = of_find_node_by_path("/");
- if (root)
- model = of_get_property(root, "model", NULL);
- /* using "CHRP" is to trick anaconda into installing FCx into Celleb */
- seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model);
- of_node_put(root);
-}
-
-static int __init celleb_machine_type_hack(char *ptr)
-{
- strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type));
- return 0;
-}
-
-__setup("celleb_machine_type_hack=", celleb_machine_type_hack);
-
-static void celleb_progress(char *s, unsigned short hex)
-{
- printk("*** %04x : %s\n", hex, s ? s : "");
-}
-
-static void __init celleb_setup_arch_common(void)
-{
- /* init to some ~sane value until calibrate_delay() runs */
- loops_per_jiffy = 50000000;
-
-#ifdef CONFIG_DUMMY_CONSOLE
- conswitchp = &dummy_con;
-#endif
-}
-
-static const struct of_device_id celleb_bus_ids[] __initconst = {
- { .type = "scc", },
- { .type = "ioif", }, /* old style */
- {},
-};
-
-static int __init celleb_publish_devices(void)
-{
- /* Publish OF platform devices for southbridge IOs */
- of_platform_bus_probe(NULL, celleb_bus_ids, NULL);
-
- return 0;
-}
-machine_device_initcall(celleb_beat, celleb_publish_devices);
-machine_device_initcall(celleb_native, celleb_publish_devices);
-
-
-/*
- * functions for Celleb-Beat
- */
-static void __init celleb_setup_arch_beat(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_beat_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- celleb_setup_arch_common();
-}
-
-static int __init celleb_probe_beat(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (!of_flat_dt_is_compatible(root, "Beat"))
- return 0;
-
- powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
- | FW_FEATURE_BEAT | FW_FEATURE_LPAR;
- hpte_init_beat_v3();
- pm_power_off = beat_power_off;
-
- return 1;
-}
-
-
-/*
- * functions for Celleb-native
- */
-static void __init celleb_init_IRQ_native(void)
-{
- iic_init_IRQ();
- spider_init_IRQ();
-}
-
-static void __init celleb_setup_arch_native(void)
-{
-#ifdef CONFIG_SPU_BASE
- spu_priv1_ops = &spu_priv1_mmio_ops;
- spu_management_ops = &spu_management_of_ops;
-#endif
-
- cbe_regs_init();
-
-#ifdef CONFIG_CBE_RAS
- cbe_ras_init();
-#endif
-
-#ifdef CONFIG_SMP
- smp_init_cell();
-#endif
-
- cbe_pervasive_init();
-
- /* XXX: nvram initialization should be added */
-
- celleb_setup_arch_common();
-}
-
-static int __init celleb_probe_native(void)
-{
- unsigned long root = of_get_flat_dt_root();
-
- if (of_flat_dt_is_compatible(root, "Beat") ||
- !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb"))
- return 0;
-
- powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
- hpte_init_native();
- pm_power_off = rtas_power_off;
-
- return 1;
-}
-
-
-/*
- * machine definitions
- */
-define_machine(celleb_beat) {
- .name = "Cell Reference Set (Beat)",
- .probe = celleb_probe_beat,
- .setup_arch = celleb_setup_arch_beat,
- .show_cpuinfo = celleb_show_cpuinfo,
- .restart = beat_restart,
- .halt = beat_halt,
- .get_rtc_time = beat_get_rtc_time,
- .set_rtc_time = beat_set_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = celleb_progress,
- .power_save = beat_power_save,
- .nvram_size = beat_nvram_get_size,
- .nvram_read = beat_nvram_read,
- .nvram_write = beat_nvram_write,
- .set_dabr = beat_set_xdabr,
- .init_IRQ = beatic_init_IRQ,
- .get_irq = beatic_get_irq,
- .pci_probe_mode = celleb_pci_probe_mode,
- .pci_setup_phb = celleb_setup_phb,
-#ifdef CONFIG_KEXEC
- .kexec_cpu_down = beat_kexec_cpu_down,
-#endif
-};
-
-define_machine(celleb_native) {
- .name = "Cell Reference Set (native)",
- .probe = celleb_probe_native,
- .setup_arch = celleb_setup_arch_native,
- .show_cpuinfo = celleb_show_cpuinfo,
- .restart = rtas_restart,
- .halt = rtas_halt,
- .get_boot_time = rtas_get_boot_time,
- .get_rtc_time = rtas_get_rtc_time,
- .set_rtc_time = rtas_set_rtc_time,
- .calibrate_decr = generic_calibrate_decr,
- .progress = celleb_progress,
- .pci_probe_mode = celleb_pci_probe_mode,
- .pci_setup_phb = celleb_setup_phb,
- .init_IRQ = celleb_init_IRQ_native,
-};
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 4c11421847be..3af8324c122e 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
void iic_setup_cpu(void)
{
- out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
+ out_be64(&this_cpu_ptr(&cpu_iic)->regs->prio, 0xff);
}
u8 iic_get_target_id(int cpu)
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c
index c7c8720aa39f..21b502398bf3 100644
--- a/arch/powerpc/platforms/cell/iommu.c
+++ b/arch/powerpc/platforms/cell/iommu.c
@@ -39,6 +39,7 @@
#include <asm/firmware.h>
#include <asm/cell-regs.h>
+#include "cell.h"
#include "interrupt.h"
/* Define CELL_IOMMU_REAL_UNMAP to actually unmap non-used pages
@@ -197,7 +198,7 @@ static int tce_build_cell(struct iommu_table *tbl, long index, long npages,
io_pte = (unsigned long *)tbl->it_base + (index - tbl->it_offset);
- for (i = 0; i < npages; i++, uaddr += tbl->it_page_shift)
+ for (i = 0; i < npages; i++, uaddr += (1 << tbl->it_page_shift))
io_pte[i] = base_pte | (__pa(uaddr) & CBE_IOPTE_RPN_Mask);
mb();
@@ -857,7 +858,7 @@ static int __init cell_iommu_init_disabled(void)
cell_dma_direct_offset += base;
if (cell_dma_direct_offset != 0)
- ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
printk("iommu: disabled, direct DMA offset is 0x%lx\n",
cell_dma_direct_offset);
@@ -1197,8 +1198,8 @@ static int __init cell_iommu_init(void)
if (cell_iommu_init_disabled() == 0)
goto bail;
- /* Setup various ppc_md. callbacks */
- ppc_md.pci_dma_dev_setup = cell_pci_dma_dev_setup;
+ /* Setup various callbacks */
+ cell_pci_controller_ops.dma_dev_setup = cell_pci_dma_dev_setup;
ppc_md.dma_get_required_mask = cell_dma_get_required_mask;
ppc_md.tce_build = tce_build_cell;
ppc_md.tce_free = tce_free_cell;
@@ -1234,5 +1235,3 @@ static int __init cell_iommu_init(void)
return 0;
}
machine_arch_initcall(cell, cell_iommu_init);
-machine_arch_initcall(celleb_native, cell_iommu_init);
-
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index d62aa982d530..36cff28d0293 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -54,6 +54,7 @@
#include <asm/cell-regs.h>
#include <asm/io-workarounds.h>
+#include "cell.h"
#include "interrupt.h"
#include "pervasive.h"
#include "ras.h"
@@ -126,6 +127,8 @@ static int cell_setup_phb(struct pci_controller *phb)
if (rc)
return rc;
+ phb->controller_ops = cell_pci_controller_ops;
+
np = phb->dn;
model = of_get_property(np, "model", NULL);
if (model == NULL || strcmp(np->name, "pci"))
@@ -279,3 +282,5 @@ define_machine(cell) {
.init_IRQ = cell_init_irq,
.pci_setup_phb = cell_setup_phb,
};
+
+struct pci_controller_ops cell_pci_controller_ops;
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c
index b64e7ead752f..895560f4be69 100644
--- a/arch/powerpc/platforms/cell/smp.c
+++ b/arch/powerpc/platforms/cell/smp.c
@@ -102,13 +102,6 @@ static inline int smp_startup_cpu(unsigned int lcpu)
return 1;
}
-static int __init smp_iic_probe(void)
-{
- iic_request_IPIs();
-
- return num_possible_cpus();
-}
-
static void smp_cell_setup_cpu(int cpu)
{
if (cpu != boot_cpuid)
@@ -139,7 +132,7 @@ static int smp_cell_kick_cpu(int nr)
static struct smp_ops_t bpa_iic_smp_ops = {
.message_pass = iic_message_pass,
- .probe = smp_iic_probe,
+ .probe = iic_request_IPIs,
.kick_cpu = smp_cell_kick_cpu,
.setup_cpu = smp_cell_setup_cpu,
.cpu_bootable = smp_generic_cpu_bootable,
diff --git a/arch/powerpc/platforms/cell/spu_callbacks.c b/arch/powerpc/platforms/cell/spu_callbacks.c
index b0ec78e8ad68..a494028b2cdf 100644
--- a/arch/powerpc/platforms/cell/spu_callbacks.c
+++ b/arch/powerpc/platforms/cell/spu_callbacks.c
@@ -39,6 +39,7 @@ static void *spu_syscall_table[] = {
#define PPC_SYS(func) sys_ni_syscall,
#define OLDSYS(func) sys_ni_syscall,
#define SYS32ONLY(func) sys_ni_syscall,
+#define PPC64ONLY(func) sys_ni_syscall,
#define SYSX(f, f3264, f32) sys_ni_syscall,
#define SYSCALL_SPU(func) sys_##func,
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 860a59eb8ea2..15ebc4e8a151 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -253,7 +253,7 @@ static void briq_restart(char *cmd)
* But unfortunately, the firmware does not connect /chosen/{stdin,stdout}
 * to the built-in serial node. Instead, a /failsafe node is created.
*/
-static void chrp_init_early(void)
+static __init void chrp_init_early(void)
{
struct device_node *node;
const char *property;
diff --git a/arch/powerpc/platforms/maple/maple.h b/arch/powerpc/platforms/maple/maple.h
index c6911ddc479f..eecfa182b06e 100644
--- a/arch/powerpc/platforms/maple/maple.h
+++ b/arch/powerpc/platforms/maple/maple.h
@@ -10,3 +10,5 @@ extern void maple_calibrate_decr(void);
extern void maple_pci_init(void);
extern void maple_pci_irq_fixup(struct pci_dev *dev);
extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel);
+
+extern struct pci_controller_ops maple_pci_controller_ops;
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index d3a13067ec42..a923230e575b 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -510,6 +510,7 @@ static int __init maple_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = maple_pci_controller_ops;
disp_name = NULL;
if (of_device_is_compatible(dev, "u3-agp")) {
@@ -660,3 +661,6 @@ static void quirk_ipr_msi(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN,
quirk_ipr_msi);
+
+struct pci_controller_ops maple_pci_controller_ops = {
+};
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 56b85cd61aaf..a837188544c8 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -203,7 +203,7 @@ static void __init maple_init_early(void)
{
DBG(" -> maple_init_early\n");
- iommu_init_early_dart();
+ iommu_init_early_dart(&maple_pci_controller_ops);
DBG(" <- maple_init_early\n");
}
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c
index 2e576f2ae442..b8f567b2ea19 100644
--- a/arch/powerpc/platforms/pasemi/iommu.c
+++ b/arch/powerpc/platforms/pasemi/iommu.c
@@ -27,6 +27,8 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
+#include "pasemi.h"
+
#define IOBMAP_PAGE_SHIFT 12
#define IOBMAP_PAGE_SIZE (1 << IOBMAP_PAGE_SHIFT)
#define IOBMAP_PAGE_MASK (IOBMAP_PAGE_SIZE - 1)
@@ -248,8 +250,8 @@ void __init iommu_init_early_pasemi(void)
iob_init(NULL);
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pasemi;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pasemi;
+ pasemi_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pasemi;
+ pasemi_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pasemi;
ppc_md.tce_build = iobmap_build;
ppc_md.tce_free = iobmap_free;
set_pci_dma_ops(&dma_iommu_ops);
diff --git a/arch/powerpc/platforms/pasemi/pasemi.h b/arch/powerpc/platforms/pasemi/pasemi.h
index ea65bf0eb897..11f230a48227 100644
--- a/arch/powerpc/platforms/pasemi/pasemi.h
+++ b/arch/powerpc/platforms/pasemi/pasemi.h
@@ -30,5 +30,6 @@ static inline void restore_astate(int cpu)
}
#endif
+extern struct pci_controller_ops pasemi_pci_controller_ops;
#endif /* _PASEMI_PASEMI_H */
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index aa862713258c..f3a68a0fef23 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -31,6 +31,8 @@
#include <asm/ppc-pci.h>
+#include "pasemi.h"
+
#define PA_PXP_CFA(bus, devfn, off) (((bus) << 20) | ((devfn) << 12) | (off))
static inline int pa_pxp_offset_valid(u8 bus, u8 devfn, int offset)
@@ -199,6 +201,7 @@ static int __init pas_add_bridge(struct device_node *dev)
hose->first_busno = 0;
hose->last_busno = 0xff;
+ hose->controller_ops = pasemi_pci_controller_ops;
setup_pa_pxp(hose);
@@ -239,3 +242,5 @@ void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
return (void __iomem *)pa_pxp_cfg_addr(hose, dev->bus->number, dev->devfn, offset);
}
+
+struct pci_controller_ops pasemi_pci_controller_ops;
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c
index 3e91ef538114..76f5013c35e5 100644
--- a/arch/powerpc/platforms/powermac/bootx_init.c
+++ b/arch/powerpc/platforms/powermac/bootx_init.c
@@ -246,7 +246,7 @@ static void __init bootx_scan_dt_build_strings(unsigned long base,
DBG(" detected display ! adding properties names !\n");
bootx_dt_add_string("linux,boot-display", mem_end);
bootx_dt_add_string("linux,opened", mem_end);
- strncpy(bootx_disp_path, namep, 255);
+ strlcpy(bootx_disp_path, namep, sizeof(bootx_disp_path));
}
/* get and store all property names */
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index f4071a67ad00..59ab16fa600f 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -27,6 +27,8 @@
#include <asm/grackle.h>
#include <asm/ppc-pci.h>
+#include "pmac.h"
+
#undef DEBUG
#ifdef DEBUG
@@ -798,6 +800,7 @@ static int __init pmac_add_bridge(struct device_node *dev)
return -ENOMEM;
hose->first_busno = bus_range ? bus_range[0] : 0;
hose->last_busno = bus_range ? bus_range[1] : 0xff;
+ hose->controller_ops = pmac_pci_controller_ops;
disp_name = NULL;
@@ -942,7 +945,7 @@ void __init pmac_pci_init(void)
}
#ifdef CONFIG_PPC32
-int pmac_pci_enable_device_hook(struct pci_dev *dev)
+static bool pmac_pci_enable_device_hook(struct pci_dev *dev)
{
struct device_node* node;
int updatecfg = 0;
@@ -958,11 +961,11 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
&& !node) {
printk(KERN_INFO "Apple USB OHCI %s disabled by firmware\n",
pci_name(dev));
- return -EINVAL;
+ return false;
}
if (!node)
- return 0;
+ return true;
uninorth_child = node->parent &&
of_device_is_compatible(node->parent, "uni-north");
@@ -1003,7 +1006,7 @@ int pmac_pci_enable_device_hook(struct pci_dev *dev)
L1_CACHE_BYTES >> 2);
}
- return 0;
+ return true;
}
void pmac_pci_fixup_ohci(struct pci_dev *dev)
@@ -1223,3 +1226,30 @@ static void fixup_u4_pcie(struct pci_dev* dev)
pci_write_config_dword(dev, PCI_PREF_MEMORY_BASE, 0);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_U4_PCIE, fixup_u4_pcie);
+
+#ifdef CONFIG_PPC64
+static int pmac_pci_probe_mode(struct pci_bus *bus)
+{
+ struct device_node *node = pci_bus_to_OF_node(bus);
+
+ /* We need to use normal PCI probing for the AGP bus,
+ * since the device for the AGP bridge isn't in the tree.
+ * Same for the PCIe host on U4 and the HT host bridge.
+ */
+ if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
+ of_device_is_compatible(node, "u4-pcie") ||
+ of_device_is_compatible(node, "u3-ht")))
+ return PCI_PROBE_NORMAL;
+ return PCI_PROBE_DEVTREE;
+}
+#endif /* CONFIG_PPC64 */
+
+struct pci_controller_ops pmac_pci_controller_ops = {
+#ifdef CONFIG_PPC64
+ .probe_mode = pmac_pci_probe_mode,
+#endif
+#ifdef CONFIG_PPC32
+ .enable_device_hook = pmac_pci_enable_device_hook,
+#endif
+};
+
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 4c24bf60d39d..59cfc9d63c2d 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -321,6 +321,9 @@ static void __init pmac_pic_probe_oldstyle(void)
max_irqs = max_real_irqs = 64;
/* We might have a second cascaded heathrow */
+
+ /* Compensate for of_node_put() in of_find_node_by_name() */
+ of_node_get(master);
slave = of_find_node_by_name(master, "mac-io");
/* Check ordering of master & slave */
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h
index 8327cce2bdb0..e7f8163d6769 100644
--- a/arch/powerpc/platforms/powermac/pmac.h
+++ b/arch/powerpc/platforms/powermac/pmac.h
@@ -25,7 +25,6 @@ extern void pmac_pci_init(void);
extern void pmac_nvram_update(void);
extern unsigned char pmac_nvram_read_byte(int addr);
extern void pmac_nvram_write_byte(int addr, unsigned char val);
-extern int pmac_pci_enable_device_hook(struct pci_dev *dev);
extern void pmac_pcibios_after_init(void);
extern int of_show_percpuinfo(struct seq_file *m, int i);
@@ -39,4 +38,6 @@ extern void low_cpu_die(void) __attribute__((noreturn));
extern int pmac_nvram_init(void);
extern void pmac_pic_init(void);
+extern struct pci_controller_ops pmac_pci_controller_ops;
+
#endif /* __PMAC_H__ */
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index 713d36d45d1d..8dd78f4e1af4 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -473,7 +473,7 @@ static void __init pmac_init_early(void)
udbg_adb_init(!!strstr(boot_command_line, "btextdbg"));
#ifdef CONFIG_PPC64
- iommu_init_early_dart();
+ iommu_init_early_dart(&pmac_pci_controller_ops);
#endif
/* SMP Init has to be done early as we need to patch up
@@ -637,24 +637,6 @@ static int __init pmac_probe(void)
return 1;
}
-#ifdef CONFIG_PPC64
-/* Move that to pci.c */
-static int pmac_pci_probe_mode(struct pci_bus *bus)
-{
- struct device_node *node = pci_bus_to_OF_node(bus);
-
- /* We need to use normal PCI probing for the AGP bus,
- * since the device for the AGP bridge isn't in the tree.
- * Same for the PCIe host on U4 and the HT host bridge.
- */
- if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") ||
- of_device_is_compatible(node, "u4-pcie") ||
- of_device_is_compatible(node, "u3-ht")))
- return PCI_PROBE_NORMAL;
- return PCI_PROBE_DEVTREE;
-}
-#endif /* CONFIG_PPC64 */
-
define_machine(powermac) {
.name = "PowerMac",
.probe = pmac_probe,
@@ -674,12 +656,10 @@ define_machine(powermac) {
.feature_call = pmac_do_feature_call,
.progress = udbg_progress,
#ifdef CONFIG_PPC64
- .pci_probe_mode = pmac_pci_probe_mode,
.power_save = power4_idle,
.enable_pmcs = power4_enable_pmcs,
#endif /* CONFIG_PPC64 */
#ifdef CONFIG_PPC32
- .pcibios_enable_device_hook = pmac_pci_enable_device_hook,
.pcibios_after_init = pmac_pcibios_after_init,
.phys_mem_access_prot = pci_phys_mem_access_prot,
#endif
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index af094ae03dbb..28a147ca32ba 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -268,14 +268,14 @@ static void __init psurge_quad_init(void)
mdelay(33);
}
-static int __init smp_psurge_probe(void)
+static void __init smp_psurge_probe(void)
{
int i, ncpus;
struct device_node *dn;
/* We don't do SMP on the PPC601 -- paulus */
if (PVR_VER(mfspr(SPRN_PVR)) == 1)
- return 1;
+ return;
/*
* The powersurge cpu board can be used in the generation
@@ -289,7 +289,7 @@ static int __init smp_psurge_probe(void)
*/
dn = of_find_node_by_name(NULL, "hammerhead");
if (dn == NULL)
- return 1;
+ return;
of_node_put(dn);
hhead_base = ioremap(HAMMERHEAD_BASE, 0x800);
@@ -310,13 +310,13 @@ static int __init smp_psurge_probe(void)
/* not a dual-cpu card */
iounmap(hhead_base);
psurge_type = PSURGE_NONE;
- return 1;
+ return;
}
ncpus = 2;
}
if (psurge_secondary_ipi_init())
- return 1;
+ return;
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
@@ -332,8 +332,6 @@ static int __init smp_psurge_probe(void)
set_cpu_present(i, true);
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
-
- return ncpus;
}
static int __init smp_psurge_kick_cpu(int nr)
@@ -766,7 +764,7 @@ static void __init smp_core99_setup(int ncpus)
powersave_nap = 0;
}
-static int __init smp_core99_probe(void)
+static void __init smp_core99_probe(void)
{
struct device_node *cpus;
int ncpus = 0;
@@ -781,7 +779,7 @@ static int __init smp_core99_probe(void)
/* Nothing more to do if less than 2 of them */
if (ncpus <= 1)
- return 1;
+ return;
/* We need to perform some early initialisations before we can start
* setting up SMP as we are running before initcalls
@@ -797,8 +795,6 @@ static int __init smp_core99_probe(void)
/* Collect l2cr and l3cr values from CPU 0 */
core99_init_caches(0);
-
- return ncpus;
}
static int smp_core99_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index 45a8ed0585cd..4b044d8cb49a 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -19,10 +19,3 @@ config PPC_POWERNV
select CPU_FREQ_GOV_CONSERVATIVE
select PPC_DOORBELL
default y
-
-config PPC_POWERNV_RTAS
- depends on PPC_POWERNV
- bool "Support for RTAS based PowerNV platforms such as BML"
- default y
- select PPC_ICS_RTAS
- select PPC_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 6f3c5d33c3af..33e44f37212f 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -5,7 +5,7 @@ obj-y += opal-msglog.o opal-hmi.o opal-power.o
obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
-obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
+obj-$(CONFIG_EEH) += eeh-powernv.o
obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
obj-$(CONFIG_MEMORY_FAILURE) += opal-memory-errors.o
obj-$(CONFIG_TRACEPOINTS) += opal-tracepoints.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
deleted file mode 100644
index 2809c9895288..000000000000
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-/*
- * The file intends to implement the functions needed by EEH, which is
- * built on IODA compliant chip. Actually, lots of functions related
- * to EEH would be built based on the OPAL APIs.
- *
- * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/debugfs.h>
-#include <linux/delay.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/kernel.h>
-#include <linux/msi.h>
-#include <linux/notifier.h>
-#include <linux/pci.h>
-#include <linux/string.h>
-
-#include <asm/eeh.h>
-#include <asm/eeh_event.h>
-#include <asm/io.h>
-#include <asm/iommu.h>
-#include <asm/msi_bitmap.h>
-#include <asm/opal.h>
-#include <asm/pci-bridge.h>
-#include <asm/ppc-pci.h>
-#include <asm/tce.h>
-
-#include "powernv.h"
-#include "pci.h"
-
-static int ioda_eeh_nb_init = 0;
-
-static int ioda_eeh_event(struct notifier_block *nb,
- unsigned long events, void *change)
-{
- uint64_t changed_evts = (uint64_t)change;
-
- /*
- * We simply send special EEH event if EEH has
- * been enabled, or clear pending events in
- * case that we enable EEH soon
- */
- if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
- !(events & OPAL_EVENT_PCI_ERROR))
- return 0;
-
- if (eeh_enabled())
- eeh_send_failure_event(NULL);
- else
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
-
- return 0;
-}
-
-static struct notifier_block ioda_eeh_nb = {
- .notifier_call = ioda_eeh_event,
- .next = NULL,
- .priority = 0
-};
-
-#ifdef CONFIG_DEBUG_FS
-static ssize_t ioda_eeh_ei_write(struct file *filp,
- const char __user *user_buf,
- size_t count, loff_t *ppos)
-{
- struct pci_controller *hose = filp->private_data;
- struct pnv_phb *phb = hose->private_data;
- struct eeh_dev *edev;
- struct eeh_pe *pe;
- int pe_no, type, func;
- unsigned long addr, mask;
- char buf[50];
- int ret;
-
- if (!phb->eeh_ops || !phb->eeh_ops->err_inject)
- return -ENXIO;
-
- ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
- if (!ret)
- return -EFAULT;
-
- /* Retrieve parameters */
- ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
- &pe_no, &type, &func, &addr, &mask);
- if (ret != 5)
- return -EINVAL;
-
- /* Retrieve PE */
- edev = kzalloc(sizeof(*edev), GFP_KERNEL);
- if (!edev)
- return -ENOMEM;
- edev->phb = hose;
- edev->pe_config_addr = pe_no;
- pe = eeh_pe_get(edev);
- kfree(edev);
- if (!pe)
- return -ENODEV;
-
- /* Do error injection */
- ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
- return ret < 0 ? ret : count;
-}
-
-static const struct file_operations ioda_eeh_ei_fops = {
- .open = simple_open,
- .llseek = no_llseek,
- .write = ioda_eeh_ei_write,
-};
-
-static int ioda_eeh_dbgfs_set(void *data, int offset, u64 val)
-{
- struct pci_controller *hose = data;
- struct pnv_phb *phb = hose->private_data;
-
- out_be64(phb->regs + offset, val);
- return 0;
-}
-
-static int ioda_eeh_dbgfs_get(void *data, int offset, u64 *val)
-{
- struct pci_controller *hose = data;
- struct pnv_phb *phb = hose->private_data;
-
- *val = in_be64(phb->regs + offset);
- return 0;
-}
-
-static int ioda_eeh_outb_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xD10, val);
-}
-
-static int ioda_eeh_outb_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xD10, val);
-}
-
-static int ioda_eeh_inbA_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xD90, val);
-}
-
-static int ioda_eeh_inbA_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xD90, val);
-}
-
-static int ioda_eeh_inbB_dbgfs_set(void *data, u64 val)
-{
- return ioda_eeh_dbgfs_set(data, 0xE10, val);
-}
-
-static int ioda_eeh_inbB_dbgfs_get(void *data, u64 *val)
-{
- return ioda_eeh_dbgfs_get(data, 0xE10, val);
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_outb_dbgfs_ops, ioda_eeh_outb_dbgfs_get,
- ioda_eeh_outb_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbA_dbgfs_ops, ioda_eeh_inbA_dbgfs_get,
- ioda_eeh_inbA_dbgfs_set, "0x%llx\n");
-DEFINE_SIMPLE_ATTRIBUTE(ioda_eeh_inbB_dbgfs_ops, ioda_eeh_inbB_dbgfs_get,
- ioda_eeh_inbB_dbgfs_set, "0x%llx\n");
-#endif /* CONFIG_DEBUG_FS */
-
-
-/**
- * ioda_eeh_post_init - Chip dependent post initialization
- * @hose: PCI controller
- *
- * The function will be called after eeh PEs and devices
- * have been built. That means the EEH is ready to supply
- * service with I/O cache.
- */
-static int ioda_eeh_post_init(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- int ret;
-
- /* Register OPAL event notifier */
- if (!ioda_eeh_nb_init) {
- ret = opal_notifier_register(&ioda_eeh_nb);
- if (ret) {
- pr_err("%s: Can't register OPAL event notifier (%d)\n",
- __func__, ret);
- return ret;
- }
-
- ioda_eeh_nb_init = 1;
- }
-
-#ifdef CONFIG_DEBUG_FS
- if (!phb->has_dbgfs && phb->dbgfs) {
- phb->has_dbgfs = 1;
-
- debugfs_create_file("err_injct", 0200,
- phb->dbgfs, hose,
- &ioda_eeh_ei_fops);
-
- debugfs_create_file("err_injct_outbound", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_outb_dbgfs_ops);
- debugfs_create_file("err_injct_inboundA", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_inbA_dbgfs_ops);
- debugfs_create_file("err_injct_inboundB", 0600,
- phb->dbgfs, hose,
- &ioda_eeh_inbB_dbgfs_ops);
- }
-#endif
-
- /* If EEH is enabled, we're going to rely on that.
- * Otherwise, we restore to conventional mechanism
- * to clear frozen PE during PCI config access.
- */
- if (eeh_enabled())
- phb->flags |= PNV_PHB_FLAG_EEH;
- else
- phb->flags &= ~PNV_PHB_FLAG_EEH;
-
- return 0;
-}
-
-/**
- * ioda_eeh_set_option - Set EEH operation or I/O setting
- * @pe: EEH PE
- * @option: options
- *
- * Enable or disable EEH option for the indicated PE. The
- * function also can be used to enable I/O or DMA for the
- * PE.
- */
-static int ioda_eeh_set_option(struct eeh_pe *pe, int option)
-{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- bool freeze_pe = false;
- int enable, ret = 0;
- s64 rc;
-
- /* Check on PE number */
- if (pe->addr < 0 || pe->addr >= phb->ioda.total_pe) {
- pr_err("%s: PE address %x out of range [0, %x] "
- "on PHB#%x\n",
- __func__, pe->addr, phb->ioda.total_pe,
- hose->global_number);
- return -EINVAL;
- }
-
- switch (option) {
- case EEH_OPT_DISABLE:
- return -EPERM;
- case EEH_OPT_ENABLE:
- return 0;
- case EEH_OPT_THAW_MMIO:
- enable = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
- break;
- case EEH_OPT_THAW_DMA:
- enable = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
- break;
- case EEH_OPT_FREEZE_PE:
- freeze_pe = true;
- enable = OPAL_EEH_ACTION_SET_FREEZE_ALL;
- break;
- default:
- pr_warn("%s: Invalid option %d\n",
- __func__, option);
- return -EINVAL;
- }
-
- /* If PHB supports compound PE, to handle it */
- if (freeze_pe) {
- if (phb->freeze_pe) {
- phb->freeze_pe(phb, pe->addr);
- } else {
- rc = opal_pci_eeh_freeze_set(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld freezing "
- "PHB#%x-PE#%x\n",
- __func__, rc,
- phb->hose->global_number, pe->addr);
- ret = -EIO;
- }
- }
- } else {
- if (phb->unfreeze_pe) {
- ret = phb->unfreeze_pe(phb, pe->addr, enable);
- } else {
- rc = opal_pci_eeh_freeze_clear(phb->opal_id,
- pe->addr,
- enable);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld enable %d "
- "for PHB#%x-PE#%x\n",
- __func__, rc, option,
- phb->hose->global_number, pe->addr);
- ret = -EIO;
- }
- }
- }
-
- return ret;
-}
-
-static void ioda_eeh_phb_diag(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- long rc;
-
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
- PNV_PCI_DIAG_BUF_SIZE);
- if (rc != OPAL_SUCCESS)
- pr_warn("%s: Failed to get diag-data for PHB#%x (%ld)\n",
- __func__, pe->phb->global_number, rc);
-}
-
-static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
- s64 rc;
- int result = 0;
-
- rc = opal_pci_eeh_freeze_status(phb->opal_id,
- pe->addr,
- &fstate,
- &pcierr,
- NULL);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld getting PHB#%x state\n",
- __func__, rc, phb->hose->global_number);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- /*
- * Check PHB state. If the PHB is frozen for the
- * first time, to dump the PHB diag-data.
- */
- if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- } else if (!(pe->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- }
-
- return result;
-}
-
-static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
- u8 fstate;
- __be16 pcierr;
- s64 rc;
- int result;
-
- /*
- * We don't clobber hardware frozen state until PE
- * reset is completed. In order to keep EEH core
- * moving forward, we have to return operational
- * state during PE reset.
- */
- if (pe->state & EEH_PE_RESET) {
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- return result;
- }
-
- /*
- * Fetch PE state from hardware. If the PHB
- * supports compound PE, let it handle that.
- */
- if (phb->get_pe_state) {
- fstate = phb->get_pe_state(phb, pe->addr);
- } else {
- rc = opal_pci_eeh_freeze_status(phb->opal_id,
- pe->addr,
- &fstate,
- &pcierr,
- NULL);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
- __func__, rc, phb->hose->global_number, pe->addr);
- return EEH_STATE_NOT_SUPPORT;
- }
- }
-
- /* Figure out state */
- switch (fstate) {
- case OPAL_EEH_STOPPED_NOT_FROZEN:
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE |
- EEH_STATE_MMIO_ENABLED |
- EEH_STATE_DMA_ENABLED);
- break;
- case OPAL_EEH_STOPPED_MMIO_FREEZE:
- result = (EEH_STATE_DMA_ACTIVE |
- EEH_STATE_DMA_ENABLED);
- break;
- case OPAL_EEH_STOPPED_DMA_FREEZE:
- result = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_MMIO_ENABLED);
- break;
- case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
- result = 0;
- break;
- case OPAL_EEH_STOPPED_RESET:
- result = EEH_STATE_RESET_ACTIVE;
- break;
- case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
- result = EEH_STATE_UNAVAILABLE;
- break;
- case OPAL_EEH_STOPPED_PERM_UNAVAIL:
- result = EEH_STATE_NOT_SUPPORT;
- break;
- default:
- result = EEH_STATE_NOT_SUPPORT;
- pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
- __func__, phb->hose->global_number,
- pe->addr, fstate);
- }
-
- /*
- * If PHB supports compound PE, to freeze all
- * slave PEs for consistency.
- *
- * If the PE is switching to frozen state for the
- * first time, to dump the PHB diag-data.
- */
- if (!(result & EEH_STATE_NOT_SUPPORT) &&
- !(result & EEH_STATE_UNAVAILABLE) &&
- !(result & EEH_STATE_MMIO_ACTIVE) &&
- !(result & EEH_STATE_DMA_ACTIVE) &&
- !(pe->state & EEH_PE_ISOLATED)) {
- if (phb->freeze_pe)
- phb->freeze_pe(phb, pe->addr);
-
- eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- }
-
- return result;
-}
-
-/**
- * ioda_eeh_get_state - Retrieve the state of PE
- * @pe: EEH PE
- *
- * The PE's state should be retrieved from the PEEV, PEST
- * IODA tables. Since the OPAL has exported the function
- * to do it, it'd better to use that.
- */
-static int ioda_eeh_get_state(struct eeh_pe *pe)
-{
- struct pnv_phb *phb = pe->phb->private_data;
-
- /* Sanity check on PE number. PHB PE should have 0 */
- if (pe->addr < 0 ||
- pe->addr >= phb->ioda.total_pe) {
- pr_warn("%s: PHB#%x-PE#%x out of range [0, %x]\n",
- __func__, phb->hose->global_number,
- pe->addr, phb->ioda.total_pe);
- return EEH_STATE_NOT_SUPPORT;
- }
-
- if (pe->type & EEH_PE_PHB)
- return ioda_eeh_get_phb_state(pe);
-
- return ioda_eeh_get_pe_state(pe);
-}
-
-static s64 ioda_eeh_phb_poll(struct pnv_phb *phb)
-{
- s64 rc = OPAL_HARDWARE;
-
- while (1) {
- rc = opal_pci_poll(phb->opal_id);
- if (rc <= 0)
- break;
-
- if (system_state < SYSTEM_RUNNING)
- udelay(1000 * rc);
- else
- msleep(rc);
- }
-
- return rc;
-}
-
-int ioda_eeh_phb_reset(struct pci_controller *hose, int option)
-{
- struct pnv_phb *phb = hose->private_data;
- s64 rc = OPAL_HARDWARE;
-
- pr_debug("%s: Reset PHB#%x, option=%d\n",
- __func__, hose->global_number, option);
-
- /* Issue PHB complete reset request */
- if (option == EEH_RESET_FUNDAMENTAL ||
- option == EEH_RESET_HOT)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_COMPLETE,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_DEACTIVATE)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_COMPLETE,
- OPAL_DEASSERT_RESET);
- if (rc < 0)
- goto out;
-
- /*
- * Poll state of the PHB until the request is done
- * successfully. The PHB reset is usually PHB complete
- * reset followed by hot reset on root bus. So we also
- * need the PCI bus settlement delay.
- */
- rc = ioda_eeh_phb_poll(phb);
- if (option == EEH_RESET_DEACTIVATE) {
- if (system_state < SYSTEM_RUNNING)
- udelay(1000 * EEH_PE_RST_SETTLE_TIME);
- else
- msleep(EEH_PE_RST_SETTLE_TIME);
- }
-out:
- if (rc != OPAL_SUCCESS)
- return -EIO;
-
- return 0;
-}
-
-static int ioda_eeh_root_reset(struct pci_controller *hose, int option)
-{
- struct pnv_phb *phb = hose->private_data;
- s64 rc = OPAL_SUCCESS;
-
- pr_debug("%s: Reset PHB#%x, option=%d\n",
- __func__, hose->global_number, option);
-
- /*
- * During the reset deassert time, we needn't care
- * the reset scope because the firmware does nothing
- * for fundamental or hot reset during deassert phase.
- */
- if (option == EEH_RESET_FUNDAMENTAL)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_FUNDAMENTAL,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_HOT)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_HOT,
- OPAL_ASSERT_RESET);
- else if (option == EEH_RESET_DEACTIVATE)
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PCI_HOT,
- OPAL_DEASSERT_RESET);
- if (rc < 0)
- goto out;
-
- /* Poll state of the PHB until the request is done */
- rc = ioda_eeh_phb_poll(phb);
- if (option == EEH_RESET_DEACTIVATE)
- msleep(EEH_PE_RST_SETTLE_TIME);
-out:
- if (rc != OPAL_SUCCESS)
- return -EIO;
-
- return 0;
-}
-
-static int ioda_eeh_bridge_reset(struct pci_dev *dev, int option)
-
-{
- struct device_node *dn = pci_device_to_OF_node(dev);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
- int aer = edev ? edev->aer_cap : 0;
- u32 ctrl;
-
- pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
- __func__, pci_domain_nr(dev->bus),
- dev->bus->number, option);
-
- switch (option) {
- case EEH_RESET_FUNDAMENTAL:
- case EEH_RESET_HOT:
- /* Don't report linkDown event */
- if (aer) {
- eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, &ctrl);
- ctrl |= PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, ctrl);
- }
-
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
- ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
- msleep(EEH_PE_RST_HOLD_TIME);
-
- break;
- case EEH_RESET_DEACTIVATE:
- eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &ctrl);
- ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
- eeh_ops->write_config(dn, PCI_BRIDGE_CONTROL, 2, ctrl);
- msleep(EEH_PE_RST_SETTLE_TIME);
-
- /* Continue reporting linkDown event */
- if (aer) {
- eeh_ops->read_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, &ctrl);
- ctrl &= ~PCI_ERR_UNC_SURPDN;
- eeh_ops->write_config(dn, aer + PCI_ERR_UNCOR_MASK,
- 4, ctrl);
- }
-
- break;
- }
-
- return 0;
-}
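
What the branch above performs is the standard PCI secondary bus reset, done through eeh_ops->read_config()/write_config() because ordinary config access may be blocked while the PE is frozen. For readers more used to the generic accessors, a rough equivalent on a plain struct pci_dev is sketched below; the hold and settle times are illustrative values, not the EEH_PE_RST_* constants.

#include <linux/delay.h>
#include <linux/pci.h>

/* Illustrative secondary bus reset of bridge @dev, with AER masking. */
static void example_secondary_bus_reset(struct pci_dev *dev)
{
	int aer = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	u32 mask = 0;
	u16 ctrl;

	if (aer) {	/* hide the forced link drop from AER */
		pci_read_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, &mask);
		pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK,
				       mask | PCI_ERR_UNC_SURPDN);
	}

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &ctrl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      ctrl | PCI_BRIDGE_CTL_BUS_RESET);
	msleep(100);	/* hold the reset asserted */

	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, ctrl);
	msleep(1000);	/* let the secondary bus settle */

	if (aer)	/* restore surprise-down reporting */
		pci_write_config_dword(dev, aer + PCI_ERR_UNCOR_MASK, mask);
}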
-
-void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
-{
- struct pci_controller *hose;
-
- if (pci_is_root_bus(dev->bus)) {
- hose = pci_bus_to_host(dev->bus);
- ioda_eeh_root_reset(hose, EEH_RESET_HOT);
- ioda_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
- } else {
- ioda_eeh_bridge_reset(dev, EEH_RESET_HOT);
- ioda_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
- }
-}
-
-/**
- * ioda_eeh_reset - Reset the indicated PE
- * @pe: EEH PE
- * @option: reset option
- *
- * Do reset on the indicated PE. For PCI bus sensitive PE,
- * we need to reset the parent p2p bridge. The PHB has to
- * be reinitialized if the p2p bridge is root bridge. For
- * PCI device sensitive PE, we will try to reset the device
- * through FLR. For now, we don't have OPAL APIs to do HARD
- * reset yet, so all reset would be SOFT (HOT) reset.
- */
-static int ioda_eeh_reset(struct eeh_pe *pe, int option)
-{
- struct pci_controller *hose = pe->phb;
- struct pci_bus *bus;
- int ret;
-
- /*
-	 * For PHB reset, we always do a complete reset. For those PEs whose
-	 * primary bus is derived from the root complex (root bus) or root port
-	 * (usually bus#1), we apply hot or fundamental reset on the root port.
-	 * For other PEs, we always do a hot reset on the PE primary bus.
-	 *
-	 * Here, our design differs from pHyp, which always clears the
-	 * frozen state during PE reset. However, the good idea here from
-	 * benh is to keep the frozen state until the PE reset is done
-	 * completely (until BAR restore). With the frozen state, the HW drops
-	 * illegal IO or MMIO accesses, which could otherwise incur recursive
-	 * frozen PEs during PE reset. The side effect is that the EEH core
-	 * has to clear the frozen state explicitly after BAR restore.
- */
- if (pe->type & EEH_PE_PHB) {
- ret = ioda_eeh_phb_reset(hose, option);
- } else {
- struct pnv_phb *phb;
- s64 rc;
-
- /*
- * The frozen PE might be caused by PAPR error injection
- * registers, which are expected to be cleared after hitting
- * frozen PE as stated in the hardware spec. Unfortunately,
- * that's not true on P7IOC. So we have to clear it manually
- * to avoid recursive EEH errors during recovery.
- */
- phb = hose->private_data;
- if (phb->model == PNV_PHB_MODEL_P7IOC &&
- (option == EEH_RESET_HOT ||
- option == EEH_RESET_FUNDAMENTAL)) {
- rc = opal_pci_reset(phb->opal_id,
- OPAL_RESET_PHB_ERROR,
- OPAL_ASSERT_RESET);
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld clearing "
- "error injection registers\n",
- __func__, rc);
- return -EIO;
- }
- }
-
- bus = eeh_pe_bus_get(pe);
- if (pci_is_root_bus(bus) ||
- pci_is_root_bus(bus->parent))
- ret = ioda_eeh_root_reset(hose, option);
- else
- ret = ioda_eeh_bridge_reset(bus->self, option);
- }
-
- return ret;
-}
-
-/**
- * ioda_eeh_get_log - Retrieve error log
- * @pe: frozen PE
- * @severity: permanent or temporary error
- * @drv_log: device driver log
- * @len: length of device driver log
- *
- * Retrieve error log, which contains log from device driver
- * and firmware.
- */
-static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
-{
- if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
-
- return 0;
-}
-
-/**
- * ioda_eeh_configure_bridge - Configure the PCI bridges for the indicated PE
- * @pe: EEH PE
- *
- * A particular PE might include PCI bridges. In order to make
- * the PE work properly, those PCI bridges should be configured
- * correctly. However, nothing needs to be done on P7IOC since the
- * reset function already covers everything this function would do.
- */
-static int ioda_eeh_configure_bridge(struct eeh_pe *pe)
-{
- return 0;
-}
-
-static int ioda_eeh_err_inject(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask)
-{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- s64 ret;
-
- /* Sanity check on error type */
- if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
- type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
- pr_warn("%s: Invalid error type %d\n",
- __func__, type);
- return -ERANGE;
- }
-
- if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
- func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
- pr_warn("%s: Invalid error function %d\n",
- __func__, func);
- return -ERANGE;
- }
-
-	/* Does the firmware support error injection? */
- if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
- pr_warn("%s: Firmware doesn't support error injection\n",
- __func__);
- return -ENXIO;
- }
-
- /* Do error injection */
- ret = opal_pci_err_inject(phb->opal_id, pe->addr,
- type, func, addr, mask);
- if (ret != OPAL_SUCCESS) {
- pr_warn("%s: Failure %lld injecting error "
- "%d-%d to PHB#%x-PE#%x\n",
- __func__, ret, type, func,
- hose->global_number, pe->addr);
- return -EIO;
- }
-
- return 0;
-}
-
-static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
-{
- /* GEM */
- if (data->gemXfir || data->gemRfir ||
- data->gemRirqfir || data->gemMask || data->gemRwof)
- pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->gemXfir),
- be64_to_cpu(data->gemRfir),
- be64_to_cpu(data->gemRirqfir),
- be64_to_cpu(data->gemMask),
- be64_to_cpu(data->gemRwof));
-
- /* LEM */
- if (data->lemFir || data->lemErrMask ||
- data->lemAction0 || data->lemAction1 || data->lemWof)
- pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->lemFir),
- be64_to_cpu(data->lemErrMask),
- be64_to_cpu(data->lemAction0),
- be64_to_cpu(data->lemAction1),
- be64_to_cpu(data->lemWof));
-}
-
-static void ioda_eeh_hub_diag(struct pci_controller *hose)
-{
- struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
- long rc;
-
- rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
- if (rc != OPAL_SUCCESS) {
- pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
- __func__, phb->hub_id, rc);
- return;
- }
-
- switch (data->type) {
- case OPAL_P7IOC_DIAG_TYPE_RGC:
- pr_info("P7IOC diag-data for RGC\n\n");
- ioda_eeh_hub_diag_common(data);
- if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
- pr_info(" RGC: %016llx %016llx\n",
- be64_to_cpu(data->rgc.rgcStatus),
- be64_to_cpu(data->rgc.rgcLdcp));
- break;
- case OPAL_P7IOC_DIAG_TYPE_BI:
- pr_info("P7IOC diag-data for BI %s\n\n",
- data->bi.biDownbound ? "Downbound" : "Upbound");
- ioda_eeh_hub_diag_common(data);
- if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
- data->bi.biLdcp2 || data->bi.biFenceStatus)
- pr_info(" BI: %016llx %016llx %016llx %016llx\n",
- be64_to_cpu(data->bi.biLdcp0),
- be64_to_cpu(data->bi.biLdcp1),
- be64_to_cpu(data->bi.biLdcp2),
- be64_to_cpu(data->bi.biFenceStatus));
- break;
- case OPAL_P7IOC_DIAG_TYPE_CI:
- pr_info("P7IOC diag-data for CI Port %d\n\n",
- data->ci.ciPort);
- ioda_eeh_hub_diag_common(data);
- if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
- pr_info(" CI: %016llx %016llx\n",
- be64_to_cpu(data->ci.ciPortStatus),
- be64_to_cpu(data->ci.ciPortLdcp));
- break;
- case OPAL_P7IOC_DIAG_TYPE_MISC:
- pr_info("P7IOC diag-data for MISC\n\n");
- ioda_eeh_hub_diag_common(data);
- break;
- case OPAL_P7IOC_DIAG_TYPE_I2C:
- pr_info("P7IOC diag-data for I2C\n\n");
- ioda_eeh_hub_diag_common(data);
- break;
- default:
- pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
- __func__, phb->hub_id, data->type);
- }
-}
-
-static int ioda_eeh_get_pe(struct pci_controller *hose,
- u16 pe_no, struct eeh_pe **pe)
-{
- struct pnv_phb *phb = hose->private_data;
- struct pnv_ioda_pe *pnv_pe;
- struct eeh_pe *dev_pe;
- struct eeh_dev edev;
-
- /*
-	 * If the PHB supports compound PEs, fetch
-	 * the master PE, because slave PEs are invisible
-	 * to the EEH core.
- */
- pnv_pe = &phb->ioda.pe_array[pe_no];
- if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
- pnv_pe = pnv_pe->master;
- WARN_ON(!pnv_pe ||
- !(pnv_pe->flags & PNV_IODA_PE_MASTER));
- pe_no = pnv_pe->pe_number;
- }
-
- /* Find the PE according to PE# */
- memset(&edev, 0, sizeof(struct eeh_dev));
- edev.phb = hose;
- edev.pe_config_addr = pe_no;
- dev_pe = eeh_pe_get(&edev);
- if (!dev_pe)
- return -EEXIST;
-
- /* Freeze the (compound) PE */
- *pe = dev_pe;
- if (!(dev_pe->state & EEH_PE_ISOLATED))
- phb->freeze_pe(phb, pe_no);
-
- /*
-	 * At this point, we're sure the (compound) PE should
-	 * have been frozen. However, we still need to poke upwards
-	 * until we hit the topmost frozen PE.
- */
- dev_pe = dev_pe->parent;
- while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
- int ret;
- int active_flags = (EEH_STATE_MMIO_ACTIVE |
- EEH_STATE_DMA_ACTIVE);
-
- ret = eeh_ops->get_state(dev_pe, NULL);
- if (ret <= 0 || (ret & active_flags) == active_flags) {
- dev_pe = dev_pe->parent;
- continue;
- }
-
- /* Frozen parent PE */
- *pe = dev_pe;
- if (!(dev_pe->state & EEH_PE_ISOLATED))
- phb->freeze_pe(phb, dev_pe->addr);
-
- /* Next one */
- dev_pe = dev_pe->parent;
- }
-
- return 0;
-}
-
-/**
- * ioda_eeh_next_error - Retrieve next error for EEH core to handle
- * @pe: The affected PE
- *
- * The function is expected to be called by EEH core while it gets
- * special EEH event (without binding PE). The function calls to
- * OPAL APIs for next error to handle. The informational error is
- * handled internally by platform. However, the dead IOC, dead PHB,
- * fenced PHB and frozen PE should be handled by EEH core eventually.
- */
-static int ioda_eeh_next_error(struct eeh_pe **pe)
-{
- struct pci_controller *hose;
- struct pnv_phb *phb;
- struct eeh_pe *phb_pe, *parent_pe;
- __be64 frozen_pe_no;
- __be16 err_type, severity;
- int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
- long rc;
- int state, ret = EEH_NEXT_ERR_NONE;
-
- /*
- * While running here, it's safe to purge the event queue.
-	 * And we should keep the cached OPAL notifier event synchronized
- * between the kernel and firmware.
- */
- eeh_remove_event(NULL, false);
- opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
-
- list_for_each_entry(hose, &hose_list, list_node) {
- /*
-		 * If the subordinate PCI buses of the PHB have been
-		 * removed or are currently undergoing error recovery,
-		 * we needn't take care of it any more.
- */
- phb = hose->private_data;
- phb_pe = eeh_phb_pe_get(hose);
- if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
- continue;
-
- rc = opal_pci_next_error(phb->opal_id,
- &frozen_pe_no, &err_type, &severity);
-
- /* If OPAL API returns error, we needn't proceed */
- if (rc != OPAL_SUCCESS) {
- pr_devel("%s: Invalid return value on "
- "PHB#%x (0x%lx) from opal_pci_next_error",
- __func__, hose->global_number, rc);
- continue;
- }
-
- /* If the PHB doesn't have error, stop processing */
- if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
- be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
- pr_devel("%s: No error found on PHB#%x\n",
- __func__, hose->global_number);
- continue;
- }
-
- /*
-		 * Process the error. When multiple errors are present
-		 * on the specific PHB, we expect the one with the highest
-		 * priority to be reported.
- */
- pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
- __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
- be64_to_cpu(frozen_pe_no), hose->global_number);
- switch (be16_to_cpu(err_type)) {
- case OPAL_EEH_IOC_ERROR:
- if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
- pr_err("EEH: dead IOC detected\n");
- ret = EEH_NEXT_ERR_DEAD_IOC;
- } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
- pr_info("EEH: IOC informative error "
- "detected\n");
- ioda_eeh_hub_diag(hose);
- ret = EEH_NEXT_ERR_NONE;
- }
-
- break;
- case OPAL_EEH_PHB_ERROR:
- if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
- *pe = phb_pe;
- pr_err("EEH: dead PHB#%x detected, "
- "location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_DEAD_PHB;
- } else if (be16_to_cpu(severity) ==
- OPAL_EEH_SEV_PHB_FENCED) {
- *pe = phb_pe;
- pr_err("EEH: Fenced PHB#%x detected, "
- "location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_FENCED_PHB;
- } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
- pr_info("EEH: PHB#%x informative error "
- "detected, location: %s\n",
- hose->global_number,
- eeh_pe_loc_get(phb_pe));
- ioda_eeh_phb_diag(phb_pe);
- pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
- ret = EEH_NEXT_ERR_NONE;
- }
-
- break;
- case OPAL_EEH_PE_ERROR:
- /*
- * If we can't find the corresponding PE, we
- * just try to unfreeze.
- */
- if (ioda_eeh_get_pe(hose,
- be64_to_cpu(frozen_pe_no), pe)) {
- /* Try best to clear it */
- pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
- hose->global_number, frozen_pe_no);
- pr_info("EEH: PHB location: %s\n",
- eeh_pe_loc_get(phb_pe));
- opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
- OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
- ret = EEH_NEXT_ERR_NONE;
- } else if ((*pe)->state & EEH_PE_ISOLATED ||
- eeh_pe_passed(*pe)) {
- ret = EEH_NEXT_ERR_NONE;
- } else {
- pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
- (*pe)->addr, (*pe)->phb->global_number);
- pr_err("EEH: PE location: %s, PHB location: %s\n",
- eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
- ret = EEH_NEXT_ERR_FROZEN_PE;
- }
-
- break;
- default:
- pr_warn("%s: Unexpected error type %d\n",
- __func__, be16_to_cpu(err_type));
- }
-
- /*
-		 * The EEH core will try to recover from a fenced PHB or a
-		 * frozen PE. For a frozen PE, the EEH core enables the IO
-		 * path before collecting logs, which ruins the error site.
-		 * So we have to dump the log in advance here.
- */
- if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
- ret == EEH_NEXT_ERR_FENCED_PHB) &&
- !((*pe)->state & EEH_PE_ISOLATED)) {
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
- ioda_eeh_phb_diag(*pe);
-
- if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
- pnv_pci_dump_phb_diag_data((*pe)->phb,
- (*pe)->data);
- }
-
- /*
-		 * We probably have a frozen parent PE out there and
-		 * we have to handle the frozen parent PE first.
- */
- if (ret == EEH_NEXT_ERR_FROZEN_PE) {
- parent_pe = (*pe)->parent;
- while (parent_pe) {
- /* Hit the ceiling ? */
- if (parent_pe->type & EEH_PE_PHB)
- break;
-
- /* Frozen parent PE ? */
- state = ioda_eeh_get_state(parent_pe);
- if (state > 0 &&
- (state & active_flags) != active_flags)
- *pe = parent_pe;
-
- /* Next parent level */
- parent_pe = parent_pe->parent;
- }
-
- /* We possibly migrate to another PE */
- eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
- }
-
- /*
- * If we have no errors on the specific PHB or only
- * informative error there, we continue poking it.
- * Otherwise, we need actions to be taken by upper
- * layer.
- */
- if (ret > EEH_NEXT_ERR_INF)
- break;
- }
-
- return ret;
-}
-
-struct pnv_eeh_ops ioda_eeh_ops = {
- .post_init = ioda_eeh_post_init,
- .set_option = ioda_eeh_set_option,
- .get_state = ioda_eeh_get_state,
- .reset = ioda_eeh_reset,
- .get_log = ioda_eeh_get_log,
- .configure_bridge = ioda_eeh_configure_bridge,
- .err_inject = ioda_eeh_err_inject,
- .next_error = ioda_eeh_next_error
-};
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index e261869adc86..ce738ab3d5a9 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -12,6 +12,7 @@
*/
#include <linux/atomic.h>
+#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
@@ -38,12 +39,14 @@
#include "powernv.h"
#include "pci.h"
+static bool pnv_eeh_nb_init = false;
+
/**
- * powernv_eeh_init - EEH platform dependent initialization
+ * pnv_eeh_init - EEH platform dependent initialization
*
* EEH platform dependent initialization on powernv
*/
-static int powernv_eeh_init(void)
+static int pnv_eeh_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
@@ -85,37 +88,280 @@ static int powernv_eeh_init(void)
return 0;
}
+static int pnv_eeh_event(struct notifier_block *nb,
+ unsigned long events, void *change)
+{
+ uint64_t changed_evts = (uint64_t)change;
+
+ /*
+	 * We simply send a special EEH event if EEH has
+	 * been enabled, or clear pending events in case
+	 * we will enable EEH soon.
+ */
+ if (!(changed_evts & OPAL_EVENT_PCI_ERROR) ||
+ !(events & OPAL_EVENT_PCI_ERROR))
+ return 0;
+
+ if (eeh_enabled())
+ eeh_send_failure_event(NULL);
+ else
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
+
+ return 0;
+}
+
+static struct notifier_block pnv_eeh_nb = {
+ .notifier_call = pnv_eeh_event,
+ .next = NULL,
+ .priority = 0
+};
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t pnv_eeh_ei_write(struct file *filp,
+ const char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct pci_controller *hose = filp->private_data;
+ struct eeh_dev *edev;
+ struct eeh_pe *pe;
+ int pe_no, type, func;
+ unsigned long addr, mask;
+ char buf[50];
+ int ret;
+
+ if (!eeh_ops || !eeh_ops->err_inject)
+ return -ENXIO;
+
+ /* Copy over argument buffer */
+ ret = simple_write_to_buffer(buf, sizeof(buf), ppos, user_buf, count);
+ if (!ret)
+ return -EFAULT;
+
+ /* Retrieve parameters */
+ ret = sscanf(buf, "%x:%x:%x:%lx:%lx",
+ &pe_no, &type, &func, &addr, &mask);
+ if (ret != 5)
+ return -EINVAL;
+
+ /* Retrieve PE */
+ edev = kzalloc(sizeof(*edev), GFP_KERNEL);
+ if (!edev)
+ return -ENOMEM;
+ edev->phb = hose;
+ edev->pe_config_addr = pe_no;
+ pe = eeh_pe_get(edev);
+ kfree(edev);
+ if (!pe)
+ return -ENODEV;
+
+ /* Do error injection */
+ ret = eeh_ops->err_inject(pe, type, func, addr, mask);
+ return ret < 0 ? ret : count;
+}
+
+static const struct file_operations pnv_eeh_ei_fops = {
+ .open = simple_open,
+ .llseek = no_llseek,
+ .write = pnv_eeh_ei_write,
+};
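
The handler above expects one line of five colon-separated hex fields, "pe_no:type:func:addr:mask", matching the sscanf() format. Assuming the file ends up under the PHB's debugfs directory (the exact path below is only an example and depends on how phb->dbgfs was created), a user-space test could drive it like this:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path; the real one depends on the PHB's debugfs dir. */
	const char *path = "/sys/kernel/debug/powerpc/PCI0000/err_injct";
	char cmd[64];
	int fd, len;

	/* pe_no:type:func:addr:mask -- inject a 32-bit IOA bus error */
	len = snprintf(cmd, sizeof(cmd), "%x:%x:%x:%lx:%lx\n",
		       0x2, 0x0, 0x0, 0x0ul, 0x0ul);

	fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, cmd, len) != len)
		perror("write");
	close(fd);
	return 0;
}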
+
+static int pnv_eeh_dbgfs_set(void *data, int offset, u64 val)
+{
+ struct pci_controller *hose = data;
+ struct pnv_phb *phb = hose->private_data;
+
+ out_be64(phb->regs + offset, val);
+ return 0;
+}
+
+static int pnv_eeh_dbgfs_get(void *data, int offset, u64 *val)
+{
+ struct pci_controller *hose = data;
+ struct pnv_phb *phb = hose->private_data;
+
+ *val = in_be64(phb->regs + offset);
+ return 0;
+}
+
+static int pnv_eeh_outb_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xD10, val);
+}
+
+static int pnv_eeh_outb_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xD10, val);
+}
+
+static int pnv_eeh_inbA_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xD90, val);
+}
+
+static int pnv_eeh_inbA_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xD90, val);
+}
+
+static int pnv_eeh_inbB_dbgfs_set(void *data, u64 val)
+{
+ return pnv_eeh_dbgfs_set(data, 0xE10, val);
+}
+
+static int pnv_eeh_inbB_dbgfs_get(void *data, u64 *val)
+{
+ return pnv_eeh_dbgfs_get(data, 0xE10, val);
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_outb_dbgfs_ops, pnv_eeh_outb_dbgfs_get,
+ pnv_eeh_outb_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbA_dbgfs_ops, pnv_eeh_inbA_dbgfs_get,
+ pnv_eeh_inbA_dbgfs_set, "0x%llx\n");
+DEFINE_SIMPLE_ATTRIBUTE(pnv_eeh_inbB_dbgfs_ops, pnv_eeh_inbB_dbgfs_get,
+ pnv_eeh_inbB_dbgfs_set, "0x%llx\n");
+#endif /* CONFIG_DEBUG_FS */
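
DEFINE_SIMPLE_ATTRIBUTE() is what turns each get/set pair plus a printf-style format into a complete file_operations, so the three register windows above need no hand-written read or write handlers. A minimal, self-contained example of the same pattern, with every name invented for illustration:

#include <linux/debugfs.h>
#include <linux/fs.h>

static u64 example_value;	/* backing store for the attribute */

static int example_get(void *data, u64 *val)
{
	*val = example_value;
	return 0;
}

static int example_set(void *data, u64 val)
{
	example_value = val;
	return 0;
}

/* Generates example_fops with open/read/write wired to the pair above */
DEFINE_SIMPLE_ATTRIBUTE(example_fops, example_get, example_set, "0x%llx\n");

The generated example_fops is then handed to debugfs_create_file(), exactly as pnv_eeh_post_init() does for the err_injct_* files further down.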
+
/**
- * powernv_eeh_post_init - EEH platform dependent post initialization
+ * pnv_eeh_post_init - EEH platform dependent post initialization
*
* EEH platform dependent post initialization on powernv. When
* the function is called, the EEH PEs and devices should have
 * been built. If the I/O cache stuff has been built, EEH is
* ready to supply service.
*/
-static int powernv_eeh_post_init(void)
+static int pnv_eeh_post_init(void)
{
struct pci_controller *hose;
struct pnv_phb *phb;
int ret = 0;
+ /* Register OPAL event notifier */
+ if (!pnv_eeh_nb_init) {
+ ret = opal_notifier_register(&pnv_eeh_nb);
+ if (ret) {
+ pr_warn("%s: Can't register OPAL event notifier (%d)\n",
+ __func__, ret);
+ return ret;
+ }
+
+ pnv_eeh_nb_init = true;
+ }
+
list_for_each_entry(hose, &hose_list, list_node) {
phb = hose->private_data;
- if (phb->eeh_ops && phb->eeh_ops->post_init) {
- ret = phb->eeh_ops->post_init(hose);
- if (ret)
- break;
- }
+ /*
+ * If EEH is enabled, we're going to rely on that.
+		 * Otherwise, we fall back to the conventional mechanism
+		 * of clearing frozen PEs during PCI config access.
+ */
+ if (eeh_enabled())
+ phb->flags |= PNV_PHB_FLAG_EEH;
+ else
+ phb->flags &= ~PNV_PHB_FLAG_EEH;
+
+ /* Create debugfs entries */
+#ifdef CONFIG_DEBUG_FS
+ if (phb->has_dbgfs || !phb->dbgfs)
+ continue;
+
+ phb->has_dbgfs = 1;
+ debugfs_create_file("err_injct", 0200,
+ phb->dbgfs, hose,
+ &pnv_eeh_ei_fops);
+
+ debugfs_create_file("err_injct_outbound", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_outb_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundA", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_inbA_dbgfs_ops);
+ debugfs_create_file("err_injct_inboundB", 0600,
+ phb->dbgfs, hose,
+ &pnv_eeh_inbB_dbgfs_ops);
+#endif /* CONFIG_DEBUG_FS */
}
+
return ret;
}
+static int pnv_eeh_cap_start(struct pci_dn *pdn)
+{
+ u32 status;
+
+ if (!pdn)
+ return 0;
+
+ pnv_pci_cfg_read(pdn, PCI_STATUS, 2, &status);
+ if (!(status & PCI_STATUS_CAP_LIST))
+ return 0;
+
+ return PCI_CAPABILITY_LIST;
+}
+
+static int pnv_eeh_find_cap(struct pci_dn *pdn, int cap)
+{
+ int pos = pnv_eeh_cap_start(pdn);
+ int cnt = 48; /* Maximal number of capabilities */
+ u32 id;
+
+ if (!pos)
+ return 0;
+
+ while (cnt--) {
+ pnv_pci_cfg_read(pdn, pos, 1, &pos);
+ if (pos < 0x40)
+ break;
+
+ pos &= ~3;
+ pnv_pci_cfg_read(pdn, pos + PCI_CAP_LIST_ID, 1, &id);
+ if (id == 0xff)
+ break;
+
+ /* Found */
+ if (id == cap)
+ return pos;
+
+ /* Next one */
+ pos += PCI_CAP_LIST_NEXT;
+ }
+
+ return 0;
+}
+
+static int pnv_eeh_find_ecap(struct pci_dn *pdn, int cap)
+{
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ u32 header;
+ int pos = 256, ttl = (4096 - 256) / 8;
+
+ if (!edev || !edev->pcie_cap)
+ return 0;
+ if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ return 0;
+ else if (!header)
+ return 0;
+
+ while (ttl-- > 0) {
+ if (PCI_EXT_CAP_ID(header) == cap && pos)
+ return pos;
+
+ pos = PCI_EXT_CAP_NEXT(header);
+ if (pos < 256)
+ break;
+
+ if (pnv_pci_cfg_read(pdn, pos, 4, &header) != PCIBIOS_SUCCESSFUL)
+ break;
+ }
+
+ return 0;
+}
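
pnv_eeh_find_ecap() walks the PCIe extended capability list directly through pnv_pci_cfg_read(), since no struct pci_dev exists yet at probe time and pci_find_ext_capability() cannot be used. For reference, each extended capability header packs the ID, version and next-pointer into one 32-bit word; the small self-contained sketch below decodes one such header, with local macros that mirror the kernel's PCI_EXT_CAP_* helpers and a purely illustrative value.

#include <stdint.h>
#include <stdio.h>

/*
 * A PCIe extended capability header: [15:0] capability ID,
 * [19:16] version, [31:20] offset of the next capability
 * (0 terminates the list).
 */
#define EXT_CAP_ID(h)   ((h) & 0x0000ffff)
#define EXT_CAP_VER(h)  (((h) >> 16) & 0xf)
#define EXT_CAP_NEXT(h) (((h) >> 20) & 0xffc)

int main(void)
{
	uint32_t header = 0x14810001;	/* example: AER cap, v1, next at 0x148 */

	printf("id=0x%04x ver=%u next=0x%03x\n",
	       EXT_CAP_ID(header), EXT_CAP_VER(header), EXT_CAP_NEXT(header));
	return 0;
}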
+
/**
- * powernv_eeh_dev_probe - Do probe on PCI device
- * @dev: PCI device
- * @flag: unused
+ * pnv_eeh_probe - Do probe on PCI device
+ * @pdn: PCI device node
+ * @data: unused
*
* When EEH module is installed during system boot, all PCI devices
 * are checked one by one to see if they support EEH. The function
@@ -129,12 +375,12 @@ static int powernv_eeh_post_init(void)
 * was possibly triggered by EEH core, the binding between EEH device
* and the PCI device isn't built yet.
*/
-static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
+static void *pnv_eeh_probe(struct pci_dn *pdn, void *data)
{
- struct pci_controller *hose = pci_bus_to_host(dev->bus);
+ struct pci_controller *hose = pdn->phb;
struct pnv_phb *phb = hose->private_data;
- struct device_node *dn = pci_device_to_OF_node(dev);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ uint32_t pcie_flags;
int ret;
/*
@@ -143,40 +389,42 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* the root bridge. So it's not reasonable to continue
* the probing.
*/
- if (!dn || !edev || edev->pe)
- return 0;
+ if (!edev || edev->pe)
+ return NULL;
/* Skip for PCI-ISA bridge */
- if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
- return 0;
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ return NULL;
/* Initialize eeh device */
- edev->class_code = dev->class;
+ edev->class_code = pdn->class_code;
edev->mode &= 0xFFFFFF00;
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ edev->pcix_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pnv_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pnv_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
+ if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
- edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
- if (pci_is_pcie(dev)) {
- edev->pcie_cap = pci_pcie_cap(dev);
-
- if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
- edev->mode |= EEH_DEV_ROOT_PORT;
- else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
- edev->mode |= EEH_DEV_DS_PORT;
-
- edev->aer_cap = pci_find_ext_capability(dev,
- PCI_EXT_CAP_ID_ERR);
+ if (edev->pcie_cap) {
+ pnv_pci_cfg_read(pdn, edev->pcie_cap + PCI_EXP_FLAGS,
+ 2, &pcie_flags);
+ pcie_flags = (pcie_flags & PCI_EXP_FLAGS_TYPE) >> 4;
+ if (pcie_flags == PCI_EXP_TYPE_ROOT_PORT)
+ edev->mode |= EEH_DEV_ROOT_PORT;
+ else if (pcie_flags == PCI_EXP_TYPE_DOWNSTREAM)
+ edev->mode |= EEH_DEV_DS_PORT;
+ }
}
- edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
- edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
+ edev->config_addr = (pdn->busno << 8) | (pdn->devfn);
+ edev->pe_config_addr = phb->ioda.pe_rmap[edev->config_addr];
/* Create PE */
ret = eeh_add_to_parent_pe(edev);
if (ret) {
- pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
- __func__, pci_name(dev), ret);
- return ret;
+ pr_warn("%s: Can't add PCI dev %04x:%02x:%02x.%01x to parent PE (%d)\n",
+ __func__, hose->global_number, pdn->busno,
+ PCI_SLOT(pdn->devfn), PCI_FUNC(pdn->devfn), ret);
+ return NULL;
}
/*
@@ -195,8 +443,10 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* Broadcom Austin 4-ports NICs (14e4:1657)
* Broadcom Shiner 2-ports 10G NICs (14e4:168e)
*/
- if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
- (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
+ if ((pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x1657) ||
+ (pdn->vendor_id == PCI_VENDOR_ID_BROADCOM &&
+ pdn->device_id == 0x168e))
edev->pe->state |= EEH_PE_CFG_RESTRICTED;
/*
@@ -206,7 +456,8 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
* to PE reset.
*/
if (!edev->pe->bus)
- edev->pe->bus = dev->bus;
+ edev->pe->bus = pci_find_bus(hose->global_number,
+ pdn->busno);
/*
* Enable EEH explicitly so that we will do EEH check
@@ -217,11 +468,11 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
/* Save memory bars */
eeh_save_bars(edev);
- return 0;
+ return NULL;
}
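
Because the probe no longer has a struct pci_dev, it cannot call pci_pcie_type() and instead extracts the device/port type by hand from the PCI Express Capabilities register: the type field sits in bits 7:4 (PCI_EXP_FLAGS_TYPE), hence the shift by 4. A self-contained sketch of that decoding, with the constants copied here only for illustration (the kernel gets them from uapi/linux/pci_regs.h):

#include <stdint.h>
#include <stdio.h>

#define EXP_FLAGS_TYPE      0x00f0	/* device/port type, bits 7:4 */
#define EXP_TYPE_ROOT_PORT  0x4
#define EXP_TYPE_DOWNSTREAM 0x6

int main(void)
{
	uint16_t pcie_flags = 0x0042;	/* example value of PCI_EXP_FLAGS */
	unsigned int type = (pcie_flags & EXP_FLAGS_TYPE) >> 4;

	if (type == EXP_TYPE_ROOT_PORT)
		printf("root port\n");
	else if (type == EXP_TYPE_DOWNSTREAM)
		printf("downstream switch port\n");
	else
		printf("other PCIe function (type %u)\n", type);
	return 0;
}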
/**
- * powernv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
+ * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
* @pe: EEH PE
* @option: operation to be issued
*
@@ -229,36 +480,236 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
 * Currently, the following options are supported according to PAPR:
* Enable EEH, Disable EEH, Enable MMIO and Enable DMA
*/
-static int powernv_eeh_set_option(struct eeh_pe *pe, int option)
+static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ bool freeze_pe = false;
+ int opt, ret = 0;
+ s64 rc;
+
+ /* Sanity check on option */
+ switch (option) {
+ case EEH_OPT_DISABLE:
+ return -EPERM;
+ case EEH_OPT_ENABLE:
+ return 0;
+ case EEH_OPT_THAW_MMIO:
+ opt = OPAL_EEH_ACTION_CLEAR_FREEZE_MMIO;
+ break;
+ case EEH_OPT_THAW_DMA:
+ opt = OPAL_EEH_ACTION_CLEAR_FREEZE_DMA;
+ break;
+ case EEH_OPT_FREEZE_PE:
+ freeze_pe = true;
+ opt = OPAL_EEH_ACTION_SET_FREEZE_ALL;
+ break;
+ default:
+ pr_warn("%s: Invalid option %d\n", __func__, option);
+ return -EINVAL;
+ }
- /*
- * What we need do is pass it down for hardware
- * implementation to handle it.
- */
- if (phb->eeh_ops && phb->eeh_ops->set_option)
- ret = phb->eeh_ops->set_option(pe, option);
+ /* If PHB supports compound PE, to handle it */
+ if (freeze_pe) {
+ if (phb->freeze_pe) {
+ phb->freeze_pe(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_set(phb->opal_id,
+ pe->addr, opt);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld freezing "
+ "PHB#%x-PE#%x\n",
+ __func__, rc,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
+ } else {
+ if (phb->unfreeze_pe) {
+ ret = phb->unfreeze_pe(phb, pe->addr, opt);
+ } else {
+ rc = opal_pci_eeh_freeze_clear(phb->opal_id,
+ pe->addr, opt);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld enable %d "
+ "for PHB#%x-PE#%x\n",
+ __func__, rc, option,
+ phb->hose->global_number, pe->addr);
+ ret = -EIO;
+ }
+ }
+ }
return ret;
}
/**
- * powernv_eeh_get_pe_addr - Retrieve PE address
+ * pnv_eeh_get_pe_addr - Retrieve PE address
* @pe: EEH PE
*
 * Retrieve the PE address according to the given traditional
* PCI BDF (Bus/Device/Function) address.
*/
-static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
+static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
{
return pe->addr;
}
+static void pnv_eeh_get_phb_diag(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ s64 rc;
+
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, pe->data,
+ PNV_PCI_DIAG_BUF_SIZE);
+ if (rc != OPAL_SUCCESS)
+ pr_warn("%s: Failure %lld getting PHB#%x diag-data\n",
+ __func__, rc, pe->phb->global_number);
+}
+
+static int pnv_eeh_get_phb_state(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ u8 fstate;
+ __be16 pcierr;
+ s64 rc;
+ int result = 0;
+
+ rc = opal_pci_eeh_freeze_status(phb->opal_id,
+ pe->addr,
+ &fstate,
+ &pcierr,
+ NULL);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld getting PHB#%x state\n",
+ __func__, rc, phb->hose->global_number);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+
+ /*
+	 * Check the PHB state. If the PHB is frozen for the
+	 * first time, dump the PHB diag-data.
+ */
+ if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ } else if (!(pe->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ }
+
+ return result;
+}
+
+static int pnv_eeh_get_pe_state(struct eeh_pe *pe)
+{
+ struct pnv_phb *phb = pe->phb->private_data;
+ u8 fstate;
+ __be16 pcierr;
+ s64 rc;
+ int result;
+
+ /*
+ * We don't clobber hardware frozen state until PE
+ * reset is completed. In order to keep EEH core
+ * moving forward, we have to return operational
+ * state during PE reset.
+ */
+ if (pe->state & EEH_PE_RESET) {
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ return result;
+ }
+
+ /*
+ * Fetch PE state from hardware. If the PHB
+ * supports compound PE, let it handle that.
+ */
+ if (phb->get_pe_state) {
+ fstate = phb->get_pe_state(phb, pe->addr);
+ } else {
+ rc = opal_pci_eeh_freeze_status(phb->opal_id,
+ pe->addr,
+ &fstate,
+ &pcierr,
+ NULL);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld getting PHB#%x-PE%x state\n",
+ __func__, rc, phb->hose->global_number,
+ pe->addr);
+ return EEH_STATE_NOT_SUPPORT;
+ }
+ }
+
+ /* Figure out state */
+ switch (fstate) {
+ case OPAL_EEH_STOPPED_NOT_FROZEN:
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_MMIO_ENABLED |
+ EEH_STATE_DMA_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_MMIO_FREEZE:
+ result = (EEH_STATE_DMA_ACTIVE |
+ EEH_STATE_DMA_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_DMA_FREEZE:
+ result = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_MMIO_ENABLED);
+ break;
+ case OPAL_EEH_STOPPED_MMIO_DMA_FREEZE:
+ result = 0;
+ break;
+ case OPAL_EEH_STOPPED_RESET:
+ result = EEH_STATE_RESET_ACTIVE;
+ break;
+ case OPAL_EEH_STOPPED_TEMP_UNAVAIL:
+ result = EEH_STATE_UNAVAILABLE;
+ break;
+ case OPAL_EEH_STOPPED_PERM_UNAVAIL:
+ result = EEH_STATE_NOT_SUPPORT;
+ break;
+ default:
+ result = EEH_STATE_NOT_SUPPORT;
+ pr_warn("%s: Invalid PHB#%x-PE#%x state %x\n",
+ __func__, phb->hose->global_number,
+ pe->addr, fstate);
+ }
+
+ /*
+	 * If the PHB supports compound PEs, freeze all
+	 * slave PEs for consistency.
+	 *
+	 * If the PE is switching to the frozen state for the
+	 * first time, dump the PHB diag-data.
+ */
+ if (!(result & EEH_STATE_NOT_SUPPORT) &&
+ !(result & EEH_STATE_UNAVAILABLE) &&
+ !(result & EEH_STATE_MMIO_ACTIVE) &&
+ !(result & EEH_STATE_DMA_ACTIVE) &&
+ !(pe->state & EEH_PE_ISOLATED)) {
+ if (phb->freeze_pe)
+ phb->freeze_pe(phb, pe->addr);
+
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+ }
+
+ return result;
+}
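
The compound condition guarding the freeze and dump at the end of this function reads more easily as a predicate: only act when the reported state is trustworthy, the PE is frozen for both MMIO and DMA, and it has not already been isolated. Purely as a reading aid, not a helper the patch introduces:

/* Reading aid: mirrors the condition that triggers the diag-data dump. */
static bool example_pe_newly_frozen(int result, int pe_state)
{
	return !(result & EEH_STATE_NOT_SUPPORT) &&
	       !(result & EEH_STATE_UNAVAILABLE) &&
	       !(result & EEH_STATE_MMIO_ACTIVE) &&
	       !(result & EEH_STATE_DMA_ACTIVE) &&
	       !(pe_state & EEH_PE_ISOLATED);
}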
+
/**
- * powernv_eeh_get_state - Retrieve PE state
+ * pnv_eeh_get_state - Retrieve PE state
* @pe: EEH PE
* @delay: delay while PE state is temporarily unavailable
*
@@ -267,64 +718,279 @@ static int powernv_eeh_get_pe_addr(struct eeh_pe *pe)
* we prefer passing down to hardware implementation to handle
* it.
*/
-static int powernv_eeh_get_state(struct eeh_pe *pe, int *delay)
+static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
+{
+ int ret;
+
+ if (pe->type & EEH_PE_PHB)
+ ret = pnv_eeh_get_phb_state(pe);
+ else
+ ret = pnv_eeh_get_pe_state(pe);
+
+ if (!delay)
+ return ret;
+
+ /*
+	 * If the PE state is temporarily unavailable,
+	 * tell the EEH core to delay for the default
+	 * period (1 second).
+ */
+ *delay = 0;
+ if (ret & EEH_STATE_UNAVAILABLE)
+ *delay = 1000;
+
+ return ret;
+}
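
The optional @delay output lets callers distinguish "the state is temporarily unavailable, poll again later" from a hard failure; pnv_eeh_wait_state() below consumes it in exactly that way. As a rough sketch of the pattern, with simplified bound handling and not the real wait_state body:

/* Sketch of a wait loop driven by the delay hint. Illustrative only. */
static int example_wait_for_pe(struct eeh_pe *pe, int max_wait_ms)
{
	int state, delay;

	while (max_wait_ms > 0) {
		state = pnv_eeh_get_state(pe, &delay);
		if (!(state & EEH_STATE_UNAVAILABLE))
			return state;		/* settled, report it */

		msleep(delay);			/* firmware asked us to wait */
		max_wait_ms -= delay;
	}

	return EEH_STATE_NOT_SUPPORT;		/* gave up */
}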
+
+static s64 pnv_eeh_phb_poll(struct pnv_phb *phb)
+{
+ s64 rc = OPAL_HARDWARE;
+
+ while (1) {
+ rc = opal_pci_poll(phb->opal_id);
+ if (rc <= 0)
+ break;
+
+ if (system_state < SYSTEM_RUNNING)
+ udelay(1000 * rc);
+ else
+ msleep(rc);
+ }
+
+ return rc;
+}
+
+int pnv_eeh_phb_reset(struct pci_controller *hose, int option)
{
- struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = EEH_STATE_NOT_SUPPORT;
+ s64 rc = OPAL_HARDWARE;
+
+ pr_debug("%s: Reset PHB#%x, option=%d\n",
+ __func__, hose->global_number, option);
+
+ /* Issue PHB complete reset request */
+ if (option == EEH_RESET_FUNDAMENTAL ||
+ option == EEH_RESET_HOT)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_COMPLETE,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_DEACTIVATE)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_COMPLETE,
+ OPAL_DEASSERT_RESET);
+ if (rc < 0)
+ goto out;
- if (phb->eeh_ops && phb->eeh_ops->get_state) {
- ret = phb->eeh_ops->get_state(pe);
+ /*
+ * Poll state of the PHB until the request is done
+ * successfully. The PHB reset is usually PHB complete
+ * reset followed by hot reset on root bus. So we also
+ * need the PCI bus settlement delay.
+ */
+ rc = pnv_eeh_phb_poll(phb);
+ if (option == EEH_RESET_DEACTIVATE) {
+ if (system_state < SYSTEM_RUNNING)
+ udelay(1000 * EEH_PE_RST_SETTLE_TIME);
+ else
+ msleep(EEH_PE_RST_SETTLE_TIME);
+ }
+out:
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
- /*
- * If the PE state is temporarily unavailable,
-	 * tell the EEH core to delay for the default
-	 * period (1 second).
- */
- if (delay) {
- *delay = 0;
- if (ret & EEH_STATE_UNAVAILABLE)
- *delay = 1000;
+ return 0;
+}
+
+static int pnv_eeh_root_reset(struct pci_controller *hose, int option)
+{
+ struct pnv_phb *phb = hose->private_data;
+ s64 rc = OPAL_HARDWARE;
+
+ pr_debug("%s: Reset PHB#%x, option=%d\n",
+ __func__, hose->global_number, option);
+
+ /*
+	 * During the reset deassert phase, we needn't care about
+	 * the reset scope because the firmware does nothing for
+	 * fundamental or hot reset during that phase.
+ */
+ if (option == EEH_RESET_FUNDAMENTAL)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_FUNDAMENTAL,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_HOT)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_HOT,
+ OPAL_ASSERT_RESET);
+ else if (option == EEH_RESET_DEACTIVATE)
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PCI_HOT,
+ OPAL_DEASSERT_RESET);
+ if (rc < 0)
+ goto out;
+
+ /* Poll state of the PHB until the request is done */
+ rc = pnv_eeh_phb_poll(phb);
+ if (option == EEH_RESET_DEACTIVATE)
+ msleep(EEH_PE_RST_SETTLE_TIME);
+out:
+ if (rc != OPAL_SUCCESS)
+ return -EIO;
+
+ return 0;
+}
+
+static int pnv_eeh_bridge_reset(struct pci_dev *dev, int option)
+{
+ struct pci_dn *pdn = pci_get_pdn_by_devfn(dev->bus, dev->devfn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
+ int aer = edev ? edev->aer_cap : 0;
+ u32 ctrl;
+
+ pr_debug("%s: Reset PCI bus %04x:%02x with option %d\n",
+ __func__, pci_domain_nr(dev->bus),
+ dev->bus->number, option);
+
+ switch (option) {
+ case EEH_RESET_FUNDAMENTAL:
+ case EEH_RESET_HOT:
+ /* Don't report linkDown event */
+ if (aer) {
+ eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, &ctrl);
+ ctrl |= PCI_ERR_UNC_SURPDN;
+ eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, ctrl);
}
+
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
+ eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+
+ msleep(EEH_PE_RST_HOLD_TIME);
+ break;
+ case EEH_RESET_DEACTIVATE:
+ eeh_ops->read_config(pdn, PCI_BRIDGE_CONTROL, 2, &ctrl);
+ ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
+ eeh_ops->write_config(pdn, PCI_BRIDGE_CONTROL, 2, ctrl);
+
+ msleep(EEH_PE_RST_SETTLE_TIME);
+
+ /* Continue reporting linkDown event */
+ if (aer) {
+ eeh_ops->read_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, &ctrl);
+ ctrl &= ~PCI_ERR_UNC_SURPDN;
+ eeh_ops->write_config(pdn, aer + PCI_ERR_UNCOR_MASK,
+ 4, ctrl);
+ }
+
+ break;
}
- return ret;
+ return 0;
+}
+
+void pnv_pci_reset_secondary_bus(struct pci_dev *dev)
+{
+ struct pci_controller *hose;
+
+ if (pci_is_root_bus(dev->bus)) {
+ hose = pci_bus_to_host(dev->bus);
+ pnv_eeh_root_reset(hose, EEH_RESET_HOT);
+ pnv_eeh_root_reset(hose, EEH_RESET_DEACTIVATE);
+ } else {
+ pnv_eeh_bridge_reset(dev, EEH_RESET_HOT);
+ pnv_eeh_bridge_reset(dev, EEH_RESET_DEACTIVATE);
+ }
}
/**
- * powernv_eeh_reset - Reset the specified PE
+ * pnv_eeh_reset - Reset the specified PE
* @pe: EEH PE
* @option: reset option
*
- * Reset the specified PE
+ * Do reset on the indicated PE. For PCI bus sensitive PE,
+ * we need to reset the parent p2p bridge. The PHB has to
+ * be reinitialized if the p2p bridge is root bridge. For
+ * PCI device sensitive PE, we will try to reset the device
+ * through FLR. For now, we don't have OPAL APIs to do HARD
+ * reset yet, so all reset would be SOFT (HOT) reset.
*/
-static int powernv_eeh_reset(struct eeh_pe *pe, int option)
+static int pnv_eeh_reset(struct eeh_pe *pe, int option)
{
struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ struct pci_bus *bus;
+ int ret;
+
+ /*
+	 * For PHB reset, we always do a complete reset. For those PEs whose
+	 * primary bus is derived from the root complex (root bus) or root port
+	 * (usually bus#1), we apply hot or fundamental reset on the root port.
+	 * For other PEs, we always do a hot reset on the PE primary bus.
+	 *
+	 * Here, our design differs from pHyp, which always clears the
+	 * frozen state during PE reset. However, the good idea here from
+	 * benh is to keep the frozen state until the PE reset is done
+	 * completely (until BAR restore). With the frozen state, the HW drops
+	 * illegal IO or MMIO accesses, which could otherwise incur recursive
+	 * frozen PEs during PE reset. The side effect is that the EEH core
+	 * has to clear the frozen state explicitly after BAR restore.
+ */
+ if (pe->type & EEH_PE_PHB) {
+ ret = pnv_eeh_phb_reset(hose, option);
+ } else {
+ struct pnv_phb *phb;
+ s64 rc;
- if (phb->eeh_ops && phb->eeh_ops->reset)
- ret = phb->eeh_ops->reset(pe, option);
+ /*
+ * The frozen PE might be caused by PAPR error injection
+ * registers, which are expected to be cleared after hitting
+ * frozen PE as stated in the hardware spec. Unfortunately,
+ * that's not true on P7IOC. So we have to clear it manually
+ * to avoid recursive EEH errors during recovery.
+ */
+ phb = hose->private_data;
+ if (phb->model == PNV_PHB_MODEL_P7IOC &&
+ (option == EEH_RESET_HOT ||
+ option == EEH_RESET_FUNDAMENTAL)) {
+ rc = opal_pci_reset(phb->opal_id,
+ OPAL_RESET_PHB_ERROR,
+ OPAL_ASSERT_RESET);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld clearing "
+ "error injection registers\n",
+ __func__, rc);
+ return -EIO;
+ }
+ }
+
+ bus = eeh_pe_bus_get(pe);
+ if (pci_is_root_bus(bus) ||
+ pci_is_root_bus(bus->parent))
+ ret = pnv_eeh_root_reset(hose, option);
+ else
+ ret = pnv_eeh_bridge_reset(bus->self, option);
+ }
return ret;
}
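
As the comment above explains, keeping the PE frozen across the reset pushes work onto the caller: only after the reset has been deasserted and the BARs restored may the frozen state be cleared. A hedged sketch of that ordering from the caller's side, where the wrapper name is invented and eeh_pe_restore_bars() and the EEH_RESET_*/EEH_OPT_* values are the EEH core's existing interfaces:

/*
 * Sketch of the caller-side ordering implied by the comment above.
 * Illustrative only -- the real sequencing lives in the EEH core.
 */
static int example_reset_and_thaw(struct eeh_pe *pe)
{
	int ret;

	ret = eeh_ops->reset(pe, EEH_RESET_HOT);	/* assert */
	if (ret)
		return ret;
	ret = eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);	/* deassert */
	if (ret)
		return ret;

	eeh_pe_restore_bars(pe);	/* bring config space back */

	/* Only now is it safe to drop the frozen state kept across reset */
	ret = eeh_ops->set_option(pe, EEH_OPT_THAW_MMIO);
	if (!ret)
		ret = eeh_ops->set_option(pe, EEH_OPT_THAW_DMA);
	return ret;
}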
/**
- * powernv_eeh_wait_state - Wait for PE state
+ * pnv_eeh_wait_state - Wait for PE state
* @pe: EEH PE
* @max_wait: maximal period in microsecond
*
* Wait for the state of associated PE. It might take some time
* to retrieve the PE's state.
*/
-static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
+static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
{
int ret;
int mwait;
while (1) {
- ret = powernv_eeh_get_state(pe, &mwait);
+ ret = pnv_eeh_get_state(pe, &mwait);
/*
* If the PE's state is temporarily unavailable,
@@ -348,7 +1014,7 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
}
/**
- * powernv_eeh_get_log - Retrieve error log
+ * pnv_eeh_get_log - Retrieve error log
* @pe: EEH PE
* @severity: temporary or permanent error log
* @drv_log: driver log to be combined with retrieved error log
@@ -356,41 +1022,30 @@ static int powernv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
*
* Retrieve the temporary or permanent error from the PE.
*/
-static int powernv_eeh_get_log(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len)
+static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
+ char *drv_log, unsigned long len)
{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
- if (phb->eeh_ops && phb->eeh_ops->get_log)
- ret = phb->eeh_ops->get_log(pe, severity, drv_log, len);
-
- return ret;
+ return 0;
}
/**
- * powernv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
+ * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
* @pe: EEH PE
*
* The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
* again.
*/
-static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
+static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
{
- struct pci_controller *hose = pe->phb;
- struct pnv_phb *phb = hose->private_data;
- int ret = 0;
-
- if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
- ret = phb->eeh_ops->configure_bridge(pe);
-
- return ret;
+ return 0;
}
/**
- * powernv_pe_err_inject - Inject specified error to the indicated PE
+ * pnv_pe_err_inject - Inject specified error to the indicated PE
* @pe: the indicated PE
* @type: error type
* @func: specific error type
@@ -401,22 +1056,52 @@ static int powernv_eeh_configure_bridge(struct eeh_pe *pe)
* determined by @type and @func, to the indicated PE for
* testing purpose.
*/
-static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask)
+static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
+ unsigned long addr, unsigned long mask)
{
struct pci_controller *hose = pe->phb;
struct pnv_phb *phb = hose->private_data;
- int ret = -EEXIST;
+ s64 rc;
+
+ /* Sanity check on error type */
+ if (type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR &&
+ type != OPAL_ERR_INJECT_TYPE_IOA_BUS_ERR64) {
+ pr_warn("%s: Invalid error type %d\n",
+ __func__, type);
+ return -ERANGE;
+ }
- if (phb->eeh_ops && phb->eeh_ops->err_inject)
- ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
+ if (func < OPAL_ERR_INJECT_FUNC_IOA_LD_MEM_ADDR ||
+ func > OPAL_ERR_INJECT_FUNC_IOA_DMA_WR_TARGET) {
+ pr_warn("%s: Invalid error function %d\n",
+ __func__, func);
+ return -ERANGE;
+ }
- return ret;
+	/* Does the firmware support error injection? */
+ if (!opal_check_token(OPAL_PCI_ERR_INJECT)) {
+ pr_warn("%s: Firmware doesn't support error injection\n",
+ __func__);
+ return -ENXIO;
+ }
+
+ /* Do error injection */
+ rc = opal_pci_err_inject(phb->opal_id, pe->addr,
+ type, func, addr, mask);
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failure %lld injecting error "
+ "%d-%d to PHB#%x-PE#%x\n",
+ __func__, rc, type, func,
+ hose->global_number, pe->addr);
+ return -EIO;
+ }
+
+ return 0;
}
-static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
+static inline bool pnv_eeh_cfg_blocked(struct pci_dn *pdn)
{
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
if (!edev || !edev->pe)
return false;
@@ -427,51 +1112,377 @@ static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
return false;
}
-static int powernv_eeh_read_config(struct device_node *dn,
- int where, int size, u32 *val)
+static int pnv_eeh_read_config(struct pci_dn *pdn,
+ int where, int size, u32 *val)
{
- if (powernv_eeh_cfg_blocked(dn)) {
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pnv_eeh_cfg_blocked(pdn)) {
*val = 0xFFFFFFFF;
return PCIBIOS_SET_FAILED;
}
- return pnv_pci_cfg_read(dn, where, size, val);
+ return pnv_pci_cfg_read(pdn, where, size, val);
}
-static int powernv_eeh_write_config(struct device_node *dn,
- int where, int size, u32 val)
+static int pnv_eeh_write_config(struct pci_dn *pdn,
+ int where, int size, u32 val)
{
- if (powernv_eeh_cfg_blocked(dn))
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ if (pnv_eeh_cfg_blocked(pdn))
return PCIBIOS_SET_FAILED;
- return pnv_pci_cfg_write(dn, where, size, val);
+ return pnv_pci_cfg_write(pdn, where, size, val);
+}
+
+static void pnv_eeh_dump_hub_diag_common(struct OpalIoP7IOCErrorData *data)
+{
+ /* GEM */
+ if (data->gemXfir || data->gemRfir ||
+ data->gemRirqfir || data->gemMask || data->gemRwof)
+ pr_info(" GEM: %016llx %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->gemXfir),
+ be64_to_cpu(data->gemRfir),
+ be64_to_cpu(data->gemRirqfir),
+ be64_to_cpu(data->gemMask),
+ be64_to_cpu(data->gemRwof));
+
+ /* LEM */
+ if (data->lemFir || data->lemErrMask ||
+ data->lemAction0 || data->lemAction1 || data->lemWof)
+ pr_info(" LEM: %016llx %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->lemFir),
+ be64_to_cpu(data->lemErrMask),
+ be64_to_cpu(data->lemAction0),
+ be64_to_cpu(data->lemAction1),
+ be64_to_cpu(data->lemWof));
+}
+
+static void pnv_eeh_get_and_dump_hub_diag(struct pci_controller *hose)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
+ long rc;
+
+ rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
+ if (rc != OPAL_SUCCESS) {
+ pr_warn("%s: Failed to get HUB#%llx diag-data (%ld)\n",
+ __func__, phb->hub_id, rc);
+ return;
+ }
+
+ switch (data->type) {
+ case OPAL_P7IOC_DIAG_TYPE_RGC:
+ pr_info("P7IOC diag-data for RGC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->rgc.rgcStatus || data->rgc.rgcLdcp)
+ pr_info(" RGC: %016llx %016llx\n",
+ be64_to_cpu(data->rgc.rgcStatus),
+ be64_to_cpu(data->rgc.rgcLdcp));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_BI:
+ pr_info("P7IOC diag-data for BI %s\n\n",
+ data->bi.biDownbound ? "Downbound" : "Upbound");
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->bi.biLdcp0 || data->bi.biLdcp1 ||
+ data->bi.biLdcp2 || data->bi.biFenceStatus)
+ pr_info(" BI: %016llx %016llx %016llx %016llx\n",
+ be64_to_cpu(data->bi.biLdcp0),
+ be64_to_cpu(data->bi.biLdcp1),
+ be64_to_cpu(data->bi.biLdcp2),
+ be64_to_cpu(data->bi.biFenceStatus));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_CI:
+ pr_info("P7IOC diag-data for CI Port %d\n\n",
+ data->ci.ciPort);
+ pnv_eeh_dump_hub_diag_common(data);
+ if (data->ci.ciPortStatus || data->ci.ciPortLdcp)
+ pr_info(" CI: %016llx %016llx\n",
+ be64_to_cpu(data->ci.ciPortStatus),
+ be64_to_cpu(data->ci.ciPortLdcp));
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_MISC:
+ pr_info("P7IOC diag-data for MISC\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ break;
+ case OPAL_P7IOC_DIAG_TYPE_I2C:
+ pr_info("P7IOC diag-data for I2C\n\n");
+ pnv_eeh_dump_hub_diag_common(data);
+ break;
+ default:
+ pr_warn("%s: Invalid type of HUB#%llx diag-data (%d)\n",
+ __func__, phb->hub_id, data->type);
+ }
+}
+
+static int pnv_eeh_get_pe(struct pci_controller *hose,
+ u16 pe_no, struct eeh_pe **pe)
+{
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pnv_pe;
+ struct eeh_pe *dev_pe;
+ struct eeh_dev edev;
+
+ /*
+	 * If the PHB supports compound PEs, fetch
+	 * the master PE, because slave PEs are invisible
+	 * to the EEH core.
+ */
+ pnv_pe = &phb->ioda.pe_array[pe_no];
+ if (pnv_pe->flags & PNV_IODA_PE_SLAVE) {
+ pnv_pe = pnv_pe->master;
+ WARN_ON(!pnv_pe ||
+ !(pnv_pe->flags & PNV_IODA_PE_MASTER));
+ pe_no = pnv_pe->pe_number;
+ }
+
+ /* Find the PE according to PE# */
+ memset(&edev, 0, sizeof(struct eeh_dev));
+ edev.phb = hose;
+ edev.pe_config_addr = pe_no;
+ dev_pe = eeh_pe_get(&edev);
+ if (!dev_pe)
+ return -EEXIST;
+
+ /* Freeze the (compound) PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, pe_no);
+
+ /*
+	 * At this point, we're sure the (compound) PE should
+	 * have been frozen. However, we still need to poke upwards
+	 * until we hit the topmost frozen PE.
+ */
+ dev_pe = dev_pe->parent;
+ while (dev_pe && !(dev_pe->type & EEH_PE_PHB)) {
+ int ret;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE |
+ EEH_STATE_DMA_ACTIVE);
+
+ ret = eeh_ops->get_state(dev_pe, NULL);
+ if (ret <= 0 || (ret & active_flags) == active_flags) {
+ dev_pe = dev_pe->parent;
+ continue;
+ }
+
+ /* Frozen parent PE */
+ *pe = dev_pe;
+ if (!(dev_pe->state & EEH_PE_ISOLATED))
+ phb->freeze_pe(phb, dev_pe->addr);
+
+ /* Next one */
+ dev_pe = dev_pe->parent;
+ }
+
+ return 0;
}
/**
- * powernv_eeh_next_error - Retrieve next EEH error to handle
+ * pnv_eeh_next_error - Retrieve next EEH error to handle
* @pe: Affected PE
*
- * Using OPAL API, to retrieve next EEH error for EEH core to handle
+ * The function is expected to be called by EEH core while it gets
+ * special EEH event (without binding PE). The function calls to
+ * OPAL APIs for next error to handle. The informational error is
+ * handled internally by platform. However, the dead IOC, dead PHB,
+ * fenced PHB and frozen PE should be handled by EEH core eventually.
*/
-static int powernv_eeh_next_error(struct eeh_pe **pe)
+static int pnv_eeh_next_error(struct eeh_pe **pe)
{
struct pci_controller *hose;
- struct pnv_phb *phb = NULL;
+ struct pnv_phb *phb;
+ struct eeh_pe *phb_pe, *parent_pe;
+ __be64 frozen_pe_no;
+ __be16 err_type, severity;
+ int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
+ long rc;
+ int state, ret = EEH_NEXT_ERR_NONE;
+
+ /*
+ * While running here, it's safe to purge the event queue.
+	 * And we should keep the cached OPAL notifier event synchronized
+ * between the kernel and firmware.
+ */
+ eeh_remove_event(NULL, false);
+ opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
list_for_each_entry(hose, &hose_list, list_node) {
+ /*
+		 * If the subordinate PCI buses of the PHB have been
+		 * removed or are currently undergoing error recovery,
+		 * we needn't take care of it any more.
+ */
phb = hose->private_data;
- break;
- }
+ phb_pe = eeh_phb_pe_get(hose);
+ if (!phb_pe || (phb_pe->state & EEH_PE_ISOLATED))
+ continue;
+
+ rc = opal_pci_next_error(phb->opal_id,
+ &frozen_pe_no, &err_type, &severity);
+ if (rc != OPAL_SUCCESS) {
+ pr_devel("%s: Invalid return value on "
+ "PHB#%x (0x%lx) from opal_pci_next_error",
+ __func__, hose->global_number, rc);
+ continue;
+ }
+
+ /* If the PHB doesn't have error, stop processing */
+ if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
+ be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
+ pr_devel("%s: No error found on PHB#%x\n",
+ __func__, hose->global_number);
+ continue;
+ }
+
+ /*
+		 * Process the error. When multiple errors are present
+		 * on the specific PHB, we expect the one with the highest
+		 * priority to be reported.
+ */
+ pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
+ __func__, be16_to_cpu(err_type),
+ be16_to_cpu(severity), be64_to_cpu(frozen_pe_no),
+ hose->global_number);
+ switch (be16_to_cpu(err_type)) {
+ case OPAL_EEH_IOC_ERROR:
+ if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
+ pr_err("EEH: dead IOC detected\n");
+ ret = EEH_NEXT_ERR_DEAD_IOC;
+ } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: IOC informative error "
+ "detected\n");
+ pnv_eeh_get_and_dump_hub_diag(hose);
+ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+ case OPAL_EEH_PHB_ERROR:
+ if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
+ *pe = phb_pe;
+ pr_err("EEH: dead PHB#%x detected, "
+ "location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_DEAD_PHB;
+ } else if (be16_to_cpu(severity) ==
+ OPAL_EEH_SEV_PHB_FENCED) {
+ *pe = phb_pe;
+ pr_err("EEH: Fenced PHB#%x detected, "
+ "location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_FENCED_PHB;
+ } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
+ pr_info("EEH: PHB#%x informative error "
+ "detected, location: %s\n",
+ hose->global_number,
+ eeh_pe_loc_get(phb_pe));
+ pnv_eeh_get_phb_diag(phb_pe);
+ pnv_pci_dump_phb_diag_data(hose, phb_pe->data);
+ ret = EEH_NEXT_ERR_NONE;
+ }
+
+ break;
+ case OPAL_EEH_PE_ERROR:
+ /*
+ * If we can't find the corresponding PE, we
+ * just try to unfreeze.
+ */
+ if (pnv_eeh_get_pe(hose,
+ be64_to_cpu(frozen_pe_no), pe)) {
+ /* Try best to clear it */
+ pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
+ hose->global_number, frozen_pe_no);
+ pr_info("EEH: PHB location: %s\n",
+ eeh_pe_loc_get(phb_pe));
+ opal_pci_eeh_freeze_clear(phb->opal_id,
+ frozen_pe_no,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+ ret = EEH_NEXT_ERR_NONE;
+ } else if ((*pe)->state & EEH_PE_ISOLATED ||
+ eeh_pe_passed(*pe)) {
+ ret = EEH_NEXT_ERR_NONE;
+ } else {
+ pr_err("EEH: Frozen PE#%x "
+ "on PHB#%x detected\n",
+ (*pe)->addr,
+ (*pe)->phb->global_number);
+ pr_err("EEH: PE location: %s, "
+ "PHB location: %s\n",
+ eeh_pe_loc_get(*pe),
+ eeh_pe_loc_get(phb_pe));
+ ret = EEH_NEXT_ERR_FROZEN_PE;
+ }
+
+ break;
+ default:
+ pr_warn("%s: Unexpected error type %d\n",
+ __func__, be16_to_cpu(err_type));
+ }
- if (phb && phb->eeh_ops->next_error)
- return phb->eeh_ops->next_error(pe);
+ /*
+		 * The EEH core will try to recover from a fenced PHB or a
+		 * frozen PE. For a frozen PE, the EEH core enables the IO
+		 * path before collecting logs, which ruins the error site.
+		 * So we have to dump the log in advance here.
+ */
+ if ((ret == EEH_NEXT_ERR_FROZEN_PE ||
+ ret == EEH_NEXT_ERR_FENCED_PHB) &&
+ !((*pe)->state & EEH_PE_ISOLATED)) {
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ pnv_eeh_get_phb_diag(*pe);
+
+ if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+ pnv_pci_dump_phb_diag_data((*pe)->phb,
+ (*pe)->data);
+ }
- return -EEXIST;
+ /*
+		 * We probably have a frozen parent PE out there and
+		 * we have to handle the frozen parent PE first.
+ */
+ if (ret == EEH_NEXT_ERR_FROZEN_PE) {
+ parent_pe = (*pe)->parent;
+ while (parent_pe) {
+ /* Hit the ceiling ? */
+ if (parent_pe->type & EEH_PE_PHB)
+ break;
+
+ /* Frozen parent PE ? */
+ state = eeh_ops->get_state(parent_pe, NULL);
+ if (state > 0 &&
+ (state & active_flags) != active_flags)
+ *pe = parent_pe;
+
+ /* Next parent level */
+ parent_pe = parent_pe->parent;
+ }
+
+ /* We possibly migrate to another PE */
+ eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
+ }
+
+ /*
+ * If there are no errors on this PHB, or only informative
+ * errors, keep polling it. Otherwise the upper layer needs
+ * to take action.
+ */
+ if (ret > EEH_NEXT_ERR_INF)
+ break;
+ }
+
+ return ret;
}
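The loop exit test just above (ret > EEH_NEXT_ERR_INF) assumes the EEH_NEXT_ERR_* constants are ordered by increasing severity. A minimal standalone sketch of that assumed ordering; the values here are illustrative only, the authoritative definitions live in asm/eeh.h:

/* Illustrative only: assumed severity ordering of the EEH_NEXT_ERR_*
 * constants; see asm/eeh.h for the real definitions.
 */
#include <stdio.h>

enum {
	EEH_NEXT_ERR_NONE = 0,		/* nothing pending, keep polling      */
	EEH_NEXT_ERR_INF,		/* informative only, keep polling     */
	EEH_NEXT_ERR_FROZEN_PE,		/* anything above INF needs recovery  */
	EEH_NEXT_ERR_FENCED_PHB,
	EEH_NEXT_ERR_DEAD_PHB,
	EEH_NEXT_ERR_DEAD_IOC,
};

int main(void)
{
	int ret = EEH_NEXT_ERR_FENCED_PHB;

	/* Mirrors the exit condition in pnv_eeh_next_error() */
	if (ret > EEH_NEXT_ERR_INF)
		printf("stop polling, hand %d to the EEH core\n", ret);
	return 0;
}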
-static int powernv_eeh_restore_config(struct device_node *dn)
+static int pnv_eeh_restore_config(struct pci_dn *pdn)
{
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
struct pnv_phb *phb;
s64 ret;
@@ -490,24 +1501,23 @@ static int powernv_eeh_restore_config(struct device_node *dn)
return 0;
}
-static struct eeh_ops powernv_eeh_ops = {
+static struct eeh_ops pnv_eeh_ops = {
.name = "powernv",
- .init = powernv_eeh_init,
- .post_init = powernv_eeh_post_init,
- .of_probe = NULL,
- .dev_probe = powernv_eeh_dev_probe,
- .set_option = powernv_eeh_set_option,
- .get_pe_addr = powernv_eeh_get_pe_addr,
- .get_state = powernv_eeh_get_state,
- .reset = powernv_eeh_reset,
- .wait_state = powernv_eeh_wait_state,
- .get_log = powernv_eeh_get_log,
- .configure_bridge = powernv_eeh_configure_bridge,
- .err_inject = powernv_eeh_err_inject,
- .read_config = powernv_eeh_read_config,
- .write_config = powernv_eeh_write_config,
- .next_error = powernv_eeh_next_error,
- .restore_config = powernv_eeh_restore_config
+ .init = pnv_eeh_init,
+ .post_init = pnv_eeh_post_init,
+ .probe = pnv_eeh_probe,
+ .set_option = pnv_eeh_set_option,
+ .get_pe_addr = pnv_eeh_get_pe_addr,
+ .get_state = pnv_eeh_get_state,
+ .reset = pnv_eeh_reset,
+ .wait_state = pnv_eeh_wait_state,
+ .get_log = pnv_eeh_get_log,
+ .configure_bridge = pnv_eeh_configure_bridge,
+ .err_inject = pnv_eeh_err_inject,
+ .read_config = pnv_eeh_read_config,
+ .write_config = pnv_eeh_write_config,
+ .next_error = pnv_eeh_next_error,
+ .restore_config = pnv_eeh_restore_config
};
/**
@@ -521,7 +1531,7 @@ static int __init eeh_powernv_init(void)
int ret = -EINVAL;
eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
- ret = eeh_ops_register(&powernv_eeh_ops);
+ ret = eeh_ops_register(&pnv_eeh_ops);
if (!ret)
pr_info("EEH: PowerNV platform initialized\n");
else
diff --git a/arch/powerpc/platforms/powernv/opal-dump.c b/arch/powerpc/platforms/powernv/opal-dump.c
index 23260f7dfa7a..5aa9c1ce4de3 100644
--- a/arch/powerpc/platforms/powernv/opal-dump.c
+++ b/arch/powerpc/platforms/powernv/opal-dump.c
@@ -452,5 +452,6 @@ void __init opal_platform_dump_init(void)
return;
}
- opal_dump_resend_notification();
+ if (opal_check_token(OPAL_DUMP_RESEND))
+ opal_dump_resend_notification();
}
diff --git a/arch/powerpc/platforms/powernv/opal-elog.c b/arch/powerpc/platforms/powernv/opal-elog.c
index 518fe95dbf24..38ce757e5e2a 100644
--- a/arch/powerpc/platforms/powernv/opal-elog.c
+++ b/arch/powerpc/platforms/powernv/opal-elog.c
@@ -313,7 +313,8 @@ int __init opal_elog_init(void)
}
/* We are now ready to pull error logs from opal. */
- opal_resend_pending_logs();
+ if (opal_check_token(OPAL_ELOG_RESEND))
+ opal_resend_pending_logs();
return 0;
}
diff --git a/arch/powerpc/platforms/powernv/opal-flash.c b/arch/powerpc/platforms/powernv/opal-flash.c
index 5c21d9c07f45..4ec6219287fc 100644
--- a/arch/powerpc/platforms/powernv/opal-flash.c
+++ b/arch/powerpc/platforms/powernv/opal-flash.c
@@ -120,7 +120,11 @@ static struct image_header_t image_header;
static struct image_data_t image_data;
static struct validate_flash_t validate_flash_data;
static struct manage_flash_t manage_flash_data;
-static struct update_flash_t update_flash_data;
+
+/* Initialize update_flash_data status to No Operation */
+static struct update_flash_t update_flash_data = {
+ .status = FLASH_NO_OP,
+};
static DEFINE_MUTEX(image_data_mutex);
@@ -542,7 +546,7 @@ static struct attribute_group image_op_attr_group = {
.attrs = image_op_attrs,
};
-void __init opal_flash_init(void)
+void __init opal_flash_update_init(void)
{
int ret;
diff --git a/arch/powerpc/platforms/powernv/opal-nvram.c b/arch/powerpc/platforms/powernv/opal-nvram.c
index f9896fd5d04a..9db4398ded5d 100644
--- a/arch/powerpc/platforms/powernv/opal-nvram.c
+++ b/arch/powerpc/platforms/powernv/opal-nvram.c
@@ -16,6 +16,7 @@
#include <linux/of.h>
#include <asm/opal.h>
+#include <asm/nvram.h>
#include <asm/machdep.h>
static unsigned int nvram_size;
@@ -62,6 +63,15 @@ static ssize_t opal_nvram_write(char *buf, size_t count, loff_t *index)
return count;
}
+static int __init opal_nvram_init_log_partitions(void)
+{
+ /* Scan nvram for partitions */
+ nvram_scan_partitions();
+ nvram_init_oops_partition(0);
+ return 0;
+}
+machine_arch_initcall(powernv, opal_nvram_init_log_partitions);
+
void __init opal_nvram_init(void)
{
struct device_node *np;
diff --git a/arch/powerpc/platforms/powernv/opal-sensor.c b/arch/powerpc/platforms/powernv/opal-sensor.c
index 4ab67ef7abc9..655250499d18 100644
--- a/arch/powerpc/platforms/powernv/opal-sensor.c
+++ b/arch/powerpc/platforms/powernv/opal-sensor.c
@@ -46,18 +46,28 @@ int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data)
mutex_lock(&opal_sensor_mutex);
ret = opal_sensor_read(sensor_hndl, token, &data);
- if (ret != OPAL_ASYNC_COMPLETION)
- goto out_token;
+ switch (ret) {
+ case OPAL_ASYNC_COMPLETION:
+ ret = opal_async_wait_response(token, &msg);
+ if (ret) {
+ pr_err("%s: Failed to wait for the async response, %d\n",
+ __func__, ret);
+ goto out_token;
+ }
- ret = opal_async_wait_response(token, &msg);
- if (ret) {
- pr_err("%s: Failed to wait for the async response, %d\n",
- __func__, ret);
- goto out_token;
- }
+ ret = opal_error_code(be64_to_cpu(msg.params[1]));
+ *sensor_data = be32_to_cpu(data);
+ break;
+
+ case OPAL_SUCCESS:
+ ret = 0;
+ *sensor_data = be32_to_cpu(data);
+ break;
- *sensor_data = be32_to_cpu(data);
- ret = be64_to_cpu(msg.params[1]);
+ default:
+ ret = opal_error_code(ret);
+ break;
+ }
out_token:
mutex_unlock(&opal_sensor_mutex);
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index fcbe899fe299..a7ade94cdf87 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -286,9 +286,12 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
OPAL_CALL(opal_slw_set_reg, OPAL_SLW_SET_REG);
OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
-OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
+OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CAPI_MODE);
OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
OPAL_CALL(opal_i2c_request, OPAL_I2C_REQUEST);
+OPAL_CALL(opal_flash_read, OPAL_FLASH_READ);
+OPAL_CALL(opal_flash_write, OPAL_FLASH_WRITE);
+OPAL_CALL(opal_flash_erase, OPAL_FLASH_ERASE);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index 18fd4e71c9c1..2241565b0739 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -23,6 +23,8 @@
#include <linux/kobject.h>
#include <linux/delay.h>
#include <linux/memblock.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include <asm/machdep.h>
#include <asm/opal.h>
@@ -58,6 +60,7 @@ static struct atomic_notifier_head opal_msg_notifier_head[OPAL_MSG_TYPE_MAX];
static DEFINE_SPINLOCK(opal_notifier_lock);
static uint64_t last_notified_mask = 0x0ul;
static atomic_t opal_notifier_hold = ATOMIC_INIT(0);
+static uint32_t opal_heartbeat;
static void opal_reinit_cores(void)
{
@@ -302,23 +305,26 @@ void opal_notifier_disable(void)
* Opal message notifier based on message type. Allow subscribers to get
 * notified for specific message type.
*/
-int opal_message_notifier_register(enum OpalMessageType msg_type,
+int opal_message_notifier_register(enum opal_msg_type msg_type,
struct notifier_block *nb)
{
- if (!nb) {
- pr_warning("%s: Invalid argument (%p)\n",
- __func__, nb);
- return -EINVAL;
- }
- if (msg_type > OPAL_MSG_TYPE_MAX) {
- pr_warning("%s: Invalid message type argument (%d)\n",
+ if (!nb || msg_type >= OPAL_MSG_TYPE_MAX) {
+ pr_warning("%s: Invalid arguments, msg_type:%d\n",
__func__, msg_type);
return -EINVAL;
}
+
return atomic_notifier_chain_register(
&opal_msg_notifier_head[msg_type], nb);
}
+int opal_message_notifier_unregister(enum opal_msg_type msg_type,
+ struct notifier_block *nb)
+{
+ return atomic_notifier_chain_unregister(
+ &opal_msg_notifier_head[msg_type], nb);
+}
+
static void opal_message_do_notify(uint32_t msg_type, void *msg)
{
/* notify subscribers */
@@ -351,7 +357,7 @@ static void opal_handle_message(void)
type = be32_to_cpu(msg.msg_type);
/* Sanity check */
- if (type > OPAL_MSG_TYPE_MAX) {
+ if (type >= OPAL_MSG_TYPE_MAX) {
pr_warning("%s: Unknown message type: %u\n", __func__, type);
return;
}
@@ -665,6 +671,9 @@ static void __init opal_dump_region_init(void)
uint64_t size;
int rc;
+ if (!opal_check_token(OPAL_REGISTER_DUMP_REGION))
+ return;
+
/* Register kernel log buffer */
addr = log_buf_addr_get();
if (addr == NULL)
@@ -684,6 +693,15 @@ static void __init opal_dump_region_init(void)
"rc = %d\n", rc);
}
+static void opal_flash_init(struct device_node *opal_node)
+{
+ struct device_node *np;
+
+ for_each_child_of_node(opal_node, np)
+ if (of_device_is_compatible(np, "ibm,opal-flash"))
+ of_platform_device_create(np, NULL, NULL);
+}
+
static void opal_ipmi_init(struct device_node *opal_node)
{
struct device_node *np;
@@ -741,6 +759,29 @@ static void __init opal_irq_init(struct device_node *dn)
}
}
+static int kopald(void *unused)
+{
+ set_freezable();
+ do {
+ try_to_freeze();
+ opal_poll_events(NULL);
+ msleep_interruptible(opal_heartbeat);
+ } while (!kthread_should_stop());
+
+ return 0;
+}
+
+static void opal_init_heartbeat(void)
+{
+ /* Old firmware, we assume the HVC heartbeat is sufficient */
+ if (of_property_read_u32(opal_node, "ibm,heartbeat-ms",
+ &opal_heartbeat) != 0)
+ opal_heartbeat = 0;
+
+ if (opal_heartbeat)
+ kthread_run(kopald, NULL, "kopald");
+}
+
static int __init opal_init(void)
{
struct device_node *np, *consoles;
@@ -769,6 +810,9 @@ static int __init opal_init(void)
/* Create i2c platform devices */
opal_i2c_create_devs();
+ /* Set up a heartbeat thread if requested by OPAL */
+ opal_init_heartbeat();
+
/* Find all OPAL interrupts and request them */
opal_irq_init(opal_node);
@@ -782,7 +826,7 @@ static int __init opal_init(void)
/* Setup error log interface */
rc = opal_elog_init();
/* Setup code update interface */
- opal_flash_init();
+ opal_flash_update_init();
/* Setup platform dump extract interface */
opal_platform_dump_init();
/* Setup system parameters interface */
@@ -791,8 +835,11 @@ static int __init opal_init(void)
opal_msglog_init();
}
+ /* Initialize OPAL IPMI backend */
opal_ipmi_init(opal_node);
+ opal_flash_init(opal_node);
+
return 0;
}
machine_subsys_initcall(powernv, opal_init);
@@ -823,13 +870,17 @@ void opal_shutdown(void)
}
/* Unregister memory dump region */
- opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
+ if (opal_check_token(OPAL_UNREGISTER_DUMP_REGION))
+ opal_unregister_dump_region(OPAL_DUMP_REGION_LOG_BUF);
}
/* Export this so that test modules can use it */
EXPORT_SYMBOL_GPL(opal_invalid_call);
EXPORT_SYMBOL_GPL(opal_ipmi_send);
EXPORT_SYMBOL_GPL(opal_ipmi_recv);
+EXPORT_SYMBOL_GPL(opal_flash_read);
+EXPORT_SYMBOL_GPL(opal_flash_write);
+EXPORT_SYMBOL_GPL(opal_flash_erase);
/* Convert a region of vmalloc memory to an opal sg list */
struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -894,6 +945,25 @@ void opal_free_sg_list(struct opal_sg_list *sg)
}
}
+int opal_error_code(int rc)
+{
+ switch (rc) {
+ case OPAL_SUCCESS: return 0;
+
+ case OPAL_PARAMETER: return -EINVAL;
+ case OPAL_ASYNC_COMPLETION: return -EINPROGRESS;
+ case OPAL_BUSY_EVENT: return -EBUSY;
+ case OPAL_NO_MEM: return -ENOMEM;
+
+ case OPAL_UNSUPPORTED: return -EIO;
+ case OPAL_HARDWARE: return -EIO;
+ case OPAL_INTERNAL_ERROR: return -EIO;
+ default:
+ pr_err("%s: unexpected OPAL error %d\n", __func__, rc);
+ return -EIO;
+ }
+}
+
EXPORT_SYMBOL_GPL(opal_poll_events);
EXPORT_SYMBOL_GPL(opal_rtc_read);
EXPORT_SYMBOL_GPL(opal_rtc_write);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 6c9ff2b95119..920c252d1f49 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -44,6 +44,9 @@
#include "powernv.h"
#include "pci.h"
+/* 256M DMA window, 4K TCE pages, 8 bytes TCE */
+#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
+
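For reference, the constant above works out as follows: a 256 MB DMA window split into 4 KB TCE pages, with 8 bytes per TCE, needs a 512 KB table. A small standalone sketch of the arithmetic:

/* Sketch of the TCE32_TABLE_SIZE arithmetic; values mirror the macro above. */
#include <stdio.h>

int main(void)
{
	unsigned long window = 0x10000000;	/* 256 MB DMA window  */
	unsigned long page   = 0x1000;		/* 4 KB TCE page size */
	unsigned long tce    = 8;		/* bytes per TCE      */

	printf("%lu bytes (512 KB)\n", (window / page) * tce);
	return 0;
}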
static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
const char *fmt, ...)
{
@@ -56,11 +59,18 @@ static void pe_level_printk(const struct pnv_ioda_pe *pe, const char *level,
vaf.fmt = fmt;
vaf.va = &args;
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV)
strlcpy(pfix, dev_name(&pe->pdev->dev), sizeof(pfix));
- else
+ else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL))
sprintf(pfix, "%04x:%02x ",
pci_domain_nr(pe->pbus), pe->pbus->number);
+#ifdef CONFIG_PCI_IOV
+ else if (pe->flags & PNV_IODA_PE_VF)
+ sprintf(pfix, "%04x:%02x:%2x.%d",
+ pci_domain_nr(pe->parent_dev->bus),
+ (pe->rid & 0xff00) >> 8,
+ PCI_SLOT(pe->rid), PCI_FUNC(pe->rid));
+#endif /* CONFIG_PCI_IOV*/
printk("%spci %s: [PE# %.3d] %pV",
level, pfix, pe->pe_number, &vaf);
@@ -591,7 +601,7 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
bool is_add)
{
struct pnv_ioda_pe *slave;
- struct pci_dev *pdev;
+ struct pci_dev *pdev = NULL;
int ret;
/*
@@ -630,8 +640,12 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
pdev = pe->pbus->self;
- else
+ else if (pe->flags & PNV_IODA_PE_DEV)
pdev = pe->pdev->bus->self;
+#ifdef CONFIG_PCI_IOV
+ else if (pe->flags & PNV_IODA_PE_VF)
+ pdev = pe->parent_dev->bus->self;
+#endif /* CONFIG_PCI_IOV */
while (pdev) {
struct pci_dn *pdn = pci_get_pdn(pdev);
struct pnv_ioda_pe *parent;
@@ -649,6 +663,87 @@ static int pnv_ioda_set_peltv(struct pnv_phb *phb,
return 0;
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_ioda_deconfigure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
+{
+ struct pci_dev *parent;
+ uint8_t bcomp, dcomp, fcomp;
+ int64_t rc;
+ long rid_end, rid;
+
+ /* Currently we only deconfigure VF PEs. Bus PEs are always there. */
+ if (pe->pbus) {
+ int count;
+
+ dcomp = OPAL_IGNORE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_IGNORE_RID_FUNCTION_NUMBER;
+ parent = pe->pbus->self;
+ if (pe->flags & PNV_IODA_PE_BUS_ALL)
+ count = pe->pbus->busn_res.end - pe->pbus->busn_res.start + 1;
+ else
+ count = 1;
+
+ switch(count) {
+ case 1: bcomp = OpalPciBusAll; break;
+ case 2: bcomp = OpalPciBus7Bits; break;
+ case 4: bcomp = OpalPciBus6Bits; break;
+ case 8: bcomp = OpalPciBus5Bits; break;
+ case 16: bcomp = OpalPciBus4Bits; break;
+ case 32: bcomp = OpalPciBus3Bits; break;
+ default:
+ dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
+ count);
+ /* Do an exact match only */
+ bcomp = OpalPciBusAll;
+ }
+ rid_end = pe->rid + (count << 8);
+ } else {
+ if (pe->flags & PNV_IODA_PE_VF)
+ parent = pe->parent_dev;
+ else
+ parent = pe->pdev->bus->self;
+ bcomp = OpalPciBusAll;
+ dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
+ fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
+ rid_end = pe->rid + 1;
+ }
+
+ /* Clear the reverse map */
+ for (rid = pe->rid; rid < rid_end; rid++)
+ phb->ioda.pe_rmap[rid] = 0;
+
+ /* Release from all parents PELT-V */
+ while (parent) {
+ struct pci_dn *pdn = pci_get_pdn(parent);
+ if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+ rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
+ pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+ /* XXX What to do in case of error ? */
+ }
+ parent = parent->bus->self;
+ }
+
+ opal_pci_eeh_freeze_set(phb->opal_id, pe->pe_number,
+ OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+
+ /* Disassociate PE in PELT */
+ rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
+ pe->pe_number, OPAL_REMOVE_PE_FROM_DOMAIN);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld remove self from PELTV\n", rc);
+ rc = opal_pci_set_pe(phb->opal_id, pe->pe_number, pe->rid,
+ bcomp, dcomp, fcomp, OPAL_UNMAP_PE);
+ if (rc)
+ pe_err(pe, "OPAL error %ld trying to setup PELT table\n", rc);
+
+ pe->pbus = NULL;
+ pe->pdev = NULL;
+ pe->parent_dev = NULL;
+
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
{
struct pci_dev *parent;
@@ -675,15 +770,19 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
case 16: bcomp = OpalPciBus4Bits; break;
case 32: bcomp = OpalPciBus3Bits; break;
default:
- pr_err("%s: Number of subordinate busses %d"
- " unsupported\n",
- pci_name(pe->pbus->self), count);
+ dev_err(&pe->pbus->dev, "Number of subordinate buses %d unsupported\n",
+ count);
/* Do an exact match only */
bcomp = OpalPciBusAll;
}
rid_end = pe->rid + (count << 8);
} else {
- parent = pe->pdev->bus->self;
+#ifdef CONFIG_PCI_IOV
+ if (pe->flags & PNV_IODA_PE_VF)
+ parent = pe->parent_dev;
+ else
+#endif /* CONFIG_PCI_IOV */
+ parent = pe->pdev->bus->self;
bcomp = OpalPciBusAll;
dcomp = OPAL_COMPARE_RID_DEVICE_NUMBER;
fcomp = OPAL_COMPARE_RID_FUNCTION_NUMBER;
@@ -774,6 +873,78 @@ static unsigned int pnv_ioda_dma_weight(struct pci_dev *dev)
return 10;
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_pci_vf_resource_shift(struct pci_dev *dev, int offset)
+{
+ struct pci_dn *pdn = pci_get_pdn(dev);
+ int i;
+ struct resource *res, res2;
+ resource_size_t size;
+ u16 num_vfs;
+
+ if (!dev->is_physfn)
+ return -EINVAL;
+
+ /*
+ * "offset" is in VFs. The M64 windows are sized so that when they
+ * are segmented, each segment is the same size as the IOV BAR.
+ * Each segment is in a separate PE, and the high order bits of the
+ * address are the PE number. Therefore, each VF's BAR is in a
+ * separate PE, and changing the IOV BAR start address changes the
+ * range of PEs the VFs are in.
+ */
+ num_vfs = pdn->num_vfs;
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ /*
+ * The actual IOV BAR range is determined by the start address
+ * and the size needed for num_vfs VFs. This check makes sure
+ * that, after shifting, the range does not overlap with
+ * another device.
+ */
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2.flags = res->flags;
+ res2.start = res->start + (size * offset);
+ res2.end = res2.start + (size * num_vfs) - 1;
+
+ if (res2.end > res->end) {
+ dev_err(&dev->dev, "VF BAR%d: %pR would extend past %pR (trying to enable %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ return -EBUSY;
+ }
+ }
+
+ /*
+ * After doing this there will be a "hole" in /proc/iomem when
+ * offset is a positive value: it looks as if the device returned
+ * some MMIO space to the system, but nothing can actually use it.
+ */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &dev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ size = pci_iov_resource_size(dev, i + PCI_IOV_RESOURCES);
+ res2 = *res;
+ res->start += size * offset;
+
+ dev_info(&dev->dev, "VF BAR%d: %pR shifted to %pR (enabling %d VFs shifted by %d)\n",
+ i, &res2, res, num_vfs, offset);
+ pci_update_resource(dev, i + PCI_IOV_RESOURCES);
+ }
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
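To make the shift in pnv_pci_vf_resource_shift() above concrete: moving the IOV BAR start by offset * segment_size moves VF i from M64 segment i into segment offset + i, and therefore into PE pdn->offset + i. The standalone sketch below uses made-up numbers (1 MB per-VF segment, PE offset 8, 4 VFs); none of them come from the patch.

/* Illustrative arithmetic only; the segment size, PE offset and VF count
 * below are made-up example values, not taken from the patch.
 */
#include <stdio.h>

int main(void)
{
	unsigned long seg_size = 1UL << 20;		/* per-VF BAR segment, 1 MB */
	unsigned long iov_start = 0x3fe000000000UL;	/* assumed IOV BAR start    */
	int offset = 8;					/* first PE given to the VFs */
	int num_vfs = 4;
	unsigned long new_start = iov_start + seg_size * offset;
	int i;

	for (i = 0; i < num_vfs; i++)
		printf("VF%d BAR at 0x%lx -> M64 segment (and PE) %d\n",
		       i, new_start + seg_size * i, offset + i);
	return 0;
}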
+
#if 0
static struct pnv_ioda_pe *pnv_ioda_setup_dev_PE(struct pci_dev *dev)
{
@@ -857,7 +1028,6 @@ static void pnv_ioda_setup_same_PE(struct pci_bus *bus, struct pnv_ioda_pe *pe)
pci_name(dev));
continue;
}
- pdn->pcidev = dev;
pdn->pe_number = pe->pe_number;
pe->dma_weight += pnv_ioda_dma_weight(dev);
if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate)
@@ -916,6 +1086,10 @@ static void pnv_ioda_setup_bus_PE(struct pci_bus *bus, int all)
return;
}
+ pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
+ GFP_KERNEL, hose->node);
+ pe->tce32_table->data = pe;
+
/* Associate it with all child devices */
pnv_ioda_setup_same_PE(bus, pe);
@@ -974,6 +1148,441 @@ static void pnv_pci_ioda_setup_PEs(void)
}
}
+#ifdef CONFIG_PCI_IOV
+static int pnv_pci_vf_release_m64(struct pci_dev *pdev)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ int i, j;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ for (j = 0; j < M64_PER_IOV; j++) {
+ if (pdn->m64_wins[i][j] == IODA_INVALID_M64)
+ continue;
+ opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 0);
+ clear_bit(pdn->m64_wins[i][j], &phb->ioda.m64_bar_alloc);
+ pdn->m64_wins[i][j] = IODA_INVALID_M64;
+ }
+
+ return 0;
+}
+
+static int pnv_pci_vf_assign_m64(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ unsigned int win;
+ struct resource *res;
+ int i, j;
+ int64_t rc;
+ int total_vfs;
+ resource_size_t size, start;
+ int pe_num;
+ int vf_groups;
+ int vf_per_group;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+
+ /* Initialize the m64_wins to IODA_INVALID_M64 */
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++)
+ for (j = 0; j < M64_PER_IOV; j++)
+ pdn->m64_wins[i][j] = IODA_INVALID_M64;
+
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs: M64_PER_IOV;
+ vf_per_group = (num_vfs <= M64_PER_IOV)? 1:
+ roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+ } else {
+ vf_groups = 1;
+ vf_per_group = 1;
+ }
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || !res->parent)
+ continue;
+
+ if (!pnv_pci_is_mem_pref_64(res->flags))
+ continue;
+
+ for (j = 0; j < vf_groups; j++) {
+ do {
+ win = find_next_zero_bit(&phb->ioda.m64_bar_alloc,
+ phb->ioda.m64_bar_idx + 1, 0);
+
+ if (win >= phb->ioda.m64_bar_idx + 1)
+ goto m64_failed;
+ } while (test_and_set_bit(win, &phb->ioda.m64_bar_alloc));
+
+ pdn->m64_wins[i][j] = win;
+
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ size = pci_iov_resource_size(pdev,
+ PCI_IOV_RESOURCES + i);
+ size = size * vf_per_group;
+ start = res->start + size * j;
+ } else {
+ size = resource_size(res);
+ start = res->start;
+ }
+
+ /* Map the M64 here */
+ if (pdn->m64_per_iov == M64_PER_IOV) {
+ pe_num = pdn->offset + j;
+ rc = opal_pci_map_pe_mmio_window(phb->opal_id,
+ pe_num, OPAL_M64_WINDOW_TYPE,
+ pdn->m64_wins[i][j], 0);
+ }
+
+ rc = opal_pci_set_phb_mem_window(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE,
+ pdn->m64_wins[i][j],
+ start,
+ 0, /* unused */
+ size);
+
+
+ if (rc != OPAL_SUCCESS) {
+ dev_err(&pdev->dev, "Failed to map M64 window #%d: %lld\n",
+ win, rc);
+ goto m64_failed;
+ }
+
+ if (pdn->m64_per_iov == M64_PER_IOV)
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 2);
+ else
+ rc = opal_pci_phb_mmio_enable(phb->opal_id,
+ OPAL_M64_WINDOW_TYPE, pdn->m64_wins[i][j], 1);
+
+ if (rc != OPAL_SUCCESS) {
+ dev_err(&pdev->dev, "Failed to enable M64 window #%d: %llx\n",
+ win, rc);
+ goto m64_failed;
+ }
+ }
+ }
+ return 0;
+
+m64_failed:
+ pnv_pci_vf_release_m64(pdev);
+ return -EBUSY;
+}
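When one IOV BAR is spread across several M64 BARs (pdn->m64_per_iov == M64_PER_IOV), the vf_groups / vf_per_group arithmetic in pnv_pci_vf_assign_m64() above is easy to misread. The sketch below works through a few example VF counts; M64_PER_IOV = 4 is an assumption taken from elsewhere in this series, not from this hunk.

/* Sketch of the vf_groups / vf_per_group computation above. M64_PER_IOV = 4
 * is an assumption taken from elsewhere in this series.
 */
#include <stdio.h>

#define M64_PER_IOV 4

static unsigned long roundup_pow_of_two(unsigned long n)
{
	unsigned long r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	int counts[] = { 2, 4, 6, 16 };
	int i;

	for (i = 0; i < 4; i++) {
		int num_vfs = counts[i];
		int vf_groups = (num_vfs <= M64_PER_IOV) ? num_vfs : M64_PER_IOV;
		int vf_per_group = (num_vfs <= M64_PER_IOV) ? 1 :
			(int)(roundup_pow_of_two(num_vfs) / M64_PER_IOV);

		printf("num_vfs=%2d -> %d group(s) of %d VF(s) per M64 BAR\n",
		       num_vfs, vf_groups, vf_per_group);
	}
	return 0;
}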
+
+static void pnv_pci_ioda2_release_dma_pe(struct pci_dev *dev, struct pnv_ioda_pe *pe)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct iommu_table *tbl;
+ unsigned long addr;
+ int64_t rc;
+
+ bus = dev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ tbl = pe->tce32_table;
+ addr = tbl->it_base;
+
+ opal_pci_map_pe_dma_window(phb->opal_id, pe->pe_number,
+ pe->pe_number << 1, 1, __pa(addr),
+ 0, 0x1000);
+
+ rc = opal_pci_map_pe_dma_window_real(pe->phb->opal_id,
+ pe->pe_number,
+ (pe->pe_number << 1) + 1,
+ pe->tce_bypass_base,
+ 0);
+ if (rc)
+ pe_warn(pe, "OPAL error %ld release DMA window\n", rc);
+
+ iommu_free_table(tbl, of_node_full_name(dev->dev.of_node));
+ free_pages(addr, get_order(TCE32_TABLE_SIZE));
+ pe->tce32_table = NULL;
+}
+
+static void pnv_ioda_release_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe, *pe_n;
+ struct pci_dn *pdn;
+ u16 vf_index;
+ int64_t rc;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (!pdev->is_physfn)
+ return;
+
+ if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
+ int vf_group;
+ int vf_per_group;
+ int vf_index1;
+
+ vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+
+ for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++)
+ for (vf_index = vf_group * vf_per_group;
+ vf_index < (vf_group + 1) * vf_per_group &&
+ vf_index < num_vfs;
+ vf_index++)
+ for (vf_index1 = vf_group * vf_per_group;
+ vf_index1 < (vf_group + 1) * vf_per_group &&
+ vf_index1 < num_vfs;
+ vf_index1++){
+
+ rc = opal_pci_set_peltv(phb->opal_id,
+ pdn->offset + vf_index,
+ pdn->offset + vf_index1,
+ OPAL_REMOVE_PE_FROM_DOMAIN);
+
+ if (rc)
+ dev_warn(&pdev->dev, "%s: Failed to unlink same group PE#%d(%lld)\n",
+ __func__,
+ pdn->offset + vf_index1, rc);
+ }
+ }
+
+ list_for_each_entry_safe(pe, pe_n, &phb->ioda.pe_list, list) {
+ if (pe->parent_dev != pdev)
+ continue;
+
+ pnv_pci_ioda2_release_dma_pe(pdev, pe);
+
+ /* Remove from list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_del(&pe->list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_ioda_deconfigure_pe(phb, pe);
+
+ pnv_ioda_free_pe(phb, pe->pe_number);
+ }
+}
+
+void pnv_pci_sriov_disable(struct pci_dev *pdev)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ struct pci_sriov *iov;
+ u16 num_vfs;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+ iov = pdev->sriov;
+ num_vfs = pdn->num_vfs;
+
+ /* Release VF PEs */
+ pnv_ioda_release_vf_PE(pdev, num_vfs);
+
+ if (phb->type == PNV_PHB_IODA2) {
+ if (pdn->m64_per_iov == 1)
+ pnv_pci_vf_resource_shift(pdev, -pdn->offset);
+
+ /* Release M64 windows */
+ pnv_pci_vf_release_m64(pdev);
+
+ /* Release PE numbers */
+ bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->offset = 0;
+ }
+}
+
+static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
+ struct pnv_ioda_pe *pe);
+static void pnv_ioda_setup_vf_PE(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pnv_ioda_pe *pe;
+ int pe_num;
+ u16 vf_index;
+ struct pci_dn *pdn;
+ int64_t rc;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (!pdev->is_physfn)
+ return;
+
+ /* Reserve PE for each VF */
+ for (vf_index = 0; vf_index < num_vfs; vf_index++) {
+ pe_num = pdn->offset + vf_index;
+
+ pe = &phb->ioda.pe_array[pe_num];
+ pe->pe_number = pe_num;
+ pe->phb = phb;
+ pe->flags = PNV_IODA_PE_VF;
+ pe->pbus = NULL;
+ pe->parent_dev = pdev;
+ pe->tce32_seg = -1;
+ pe->mve_number = -1;
+ pe->rid = (pci_iov_virtfn_bus(pdev, vf_index) << 8) |
+ pci_iov_virtfn_devfn(pdev, vf_index);
+
+ pe_info(pe, "VF %04d:%02d:%02d.%d associated with PE#%d\n",
+ hose->global_number, pdev->bus->number,
+ PCI_SLOT(pci_iov_virtfn_devfn(pdev, vf_index)),
+ PCI_FUNC(pci_iov_virtfn_devfn(pdev, vf_index)), pe_num);
+
+ if (pnv_ioda_configure_pe(phb, pe)) {
+ /* XXX What do we do here ? */
+ if (pe_num)
+ pnv_ioda_free_pe(phb, pe_num);
+ pe->pdev = NULL;
+ continue;
+ }
+
+ pe->tce32_table = kzalloc_node(sizeof(struct iommu_table),
+ GFP_KERNEL, hose->node);
+ pe->tce32_table->data = pe;
+
+ /* Put PE to the list */
+ mutex_lock(&phb->ioda.pe_list_mutex);
+ list_add_tail(&pe->list, &phb->ioda.pe_list);
+ mutex_unlock(&phb->ioda.pe_list_mutex);
+
+ pnv_pci_ioda2_setup_dma_pe(phb, pe);
+ }
+
+ if (pdn->m64_per_iov == M64_PER_IOV && num_vfs > M64_PER_IOV) {
+ int vf_group;
+ int vf_per_group;
+ int vf_index1;
+
+ vf_per_group = roundup_pow_of_two(num_vfs) / pdn->m64_per_iov;
+
+ for (vf_group = 0; vf_group < M64_PER_IOV; vf_group++) {
+ for (vf_index = vf_group * vf_per_group;
+ vf_index < (vf_group + 1) * vf_per_group &&
+ vf_index < num_vfs;
+ vf_index++) {
+ for (vf_index1 = vf_group * vf_per_group;
+ vf_index1 < (vf_group + 1) * vf_per_group &&
+ vf_index1 < num_vfs;
+ vf_index1++) {
+
+ rc = opal_pci_set_peltv(phb->opal_id,
+ pdn->offset + vf_index,
+ pdn->offset + vf_index1,
+ OPAL_ADD_PE_TO_DOMAIN);
+
+ if (rc)
+ dev_warn(&pdev->dev, "%s: Failed to link same group PE#%d(%lld)\n",
+ __func__,
+ pdn->offset + vf_index1, rc);
+ }
+ }
+ }
+ }
+}
+
+int pnv_pci_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ struct pci_bus *bus;
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct pci_dn *pdn;
+ int ret;
+
+ bus = pdev->bus;
+ hose = pci_bus_to_host(bus);
+ phb = hose->private_data;
+ pdn = pci_get_pdn(pdev);
+
+ if (phb->type == PNV_PHB_IODA2) {
+ /* Calculate available PE for required VFs */
+ mutex_lock(&phb->ioda.pe_alloc_mutex);
+ pdn->offset = bitmap_find_next_zero_area(
+ phb->ioda.pe_alloc, phb->ioda.total_pe,
+ 0, num_vfs, 0);
+ if (pdn->offset >= phb->ioda.total_pe) {
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+ dev_info(&pdev->dev, "Failed to enable VF%d\n", num_vfs);
+ pdn->offset = 0;
+ return -EBUSY;
+ }
+ bitmap_set(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->num_vfs = num_vfs;
+ mutex_unlock(&phb->ioda.pe_alloc_mutex);
+
+ /* Assign M64 window accordingly */
+ ret = pnv_pci_vf_assign_m64(pdev, num_vfs);
+ if (ret) {
+ dev_info(&pdev->dev, "Not enough M64 window resources\n");
+ goto m64_failed;
+ }
+
+ /*
+ * When using one M64 BAR to map one IOV BAR, we need to shift
+ * the IOV BAR according to the PE# allocated to the VFs.
+ * Otherwise, the PE# for the VF will conflict with others.
+ */
+ if (pdn->m64_per_iov == 1) {
+ ret = pnv_pci_vf_resource_shift(pdev, pdn->offset);
+ if (ret)
+ goto m64_failed;
+ }
+ }
+
+ /* Setup VF PEs */
+ pnv_ioda_setup_vf_PE(pdev, num_vfs);
+
+ return 0;
+
+m64_failed:
+ bitmap_clear(phb->ioda.pe_alloc, pdn->offset, num_vfs);
+ pdn->offset = 0;
+
+ return ret;
+}
+
+int pcibios_sriov_disable(struct pci_dev *pdev)
+{
+ pnv_pci_sriov_disable(pdev);
+
+ /* Release PCI data */
+ remove_dev_pci_data(pdev);
+ return 0;
+}
+
+int pcibios_sriov_enable(struct pci_dev *pdev, u16 num_vfs)
+{
+ /* Allocate PCI data */
+ add_dev_pci_data(pdev);
+
+ pnv_pci_sriov_enable(pdev, num_vfs);
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+
static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev)
{
struct pci_dn *pdn = pci_get_pdn(pdev);
@@ -989,7 +1598,7 @@ static void pnv_pci_ioda_dma_dev_setup(struct pnv_phb *phb, struct pci_dev *pdev
pe = &phb->ioda.pe_array[pdn->pe_number];
WARN_ON(get_dma_ops(&pdev->dev) != &dma_iommu_ops);
- set_iommu_table_base_and_group(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base_and_group(&pdev->dev, pe->tce32_table);
}
static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
@@ -1016,7 +1625,7 @@ static int pnv_pci_ioda_dma_set_mask(struct pnv_phb *phb,
} else {
dev_info(&pdev->dev, "Using 32-bit DMA via iommu\n");
set_dma_ops(&pdev->dev, &dma_iommu_ops);
- set_iommu_table_base(&pdev->dev, &pe->tce32_table);
+ set_iommu_table_base(&pdev->dev, pe->tce32_table);
}
*pdev->dev.dma_mask = dma_mask;
return 0;
@@ -1053,9 +1662,9 @@ static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe,
list_for_each_entry(dev, &bus->devices, bus_list) {
if (add_to_iommu_group)
set_iommu_table_base_and_group(&dev->dev,
- &pe->tce32_table);
+ pe->tce32_table);
else
- set_iommu_table_base(&dev->dev, &pe->tce32_table);
+ set_iommu_table_base(&dev->dev, pe->tce32_table);
if (dev->subordinate)
pnv_ioda_setup_bus_dma(pe, dev->subordinate,
@@ -1145,8 +1754,7 @@ static void pnv_pci_ioda2_tce_invalidate(struct pnv_ioda_pe *pe,
void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
__be64 *startp, __be64 *endp, bool rm)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
+ struct pnv_ioda_pe *pe = tbl->data;
struct pnv_phb *phb = pe->phb;
if (phb->type == PNV_PHB_IODA1)
@@ -1167,9 +1775,6 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
int64_t rc;
void *addr;
- /* 256M DMA window, 4K TCE pages, 8 bytes TCE */
-#define TCE32_TABLE_SIZE ((0x10000000 / 0x1000) * 8)
-
/* XXX FIXME: Handle 64-bit only DMA devices */
/* XXX FIXME: Provide 64-bit DMA facilities & non-4K TCE tables etc.. */
/* XXX FIXME: Allocate multi-level tables on PHB3 */
@@ -1212,7 +1817,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = &pe->tce32_table;
+ tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, TCE32_TABLE_SIZE * segs,
base << 28, IOMMU_PAGE_SHIFT_4K);
@@ -1232,12 +1837,19 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
TCE_PCI_SWINV_PAIR);
}
iommu_init_table(tbl, phb->hose->node);
- iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- else
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ } else if (pe->flags & PNV_IODA_PE_VF) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
+ }
return;
fail:
@@ -1250,8 +1862,7 @@ static void pnv_pci_ioda_setup_dma_pe(struct pnv_phb *phb,
static void pnv_pci_ioda2_set_bypass(struct iommu_table *tbl, bool enable)
{
- struct pnv_ioda_pe *pe = container_of(tbl, struct pnv_ioda_pe,
- tce32_table);
+ struct pnv_ioda_pe *pe = tbl->data;
uint16_t window_id = (pe->pe_number << 1 ) + 1;
int64_t rc;
@@ -1296,10 +1907,10 @@ static void pnv_pci_ioda2_setup_bypass_pe(struct pnv_phb *phb,
pe->tce_bypass_base = 1ull << 59;
/* Install set_bypass callback for VFIO */
- pe->tce32_table.set_bypass = pnv_pci_ioda2_set_bypass;
+ pe->tce32_table->set_bypass = pnv_pci_ioda2_set_bypass;
/* Enable bypass by default */
- pnv_pci_ioda2_set_bypass(&pe->tce32_table, true);
+ pnv_pci_ioda2_set_bypass(pe->tce32_table, true);
}
static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
@@ -1347,7 +1958,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
}
/* Setup linux iommu table */
- tbl = &pe->tce32_table;
+ tbl = pe->tce32_table;
pnv_pci_setup_iommu_table(tbl, addr, tce_table_size, 0,
IOMMU_PAGE_SHIFT_4K);
@@ -1365,12 +1976,19 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
tbl->it_type |= (TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE);
}
iommu_init_table(tbl, phb->hose->node);
- iommu_register_group(tbl, phb->hose->global_number, pe->pe_number);
- if (pe->pdev)
+ if (pe->flags & PNV_IODA_PE_DEV) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
set_iommu_table_base_and_group(&pe->pdev->dev, tbl);
- else
+ } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
pnv_ioda_setup_bus_dma(pe, pe->pbus, true);
+ } else if (pe->flags & PNV_IODA_PE_VF) {
+ iommu_register_group(tbl, phb->hose->global_number,
+ pe->pe_number);
+ }
/* Also create a bypass window */
if (!pnv_iommu_bypass_disabled)
@@ -1731,6 +2349,73 @@ static void pnv_pci_init_ioda_msis(struct pnv_phb *phb)
static void pnv_pci_init_ioda_msis(struct pnv_phb *phb) { }
#endif /* CONFIG_PCI_MSI */
+#ifdef CONFIG_PCI_IOV
+static void pnv_pci_ioda_fixup_iov_resources(struct pci_dev *pdev)
+{
+ struct pci_controller *hose;
+ struct pnv_phb *phb;
+ struct resource *res;
+ int i;
+ resource_size_t size;
+ struct pci_dn *pdn;
+ int mul, total_vfs;
+
+ if (!pdev->is_physfn || pdev->is_added)
+ return;
+
+ hose = pci_bus_to_host(pdev->bus);
+ phb = hose->private_data;
+
+ pdn = pci_get_pdn(pdev);
+ pdn->vfs_expanded = 0;
+
+ total_vfs = pci_sriov_get_totalvfs(pdev);
+ pdn->m64_per_iov = 1;
+ mul = phb->ioda.total_pe;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ dev_warn(&pdev->dev, " non M64 VF BAR%d: %pR\n",
+ i, res);
+ continue;
+ }
+
+ size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+
+ /* bigger than 64M */
+ if (size > (1 << 26)) {
+ dev_info(&pdev->dev, "PowerNV: VF BAR%d: %pR IOV size is bigger than 64M, roundup power2\n",
+ i, res);
+ pdn->m64_per_iov = M64_PER_IOV;
+ mul = roundup_pow_of_two(total_vfs);
+ break;
+ }
+ }
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ res = &pdev->resource[i + PCI_IOV_RESOURCES];
+ if (!res->flags || res->parent)
+ continue;
+ if (!pnv_pci_is_mem_pref_64(res->flags)) {
+ dev_warn(&pdev->dev, "Skipping expanding VF BAR%d: %pR\n",
+ i, res);
+ continue;
+ }
+
+ dev_dbg(&pdev->dev, " Fixing VF BAR%d: %pR to\n", i, res);
+ size = pci_iov_resource_size(pdev, i + PCI_IOV_RESOURCES);
+ res->end = res->start + size * mul - 1;
+ dev_dbg(&pdev->dev, " %pR\n", res);
+ dev_info(&pdev->dev, "VF BAR%d: %pR (expanded to %d VFs for PE alignment)",
+ i, res, mul);
+ }
+ pdn->vfs_expanded = mul;
+}
+#endif /* CONFIG_PCI_IOV */
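A worked example of the expansion in pnv_pci_ioda_fixup_iov_resources() above, with assumed numbers: given a 1 MB per-VF BAR and 256 PEs on the PHB, each IOV BAR is grown to 256 MB so that, once the M64 window is segmented, every 1 MB segment lands in its own PE; the iov_resource_alignment hook further down then reports the same expanded size. All values below are illustrative assumptions.

/* Worked example with assumed numbers: 1 MB per-VF BAR, 256 PEs on the PHB. */
#include <stdio.h>

int main(void)
{
	unsigned long vf_bar_size = 1UL << 20;		/* per-VF BAR size (assumed) */
	unsigned long total_pe = 256;			/* PEs on this PHB (assumed) */
	unsigned long start = 0x3fe000000000UL;		/* assumed IOV BAR start     */
	unsigned long expanded = vf_bar_size * total_pe;

	/* Mirrors res->end = res->start + size * mul - 1 above */
	printf("IOV BAR expanded to [0x%lx-0x%lx] (%lu MB), one segment per PE\n",
	       start, start + expanded - 1, expanded >> 20);
	return 0;
}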
+
/*
* This function is supposed to be called on basis of PE from top
 * to bottom style. So the I/O or MMIO segment assigned to
@@ -1777,7 +2462,8 @@ static void pnv_ioda_setup_pe_seg(struct pci_controller *hose,
region.start += phb->ioda.io_segsize;
index++;
}
- } else if (res->flags & IORESOURCE_MEM) {
+ } else if ((res->flags & IORESOURCE_MEM) &&
+ !pnv_pci_is_mem_pref_64(res->flags)) {
region.start = res->start -
hose->mem_offset[0] -
phb->ioda.m32_pci_base;
@@ -1907,10 +2593,29 @@ static resource_size_t pnv_pci_window_alignment(struct pci_bus *bus,
return phb->ioda.io_segsize;
}
+#ifdef CONFIG_PCI_IOV
+static resource_size_t pnv_pci_iov_resource_alignment(struct pci_dev *pdev,
+ int resno)
+{
+ struct pci_dn *pdn = pci_get_pdn(pdev);
+ resource_size_t align, iov_align;
+
+ iov_align = resource_size(&pdev->resource[resno]);
+ if (iov_align)
+ return iov_align;
+
+ align = pci_iov_resource_size(pdev, resno);
+ if (pdn->vfs_expanded)
+ return pdn->vfs_expanded * align;
+
+ return align;
+}
+#endif /* CONFIG_PCI_IOV */
+
/* Prevent enabling devices for which we couldn't properly
* assign a PE
*/
-static int pnv_pci_enable_device_hook(struct pci_dev *dev)
+static bool pnv_pci_enable_device_hook(struct pci_dev *dev)
{
struct pci_controller *hose = pci_bus_to_host(dev->bus);
struct pnv_phb *phb = hose->private_data;
@@ -1922,13 +2627,13 @@ static int pnv_pci_enable_device_hook(struct pci_dev *dev)
* PEs isn't ready.
*/
if (!phb->initialized)
- return 0;
+ return true;
pdn = pci_get_pdn(dev);
if (!pdn || pdn->pe_number == IODA_INVALID_PE)
- return -EINVAL;
+ return false;
- return 0;
+ return true;
}
static u32 pnv_ioda_bdfn_to_pe(struct pnv_phb *phb, struct pci_bus *bus,
@@ -1988,9 +2693,11 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
hose->last_busno = 0xff;
}
hose->private_data = phb;
+ hose->controller_ops = pnv_pci_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = ioda_type;
+ mutex_init(&phb->ioda.pe_alloc_mutex);
/* Detect specific models for error handling */
if (of_device_is_compatible(np, "ibm,p7ioc-pciex"))
@@ -2050,6 +2757,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
INIT_LIST_HEAD(&phb->ioda.pe_dma_list);
INIT_LIST_HEAD(&phb->ioda.pe_list);
+ mutex_init(&phb->ioda.pe_list_mutex);
/* Calculate how many 32-bit TCE segments we have */
phb->ioda.tce32_count = phb->ioda.m32_pci_base >> 28;
@@ -2078,9 +2786,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
phb->get_pe_state = pnv_ioda_get_pe_state;
phb->freeze_pe = pnv_ioda_freeze_pe;
phb->unfreeze_pe = pnv_ioda_unfreeze_pe;
-#ifdef CONFIG_EEH
- phb->eeh_ops = &ioda_eeh_ops;
-#endif
/* Setup RID -> PE mapping function */
phb->bdfn_to_pe = pnv_ioda_bdfn_to_pe;
@@ -2104,9 +2809,15 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
* the child P2P bridges) can form individual PE.
*/
ppc_md.pcibios_fixup = pnv_pci_ioda_fixup;
- ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
- ppc_md.pcibios_window_alignment = pnv_pci_window_alignment;
- ppc_md.pcibios_reset_secondary_bus = pnv_pci_reset_secondary_bus;
+ pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
+ pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
+ pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
+
+#ifdef CONFIG_PCI_IOV
+ ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
+ ppc_md.pcibios_iov_resource_alignment = pnv_pci_iov_resource_alignment;
+#endif
+
pci_add_flags(PCI_REASSIGN_ALL_RSRC);
/* Reset IODA tables to a clean state */
@@ -2121,8 +2832,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
*/
if (is_kdump_kernel()) {
pr_info(" Issue PHB reset ...\n");
- ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
- ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
+ pnv_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
+ pnv_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
}
/* Remove M64 resource if we can't configure it successfully */
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 6ef6d4d8e7e2..4729ca793813 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -133,6 +133,7 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
phb->hose->first_busno = 0;
phb->hose->last_busno = 0xff;
phb->hose->private_data = phb;
+ phb->hose->controller_ops = pnv_pci_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = PNV_PHB_P5IOC2;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 54323d6b5166..bca2aeb6e4b6 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -366,9 +366,9 @@ static void pnv_pci_handle_eeh_config(struct pnv_phb *phb, u32 pe_no)
spin_unlock_irqrestore(&phb->lock, flags);
}
-static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
- struct device_node *dn)
+static void pnv_pci_config_check_eeh(struct pci_dn *pdn)
{
+ struct pnv_phb *phb = pdn->phb->private_data;
u8 fstate;
__be16 pcierr;
int pe_no;
@@ -379,7 +379,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
* setup that yet. So all ER errors should be mapped to
* reserved PE.
*/
- pe_no = PCI_DN(dn)->pe_number;
+ pe_no = pdn->pe_number;
if (pe_no == IODA_INVALID_PE) {
if (phb->type == PNV_PHB_P5IOC2)
pe_no = 0;
@@ -407,8 +407,7 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
}
cfg_dbg(" -> EEH check, bdfn=%04x PE#%d fstate=%x\n",
- (PCI_DN(dn)->busno << 8) | (PCI_DN(dn)->devfn),
- pe_no, fstate);
+ (pdn->busno << 8) | (pdn->devfn), pe_no, fstate);
/* Clear the frozen state if applicable */
if (fstate == OPAL_EEH_STOPPED_MMIO_FREEZE ||
@@ -425,10 +424,9 @@ static void pnv_pci_config_check_eeh(struct pnv_phb *phb,
}
}
-int pnv_pci_cfg_read(struct device_node *dn,
+int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val)
{
- struct pci_dn *pdn = PCI_DN(dn);
struct pnv_phb *phb = pdn->phb->private_data;
u32 bdfn = (pdn->busno << 8) | pdn->devfn;
s64 rc;
@@ -462,10 +460,9 @@ int pnv_pci_cfg_read(struct device_node *dn,
return PCIBIOS_SUCCESSFUL;
}
-int pnv_pci_cfg_write(struct device_node *dn,
+int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val)
{
- struct pci_dn *pdn = PCI_DN(dn);
struct pnv_phb *phb = pdn->phb->private_data;
u32 bdfn = (pdn->busno << 8) | pdn->devfn;
@@ -489,18 +486,17 @@ int pnv_pci_cfg_write(struct device_node *dn,
}
#if CONFIG_EEH
-static bool pnv_pci_cfg_check(struct pci_controller *hose,
- struct device_node *dn)
+static bool pnv_pci_cfg_check(struct pci_dn *pdn)
{
struct eeh_dev *edev = NULL;
- struct pnv_phb *phb = hose->private_data;
+ struct pnv_phb *phb = pdn->phb->private_data;
/* EEH not enabled ? */
if (!(phb->flags & PNV_PHB_FLAG_EEH))
return true;
/* PE reset or device removed ? */
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn->edev;
if (edev) {
if (edev->pe &&
(edev->pe->state & EEH_PE_CFG_BLOCKED))
@@ -513,8 +509,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose,
return true;
}
#else
-static inline pnv_pci_cfg_check(struct pci_controller *hose,
- struct device_node *dn)
+static inline pnv_pci_cfg_check(struct pci_dn *pdn)
{
return true;
}
@@ -524,32 +519,26 @@ static int pnv_pci_read_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 *val)
{
- struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
struct pci_dn *pdn;
struct pnv_phb *phb;
- bool found = false;
int ret;
*val = 0xFFFFFFFF;
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn) {
- phb = pdn->phb->private_data;
- found = true;
- break;
- }
- }
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
+ if (!pnv_pci_cfg_check(pdn))
return PCIBIOS_DEVICE_NOT_FOUND;
- ret = pnv_pci_cfg_read(dn, where, size, val);
- if (phb->flags & PNV_PHB_FLAG_EEH) {
+ ret = pnv_pci_cfg_read(pdn, where, size, val);
+ phb = pdn->phb->private_data;
+ if (phb->flags & PNV_PHB_FLAG_EEH && pdn->edev) {
if (*val == EEH_IO_ERROR_VALUE(size) &&
- eeh_dev_check_failure(of_node_to_eeh_dev(dn)))
+ eeh_dev_check_failure(pdn->edev))
return PCIBIOS_DEVICE_NOT_FOUND;
} else {
- pnv_pci_config_check_eeh(phb, dn);
+ pnv_pci_config_check_eeh(pdn);
}
return ret;
@@ -559,27 +548,21 @@ static int pnv_pci_write_config(struct pci_bus *bus,
unsigned int devfn,
int where, int size, u32 val)
{
- struct device_node *dn, *busdn = pci_bus_to_OF_node(bus);
struct pci_dn *pdn;
struct pnv_phb *phb;
- bool found = false;
int ret;
- for (dn = busdn->child; dn; dn = dn->sibling) {
- pdn = PCI_DN(dn);
- if (pdn && pdn->devfn == devfn) {
- phb = pdn->phb->private_data;
- found = true;
- break;
- }
- }
+ pdn = pci_get_pdn_by_devfn(bus, devfn);
+ if (!pdn)
+ return PCIBIOS_DEVICE_NOT_FOUND;
- if (!found || !pnv_pci_cfg_check(pdn->phb, dn))
+ if (!pnv_pci_cfg_check(pdn))
return PCIBIOS_DEVICE_NOT_FOUND;
- ret = pnv_pci_cfg_write(dn, where, size, val);
+ ret = pnv_pci_cfg_write(pdn, where, size, val);
+ phb = pdn->phb->private_data;
if (!(phb->flags & PNV_PHB_FLAG_EEH))
- pnv_pci_config_check_eeh(phb, dn);
+ pnv_pci_config_check_eeh(pdn);
return ret;
}
@@ -679,66 +662,31 @@ void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
tbl->it_type = TCE_PCI;
}
-static struct iommu_table *pnv_pci_setup_bml_iommu(struct pci_controller *hose)
-{
- struct iommu_table *tbl;
- const __be64 *basep, *swinvp;
- const __be32 *sizep;
-
- basep = of_get_property(hose->dn, "linux,tce-base", NULL);
- sizep = of_get_property(hose->dn, "linux,tce-size", NULL);
- if (basep == NULL || sizep == NULL) {
- pr_err("PCI: %s has missing tce entries !\n",
- hose->dn->full_name);
- return NULL;
- }
- tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, hose->node);
- if (WARN_ON(!tbl))
- return NULL;
- pnv_pci_setup_iommu_table(tbl, __va(be64_to_cpup(basep)),
- be32_to_cpup(sizep), 0, IOMMU_PAGE_SHIFT_4K);
- iommu_init_table(tbl, hose->node);
- iommu_register_group(tbl, pci_domain_nr(hose->bus), 0);
-
- /* Deal with SW invalidated TCEs when needed (BML way) */
- swinvp = of_get_property(hose->dn, "linux,tce-sw-invalidate-info",
- NULL);
- if (swinvp) {
- tbl->it_busno = be64_to_cpu(swinvp[1]);
- tbl->it_index = (unsigned long)ioremap(be64_to_cpup(swinvp), 8);
- tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
- }
- return tbl;
-}
-
-static void pnv_pci_dma_fallback_setup(struct pci_controller *hose,
- struct pci_dev *pdev)
-{
- struct device_node *np = pci_bus_to_OF_node(hose->bus);
- struct pci_dn *pdn;
-
- if (np == NULL)
- return;
- pdn = PCI_DN(np);
- if (!pdn->iommu_table)
- pdn->iommu_table = pnv_pci_setup_bml_iommu(hose);
- if (!pdn->iommu_table)
- return;
- set_iommu_table_base_and_group(&pdev->dev, pdn->iommu_table);
-}
-
static void pnv_pci_dma_dev_setup(struct pci_dev *pdev)
{
struct pci_controller *hose = pci_bus_to_host(pdev->bus);
struct pnv_phb *phb = hose->private_data;
+#ifdef CONFIG_PCI_IOV
+ struct pnv_ioda_pe *pe;
+ struct pci_dn *pdn;
+
+ /* Fix the VF pdn PE number */
+ if (pdev->is_virtfn) {
+ pdn = pci_get_pdn(pdev);
+ WARN_ON(pdn->pe_number != IODA_INVALID_PE);
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ if (pe->rid == ((pdev->bus->number << 8) |
+ (pdev->devfn & 0xff))) {
+ pdn->pe_number = pe->pe_number;
+ pe->pdev = pdev;
+ break;
+ }
+ }
+ }
+#endif /* CONFIG_PCI_IOV */
- /* If we have no phb structure, try to setup a fallback based on
- * the device-tree (RTAS PCI for example)
- */
if (phb && phb->dma_dev_setup)
phb->dma_dev_setup(phb, pdev);
- else
- pnv_pci_dma_fallback_setup(hose, pdev);
}
int pnv_pci_dma_set_mask(struct pci_dev *pdev, u64 dma_mask)
@@ -784,44 +732,36 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_IBM, 0x3b9, pnv_p7ioc_rc_quirk);
void __init pnv_pci_init(void)
{
struct device_node *np;
+ bool found_ioda = false;
pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
- /* OPAL absent, try POPAL first then RTAS detection of PHBs */
- if (!firmware_has_feature(FW_FEATURE_OPAL)) {
-#ifdef CONFIG_PPC_POWERNV_RTAS
- init_pci_config_tokens();
- find_and_init_phbs();
-#endif /* CONFIG_PPC_POWERNV_RTAS */
- }
- /* OPAL is here, do our normal stuff */
- else {
- int found_ioda = 0;
+ /* If we don't have OPAL, eg. in sim, just skip PCI probe */
+ if (!firmware_has_feature(FW_FEATURE_OPAL))
+ return;
- /* Look for IODA IO-Hubs. We don't support mixing IODA
- * and p5ioc2 due to the need to change some global
- * probing flags
- */
- for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
- pnv_pci_init_ioda_hub(np);
- found_ioda = 1;
- }
+ /* Look for IODA IO-Hubs. We don't support mixing IODA
+ * and p5ioc2 due to the need to change some global
+ * probing flags
+ */
+ for_each_compatible_node(np, NULL, "ibm,ioda-hub") {
+ pnv_pci_init_ioda_hub(np);
+ found_ioda = true;
+ }
- /* Look for p5ioc2 IO-Hubs */
- if (!found_ioda)
- for_each_compatible_node(np, NULL, "ibm,p5ioc2")
- pnv_pci_init_p5ioc2_hub(np);
+ /* Look for p5ioc2 IO-Hubs */
+ if (!found_ioda)
+ for_each_compatible_node(np, NULL, "ibm,p5ioc2")
+ pnv_pci_init_p5ioc2_hub(np);
- /* Look for ioda2 built-in PHB3's */
- for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
- pnv_pci_init_ioda2_phb(np);
- }
+ /* Look for ioda2 built-in PHB3's */
+ for_each_compatible_node(np, NULL, "ibm,ioda2-phb")
+ pnv_pci_init_ioda2_phb(np);
/* Setup the linkage between OF nodes and PHBs */
pci_devs_phb_init();
/* Configure IOMMU DMA hooks */
- ppc_md.pci_dma_dev_setup = pnv_pci_dma_dev_setup;
ppc_md.tce_build = pnv_tce_build_vm;
ppc_md.tce_free = pnv_tce_free_vm;
ppc_md.tce_build_rm = pnv_tce_build_rm;
@@ -837,3 +777,7 @@ void __init pnv_pci_init(void)
}
machine_subsys_initcall_sync(powernv, tce_iommu_bus_notifier_init);
+
+struct pci_controller_ops pnv_pci_controller_ops = {
+ .dma_dev_setup = pnv_pci_dma_dev_setup,
+};
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 6c02ff8dd69f..070ee888fc95 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -23,6 +23,7 @@ enum pnv_phb_model {
#define PNV_IODA_PE_BUS_ALL (1 << 2) /* PE has subordinate buses */
#define PNV_IODA_PE_MASTER (1 << 3) /* Master PE in compound case */
#define PNV_IODA_PE_SLAVE (1 << 4) /* Slave PE in compound case */
+#define PNV_IODA_PE_VF (1 << 5) /* PE for one VF */
/* Data associated with a PE, including IOMMU tracking etc.. */
struct pnv_phb;
@@ -34,6 +35,9 @@ struct pnv_ioda_pe {
* entire bus (& children). In the former case, pdev
* is populated, in the later case, pbus is.
*/
+#ifdef CONFIG_PCI_IOV
+ struct pci_dev *parent_dev;
+#endif
struct pci_dev *pdev;
struct pci_bus *pbus;
@@ -53,7 +57,7 @@ struct pnv_ioda_pe {
/* "Base" iommu table, ie, 4K TCEs, 32-bit DMA */
int tce32_seg;
int tce32_segcount;
- struct iommu_table tce32_table;
+ struct iommu_table *tce32_table;
phys_addr_t tce_inval_reg_phys;
/* 64-bit TCE bypass region */
@@ -75,22 +79,6 @@ struct pnv_ioda_pe {
struct list_head list;
};
-/* IOC dependent EEH operations */
-#ifdef CONFIG_EEH
-struct pnv_eeh_ops {
- int (*post_init)(struct pci_controller *hose);
- int (*set_option)(struct eeh_pe *pe, int option);
- int (*get_state)(struct eeh_pe *pe);
- int (*reset)(struct eeh_pe *pe, int option);
- int (*get_log)(struct eeh_pe *pe, int severity,
- char *drv_log, unsigned long len);
- int (*configure_bridge)(struct eeh_pe *pe);
- int (*err_inject)(struct eeh_pe *pe, int type, int func,
- unsigned long addr, unsigned long mask);
- int (*next_error)(struct eeh_pe **pe);
-};
-#endif /* CONFIG_EEH */
-
#define PNV_PHB_FLAG_EEH (1 << 0)
struct pnv_phb {
@@ -104,10 +92,6 @@ struct pnv_phb {
int initialized;
spinlock_t lock;
-#ifdef CONFIG_EEH
- struct pnv_eeh_ops *eeh_ops;
-#endif
-
#ifdef CONFIG_DEBUG_FS
int has_dbgfs;
struct dentry *dbgfs;
@@ -165,6 +149,8 @@ struct pnv_phb {
/* PE allocation bitmap */
unsigned long *pe_alloc;
+ /* PE allocation mutex */
+ struct mutex pe_alloc_mutex;
/* M32 & IO segment maps */
unsigned int *m32_segmap;
@@ -179,6 +165,7 @@ struct pnv_phb {
* on the sequence of creation
*/
struct list_head pe_list;
+ struct mutex pe_list_mutex;
/* Reverse map of PEs, will have to extend if
* we are to support more than 256 PEs, indexed
@@ -213,15 +200,12 @@ struct pnv_phb {
};
extern struct pci_ops pnv_pci_ops;
-#ifdef CONFIG_EEH
-extern struct pnv_eeh_ops ioda_eeh_ops;
-#endif
void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
unsigned char *log_buff);
-int pnv_pci_cfg_read(struct device_node *dn,
+int pnv_pci_cfg_read(struct pci_dn *pdn,
int where, int size, u32 *val);
-int pnv_pci_cfg_write(struct device_node *dn,
+int pnv_pci_cfg_write(struct pci_dn *pdn,
int where, int size, u32 val);
extern void pnv_pci_setup_iommu_table(struct iommu_table *tbl,
void *tce_mem, u64 tce_size,
@@ -232,6 +216,6 @@ extern void pnv_pci_init_ioda2_phb(struct device_node *np);
extern void pnv_pci_ioda_tce_invalidate(struct iommu_table *tbl,
__be64 *startp, __be64 *endp, bool rm);
extern void pnv_pci_reset_secondary_bus(struct pci_dev *dev);
-extern int ioda_eeh_phb_reset(struct pci_controller *hose, int option);
+extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
#endif /* __POWERNV_PCI_H */
diff --git a/arch/powerpc/platforms/powernv/powernv.h b/arch/powerpc/platforms/powernv/powernv.h
index 604c48e7879a..826d2c9bea56 100644
--- a/arch/powerpc/platforms/powernv/powernv.h
+++ b/arch/powerpc/platforms/powernv/powernv.h
@@ -29,6 +29,8 @@ static inline u64 pnv_pci_dma_get_required_mask(struct pci_dev *pdev)
}
#endif
+extern struct pci_controller_ops pnv_pci_controller_ops;
+
extern u32 pnv_get_supported_cpuidle_states(void);
extern void pnv_lpc_init(void);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index d2de7d5d7574..16fdcb23f4c3 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -32,7 +32,6 @@
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/xics.h>
-#include <asm/rtas.h>
#include <asm/opal.h>
#include <asm/kexec.h>
#include <asm/smp.h>
@@ -278,20 +277,6 @@ static void __init pnv_setup_machdep_opal(void)
ppc_md.handle_hmi_exception = opal_handle_hmi_exception;
}
-#ifdef CONFIG_PPC_POWERNV_RTAS
-static void __init pnv_setup_machdep_rtas(void)
-{
- if (rtas_token("get-time-of-day") != RTAS_UNKNOWN_SERVICE) {
- ppc_md.get_boot_time = rtas_get_boot_time;
- ppc_md.get_rtc_time = rtas_get_rtc_time;
- ppc_md.set_rtc_time = rtas_set_rtc_time;
- }
- ppc_md.restart = rtas_restart;
- pm_power_off = rtas_power_off;
- ppc_md.halt = rtas_halt;
-}
-#endif /* CONFIG_PPC_POWERNV_RTAS */
-
static u32 supported_cpuidle_states;
int pnv_save_sprs_for_winkle(void)
@@ -409,37 +394,39 @@ static int __init pnv_init_idle_states(void)
{
struct device_node *power_mgt;
int dt_idle_states;
- const __be32 *idle_state_flags;
- u32 len_flags, flags;
+ u32 *flags;
int i;
supported_cpuidle_states = 0;
if (cpuidle_disable != IDLE_NO_OVERRIDE)
- return 0;
+ goto out;
if (!firmware_has_feature(FW_FEATURE_OPALv3))
- return 0;
+ goto out;
power_mgt = of_find_node_by_path("/ibm,opal/power-mgt");
if (!power_mgt) {
pr_warn("opal: PowerMgmt Node not found\n");
- return 0;
+ goto out;
+ }
+ dt_idle_states = of_property_count_u32_elems(power_mgt,
+ "ibm,cpu-idle-state-flags");
+ if (dt_idle_states < 0) {
+ pr_warn("cpuidle-powernv: no idle states found in the DT\n");
+ goto out;
}
- idle_state_flags = of_get_property(power_mgt,
- "ibm,cpu-idle-state-flags", &len_flags);
- if (!idle_state_flags) {
- pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-flags\n");
- return 0;
+ flags = kzalloc(sizeof(*flags) * dt_idle_states, GFP_KERNEL);
+ if (of_property_read_u32_array(power_mgt,
+ "ibm,cpu-idle-state-flags", flags, dt_idle_states)) {
+ pr_warn("cpuidle-powernv: missing ibm,cpu-idle-state-flags in DT\n");
+ goto out_free;
}
- dt_idle_states = len_flags / sizeof(u32);
+ for (i = 0; i < dt_idle_states; i++)
+ supported_cpuidle_states |= flags[i];
- for (i = 0; i < dt_idle_states; i++) {
- flags = be32_to_cpu(idle_state_flags[i]);
- supported_cpuidle_states |= flags;
- }
if (!(supported_cpuidle_states & OPAL_PM_SLEEP_ENABLED_ER1)) {
patch_instruction(
(unsigned int *)pnv_fastsleep_workaround_at_entry,
@@ -449,6 +436,9 @@ static int __init pnv_init_idle_states(void)
PPC_INST_NOP);
}
pnv_alloc_idle_core_states();
+out_free:
+ kfree(flags);
+out:
return 0;
}
@@ -465,10 +455,6 @@ static int __init pnv_probe(void)
if (firmware_has_feature(FW_FEATURE_OPAL))
pnv_setup_machdep_opal();
-#ifdef CONFIG_PPC_POWERNV_RTAS
- else if (rtas.base)
- pnv_setup_machdep_rtas();
-#endif /* CONFIG_PPC_POWERNV_RTAS */
pr_debug("PowerNV detected !\n");
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 38a45088f633..8f70ba681a78 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -25,7 +25,6 @@
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
-#include <asm/rtas.h>
#include <asm/vdso_datapage.h>
#include <asm/cputhreads.h>
#include <asm/xics.h>
@@ -251,18 +250,6 @@ void __init pnv_smp_init(void)
{
smp_ops = &pnv_smp_ops;
- /* XXX We don't yet have a proper entry point from HAL, for
- * now we rely on kexec-style entry from BML
- */
-
-#ifdef CONFIG_PPC_RTAS
- /* Non-lpar has additional take/give timebase */
- if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) {
- smp_ops->give_timebase = rtas_give_timebase;
- smp_ops->take_timebase = rtas_take_timebase;
- }
-#endif /* CONFIG_PPC_RTAS */
-
#ifdef CONFIG_HOTPLUG_CPU
ppc_md.cpu_die = pnv_smp_cpu_kill_self;
#endif
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c
index b358bec6c8cb..3c7707af3384 100644
--- a/arch/powerpc/platforms/ps3/smp.c
+++ b/arch/powerpc/platforms/ps3/smp.c
@@ -57,7 +57,7 @@ static void ps3_smp_message_pass(int cpu, int msg)
" (%d)\n", __func__, __LINE__, cpu, msg, result);
}
-static int __init ps3_smp_probe(void)
+static void __init ps3_smp_probe(void)
{
int cpu;
@@ -100,8 +100,6 @@ static int __init ps3_smp_probe(void)
DBG(" <- %s:%d: (%d)\n", __func__, __LINE__, cpu);
}
-
- return 2;
}
void ps3_smp_cleanup_cpu(int cpu)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index a758a9c3bbba..54c87d5d349d 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -16,7 +16,6 @@ config PPC_PSERIES
select PPC_UDBG_16550
select PPC_NATIVE
select PPC_PCI_CHOICE if EXPERT
- select ZLIB_DEFLATE
select PPC_DOORBELL
select HAVE_CONTEXT_TRACKING
select HOTPLUG_CPU if SMP
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index c22bb1b4beb8..b4b11096ea8b 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -10,6 +10,8 @@
* 2 as published by the Free Software Foundation.
*/
+#define pr_fmt(fmt) "dlpar: " fmt
+
#include <linux/kernel.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
@@ -535,13 +537,125 @@ static ssize_t dlpar_cpu_release(const char *buf, size_t count)
return count;
}
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
+
+static int handle_dlpar_errorlog(struct pseries_hp_errorlog *hp_elog)
+{
+ int rc;
+
+ /* pseries error logs are in BE format, convert to cpu type */
+ switch (hp_elog->id_type) {
+ case PSERIES_HP_ELOG_ID_DRC_COUNT:
+ hp_elog->_drc_u.drc_count =
+ be32_to_cpu(hp_elog->_drc_u.drc_count);
+ break;
+ case PSERIES_HP_ELOG_ID_DRC_INDEX:
+ hp_elog->_drc_u.drc_index =
+ be32_to_cpu(hp_elog->_drc_u.drc_index);
+ }
+
+ switch (hp_elog->resource) {
+ case PSERIES_HP_ELOG_RESOURCE_MEM:
+ rc = dlpar_memory(hp_elog);
+ break;
+ default:
+ pr_warn_ratelimited("Invalid resource (%d) specified\n",
+ hp_elog->resource);
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static ssize_t dlpar_store(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct pseries_hp_errorlog *hp_elog;
+ const char *arg;
+ int rc;
+
+ hp_elog = kzalloc(sizeof(*hp_elog), GFP_KERNEL);
+ if (!hp_elog) {
+ rc = -ENOMEM;
+ goto dlpar_store_out;
+ }
+
+ /* Parse out the request from the user, this will be in the form
+ * <resource> <action> <id_type> <id>
+ */
+ arg = buf;
+ if (!strncmp(arg, "memory", 6)) {
+ hp_elog->resource = PSERIES_HP_ELOG_RESOURCE_MEM;
+ arg += strlen("memory ");
+ } else {
+ pr_err("Invalid resource specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ if (!strncmp(arg, "add", 3)) {
+ hp_elog->action = PSERIES_HP_ELOG_ACTION_ADD;
+ arg += strlen("add ");
+ } else if (!strncmp(arg, "remove", 6)) {
+ hp_elog->action = PSERIES_HP_ELOG_ACTION_REMOVE;
+ arg += strlen("remove ");
+ } else {
+ pr_err("Invalid action specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ if (!strncmp(arg, "index", 5)) {
+ u32 index;
+
+ hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_INDEX;
+ arg += strlen("index ");
+ if (kstrtou32(arg, 0, &index)) {
+ rc = -EINVAL;
+ pr_err("Invalid drc_index specified: \"%s\"\n", buf);
+ goto dlpar_store_out;
+ }
+
+ hp_elog->_drc_u.drc_index = cpu_to_be32(index);
+ } else if (!strncmp(arg, "count", 5)) {
+ u32 count;
+
+ hp_elog->id_type = PSERIES_HP_ELOG_ID_DRC_COUNT;
+ arg += strlen("count ");
+ if (kstrtou32(arg, 0, &count)) {
+ rc = -EINVAL;
+ pr_err("Invalid count specified: \"%s\"\n", buf);
+ goto dlpar_store_out;
+ }
+
+ hp_elog->_drc_u.drc_count = cpu_to_be32(count);
+ } else {
+ pr_err("Invalid id_type specified: \"%s\"\n", buf);
+ rc = -EINVAL;
+ goto dlpar_store_out;
+ }
+
+ rc = handle_dlpar_errorlog(hp_elog);
+
+dlpar_store_out:
+ kfree(hp_elog);
+ return rc ? rc : count;
+}
+
+static CLASS_ATTR(dlpar, S_IWUSR, NULL, dlpar_store);
+
static int __init pseries_dlpar_init(void)
{
+ int rc;
+
+#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
ppc_md.cpu_probe = dlpar_cpu_probe;
ppc_md.cpu_release = dlpar_cpu_release;
+#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
- return 0;
+ rc = sysfs_create_file(kernel_kobj, &class_attr_dlpar.attr);
+
+ return rc;
}
machine_device_initcall(pseries, pseries_dlpar_init);
-#endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
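The new interface hangs a write-only attribute off kernel_kobj, so a request arrives as a single string of the form "<resource> <action> <id_type> <id>" (for example "memory add count 4" written to /sys/kernel/dlpar); returning count or a negative errno from the store callback is what makes the userspace write succeed or fail. A minimal sketch of that sysfs hookup, with demo_* names standing in for the real ones:

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static ssize_t demo_store(struct class *class, struct class_attribute *attr,
			  const char *buf, size_t count)
{
	/* parse "<resource> <action> <id_type> <id>" out of buf here */
	return count;
}

static CLASS_ATTR(demo, S_IWUSR, NULL, demo_store);

static int __init demo_init(void)
{
	/* creates /sys/kernel/demo, analogous to /sys/kernel/dlpar */
	return sysfs_create_file(kernel_kobj, &class_attr_demo.attr);
}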
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
index a6c7e19f5eb3..2039397cc75d 100644
--- a/arch/powerpc/platforms/pseries/eeh_pseries.c
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -118,9 +118,8 @@ static int pseries_eeh_init(void)
return 0;
}
-static int pseries_eeh_cap_start(struct device_node *dn)
+static int pseries_eeh_cap_start(struct pci_dn *pdn)
{
- struct pci_dn *pdn = PCI_DN(dn);
u32 status;
if (!pdn)
@@ -134,10 +133,9 @@ static int pseries_eeh_cap_start(struct device_node *dn)
}
-static int pseries_eeh_find_cap(struct device_node *dn, int cap)
+static int pseries_eeh_find_cap(struct pci_dn *pdn, int cap)
{
- struct pci_dn *pdn = PCI_DN(dn);
- int pos = pseries_eeh_cap_start(dn);
+ int pos = pseries_eeh_cap_start(pdn);
int cnt = 48; /* Maximal number of capabilities */
u32 id;
@@ -160,10 +158,9 @@ static int pseries_eeh_find_cap(struct device_node *dn, int cap)
return 0;
}
-static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
+static int pseries_eeh_find_ecap(struct pci_dn *pdn, int cap)
{
- struct pci_dn *pdn = PCI_DN(dn);
- struct eeh_dev *edev = of_node_to_eeh_dev(dn);
+ struct eeh_dev *edev = pdn_to_eeh_dev(pdn);
u32 header;
int pos = 256;
int ttl = (4096 - 256) / 8;
@@ -191,53 +188,44 @@ static int pseries_eeh_find_ecap(struct device_node *dn, int cap)
}
/**
- * pseries_eeh_of_probe - EEH probe on the given device
- * @dn: OF node
- * @flag: Unused
+ * pseries_eeh_probe - EEH probe on the given device
+ * @pdn: PCI device node
+ * @data: Unused
*
* When EEH module is installed during system boot, all PCI devices
* are checked one by one to see if it supports EEH. The function
* is introduced for the purpose.
*/
-static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
+static void *pseries_eeh_probe(struct pci_dn *pdn, void *data)
{
struct eeh_dev *edev;
struct eeh_pe pe;
- struct pci_dn *pdn = PCI_DN(dn);
- const __be32 *classp, *vendorp, *devicep;
- u32 class_code;
- const __be32 *regs;
u32 pcie_flags;
int enable = 0;
int ret;
/* Retrieve OF node and eeh device */
- edev = of_node_to_eeh_dev(dn);
- if (edev->pe || !of_device_is_available(dn))
+ edev = pdn_to_eeh_dev(pdn);
+ if (!edev || edev->pe)
return NULL;
- /* Retrieve class/vendor/device IDs */
- classp = of_get_property(dn, "class-code", NULL);
- vendorp = of_get_property(dn, "vendor-id", NULL);
- devicep = of_get_property(dn, "device-id", NULL);
-
- /* Skip for bad OF node or PCI-ISA bridge */
- if (!classp || !vendorp || !devicep)
- return NULL;
- if (dn->type && !strcmp(dn->type, "isa"))
+ /* Check class/vendor/device IDs */
+ if (!pdn->vendor_id || !pdn->device_id || !pdn->class_code)
return NULL;
- class_code = of_read_number(classp, 1);
+ /* Skip for PCI-ISA bridge */
+ if ((pdn->class_code >> 8) == PCI_CLASS_BRIDGE_ISA)
+ return NULL;
/*
* Update class code and mode of eeh device. We need
* correctly reflects that current device is root port
* or PCIe switch downstream port.
*/
- edev->class_code = class_code;
- edev->pcix_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_PCIX);
- edev->pcie_cap = pseries_eeh_find_cap(dn, PCI_CAP_ID_EXP);
- edev->aer_cap = pseries_eeh_find_ecap(dn, PCI_EXT_CAP_ID_ERR);
+ edev->class_code = pdn->class_code;
+ edev->pcix_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_PCIX);
+ edev->pcie_cap = pseries_eeh_find_cap(pdn, PCI_CAP_ID_EXP);
+ edev->aer_cap = pseries_eeh_find_ecap(pdn, PCI_EXT_CAP_ID_ERR);
edev->mode &= 0xFFFFFF00;
if ((edev->class_code >> 8) == PCI_CLASS_BRIDGE_PCI) {
edev->mode |= EEH_DEV_BRIDGE;
@@ -252,24 +240,16 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
}
}
- /* Retrieve the device address */
- regs = of_get_property(dn, "reg", NULL);
- if (!regs) {
- pr_warn("%s: OF node property %s::reg not found\n",
- __func__, dn->full_name);
- return NULL;
- }
-
/* Initialize the fake PE */
memset(&pe, 0, sizeof(struct eeh_pe));
pe.phb = edev->phb;
- pe.config_addr = of_read_number(regs, 1);
+ pe.config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
/* Enable EEH on the device */
ret = eeh_ops->set_option(&pe, EEH_OPT_ENABLE);
if (!ret) {
- edev->config_addr = of_read_number(regs, 1);
/* Retrieve PE address */
+ edev->config_addr = (pdn->busno << 16) | (pdn->devfn << 8);
edev->pe_config_addr = eeh_ops->get_pe_addr(&pe);
pe.addr = edev->pe_config_addr;
@@ -285,16 +265,17 @@ static void *pseries_eeh_of_probe(struct device_node *dn, void *flag)
eeh_add_flag(EEH_ENABLED);
eeh_add_to_parent_pe(edev);
- pr_debug("%s: EEH enabled on %s PHB#%d-PE#%x, config addr#%x\n",
- __func__, dn->full_name, pe.phb->global_number,
- pe.addr, pe.config_addr);
- } else if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
- (of_node_to_eeh_dev(dn->parent))->pe) {
+ pr_debug("%s: EEH enabled on %02x:%02x.%01x PHB#%d-PE#%x\n",
+ __func__, pdn->busno, PCI_SLOT(pdn->devfn),
+ PCI_FUNC(pdn->devfn), pe.phb->global_number,
+ pe.addr);
+ } else if (pdn->parent && pdn_to_eeh_dev(pdn->parent) &&
+ (pdn_to_eeh_dev(pdn->parent))->pe) {
/* This device doesn't support EEH, but it may have an
* EEH parent, in which case we mark it as supported.
*/
- edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
- edev->pe_config_addr = of_node_to_eeh_dev(dn->parent)->pe_config_addr;
+ edev->config_addr = pdn_to_eeh_dev(pdn->parent)->config_addr;
+ edev->pe_config_addr = pdn_to_eeh_dev(pdn->parent)->pe_config_addr;
eeh_add_to_parent_pe(edev);
}
}
@@ -670,45 +651,36 @@ static int pseries_eeh_configure_bridge(struct eeh_pe *pe)
/**
* pseries_eeh_read_config - Read PCI config space
- * @dn: device node
+ * @pdn: PCI device node
* @where: PCI address
* @size: size to read
* @val: return value
*
* Read config space from the specified device
*/
-static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
+static int pseries_eeh_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
- struct pci_dn *pdn;
-
- pdn = PCI_DN(dn);
-
return rtas_read_config(pdn, where, size, val);
}
/**
* pseries_eeh_write_config - Write PCI config space
- * @dn: device node
+ * @pdn: PCI device node
* @where: PCI address
* @size: size to write
* @val: value to be written
*
* Write config space to the specified device
*/
-static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
+static int pseries_eeh_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
- struct pci_dn *pdn;
-
- pdn = PCI_DN(dn);
-
return rtas_write_config(pdn, where, size, val);
}
static struct eeh_ops pseries_eeh_ops = {
.name = "pseries",
.init = pseries_eeh_init,
- .of_probe = pseries_eeh_of_probe,
- .dev_probe = NULL,
+ .probe = pseries_eeh_probe,
.set_option = pseries_eeh_set_option,
.get_pe_addr = pseries_eeh_get_pe_addr,
.get_state = pseries_eeh_get_state,
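The probe no longer reads the "reg" property; it rebuilds the RTAS config address straight from pci_dn, with the bus number in bits 23-16 and devfn in bits 15-8. A small illustrative helper (not kernel API) showing the encoding:

/* e.g. bus 0x42, slot 0x1f, fn 2 (devfn 0xfa) -> 0x0042fa00 */
static inline u32 demo_eeh_cfg_addr(u8 busno, u8 devfn)
{
	return ((u32)busno << 16) | ((u32)devfn << 8);
}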
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index fa41f0da5b6f..0ced387e1463 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -9,11 +9,14 @@
* 2 of the License, or (at your option) any later version.
*/
+#define pr_fmt(fmt) "pseries-hotplug-mem: " fmt
+
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
+#include <linux/slab.h>
#include <asm/firmware.h>
#include <asm/machdep.h>
@@ -21,6 +24,8 @@
#include <asm/sparsemem.h>
#include "pseries.h"
+static bool rtas_hp_event;
+
unsigned long pseries_memory_block_size(void)
{
struct device_node *np;
@@ -64,6 +69,67 @@ unsigned long pseries_memory_block_size(void)
return memblock_size;
}
+static void dlpar_free_drconf_property(struct property *prop)
+{
+ kfree(prop->name);
+ kfree(prop->value);
+ kfree(prop);
+}
+
+static struct property *dlpar_clone_drconf_property(struct device_node *dn)
+{
+ struct property *prop, *new_prop;
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i;
+
+ prop = of_find_property(dn, "ibm,dynamic-memory", NULL);
+ if (!prop)
+ return NULL;
+
+ new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL);
+ if (!new_prop)
+ return NULL;
+
+ new_prop->name = kstrdup(prop->name, GFP_KERNEL);
+ new_prop->value = kmalloc(prop->length, GFP_KERNEL);
+ if (!new_prop->name || !new_prop->value) {
+ dlpar_free_drconf_property(new_prop);
+ return NULL;
+ }
+
+ memcpy(new_prop->value, prop->value, prop->length);
+ new_prop->length = prop->length;
+
+ /* Convert the property to cpu endian-ness */
+ p = new_prop->value;
+ *p = be32_to_cpu(*p);
+
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = be64_to_cpu(lmbs[i].base_addr);
+ lmbs[i].drc_index = be32_to_cpu(lmbs[i].drc_index);
+ lmbs[i].flags = be32_to_cpu(lmbs[i].flags);
+ }
+
+ return new_prop;
+}
+
+static struct memory_block *lmb_to_memblock(struct of_drconf_cell *lmb)
+{
+ unsigned long section_nr;
+ struct mem_section *mem_sect;
+ struct memory_block *mem_block;
+
+ section_nr = pfn_to_section_nr(PFN_DOWN(lmb->base_addr));
+ mem_sect = __nr_to_section(section_nr);
+
+ mem_block = find_memory_block(mem_sect);
+ return mem_block;
+}
+
#ifdef CONFIG_MEMORY_HOTREMOVE
static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
{
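lmb_to_memblock() above maps a physical address to its struct memory_block by way of the sparse-memory section that contains it; find_memory_block() returns the block with a device reference held, which is why the add/remove paths below pair device_online()/device_offline() with put_device(). A condensed sketch of the lookup (the demo_ prefix is illustrative):

static struct memory_block *demo_addr_to_memblock(u64 phys_addr)
{
	unsigned long section_nr = pfn_to_section_nr(PFN_DOWN(phys_addr));

	/* the returned block's device reference must be dropped with put_device() */
	return find_memory_block(__nr_to_section(section_nr));
}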
@@ -122,6 +188,173 @@ static int pseries_remove_mem_node(struct device_node *np)
pseries_remove_memblock(base, lmb_size);
return 0;
}
+
+static bool lmb_is_removable(struct of_drconf_cell *lmb)
+{
+ int i, scns_per_block;
+ int rc = 1;
+ unsigned long pfn, block_sz;
+ u64 phys_addr;
+
+ if (!(lmb->flags & DRCONF_MEM_ASSIGNED))
+ return false;
+
+ block_sz = memory_block_size_bytes();
+ scns_per_block = block_sz / MIN_MEMORY_BLOCK_SIZE;
+ phys_addr = lmb->base_addr;
+
+ for (i = 0; i < scns_per_block; i++) {
+ pfn = PFN_DOWN(phys_addr);
+ if (!pfn_present(pfn))
+ continue;
+
+ rc &= is_mem_section_removable(pfn, PAGES_PER_SECTION);
+ phys_addr += MIN_MEMORY_BLOCK_SIZE;
+ }
+
+ return rc ? true : false;
+}
+
+static int dlpar_add_lmb(struct of_drconf_cell *);
+
+static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
+{
+ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int nid, rc;
+
+ if (!lmb_is_removable(lmb))
+ return -EINVAL;
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block)
+ return -EINVAL;
+
+ rc = device_offline(&mem_block->dev);
+ put_device(&mem_block->dev);
+ if (rc)
+ return rc;
+
+ block_sz = pseries_memory_block_size();
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ remove_memory(nid, lmb->base_addr, block_sz);
+
+ /* Update memory regions for memory remove */
+ memblock_remove(lmb->base_addr, block_sz);
+
+ dlpar_release_drc(lmb->drc_index);
+
+ lmb->flags &= ~DRCONF_MEM_ASSIGNED;
+ return 0;
+}
+
+static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
+ struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ int lmbs_removed = 0;
+ int lmbs_available = 0;
+ u32 num_lmbs, *p;
+ int i, rc;
+
+ pr_info("Attempting to hot-remove %d LMB(s)\n", lmbs_to_remove);
+
+ if (lmbs_to_remove == 0)
+ return -EINVAL;
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ /* Validate that there are enough LMBs to satisfy the request */
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].flags & DRCONF_MEM_ASSIGNED)
+ lmbs_available++;
+ }
+
+ if (lmbs_available < lmbs_to_remove)
+ return -EINVAL;
+
+ for (i = 0; i < num_lmbs && lmbs_removed < lmbs_to_remove; i++) {
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ if (rc)
+ continue;
+
+ lmbs_removed++;
+
+ /* Mark this lmb so we can add it later if all of the
+ * requested LMBs cannot be removed.
+ */
+ lmbs[i].reserved = 1;
+ }
+
+ if (lmbs_removed != lmbs_to_remove) {
+ pr_err("Memory hot-remove failed, adding LMB's back\n");
+
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ rc = dlpar_add_lmb(&lmbs[i]);
+ if (rc)
+ pr_err("Failed to add LMB back, drc index %x\n",
+ lmbs[i].drc_index);
+
+ lmbs[i].reserved = 0;
+ }
+
+ rc = -EINVAL;
+ } else {
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ pr_info("Memory at %llx was hot-removed\n",
+ lmbs[i].base_addr);
+
+ lmbs[i].reserved = 0;
+ }
+ rc = 0;
+ }
+
+ return rc;
+}
+
+static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int lmb_found;
+ int i, rc;
+
+ pr_info("Attempting to hot-remove LMB, drc index %x\n", drc_index);
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ lmb_found = 0;
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].drc_index == drc_index) {
+ lmb_found = 1;
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ break;
+ }
+ }
+
+ if (!lmb_found)
+ rc = -EINVAL;
+
+ if (rc)
+ pr_info("Failed to hot-remove memory at %llx\n",
+ lmbs[i].base_addr);
+ else
+ pr_info("Memory at %llx was hot-removed\n", lmbs[i].base_addr);
+
+ return rc;
+}
+
#else
static inline int pseries_remove_memblock(unsigned long base,
unsigned int memblock_size)
@@ -132,8 +365,261 @@ static inline int pseries_remove_mem_node(struct device_node *np)
{
return 0;
}
+static inline int dlpar_memory_remove(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_remove_lmb(struct of_drconf_cell *lmb)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_memory_remove_by_count(u32 lmbs_to_remove,
+ struct property *prop)
+{
+ return -EOPNOTSUPP;
+}
+static int dlpar_memory_remove_by_index(u32 drc_index, struct property *prop)
+{
+ return -EOPNOTSUPP;
+}
+
#endif /* CONFIG_MEMORY_HOTREMOVE */
+static int dlpar_add_lmb(struct of_drconf_cell *lmb)
+{
+ struct memory_block *mem_block;
+ unsigned long block_sz;
+ int nid, rc;
+
+ if (lmb->flags & DRCONF_MEM_ASSIGNED)
+ return -EINVAL;
+
+ block_sz = memory_block_size_bytes();
+
+ rc = dlpar_acquire_drc(lmb->drc_index);
+ if (rc)
+ return rc;
+
+ /* Find the node id for this address */
+ nid = memory_add_physaddr_to_nid(lmb->base_addr);
+
+ /* Add the memory */
+ rc = add_memory(nid, lmb->base_addr, block_sz);
+ if (rc) {
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ /* Register this block of memory */
+ rc = memblock_add(lmb->base_addr, block_sz);
+ if (rc) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ mem_block = lmb_to_memblock(lmb);
+ if (!mem_block) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return -EINVAL;
+ }
+
+ rc = device_online(&mem_block->dev);
+ put_device(&mem_block->dev);
+ if (rc) {
+ remove_memory(nid, lmb->base_addr, block_sz);
+ dlpar_release_drc(lmb->drc_index);
+ return rc;
+ }
+
+ lmb->flags |= DRCONF_MEM_ASSIGNED;
+ return 0;
+}
+
+static int dlpar_memory_add_by_count(u32 lmbs_to_add, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int lmbs_available = 0;
+ int lmbs_added = 0;
+ int i, rc;
+
+ pr_info("Attempting to hot-add %d LMB(s)\n", lmbs_to_add);
+
+ if (lmbs_to_add == 0)
+ return -EINVAL;
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ /* Validate that there are enough LMBs to satisfy the request */
+ for (i = 0; i < num_lmbs; i++) {
+ if (!(lmbs[i].flags & DRCONF_MEM_ASSIGNED))
+ lmbs_available++;
+ }
+
+ if (lmbs_available < lmbs_to_add)
+ return -EINVAL;
+
+ for (i = 0; i < num_lmbs && lmbs_to_add != lmbs_added; i++) {
+ rc = dlpar_add_lmb(&lmbs[i]);
+ if (rc)
+ continue;
+
+ lmbs_added++;
+
+ /* Mark this lmb so we can remove it later if all of the
+ * requested LMBs cannot be added.
+ */
+ lmbs[i].reserved = 1;
+ }
+
+ if (lmbs_added != lmbs_to_add) {
+ pr_err("Memory hot-add failed, removing any added LMBs\n");
+
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ rc = dlpar_remove_lmb(&lmbs[i]);
+ if (rc)
+ pr_err("Failed to remove LMB, drc index %x\n",
+ be32_to_cpu(lmbs[i].drc_index));
+ }
+ rc = -EINVAL;
+ } else {
+ for (i = 0; i < num_lmbs; i++) {
+ if (!lmbs[i].reserved)
+ continue;
+
+ pr_info("Memory at %llx (drc index %x) was hot-added\n",
+ lmbs[i].base_addr, lmbs[i].drc_index);
+ lmbs[i].reserved = 0;
+ }
+ }
+
+ return rc;
+}
+
+static int dlpar_memory_add_by_index(u32 drc_index, struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i, lmb_found;
+ int rc;
+
+ pr_info("Attempting to hot-add LMB, drc index %x\n", drc_index);
+
+ p = prop->value;
+ num_lmbs = *p++;
+ lmbs = (struct of_drconf_cell *)p;
+
+ lmb_found = 0;
+ for (i = 0; i < num_lmbs; i++) {
+ if (lmbs[i].drc_index == drc_index) {
+ lmb_found = 1;
+ rc = dlpar_add_lmb(&lmbs[i]);
+ break;
+ }
+ }
+
+ if (!lmb_found)
+ rc = -EINVAL;
+
+ if (rc)
+ pr_info("Failed to hot-add memory, drc index %x\n", drc_index);
+ else
+ pr_info("Memory at %llx (drc index %x) was hot-added\n",
+ lmbs[i].base_addr, drc_index);
+
+ return rc;
+}
+
+static void dlpar_update_drconf_property(struct device_node *dn,
+ struct property *prop)
+{
+ struct of_drconf_cell *lmbs;
+ u32 num_lmbs, *p;
+ int i;
+
+ /* Convert the property back to BE */
+ p = prop->value;
+ num_lmbs = *p;
+ *p = cpu_to_be32(*p);
+ p++;
+
+ lmbs = (struct of_drconf_cell *)p;
+ for (i = 0; i < num_lmbs; i++) {
+ lmbs[i].base_addr = cpu_to_be64(lmbs[i].base_addr);
+ lmbs[i].drc_index = cpu_to_be32(lmbs[i].drc_index);
+ lmbs[i].flags = cpu_to_be32(lmbs[i].flags);
+ }
+
+ rtas_hp_event = true;
+ of_update_property(dn, prop);
+ rtas_hp_event = false;
+}
+
+int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+{
+ struct device_node *dn;
+ struct property *prop;
+ u32 count, drc_index;
+ int rc;
+
+ count = hp_elog->_drc_u.drc_count;
+ drc_index = hp_elog->_drc_u.drc_index;
+
+ lock_device_hotplug();
+
+ dn = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
+ if (!dn) {
+ rc = -EINVAL;
+ goto dlpar_memory_out;
+ }
+
+ prop = dlpar_clone_drconf_property(dn);
+ if (!prop) {
+ rc = -EINVAL;
+ goto dlpar_memory_out;
+ }
+
+ switch (hp_elog->action) {
+ case PSERIES_HP_ELOG_ACTION_ADD:
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+ rc = dlpar_memory_add_by_count(count, prop);
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ rc = dlpar_memory_add_by_index(drc_index, prop);
+ else
+ rc = -EINVAL;
+ break;
+ case PSERIES_HP_ELOG_ACTION_REMOVE:
+ if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_COUNT)
+ rc = dlpar_memory_remove_by_count(count, prop);
+ else if (hp_elog->id_type == PSERIES_HP_ELOG_ID_DRC_INDEX)
+ rc = dlpar_memory_remove_by_index(drc_index, prop);
+ else
+ rc = -EINVAL;
+ break;
+ default:
+ pr_err("Invalid action (%d) specified\n", hp_elog->action);
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ dlpar_free_drconf_property(prop);
+ else
+ dlpar_update_drconf_property(dn, prop);
+
+dlpar_memory_out:
+ of_node_put(dn);
+ unlock_device_hotplug();
+ return rc;
+}
+
static int pseries_add_mem_node(struct device_node *np)
{
const char *type;
@@ -174,6 +660,9 @@ static int pseries_update_drconf_memory(struct of_reconfig_data *pr)
__be32 *p;
int i, rc = -EINVAL;
+ if (rtas_hp_event)
+ return 0;
+
memblock_size = pseries_memory_block_size();
if (!memblock_size)
return -EINVAL;
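dlpar_memory() works on the CPU-order clone of ibm,dynamic-memory and only converts it back and writes it into the device tree when the operation succeeded; the rtas_hp_event flag makes pseries_update_drconf_memory() ignore that self-generated update. The add path itself follows a strict acquire/add/record order with unwinding on failure; a condensed sketch under the same assumptions (demo_add_block() is illustrative, and the final device_online() step is omitted for brevity):

static int demo_add_block(u32 drc_index, u64 base, unsigned long block_sz)
{
	int nid, rc;

	rc = dlpar_acquire_drc(drc_index);	/* own the DRC first */
	if (rc)
		return rc;

	nid = memory_add_physaddr_to_nid(base);

	rc = add_memory(nid, base, block_sz);	/* hot-add the range */
	if (rc)
		goto release_drc;

	rc = memblock_add(base, block_sz);	/* record it in memblock */
	if (rc)
		goto remove_mem;

	return 0;

remove_mem:
	remove_memory(nid, base, block_sz);
release_drc:
	dlpar_release_drc(drc_index);
	return rc;
}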
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 7803a19adb31..61d5a17f45c0 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -49,6 +49,7 @@
#include <asm/mmzone.h>
#include <asm/plpar_wrappers.h>
+#include "pseries.h"
static void tce_invalidate_pSeries_sw(struct iommu_table *tbl,
__be64 *startp, __be64 *endp)
@@ -1307,16 +1308,16 @@ void iommu_init_early_pSeries(void)
ppc_md.tce_free = tce_free_pSeriesLP;
}
ppc_md.tce_get = tce_get_pSeriesLP;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
+ pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeriesLP;
+ pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeriesLP;
ppc_md.dma_set_mask = dma_set_mask_pSeriesLP;
ppc_md.dma_get_required_mask = dma_get_required_mask_pSeriesLP;
} else {
ppc_md.tce_build = tce_build_pSeries;
ppc_md.tce_free = tce_free_pSeries;
ppc_md.tce_get = tce_get_pseries;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeries;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeries;
+ pseries_pci_controller_ops.dma_bus_setup = pci_dma_bus_setup_pSeries;
+ pseries_pci_controller_ops.dma_dev_setup = pci_dma_dev_setup_pSeries;
}
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c
index 8f35d525cede..ceb18d349459 100644
--- a/arch/powerpc/platforms/pseries/mobility.c
+++ b/arch/powerpc/platforms/pseries/mobility.c
@@ -320,28 +320,34 @@ static ssize_t migrate_store(struct class *class, struct class_attribute *attr,
{
u64 streamid;
int rc;
- int vasi_rc = 0;
rc = kstrtou64(buf, 0, &streamid);
if (rc)
return rc;
do {
- rc = rtas_ibm_suspend_me(streamid, &vasi_rc);
- if (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE)
+ rc = rtas_ibm_suspend_me(streamid);
+ if (rc == -EAGAIN)
ssleep(1);
- } while (!rc && vasi_rc == RTAS_NOT_SUSPENDABLE);
+ } while (rc == -EAGAIN);
if (rc)
return rc;
- if (vasi_rc)
- return vasi_rc;
post_mobility_fixup();
return count;
}
+/*
+ * Used by drmgr to determine the kernel behavior of the migration interface.
+ *
+ * Version 1: Performs all PAPR requirements for migration including
+ * firmware activation and device tree update.
+ */
+#define MIGRATION_API_VERSION 1
+
static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store);
+static CLASS_ATTR_STRING(api_version, S_IRUGO, __stringify(MIGRATION_API_VERSION));
static int __init mobility_sysfs_init(void)
{
@@ -352,7 +358,13 @@ static int __init mobility_sysfs_init(void)
return -ENOMEM;
rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr);
+ if (rc)
+ pr_err("mobility: unable to create migration sysfs file (%d)\n", rc);
- return rc;
+ rc = sysfs_create_file(mobility_kobj, &class_attr_api_version.attr.attr);
+ if (rc)
+ pr_err("mobility: unable to create api_version sysfs file (%d)\n", rc);
+
+ return 0;
}
machine_device_initcall(pseries, mobility_sysfs_init);
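CLASS_ATTR_STRING() wraps a fixed, read-only string in a struct class_attr_string, so the raw sysfs attribute sits one level deeper at class_attr_<name>.attr.attr; that is why its registration above differs from the plain migration attribute. A minimal sketch (demo names, with kernel_kobj standing in for the mobility kobject):

#include <linux/device.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/stat.h>
#include <linux/sysfs.h>

static CLASS_ATTR_STRING(demo_api_version, S_IRUGO, "1");

static int __init demo_api_version_init(void)
{
	/* read-only file that reports the fixed version string */
	return sysfs_create_file(kernel_kobj,
				 &class_attr_demo_api_version.attr.attr);
}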
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 691a154c286d..c8d24f9a6948 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -195,6 +195,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
{
struct device_node *dn;
+ struct pci_dn *pdn;
struct eeh_dev *edev;
/* Found our PE and assume 8 at that point. */
@@ -204,10 +205,11 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
return NULL;
/* Get the top level device in the PE */
- edev = of_node_to_eeh_dev(dn);
+ edev = pdn_to_eeh_dev(PCI_DN(dn));
if (edev->pe)
edev = list_first_entry(&edev->pe->edevs, struct eeh_dev, list);
- dn = eeh_dev_to_of_node(edev);
+ pdn = eeh_dev_to_pdn(edev);
+ dn = pdn ? pdn->node : NULL;
if (!dn)
return NULL;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 054a0ed5c7ee..9f8184175c86 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -20,7 +20,6 @@
#include <linux/kmsg_dump.h>
#include <linux/pstore.h>
#include <linux/ctype.h>
-#include <linux/zlib.h>
#include <asm/uaccess.h>
#include <asm/nvram.h>
#include <asm/rtas.h>
@@ -30,129 +29,17 @@
/* Max bytes to read/write in one go */
#define NVRW_CNT 0x20
-/*
- * Set oops header version to distinguish between old and new format header.
- * lnx,oops-log partition max size is 4000, header version > 4000 will
- * help in identifying new header.
- */
-#define OOPS_HDR_VERSION 5000
-
static unsigned int nvram_size;
static int nvram_fetch, nvram_store;
static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
-struct err_log_info {
- __be32 error_type;
- __be32 seq_num;
-};
-
-struct nvram_os_partition {
- const char *name;
- int req_size; /* desired size, in bytes */
- int min_size; /* minimum acceptable size (0 means req_size) */
- long size; /* size of data portion (excluding err_log_info) */
- long index; /* offset of data portion of partition */
- bool os_partition; /* partition initialized by OS, not FW */
-};
-
-static struct nvram_os_partition rtas_log_partition = {
- .name = "ibm,rtas-log",
- .req_size = 2079,
- .min_size = 1055,
- .index = -1,
- .os_partition = true
-};
-
-static struct nvram_os_partition oops_log_partition = {
- .name = "lnx,oops-log",
- .req_size = 4000,
- .min_size = 2000,
- .index = -1,
- .os_partition = true
-};
-
-static const char *pseries_nvram_os_partitions[] = {
- "ibm,rtas-log",
- "lnx,oops-log",
- NULL
-};
-
-struct oops_log_info {
- __be16 version;
- __be16 report_length;
- __be64 timestamp;
-} __attribute__((packed));
-
-static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason);
-
-static struct kmsg_dumper nvram_kmsg_dumper = {
- .dump = oops_to_nvram
-};
-
/* See clobbering_unread_rtas_event() */
#define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */
-static unsigned long last_unread_rtas_event; /* timestamp */
-
-/*
- * For capturing and compressing an oops or panic report...
-
- * big_oops_buf[] holds the uncompressed text we're capturing.
- *
- * oops_buf[] holds the compressed text, preceded by a oops header.
- * oops header has u16 holding the version of oops header (to differentiate
- * between old and new format header) followed by u16 holding the length of
- * the compressed* text (*Or uncompressed, if compression fails.) and u64
- * holding the timestamp. oops_buf[] gets written to NVRAM.
- *
- * oops_log_info points to the header. oops_data points to the compressed text.
- *
- * +- oops_buf
- * | +- oops_data
- * v v
- * +-----------+-----------+-----------+------------------------+
- * | version | length | timestamp | text |
- * | (2 bytes) | (2 bytes) | (8 bytes) | (oops_data_sz bytes) |
- * +-----------+-----------+-----------+------------------------+
- * ^
- * +- oops_log_info
- *
- * We preallocate these buffers during init to avoid kmalloc during oops/panic.
- */
-static size_t big_oops_buf_sz;
-static char *big_oops_buf, *oops_buf;
-static char *oops_data;
-static size_t oops_data_sz;
-
-/* Compression parameters */
-#define COMPR_LEVEL 6
-#define WINDOW_BITS 12
-#define MEM_LEVEL 4
-static struct z_stream_s stream;
+static time64_t last_unread_rtas_event; /* timestamp */
#ifdef CONFIG_PSTORE
-static struct nvram_os_partition of_config_partition = {
- .name = "of-config",
- .index = -1,
- .os_partition = false
-};
-
-static struct nvram_os_partition common_partition = {
- .name = "common",
- .index = -1,
- .os_partition = false
-};
-
-static enum pstore_type_id nvram_type_ids[] = {
- PSTORE_TYPE_DMESG,
- PSTORE_TYPE_PPC_RTAS,
- PSTORE_TYPE_PPC_OF,
- PSTORE_TYPE_PPC_COMMON,
- -1
-};
-static int read_type;
-static unsigned long last_rtas_event;
+time64_t last_rtas_event;
#endif
static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index)
@@ -246,132 +133,26 @@ static ssize_t pSeries_nvram_get_size(void)
return nvram_size ? nvram_size : -ENODEV;
}
-
-/* nvram_write_os_partition, nvram_write_error_log
+/* nvram_write_error_log
*
* We need to buffer the error logs into nvram to ensure that we have
- * the failure information to decode. If we have a severe error there
- * is no way to guarantee that the OS or the machine is in a state to
- * get back to user land and write the error to disk. For example if
- * the SCSI device driver causes a Machine Check by writing to a bad
- * IO address, there is no way of guaranteeing that the device driver
- * is in any state that is would also be able to write the error data
- * captured to disk, thus we buffer it in NVRAM for analysis on the
- * next boot.
- *
- * In NVRAM the partition containing the error log buffer will looks like:
- * Header (in bytes):
- * +-----------+----------+--------+------------+------------------+
- * | signature | checksum | length | name | data |
- * |0 |1 |2 3|4 15|16 length-1|
- * +-----------+----------+--------+------------+------------------+
- *
- * The 'data' section would look like (in bytes):
- * +--------------+------------+-----------------------------------+
- * | event_logged | sequence # | error log |
- * |0 3|4 7|8 error_log_size-1|
- * +--------------+------------+-----------------------------------+
- *
- * event_logged: 0 if event has not been logged to syslog, 1 if it has
- * sequence #: The unique sequence # for each event. (until it wraps)
- * error log: The error log from event_scan
+ * the failure information to decode.
*/
-static int nvram_write_os_partition(struct nvram_os_partition *part,
- char *buff, int length,
- unsigned int err_type,
- unsigned int error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (part->index == -1) {
- return -ESPIPE;
- }
-
- if (length > part->size) {
- length = part->size;
- }
-
- info.error_type = cpu_to_be32(err_type);
- info.seq_num = cpu_to_be32(error_log_cnt);
-
- tmp_index = part->index;
-
- rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
- return rc;
- }
-
- rc = ppc_md.nvram_write(buff, length, &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_write (%d)\n", __func__, rc);
- return rc;
- }
-
- return 0;
-}
-
int nvram_write_error_log(char * buff, int length,
unsigned int err_type, unsigned int error_log_cnt)
{
int rc = nvram_write_os_partition(&rtas_log_partition, buff, length,
err_type, error_log_cnt);
if (!rc) {
- last_unread_rtas_event = get_seconds();
+ last_unread_rtas_event = ktime_get_real_seconds();
#ifdef CONFIG_PSTORE
- last_rtas_event = get_seconds();
+ last_rtas_event = ktime_get_real_seconds();
#endif
}
return rc;
}
-/* nvram_read_partition
- *
- * Reads nvram partition for at most 'length'
- */
-static int nvram_read_partition(struct nvram_os_partition *part, char *buff,
- int length, unsigned int *err_type,
- unsigned int *error_log_cnt)
-{
- int rc;
- loff_t tmp_index;
- struct err_log_info info;
-
- if (part->index == -1)
- return -1;
-
- if (length > part->size)
- length = part->size;
-
- tmp_index = part->index;
-
- if (part->os_partition) {
- rc = ppc_md.nvram_read((char *)&info,
- sizeof(struct err_log_info),
- &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
- return rc;
- }
- }
-
- rc = ppc_md.nvram_read(buff, length, &tmp_index);
- if (rc <= 0) {
- pr_err("%s: Failed nvram_read (%d)\n", __func__, rc);
- return rc;
- }
-
- if (part->os_partition) {
- *error_log_cnt = be32_to_cpu(info.seq_num);
- *err_type = be32_to_cpu(info.error_type);
- }
-
- return 0;
-}
-
/* nvram_read_error_log
*
* Reads nvram for error log for at most 'length'
@@ -407,67 +188,6 @@ int nvram_clear_error_log(void)
return 0;
}
-/* pseries_nvram_init_os_partition
- *
- * This sets up a partition with an "OS" signature.
- *
- * The general strategy is the following:
- * 1.) If a partition with the indicated name already exists...
- * - If it's large enough, use it.
- * - Otherwise, recycle it and keep going.
- * 2.) Search for a free partition that is large enough.
- * 3.) If there's not a free partition large enough, recycle any obsolete
- * OS partitions and try again.
- * 4.) Will first try getting a chunk that will satisfy the requested size.
- * 5.) If a chunk of the requested size cannot be allocated, then try finding
- * a chunk that will satisfy the minum needed.
- *
- * Returns 0 on success, else -1.
- */
-static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
- *part)
-{
- loff_t p;
- int size;
-
- /* Look for ours */
- p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size);
-
- /* Found one but too small, remove it */
- if (p && size < part->min_size) {
- pr_info("nvram: Found too small %s partition,"
- " removing it...\n", part->name);
- nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL);
- p = 0;
- }
-
- /* Create one if we didn't find */
- if (!p) {
- p = nvram_create_partition(part->name, NVRAM_SIG_OS,
- part->req_size, part->min_size);
- if (p == -ENOSPC) {
- pr_info("nvram: No room to create %s partition, "
- "deleting any obsolete OS partitions...\n",
- part->name);
- nvram_remove_partition(NULL, NVRAM_SIG_OS,
- pseries_nvram_os_partitions);
- p = nvram_create_partition(part->name, NVRAM_SIG_OS,
- part->req_size, part->min_size);
- }
- }
-
- if (p <= 0) {
- pr_err("nvram: Failed to find or create %s"
- " partition, err %d\n", part->name, (int)p);
- return -1;
- }
-
- part->index = p;
- part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info);
-
- return 0;
-}
-
/*
* Are we using the ibm,rtas-log for oops/panic reports? And if so,
* would logging this oops/panic overwrite an RTAS event that rtas_errd
@@ -476,321 +196,14 @@ static int __init pseries_nvram_init_os_partition(struct nvram_os_partition
* We assume that if rtas_errd hasn't read the RTAS event in
* NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to.
*/
-static int clobbering_unread_rtas_event(void)
+int clobbering_unread_rtas_event(void)
{
return (oops_log_partition.index == rtas_log_partition.index
&& last_unread_rtas_event
- && get_seconds() - last_unread_rtas_event <=
+ && ktime_get_real_seconds() - last_unread_rtas_event <=
NVRAM_RTAS_READ_TIMEOUT);
}
-/* Derived from logfs_compress() */
-static int nvram_compress(const void *in, void *out, size_t inlen,
- size_t outlen)
-{
- int err, ret;
-
- ret = -EIO;
- err = zlib_deflateInit2(&stream, COMPR_LEVEL, Z_DEFLATED, WINDOW_BITS,
- MEM_LEVEL, Z_DEFAULT_STRATEGY);
- if (err != Z_OK)
- goto error;
-
- stream.next_in = in;
- stream.avail_in = inlen;
- stream.total_in = 0;
- stream.next_out = out;
- stream.avail_out = outlen;
- stream.total_out = 0;
-
- err = zlib_deflate(&stream, Z_FINISH);
- if (err != Z_STREAM_END)
- goto error;
-
- err = zlib_deflateEnd(&stream);
- if (err != Z_OK)
- goto error;
-
- if (stream.total_out >= stream.total_in)
- goto error;
-
- ret = stream.total_out;
-error:
- return ret;
-}
-
-/* Compress the text from big_oops_buf into oops_buf. */
-static int zip_oops(size_t text_len)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- int zipped_len = nvram_compress(big_oops_buf, oops_data, text_len,
- oops_data_sz);
- if (zipped_len < 0) {
- pr_err("nvram: compression failed; returned %d\n", zipped_len);
- pr_err("nvram: logging uncompressed oops/panic report\n");
- return -1;
- }
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(zipped_len);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
- return 0;
-}
-
-#ifdef CONFIG_PSTORE
-static int nvram_pstore_open(struct pstore_info *psi)
-{
- /* Reset the iterator to start reading partitions again */
- read_type = -1;
- return 0;
-}
-
-/**
- * nvram_pstore_write - pstore write callback for nvram
- * @type: Type of message logged
- * @reason: reason behind dump (oops/panic)
- * @id: identifier to indicate the write performed
- * @part: pstore writes data to registered buffer in parts,
- * part number will indicate the same.
- * @count: Indicates oops count
- * @compressed: Flag to indicate the log is compressed
- * @size: number of bytes written to the registered buffer
- * @psi: registered pstore_info structure
- *
- * Called by pstore_dump() when an oops or panic report is logged in the
- * printk buffer.
- * Returns 0 on successful write.
- */
-static int nvram_pstore_write(enum pstore_type_id type,
- enum kmsg_dump_reason reason,
- u64 *id, unsigned int part, int count,
- bool compressed, size_t size,
- struct pstore_info *psi)
-{
- int rc;
- unsigned int err_type = ERR_TYPE_KERNEL_PANIC;
- struct oops_log_info *oops_hdr = (struct oops_log_info *) oops_buf;
-
- /* part 1 has the recent messages from printk buffer */
- if (part > 1 || type != PSTORE_TYPE_DMESG ||
- clobbering_unread_rtas_event())
- return -1;
-
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(size);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
-
- if (compressed)
- err_type = ERR_TYPE_KERNEL_PANIC_GZ;
-
- rc = nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + size), err_type, count);
-
- if (rc != 0)
- return rc;
-
- *id = part;
- return 0;
-}
-
-/*
- * Reads the oops/panic report, rtas, of-config and common partition.
- * Returns the length of the data we read from each partition.
- * Returns 0 if we've been called before.
- */
-static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *time, char **buf,
- bool *compressed, struct pstore_info *psi)
-{
- struct oops_log_info *oops_hdr;
- unsigned int err_type, id_no, size = 0;
- struct nvram_os_partition *part = NULL;
- char *buff = NULL;
- int sig = 0;
- loff_t p;
-
- read_type++;
-
- switch (nvram_type_ids[read_type]) {
- case PSTORE_TYPE_DMESG:
- part = &oops_log_partition;
- *type = PSTORE_TYPE_DMESG;
- break;
- case PSTORE_TYPE_PPC_RTAS:
- part = &rtas_log_partition;
- *type = PSTORE_TYPE_PPC_RTAS;
- time->tv_sec = last_rtas_event;
- time->tv_nsec = 0;
- break;
- case PSTORE_TYPE_PPC_OF:
- sig = NVRAM_SIG_OF;
- part = &of_config_partition;
- *type = PSTORE_TYPE_PPC_OF;
- *id = PSTORE_TYPE_PPC_OF;
- time->tv_sec = 0;
- time->tv_nsec = 0;
- break;
- case PSTORE_TYPE_PPC_COMMON:
- sig = NVRAM_SIG_SYS;
- part = &common_partition;
- *type = PSTORE_TYPE_PPC_COMMON;
- *id = PSTORE_TYPE_PPC_COMMON;
- time->tv_sec = 0;
- time->tv_nsec = 0;
- break;
- default:
- return 0;
- }
-
- if (!part->os_partition) {
- p = nvram_find_partition(part->name, sig, &size);
- if (p <= 0) {
- pr_err("nvram: Failed to find partition %s, "
- "err %d\n", part->name, (int)p);
- return 0;
- }
- part->index = p;
- part->size = size;
- }
-
- buff = kmalloc(part->size, GFP_KERNEL);
-
- if (!buff)
- return -ENOMEM;
-
- if (nvram_read_partition(part, buff, part->size, &err_type, &id_no)) {
- kfree(buff);
- return 0;
- }
-
- *count = 0;
-
- if (part->os_partition)
- *id = id_no;
-
- if (nvram_type_ids[read_type] == PSTORE_TYPE_DMESG) {
- size_t length, hdr_size;
-
- oops_hdr = (struct oops_log_info *)buff;
- if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
- /* Old format oops header had 2-byte record size */
- hdr_size = sizeof(u16);
- length = be16_to_cpu(oops_hdr->version);
- time->tv_sec = 0;
- time->tv_nsec = 0;
- } else {
- hdr_size = sizeof(*oops_hdr);
- length = be16_to_cpu(oops_hdr->report_length);
- time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
- time->tv_nsec = 0;
- }
- *buf = kmalloc(length, GFP_KERNEL);
- if (*buf == NULL)
- return -ENOMEM;
- memcpy(*buf, buff + hdr_size, length);
- kfree(buff);
-
- if (err_type == ERR_TYPE_KERNEL_PANIC_GZ)
- *compressed = true;
- else
- *compressed = false;
- return length;
- }
-
- *buf = buff;
- return part->size;
-}
-
-static struct pstore_info nvram_pstore_info = {
- .owner = THIS_MODULE,
- .name = "nvram",
- .open = nvram_pstore_open,
- .read = nvram_pstore_read,
- .write = nvram_pstore_write,
-};
-
-static int nvram_pstore_init(void)
-{
- int rc = 0;
-
- nvram_pstore_info.buf = oops_data;
- nvram_pstore_info.bufsize = oops_data_sz;
-
- spin_lock_init(&nvram_pstore_info.buf_lock);
-
- rc = pstore_register(&nvram_pstore_info);
- if (rc != 0)
- pr_err("nvram: pstore_register() failed, defaults to "
- "kmsg_dump; returned %d\n", rc);
-
- return rc;
-}
-#else
-static int nvram_pstore_init(void)
-{
- return -1;
-}
-#endif
-
-static void __init nvram_init_oops_partition(int rtas_partition_exists)
-{
- int rc;
-
- rc = pseries_nvram_init_os_partition(&oops_log_partition);
- if (rc != 0) {
- if (!rtas_partition_exists)
- return;
- pr_notice("nvram: Using %s partition to log both"
- " RTAS errors and oops/panic reports\n",
- rtas_log_partition.name);
- memcpy(&oops_log_partition, &rtas_log_partition,
- sizeof(rtas_log_partition));
- }
- oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL);
- if (!oops_buf) {
- pr_err("nvram: No memory for %s partition\n",
- oops_log_partition.name);
- return;
- }
- oops_data = oops_buf + sizeof(struct oops_log_info);
- oops_data_sz = oops_log_partition.size - sizeof(struct oops_log_info);
-
- rc = nvram_pstore_init();
-
- if (!rc)
- return;
-
- /*
- * Figure compression (preceded by elimination of each line's <n>
- * severity prefix) will reduce the oops/panic report to at most
- * 45% of its original size.
- */
- big_oops_buf_sz = (oops_data_sz * 100) / 45;
- big_oops_buf = kmalloc(big_oops_buf_sz, GFP_KERNEL);
- if (big_oops_buf) {
- stream.workspace = kmalloc(zlib_deflate_workspacesize(
- WINDOW_BITS, MEM_LEVEL), GFP_KERNEL);
- if (!stream.workspace) {
- pr_err("nvram: No memory for compression workspace; "
- "skipping compression of %s partition data\n",
- oops_log_partition.name);
- kfree(big_oops_buf);
- big_oops_buf = NULL;
- }
- } else {
- pr_err("No memory for uncompressed %s data; "
- "skipping compression\n", oops_log_partition.name);
- stream.workspace = NULL;
- }
-
- rc = kmsg_dump_register(&nvram_kmsg_dumper);
- if (rc != 0) {
- pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc);
- kfree(oops_buf);
- kfree(big_oops_buf);
- kfree(stream.workspace);
- }
-}
-
static int __init pseries_nvram_init_log_partitions(void)
{
int rc;
@@ -798,7 +211,7 @@ static int __init pseries_nvram_init_log_partitions(void)
/* Scan nvram for partitions */
nvram_scan_partitions();
- rc = pseries_nvram_init_os_partition(&rtas_log_partition);
+ rc = nvram_init_os_partition(&rtas_log_partition);
nvram_init_oops_partition(rc == 0);
return 0;
}
@@ -834,72 +247,3 @@ int __init pSeries_nvram_init(void)
return 0;
}
-
-/*
- * This is our kmsg_dump callback, called after an oops or panic report
- * has been written to the printk buffer. We want to capture as much
- * of the printk buffer as possible. First, capture as much as we can
- * that we think will compress sufficiently to fit in the lnx,oops-log
- * partition. If that's too much, go back and capture uncompressed text.
- */
-static void oops_to_nvram(struct kmsg_dumper *dumper,
- enum kmsg_dump_reason reason)
-{
- struct oops_log_info *oops_hdr = (struct oops_log_info *)oops_buf;
- static unsigned int oops_count = 0;
- static bool panicking = false;
- static DEFINE_SPINLOCK(lock);
- unsigned long flags;
- size_t text_len;
- unsigned int err_type = ERR_TYPE_KERNEL_PANIC_GZ;
- int rc = -1;
-
- switch (reason) {
- case KMSG_DUMP_RESTART:
- case KMSG_DUMP_HALT:
- case KMSG_DUMP_POWEROFF:
- /* These are almost always orderly shutdowns. */
- return;
- case KMSG_DUMP_OOPS:
- break;
- case KMSG_DUMP_PANIC:
- panicking = true;
- break;
- case KMSG_DUMP_EMERG:
- if (panicking)
- /* Panic report already captured. */
- return;
- break;
- default:
- pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n",
- __func__, (int) reason);
- return;
- }
-
- if (clobbering_unread_rtas_event())
- return;
-
- if (!spin_trylock_irqsave(&lock, flags))
- return;
-
- if (big_oops_buf) {
- kmsg_dump_get_buffer(dumper, false,
- big_oops_buf, big_oops_buf_sz, &text_len);
- rc = zip_oops(text_len);
- }
- if (rc != 0) {
- kmsg_dump_rewind(dumper);
- kmsg_dump_get_buffer(dumper, false,
- oops_data, oops_data_sz, &text_len);
- err_type = ERR_TYPE_KERNEL_PANIC;
- oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
- oops_hdr->report_length = cpu_to_be16(text_len);
- oops_hdr->timestamp = cpu_to_be64(get_seconds());
- }
-
- (void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + text_len), err_type,
- ++oops_count);
-
- spin_unlock_irqrestore(&lock, flags);
-}
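The timestamps that remain in this file move from get_seconds() in an unsigned long to time64_t with ktime_get_real_seconds(), the y2038-safe accessor, so the "has rtas_errd read the last event yet" check keeps working on 32-bit builds. A minimal sketch of that check (demo_* names are illustrative):

#include <linux/timekeeping.h>
#include <linux/types.h>

static time64_t demo_last_event;	/* set when an event is logged */

static bool demo_event_still_pending(unsigned int timeout_sec)
{
	return demo_last_event &&
	       ktime_get_real_seconds() - demo_last_event <= timeout_sec;
}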
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 89e23811199c..5d4a3df59d0c 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -32,6 +32,8 @@
#include <asm/firmware.h>
#include <asm/eeh.h>
+#include "pseries.h"
+
static struct pci_bus *
find_bus_among_children(struct pci_bus *bus,
struct device_node *dn)
@@ -75,6 +77,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
return NULL;
rtas_setup_phb(phb);
pci_process_bridge_OF_ranges(phb, dn, 0);
+ phb->controller_ops = pseries_pci_controller_ops;
pci_devs_phb_init_dynamic(phb);
@@ -82,7 +85,7 @@ struct pci_controller *init_phb_dynamic(struct device_node *dn)
eeh_dev_phb_init_dynamic(phb);
if (dn->child)
- eeh_add_device_tree_early(dn);
+ eeh_add_device_tree_early(PCI_DN(dn));
pcibios_scan_phb(phb);
pcibios_finish_adding_to_bus(phb->bus);
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 1796c5438cc6..8411c27293e4 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -11,6 +11,7 @@
#define _PSERIES_PSERIES_H
#include <linux/interrupt.h>
+#include <asm/rtas.h>
struct device_node;
@@ -60,11 +61,24 @@ extern struct device_node *dlpar_configure_connector(__be32,
struct device_node *);
extern int dlpar_attach_node(struct device_node *);
extern int dlpar_detach_node(struct device_node *);
+extern int dlpar_acquire_drc(u32 drc_index);
+extern int dlpar_release_drc(u32 drc_index);
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int dlpar_memory(struct pseries_hp_errorlog *hp_elog);
+#else
+static inline int dlpar_memory(struct pseries_hp_errorlog *hp_elog)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/* PCI root bridge prepare function override for pseries */
struct pci_host_bridge;
int pseries_root_bridge_prepare(struct pci_host_bridge *bridge);
+extern struct pci_controller_ops pseries_pci_controller_ops;
+
unsigned long pseries_memory_block_size(void);
#endif /* _PSERIES_PSERIES_H */
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index e445b6701f50..df6a7041922b 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -265,7 +265,7 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
update_dn_pci_info(np, pci->phb);
/* Create EEH device for the OF node */
- eeh_dev_init(np, pci->phb);
+ eeh_dev_init(PCI_DN(np), pci->phb);
}
break;
default:
@@ -461,6 +461,47 @@ static long pseries_little_endian_exceptions(void)
}
#endif
+static void __init find_and_init_phbs(void)
+{
+ struct device_node *node;
+ struct pci_controller *phb;
+ struct device_node *root = of_find_node_by_path("/");
+
+ for_each_child_of_node(root, node) {
+ if (node->type == NULL || (strcmp(node->type, "pci") != 0 &&
+ strcmp(node->type, "pciex") != 0))
+ continue;
+
+ phb = pcibios_alloc_controller(node);
+ if (!phb)
+ continue;
+ rtas_setup_phb(phb);
+ pci_process_bridge_OF_ranges(phb, node, 0);
+ isa_bridge_find_early(phb);
+ phb->controller_ops = pseries_pci_controller_ops;
+ }
+
+ of_node_put(root);
+ pci_devs_phb_init();
+
+ /*
+ * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
+ * in chosen.
+ */
+ if (of_chosen) {
+ const int *prop;
+
+ prop = of_get_property(of_chosen,
+ "linux,pci-probe-only", NULL);
+ if (prop) {
+ if (*prop)
+ pci_add_flags(PCI_PROBE_ONLY);
+ else
+ pci_clear_flags(PCI_PROBE_ONLY);
+ }
+ }
+}
+
static void __init pSeries_setup_arch(void)
{
set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
@@ -793,6 +834,10 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
void pSeries_final_fixup(void) { }
#endif
+struct pci_controller_ops pseries_pci_controller_ops = {
+ .probe_mode = pSeries_pci_probe_mode,
+};
+
define_machine(pseries) {
.name = "pSeries",
.probe = pSeries_probe,
@@ -801,7 +846,6 @@ define_machine(pseries) {
.show_cpuinfo = pSeries_show_cpuinfo,
.log_error = pSeries_log_error,
.pcibios_fixup = pSeries_final_fixup,
- .pci_probe_mode = pSeries_pci_probe_mode,
.restart = rtas_restart,
.halt = rtas_halt,
.panic = rtas_os_term,
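The probe_mode hook moves out of the global ppc_md and into the per-PHB pci_controller_ops introduced by this series, so each PHB carries its own callbacks (the iommu and pci_dlpar hunks above populate the same structure). A minimal sketch of the pattern, using only fields that appear in this series; demo_* names are illustrative:

#include <asm/pci-bridge.h>

static int demo_probe_mode(struct pci_bus *bus)
{
	return PCI_PROBE_NORMAL;	/* let generic code scan the bus */
}

struct pci_controller_ops demo_phb_ops = {
	.probe_mode = demo_probe_mode,
};

/* assigned at PHB setup time, e.g. phb->controller_ops = demo_phb_ops; */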
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index a3555b10c1a5..6932ea803e33 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -197,16 +197,14 @@ static void pSeries_cause_ipi_mux(int cpu, unsigned long data)
xics_cause_ipi(cpu, data);
}
-static __init int pSeries_smp_probe(void)
+static __init void pSeries_smp_probe(void)
{
- int ret = xics_smp_probe();
+ xics_smp_probe();
if (cpu_has_feature(CPU_FTR_DBELL)) {
xics_cause_ipi = smp_ops->cause_ipi;
smp_ops->cause_ipi = pSeries_cause_ipi_mux;
}
-
- return ret;
}
static struct smp_ops_t pSeries_mpic_smp_ops = {
diff --git a/arch/powerpc/relocs_check.pl b/arch/powerpc/relocs_check.pl
deleted file mode 100755
index 3f46e8b9c56d..000000000000
--- a/arch/powerpc/relocs_check.pl
+++ /dev/null
@@ -1,66 +0,0 @@
-#!/usr/bin/perl
-
-# Copyright © 2009 IBM Corporation
-
-# This program is free software; you can redistribute it and/or
-# modify it under the terms of the GNU General Public License
-# as published by the Free Software Foundation; either version
-# 2 of the License, or (at your option) any later version.
-
-# This script checks the relocations of a vmlinux for "suspicious"
-# relocations.
-
-use strict;
-use warnings;
-
-if ($#ARGV != 1) {
- die "$0 [path to objdump] [path to vmlinux]\n";
-}
-
-# Have Kbuild supply the path to objdump so we handle cross compilation.
-my $objdump = shift;
-my $vmlinux = shift;
-my $bad_relocs_count = 0;
-my $bad_relocs = "";
-my $old_binutils = 0;
-
-open(FD, "$objdump -R $vmlinux|") or die;
-while (<FD>) {
- study $_;
-
- # Only look at relocation lines.
- next if (!/\s+R_/);
-
- # These relocations are okay
- # On PPC64:
- # R_PPC64_RELATIVE, R_PPC64_NONE, R_PPC64_ADDR64
- # On PPC:
- # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
- # R_PPC_ADDR16_HA,R_PPC_ADDR16_LO,
- # R_PPC_NONE
-
- next if (/\bR_PPC64_RELATIVE\b/ or /\bR_PPC64_NONE\b/ or
- /\bR_PPC64_ADDR64\s+mach_/);
- next if (/\bR_PPC_ADDR16_LO\b/ or /\bR_PPC_ADDR16_HI\b/ or
- /\bR_PPC_ADDR16_HA\b/ or /\bR_PPC_RELATIVE\b/ or
- /\bR_PPC_NONE\b/);
-
- # If we see this type of relocation it's an idication that
- # we /may/ be using an old version of binutils.
- if (/R_PPC64_UADDR64/) {
- $old_binutils++;
- }
-
- $bad_relocs_count++;
- $bad_relocs .= $_;
-}
-
-if ($bad_relocs_count) {
- print "WARNING: $bad_relocs_count bad relocations\n";
- print $bad_relocs;
-}
-
-if ($old_binutils) {
- print "WARNING: You need at least binutils >= 2.19 to build a ".
- "CONFIG_RELOCATABLE kernel\n";
-}
diff --git a/arch/powerpc/relocs_check.sh b/arch/powerpc/relocs_check.sh
new file mode 100755
index 000000000000..2e4ebd0e25b3
--- /dev/null
+++ b/arch/powerpc/relocs_check.sh
@@ -0,0 +1,59 @@
+#!/bin/sh
+
+# Copyright © 2015 IBM Corporation
+
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version
+# 2 of the License, or (at your option) any later version.
+
+# This script checks the relocations of a vmlinux for "suspicious"
+# relocations.
+
+# based on relocs_check.pl
+# Copyright © 2009 IBM Corporation
+
+if [ $# -lt 2 ]; then
+ echo "$0 [path to objdump] [path to vmlinux]" 1>&2
+ exit 1
+fi
+
+# Have Kbuild supply the path to objdump so we handle cross compilation.
+objdump="$1"
+vmlinux="$2"
+
+bad_relocs=$(
+"$objdump" -R "$vmlinux" |
+ # Only look at relocation lines.
+ grep -E '\<R_' |
+ # These relocations are okay
+ # On PPC64:
+ # R_PPC64_RELATIVE, R_PPC64_NONE
+ # R_PPC64_ADDR64 mach_<name>
+ # On PPC:
+ # R_PPC_RELATIVE, R_PPC_ADDR16_HI,
+	#	R_PPC_ADDR16_HA, R_PPC_ADDR16_LO,
+ # R_PPC_NONE
+ grep -F -w -v 'R_PPC64_RELATIVE
+R_PPC64_NONE
+R_PPC_ADDR16_LO
+R_PPC_ADDR16_HI
+R_PPC_ADDR16_HA
+R_PPC_RELATIVE
+R_PPC_NONE' |
+ grep -E -v '\<R_PPC64_ADDR64[[:space:]]+mach_'
+)
+
+if [ -z "$bad_relocs" ]; then
+ exit 0
+fi
+
+num_bad=$(echo "$bad_relocs" | wc -l)
+echo "WARNING: $num_bad bad relocations"
+echo "$bad_relocs"
+
+# If we see this type of relocation it's an indication that
+# we /may/ be using an old version of binutils.
+if echo "$bad_relocs" | grep -q -F -w R_PPC64_UADDR64; then
+ echo "WARNING: You need at least binutils >= 2.19 to build a CONFIG_RELOCATABLE kernel"
+fi
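
The shell version keeps the perl script's approach: take objdump -R output, drop relocation types that are known to be safe, and warn about the rest. The same filtering idea, re-expressed as a standalone C program reading the objdump output on stdin (illustrative only, not part of the patch; the whitelist merely mirrors the script and substring matching is looser than grep -w):

/* Sketch: filter objdump -R output for suspicious relocations. */
#include <stdio.h>
#include <string.h>

static const char *ok[] = {
	"R_PPC64_RELATIVE", "R_PPC64_NONE",
	"R_PPC_ADDR16_LO", "R_PPC_ADDR16_HI", "R_PPC_ADDR16_HA",
	"R_PPC_RELATIVE", "R_PPC_NONE",
};

int main(void)
{
	char line[512];
	unsigned long bad = 0;
	int old_binutils = 0;

	while (fgets(line, sizeof(line), stdin)) {
		size_t i;
		int skip = 0;

		if (!strstr(line, " R_"))	/* not a relocation line */
			continue;
		for (i = 0; i < sizeof(ok) / sizeof(ok[0]); i++)
			if (strstr(line, ok[i]))
				skip = 1;
		/* R_PPC64_ADDR64 against mach_* descriptors is also fine. */
		if (strstr(line, "R_PPC64_ADDR64") && strstr(line, "mach_"))
			skip = 1;
		if (skip)
			continue;
		if (strstr(line, "R_PPC64_UADDR64"))
			old_binutils = 1;	/* hints at old binutils */
		bad++;
		fputs(line, stdout);
	}
	if (bad)
		fprintf(stderr, "WARNING: %lu bad relocations\n", bad);
	if (old_binutils)
		fprintf(stderr, "WARNING: binutils >= 2.19 is needed for CONFIG_RELOCATABLE\n");
	return bad ? 1 : 0;
}
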
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 9e5353ff6d1b..d00a5663e312 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -369,7 +369,7 @@ static int dart_dma_set_mask(struct device *dev, u64 dma_mask)
return 0;
}
-void __init iommu_init_early_dart(void)
+void __init iommu_init_early_dart(struct pci_controller_ops *controller_ops)
{
struct device_node *dn;
@@ -395,8 +395,8 @@ void __init iommu_init_early_dart(void)
if (dart_is_u4)
ppc_md.dma_set_mask = dart_dma_set_mask;
- ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart;
- ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart;
+ controller_ops->dma_dev_setup = pci_dma_dev_setup_dart;
+ controller_ops->dma_bus_setup = pci_dma_bus_setup_dart;
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_iommu_ops);
@@ -404,8 +404,8 @@ void __init iommu_init_early_dart(void)
bail:
/* If init failed, use direct iommu and null setup functions */
- ppc_md.pci_dma_dev_setup = NULL;
- ppc_md.pci_dma_bus_setup = NULL;
+ controller_ops->dma_dev_setup = NULL;
+ controller_ops->dma_bus_setup = NULL;
/* Setup pci_dma ops */
set_pci_dma_ops(&dma_direct_ops);
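
This hunk is part of the per-PHB hooks series: DMA setup callbacks move out of the single global ppc_md into the pci_controller_ops of the controller being set up. A simplified sketch of that shift, with toy stand-ins for the kernel structures (not kernel code):

/* Sketch: per-controller ops table instead of one global hook table. */
#include <stdio.h>

struct controller_ops {
	void (*dma_dev_setup)(const char *dev);
};

struct controller {
	const char *name;
	struct controller_ops ops;	/* each PHB carries its own hooks */
};

static void dart_dma_dev_setup(const char *dev)
{
	printf("DART setup for %s\n", dev);
}

static void default_dma_dev_setup(const char *dev)
{
	printf("default setup for %s\n", dev);
}

int main(void)
{
	struct controller phb0 = { "phb0", { dart_dma_dev_setup } };
	struct controller phb1 = { "phb1", { default_dma_dev_setup } };
	struct controller *phbs[] = { &phb0, &phb1 };
	size_t i;

	/* Each controller dispatches through its own ops, not a global table. */
	for (i = 0; i < 2; i++)
		phbs[i]->ops.dma_dev_setup(phbs[i]->name);
	return 0;
}
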
diff --git a/arch/powerpc/sysdev/dcr.c b/arch/powerpc/sysdev/dcr.c
index 2d8a101b6b9e..121e26fffd50 100644
--- a/arch/powerpc/sysdev/dcr.c
+++ b/arch/powerpc/sysdev/dcr.c
@@ -54,7 +54,7 @@ bool dcr_map_ok_generic(dcr_host_t host)
else if (host.type == DCR_HOST_MMIO)
return dcr_map_ok_mmio(host.host.mmio);
else
- return 0;
+ return false;
}
EXPORT_SYMBOL_GPL(dcr_map_ok_generic);
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 4bbb4b8dfd09..f086c6f22dc9 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -162,7 +162,17 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq,
msg->address_lo = lower_32_bits(address);
msg->address_hi = upper_32_bits(address);
- msg->data = hwirq;
+	/*
+	 * MPIC version 2.0 has erratum PIC1, which breaks both
+	 * MSI and MSI-X. This workaround makes MSI-X usable
+	 * again; plain MSI stays disabled on affected chips in
+	 * fsl_setup_msi_irqs().
+	 */
+ if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
+ msg->data = __swab32(hwirq);
+ else
+ msg->data = hwirq;
pr_debug("%s: allocated srs: %d, ibs: %d\n", __func__,
(hwirq >> msi_data->srs_shift) & MSI_SRS_MASK,
@@ -180,8 +190,16 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
struct msi_msg msg;
struct fsl_msi *msi_data;
- if (type == PCI_CAP_ID_MSIX)
- pr_debug("fslmsi: MSI-X untested, trying anyway.\n");
+ if (type == PCI_CAP_ID_MSI) {
+ /*
+		 * MPIC version 2.0 has erratum PIC1, which makes
+		 * plain MSI unusable. Refuse MSI on any board
+		 * with this erratum.
+ */
+ list_for_each_entry(msi_data, &msi_head, list)
+ if (msi_data->feature & MSI_HW_ERRATA_ENDIAN)
+ return -EINVAL;
+ }
/*
* If the PCI node has an fsl,msi property, then we need to use it
@@ -446,6 +464,11 @@ static int fsl_of_msi_probe(struct platform_device *dev)
msi->feature = features->fsl_pic_ip;
+	/* For erratum PIC1 on MPIC version 2.0 */
+ if ((features->fsl_pic_ip & FSL_PIC_IP_MASK) == FSL_PIC_IP_MPIC
+ && (fsl_mpic_primary_get_version() == 0x0200))
+ msi->feature |= MSI_HW_ERRATA_ENDIAN;
+
/*
* Remember the phandle, so that we can match with any PCI nodes
* that have an "fsl,msi" property.
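
The MSI-X part of the workaround is a conditional byte swap of the message data when the erratum flag is set. A minimal sketch of that composition step (not kernel code; __swab32 is modelled with the GCC/Clang bswap builtin):

/* Sketch: swap MSI data bytes when a hardware-erratum flag is set. */
#include <stdio.h>
#include <stdint.h>

#define MSI_HW_ERRATA_ENDIAN 0x00000010u

static uint32_t compose_msi_data(uint32_t hwirq, uint32_t features)
{
	/* On affected chips the PIC latches the data in the wrong byte
	 * order, so pre-swap it; otherwise pass the hwirq number through. */
	if (features & MSI_HW_ERRATA_ENDIAN)
		return __builtin_bswap32(hwirq);
	return hwirq;
}

int main(void)
{
	printf("normal : %#010x\n", compose_msi_data(0x12, 0));
	printf("erratum: %#010x\n", compose_msi_data(0x12, MSI_HW_ERRATA_ENDIAN));
	return 0;
}
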
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index 420cfcbdac01..a67359d993e5 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -27,6 +27,8 @@
#define FSL_PIC_IP_IPIC 0x00000002
#define FSL_PIC_IP_VMPIC 0x00000003
+#define MSI_HW_ERRATA_ENDIAN 0x00000010
+
struct fsl_msi_cascade_data;
struct fsl_msi {
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 4b74c276e427..9a8fcf0d79d7 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -111,6 +111,18 @@ static struct pci_ops fsl_indirect_pcie_ops =
#define MAX_PHYS_ADDR_BITS 40
static u64 pci64_dma_offset = 1ull << MAX_PHYS_ADDR_BITS;
+#ifdef CONFIG_SWIOTLB
+static void setup_swiotlb_ops(struct pci_controller *hose)
+{
+ if (ppc_swiotlb_enable) {
+ hose->controller_ops.dma_dev_setup = pci_dma_dev_setup_swiotlb;
+ set_pci_dma_ops(&swiotlb_dma_ops);
+ }
+}
+#else
+static inline void setup_swiotlb_ops(struct pci_controller *hose) {}
+#endif
+
static int fsl_pci_dma_set_mask(struct device *dev, u64 dma_mask)
{
if (!dev->dma_mask || !dma_supported(dev, dma_mask))
@@ -548,6 +560,9 @@ int fsl_add_bridge(struct platform_device *pdev, int is_primary)
/* Setup PEX window registers */
setup_pci_atmu(hose);
+ /* Set up controller operations */
+ setup_swiotlb_ops(hose);
+
return 0;
no_bridge:
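
setup_swiotlb_ops() follows the usual "real function or empty static inline stub" pattern, so fsl_add_bridge() can call it unconditionally without its own #ifdef. A toy illustration of the pattern under a hypothetical DEMO_HAVE_SWIOTLB switch (not the kernel config symbol):

/* Sketch: compile-time feature switch with an inline no-op stub. */
#include <stdio.h>

struct hose { const char *name; };

#ifdef DEMO_HAVE_SWIOTLB
static void setup_swiotlb_ops(struct hose *hose)
{
	printf("installing swiotlb hooks for %s\n", hose->name);
}
#else
/* Compiles away entirely when the feature is configured out. */
static inline void setup_swiotlb_ops(struct hose *hose) {}
#endif

int main(void)
{
	struct hose h = { "fsl-pci0" };

	setup_swiotlb_ops(&h);	/* unconditional call site, no #ifdef needed */
	return 0;
}
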
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index bbfbbf2025fd..b2b8447a227a 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -655,7 +655,6 @@ static inline struct mpic * mpic_from_irq_data(struct irq_data *d)
static inline void mpic_eoi(struct mpic *mpic)
{
mpic_cpu_write(MPIC_INFO(CPU_EOI), 0);
- (void)mpic_cpu_read(MPIC_INFO(CPU_WHOAMI));
}
/*
@@ -1676,31 +1675,6 @@ void __init mpic_init(struct mpic *mpic)
mpic_err_int_init(mpic, MPIC_FSL_ERR_INT);
}
-void __init mpic_set_clk_ratio(struct mpic *mpic, u32 clock_ratio)
-{
- u32 v;
-
- v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
- v &= ~MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO_MASK;
- v |= MPIC_GREG_GLOBAL_CONF_1_CLK_RATIO(clock_ratio);
- mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
-}
-
-void __init mpic_set_serial_int(struct mpic *mpic, int enable)
-{
- unsigned long flags;
- u32 v;
-
- raw_spin_lock_irqsave(&mpic_lock, flags);
- v = mpic_read(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1);
- if (enable)
- v |= MPIC_GREG_GLOBAL_CONF_1_SIE;
- else
- v &= ~MPIC_GREG_GLOBAL_CONF_1_SIE;
- mpic_write(mpic->gregs, MPIC_GREG_GLOBAL_CONF_1, v);
- raw_spin_unlock_irqrestore(&mpic_lock, flags);
-}
-
void mpic_irq_set_priority(unsigned int irq, unsigned int pri)
{
struct mpic *mpic = mpic_find(irq);
@@ -1923,7 +1897,7 @@ void smp_mpic_message_pass(int cpu, int msg)
msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask);
}
-int __init smp_mpic_probe(void)
+void __init smp_mpic_probe(void)
{
int nr_cpus;
@@ -1935,8 +1909,6 @@ int __init smp_mpic_probe(void)
if (nr_cpus > 1)
mpic_request_ipis();
-
- return nr_cpus;
}
void smp_mpic_setup_cpu(int cpu)
diff --git a/arch/powerpc/sysdev/qe_lib/qe_io.c b/arch/powerpc/sysdev/qe_lib/qe_io.c
index d09994164daf..7ea0174f6d3d 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_io.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_io.c
@@ -190,28 +190,3 @@ int par_io_of_config(struct device_node *np)
return 0;
}
EXPORT_SYMBOL(par_io_of_config);
-
-#ifdef DEBUG
-static void dump_par_io(void)
-{
- unsigned int i;
-
- printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io);
- for (i = 0; i < num_par_io_ports; i++) {
- printk(KERN_INFO " cpodr[%u]=%08x\n", i,
- in_be32(&par_io[i].cpodr));
- printk(KERN_INFO " cpdata[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdata));
- printk(KERN_INFO " cpdir1[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdir1));
- printk(KERN_INFO " cpdir2[%u]=%08x\n", i,
- in_be32(&par_io[i].cpdir2));
- printk(KERN_INFO " cppar1[%u]=%08x\n", i,
- in_be32(&par_io[i].cppar1));
- printk(KERN_INFO " cppar2[%u]=%08x\n", i,
- in_be32(&par_io[i].cppar2));
- }
-
-}
-EXPORT_SYMBOL(dump_par_io);
-#endif /* DEBUG */
diff --git a/arch/powerpc/sysdev/qe_lib/ucc_slow.c b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
index befaf1123f7f..5f91628209eb 100644
--- a/arch/powerpc/sysdev/qe_lib/ucc_slow.c
+++ b/arch/powerpc/sysdev/qe_lib/ucc_slow.c
@@ -43,11 +43,6 @@ u32 ucc_slow_get_qe_cr_subblock(int uccs_num)
}
EXPORT_SYMBOL(ucc_slow_get_qe_cr_subblock);
-void ucc_slow_poll_transmitter_now(struct ucc_slow_private * uccs)
-{
- out_be16(&uccs->us_regs->utodr, UCC_SLOW_TOD);
-}
-
void ucc_slow_graceful_stop_tx(struct ucc_slow_private * uccs)
{
struct ucc_slow_info *us_info = uccs->us_info;
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index 125743b58c70..878a54036a25 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -140,15 +140,13 @@ static void xics_request_ipi(void)
IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}
-int __init xics_smp_probe(void)
+void __init xics_smp_probe(void)
{
/* Setup cause_ipi callback based on which ICP is used */
smp_ops->cause_ipi = icp_ops->cause_ipi;
/* Register all the IPIs */
xics_request_ipi();
-
- return num_possible_cpus();
}
#endif /* CONFIG_SMP */
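
xics_smp_probe, smp_mpic_probe and pSeries_smp_probe all change from int to void in this series because nothing consumed their return value; the CPU count comes from elsewhere. A tiny sketch of the resulting callback shape, with hypothetical stand-in names (not kernel code):

/* Sketch: a probe callback that exists only for its side effects. */
#include <stdio.h>

struct smp_ops {
	void (*probe)(void);	/* was: int (*probe)(void) returning a cpu count */
};

static int possible_cpus = 4;	/* stands in for num_possible_cpus() */

static void xics_probe(void)
{
	printf("probe: wiring up IPIs\n");	/* side effects only */
}

int main(void)
{
	struct smp_ops ops = { .probe = xics_probe };

	ops.probe();
	printf("bringing up %d cpus\n", possible_cpus);
	return 0;
}
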