author     Eric Paris <eparis@redhat.com>	2014-03-07 17:41:32 +0100
committer  Eric Paris <eparis@redhat.com>	2014-03-07 17:41:32 +0100
commit     b7d3622a39fde7658170b7f3cf6c6889bb8db30d (patch)
tree       64f4e781ecb2a85d675e234072b988560bcd25f1 /arch/powerpc/include
parent     audit: whitespace fix in kernel-parameters.txt (diff)
parent     Linux 3.13 (diff)
Merge tag 'v3.13' into for-3.15
Linux 3.13
Conflicts:
include/net/xfrm.h
Simple merge where v3.13 removed 'extern' from definitions and the audit
tree did s/u32/unsigned int/ to the same definitions.
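
To make the conflict shape concrete, here is a minimal sketch; xfrm_example_init() is a hypothetical stand-in for illustration, not the actual include/net/xfrm.h declaration:

    /* common ancestor */
    extern u32 xfrm_example_init(u32 seed);

    /* v3.13 side: 'extern' dropped from the declaration */
    u32 xfrm_example_init(u32 seed);

    /* audit tree side: s/u32/unsigned int/ applied to the same line */
    extern unsigned int xfrm_example_init(unsigned int seed);

    /* resolution: keep both changes */
    unsigned int xfrm_example_init(unsigned int seed);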
Diffstat (limited to 'arch/powerpc/include')
52 files changed, 882 insertions(+), 536 deletions(-)
diff --git a/arch/powerpc/include/asm/Kbuild b/arch/powerpc/include/asm/Kbuild
index 704e6f10ae80..d8f9d2f18a23 100644
--- a/arch/powerpc/include/asm/Kbuild
+++ b/arch/powerpc/include/asm/Kbuild
@@ -2,4 +2,5 @@
 generic-y += clkdev.h
 generic-y += rwsem.h
 generic-y += trace_clock.h
+generic-y += preempt.h
 generic-y += vtime.h
\ No newline at end of file
diff --git a/arch/powerpc/include/asm/archrandom.h b/arch/powerpc/include/asm/archrandom.h
new file mode 100644
index 000000000000..d853d163ba47
--- /dev/null
+++ b/arch/powerpc/include/asm/archrandom.h
@@ -0,0 +1,32 @@
+#ifndef _ASM_POWERPC_ARCHRANDOM_H
+#define _ASM_POWERPC_ARCHRANDOM_H
+
+#ifdef CONFIG_ARCH_RANDOM
+
+#include <asm/machdep.h>
+
+static inline int arch_get_random_long(unsigned long *v)
+{
+	if (ppc_md.get_random_long)
+		return ppc_md.get_random_long(v);
+
+	return 0;
+}
+
+static inline int arch_get_random_int(unsigned int *v)
+{
+	unsigned long val;
+	int rc;
+
+	rc = arch_get_random_long(&val);
+	if (rc)
+		*v = val;
+
+	return rc;
+}
+
+int powernv_get_random_long(unsigned long *v);
+
+#endif /* CONFIG_ARCH_RANDOM */
+
+#endif /* _ASM_POWERPC_ARCHRANDOM_H */
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h
index ce0c28495f9a..8251a3ba870f 100644
--- a/arch/powerpc/include/asm/checksum.h
+++ b/arch/powerpc/include/asm/checksum.h
@@ -14,6 +14,9 @@
  * which always checksum on 4 octet boundaries.  ihl is the number
  * of 32-bit words and is always >= 5.
  */
+#ifdef CONFIG_GENERIC_CSUM
+#include <asm-generic/checksum.h>
+#else
 extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /*
@@ -123,5 +126,7 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
 	return sum;
 #endif
 }
+
+#endif
 #endif /* __KERNEL__ */
 #endif
diff --git a/arch/powerpc/include/asm/disassemble.h b/arch/powerpc/include/asm/disassemble.h
index 9b198d1b3b2b..856f8deb557a 100644
--- a/arch/powerpc/include/asm/disassemble.h
+++ b/arch/powerpc/include/asm/disassemble.h
@@ -77,4 +77,8 @@ static inline unsigned int get_d(u32 inst)
 	return inst & 0xffff;
 }
 
+static inline unsigned int get_oc(u32 inst)
+{
+	return (inst >> 11) & 0x7fff;
+}
 #endif /* __ASM_PPC_DISASSEMBLE_H__ */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index cc0655a702a7..935b5e7a1436 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -31,6 +31,8 @@ extern unsigned long randomize_et_dyn(unsigned long base);
 #define ELF_ET_DYN_BASE		(randomize_et_dyn(0x20000000))
 
+#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
+
 /*
  * Our registers are always unsigned longs, whether we're a 32 bit
  * process or 64 bit, on either a 64 bit or 32 bit kernel.
@@ -86,6 +88,8 @@ typedef elf_vrregset_t elf_fpxregset_t;
 #ifdef __powerpc64__
 # define SET_PERSONALITY(ex)					\
 do {								\
+	if (((ex).e_flags & 0x3) == 2)				\
+		set_thread_flag(TIF_ELF2ABI);			\
 	if ((ex).e_ident[EI_CLASS] == ELFCLASS32)		\
 		set_thread_flag(TIF_32BIT);			\
 	else							\
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h
index 5a8b82aa7241..4358e3002f35 100644
--- a/arch/powerpc/include/asm/emulated_ops.h
+++ b/arch/powerpc/include/asm/emulated_ops.h
@@ -43,6 +43,7 @@ extern struct ppc_emulated {
 	struct ppc_emulated_entry popcntb;
 	struct ppc_emulated_entry spe;
 	struct ppc_emulated_entry string;
+	struct ppc_emulated_entry sync;
 	struct ppc_emulated_entry unaligned;
 #ifdef CONFIG_MATH_EMULATION
 	struct ppc_emulated_entry math;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index cca12f084842..243ce69ad685 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -198,12 +198,27 @@ END_FTR_SECTION_NESTED(ftr,ftr,943)
 	cmpwi	r10,0;							\
 	bne	do_kvm_##n
 
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+/*
+ * If hv is possible, interrupts come into to the hv version
+ * of the kvmppc_interrupt code, which then jumps to the PR handler,
+ * kvmppc_interrupt_pr, if the guest is a PR guest.
+ */
+#define kvmppc_interrupt kvmppc_interrupt_hv
+#else
+#define kvmppc_interrupt kvmppc_interrupt_pr
+#endif
+
 #define __KVM_HANDLER(area, h, n)					\
 do_kvm_##n:								\
 	BEGIN_FTR_SECTION_NESTED(947)					\
 	ld	r10,area+EX_CFAR(r13);					\
 	std	r10,HSTATE_CFAR(r13);					\
 	END_FTR_SECTION_NESTED(CPU_FTR_CFAR,CPU_FTR_CFAR,947);		\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r10,area+EX_PPR(r13);					\
+	std	r10,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r10,area+EX_R10(r13);					\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
 	ld	r9,area+EX_R9(r13);					\
@@ -217,6 +232,10 @@ do_kvm_##n:								\
 	ld	r10,area+EX_R10(r13);					\
 	beq	89f;							\
 	stw	r9,HSTATE_SCRATCH1(r13);				\
+	BEGIN_FTR_SECTION_NESTED(948)					\
+	ld	r9,area+EX_PPR(r13);					\
+	std	r9,HSTATE_PPR(r13);					\
+	END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,948);	\
 	ld	r9,area+EX_R9(r13);					\
 	std	r12,HSTATE_SCRATCH0(r13);				\
 	li	r12,n;							\
@@ -236,7 +255,7 @@ do_kvm_##n:								\
 #define KVM_HANDLER_SKIP(area, h, n)
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 #define KVMTEST_PR(n)			__KVMTEST(n)
 #define KVM_HANDLER_PR(area, h, n)	__KVM_HANDLER(area, h, n)
 #define KVM_HANDLER_PR_SKIP(area, h, n)	__KVM_HANDLER_SKIP(area, h, n)
@@ -265,7 +284,7 @@ do_kvm_##n:								\
 	subi	r1,r1,INT_FRAME_SIZE;	/* alloc frame on kernel stack	*/ \
 	beq-	1f;							\
 	ld	r1,PACAKSAVE(r13);	/* kernel stack to use		*/ \
-1:	cmpdi	cr1,r1,0;		/* check if r1 is in userspace	*/ \
+1:	cmpdi	cr1,r1,-INT_FRAME_SIZE;	/* check if r1 is in userspace	*/ \
 	blt+	cr1,3f;			/* abort if it is		*/ \
 	li	r1,(n);			/* will be reloaded later	*/ \
 	sth	r1,PACA_TRAP_SAVE(r13);					\
diff --git a/arch/powerpc/include/asm/fsl_ifc.h b/arch/powerpc/include/asm/fsl_ifc.h
index b8a4b9bc50b3..f49ddb1b2273 100644
--- a/arch/powerpc/include/asm/fsl_ifc.h
+++ b/arch/powerpc/include/asm/fsl_ifc.h
@@ -93,6 +93,7 @@
 #define CSOR_NAND_PGS_512		0x00000000
 #define CSOR_NAND_PGS_2K		0x00080000
 #define CSOR_NAND_PGS_4K		0x00100000
+#define CSOR_NAND_PGS_8K		0x00180000
 /* Spare region Size */
 #define CSOR_NAND_SPRZ_MASK		0x0000E000
 #define CSOR_NAND_SPRZ_SHIFT		13
@@ -102,6 +103,7 @@
 #define CSOR_NAND_SPRZ_210		0x00006000
 #define CSOR_NAND_SPRZ_218		0x00008000
 #define CSOR_NAND_SPRZ_224		0x0000A000
+#define CSOR_NAND_SPRZ_CSOR_EXT		0x0000C000
 /* Pages Per Block */
 #define CSOR_NAND_PB_MASK		0x00000700
 #define CSOR_NAND_PB_SHIFT		8
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 0c7f2bfcf134..d8b600b3f058 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -403,6 +403,8 @@ static inline unsigned long cmo_get_page_size(void)
 extern long pSeries_enable_reloc_on_exc(void);
 extern long pSeries_disable_reloc_on_exc(void);
 
+extern long pseries_big_endian_exceptions(void);
+
 #else
 
 #define pSeries_enable_reloc_on_exc()  do {} while (0)
diff --git a/arch/powerpc/include/asm/hvsi.h b/arch/powerpc/include/asm/hvsi.h
index d3f64f361814..d4a5315718ca 100644
--- a/arch/powerpc/include/asm/hvsi.h
+++ b/arch/powerpc/include/asm/hvsi.h
@@ -25,7 +25,7 @@
 struct hvsi_header {
 	uint8_t  type;
 	uint8_t  len;
-	uint16_t seqno;
+	__be16 seqno;
 } __attribute__((packed));
 
 struct hvsi_data {
@@ -35,24 +35,24 @@ struct hvsi_data {
 
 struct hvsi_control {
 	struct hvsi_header hdr;
-	uint16_t verb;
+	__be16 verb;
 	/* optional depending on verb: */
-	uint32_t word;
-	uint32_t mask;
+	__be32 word;
+	__be32 mask;
 } __attribute__((packed));
 
 struct hvsi_query {
 	struct hvsi_header hdr;
-	uint16_t verb;
+	__be16 verb;
 } __attribute__((packed));
 
 struct hvsi_query_response {
 	struct hvsi_header hdr;
-	uint16_t verb;
-	uint16_t query_seqno;
+	__be16 verb;
+	__be16 query_seqno;
 	union {
 		uint8_t  version;
-		uint32_t mctrl_word;
+		__be32 mctrl_word;
 	} u;
 } __attribute__((packed));
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 5a64757dc0d1..575fbf81fad0 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -21,7 +21,7 @@ extern struct pci_dev *isa_bridge_pcidev;
 /*
  * has legacy ISA devices ?
  */
-#define arch_has_dev_port()	(isa_bridge_pcidev != NULL)
+#define arch_has_dev_port()	(isa_bridge_pcidev != NULL || isa_io_special)
 #endif
 
 #include <linux/device.h>
@@ -113,7 +113,7 @@ extern bool isa_io_special;
 
 /* gcc 4.0 and older doesn't have 'Z' constraint */
 #if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ == 0)
-#define DEF_MMIO_IN_LE(name, size, insn)			\
+#define DEF_MMIO_IN_X(name, size, insn)				\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {								\
 	u##size ret;						\
@@ -122,7 +122,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\
 	return ret;						\
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)			\
+#define DEF_MMIO_OUT_X(name, size, insn)			\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {								\
 	__asm__ __volatile__("sync;"#insn" %1,0,%2"		\
@@ -130,7 +130,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\
 	IO_SET_SYNC_FLAG();					\
 }
 #else /* newer gcc */
-#define DEF_MMIO_IN_LE(name, size, insn)			\
+#define DEF_MMIO_IN_X(name, size, insn)				\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {								\
 	u##size ret;						\
@@ -139,7 +139,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\
 	return ret;						\
 }
 
-#define DEF_MMIO_OUT_LE(name, size, insn)			\
+#define DEF_MMIO_OUT_X(name, size, insn)			\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {								\
 	__asm__ __volatile__("sync;"#insn" %1,%y0"		\
@@ -148,7 +148,7 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\
 }
 #endif
 
-#define DEF_MMIO_IN_BE(name, size, insn)			\
+#define DEF_MMIO_IN_D(name, size, insn)				\
 static inline u##size name(const volatile u##size __iomem *addr)	\
 {								\
 	u##size ret;						\
@@ -157,7 +157,7 @@ static inline u##size name(const volatile u##size __iomem *addr)	\
 	return ret;						\
 }
 
-#define DEF_MMIO_OUT_BE(name, size, insn)			\
+#define DEF_MMIO_OUT_D(name, size, insn)			\
 static inline void name(volatile u##size __iomem *addr, u##size val)	\
 {								\
 	__asm__ __volatile__("sync;"#insn"%U0%X0 %1,%0"		\
@@ -165,22 +165,37 @@ static inline void name(volatile u##size __iomem *addr, u##size val)	\
 	IO_SET_SYNC_FLAG();					\
 }
 
+DEF_MMIO_IN_D(in_8,     8, lbz);
+DEF_MMIO_OUT_D(out_8,   8, stb);
 
-DEF_MMIO_IN_BE(in_8,     8, lbz);
-DEF_MMIO_IN_BE(in_be16, 16, lhz);
-DEF_MMIO_IN_BE(in_be32, 32, lwz);
-DEF_MMIO_IN_LE(in_le16, 16, lhbrx);
-DEF_MMIO_IN_LE(in_le32, 32, lwbrx);
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_IN_D(in_be16, 16, lhz);
+DEF_MMIO_IN_D(in_be32, 32, lwz);
+DEF_MMIO_IN_X(in_le16, 16, lhbrx);
+DEF_MMIO_IN_X(in_le32, 32, lwbrx);
 
-DEF_MMIO_OUT_BE(out_8,     8, stb);
-DEF_MMIO_OUT_BE(out_be16, 16, sth);
-DEF_MMIO_OUT_BE(out_be32, 32, stw);
-DEF_MMIO_OUT_LE(out_le16, 16, sthbrx);
-DEF_MMIO_OUT_LE(out_le32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_be16, 16, sth);
+DEF_MMIO_OUT_D(out_be32, 32, stw);
+DEF_MMIO_OUT_X(out_le16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_le32, 32, stwbrx);
+#else
+DEF_MMIO_IN_X(in_be16, 16, lhbrx);
+DEF_MMIO_IN_X(in_be32, 32, lwbrx);
+DEF_MMIO_IN_D(in_le16, 16, lhz);
+DEF_MMIO_IN_D(in_le32, 32, lwz);
+
+DEF_MMIO_OUT_X(out_be16, 16, sthbrx);
+DEF_MMIO_OUT_X(out_be32, 32, stwbrx);
+DEF_MMIO_OUT_D(out_le16, 16, sth);
+DEF_MMIO_OUT_D(out_le32, 32, stw);
+
+#endif /* __BIG_ENDIAN */
 
 #ifdef __powerpc64__
-DEF_MMIO_OUT_BE(out_be64, 64, std);
-DEF_MMIO_IN_BE(in_be64, 64, ld);
+
+#ifdef __BIG_ENDIAN__
+DEF_MMIO_OUT_D(out_be64, 64, std);
+DEF_MMIO_IN_D(in_be64, 64, ld);
 
 /* There is no asm instructions for 64 bits reverse loads and stores */
 static inline u64 in_le64(const volatile u64 __iomem *addr)
@@ -192,6 +207,22 @@ static inline void out_le64(volatile u64 __iomem *addr, u64 val)
 {
 	out_be64(addr, swab64(val));
 }
+#else
+DEF_MMIO_OUT_D(out_le64, 64, std);
+DEF_MMIO_IN_D(in_le64, 64, ld);
+
+/* There is no asm instructions for 64 bits reverse loads and stores */
+static inline u64 in_be64(const volatile u64 __iomem *addr)
+{
+	return swab64(in_le64(addr));
+}
+
+static inline void out_be64(volatile u64 __iomem *addr, u64 val)
+{
+	out_le64(addr, swab64(val));
+}
+
+#endif
 #endif /* __powerpc64__ */
 
 /*
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 851bac7afa4b..1bd92fd43cfb 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -123,6 +123,8 @@
 #define BOOK3S_HFLAG_SLB			0x2
 #define BOOK3S_HFLAG_PAIRED_SINGLE		0x4
 #define BOOK3S_HFLAG_NATIVE_PS			0x8
+#define BOOK3S_HFLAG_MULTI_PGSIZE		0x10
+#define BOOK3S_HFLAG_NEW_TLBIE			0x20
 
 #define RESUME_FLAG_NV          (1<<0)  /* Reload guest nonvolatile state? */
 #define RESUME_FLAG_HOST        (1<<1)  /* Resume host? */
@@ -136,6 +138,8 @@
 #define KVM_GUEST_MODE_NONE	0
 #define KVM_GUEST_MODE_GUEST	1
 #define KVM_GUEST_MODE_SKIP	2
+#define KVM_GUEST_MODE_GUEST_HV	3
+#define KVM_GUEST_MODE_HOST_HV	4
 
 #define KVM_INST_FETCH_FAILED	-1
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fa19e2f1a874..bc23b1ba7980 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -58,16 +58,18 @@ struct hpte_cache {
 	struct hlist_node list_pte_long;
 	struct hlist_node list_vpte;
 	struct hlist_node list_vpte_long;
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_node list_vpte_64k;
+#endif
 	struct rcu_head rcu_head;
 	u64 host_vpn;
 	u64 pfn;
 	ulong slot;
 	struct kvmppc_pte pte;
+	int pagesize;
 };
 
 struct kvmppc_vcpu_book3s {
-	struct kvm_vcpu vcpu;
-	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 	struct kvmppc_sid_map sid_map[SID_MAP_NUM];
 	struct {
 		u64 esid;
@@ -99,6 +101,9 @@ struct kvmppc_vcpu_book3s {
 	struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
 	struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
 	struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG];
+#ifdef CONFIG_PPC_BOOK3S_64
+	struct hlist_head hpte_hash_vpte_64k[HPTEG_HASH_NUM_VPTE_64K];
+#endif
 	int hpte_cache_count;
 	spinlock_t mmu_lock;
 };
@@ -107,8 +112,9 @@ struct kvmppc_vcpu_book3s {
 #define CONTEXT_GUEST		1
 #define CONTEXT_GUEST_END	2
 
-#define VSID_REAL	0x0fffffffffc00000ULL
-#define VSID_BAT	0x0fffffffffb00000ULL
+#define VSID_REAL	0x07ffffffffc00000ULL
+#define VSID_BAT	0x07ffffffffb00000ULL
+#define VSID_64K	0x0800000000000000ULL
 #define VSID_1T		0x1000000000000000ULL
 #define VSID_REAL_DR	0x2000000000000000ULL
 #define VSID_REAL_IR	0x4000000000000000ULL
@@ -118,11 +124,12 @@ extern void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong ea, ulong ea_mask)
 extern void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 vp, u64 vp_mask);
 extern void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end);
 extern void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 new_msr);
-extern void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr);
 extern void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_32_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
-extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
+extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
+			       bool iswrite);
+extern void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
 extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
 extern void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong eaddr, ulong seg_size);
 extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
@@ -134,6 +141,7 @@ extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
 
 extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
 extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
+extern void kvmppc_mmu_hpte_cache_free(struct hpte_cache *pte);
 extern void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu);
 extern int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu);
 extern void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
@@ -151,7 +159,8 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
			   bool upper, u32 val);
 extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
 extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
-extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
+extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool writing,
+			bool *writable);
 extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
			unsigned long *rmap, long pte_index, int realmode);
 extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
@@ -172,6 +181,8 @@ extern long kvmppc_do_h_remove(struct kvm *kvm, unsigned long flags,
			unsigned long *hpret);
 extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
			struct kvm_memory_slot *memslot, unsigned long *map);
+extern void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr,
+			unsigned long mask);
 
 extern void kvmppc_entry_trampoline(void);
 extern void kvmppc_hv_entry_trampoline(void);
@@ -181,14 +192,16 @@ extern void kvmppc_load_up_vsx(void);
 extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
 extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
 extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+				 struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+				   struct kvmppc_book3s_shadow_vcpu *svcpu);
 
 static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
 {
-	return container_of(vcpu, struct kvmppc_vcpu_book3s, vcpu);
+	return vcpu->arch.book3s;
 }
 
-extern void kvm_return_point(void);
-
 /* Also add subarch specific defines */
 
 #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
@@ -198,203 +211,6 @@ extern void kvm_return_point(void);
 #include <asm/kvm_book3s_64.h>
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_PR
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return to_book3s(vcpu)->hior;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-	if (pending_now)
-		vcpu->arch.shared->int_pending = 1;
-	else if (old_pending)
-		vcpu->arch.shared->int_pending = 0;
-}
-
-static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		svcpu->gpr[num] = val;
-		svcpu_put(svcpu);
-		to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
-	} else
-		vcpu->arch.gpr[num] = val;
-}
-
-static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
-{
-	if ( num < 14 ) {
-		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-		ulong r = svcpu->gpr[num];
-		svcpu_put(svcpu);
-		return r;
-	} else
-		return vcpu->arch.gpr[num];
-}
-
-static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->cr = val;
-	svcpu_put(svcpu);
-	to_book3s(vcpu)->shadow_vcpu->cr = val;
-}
-
-static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->cr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->xer = val;
-	to_book3s(vcpu)->shadow_vcpu->xer = val;
-	svcpu_put(svcpu);
-}
-
-static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-	r = svcpu->xer;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->ctr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->ctr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->lr = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->lr;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	svcpu->pc = val;
-	svcpu_put(svcpu);
-}
-
-static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->pc;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu);
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-/*
- * Like kvmppc_get_last_inst(), but for fetching a sc instruction.
- * Because the sc instruction sets SRR0 to point to the following
- * instruction, we have to fetch from pc - 4.
- */
-static inline u32 kvmppc_get_last_sc(struct kvm_vcpu *vcpu)
-{
-	ulong pc = kvmppc_get_pc(vcpu) - 4;
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	u32 r;
-
-	/* Load the instruction manually if it failed to do so in the
-	 * exit path */
-	if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
-		kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
-
-	r = svcpu->last_inst;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
-{
-	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
-	ulong r;
-	r = svcpu->fault_dar;
-	svcpu_put(svcpu);
-	return r;
-}
-
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	ulong crit_raw = vcpu->arch.shared->critical;
-	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
-	bool crit;
-
-	/* Truncate crit indicators in 32 bit mode */
-	if (!(vcpu->arch.shared->msr & MSR_SF)) {
-		crit_raw &= 0xffffffff;
-		crit_r1 &= 0xffffffff;
-	}
-
-	/* Critical section when crit == r1 */
-	crit = (crit_raw == crit_r1);
-	/* ... and we're in supervisor mode */
-	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
-
-	return crit;
-}
-#else /* CONFIG_KVM_BOOK3S_PR */
-
-static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
-static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
-			unsigned long pending_now, unsigned long old_pending)
-{
-}
-
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
 	vcpu->arch.gpr[num] = val;
@@ -489,12 +305,6 @@ static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
 	return vcpu->arch.fault_dar;
 }
 
-static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
-{
-	return false;
-}
-#endif
-
 /* Magic register values loaded into r3 and r4 before the 'sc' assembly
  * instruction for the OSI hypercalls */
 #define OSI_SC_MAGIC_R3			0x113724FA
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index ce0ef6ce8f86..c720e0b3238d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -22,7 +22,7 @@
 
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
-	return to_book3s(vcpu)->shadow_vcpu;
+	return vcpu->arch.shadow_vcpu;
 }
 
 static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 86d638a3b359..bf0fa8b0a883 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -20,7 +20,7 @@
 #ifndef __ASM_KVM_BOOK3S_64_H__
 #define __ASM_KVM_BOOK3S_64_H__
 
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
 {
 	preempt_disable();
@@ -35,7 +35,7 @@ static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
 
 #define SPAPR_TCE_SHIFT		12
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 #define KVM_DEFAULT_HPT_ORDER	24	/* 16MB HPT by default */
 extern unsigned long kvm_rma_pages;
 #endif
@@ -278,7 +278,7 @@ static inline int is_vrma_hpte(unsigned long hpte_v)
 		(HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)));
 }
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 /*
  * Note modification of an HPTE; set the HPTE modified bit
  * if anyone is interested.
@@ -289,6 +289,6 @@ static inline void note_hpte_modification(struct kvm *kvm,
 	if (atomic_read(&kvm->arch.hpte_mod_interest))
 		rev->guest_rpte |= HPTE_GR_MODIFIED;
 }
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 
 #endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 9039d3c97eec..192917d2239c 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,11 +79,12 @@ struct kvmppc_host_state {
 	ulong vmhandler;
 	ulong scratch0;
 	ulong scratch1;
+	ulong scratch2;
 	u8 in_guest;
 	u8 restore_hid5;
 	u8 napping;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	u8 hwthread_req;
 	u8 hwthread_state;
 	u8 host_ipi;
@@ -101,21 +102,23 @@ struct kvmppc_host_state {
 #endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	u64 cfar;
+	u64 ppr;
 #endif
 };
 
 struct kvmppc_book3s_shadow_vcpu {
+	bool in_use;
 	ulong gpr[14];
 	u32 cr;
 	u32 xer;
-
-	u32 fault_dsisr;
-	u32 last_inst;
 	ulong ctr;
 	ulong lr;
 	ulong pc;
+
 	ulong shadow_srr1;
 	ulong fault_dar;
+	u32 fault_dsisr;
+	u32 last_inst;
 
 #ifdef CONFIG_PPC_BOOK3S_32
 	u32     sr[16];			/* Guest SRs */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index d3c1eb34c986..dd8f61510dfd 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -26,7 +26,12 @@
 /* LPIDs we support with this build -- runtime limit may be lower */
 #define KVMPPC_NR_LPIDS                        64
 
-#define KVMPPC_INST_EHPRIV	0x7c00021c
+#define KVMPPC_INST_EHPRIV		0x7c00021c
+#define EHPRIV_OC_SHIFT			11
+/* "ehpriv 1" : ehpriv with OC = 1 is used for debug emulation */
+#define EHPRIV_OC_DEBUG			1
+#define KVMPPC_INST_EHPRIV_DEBUG	(KVMPPC_INST_EHPRIV | \
+					 (EHPRIV_OC_DEBUG << EHPRIV_OC_SHIFT))
 
 static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
 {
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 33283532e9d8..237d1d25b448 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -63,20 +63,17 @@ extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
 
 #endif
 
-/* We don't currently support large pages. */
-#define KVM_HPAGE_GFN_SHIFT(x)	0
-#define KVM_NR_PAGE_SIZES	1
-#define KVM_PAGES_PER_HPAGE(x)	(1UL<<31)
-
 #define HPTEG_CACHE_NUM			(1 << 15)
 #define HPTEG_HASH_BITS_PTE		13
 #define HPTEG_HASH_BITS_PTE_LONG	12
 #define HPTEG_HASH_BITS_VPTE		13
 #define HPTEG_HASH_BITS_VPTE_LONG	5
+#define HPTEG_HASH_BITS_VPTE_64K	11
 #define HPTEG_HASH_NUM_PTE		(1 << HPTEG_HASH_BITS_PTE)
 #define HPTEG_HASH_NUM_PTE_LONG		(1 << HPTEG_HASH_BITS_PTE_LONG)
 #define HPTEG_HASH_NUM_VPTE		(1 << HPTEG_HASH_BITS_VPTE)
 #define HPTEG_HASH_NUM_VPTE_LONG	(1 << HPTEG_HASH_BITS_VPTE_LONG)
+#define HPTEG_HASH_NUM_VPTE_64K		(1 << HPTEG_HASH_BITS_VPTE_64K)
 
 /* Physical Address Mask - allowed range of real mode RAM access */
 #define KVM_PAM			0x0fffffffffffffffULL
@@ -89,6 +86,9 @@ struct lppaca;
 struct slb_shadow;
 struct dtl_entry;
 
+struct kvmppc_vcpu_book3s;
+struct kvmppc_book3s_shadow_vcpu;
+
 struct kvm_vm_stat {
 	u32 remote_tlb_flush;
 };
@@ -224,15 +224,15 @@ struct revmap_entry {
 #define KVMPPC_GOT_PAGE		0x80
 
 struct kvm_arch_memory_slot {
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long *rmap;
 	unsigned long *slot_phys;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 };
 
 struct kvm_arch {
 	unsigned int lpid;
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	unsigned long hpt_virt;
 	struct revmap_entry *revmap;
 	unsigned int host_lpid;
@@ -256,7 +256,10 @@ struct kvm_arch {
 	cpumask_t need_tlb_flush;
 	struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
 	int hpt_cma_alloc;
-#endif /* CONFIG_KVM_BOOK3S_64_HV */
+#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
+	struct mutex hpt_mutex;
+#endif
 #ifdef CONFIG_PPC_BOOK3S_64
 	struct list_head spapr_tce_tables;
 	struct list_head rtas_tokens;
@@ -267,6 +270,7 @@ struct kvm_arch {
 #ifdef CONFIG_KVM_XICS
 	struct kvmppc_xics *xics;
 #endif
+	struct kvmppc_ops *kvm_ops;
 };
 
 /*
@@ -294,6 +298,10 @@ struct kvmppc_vcore {
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
+	u64 tb_offset;		/* guest timebase - host timebase */
+	ulong lpcr;
+	u32 arch_compat;
+	ulong pcr;
 };
 
 #define VCORE_ENTRY_COUNT(vc)	((vc)->entry_exit_count & 0xff)
@@ -328,6 +336,7 @@ struct kvmppc_pte {
 	bool may_read		: 1;
 	bool may_write		: 1;
 	bool may_execute	: 1;
+	u8 page_size;		/* MMU_PAGE_xxx */
 };
 
 struct kvmppc_mmu {
@@ -340,7 +349,8 @@ struct kvmppc_mmu {
 	/* book3s */
 	void (*mtsrin)(struct kvm_vcpu *vcpu, u32 srnum, ulong value);
 	u32  (*mfsrin)(struct kvm_vcpu *vcpu, u32 srnum);
-	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr, struct kvmppc_pte *pte, bool data);
+	int  (*xlate)(struct kvm_vcpu *vcpu, gva_t eaddr,
+		      struct kvmppc_pte *pte, bool data, bool iswrite);
 	void (*reset_msr)(struct kvm_vcpu *vcpu);
 	void (*tlbie)(struct kvm_vcpu *vcpu, ulong addr, bool large);
 	int  (*esid_to_vsid)(struct kvm_vcpu *vcpu, ulong esid, u64 *vsid);
@@ -360,6 +370,7 @@ struct kvmppc_slb {
 	bool large	: 1;	/* PTEs are 16MB */
 	bool tb		: 1;	/* 1TB segment */
 	bool class	: 1;
+	u8 base_page_size;	/* MMU_PAGE_xxx */
 };
 
 # ifdef CONFIG_PPC_FSL_BOOK3E
@@ -377,17 +388,6 @@ struct kvmppc_slb {
 #define KVMPPC_EPR_USER		1 /* exit to userspace to fill EPR */
 #define KVMPPC_EPR_KERNEL	2 /* in-kernel irqchip */
 
-struct kvmppc_booke_debug_reg {
-	u32 dbcr0;
-	u32 dbcr1;
-	u32 dbcr2;
-#ifdef CONFIG_KVM_E500MC
-	u32 dbcr4;
-#endif
-	u64 iac[KVMPPC_BOOKE_MAX_IAC];
-	u64 dac[KVMPPC_BOOKE_MAX_DAC];
-};
-
 #define KVMPPC_IRQ_DEFAULT	0
 #define KVMPPC_IRQ_MPIC		1
 #define KVMPPC_IRQ_XICS		2
@@ -402,6 +402,10 @@ struct kvm_vcpu_arch {
 	int slb_max;		/* 1 + index of last valid entry in slb[] */
 	int slb_nr;		/* total number of entries in SLB */
 	struct kvmppc_mmu mmu;
+	struct kvmppc_vcpu_book3s *book3s;
+#endif
+#ifdef CONFIG_PPC_BOOK3S_32
+	struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
 #endif
 
 	ulong gpr[32];
@@ -463,6 +467,8 @@ struct kvm_vcpu_arch {
 	u32 ctrl;
 	ulong dabr;
 	ulong cfar;
+	ulong ppr;
+	ulong shadow_srr1;
 #endif
 	u32 vrsave; /* also USPRG0 */
 	u32 mmucr;
@@ -498,6 +504,8 @@ struct kvm_vcpu_arch {
 
 	u64 mmcr[3];
 	u32 pmc[8];
+	u64 siar;
+	u64 sdar;
 
 #ifdef CONFIG_KVM_EXIT_TIMING
 	struct mutex exit_timing_lock;
@@ -531,7 +539,10 @@ struct kvm_vcpu_arch {
 	u32 eptcfg;
 	u32 epr;
 	u32 crit_save;
-	struct kvmppc_booke_debug_reg dbg_reg;
+	/* guest debug registers*/
+	struct debug_reg dbg_reg;
+	/* hardware visible debug registers when in guest state */
+	struct debug_reg shadow_dbg_reg;
 #endif
 	gpa_t paddr_accessed;
 	gva_t vaddr_accessed;
@@ -582,7 +593,7 @@ struct kvm_vcpu_arch {
 	struct kvmppc_icp *icp; /* XICS presentation controller */
 #endif
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 	struct kvm_vcpu_arch_shared shregs;
 
 	unsigned long pgfault_addr;
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index b15554a26c20..c8317fbf92c4 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -106,13 +106,6 @@ extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                        struct kvm_interrupt *irq);
 extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
 extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
-
-extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
-                                  unsigned int op, int *advance);
-extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong val);
-extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
-				     ulong *val);
 extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);
 
 extern int kvmppc_booke_init(void);
@@ -135,17 +128,17 @@ extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
 				struct kvm_create_spapr_tce *args);
 extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
 			     unsigned long ioba, unsigned long tce);
-extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
-				struct kvm_allocate_rma *rma);
 extern struct kvm_rma_info *kvm_alloc_rma(void);
 extern void kvm_release_rma(struct kvm_rma_info *ri);
 extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
 extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
 extern int kvmppc_core_init_vm(struct kvm *kvm);
 extern void kvmppc_core_destroy_vm(struct kvm *kvm);
-extern void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
+extern void kvmppc_core_free_memslot(struct kvm *kvm,
+				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
-extern int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
+extern int kvmppc_core_create_memslot(struct kvm *kvm,
+				      struct kvm_memory_slot *slot,
				      unsigned long npages);
 extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
@@ -177,6 +170,72 @@ extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
 extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
 extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);
 
+union kvmppc_one_reg {
+	u32	wval;
+	u64	dval;
+	vector128 vval;
+	u64	vsxval[2];
+	struct {
+		u64	addr;
+		u64	length;
+	}	vpaval;
+};
+
+struct kvmppc_ops {
+	struct module *owner;
+	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
+			   union kvmppc_one_reg *val);
+	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
+	void (*vcpu_put)(struct kvm_vcpu *vcpu);
+	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
+	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
+	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
+	void (*vcpu_free)(struct kvm_vcpu *vcpu);
+	int (*check_requests)(struct kvm_vcpu *vcpu);
+	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
+	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
+	int (*prepare_memory_region)(struct kvm *kvm,
+				     struct kvm_memory_slot *memslot,
+				     struct kvm_userspace_memory_region *mem);
+	void (*commit_memory_region)(struct kvm *kvm,
+				     struct kvm_userspace_memory_region *mem,
+				     const struct kvm_memory_slot *old);
+	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
+	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
+			   unsigned long end);
+	int (*age_hva)(struct kvm *kvm, unsigned long hva);
+	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
+	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
+	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
+	void (*free_memslot)(struct kvm_memory_slot *free,
+			     struct kvm_memory_slot *dont);
+	int (*create_memslot)(struct kvm_memory_slot *slot,
+			      unsigned long npages);
+	int (*init_vm)(struct kvm *kvm);
+	void (*destroy_vm)(struct kvm *kvm);
+	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
+	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
+			  unsigned int inst, int *advance);
+	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
+	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
+	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
+	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
+			      unsigned long arg);
+
+};
+
+extern struct kvmppc_ops *kvmppc_hv_ops;
+extern struct kvmppc_ops *kvmppc_pr_ops;
+
+static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
+{
+	return kvm->arch.kvm_ops == kvmppc_hv_ops;
+}
+
 /*
  * Cuts out inst bits with ordering according to spec.
  * That means the leftmost bit is zero. All given bits are included.
@@ -210,17 +269,6 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
 	return r;
 }
 
-union kvmppc_one_reg {
-	u32	wval;
-	u64	dval;
-	vector128 vval;
-	u64	vsxval[2];
-	struct {
-		u64	addr;
-		u64	length;
-	}	vpaval;
-};
-
 #define one_reg_size(id)	\
 	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))
 
@@ -245,10 +293,10 @@ union kvmppc_one_reg {
 	__v;					\
 })
 
-void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
-void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
+int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
 
 int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
@@ -260,7 +308,7 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
 
 struct openpic;
 
-#ifdef CONFIG_KVM_BOOK3S_64_HV
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 extern void kvm_cma_reserve(void) __init;
 static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 {
@@ -269,10 +317,10 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
 
 static inline u32 kvmppc_get_xics_latch(void)
 {
-	u32 xirr = get_paca()->kvm_hstate.saved_xirr;
+	u32 xirr;
 
+	xirr = get_paca()->kvm_hstate.saved_xirr;
 	get_paca()->kvm_hstate.saved_xirr = 0;
-
 	return xirr;
 }
 
@@ -281,7 +329,10 @@ static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
 	paca[cpu].kvm_hstate.host_ipi = host_ipi;
 }
 
-extern void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu);
+static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
+{
+	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
+}
 
 #else
 static inline void __init kvm_cma_reserve(void)
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index 4470d1e34d23..844c28de7ec0 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -84,8 +84,8 @@ struct lppaca {
	 * the processor is yielded (either because of an OS yield or a
	 * hypervisor preempt).  An even value implies that the processor is
	 * currently executing.
-	 * NOTE: This value will ALWAYS be zero for dedicated processors and
-	 * will NEVER be zero for shared processors (ie, initialized to a 1).
+	 * NOTE: Even dedicated processor partitions can yield so this
+	 * field cannot be used to determine if we are shared or dedicated.
	 */
	volatile __be32 yield_count;
	volatile __be32 dispersion_count; /* dispatch changed physical cpu */
@@ -106,15 +106,15 @@ extern struct lppaca lppaca[];
 #define lppaca_of(cpu)	(*paca[cpu].lppaca_ptr)
 
 /*
- * Old kernels used a reserved bit in the VPA to determine if it was running
- * in shared processor mode. New kernels look for a non zero yield count
- * but KVM still needs to set the bit to keep the old stuff happy.
+ * We are using a non architected field to determine if a partition is
+ * shared or dedicated. This currently works on both KVM and PHYP, but
+ * we will have to transition to something better.
 */
 #define LPPACA_OLD_SHARED_PROC		2
 
 static inline bool lppaca_shared_proc(struct lppaca *l)
 {
-	return l->yield_count != 0;
+	return !!(l->__old_status & LPPACA_OLD_SHARED_PROC);
 }
 
 /*
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 8b480901165a..ad3025d0880b 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -78,6 +78,18 @@ struct machdep_calls {
				    long index);
	void		(*tce_flush)(struct iommu_table *tbl);
 
+	/* _rm versions are for real mode use only */
+	int		(*tce_build_rm)(struct iommu_table *tbl,
+				     long index,
+				     long npages,
+				     unsigned long uaddr,
+				     enum dma_data_direction direction,
+				     struct dma_attrs *attrs);
+	void		(*tce_free_rm)(struct iommu_table *tbl,
+				    long index,
+				    long npages);
+	void		(*tce_flush_rm)(struct iommu_table *tbl);
+
	void __iomem *	(*ioremap)(phys_addr_t addr, unsigned long size,
				   unsigned long flags, void *caller);
	void		(*iounmap)(volatile void __iomem *token);
@@ -263,6 +275,10 @@ struct machdep_calls {
	ssize_t (*cpu_probe)(const char *, size_t);
	ssize_t (*cpu_release)(const char *, size_t);
 #endif
+
+#ifdef CONFIG_ARCH_RANDOM
+	int (*get_random_long)(unsigned long *v);
+#endif
 };
 
 extern void e500_idle(void);
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index c4cf01197273..807014dde821 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -135,8 +135,8 @@ extern char initial_stab[];
 #ifndef __ASSEMBLY__
 
 struct hash_pte {
-	unsigned long v;
-	unsigned long r;
+	__be64 v;
+	__be64 r;
 };
 
 extern struct hash_pte *htab_address;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index c5cd72833d6e..7bdcf340016c 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -129,6 +129,9 @@ extern int opal_enter_rtas(struct rtas_args *args,
 #define OPAL_LPC_READ				67
 #define OPAL_LPC_WRITE				68
 #define OPAL_RETURN_CPU				69
+#define OPAL_FLASH_VALIDATE			76
+#define OPAL_FLASH_MANAGE			77
+#define OPAL_FLASH_UPDATE			78
 
 #ifndef __ASSEMBLY__
 
@@ -460,10 +463,12 @@ enum {
 
 enum {
	OPAL_PHB_ERROR_DATA_TYPE_P7IOC = 1,
+	OPAL_PHB_ERROR_DATA_TYPE_PHB3 = 2
 };
 
 enum {
	OPAL_P7IOC_NUM_PEST_REGS = 128,
+	OPAL_PHB3_NUM_PEST_REGS = 256
 };
 
 struct OpalIoPhbErrorCommon {
@@ -531,28 +536,94 @@ struct OpalIoP7IOCPhbErrorData {
	uint64_t pestB[OPAL_P7IOC_NUM_PEST_REGS];
 };
 
+struct OpalIoPhb3ErrorData {
+	struct OpalIoPhbErrorCommon common;
+
+	uint32_t brdgCtl;
+
+	/* PHB3 UTL regs */
+	uint32_t portStatusReg;
+	uint32_t rootCmplxStatus;
+	uint32_t busAgentStatus;
+
+	/* PHB3 cfg regs */
+	uint32_t deviceStatus;
+	uint32_t slotStatus;
+	uint32_t linkStatus;
+	uint32_t devCmdStatus;
+	uint32_t devSecStatus;
+
+	/* cfg AER regs */
+	uint32_t rootErrorStatus;
+	uint32_t uncorrErrorStatus;
+	uint32_t corrErrorStatus;
+	uint32_t tlpHdr1;
+	uint32_t tlpHdr2;
+	uint32_t tlpHdr3;
+	uint32_t tlpHdr4;
+	uint32_t sourceId;
+
+	uint32_t rsv3;
+
+	/* Record data about the call to allocate a buffer */
+	uint64_t errorClass;
+	uint64_t correlator;
+
+	uint64_t nFir;			/* 000 */
+	uint64_t nFirMask;		/* 003 */
+	uint64_t nFirWOF;		/* 008 */
+
+	/* PHB3 MMIO Error Regs */
+	uint64_t phbPlssr;		/* 120 */
+	uint64_t phbCsr;		/* 110 */
+	uint64_t lemFir;		/* C00 */
+	uint64_t lemErrorMask;		/* C18 */
+	uint64_t lemWOF;		/* C40 */
+	uint64_t phbErrorStatus;	/* C80 */
+	uint64_t phbFirstErrorStatus;	/* C88 */
+	uint64_t phbErrorLog0;		/* CC0 */
+	uint64_t phbErrorLog1;		/* CC8 */
+	uint64_t mmioErrorStatus;	/* D00 */
+	uint64_t mmioFirstErrorStatus;	/* D08 */
+	uint64_t mmioErrorLog0;		/* D40 */
+	uint64_t mmioErrorLog1;		/* D48 */
+	uint64_t dma0ErrorStatus;	/* D80 */
+	uint64_t dma0FirstErrorStatus;	/* D88 */
+	uint64_t dma0ErrorLog0;		/* DC0 */
+	uint64_t dma0ErrorLog1;		/* DC8 */
+	uint64_t dma1ErrorStatus;	/* E00 */
+	uint64_t dma1FirstErrorStatus;	/* E08 */
+	uint64_t dma1ErrorLog0;		/* E40 */
+	uint64_t dma1ErrorLog1;		/* E48 */
+	uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS];
+	uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS];
+};
+
 typedef struct oppanel_line {
	const char * 	line;
	uint64_t 	line_len;
 } oppanel_line_t;
 
+/* /sys/firmware/opal */
+extern struct kobject *opal_kobj;
+
 /* API functions */
-int64_t opal_console_write(int64_t term_number, int64_t *length,
+int64_t opal_console_write(int64_t term_number, __be64 *length,
			   const uint8_t *buffer);
-int64_t opal_console_read(int64_t term_number, int64_t *length,
+int64_t opal_console_read(int64_t term_number, __be64 *length,
			  uint8_t *buffer);
 int64_t opal_console_write_buffer_space(int64_t term_number,
-					int64_t *length);
-int64_t opal_rtc_read(uint32_t *year_month_day,
-		      uint64_t *hour_minute_second_millisecond);
+					__be64 *length);
+int64_t opal_rtc_read(__be32 *year_month_day,
+		      __be64 *hour_minute_second_millisecond);
 int64_t opal_rtc_write(uint32_t year_month_day,
		       uint64_t hour_minute_second_millisecond);
 int64_t opal_cec_power_down(uint64_t request);
 int64_t opal_cec_reboot(void);
 int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
 int64_t opal_write_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
-int64_t opal_handle_interrupt(uint64_t isn, uint64_t *outstanding_event_mask);
-int64_t opal_poll_events(uint64_t *outstanding_event_mask);
+int64_t opal_handle_interrupt(uint64_t isn, __be64 *outstanding_event_mask);
+int64_t opal_poll_events(__be64 *outstanding_event_mask);
 int64_t opal_pci_set_hub_tce_memory(uint64_t hub_id, uint64_t tce_mem_addr,
				    uint64_t tce_mem_size);
 int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
@@ -560,9 +631,9 @@ int64_t opal_pci_set_phb_tce_memory(uint64_t phb_id, uint64_t tce_mem_addr,
 int64_t opal_pci_config_read_byte(uint64_t phb_id, uint64_t bus_dev_func,
				  uint64_t offset, uint8_t *data);
 int64_t opal_pci_config_read_half_word(uint64_t phb_id, uint64_t bus_dev_func,
-				       uint64_t offset, uint16_t *data);
+				       uint64_t offset, __be16 *data);
 int64_t opal_pci_config_read_word(uint64_t phb_id, uint64_t bus_dev_func,
-				  uint64_t offset, uint32_t *data);
+				  uint64_t offset, __be32 *data);
 int64_t opal_pci_config_write_byte(uint64_t phb_id, uint64_t bus_dev_func,
				   uint64_t offset, uint8_t data);
 int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
@@ -570,14 +641,14 @@ int64_t opal_pci_config_write_half_word(uint64_t phb_id, uint64_t bus_dev_func,
 int64_t opal_pci_config_write_word(uint64_t phb_id, uint64_t bus_dev_func,
				   uint64_t offset, uint32_t data);
 int64_t opal_set_xive(uint32_t isn, uint16_t server, uint8_t priority);
-int64_t opal_get_xive(uint32_t isn, uint16_t *server, uint8_t *priority);
+int64_t opal_get_xive(uint32_t isn, __be16 *server, uint8_t *priority);
 int64_t opal_register_exception_handler(uint64_t opal_exception,
					 uint64_t handler_address,
					 uint64_t glue_cache_line);
 int64_t opal_pci_eeh_freeze_status(uint64_t phb_id, uint64_t pe_number,
				   uint8_t *freeze_state,
-				   uint16_t *pci_error_type,
-				   uint64_t *phb_status);
+				   __be16 *pci_error_type,
+				   __be64 *phb_status);
 int64_t opal_pci_eeh_freeze_clear(uint64_t phb_id, uint64_t pe_number,
				  uint64_t eeh_action_token);
 int64_t opal_pci_shpc(uint64_t phb_id, uint64_t shpc_action, uint8_t *state);
@@ -614,13 +685,13 @@ int64_t opal_pci_msi_eoi(uint64_t phb_id, uint32_t hw_irq);
 int64_t opal_pci_set_xive_pe(uint64_t phb_id, uint32_t pe_number,
			     uint32_t xive_num);
 int64_t opal_get_xive_source(uint64_t phb_id, uint32_t xive_num,
-			     int32_t *interrupt_source_number);
+			     __be32 *interrupt_source_number);
 int64_t opal_get_msi_32(uint64_t phb_id, uint32_t mve_number, uint32_t xive_num,
-			uint8_t msi_range, uint32_t *msi_address,
-			uint32_t *message_data);
+			uint8_t msi_range, __be32 *msi_address,
+			__be32 *message_data);
 int64_t opal_get_msi_64(uint64_t phb_id, uint32_t mve_number,
			uint32_t xive_num, uint8_t msi_range,
-			uint64_t *msi_address, uint32_t *message_data);
+			__be64 *msi_address, __be32 *message_data);
 int64_t opal_start_cpu(uint64_t thread_number, uint64_t start_address);
 int64_t opal_query_cpu_status(uint64_t thread_number, uint8_t *thread_status);
 int64_t opal_write_oppanel(oppanel_line_t *lines, uint64_t num_lines);
@@ -642,20 +713,23 @@ int64_t opal_pci_fence_phb(uint64_t phb_id);
 int64_t opal_pci_reinit(uint64_t phb_id, uint8_t reinit_scope);
 int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t error_type, uint8_t mask_action);
 int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
-int64_t opal_get_epow_status(uint64_t *status);
+int64_t opal_get_epow_status(__be64 *status);
 int64_t opal_set_system_attention_led(uint8_t led_action);
 int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
			    uint16_t *pci_error_type, uint16_t *severity);
 int64_t opal_pci_poll(uint64_t phb_id);
 int64_t opal_return_cpu(void);
 
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
 int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
 
 int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
		       uint32_t addr, uint32_t data, uint32_t sz);
 int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
-		      uint32_t addr, uint32_t *data, uint32_t sz);
+		      uint32_t addr, __be32 *data, uint32_t sz);
+int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
+int64_t opal_manage_flash(uint8_t op);
+int64_t opal_update_flash(uint64_t blk_list);
 
 /* Internal functions */
 extern int early_init_dt_scan_opal(unsigned long node, const char *uname, int depth, void *data);
@@ -684,6 +758,7 @@ extern int opal_set_rtc_time(struct rtc_time *tm);
 extern void opal_get_rtc_time(struct rtc_time *tm);
 extern unsigned long opal_get_boot_time(void);
 extern void opal_nvram_init(void);
+extern void opal_flash_init(void);
 
 extern int opal_machine_check(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5954cebbc55..b6ea9e068c13 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -166,7 +166,7 @@ struct paca_struct {
	struct dtl_entry *dtl_curr;	/* pointer corresponding to dtl_ridx */
 
 #ifdef CONFIG_KVM_BOOK3S_HANDLER
-#ifdef CONFIG_KVM_BOOK3S_PR
+#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	/* We use this to store guest state in */
	struct kvmppc_book3s_shadow_vcpu shadow_vcpu;
 #endif
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index b9f426212d3a..32e4e212b9c1 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -78,7 +78,7 @@ extern unsigned int HPAGE_SHIFT;
  *
  * Also, KERNELBASE >= PAGE_OFFSET and PHYSICAL_START >= MEMORY_START
  *
- * There are two was to determine a physical address from a virtual one:
+ * There are two ways to determine a physical address from a virtual one:
  * va = pa + PAGE_OFFSET - MEMORY_START
  * va = pa + KERNELBASE - PHYSICAL_START
  *
@@ -403,7 +403,7 @@ void arch_free_page(struct page *page, int order);
 
 struct vm_area_struct;
 
-#ifdef CONFIG_PPC_64K_PAGES
+#if defined(CONFIG_PPC_64K_PAGES) && defined(CONFIG_PPC64)
 typedef pte_t *pgtable_t;
 #else
 typedef struct page *pgtable_t;
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386f738a..842846c1b711 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
 {
-	struct page *page = page_address(table);
-
	tlb_flush_pgtable(tlb, address);
-	pgtable_page_dtor(page);
-	pgtable_free_tlb(tlb, page, 0);
+	pgtable_page_dtor(table);
+	pgtable_free_tlb(tlb, page_address(table), 0);
 }
 #endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index f65e27b09bd3..4b0be20fcbfd 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -16,6 +16,7 @@ struct vmemmap_backing {
	unsigned long phys;
	unsigned long virt_addr;
 };
+extern struct vmemmap_backing *vmemmap_list;
 
 /*
  * Functions that deal with pagetables that could be at any level of
@@ -91,7 +92,10 @@ static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
	if (!pte)
		return NULL;
	page = virt_to_page(pte);
-	pgtable_page_ctor(page);
+	if (!pgtable_page_ctor(page)) {
+		__free_page(page);
+		return NULL;
+	}
	return page;
 }
 
@@ -144,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
 static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
 {
-	struct page *page = page_address(table);
-
	tlb_flush_pgtable(tlb, address);
-	pgtable_page_dtor(page);
-	pgtable_free_tlb(tlb, page, 0);
+	pgtable_page_dtor(table);
+	pgtable_free_tlb(tlb, page_address(table), 0);
 }
 
 #else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 46db09414a10..4a191c472867 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -394,6 +394,8 @@ static inline void mark_hpte_slot_valid(unsigned char *hpte_slot_array,
	hpte_slot_array[index] = hidx << 4 | 0x1 << 3;
 }
 
+struct page *realmode_pfn_to_page(unsigned long pfn);
+
 static inline char *get_hpte_slot_array(pmd_t *pmdp)
 {
	/*
diff --git a/arch/powerpc/include/asm/plpar_wrappers.h b/arch/powerpc/include/asm/plpar_wrappers.h
index a63b045e707c..12c32c5f533d 100644
--- a/arch/powerpc/include/asm/plpar_wrappers.h
+++ b/arch/powerpc/include/asm/plpar_wrappers.h
@@ -287,6 +287,32 @@ static inline long disable_reloc_on_exceptions(void) {
	return plpar_set_mode(0, 3, 0, 0);
 }
 
+/*
+ * Take exceptions in big endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_big_endian_exceptions(void)
+{
+	/* mflags = 0: big endian exceptions */
+	return plpar_set_mode(0, 4, 0, 0);
+}
+
+/*
+ * Take exceptions in little endian mode on this partition
+ *
+ * Note: this call has a partition wide scope and can take a while to complete.
+ * If it returns H_LONG_BUSY_* it should be retried periodically until it
+ * returns H_SUCCESS.
+ */
+static inline long enable_little_endian_exceptions(void)
+{
+	/* mflags = 1: little endian exceptions */
+	return plpar_set_mode(1, 4, 0, 0);
+}
+
 static inline long plapr_set_ciabr(unsigned long ciabr)
 {
	return plpar_set_mode(0, 1, ciabr, 0);
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index d7fe9f5b46d4..3132bb9365f3 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -143,6 +143,8 @@
 #define PPC_INST_LSWX			0x7c00042a
 #define PPC_INST_LWARX			0x7c000028
 #define PPC_INST_LWSYNC			0x7c2004ac
+#define PPC_INST_SYNC			0x7c0004ac
+#define PPC_INST_SYNC_MASK		0xfc0007fe
 #define PPC_INST_LXVD2X			0x7c000698
 #define PPC_INST_MCRXR			0x7c000400
 #define PPC_INST_MCRXR_MASK		0xfc0007fe
@@ -181,6 +183,7 @@
 #define PPC_INST_TLBIVAX		0x7c000624
 #define PPC_INST_TLBSRX_DOT		0x7c0006a5
 #define PPC_INST_XXLOR			0xf0000510
+#define PPC_INST_XXSWAPD		0xf0000250
 #define PPC_INST_XVCPSGNDP		0xf0000780
 #define PPC_INST_TRECHKPT		0x7c0007dd
 #define PPC_INST_TRECLAIM		0x7c00075d
@@ -200,6 +203,7 @@
 /* Misc instructions for BPF compiler */
 #define PPC_INST_LD			0xe8000000
 #define PPC_INST_LHZ			0xa0000000
+#define PPC_INST_LHBRX			0x7c00062c
 #define PPC_INST_LWZ			0x80000000
 #define PPC_INST_STD			0xf8000000
 #define PPC_INST_STDU			0xf8000001
@@ -218,7 +222,7 @@
 #define PPC_INST_MULLW			0x7c0001d6
 #define PPC_INST_MULHWU			0x7c000016
 #define PPC_INST_MULLI			0x1c000000
-#define PPC_INST_DIVWU			0x7c0003d6
+#define PPC_INST_DIVWU			0x7c000396
 #define PPC_INST_RLWINM			0x54000000
 #define PPC_INST_RLDICR			0x78000004
 #define PPC_INST_SLW			0x7c000030
@@ -344,6 +348,8 @@
					       VSX_XX1((s), a, b))
 #define XXLOR(t, a, b)		stringify_in_c(.long PPC_INST_XXLOR | \
					       VSX_XX3((t), a, b))
+#define XXSWAPD(t, a)		stringify_in_c(.long PPC_INST_XXSWAPD | \
+					       VSX_XX3((t), a, a))
 #define XVCPSGNDP(t, a, b)	stringify_in_c(.long (PPC_INST_XVCPSGNDP | \
					       VSX_XX3((t), (a), (b))))
 
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 599545738af3..f595b98079ee 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -98,123 +98,51 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #define REST_8GPRS(n, base)	REST_4GPRS(n, base); REST_4GPRS(n+4, base)
 #define REST_10GPRS(n, base)	REST_8GPRS(n, base); REST_2GPRS(n+8, base)
 
-#define SAVE_FPR(n, base)	stfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define SAVE_FPR(n, base)	stfd	n,8*TS_FPRWIDTH*(n)(base)
 #define SAVE_2FPRS(n, base)	SAVE_FPR(n, base); SAVE_FPR(n+1, base)
 #define SAVE_4FPRS(n, base)	SAVE_2FPRS(n, base); SAVE_2FPRS(n+2, base)
 #define SAVE_8FPRS(n, base)	SAVE_4FPRS(n, base); SAVE_4FPRS(n+4, base)
 #define SAVE_16FPRS(n, base)	SAVE_8FPRS(n, base); SAVE_8FPRS(n+8, base)
 #define SAVE_32FPRS(n, base)	SAVE_16FPRS(n, base); SAVE_16FPRS(n+16, base)
-#define REST_FPR(n, base)	lfd	n,THREAD_FPR0+8*TS_FPRWIDTH*(n)(base)
+#define REST_FPR(n, base)	lfd	n,8*TS_FPRWIDTH*(n)(base)
 #define REST_2FPRS(n, base)	REST_FPR(n, base); REST_FPR(n+1, base)
 #define REST_4FPRS(n, base)	REST_2FPRS(n, base); REST_2FPRS(n+2, base)
 #define REST_8FPRS(n, base)	REST_4FPRS(n, base); REST_4FPRS(n+4, base)
 #define REST_16FPRS(n, base)	REST_8FPRS(n, base); REST_8FPRS(n+8, base)
 #define REST_32FPRS(n, base)	REST_16FPRS(n, base); REST_16FPRS(n+16, base)
 
-#define SAVE_VR(n,b,base)	li b,THREAD_VR0+(16*(n));  stvx n,base,b
+#define SAVE_VR(n,b,base)	li b,16*(n);  stvx n,base,b
 #define SAVE_2VRS(n,b,base)	SAVE_VR(n,b,base); SAVE_VR(n+1,b,base)
 #define SAVE_4VRS(n,b,base)	SAVE_2VRS(n,b,base); SAVE_2VRS(n+2,b,base)
 #define SAVE_8VRS(n,b,base)	SAVE_4VRS(n,b,base); SAVE_4VRS(n+4,b,base)
 #define SAVE_16VRS(n,b,base)	SAVE_8VRS(n,b,base); SAVE_8VRS(n+8,b,base)
 #define SAVE_32VRS(n,b,base)	SAVE_16VRS(n,b,base); SAVE_16VRS(n+16,b,base)
-#define REST_VR(n,b,base)	li b,THREAD_VR0+(16*(n)); lvx n,base,b
+#define REST_VR(n,b,base)	li b,16*(n); lvx n,base,b
 #define REST_2VRS(n,b,base)	REST_VR(n,b,base); REST_VR(n+1,b,base)
 #define REST_4VRS(n,b,base)	REST_2VRS(n,b,base); REST_2VRS(n+2,b,base)
 #define REST_8VRS(n,b,base)	REST_4VRS(n,b,base); REST_4VRS(n+4,b,base)
 #define REST_16VRS(n,b,base)	REST_8VRS(n,b,base); REST_8VRS(n+8,b,base)
 #define REST_32VRS(n,b,base)	REST_16VRS(n,b,base); REST_16VRS(n+16,b,base)
 
-/* Save/restore FPRs, VRs and VSRs from their checkpointed backups in
- * thread_struct:
- */
-#define SAVE_FPR_TRANSACT(n, base)	stfd n,THREAD_TRANSACT_FPR0+	\
					8*TS_FPRWIDTH*(n)(base)
-#define SAVE_2FPRS_TRANSACT(n, base)	SAVE_FPR_TRANSACT(n, base);	\
					SAVE_FPR_TRANSACT(n+1, base)
-#define SAVE_4FPRS_TRANSACT(n, base)	SAVE_2FPRS_TRANSACT(n, base);	\
					SAVE_2FPRS_TRANSACT(n+2, base)
-#define SAVE_8FPRS_TRANSACT(n, base)	SAVE_4FPRS_TRANSACT(n, base);	\
					SAVE_4FPRS_TRANSACT(n+4, base)
-#define SAVE_16FPRS_TRANSACT(n, base)	SAVE_8FPRS_TRANSACT(n, base);	\
					SAVE_8FPRS_TRANSACT(n+8, base)
-#define SAVE_32FPRS_TRANSACT(n, base)	SAVE_16FPRS_TRANSACT(n, base);	\
					SAVE_16FPRS_TRANSACT(n+16, base)
-
-#define REST_FPR_TRANSACT(n, base)	lfd n,THREAD_TRANSACT_FPR0+	\
					8*TS_FPRWIDTH*(n)(base)
-#define REST_2FPRS_TRANSACT(n, base)	REST_FPR_TRANSACT(n, base);	\
					REST_FPR_TRANSACT(n+1, base)
-#define REST_4FPRS_TRANSACT(n, base)	REST_2FPRS_TRANSACT(n, base);	\
					REST_2FPRS_TRANSACT(n+2, base)
-#define REST_8FPRS_TRANSACT(n, base)	REST_4FPRS_TRANSACT(n, base);	\
					REST_4FPRS_TRANSACT(n+4, base)
-#define REST_16FPRS_TRANSACT(n, base)	REST_8FPRS_TRANSACT(n, base);	\
					REST_8FPRS_TRANSACT(n+8, base)
-#define REST_32FPRS_TRANSACT(n, base)	REST_16FPRS_TRANSACT(n, base);	\
					REST_16FPRS_TRANSACT(n+16, base)
-
-
-#define SAVE_VR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VR0+(16*(n)); \
					stvx n,b,base
-#define SAVE_2VRS_TRANSACT(n,b,base)	SAVE_VR_TRANSACT(n,b,base);	\
					SAVE_VR_TRANSACT(n+1,b,base)
-#define SAVE_4VRS_TRANSACT(n,b,base)	SAVE_2VRS_TRANSACT(n,b,base);	\
					SAVE_2VRS_TRANSACT(n+2,b,base)
-#define SAVE_8VRS_TRANSACT(n,b,base)	SAVE_4VRS_TRANSACT(n,b,base);	\
					SAVE_4VRS_TRANSACT(n+4,b,base)
-#define SAVE_16VRS_TRANSACT(n,b,base)	SAVE_8VRS_TRANSACT(n,b,base);	\
					SAVE_8VRS_TRANSACT(n+8,b,base)
-#define SAVE_32VRS_TRANSACT(n,b,base)	SAVE_16VRS_TRANSACT(n,b,base);	\
					SAVE_16VRS_TRANSACT(n+16,b,base)
-
-#define REST_VR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VR0+(16*(n)); \
					lvx n,b,base
-#define REST_2VRS_TRANSACT(n,b,base)	REST_VR_TRANSACT(n,b,base);	\
					REST_VR_TRANSACT(n+1,b,base)
-#define REST_4VRS_TRANSACT(n,b,base)	REST_2VRS_TRANSACT(n,b,base);	\
					REST_2VRS_TRANSACT(n+2,b,base)
-#define REST_8VRS_TRANSACT(n,b,base)	REST_4VRS_TRANSACT(n,b,base);	\
					REST_4VRS_TRANSACT(n+4,b,base)
-#define REST_16VRS_TRANSACT(n,b,base)	REST_8VRS_TRANSACT(n,b,base);	\
					REST_8VRS_TRANSACT(n+8,b,base)
-#define REST_32VRS_TRANSACT(n,b,base)	REST_16VRS_TRANSACT(n,b,base);	\
					REST_16VRS_TRANSACT(n+16,b,base)
-
-
-#define SAVE_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n)); \
					STXVD2X(n,R##base,R##b)
-#define SAVE_2VSRS_TRANSACT(n,b,base)	SAVE_VSR_TRANSACT(n,b,base);	\
					SAVE_VSR_TRANSACT(n+1,b,base)
-#define SAVE_4VSRS_TRANSACT(n,b,base)	SAVE_2VSRS_TRANSACT(n,b,base);	\
					SAVE_2VSRS_TRANSACT(n+2,b,base)
-#define SAVE_8VSRS_TRANSACT(n,b,base)	SAVE_4VSRS_TRANSACT(n,b,base);	\
					SAVE_4VSRS_TRANSACT(n+4,b,base)
-#define SAVE_16VSRS_TRANSACT(n,b,base)	SAVE_8VSRS_TRANSACT(n,b,base);	\
					SAVE_8VSRS_TRANSACT(n+8,b,base)
-#define SAVE_32VSRS_TRANSACT(n,b,base)	SAVE_16VSRS_TRANSACT(n,b,base);	\
					SAVE_16VSRS_TRANSACT(n+16,b,base)
-
-#define REST_VSR_TRANSACT(n,b,base)	li b,THREAD_TRANSACT_VSR0+(16*(n)); \
					LXVD2X(n,R##base,R##b)
-#define REST_2VSRS_TRANSACT(n,b,base)	REST_VSR_TRANSACT(n,b,base);	\
					REST_VSR_TRANSACT(n+1,b,base)
-#define REST_4VSRS_TRANSACT(n,b,base)	REST_2VSRS_TRANSACT(n,b,base);	\
					REST_2VSRS_TRANSACT(n+2,b,base)
-#define REST_8VSRS_TRANSACT(n,b,base)	REST_4VSRS_TRANSACT(n,b,base);	\
					REST_4VSRS_TRANSACT(n+4,b,base)
-#define REST_16VSRS_TRANSACT(n,b,base)	REST_8VSRS_TRANSACT(n,b,base);	\
					REST_8VSRS_TRANSACT(n+8,b,base)
-#define REST_32VSRS_TRANSACT(n,b,base)	REST_16VSRS_TRANSACT(n,b,base);	\
					REST_16VSRS_TRANSACT(n+16,b,base)
+#ifdef __BIG_ENDIAN__
+#define STXVD2X_ROT(n,b,base)		STXVD2X(n,b,base)
+#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base)
+#else
+#define STXVD2X_ROT(n,b,base)		XXSWAPD(n,n);		\
					STXVD2X(n,b,base);	\
					XXSWAPD(n,n)
+#define LXVD2X_ROT(n,b,base)		LXVD2X(n,b,base);	\
					XXSWAPD(n,n)
+#endif
 
 /* Save the lower 32 VSRs in the thread VSR region */
-#define SAVE_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); STXVD2X(n,R##base,R##b)
+#define SAVE_VSR(n,b,base)	li b,16*(n);  STXVD2X_ROT(n,R##base,R##b)
 #define SAVE_2VSRS(n,b,base)	SAVE_VSR(n,b,base); SAVE_VSR(n+1,b,base)
 #define SAVE_4VSRS(n,b,base)	SAVE_2VSRS(n,b,base); SAVE_2VSRS(n+2,b,base)
 #define SAVE_8VSRS(n,b,base)	SAVE_4VSRS(n,b,base); SAVE_4VSRS(n+4,b,base)
 #define SAVE_16VSRS(n,b,base)	SAVE_8VSRS(n,b,base); SAVE_8VSRS(n+8,b,base)
 #define SAVE_32VSRS(n,b,base)	SAVE_16VSRS(n,b,base); SAVE_16VSRS(n+16,b,base)
-#define REST_VSR(n,b,base)	li b,THREAD_VSR0+(16*(n)); LXVD2X(n,R##base,R##b)
+#define REST_VSR(n,b,base)	li b,16*(n); LXVD2X_ROT(n,R##base,R##b)
 #define REST_2VSRS(n,b,base)	REST_VSR(n,b,base); REST_VSR(n+1,b,base)
 #define REST_4VSRS(n,b,base)	REST_2VSRS(n,b,base); REST_2VSRS(n+2,b,base)
 #define REST_8VSRS(n,b,base)	REST_4VSRS(n,b,base); REST_4VSRS(n+4,b,base)
@@ -438,6 +366,8 @@ BEGIN_FTR_SECTION_NESTED(96);		\
	cmpwi dest,0;			\
	beq-  90b;			\
 END_FTR_SECTION_NESTED(CPU_FTR_CELL_TB_BUG, CPU_FTR_CELL_TB_BUG, 96)
+#elif defined(CONFIG_8xx)
+#define MFTB(dest)			mftb dest
 #else
 #define MFTB(dest)			mfspr dest, SPRN_TBRL
 #endif
@@ -478,13 +408,6 @@ BEGIN_FTR_SECTION_NESTED(945)						\
	std	ra,TASKTHREADPPR(rb);					\
 END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)
 
-#define RESTORE_PPR(ra, rb)						\
-BEGIN_FTR_SECTION_NESTED(946)						\
-	ld	ra,PACACURRENT(r13);					\
-	ld	rb,TASKTHREADPPR(ra);					\
-	mtspr	SPRN_PPR,rb;	/* Restore PPR */			\
-END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
-
 #endif
 
 /*
@@ -832,6 +755,35 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,946)
 #define N_SLINE	68
 #define N_SO	100
 
-#endif /*  __ASSEMBLY__ */
+/*
+ * Create an endian fixup trampoline
+ *
+ * This starts with a "tdi 0,0,0x48" instruction which is
+ * essentially a "trap never", and thus akin to a nop.
+ * + * The opcode for this instruction read with the wrong endian + * however results in a b . + 8 + * + * So essentially we use that trick to execute the following + * trampoline in "reverse endian" if we are running with the + * MSR_LE bit set the "wrong" way for whatever endianness the + * kernel is built for. + */ +#ifdef CONFIG_PPC_BOOK3E +#define FIXUP_ENDIAN +#else +#define FIXUP_ENDIAN \ + tdi 0,0,0x48; /* Reverse endian of b . + 8 */ \ + b $+36; /* Skip trampoline if endian is good */ \ + .long 0x05009f42; /* bcl 20,31,$+4 */ \ + .long 0xa602487d; /* mflr r10 */ \ + .long 0x1c004a39; /* addi r10,r10,28 */ \ + .long 0xa600607d; /* mfmsr r11 */ \ + .long 0x01006b69; /* xori r11,r11,1 */ \ + .long 0xa6035a7d; /* mtsrr0 r10 */ \ + .long 0xa6037b7d; /* mtsrr1 r11 */ \ + .long 0x2400004c /* rfid */ +#endif /* !CONFIG_PPC_BOOK3E */ +#endif /* __ASSEMBLY__ */ #endif /* _ASM_POWERPC_PPC_ASM_H */ diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index ce4de5aed7b5..fc14a38c7ccf 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h @@ -14,8 +14,18 @@ #ifdef CONFIG_VSX #define TS_FPRWIDTH 2 + +#ifdef __BIG_ENDIAN__ +#define TS_FPROFFSET 0 +#define TS_VSRLOWOFFSET 1 +#else +#define TS_FPROFFSET 1 +#define TS_VSRLOWOFFSET 0 +#endif + #else #define TS_FPRWIDTH 1 +#define TS_FPROFFSET 0 #endif #ifdef CONFIG_PPC64 @@ -142,26 +152,22 @@ typedef struct { unsigned long seg; } mm_segment_t; -#define TS_FPROFFSET 0 -#define TS_VSRLOWOFFSET 1 -#define TS_FPR(i) fpr[i][TS_FPROFFSET] -#define TS_TRANS_FPR(i) transact_fpr[i][TS_FPROFFSET] +#define TS_FPR(i) fp_state.fpr[i][TS_FPROFFSET] +#define TS_TRANS_FPR(i) transact_fp.fpr[i][TS_FPROFFSET] -struct thread_struct { - unsigned long ksp; /* Kernel stack pointer */ -#ifdef CONFIG_PPC64 - unsigned long ksp_vsid; -#endif - struct pt_regs *regs; /* Pointer to saved register state */ - mm_segment_t fs; /* for get_fs() validation */ -#ifdef CONFIG_BOOKE - /* BookE base exception scratch space; align on cacheline */ - unsigned long normsave[8] ____cacheline_aligned; -#endif -#ifdef CONFIG_PPC32 - void *pgdir; /* root of page-table tree */ - unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ -#endif +/* FP and VSX 0-31 register set */ +struct thread_fp_state { + u64 fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); + u64 fpscr; /* Floating point status */ +}; + +/* Complete AltiVec register set including VSCR */ +struct thread_vr_state { + vector128 vr[32] __attribute__((aligned(16))); + vector128 vscr __attribute__((aligned(16))); +}; + +struct debug_reg { #ifdef CONFIG_PPC_ADV_DEBUG_REGS /* * The following help to manage the use of Debug Control Registers @@ -198,13 +204,28 @@ struct thread_struct { unsigned long dvc2; #endif #endif - /* FP and VSX 0-31 register set */ - double fpr[32][TS_FPRWIDTH] __attribute__((aligned(16))); - struct { +}; - unsigned int pad; - unsigned int val; /* Floating point status */ - } fpscr; +struct thread_struct { + unsigned long ksp; /* Kernel stack pointer */ + +#ifdef CONFIG_PPC64 + unsigned long ksp_vsid; +#endif + struct pt_regs *regs; /* Pointer to saved register state */ + mm_segment_t fs; /* for get_fs() validation */ +#ifdef CONFIG_BOOKE + /* BookE base exception scratch space; align on cacheline */ + unsigned long normsave[8] ____cacheline_aligned; +#endif +#ifdef CONFIG_PPC32 + void *pgdir; /* root of page-table tree */ + unsigned long ksp_limit; /* if ksp <= ksp_limit stack overflow */ +#endif + /* Debug Registers */ + struct 
debug_reg debug; + struct thread_fp_state fp_state; + struct thread_fp_state *fp_save_area; int fpexc_mode; /* floating-point exception mode */ unsigned int align_ctl; /* alignment handling control */ #ifdef CONFIG_PPC64 @@ -222,10 +243,8 @@ struct thread_struct { struct arch_hw_breakpoint hw_brk; /* info on the hardware breakpoint */ unsigned long trap_nr; /* last trap # on this thread */ #ifdef CONFIG_ALTIVEC - /* Complete AltiVec register set */ - vector128 vr[32] __attribute__((aligned(16))); - /* AltiVec status */ - vector128 vscr __attribute__((aligned(16))); + struct thread_vr_state vr_state; + struct thread_vr_state *vr_save_area; unsigned long vrsave; int used_vr; /* set if process has used altivec */ #endif /* CONFIG_ALTIVEC */ @@ -262,13 +281,8 @@ struct thread_struct { * transact_fpr[] is the new set of transactional values. * VRs work the same way. */ - double transact_fpr[32][TS_FPRWIDTH]; - struct { - unsigned int pad; - unsigned int val; /* Floating point status */ - } transact_fpscr; - vector128 transact_vr[32] __attribute__((aligned(16))); - vector128 transact_vscr __attribute__((aligned(16))); + struct thread_fp_state transact_fp; + struct thread_vr_state transact_vr; unsigned long transact_vrsave; #endif /* CONFIG_PPC_TRANSACTIONAL_MEM */ #ifdef CONFIG_KVM_BOOK3S_32_HANDLER @@ -322,8 +336,6 @@ struct thread_struct { .ksp = INIT_SP, \ .regs = (struct pt_regs *)INIT_SP - 1, /* XXX bogus, I think */ \ .fs = KERNEL_DS, \ - .fpr = {{0}}, \ - .fpscr = { .val = 0, }, \ .fpexc_mode = 0, \ .ppr = INIT_PPR, \ } @@ -361,6 +373,11 @@ extern int set_endian(struct task_struct *tsk, unsigned int val); extern int get_unalign_ctl(struct task_struct *tsk, unsigned long adr); extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val); +extern void load_fp_state(struct thread_fp_state *fp); +extern void store_fp_state(struct thread_fp_state *fp); +extern void load_vr_state(struct thread_vr_state *vr); +extern void store_vr_state(struct thread_vr_state *vr); + static inline unsigned int __unpack_fe01(unsigned long msr_bits) { return ((msr_bits & MSR_FE0) >> 10) | ((msr_bits & MSR_FE1) >> 8); diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 7d0c7f3a7171..d977b9b78696 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -1,4 +1,3 @@ -#include <linux/of.h> /* linux/of.h gets to determine #include ordering */ #ifndef _POWERPC_PROM_H #define _POWERPC_PROM_H #ifdef __KERNEL__ @@ -20,21 +19,17 @@ #include <asm/irq.h> #include <linux/atomic.h> -#define HAVE_ARCH_DEVTREE_FIXUPS +/* These includes should be removed once implicit includes are cleaned up. */ +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/platform_device.h> /* * OF address retreival & translation */ -/* Translate a DMA address from device space to CPU space */ -extern u64 of_translate_dma_address(struct device_node *dev, - const __be32 *in_addr); - -#ifdef CONFIG_PCI -extern unsigned long pci_address_to_pio(phys_addr_t address); -#define pci_address_to_pio pci_address_to_pio -#endif /* CONFIG_PCI */ - /* Parse the ibm,dma-window property of an OF node into the busno, phys and * size parameters. 
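 *
 * Editor's illustration, not part of this patch: a typical caller of
 * this helper (the busno/phys/size out-parameters are assumed to be
 * unsigned long pointers in this kernel version) looks like
 *
 *	unsigned long busno, phys, size;
 *	of_parse_dma_window(dn, dma_window, &busno, &phys, &size);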
*/ @@ -44,16 +39,6 @@ void of_parse_dma_window(struct device_node *dn, const __be32 *dma_window, extern void kdump_move_device_tree(void); -/* cache lookup */ -struct device_node *of_find_next_cache_node(struct device_node *np); - -#ifdef CONFIG_NUMA -extern int of_node_to_nid(struct device_node *device); -#else -static inline int of_node_to_nid(struct device_node *device) { return 0; } -#endif -#define of_node_to_nid of_node_to_nid - extern void of_instantiate_rtc(void); extern int of_get_ibm_chip_id(struct device_node *np); @@ -143,14 +128,5 @@ struct of_drconf_cell { */ extern unsigned char ibm_architecture_vec[]; -/* These includes are put at the bottom because they may contain things - * that are overridden by this file. Ideally they shouldn't be included - * by this file, but there are a bunch of .c files that currently depend - * on it. Eventually they will be cleaned up. */ -#include <linux/of_fdt.h> -#include <linux/of_address.h> -#include <linux/of_irq.h> -#include <linux/platform_device.h> - #endif /* __KERNEL__ */ #endif /* _POWERPC_PROM_H */ diff --git a/arch/powerpc/include/asm/pte-book3e.h b/arch/powerpc/include/asm/pte-book3e.h index 0156702ba24e..576ad88104cb 100644 --- a/arch/powerpc/include/asm/pte-book3e.h +++ b/arch/powerpc/include/asm/pte-book3e.h @@ -40,7 +40,7 @@ #define _PAGE_U1 0x010000 #define _PAGE_U0 0x020000 #define _PAGE_ACCESSED 0x040000 -#define _PAGE_LENDIAN 0x080000 +#define _PAGE_ENDIAN 0x080000 #define _PAGE_GUARDED 0x100000 #define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ #define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index 10d1ef016bf1..fa8388ed94c5 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h @@ -115,7 +115,12 @@ #define MSR_64BIT MSR_SF /* Server variant */ -#define MSR_ (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#define __MSR (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV) +#ifdef __BIG_ENDIAN__ +#define MSR_ __MSR +#else +#define MSR_ (__MSR | MSR_LE) +#endif #define MSR_KERNEL (MSR_ | MSR_64BIT) #define MSR_USER32 (MSR_ | MSR_PR | MSR_EE) #define MSR_USER64 (MSR_USER32 | MSR_64BIT) @@ -243,6 +248,7 @@ #define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ +#define SPRN_TBU40 0x11E /* Timebase upper 40 bits (hyper, R/W) */ #define SPRN_SPURR 0x134 /* Scaled PURR */ #define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */ #define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */ @@ -283,6 +289,7 @@ #define LPCR_ISL (1ul << (63-2)) #define LPCR_VC_SH (63-2) #define LPCR_DPFD_SH (63-11) +#define LPCR_DPFD (7ul << LPCR_DPFD_SH) #define LPCR_VRMASD (0x1ful << (63-16)) #define LPCR_VRMA_L (1ul << (63-12)) #define LPCR_VRMA_LP0 (1ul << (63-15)) @@ -299,6 +306,7 @@ #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ #define LPCR_MER 0x00000800 /* Mediated External Exception */ #define LPCR_MER_SH 11 +#define LPCR_TC 0x00000200 /* Translation control */ #define LPCR_LPES 0x0000000c #define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ #define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ @@ -311,6 +319,10 @@ #define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ #define SPRN_HMER 0x150 /* Hardware m? error recovery */ #define SPRN_HMEER 0x151 /* Hardware m? 
enable error recovery */ +#define SPRN_PCR 0x152 /* Processor compatibility register */ +#define PCR_VEC_DIS (1ul << (63-0)) /* Vec. disable (bit NA since POWER8) */ +#define PCR_VSX_DIS (1ul << (63-1)) /* VSX disable (bit NA since POWER8) */ +#define PCR_ARCH_205 0x2 /* Architecture 2.05 */ #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ #define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ #define SPRN_TLBVPNR 0x155 /* P7 TLB control register */ @@ -420,6 +432,7 @@ #define HID4_RMLS2_SH (63 - 2) /* Real mode limit bottom 2 bits */ #define HID4_LPID5_SH (63 - 6) /* partition ID bottom 4 bits */ #define HID4_RMOR_SH (63 - 22) /* real mode offset (16 bits) */ +#define HID4_RMOR (0xFFFFul << HID4_RMOR_SH) #define HID4_LPES1 (1 << (63-57)) /* LPAR env. sel. bit 1 */ #define HID4_RMLS0_SH (63 - 58) /* Real mode limit top bit */ #define HID4_LPID1_SH 0 /* partition ID top 2 bits */ @@ -1102,6 +1115,13 @@ #define PVR_BE 0x0070 #define PVR_PA6T 0x0090 +/* "Logical" PVR values defined in PAPR, representing architecture levels */ +#define PVR_ARCH_204 0x0f000001 +#define PVR_ARCH_205 0x0f000002 +#define PVR_ARCH_206 0x0f000003 +#define PVR_ARCH_206p 0x0f100003 +#define PVR_ARCH_207 0x0f000004 + /* Macros for setting and retrieving special purpose registers */ #ifndef __ASSEMBLY__ #define mfmsr() ({unsigned long rval; \ @@ -1154,12 +1174,19 @@ #else /* __powerpc64__ */ +#if defined(CONFIG_8xx) +#define mftbl() ({unsigned long rval; \ + asm volatile("mftbl %0" : "=r" (rval)); rval;}) +#define mftbu() ({unsigned long rval; \ + asm volatile("mftbu %0" : "=r" (rval)); rval;}) +#else #define mftbl() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRL)); rval;}) #define mftbu() ({unsigned long rval; \ asm volatile("mfspr %0, %1" : "=r" (rval) : \ "i" (SPRN_TBRU)); rval;}) +#endif #endif /* !__powerpc64__ */ #define mttbl(v) asm volatile("mttbl %0":: "r"(v)) diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index ed8f836da094..2e31aacd8acc 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h @@ -381,7 +381,7 @@ #define DBCR0_IA34T 0x00004000 /* Instr Addr 3-4 range Toggle */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ -#define dbcr_iac_range(task) ((task)->thread.dbcr0) +#define dbcr_iac_range(task) ((task)->thread.debug.dbcr0) #define DBCR_IAC12I DBCR0_IA12 /* Range Inclusive */ #define DBCR_IAC12X (DBCR0_IA12 | DBCR0_IA12X) /* Range Exclusive */ #define DBCR_IAC12MODE (DBCR0_IA12 | DBCR0_IA12X) /* IAC 1-2 Mode Bits */ @@ -395,7 +395,7 @@ #define DBCR1_DAC1W 0x20000000 /* DAC1 Write Debug Event */ #define DBCR1_DAC2W 0x10000000 /* DAC2 Write Debug Event */ -#define dbcr_dac(task) ((task)->thread.dbcr1) +#define dbcr_dac(task) ((task)->thread.debug.dbcr1) #define DBCR_DAC1R DBCR1_DAC1R #define DBCR_DAC1W DBCR1_DAC1W #define DBCR_DAC2R DBCR1_DAC2R @@ -441,7 +441,7 @@ #define DBCR0_CRET 0x00000020 /* Critical Return Debug Event */ #define DBCR0_FT 0x00000001 /* Freeze Timers on debug event */ -#define dbcr_dac(task) ((task)->thread.dbcr0) +#define dbcr_dac(task) ((task)->thread.debug.dbcr0) #define DBCR_DAC1R DBCR0_DAC1R #define DBCR_DAC1W DBCR0_DAC1W #define DBCR_DAC2R DBCR0_DAC2R @@ -475,7 +475,7 @@ #define DBCR1_IAC34MX 0x000000C0 /* Instr Addr 3-4 range eXclusive */ #define DBCR1_IAC34AT 0x00000001 /* Instr Addr 3-4 range Toggle */ -#define dbcr_iac_range(task) ((task)->thread.dbcr1) +#define dbcr_iac_range(task) ((task)->thread.debug.dbcr1) 
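/*
 * Editor's illustration, not part of this patch: after the debug_reg
 * split these accessors reach the DBCR registers via
 * task->thread.debug, so (for a hypothetical task pointer "child"):
 *
 *	if (dbcr_iac_range(child) & DBCR_IAC12MODE)
 *		;	// an IAC1-2 range mode is armed
 */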
#define DBCR_IAC12I DBCR1_IAC12M /* Range Inclusive */ #define DBCR_IAC12X DBCR1_IAC12MX /* Range Exclusive */ #define DBCR_IAC12MODE DBCR1_IAC12MX /* IAC 1-2 Mode Bits */ diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h index 0cabfd7bc2d1..f5cde45b1161 100644 --- a/arch/powerpc/include/asm/scom.h +++ b/arch/powerpc/include/asm/scom.h @@ -54,8 +54,8 @@ struct scom_controller { scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count); void (*unmap)(scom_map_t map); - u64 (*read)(scom_map_t map, u32 reg); - void (*write)(scom_map_t map, u32 reg, u64 value); + int (*read)(scom_map_t map, u64 reg, u64 *value); + int (*write)(scom_map_t map, u64 reg, u64 value); }; extern const struct scom_controller *scom_controller; @@ -133,10 +133,18 @@ static inline void scom_unmap(scom_map_t map) * scom_read - Read a SCOM register * @map: Result of scom_map * @reg: Register index within that map + * @value: Updated with the value read + * + * Returns 0 (success) or a negative error code */ -static inline u64 scom_read(scom_map_t map, u32 reg) +static inline int scom_read(scom_map_t map, u64 reg, u64 *value) { - return scom_controller->read(map, reg); + int rc; + + rc = scom_controller->read(map, reg, value); + if (rc) + *value = 0xfffffffffffffffful; + return rc; } /** @@ -144,12 +152,15 @@ static inline u64 scom_read(scom_map_t map, u32 reg) * @map: Result of scom_map * @reg: Register index within that map * @value: Value to write + * + * Returns 0 (success) or a negative error code */ -static inline void scom_write(scom_map_t map, u32 reg, u64 value) +static inline int scom_write(scom_map_t map, u64 reg, u64 value) { - scom_controller->write(map, reg, value); + return scom_controller->write(map, reg, value); } + #endif /* CONFIG_PPC_SCOM */ #endif /* __ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h index d3ca85529b8b..703a8412dac2 100644 --- a/arch/powerpc/include/asm/setup.h +++ b/arch/powerpc/include/asm/setup.h @@ -23,6 +23,10 @@ extern void reloc_got2(unsigned long); #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) +void check_for_initrd(void); +void do_init_bootmem(void); +void setup_panic(void); + #endif /* !__ASSEMBLY__ */ #endif /* _ASM_POWERPC_SETUP_H */ diff --git a/arch/powerpc/include/asm/sfp-machine.h b/arch/powerpc/include/asm/sfp-machine.h index 3a7a67a0d006..d89beaba26ff 100644 --- a/arch/powerpc/include/asm/sfp-machine.h +++ b/arch/powerpc/include/asm/sfp-machine.h @@ -125,7 +125,7 @@ #define FP_EX_DIVZERO (1 << (31 - 5)) #define FP_EX_INEXACT (1 << (31 - 6)) -#define __FPU_FPSCR (current->thread.fpscr.val) +#define __FPU_FPSCR (current->thread.fp_state.fpscr) /* We only actually write to the destination register * if exceptions signalled (if any) will not trap. diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 98da78e0c2c0..084e0807db98 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h @@ -33,6 +33,7 @@ extern int boot_cpuid; extern int spinning_secondaries; extern void cpu_die(void); +extern int cpu_to_chip_id(int cpu); #ifdef CONFIG_SMP @@ -112,7 +113,6 @@ static inline struct cpumask *cpu_core_mask(int cpu) } extern int cpu_to_core_id(int cpu); -extern int cpu_to_chip_id(int cpu); /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers. 
* diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 93f280e23279..37b7ca39ec9f 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h @@ -235,6 +235,7 @@ extern long spu_sys_callback(struct spu_syscall_block *s); /* syscalls implemented in spufs */ struct file; +struct coredump_params; struct spufs_calls { long (*create_thread)(const char __user *name, unsigned int flags, umode_t mode, @@ -242,7 +243,7 @@ struct spufs_calls { long (*spu_run)(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus); int (*coredump_extra_notes_size)(void); - int (*coredump_extra_notes_write)(struct file *file, loff_t *foffset); + int (*coredump_extra_notes_write)(struct coredump_params *cprm); void (*notify_spus_active)(void); struct module *owner; }; diff --git a/arch/powerpc/include/asm/string.h b/arch/powerpc/include/asm/string.h index e40010abcaf1..0dffad6bcc84 100644 --- a/arch/powerpc/include/asm/string.h +++ b/arch/powerpc/include/asm/string.h @@ -10,7 +10,9 @@ #define __HAVE_ARCH_STRNCMP #define __HAVE_ARCH_STRCAT #define __HAVE_ARCH_MEMSET +#ifdef __BIG_ENDIAN__ #define __HAVE_ARCH_MEMCPY +#endif #define __HAVE_ARCH_MEMMOVE #define __HAVE_ARCH_MEMCMP #define __HAVE_ARCH_MEMCHR @@ -22,7 +24,9 @@ extern int strcmp(const char *,const char *); extern int strncmp(const char *, const char *, __kernel_size_t); extern char * strcat(char *, const char *); extern void * memset(void *,int,__kernel_size_t); +#ifdef __BIG_ENDIAN__ extern void * memcpy(void *,const void *,__kernel_size_t); +#endif extern void * memmove(void *,const void *,__kernel_size_t); extern int memcmp(const void *,const void *,__kernel_size_t); extern void * memchr(const void *,int,__kernel_size_t); diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h index 2be5618cdec6..aace90547614 100644 --- a/arch/powerpc/include/asm/switch_to.h +++ b/arch/powerpc/include/asm/switch_to.h @@ -35,6 +35,7 @@ extern void giveup_vsx(struct task_struct *); extern void enable_kernel_spe(void); extern void giveup_spe(struct task_struct *); extern void load_up_spe(struct task_struct *); +extern void switch_booke_debug_regs(struct debug_reg *new_debug); #ifndef CONFIG_SMP extern void discard_lazy_cpu_state(void); diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index ba7b1973866e..9854c564ac52 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h @@ -82,8 +82,6 @@ static inline struct thread_info *current_thread_info(void) #endif /* __ASSEMBLY__ */ -#define PREEMPT_ACTIVE 0x10000000 - /* * thread information flag bit numbers */ @@ -107,6 +105,9 @@ static inline struct thread_info *current_thread_info(void) #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation for stack store? */ #define TIF_MEMDIE 17 /* is terminating due to OOM killer */ +#if defined(CONFIG_PPC64) +#define TIF_ELF2ABI 18 /* function descriptors must die! 
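 * (Editor's note: the ELFv2 ABI flagged here via TIF_ELF2ABI does away
 * with ELFv1's function descriptors; is_elf2_task() further down in
 * this header keys off the flag.)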
*/ +#endif /* as above, but as bit values */ #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) @@ -185,6 +186,12 @@ static inline bool test_thread_local_flags(unsigned int flags) #define is_32bit_task() (1) #endif +#if defined(CONFIG_PPC64) +#define is_elf2_task() (test_thread_flag(TIF_ELF2ABI)) +#else +#define is_elf2_task() (0) +#endif + #endif /* !__ASSEMBLY__ */ #endif /* __KERNEL__ */ diff --git a/arch/powerpc/include/asm/timex.h b/arch/powerpc/include/asm/timex.h index 18908caa1f3b..2cf846edb3fc 100644 --- a/arch/powerpc/include/asm/timex.h +++ b/arch/powerpc/include/asm/timex.h @@ -29,7 +29,11 @@ static inline cycles_t get_cycles(void) ret = 0; __asm__ __volatile__( +#ifdef CONFIG_8xx + "97: mftb %0\n" +#else "97: mfspr %0, %2\n" +#endif "99:\n" ".section __ftr_fixup,\"a\"\n" ".align 2\n" @@ -41,7 +45,11 @@ static inline cycles_t get_cycles(void) " .long 0\n" " .long 0\n" ".previous" +#ifdef CONFIG_8xx + : "=r" (ret) : "i" (CPU_FTR_601)); +#else : "=r" (ret) : "i" (CPU_FTR_601), "i" (SPRN_TBRL)); +#endif return ret; #endif } diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h index 5f1b1e3c2137..8296381ae432 100644 --- a/arch/powerpc/include/asm/unaligned.h +++ b/arch/powerpc/include/asm/unaligned.h @@ -4,13 +4,18 @@ #ifdef __KERNEL__ /* - * The PowerPC can do unaligned accesses itself in big endian mode. + * The PowerPC can do unaligned accesses itself based on its endian mode. */ #include <linux/unaligned/access_ok.h> #include <linux/unaligned/generic.h> +#ifdef __LITTLE_ENDIAN__ +#define get_unaligned __get_unaligned_le +#define put_unaligned __put_unaligned_le +#else #define get_unaligned __get_unaligned_be #define put_unaligned __put_unaligned_be +#endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_UNALIGNED_H */ diff --git a/arch/powerpc/include/asm/uprobes.h b/arch/powerpc/include/asm/uprobes.h index 23016020915e..75c6ecdb8f37 100644 --- a/arch/powerpc/include/asm/uprobes.h +++ b/arch/powerpc/include/asm/uprobes.h @@ -37,6 +37,7 @@ typedef ppc_opcode_t uprobe_opcode_t; struct arch_uprobe { union { u8 insn[MAX_UINSN_BYTES]; + u8 ixol[MAX_UINSN_BYTES]; u32 ainsn; }; }; @@ -45,11 +46,4 @@ struct arch_uprobe_task { unsigned long saved_trap_nr; }; -extern int arch_uprobe_analyze_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long addr); -extern int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk); -extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); -extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); -extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); #endif /* _ASM_UPROBES_H */ diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h index d0b6d4ac6dda..9a5c928bb3c6 100644 --- a/arch/powerpc/include/asm/word-at-a-time.h +++ b/arch/powerpc/include/asm/word-at-a-time.h @@ -8,6 +8,8 @@ #include <linux/kernel.h> #include <asm/asm-compat.h> +#ifdef __BIG_ENDIAN__ + struct word_at_a_time { const unsigned long high_bits, low_bits; }; @@ -38,4 +40,80 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct return (val + c->high_bits) & ~rhs; } +#else + +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { 
REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +#ifdef CONFIG_64BIT + +/* Alan Modra's little-endian strlen tail for 64-bit */ +#define create_zero_mask(mask) (mask) + +static inline unsigned long find_zero(unsigned long mask) +{ + unsigned long leading_zero_bits; + long trailing_zero_bit_mask; + + asm ("addi %1,%2,-1\n\t" + "andc %1,%1,%2\n\t" + "popcntd %0,%1" + : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask) + : "r" (mask)); + return leading_zero_bits >> 3; +} + +#else /* 32-bit case */ + +/* + * This is largely generic for little-endian machines, but the + * optimal byte mask counting is probably going to be something + * that is architecture-specific. If you have a reliably fast + * bit count instruction, that might be better than the multiply + * and shift, for example. + */ + +/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */ +static inline long count_masked_bytes(long mask) +{ + /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */ + long a = (0x0ff0001+mask) >> 23; + /* Fix the 1 for 00 case */ + return a & mask; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ + return count_masked_bytes(mask); +} + +#endif + +/* Return nonzero if it has a zero */ +static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +#endif + #endif /* _ASM_WORD_AT_A_TIME_H */ diff --git a/arch/powerpc/include/asm/xor.h b/arch/powerpc/include/asm/xor.h index c82eb12a5b18..0abb97f3be10 100644 --- a/arch/powerpc/include/asm/xor.h +++ b/arch/powerpc/include/asm/xor.h @@ -1 +1,68 @@ +/* + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ * + * Copyright (C) IBM Corporation, 2012 + * + * Author: Anton Blanchard <anton@au.ibm.com> + */ +#ifndef _ASM_POWERPC_XOR_H +#define _ASM_POWERPC_XOR_H + +#ifdef CONFIG_ALTIVEC + +#include <asm/cputable.h> + +void xor_altivec_2(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in); +void xor_altivec_3(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in); +void xor_altivec_4(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in); +void xor_altivec_5(unsigned long bytes, unsigned long *v1_in, + unsigned long *v2_in, unsigned long *v3_in, + unsigned long *v4_in, unsigned long *v5_in); + +static struct xor_block_template xor_block_altivec = { + .name = "altivec", + .do_2 = xor_altivec_2, + .do_3 = xor_altivec_3, + .do_4 = xor_altivec_4, + .do_5 = xor_altivec_5, +}; + +#define XOR_SPEED_ALTIVEC() \ + do { \ + if (cpu_has_feature(CPU_FTR_ALTIVEC)) \ + xor_speed(&xor_block_altivec); \ + } while (0) +#else +#define XOR_SPEED_ALTIVEC() +#endif + +/* Also try the generic routines. */ #include <asm-generic/xor.h> + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ +do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_8regs_p); \ + xor_speed(&xor_block_32regs); \ + xor_speed(&xor_block_32regs_p); \ + XOR_SPEED_ALTIVEC(); \ +} while (0) + +#endif /* _ASM_POWERPC_XOR_H */ diff --git a/arch/powerpc/include/uapi/asm/byteorder.h b/arch/powerpc/include/uapi/asm/byteorder.h index aa6cc4fac965..ca931d074000 100644 --- a/arch/powerpc/include/uapi/asm/byteorder.h +++ b/arch/powerpc/include/uapi/asm/byteorder.h @@ -7,6 +7,10 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ +#ifdef __LITTLE_ENDIAN__ +#include <linux/byteorder/little_endian.h> +#else #include <linux/byteorder/big_endian.h> +#endif #endif /* _ASM_POWERPC_BYTEORDER_H */ diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h index 0fb1a6e9ff90..6836ec79a830 100644 --- a/arch/powerpc/include/uapi/asm/kvm.h +++ b/arch/powerpc/include/uapi/asm/kvm.h @@ -27,6 +27,7 @@ #define __KVM_HAVE_PPC_SMT #define __KVM_HAVE_IRQCHIP #define __KVM_HAVE_IRQ_LINE +#define __KVM_HAVE_GUEST_DEBUG struct kvm_regs { __u64 pc; @@ -269,7 +270,24 @@ struct kvm_fpu { __u64 fpr[32]; }; +/* + * Defines for h/w breakpoint, watchpoint (read, write or both) and + * software breakpoint. + * These are used as "type" in KVM_SET_GUEST_DEBUG ioctl and "status" + * for KVM_DEBUG_EXIT. + */ +#define KVMPPC_DEBUG_NONE 0x0 +#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1) +#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2) +#define KVMPPC_DEBUG_WATCH_READ (1UL << 3) struct kvm_debug_exit_arch { + __u64 address; + /* + * exiting to userspace because of h/w breakpoint, watchpoint + * (read, write or both) and software breakpoint. + */ + __u32 status; + __u32 reserved; }; /* for KVM_SET_GUEST_DEBUG */ @@ -281,10 +299,6 @@ struct kvm_guest_debug_arch { * Type denotes h/w breakpoint, read watchpoint, write * watchpoint or watchpoint (both read and write). 
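 *
 * Editor's illustration, not part of this patch: a user-space VMM
 * would typically arm one slot and enable debugging roughly like the
 * sketch below (the KVM_GUESTDBG_* flags and the ioctl come from
 * linux/kvm.h; an addr field alongside type in bp[] is assumed):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].addr = bp_addr;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);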
*/ -#define KVMPPC_DEBUG_NONE 0x0 -#define KVMPPC_DEBUG_BREAKPOINT (1UL << 1) -#define KVMPPC_DEBUG_WATCH_WRITE (1UL << 2) -#define KVMPPC_DEBUG_WATCH_READ (1UL << 3) __u32 type; __u32 reserved; } bp[16]; @@ -429,6 +443,11 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_MMCR0 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x10) #define KVM_REG_PPC_MMCR1 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x11) #define KVM_REG_PPC_MMCRA (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x12) +#define KVM_REG_PPC_MMCR2 (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x13) +#define KVM_REG_PPC_MMCRS (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x14) +#define KVM_REG_PPC_SIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x15) +#define KVM_REG_PPC_SDAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x16) +#define KVM_REG_PPC_SIER (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x17) #define KVM_REG_PPC_PMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x18) #define KVM_REG_PPC_PMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x19) @@ -499,6 +518,65 @@ struct kvm_get_htab_header { #define KVM_REG_PPC_TLB3PS (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9a) #define KVM_REG_PPC_EPTCFG (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9b) +/* Timebase offset */ +#define KVM_REG_PPC_TB_OFFSET (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9c) + +/* POWER8 registers */ +#define KVM_REG_PPC_SPMC1 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9d) +#define KVM_REG_PPC_SPMC2 (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0x9e) +#define KVM_REG_PPC_IAMR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x9f) +#define KVM_REG_PPC_TFHAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa0) +#define KVM_REG_PPC_TFIAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa1) +#define KVM_REG_PPC_TEXASR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa2) +#define KVM_REG_PPC_FSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa3) +#define KVM_REG_PPC_PSPB (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xa4) +#define KVM_REG_PPC_EBBHR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa5) +#define KVM_REG_PPC_EBBRR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa6) +#define KVM_REG_PPC_BESCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa7) +#define KVM_REG_PPC_TAR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa8) +#define KVM_REG_PPC_DPDES (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xa9) +#define KVM_REG_PPC_DAWR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaa) +#define KVM_REG_PPC_DAWRX (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xab) +#define KVM_REG_PPC_CIABR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xac) +#define KVM_REG_PPC_IC (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xad) +#define KVM_REG_PPC_VTB (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xae) +#define KVM_REG_PPC_CSIGR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xaf) +#define KVM_REG_PPC_TACR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb0) +#define KVM_REG_PPC_TCSCR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb1) +#define KVM_REG_PPC_PID (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb2) +#define KVM_REG_PPC_ACOP (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb3) + +#define KVM_REG_PPC_VRSAVE (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb4) +#define KVM_REG_PPC_LPCR (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb5) +#define KVM_REG_PPC_PPR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0xb6) + +/* Architecture compatibility level */ +#define KVM_REG_PPC_ARCH_COMPAT (KVM_REG_PPC | KVM_REG_SIZE_U32 | 0xb7) + +/* Transactional Memory checkpointed state: + * This is all GPRs, all VSX regs and a subset of SPRs + */ +#define KVM_REG_PPC_TM (KVM_REG_PPC | 0x80000000) +/* TM GPRs */ +#define KVM_REG_PPC_TM_GPR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0) +#define KVM_REG_PPC_TM_GPR(n) (KVM_REG_PPC_TM_GPR0 + (n)) +#define KVM_REG_PPC_TM_GPR31 (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x1f) +/* TM VSX */ +#define KVM_REG_PPC_TM_VSR0 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x20) +#define KVM_REG_PPC_TM_VSR(n) 
(KVM_REG_PPC_TM_VSR0 + (n)) +#define KVM_REG_PPC_TM_VSR63 (KVM_REG_PPC_TM | KVM_REG_SIZE_U128 | 0x5f) +/* TM SPRS */ +#define KVM_REG_PPC_TM_CR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x60) +#define KVM_REG_PPC_TM_LR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x61) +#define KVM_REG_PPC_TM_CTR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x62) +#define KVM_REG_PPC_TM_FPSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x63) +#define KVM_REG_PPC_TM_AMR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x64) +#define KVM_REG_PPC_TM_PPR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x65) +#define KVM_REG_PPC_TM_VRSAVE (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x66) +#define KVM_REG_PPC_TM_VSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U32 | 0x67) +#define KVM_REG_PPC_TM_DSCR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x68) +#define KVM_REG_PPC_TM_TAR (KVM_REG_PPC_TM | KVM_REG_SIZE_U64 | 0x69) + /* PPC64 eXternal Interrupt Controller Specification */ #define KVM_DEV_XICS_GRP_SOURCES 1 /* 64-bit source attributes */ diff --git a/arch/powerpc/include/uapi/asm/socket.h b/arch/powerpc/include/uapi/asm/socket.h index a6d74467c9ed..fa698324a1fd 100644 --- a/arch/powerpc/include/uapi/asm/socket.h +++ b/arch/powerpc/include/uapi/asm/socket.h @@ -83,4 +83,6 @@ #define SO_BUSY_POLL 46 +#define SO_MAX_PACING_RATE 47 + #endif /* _ASM_POWERPC_SOCKET_H */
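Editor's illustration for the SO_MAX_PACING_RATE addition above (not
part of this patch): user space caps a socket's transmit pacing rate,
in bytes per second, with an ordinary setsockopt() call. A minimal
sketch, assuming a libc whose headers may predate the define:

#include <sys/socket.h>

#ifndef SO_MAX_PACING_RATE
#define SO_MAX_PACING_RATE 47	/* value from the hunk above */
#endif

static int cap_pacing_rate(int fd, unsigned int bytes_per_sec)
{
	return setsockopt(fd, SOL_SOCKET, SO_MAX_PACING_RATE,
			  &bytes_per_sec, sizeof(bytes_per_sec));
}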