Diffstat (limited to 'arch/arm/include')
-rw-r--r--  arch/arm/include/asm/arch_gicv3.h        |  54
-rw-r--r--  arch/arm/include/asm/efi.h               |   3
-rw-r--r--  arch/arm/include/asm/io.h                |   1
-rw-r--r--  arch/arm/include/asm/kvm_asm.h           |   1
-rw-r--r--  arch/arm/include/asm/kvm_host.h          |   3
-rw-r--r--  arch/arm/include/asm/kvm_hyp.h           |   1
-rw-r--r--  arch/arm/include/asm/mutex.h             |  21
-rw-r--r--  arch/arm/include/asm/processor.h         |   2
-rw-r--r--  arch/arm/include/asm/tlb.h               |  21
-rw-r--r--  arch/arm/include/asm/xen/hypercall.h     |  88
-rw-r--r--  arch/arm/include/asm/xen/hypervisor.h    |  40
-rw-r--r--  arch/arm/include/asm/xen/interface.h     |  86
-rw-r--r--  arch/arm/include/asm/xen/page-coherent.h |  99
-rw-r--r--  arch/arm/include/asm/xen/page.h          | 123
-rw-r--r--  arch/arm/include/uapi/asm/kvm.h          |   2
15 files changed, 73 insertions, 472 deletions
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index a8088290b778..27475904e096 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -22,6 +22,7 @@
 #include <linux/io.h>
 #include <asm/barrier.h>
+#include <asm/cacheflush.h>
 #include <asm/cp15.h>
 
 #define ICC_EOIR1			__ACCESS_CP15(c12, 0, c12, 1)
@@ -230,19 +231,14 @@ static inline void gic_write_bpr1(u32 val)
  * AArch32, since the syndrome register doesn't provide any information for
  * them.
  * Consequently, the following IO helpers use 32bit accesses.
- *
- * There are only two registers that need 64bit accesses in this driver:
- * - GICD_IROUTERn, contain the affinity values associated to each interrupt.
- *   The upper-word (aff3) will always be 0, so there is no need for a lock.
- * - GICR_TYPER is an ID register and doesn't need atomicity.
  */
-static inline void gic_write_irouter(u64 val, volatile void __iomem *addr)
+static inline void __gic_writeq_nonatomic(u64 val, volatile void __iomem *addr)
 {
 	writel_relaxed((u32)val, addr);
 	writel_relaxed((u32)(val >> 32), addr + 4);
 }
 
-static inline u64 gic_read_typer(const volatile void __iomem *addr)
+static inline u64 __gic_readq_nonatomic(const volatile void __iomem *addr)
 {
 	u64 val;
 
@@ -251,5 +247,49 @@ static inline u64 gic_read_typer(const volatile void __iomem *addr)
 	return val;
 }
 
+#define gic_flush_dcache_to_poc(a,l)	__cpuc_flush_dcache_area((a), (l))
+
+/*
+ * GICD_IROUTERn, contain the affinity values associated to each interrupt.
+ * The upper-word (aff3) will always be 0, so there is no need for a lock.
+ */
+#define gic_write_irouter(v, c)		__gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gic_read_typer(c)		__gic_readq_nonatomic(c)
+
+/*
+ * GITS_BASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_baser(c)		__gic_readq_nonatomic(c)
+#define gits_write_baser(v, c)		__gic_writeq_nonatomic(v, c)
+
+/*
+ * GICR_PENDBASER and GICR_PROPBASE are changed with LPIs disabled, so they
+ * won't be being used during any updates and can be changed non-atomically
+ */
+#define gicr_read_propbaser(c)		__gic_readq_nonatomic(c)
+#define gicr_write_propbaser(v, c)	__gic_writeq_nonatomic(v, c)
+#define gicr_read_pendbaser(c)		__gic_readq_nonatomic(c)
+#define gicr_write_pendbaser(v, c)	__gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_TYPER is an ID register and doesn't need atomicity.
+ */
+#define gits_read_typer(c)		__gic_readq_nonatomic(c)
+
+/*
+ * GITS_CBASER - hi and lo bits may be accessed independently.
+ */
+#define gits_read_cbaser(c)		__gic_readq_nonatomic(c)
+#define gits_write_cbaser(v, c)		__gic_writeq_nonatomic(v, c)
+
+/*
+ * GITS_CWRITER - hi and lo bits may be accessed independently.
+ */
+#define gits_write_cwriter(v, c)	__gic_writeq_nonatomic(v, c)
+
 #endif /* !__ASSEMBLY__ */
 #endif /* !__ASM_ARCH_GICV3_H */
diff --git a/arch/arm/include/asm/efi.h b/arch/arm/include/asm/efi.h
index 766bf9b78160..0b06f5341b45 100644
--- a/arch/arm/include/asm/efi.h
+++ b/arch/arm/include/asm/efi.h
@@ -57,6 +57,9 @@ void efi_virtmap_unload(void);
 #define __efi_call_early(f, ...)	f(__VA_ARGS__)
 #define efi_is_64bit()			(false)
 
+#define efi_call_proto(protocol, f, instance, ...)			\
+	((protocol##_t *)instance)->f(instance, ##__VA_ARGS__)
+
 struct screen_info *alloc_screen_info(efi_system_table_t *sys_table_arg);
 void free_screen_info(efi_system_table_t *sys_table, struct screen_info *si);
 
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 021692c64de3..42871fb8340e 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -25,7 +25,6 @@
 #include <linux/string.h>
 #include <linux/types.h>
-#include <linux/blk_types.h>
 #include <asm/byteorder.h>
 #include <asm/memory.h>
 #include <asm-generic/pci_iomap.h>
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index d7ea6bcb29bf..8ef05381984b 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -66,6 +66,7 @@ extern char __kvm_hyp_vector[];
 extern void __kvm_flush_vm_context(void);
 extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
+extern void __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu);
 
 extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 2d19e02d03fd..d5423ab15ed5 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -57,6 +57,9 @@ struct kvm_arch {
 	/* VTTBR value associated with below pgd and vmid */
 	u64    vttbr;
 
+	/* The last vcpu id that ran on each physical CPU */
+	int __percpu *last_vcpu_ran;
+
 	/* Timer */
 	struct arch_timer_kvm	timer;
 
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
index 343135ede5fa..58508900c4bb 100644
--- a/arch/arm/include/asm/kvm_hyp.h
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -71,6 +71,7 @@
 #define ICIALLUIS		__ACCESS_CP15(c7, 0, c1, 0)
 #define ATS1CPR			__ACCESS_CP15(c7, 0, c8, 0)
 #define TLBIALLIS		__ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALL			__ACCESS_CP15(c8, 0, c7, 0)
 #define TLBIALLNSNHIS		__ACCESS_CP15(c8, 4, c3, 4)
 #define PRRR			__ACCESS_CP15(c10, 0, c2, 0)
 #define NMRR			__ACCESS_CP15(c10, 0, c2, 1)
diff --git a/arch/arm/include/asm/mutex.h b/arch/arm/include/asm/mutex.h
deleted file mode 100644
index 87c044910fe0..000000000000
--- a/arch/arm/include/asm/mutex.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * arch/arm/include/asm/mutex.h
- *
- * ARM optimized mutex locking primitives
- *
- * Please look into asm-generic/mutex-xchg.h for a formal definition.
- */
-#ifndef _ASM_MUTEX_H
-#define _ASM_MUTEX_H
-/*
- * On pre-ARMv6 hardware this results in a swp-based implementation,
- * which is the most efficient. For ARMv6+, we have exclusive memory
- * accessors and use atomic_dec to avoid the extra xchg operations
- * on the locking slowpaths.
- */
-#if __LINUX_ARM_ARCH__ < 6
-#include <asm-generic/mutex-xchg.h>
-#else
-#include <asm-generic/mutex-dec.h>
-#endif
-#endif	/* _ASM_MUTEX_H */
diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
index 8a1e8e995dae..c3d5fc124a05 100644
--- a/arch/arm/include/asm/processor.h
+++ b/arch/arm/include/asm/processor.h
@@ -82,8 +82,6 @@ unsigned long get_wchan(struct task_struct *p);
 #define cpu_relax()			barrier()
 #endif
 
-#define cpu_relax_lowlatency()		cpu_relax()
-
 #define task_pt_regs(p) \
 	((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1)
 
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 1e25cd80589e..3f2eb76243e3 100644
--- a/arch/arm/include/asm/tlb.h
+++ b/arch/arm/include/asm/tlb.h
@@ -186,6 +186,8 @@ tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long addr)
 	tlb_add_flush(tlb, addr);
 }
 
+#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
+	tlb_remove_tlb_entry(tlb, ptep, address)
 /*
  * In the case of tlb vma handling, we can optimise these away in the
  * case where we're doing a full MM flush.  When we're doing a munmap,
@@ -211,18 +213,17 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 static inline bool __tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
+	tlb->pages[tlb->nr++] = page;
+	VM_WARN_ON(tlb->nr > tlb->max);
 	if (tlb->nr == tlb->max)
 		return true;
-	tlb->pages[tlb->nr++] = page;
 	return false;
 }
 
 static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
 {
-	if (__tlb_remove_page(tlb, page)) {
+	if (__tlb_remove_page(tlb, page))
 		tlb_flush_mmu(tlb);
-		__tlb_remove_page(tlb, page);
-	}
 }
 
 static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
@@ -231,12 +232,6 @@ static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
 	return __tlb_remove_page(tlb, page);
 }
 
-static inline bool __tlb_remove_pte_page(struct mmu_gather *tlb,
-					 struct page *page)
-{
-	return __tlb_remove_page(tlb, page);
-}
-
 static inline void tlb_remove_page_size(struct mmu_gather *tlb,
 					struct page *page, int page_size)
 {
@@ -284,5 +279,11 @@ tlb_remove_pmd_tlb_entry(struct mmu_gather *tlb, pmd_t *pmdp, unsigned long addr
 
 #define tlb_migrate_finish(mm)		do { } while (0)
 
+#define tlb_remove_check_page_size_change tlb_remove_check_page_size_change
+static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
+						     unsigned int page_size)
+{
+}
+
 #endif /* CONFIG_MMU */
 #endif
diff --git a/arch/arm/include/asm/xen/hypercall.h b/arch/arm/include/asm/xen/hypercall.h
index 9d874db13c0e..3522cbaed316 100644
--- a/arch/arm/include/asm/xen/hypercall.h
+++ b/arch/arm/include/asm/xen/hypercall.h
@@ -1,87 +1 @@
-/******************************************************************************
- * hypercall.h
- *
- * Linux-specific hypervisor handling.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version 2
- * as published by the Free Software Foundation; or, when distributed
- * separately from the Linux kernel or incorporated into other
- * software packages, subject to the following license:
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this source file (the "Software"), to deal in the Software without
- * restriction, including without limitation the rights to use, copy, modify,
- * merge, publish, distribute, sublicense, and/or sell copies of the Software,
- * and to permit persons to whom the Software is furnished to do so, subject to
- * the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
- * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- */
-
-#ifndef _ASM_ARM_XEN_HYPERCALL_H
-#define _ASM_ARM_XEN_HYPERCALL_H
-
-#include <linux/bug.h>
-
-#include <xen/interface/xen.h>
-#include <xen/interface/sched.h>
-#include <xen/interface/platform.h>
-
-long privcmd_call(unsigned call, unsigned long a1,
-		unsigned long a2, unsigned long a3,
-		unsigned long a4, unsigned long a5);
-int HYPERVISOR_xen_version(int cmd, void *arg);
-int HYPERVISOR_console_io(int cmd, int count, char *str);
-int HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count);
-int HYPERVISOR_sched_op(int cmd, void *arg);
-int HYPERVISOR_event_channel_op(int cmd, void *arg);
-unsigned long HYPERVISOR_hvm_op(int op, void *arg);
-int HYPERVISOR_memory_op(unsigned int cmd, void *arg);
-int HYPERVISOR_physdev_op(int cmd, void *arg);
-int HYPERVISOR_vcpu_op(int cmd, int vcpuid, void *extra_args);
-int HYPERVISOR_tmem_op(void *arg);
-int HYPERVISOR_vm_assist(unsigned int cmd, unsigned int type);
-int HYPERVISOR_platform_op_raw(void *arg);
-static inline int HYPERVISOR_platform_op(struct xen_platform_op *op)
-{
-	op->interface_version = XENPF_INTERFACE_VERSION;
-	return HYPERVISOR_platform_op_raw(op);
-}
-int HYPERVISOR_multicall(struct multicall_entry *calls, uint32_t nr);
-
-static inline int
-HYPERVISOR_suspend(unsigned long start_info_mfn)
-{
-	struct sched_shutdown r = { .reason = SHUTDOWN_suspend };
-
-	/* start_info_mfn is unused on ARM */
-	return HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
-}
-
-static inline void
-MULTI_update_va_mapping(struct multicall_entry *mcl, unsigned long va,
-			unsigned int new_val, unsigned long flags)
-{
-	BUG();
-}
-
-static inline void
-MULTI_mmu_update(struct multicall_entry *mcl, struct mmu_update *req,
-		 int count, int *success_count, domid_t domid)
-{
-	BUG();
-}
-
-#endif /* _ASM_ARM_XEN_HYPERCALL_H */
+#include <xen/arm/hypercall.h>
diff --git a/arch/arm/include/asm/xen/hypervisor.h b/arch/arm/include/asm/xen/hypervisor.h
index 95251512e2c4..d6e7709d0688 100644
--- a/arch/arm/include/asm/xen/hypervisor.h
+++ b/arch/arm/include/asm/xen/hypervisor.h
@@ -1,39 +1 @@
-#ifndef _ASM_ARM_XEN_HYPERVISOR_H
-#define _ASM_ARM_XEN_HYPERVISOR_H
-
-#include <linux/init.h>
-
-extern struct shared_info *HYPERVISOR_shared_info;
-extern struct start_info *xen_start_info;
-
-/* Lazy mode for batching updates / context switch */
-enum paravirt_lazy_mode {
-	PARAVIRT_LAZY_NONE,
-	PARAVIRT_LAZY_MMU,
-	PARAVIRT_LAZY_CPU,
-};
-
-static inline enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
-{
-	return PARAVIRT_LAZY_NONE;
-}
-
-extern struct dma_map_ops *xen_dma_ops;
-
-#ifdef CONFIG_XEN
-void __init xen_early_init(void);
-#else
-static inline void xen_early_init(void) { return; }
-#endif
-
-#ifdef CONFIG_HOTPLUG_CPU
-static inline void xen_arch_register_cpu(int num)
-{
-}
-
-static inline void xen_arch_unregister_cpu(int num)
-{
-}
-#endif
-
-#endif /* _ASM_ARM_XEN_HYPERVISOR_H */
+#include <xen/arm/hypervisor.h>
diff --git a/arch/arm/include/asm/xen/interface.h b/arch/arm/include/asm/xen/interface.h
index 75d596862892..88c0d75da190 100644
--- a/arch/arm/include/asm/xen/interface.h
+++ b/arch/arm/include/asm/xen/interface.h
@@ -1,85 +1 @@
-/******************************************************************************
- * Guest OS interface to ARM Xen.
- *
- * Stefano Stabellini <stefano.stabellini@eu.citrix.com>, Citrix, 2012
- */
-
-#ifndef _ASM_ARM_XEN_INTERFACE_H
-#define _ASM_ARM_XEN_INTERFACE_H
-
-#include <linux/types.h>
-
-#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
-
-#define __DEFINE_GUEST_HANDLE(name, type) \
-	typedef struct { union { type *p; uint64_aligned_t q; }; }	\
-		__guest_handle_ ## name
-
-#define DEFINE_GUEST_HANDLE_STRUCT(name) \
-	__DEFINE_GUEST_HANDLE(name, struct name)
-#define DEFINE_GUEST_HANDLE(name) __DEFINE_GUEST_HANDLE(name, name)
-#define GUEST_HANDLE(name)	__guest_handle_ ## name
-
-#define set_xen_guest_handle(hnd, val)			\
-	do {						\
-		if (sizeof(hnd) == 8)			\
-			*(uint64_t *)&(hnd) = 0;	\
-		(hnd).p = val;				\
-	} while (0)
-
-#define __HYPERVISOR_platform_op_raw __HYPERVISOR_platform_op
-
-#ifndef __ASSEMBLY__
-/* Explicitly size integers that represent pfns in the interface with
- * Xen so that we can have one ABI that works for 32 and 64 bit guests.
- * Note that this means that the xen_pfn_t type may be capable of
- * representing pfn's which the guest cannot represent in its own pfn
- * type. However since pfn space is controlled by the guest this is
- * fine since it simply wouldn't be able to create any sure pfns in
- * the first place.
- */
-typedef uint64_t xen_pfn_t;
-#define PRI_xen_pfn "llx"
-typedef uint64_t xen_ulong_t;
-#define PRI_xen_ulong "llx"
-typedef int64_t xen_long_t;
-#define PRI_xen_long "llx"
-/* Guest handles for primitive C types. */
-__DEFINE_GUEST_HANDLE(uchar, unsigned char);
-__DEFINE_GUEST_HANDLE(uint, unsigned int);
-DEFINE_GUEST_HANDLE(char);
-DEFINE_GUEST_HANDLE(int);
-DEFINE_GUEST_HANDLE(void);
-DEFINE_GUEST_HANDLE(uint64_t);
-DEFINE_GUEST_HANDLE(uint32_t);
-DEFINE_GUEST_HANDLE(xen_pfn_t);
-DEFINE_GUEST_HANDLE(xen_ulong_t);
-
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 1
-
-struct arch_vcpu_info { };
-struct arch_shared_info { };
-
-/* TODO: Move pvclock definitions some place arch independent */
-struct pvclock_vcpu_time_info {
-	u32   version;
-	u32   pad0;
-	u64   tsc_timestamp;
-	u64   system_time;
-	u32   tsc_to_system_mul;
-	s8    tsc_shift;
-	u8    flags;
-	u8    pad[2];
-} __attribute__((__packed__)); /* 32 bytes */
-
-/* It is OK to have a 12 bytes struct with no padding because it is packed */
-struct pvclock_wall_clock {
-	u32   version;
-	u32   sec;
-	u32   nsec;
-	u32   sec_hi;
-} __attribute__((__packed__));
-#endif
-
-#endif /* _ASM_ARM_XEN_INTERFACE_H */
+#include <xen/arm/interface.h>
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 95ce6ac3a971..b3ef061d8b74 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -1,98 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_COHERENT_H
-#define _ASM_ARM_XEN_PAGE_COHERENT_H
-
-#include <asm/page.h>
-#include <linux/dma-mapping.h>
-
-void __xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs);
-void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void __xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-void __xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir);
-
-static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
-		dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
-{
-	return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
-}
-
-static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
-		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
-{
-	__generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
-}
-
-static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
-	     dma_addr_t dev_addr, unsigned long offset, size_t size,
-	     enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long page_pfn = page_to_xen_pfn(page);
-	unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
-	unsigned long compound_pages =
-		(1<<compound_order(page)) * XEN_PFN_PER_PAGE;
-	bool local = (page_pfn <= dev_pfn) &&
-		(dev_pfn - page_pfn < compound_pages);
-
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can span across
-	 * multiple Xen pages, it's not possible for it to contain a
-	 * mix of local and foreign Xen pages. So if the first xen_pfn
-	 * == mfn the page is local otherwise it's a foreign page
-	 * grant-mapped in dom0. If the page is local we can safely
-	 * call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (local)
-		__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
-	else
-		__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
-}
-
-static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
-		size_t size, enum dma_data_direction dir, unsigned long attrs)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	/*
-	 * Dom0 is mapped 1:1, while the Linux page can be spanned accross
-	 * multiple Xen page, it's not possible to have a mix of local and
-	 * foreign Xen page. Dom0 is mapped 1:1, so calling pfn_valid on a
-	 * foreign mfn will always return false. If the page is local we can
-	 * safely call the native dma_ops function, otherwise we call the xen
-	 * specific function.
-	 */
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->unmap_page)
-			__generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
-	} else
-		__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
-}
-
-static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
-			__generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
-}
-
-static inline void xen_dma_sync_single_for_device(struct device *hwdev,
-		dma_addr_t handle, size_t size, enum dma_data_direction dir)
-{
-	unsigned long pfn = PFN_DOWN(handle);
-	if (pfn_valid(pfn)) {
-		if (__generic_dma_ops(hwdev)->sync_single_for_device)
-			__generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
-	} else
-		__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
-}
-
-#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
+#include <xen/arm/page-coherent.h>
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 415dbc6e43fd..31bbc803cecb 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -1,122 +1 @@
-#ifndef _ASM_ARM_XEN_PAGE_H
-#define _ASM_ARM_XEN_PAGE_H
-
-#include <asm/page.h>
-#include <asm/pgtable.h>
-
-#include <linux/pfn.h>
-#include <linux/types.h>
-#include <linux/dma-mapping.h>
-
-#include <xen/xen.h>
-#include <xen/interface/grant_table.h>
-
-#define phys_to_machine_mapping_valid(pfn) (1)
-
-/* Xen machine address */
-typedef struct xmaddr {
-	phys_addr_t maddr;
-} xmaddr_t;
-
-/* Xen pseudo-physical address */
-typedef struct xpaddr {
-	phys_addr_t paddr;
-} xpaddr_t;
-
-#define XMADDR(x)	((xmaddr_t) { .maddr = (x) })
-#define XPADDR(x)	((xpaddr_t) { .paddr = (x) })
-
-#define INVALID_P2M_ENTRY	(~0UL)
-
-/*
- * The pseudo-physical frame (pfn) used in all the helpers is always based
- * on Xen page granularity (i.e 4KB).
- *
- * A Linux page may be split across multiple non-contiguous Xen page so we
- * have to keep track with frame based on 4KB page granularity.
- *
- * PV drivers should never make a direct usage of those helpers (particularly
- * pfn_to_gfn and gfn_to_pfn).
- */
-
-unsigned long __pfn_to_mfn(unsigned long pfn);
-extern struct rb_root phys_to_mach;
-
-/* Pseudo-physical <-> Guest conversion */
-static inline unsigned long pfn_to_gfn(unsigned long pfn)
-{
-	return pfn;
-}
-
-static inline unsigned long gfn_to_pfn(unsigned long gfn)
-{
-	return gfn;
-}
-
-/* Pseudo-physical <-> BUS conversion */
-static inline unsigned long pfn_to_bfn(unsigned long pfn)
-{
-	unsigned long mfn;
-
-	if (phys_to_mach.rb_node != NULL) {
-		mfn = __pfn_to_mfn(pfn);
-		if (mfn != INVALID_P2M_ENTRY)
-			return mfn;
-	}
-
-	return pfn;
-}
-
-static inline unsigned long bfn_to_pfn(unsigned long bfn)
-{
-	return bfn;
-}
-
-#define bfn_to_local_pfn(bfn)	bfn_to_pfn(bfn)
-
-/* VIRT <-> GUEST conversion */
-#define virt_to_gfn(v)		(pfn_to_gfn(virt_to_phys(v) >> XEN_PAGE_SHIFT))
-#define gfn_to_virt(m)		(__va(gfn_to_pfn(m) << XEN_PAGE_SHIFT))
-
-/* Only used in PV code. But ARM guests are always HVM. */
-static inline xmaddr_t arbitrary_virt_to_machine(void *vaddr)
-{
-	BUG();
-}
-
-/* TODO: this shouldn't be here but it is because the frontend drivers
- * are using it (its rolled in headers) even though we won't hit the code path.
- * So for right now just punt with this.
- */
-static inline pte_t *lookup_address(unsigned long address, unsigned int *level)
-{
-	BUG();
-	return NULL;
-}
-
-extern int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops,
-				   struct gnttab_map_grant_ref *kmap_ops,
-				   struct page **pages, unsigned int count);
-
-extern int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops,
-				     struct gnttab_unmap_grant_ref *kunmap_ops,
-				     struct page **pages, unsigned int count);
-
-bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
-bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
-		unsigned long nr_pages);
-
-static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
-{
-	return __set_phys_to_machine(pfn, mfn);
-}
-
-#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
-#define xen_unmap(cookie) iounmap((cookie))
-
-bool xen_arch_need_swiotlb(struct device *dev,
-			   phys_addr_t phys,
-			   dma_addr_t dev_addr);
-unsigned long xen_get_swiotlb_free_pages(unsigned int order);
-
-#endif /* _ASM_ARM_XEN_PAGE_H */
+#include <xen/arm/page.h>
diff --git a/arch/arm/include/uapi/asm/kvm.h b/arch/arm/include/uapi/asm/kvm.h
index b38c10c73579..af05f8e0903e 100644
--- a/arch/arm/include/uapi/asm/kvm.h
+++ b/arch/arm/include/uapi/asm/kvm.h
@@ -87,9 +87,11 @@ struct kvm_regs {
 /* Supported VGICv3 address types  */
 #define KVM_VGIC_V3_ADDR_TYPE_DIST	2
 #define KVM_VGIC_V3_ADDR_TYPE_REDIST	3
+#define KVM_VGIC_ITS_ADDR_TYPE		4
 
 #define KVM_VGIC_V3_DIST_SIZE		SZ_64K
 #define KVM_VGIC_V3_REDIST_SIZE		(2 * SZ_64K)
+#define KVM_VGIC_V3_ITS_SIZE		(2 * SZ_64K)
 
 #define KVM_ARM_VCPU_POWER_OFF		0 /* CPU is started in OFF state */
 #define KVM_ARM_VCPU_PSCI_0_2		1 /* CPU uses PSCI v0.2 */
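
The most instructive hunk above is the arch_gicv3.h one: as the retained comment notes, these IO helpers use 32-bit accesses on AArch32, so every 64-bit ITS/redistributor register goes through __gic_writeq_nonatomic()/__gic_readq_nonatomic(), which access the register as two independent 32-bit halves, low word first. The stand-alone C sketch below mirrors that pattern outside the kernel; writel_relaxed()/readl_relaxed() are replaced by plain 32-bit loads and stores, and fake_reg is a made-up stand-in for a 64-bit register such as GITS_CBASER, so it only illustrates the lo/hi split, not real GIC programming.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for the kernel's writel_relaxed()/readl_relaxed(): plain 32-bit
 * stores/loads so this sketch runs in user space. */
static void writel_relaxed(uint32_t val, volatile void *addr)
{
	*(volatile uint32_t *)addr = val;
}

static uint32_t readl_relaxed(const volatile void *addr)
{
	return *(const volatile uint32_t *)addr;
}

/* Same shape as __gic_writeq_nonatomic()/__gic_readq_nonatomic() above:
 * the 64-bit value is split into two independent 32-bit accesses,
 * low word at offset 0, high word at offset 4. */
static void writeq_nonatomic(uint64_t val, volatile void *addr)
{
	writel_relaxed((uint32_t)val, addr);
	writel_relaxed((uint32_t)(val >> 32), (volatile uint8_t *)addr + 4);
}

static uint64_t readq_nonatomic(const volatile void *addr)
{
	uint64_t val;

	val = readl_relaxed(addr);
	val |= (uint64_t)readl_relaxed((const volatile uint8_t *)addr + 4) << 32;
	return val;
}

int main(void)
{
	/* fake_reg is a made-up stand-in for a 64-bit GIC register; the
	 * round-trip below assumes a little-endian host, the usual ARM
	 * Linux configuration. */
	uint64_t fake_reg = 0;

	writeq_nonatomic(0x1122334455667788ULL, &fake_reg);
	printf("read back: 0x%llx\n",
	       (unsigned long long)readq_nonatomic(&fake_reg));
	return 0;
}

The point of the split is that callers get no atomicity across the two halves, which is why the new gits_*/gicr_* macros are only defined for registers whose comments above state that independent hi/lo access is safe (ID registers, or registers only changed while LPIs are disabled).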