author      Khalid Aziz <khalid.aziz@oracle.com>    2018-02-23 23:46:41 +0100
committer   David S. Miller <davem@davemloft.net>   2018-03-18 15:38:48 +0100
commit      74a04967482faa7144b93dae3b2e913870dd421c (patch)
tree        387f2497cfd053d99504a00ff8ccce0dd5fbd647 /arch
parent      mm: Allow arch code to override copy_highpage() (diff)
download    linux-74a04967482faa7144b93dae3b2e913870dd421c.tar.xz
            linux-74a04967482faa7144b93dae3b2e913870dd421c.zip
sparc64: Add support for ADI (Application Data Integrity)
ADI is a new feature on SPARC M7 and newer processors that allows
hardware to catch rogue accesses to memory. ADI applies to data
fetches only, not instruction fetches. An app can enable ADI on its
data pages, set version tags on them, and access those pages through
versioned addresses: the upper bits of the address carry the version
tag, and on M7 processors the upper four bits (bits 63-60) hold it.
If a rogue app attempts to access ADI-enabled data pages, its access
is blocked and the processor generates an exception. Please see
Documentation/sparc/adi.txt for further details.
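To make the address layout concrete, here is a small sketch of how an
application might build a versioned pointer under the M7 layout
described above; the helper name is illustrative, not an API added by
this patch:

    #include <stdint.h>

    /* Illustrative only: place a 4-bit ADI version tag in bits 63-60
     * of a pointer, per the M7 layout. Loads and stores through the
     * result are checked against the tag set on the underlying data.
     */
    static inline void *adi_versioned_ptr(void *addr, unsigned long version)
    {
        uintptr_t p = (uintptr_t)addr & ~(0xfUL << 60);

        return (void *)(p | ((version & 0xfUL) << 60));
    }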
This patch extends mprotect to enable ADI (TSTATE.mcde) and to
enable/disable MCD (Memory Corruption Detection) on selected memory
ranges, sets TTE.mcd in PTEs, returns ADI parameters to userspace,
and saves/restores ADI version tags on page swap out/in and
migration. ADI is not enabled by default for any task; a task must
explicitly enable ADI on a memory range and set a version tag for
ADI to take effect.
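As a usage sketch, modeled loosely on the example in
Documentation/sparc/adi.txt: a task maps memory, turns on ADI with
mprotect(), sets a tag, and then accesses the data through a
versioned pointer. The ASI value 0x90 (ASI_MCD_PRIMARY) and the exact
stxa sequence here are assumptions to be checked against that
document:

    #include <sys/mman.h>

    #ifndef PROT_ADI
    #define PROT_ADI        0x10    /* matches the uapi value added below */
    #endif

    /* Illustrative only: set the ADI version tag on one ADI block.
     * Assumes ASI 0x90 (ASI_MCD_PRIMARY) accepts user tag stores.
     */
    static void set_adi_tag(void *addr, unsigned long version)
    {
        asm volatile("stxa %0, [%1] 0x90\n\t"
                     :
                     : "r" (version), "r" (addr)
                     : "memory");
    }

    int main(void)
    {
        size_t len = 8192;      /* one 8KB page */
        char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED)
            return 1;
        /* Fails unless the CPU is ADI-capable */
        if (mprotect(buf, len, PROT_READ | PROT_WRITE | PROT_ADI))
            return 1;
        buf[0] = 0;             /* fault the page in with TTE.mcd set */
        set_adi_tag(buf, 10);
        /* From here on, access the tagged block through a pointer
         * carrying version 10 in bits 63-60.
         */
        char *vbuf = (char *)((unsigned long)buf | (10UL << 60));
        vbuf[0] = 'x';
        return 0;
    }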
Signed-off-by: Khalid Aziz <khalid.aziz@oracle.com>
Cc: Khalid Aziz <khalid@gonehiking.org>
Reviewed-by: Anthony Yznaga <anthony.yznaga@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'arch')
-rw-r--r--    arch/sparc/include/asm/mman.h              84
-rw-r--r--    arch/sparc/include/asm/mmu_64.h            17
-rw-r--r--    arch/sparc/include/asm/mmu_context_64.h    51
-rw-r--r--    arch/sparc/include/asm/page_64.h            6
-rw-r--r--    arch/sparc/include/asm/pgtable_64.h        46
-rw-r--r--    arch/sparc/include/asm/thread_info_64.h     2
-rw-r--r--    arch/sparc/include/asm/trap_block.h         2
-rw-r--r--    arch/sparc/include/uapi/asm/mman.h          2
-rw-r--r--    arch/sparc/kernel/adi_64.c                301
-rw-r--r--    arch/sparc/kernel/etrap_64.S               27
-rw-r--r--    arch/sparc/kernel/process_64.c             25
-rw-r--r--    arch/sparc/kernel/rtrap_64.S               33
-rw-r--r--    arch/sparc/kernel/setup_64.c                2
-rw-r--r--    arch/sparc/kernel/urtt_fill.S               7
-rw-r--r--    arch/sparc/kernel/vmlinux.lds.S             5
-rw-r--r--    arch/sparc/mm/gup.c                        37
-rw-r--r--    arch/sparc/mm/hugetlbpage.c                14
-rw-r--r--    arch/sparc/mm/init_64.c                    69
-rw-r--r--    arch/sparc/mm/tsb.c                        21
19 files changed, 743 insertions(+), 8 deletions(-)
diff --git a/arch/sparc/include/asm/mman.h b/arch/sparc/include/asm/mman.h
index 7e9472143f9b..f94532f25db1 100644
--- a/arch/sparc/include/asm/mman.h
+++ b/arch/sparc/include/asm/mman.h
@@ -7,5 +7,87 @@
 #ifndef __ASSEMBLY__
 #define arch_mmap_check(addr,len,flags) sparc_mmap_check(addr,len)
 int sparc_mmap_check(unsigned long addr, unsigned long len);
-#endif
+
+#ifdef CONFIG_SPARC64
+#include <asm/adi_64.h>
+
+static inline void ipi_set_tstate_mcde(void *arg)
+{
+    struct mm_struct *mm = arg;
+
+    /* Set TSTATE_MCDE for the task using address map that ADI has been
+     * enabled on if the task is running. If not, it will be set
+     * automatically at the next context switch
+     */
+    if (current->mm == mm) {
+        struct pt_regs *regs;
+
+        regs = task_pt_regs(current);
+        regs->tstate |= TSTATE_MCDE;
+    }
+}
+
+#define arch_calc_vm_prot_bits(prot, pkey) sparc_calc_vm_prot_bits(prot)
+static inline unsigned long sparc_calc_vm_prot_bits(unsigned long prot)
+{
+    if (adi_capable() && (prot & PROT_ADI)) {
+        struct pt_regs *regs;
+
+        if (!current->mm->context.adi) {
+            regs = task_pt_regs(current);
+            regs->tstate |= TSTATE_MCDE;
+            current->mm->context.adi = true;
+            on_each_cpu_mask(mm_cpumask(current->mm),
+                             ipi_set_tstate_mcde, current->mm, 0);
+        }
+        return VM_SPARC_ADI;
+    } else {
+        return 0;
+    }
+}
+
+#define arch_vm_get_page_prot(vm_flags) sparc_vm_get_page_prot(vm_flags)
+static inline pgprot_t sparc_vm_get_page_prot(unsigned long vm_flags)
+{
+    return (vm_flags & VM_SPARC_ADI) ? __pgprot(_PAGE_MCD_4V) : __pgprot(0);
+}
+
+#define arch_validate_prot(prot, addr) sparc_validate_prot(prot, addr)
+static inline int sparc_validate_prot(unsigned long prot, unsigned long addr)
+{
+    if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM | PROT_ADI))
+        return 0;
+    if (prot & PROT_ADI) {
+        if (!adi_capable())
+            return 0;
+
+        if (addr) {
+            struct vm_area_struct *vma;
+
+            vma = find_vma(current->mm, addr);
+            if (vma) {
+                /* ADI can not be enabled on PFN
+                 * mapped pages
+                 */
+                if (vma->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
+                    return 0;
+
+                /* Mergeable pages can become unmergeable
+                 * if ADI is enabled on them even if they
+                 * have identical data on them. This can be
+                 * because ADI enabled pages with identical
+                 * data may still not have identical ADI
+                 * tags on them. Disallow ADI on mergeable
+                 * pages.
+                 */
+                if (vma->vm_flags & VM_MERGEABLE)
+                    return 0;
+            }
+        }
+    }
+    return 1;
+}
+#endif /* CONFIG_SPARC64 */
+
+#endif /* __ASSEMBLY__ */
 #endif /* __SPARC_MMAN_H__ */
diff --git a/arch/sparc/include/asm/mmu_64.h b/arch/sparc/include/asm/mmu_64.h
index ad4fb93508ba..7e2704c770e9 100644
--- a/arch/sparc/include/asm/mmu_64.h
+++ b/arch/sparc/include/asm/mmu_64.h
@@ -90,6 +90,20 @@ struct tsb_config {
 #define MM_NUM_TSBS     1
 #endif
 
+/* ADI tags are stored when a page is swapped out and the storage for
+ * tags is allocated dynamically. There is a tag storage descriptor
+ * associated with each set of tag storage pages. Tag storage descriptors
+ * are allocated dynamically. Since the kernel will allocate a full page
+ * for each tag storage descriptor, we can store up to
+ * PAGE_SIZE/sizeof(tag storage descriptor) descriptors on that page.
+ */
+typedef struct {
+    unsigned long   start;      /* Start address for this tag storage */
+    unsigned long   end;        /* Last address for tag storage */
+    unsigned char   *tags;      /* Where the tags are */
+    unsigned long   tag_users;  /* number of references to descriptor */
+} tag_storage_desc_t;
+
 typedef struct {
     spinlock_t      lock;
     unsigned long   sparc64_ctx_val;
@@ -98,6 +112,9 @@ typedef struct {
     struct tsb_config   tsb_block[MM_NUM_TSBS];
     struct hv_tsb_descr tsb_descr[MM_NUM_TSBS];
     void                *vdso;
+    bool                adi;
+    tag_storage_desc_t  *tag_store;
+    spinlock_t          tag_lock;
 } mm_context_t;
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index b361702ef52a..312fcee8df2b 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -9,8 +9,10 @@
 #include <linux/spinlock.h>
 #include <linux/mm_types.h>
 #include <linux/smp.h>
+#include <linux/sched.h>
 
 #include <asm/spitfire.h>
+#include <asm/adi_64.h>
 #include <asm-generic/mm_hooks.h>
 #include <asm/percpu.h>
 
@@ -136,6 +138,55 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 #define deactivate_mm(tsk,mm)   do { } while (0)
 #define activate_mm(active_mm, mm) switch_mm(active_mm, mm, NULL)
+
+#define __HAVE_ARCH_START_CONTEXT_SWITCH
+static inline void arch_start_context_switch(struct task_struct *prev)
+{
+    /* Save the current state of MCDPER register for the process
+     * we are switching from
+     */
+    if (adi_capable()) {
+        register unsigned long tmp_mcdper;
+
+        __asm__ __volatile__(
+            ".word 0x83438000\n\t"  /* rd %mcdper, %g1 */
+            "mov %%g1, %0\n\t"
+            : "=r" (tmp_mcdper)
+            :
+            : "g1");
+        if (tmp_mcdper)
+            set_tsk_thread_flag(prev, TIF_MCDPER);
+        else
+            clear_tsk_thread_flag(prev, TIF_MCDPER);
+    }
+}
+
+#define finish_arch_post_lock_switch    finish_arch_post_lock_switch
+static inline void finish_arch_post_lock_switch(void)
+{
+    /* Restore the state of MCDPER register for the new process
+     * just switched to.
+     */
+    if (adi_capable()) {
+        register unsigned long tmp_mcdper;
+
+        tmp_mcdper = test_thread_flag(TIF_MCDPER);
+        __asm__ __volatile__(
+            "mov %0, %%g1\n\t"
+            ".word 0x9d800001\n\t"  /* wr %g0, %g1, %mcdper */
+            ".word 0xaf902001\n\t"  /* wrpr %g0, 1, %pmcdper */
+            :
+            : "ir" (tmp_mcdper)
+            : "g1");
+        if (current && current->mm && current->mm->context.adi) {
+            struct pt_regs *regs;
+
+            regs = task_pt_regs(current);
+            regs->tstate |= TSTATE_MCDE;
+        }
+    }
+}
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__SPARC64_MMU_CONTEXT_H) */
diff --git a/arch/sparc/include/asm/page_64.h b/arch/sparc/include/asm/page_64.h
index c28379b1b0fc..e80f2d5bf62f 100644
--- a/arch/sparc/include/asm/page_64.h
+++ b/arch/sparc/include/asm/page_64.h
@@ -48,6 +48,12 @@ struct page;
 void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
 #define copy_page(X,Y)  memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
 void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+struct vm_area_struct;
+void copy_user_highpage(struct page *to, struct page *from,
+                        unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_HIGHPAGE
+void copy_highpage(struct page *to, struct page *from);
 
 /* Unlike sparc32, sparc64's parameter passing API is more
  * sane in that structures which as small enough are passed
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 619332a44402..44d6ac47e035 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -19,6 +19,7 @@
 #include <asm/types.h>
 #include <asm/spitfire.h>
 #include <asm/asi.h>
+#include <asm/adi.h>
 #include <asm/page.h>
 #include <asm/processor.h>
 
@@ -606,6 +607,18 @@ static inline pte_t pte_mkspecial(pte_t pte)
     return pte;
 }
 
+static inline pte_t pte_mkmcd(pte_t pte)
+{
+    pte_val(pte) |= _PAGE_MCD_4V;
+    return pte;
+}
+
+static inline pte_t pte_mknotmcd(pte_t pte)
+{
+    pte_val(pte) &= ~_PAGE_MCD_4V;
+    return pte;
+}
+
 static inline unsigned long pte_young(pte_t pte)
 {
     unsigned long mask;
@@ -1048,6 +1061,39 @@ int page_in_phys_avail(unsigned long paddr);
 int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
                     unsigned long, pgprot_t);
 
+void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+                      unsigned long addr, pte_t pte);
+
+int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+                  unsigned long addr, pte_t oldpte);
+
+#define __HAVE_ARCH_DO_SWAP_PAGE
+static inline void arch_do_swap_page(struct mm_struct *mm,
+                                     struct vm_area_struct *vma,
+                                     unsigned long addr,
+                                     pte_t pte, pte_t oldpte)
+{
+    /* If this is a new page being mapped in, there can be no
+     * ADI tags stored away for this page. Skip looking for
+     * stored tags
+     */
+    if (pte_none(oldpte))
+        return;
+
+    if (adi_state.enabled && (pte_val(pte) & _PAGE_MCD_4V))
+        adi_restore_tags(mm, vma, addr, pte);
+}
+
+#define __HAVE_ARCH_UNMAP_ONE
+static inline int arch_unmap_one(struct mm_struct *mm,
+                                 struct vm_area_struct *vma,
+                                 unsigned long addr, pte_t oldpte)
+{
+    if (adi_state.enabled && (pte_val(oldpte) & _PAGE_MCD_4V))
+        return adi_save_tags(mm, vma, addr, oldpte);
+    return 0;
+}
+
 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                      unsigned long from, unsigned long pfn,
                                      unsigned long size, pgprot_t prot)
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index f7e7b0baec9f..7fb676360928 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -188,7 +188,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
  *     in using in assembly, else we can't use the mask as
  *     an immediate value in instructions such as andcc.
  */
-/* flag bit 12 is available */
+#define TIF_MCDPER      12      /* Precise MCD exception */
 #define TIF_MEMDIE      13      /* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG      14
diff --git a/arch/sparc/include/asm/trap_block.h b/arch/sparc/include/asm/trap_block.h
index 6a4c8652ad67..0f6d0c4f6683 100644
--- a/arch/sparc/include/asm/trap_block.h
+++ b/arch/sparc/include/asm/trap_block.h
@@ -76,6 +76,8 @@ extern struct sun4v_1insn_patch_entry __sun4v_1insn_patch,
     __sun4v_1insn_patch_end;
 extern struct sun4v_1insn_patch_entry __fast_win_ctrl_1insn_patch,
     __fast_win_ctrl_1insn_patch_end;
+extern struct sun4v_1insn_patch_entry __sun_m7_1insn_patch,
+    __sun_m7_1insn_patch_end;
 
 struct sun4v_2insn_patch_entry {
     unsigned int    addr;
diff --git a/arch/sparc/include/uapi/asm/mman.h b/arch/sparc/include/uapi/asm/mman.h
index 715a2c927e79..f6f99ec65bb3 100644
--- a/arch/sparc/include/uapi/asm/mman.h
+++ b/arch/sparc/include/uapi/asm/mman.h
@@ -6,6 +6,8 @@
 
 /* SunOS'ified... */
 
+#define PROT_ADI        0x10            /* ADI enabled */
+
 #define MAP_RENAME      MAP_ANONYMOUS   /* In SunOS terminology */
 #define MAP_NORESERVE   0x40            /* don't reserve swap pages */
 #define MAP_INHERIT     0x80            /* SunOS doesn't do this, but... */
diff --git a/arch/sparc/kernel/adi_64.c b/arch/sparc/kernel/adi_64.c
index 8fb72585d9f1..d0a2ac975b42 100644
--- a/arch/sparc/kernel/adi_64.c
+++ b/arch/sparc/kernel/adi_64.c
@@ -8,10 +8,24 @@
  * This work is licensed under the terms of the GNU GPL, version 2.
  */
 #include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm_types.h>
 #include <asm/mdesc.h>
 #include <asm/adi_64.h>
+#include <asm/mmu_64.h>
+#include <asm/pgtable_64.h>
+
+/* Each page of storage for ADI tags can accommodate tags for 128
+ * pages. When ADI enabled pages are being swapped out, it would be
+ * prudent to allocate at least enough tag storage space to accommodate
+ * SWAPFILE_CLUSTER number of pages. Allocate enough tag storage to
+ * store tags for four SWAPFILE_CLUSTER pages to reduce need for
+ * further allocations for same vma.
+ */
+#define TAG_STORAGE_PAGES       8
 
 struct adi_config adi_state;
+EXPORT_SYMBOL(adi_state);
 
 /* mdesc_adi_init() : Parse machine description provided by the
  * hypervisor to detect ADI capabilities
@@ -84,6 +98,19 @@ void __init mdesc_adi_init(void)
         goto adi_not_found;
     adi_state.caps.ue_on_adi = *val;
 
+    /* Some of the code to support swapping ADI tags is written with
+     * the assumption that two ADI tags can fit inside one byte. If
+     * this assumption is broken by a future architecture change,
+     * that code will have to be revisited. If that were to happen,
+     * disable ADI support so we do not get unpredictable results
+     * with programs trying to use ADI and their pages getting
+     * swapped out
+     */
+    if (adi_state.caps.nbits > 4) {
+        pr_warn("WARNING: ADI tag size >4 on this platform. Disabling ADI support\n");
+        adi_state.enabled = false;
+    }
+
     mdesc_release(hp);
     return;
 
@@ -94,3 +121,277 @@ adi_not_found:
     if (hp)
         mdesc_release(hp);
 }
+
+tag_storage_desc_t *find_tag_store(struct mm_struct *mm,
+                                   struct vm_area_struct *vma,
+                                   unsigned long addr)
+{
+    tag_storage_desc_t *tag_desc = NULL;
+    unsigned long i, max_desc, flags;
+
+    /* Check if this vma already has tag storage descriptor
+     * allocated for it.
+     */
+    max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+    if (mm->context.tag_store) {
+        tag_desc = mm->context.tag_store;
+        spin_lock_irqsave(&mm->context.tag_lock, flags);
+        for (i = 0; i < max_desc; i++) {
+            if ((addr >= tag_desc->start) &&
+                ((addr + PAGE_SIZE - 1) <= tag_desc->end))
+                break;
+            tag_desc++;
+        }
+        spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+
+        /* If no matching entries were found, this must be a
+         * freshly allocated page
+         */
+        if (i >= max_desc)
+            tag_desc = NULL;
+    }
+
+    return tag_desc;
+}
+
+tag_storage_desc_t *alloc_tag_store(struct mm_struct *mm,
+                                    struct vm_area_struct *vma,
+                                    unsigned long addr)
+{
+    unsigned char *tags;
+    unsigned long i, size, max_desc, flags;
+    tag_storage_desc_t *tag_desc, *open_desc;
+    unsigned long end_addr, hole_start, hole_end;
+
+    max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+    open_desc = NULL;
+    hole_start = 0;
+    hole_end = ULONG_MAX;
+    end_addr = addr + PAGE_SIZE - 1;
+
+    /* Check if this vma already has tag storage descriptor
+     * allocated for it.
+     */
+    spin_lock_irqsave(&mm->context.tag_lock, flags);
+    if (mm->context.tag_store) {
+        tag_desc = mm->context.tag_store;
+
+        /* Look for a matching entry for this address. While doing
+         * that, look for the first open slot as well and find
+         * the hole in already allocated range where this request
+         * will fit in.
+         */
+        for (i = 0; i < max_desc; i++) {
+            if (tag_desc->tag_users == 0) {
+                if (open_desc == NULL)
+                    open_desc = tag_desc;
+            } else {
+                if ((addr >= tag_desc->start) &&
+                    (tag_desc->end >= (addr + PAGE_SIZE - 1))) {
+                    tag_desc->tag_users++;
+                    goto out;
+                }
+            }
+            if ((tag_desc->start > end_addr) &&
+                (tag_desc->start < hole_end))
+                hole_end = tag_desc->start;
+            if ((tag_desc->end < addr) &&
+                (tag_desc->end > hole_start))
+                hole_start = tag_desc->end;
+            tag_desc++;
+        }
+
+    } else {
+        size = sizeof(tag_storage_desc_t)*max_desc;
+        mm->context.tag_store = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
+        if (mm->context.tag_store == NULL) {
+            tag_desc = NULL;
+            goto out;
+        }
+        tag_desc = mm->context.tag_store;
+        for (i = 0; i < max_desc; i++, tag_desc++)
+            tag_desc->tag_users = 0;
+        open_desc = mm->context.tag_store;
+        i = 0;
+    }
+
+    /* Check if we ran out of tag storage descriptors */
+    if (open_desc == NULL) {
+        tag_desc = NULL;
+        goto out;
+    }
+
+    /* Mark this tag descriptor slot in use and then initialize it */
+    tag_desc = open_desc;
+    tag_desc->tag_users = 1;
+
+    /* Tag storage has not been allocated for this vma and space
+     * is available in tag storage descriptor. Since this page is
+     * being swapped out, there is high probability subsequent pages
+     * in the VMA will be swapped out as well. Allocate pages to
+     * store tags for as many pages in this vma as possible but not
+     * more than TAG_STORAGE_PAGES. Each byte in tag space holds
+     * two ADI tags since each ADI tag is 4 bits. Each ADI tag
+     * covers adi_blksize() worth of addresses. Check if the hole is
+     * big enough to accommodate full address range for using
+     * TAG_STORAGE_PAGES number of tag pages.
+     */
+    size = TAG_STORAGE_PAGES * PAGE_SIZE;
+    end_addr = addr + (size*2*adi_blksize()) - 1;
+    /* Check for overflow. If overflow occurs, allocate only one page */
+    if (end_addr < addr) {
+        size = PAGE_SIZE;
+        end_addr = addr + (size*2*adi_blksize()) - 1;
+        /* If overflow happens with the minimum tag storage
+         * allocation as well, adjust ending address for this
+         * tag storage.
+         */
+        if (end_addr < addr)
+            end_addr = ULONG_MAX;
+    }
+    if (hole_end < end_addr) {
+        /* Available hole is too small on the upper end of
+         * address. Can we expand the range towards the lower
+         * address and maximize use of this slot?
+         */
+        unsigned long tmp_addr;
+
+        end_addr = hole_end - 1;
+        tmp_addr = end_addr - (size*2*adi_blksize()) + 1;
+        /* Check for underflow. If underflow occurs, allocate
+         * only one page for storing ADI tags
+         */
+        if (tmp_addr > addr) {
+            size = PAGE_SIZE;
+            tmp_addr = end_addr - (size*2*adi_blksize()) - 1;
+            /* If underflow happens with the minimum tag storage
+             * allocation as well, adjust starting address for
+             * this tag storage.
+             */
+            if (tmp_addr > addr)
+                tmp_addr = 0;
+        }
+        if (tmp_addr < hole_start) {
+            /* Available hole is restricted on lower address
+             * end as well
+             */
+            tmp_addr = hole_start + 1;
+        }
+        addr = tmp_addr;
+        size = (end_addr + 1 - addr)/(2*adi_blksize());
+        size = (size + (PAGE_SIZE-adi_blksize()))/PAGE_SIZE;
+        size = size * PAGE_SIZE;
+    }
+    tags = kzalloc(size, GFP_NOWAIT|__GFP_NOWARN);
+    if (tags == NULL) {
+        tag_desc->tag_users = 0;
+        tag_desc = NULL;
+        goto out;
+    }
+    tag_desc->start = addr;
+    tag_desc->tags = tags;
+    tag_desc->end = end_addr;
+
+out:
+    spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+    return tag_desc;
+}
+
+void del_tag_store(tag_storage_desc_t *tag_desc, struct mm_struct *mm)
+{
+    unsigned long flags;
+    unsigned char *tags = NULL;
+
+    spin_lock_irqsave(&mm->context.tag_lock, flags);
+    tag_desc->tag_users--;
+    if (tag_desc->tag_users == 0) {
+        tag_desc->start = tag_desc->end = 0;
+        /* Do not free up the tag storage space allocated
+         * by the first descriptor. This is persistent
+         * emergency tag storage space for the task.
+         */
+        if (tag_desc != mm->context.tag_store) {
+            tags = tag_desc->tags;
+            tag_desc->tags = NULL;
+        }
+    }
+    spin_unlock_irqrestore(&mm->context.tag_lock, flags);
+    kfree(tags);
+}
+
+#define tag_start(addr, tag_desc) \
+    ((tag_desc)->tags + ((addr - (tag_desc)->start)/(2*adi_blksize())))
+
+/* Retrieve any saved ADI tags for the page being swapped back in and
+ * restore these tags to the newly allocated physical page.
+ */
+void adi_restore_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+                      unsigned long addr, pte_t pte)
+{
+    unsigned char *tag;
+    tag_storage_desc_t *tag_desc;
+    unsigned long paddr, tmp, version1, version2;
+
+    /* Check if the swapped out page has an ADI version
+     * saved. If yes, restore version tag to the newly
+     * allocated page.
+     */
+    tag_desc = find_tag_store(mm, vma, addr);
+    if (tag_desc == NULL)
+        return;
+
+    tag = tag_start(addr, tag_desc);
+    paddr = pte_val(pte) & _PAGE_PADDR_4V;
+    for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
+        version1 = (*tag) >> 4;
+        version2 = (*tag) & 0x0f;
+        *tag++ = 0;
+        asm volatile("stxa %0, [%1] %2\n\t"
+            :
+            : "r" (version1), "r" (tmp),
+              "i" (ASI_MCD_REAL));
+        tmp += adi_blksize();
+        asm volatile("stxa %0, [%1] %2\n\t"
+            :
+            : "r" (version2), "r" (tmp),
+              "i" (ASI_MCD_REAL));
+    }
+    asm volatile("membar #Sync\n\t");
+
+    /* Check and mark this tag space for release later if
+     * the swapped in page was the last user of tag space
+     */
+    del_tag_store(tag_desc, mm);
+}
+
+/* A page is about to be swapped out. Save any ADI tags associated with
+ * this physical page so they can be restored later when the page is swapped
+ * back in.
+ */
+int adi_save_tags(struct mm_struct *mm, struct vm_area_struct *vma,
+                  unsigned long addr, pte_t oldpte)
+{
+    unsigned char *tag;
+    tag_storage_desc_t *tag_desc;
+    unsigned long version1, version2, paddr, tmp;
+
+    tag_desc = alloc_tag_store(mm, vma, addr);
+    if (tag_desc == NULL)
+        return -1;
+
+    tag = tag_start(addr, tag_desc);
+    paddr = pte_val(oldpte) & _PAGE_PADDR_4V;
+    for (tmp = paddr; tmp < (paddr+PAGE_SIZE); tmp += adi_blksize()) {
+        asm volatile("ldxa [%1] %2, %0\n\t"
+            : "=r" (version1)
+            : "r" (tmp), "i" (ASI_MCD_REAL));
+        tmp += adi_blksize();
+        asm volatile("ldxa [%1] %2, %0\n\t"
+            : "=r" (version2)
+            : "r" (tmp), "i" (ASI_MCD_REAL));
+        *tag = (version1 << 4) | version2;
+        tag++;
+    }
+
+    return 0;
+}
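As a cross-check of the packing arithmetic used by adi_save_tags() and
adi_restore_tags() above, here is a standalone sketch of the same
bookkeeping against plain memory instead of the MCD ASIs; the block
and page sizes are illustrative values, not derived from the machine
description:

    #define ADI_BLKSIZE 64UL    /* assumed adi_blksize() */
    #define PAGE_SZ     8192UL  /* SPARC64 8KB base page */

    /* One tag byte covers two ADI blocks (two 4-bit versions), so a
     * page of data needs PAGE_SZ / (2 * ADI_BLKSIZE) tag bytes, 64
     * bytes here; hence one tag page covers 128 data pages.
     */
    static unsigned char *tag_byte(unsigned char *tags,
                                   unsigned long start, unsigned long addr)
    {
        return tags + (addr - start) / (2 * ADI_BLKSIZE);
    }

    /* High nibble first, matching the save loop above */
    static void pack_tags(unsigned char *tag,
                          unsigned long v1, unsigned long v2)
    {
        *tag = (v1 << 4) | (v2 & 0x0f);
    }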
diff --git a/arch/sparc/kernel/etrap_64.S b/arch/sparc/kernel/etrap_64.S
index 5c77a2e0e991..08cc41f64725 100644
--- a/arch/sparc/kernel/etrap_64.S
+++ b/arch/sparc/kernel/etrap_64.S
@@ -151,7 +151,32 @@ etrap_save:    save    %g2, -STACK_BIAS, %sp
         stx     %g6, [%sp + PTREGS_OFF + PT_V9_G6]
         stx     %g7, [%sp + PTREGS_OFF + PT_V9_G7]
         or      %l7, %l0, %l7
-        sethi   %hi(TSTATE_TSO | TSTATE_PEF), %l0
+661:    sethi   %hi(TSTATE_TSO | TSTATE_PEF), %l0
+        /* If userspace is using ADI, it could potentially pass
+         * a pointer with version tag embedded in it. To maintain
+         * the ADI security, we must enable PSTATE.mcde. Userspace
+         * would have already set TTE.mcd in an earlier call to
+         * kernel and set the version tag for the address being
+         * dereferenced. Setting PSTATE.mcde would ensure any
+         * access to userspace data through a system call honors
+         * ADI and does not allow a rogue app to bypass ADI by
+         * using system calls. Setting PSTATE.mcde only affects
+         * accesses to virtual addresses that have TTE.mcd set.
+         * Set PMCDPER to ensure any exceptions caused by ADI
+         * version tag mismatch are exposed before system call
+         * returns to userspace. Setting PMCDPER affects only
+         * writes to virtual addresses that have TTE.mcd set and
+         * have a version tag set as well.
+         */
+        .section .sun_m7_1insn_patch, "ax"
+        .word   661b
+        sethi   %hi(TSTATE_TSO | TSTATE_PEF | TSTATE_MCDE), %l0
+        .previous
+661:    nop
+        .section .sun_m7_1insn_patch, "ax"
+        .word   661b
+        .word   0xaf902001      /* wrpr %g0, 1, %pmcdper */
+        .previous
         or      %l7, %l0, %l7
         wrpr    %l2, %tnpc
         wrpr    %l7, (TSTATE_PRIV | TSTATE_IE), %tstate
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index 318efd784a0b..454a8af28f13 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -670,6 +670,31 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
     return 0;
 }
 
+/* TIF_MCDPER in thread info flags for current task is updated lazily upon
+ * a context switch. Update this flag in current task's thread flags
+ * before dup so the dup'd task will inherit the current TIF_MCDPER flag.
+ */
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+    if (adi_capable()) {
+        register unsigned long tmp_mcdper;
+
+        __asm__ __volatile__(
+            ".word 0x83438000\n\t"  /* rd %mcdper, %g1 */
+            "mov %%g1, %0\n\t"
+            : "=r" (tmp_mcdper)
+            :
+            : "g1");
+        if (tmp_mcdper)
+            set_thread_flag(TIF_MCDPER);
+        else
+            clear_thread_flag(TIF_MCDPER);
+    }
+
+    *dst = *src;
+    return 0;
+}
+
 typedef struct {
     union {
         unsigned int pr_regs[32];
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 0b21042ab181..f6528884a2c8 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -25,13 +25,31 @@
         .align  32
__handle_preemption:
         call    SCHEDULE_USER
-         wrpr   %g0, RTRAP_PSTATE, %pstate
+661:     wrpr   %g0, RTRAP_PSTATE, %pstate
+        /* If userspace is using ADI, it could potentially pass
+         * a pointer with version tag embedded in it. To maintain
+         * the ADI security, we must re-enable PSTATE.mcde before
+         * we continue execution in the kernel for another thread.
+         */
+        .section .sun_m7_1insn_patch, "ax"
+        .word   661b
+         wrpr   %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+        .previous
         ba,pt   %xcc, __handle_preemption_continue
          wrpr   %g0, RTRAP_PSTATE_IRQOFF, %pstate
 
__handle_user_windows:
         call    fault_in_user_windows
-         wrpr   %g0, RTRAP_PSTATE, %pstate
+661:     wrpr   %g0, RTRAP_PSTATE, %pstate
+        /* If userspace is using ADI, it could potentially pass
+         * a pointer with version tag embedded in it. To maintain
+         * the ADI security, we must re-enable PSTATE.mcde before
+         * we continue execution in the kernel for another thread.
+         */
+        .section .sun_m7_1insn_patch, "ax"
+        .word   661b
+         wrpr   %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+        .previous
         ba,pt   %xcc, __handle_preemption_continue
          wrpr   %g0, RTRAP_PSTATE_IRQOFF, %pstate
 
@@ -48,7 +66,16 @@ __handle_signal:
         add     %sp, PTREGS_OFF, %o0
         mov     %l0, %o2
         call    do_notify_resume
-         wrpr   %g0, RTRAP_PSTATE, %pstate
+661:     wrpr   %g0, RTRAP_PSTATE, %pstate
+        /* If userspace is using ADI, it could potentially pass
+         * a pointer with version tag embedded in it. To maintain
+         * the ADI security, we must re-enable PSTATE.mcde before
+         * we continue execution in the kernel for another thread.
+         */
+        .section .sun_m7_1insn_patch, "ax"
+        .word   661b
+         wrpr   %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+        .previous
         wrpr    %g0, RTRAP_PSTATE_IRQOFF, %pstate
 
         /* Signal delivery can modify pt_regs tstate, so we must
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index 34f7a533a74f..7944b3ca216a 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -294,6 +294,8 @@ static void __init sun4v_patch(void)
     case SUN4V_CHIP_SPARC_M7:
     case SUN4V_CHIP_SPARC_M8:
     case SUN4V_CHIP_SPARC_SN:
+        sun4v_patch_1insn_range(&__sun_m7_1insn_patch,
+                                &__sun_m7_1insn_patch_end);
         sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
                                  &__sun_m7_2insn_patch_end);
         break;
diff --git a/arch/sparc/kernel/urtt_fill.S b/arch/sparc/kernel/urtt_fill.S
index 44183aa59168..e4cee7be5cd0 100644
--- a/arch/sparc/kernel/urtt_fill.S
+++ b/arch/sparc/kernel/urtt_fill.S
@@ -50,7 +50,12 @@ user_rtt_fill_fixup_common:
          SET_GL(0)
         .previous
 
-        wrpr    %g0, RTRAP_PSTATE, %pstate
+661:    wrpr    %g0, RTRAP_PSTATE, %pstate
+        .section .sun_m7_1insn_patch, "ax"
+        .word   661b
+        /* Re-enable PSTATE.mcde to maintain ADI security */
+        wrpr    %g0, RTRAP_PSTATE|PSTATE_MCDE, %pstate
+        .previous
 
         mov     %l1, %g6
         ldx     [%g6 + TI_TASK], %g4
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 5a2344574f39..61afd787bd0c 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -145,6 +145,11 @@ SECTIONS
         *(.pause_3insn_patch)
         __pause_3insn_patch_end = .;
     }
+    .sun_m7_1insn_patch : {
+        __sun_m7_1insn_patch = .;
+        *(.sun_m7_1insn_patch)
+        __sun_m7_1insn_patch_end = .;
+    }
     .sun_m7_2insn_patch : {
         __sun_m7_2insn_patch = .;
         *(.sun_m7_2insn_patch)
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 5335ba3c850e..357b6047653a 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -12,6 +12,7 @@
 #include <linux/pagemap.h>
 #include <linux/rwsem.h>
 #include <asm/pgtable.h>
+#include <asm/adi.h>
 
 /*
  * The performance critical leaf functions are made noinline otherwise gcc
@@ -201,6 +202,24 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
     pgd_t *pgdp;
     int nr = 0;
 
+#ifdef CONFIG_SPARC64
+    if (adi_capable()) {
+        long addr = start;
+
+        /* If userspace has passed a versioned address, kernel
+         * will not find it in the VMAs since it does not store
+         * the version tags in the list of VMAs. Storing version
+         * tags in list of VMAs is impractical since they can be
+         * changed any time from userspace without dropping into
+         * kernel. Any address search in VMAs will be done with
+         * non-versioned addresses. Ensure the ADI version bits
+         * are dropped here by sign extending the last bit before
+         * ADI bits. IOMMU does not implement version tags.
+         */
+        addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
+        start = addr;
+    }
+#endif
     start &= PAGE_MASK;
     addr = start;
     len = (unsigned long) nr_pages << PAGE_SHIFT;
@@ -231,6 +250,24 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
     pgd_t *pgdp;
     int nr = 0;
 
+#ifdef CONFIG_SPARC64
+    if (adi_capable()) {
+        long addr = start;
+
+        /* If userspace has passed a versioned address, kernel
+         * will not find it in the VMAs since it does not store
+         * the version tags in the list of VMAs. Storing version
+         * tags in list of VMAs is impractical since they can be
+         * changed any time from userspace without dropping into
+         * kernel. Any address search in VMAs will be done with
+         * non-versioned addresses. Ensure the ADI version bits
+         * are dropped here by sign extending the last bit before
+         * ADI bits. IOMMU does not implement version tags.
+         */
+        addr = (addr << (long)adi_nbits()) >> (long)adi_nbits();
+        start = addr;
+    }
+#endif
     start &= PAGE_MASK;
     addr = start;
     len = (unsigned long) nr_pages << PAGE_SHIFT;
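The shift pair in the two gup.c hunks above is a plain sign
extension; an equivalent C sketch, assuming a 4-bit tag
(adi_nbits() == 4) as on M7:

    /* Illustrative only: strip ADI version bits the way the gup fast
     * path does, by shifting the tag out and arithmetically shifting
     * back so bit 59 propagates through bits 63-60. VMA lookups then
     * see the canonical, non-versioned address.
     */
    static unsigned long adi_canonical(unsigned long start)
    {
        long addr = (long)start;

        return (unsigned long)((addr << 4) >> 4);
    }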
diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
index 0112d6942288..f78793a06bbd 100644
--- a/arch/sparc/mm/hugetlbpage.c
+++ b/arch/sparc/mm/hugetlbpage.c
@@ -182,8 +182,20 @@ pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
                          struct page *page, int writeable)
 {
     unsigned int shift = huge_page_shift(hstate_vma(vma));
+    pte_t pte;
 
-    return hugepage_shift_to_tte(entry, shift);
+    pte = hugepage_shift_to_tte(entry, shift);
+
+#ifdef CONFIG_SPARC64
+    /* If this vma has ADI enabled on it, turn on TTE.mcd
+     */
+    if (vma->vm_flags & VM_SPARC_ADI)
+        return pte_mkmcd(pte);
+    else
+        return pte_mknotmcd(pte);
+#else
+    return pte;
+#endif
 }
 
 static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 995f9490334d..cb9ebac6663f 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -3160,3 +3160,72 @@ void flush_tlb_kernel_range(unsigned long start, unsigned long end)
             do_flush_tlb_kernel_range(start, end);
     }
 }
+
+void copy_user_highpage(struct page *to, struct page *from,
+                        unsigned long vaddr, struct vm_area_struct *vma)
+{
+    char *vfrom, *vto;
+
+    vfrom = kmap_atomic(from);
+    vto = kmap_atomic(to);
+    copy_user_page(vto, vfrom, vaddr, to);
+    kunmap_atomic(vto);
+    kunmap_atomic(vfrom);
+
+    /* If this page has ADI enabled, copy over any ADI tags
+     * as well
+     */
+    if (vma->vm_flags & VM_SPARC_ADI) {
+        unsigned long pfrom, pto, i, adi_tag;
+
+        pfrom = page_to_phys(from);
+        pto = page_to_phys(to);
+
+        for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
+            asm volatile("ldxa [%1] %2, %0\n\t"
+                    : "=r" (adi_tag)
+                    :  "r" (i), "i" (ASI_MCD_REAL));
+            asm volatile("stxa %0, [%1] %2\n\t"
+                    :
+                    : "r" (adi_tag), "r" (pto),
+                      "i" (ASI_MCD_REAL));
+            pto += adi_blksize();
+        }
+        asm volatile("membar #Sync\n\t");
+    }
+}
+EXPORT_SYMBOL(copy_user_highpage);
+
+void copy_highpage(struct page *to, struct page *from)
+{
+    char *vfrom, *vto;
+
+    vfrom = kmap_atomic(from);
+    vto = kmap_atomic(to);
+    copy_page(vto, vfrom);
+    kunmap_atomic(vto);
+    kunmap_atomic(vfrom);
+
+    /* If this platform is ADI enabled, copy any ADI tags
+     * as well
+     */
+    if (adi_capable()) {
+        unsigned long pfrom, pto, i, adi_tag;
+
+        pfrom = page_to_phys(from);
+        pto = page_to_phys(to);
+
+        for (i = pfrom; i < (pfrom + PAGE_SIZE); i += adi_blksize()) {
+            asm volatile("ldxa [%1] %2, %0\n\t"
+                    : "=r" (adi_tag)
+                    :  "r" (i), "i" (ASI_MCD_REAL));
+            asm volatile("stxa %0, [%1] %2\n\t"
+                    :
+                    : "r" (adi_tag), "r" (pto),
+                      "i" (ASI_MCD_REAL));
+            pto += adi_blksize();
+        }
+        asm volatile("membar #Sync\n\t");
+    }
+}
+EXPORT_SYMBOL(copy_highpage);
diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c
index 75a04c1a2383..f5edc28aa3a5 100644
--- a/arch/sparc/mm/tsb.c
+++ b/arch/sparc/mm/tsb.c
@@ -546,6 +546,9 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
 
     mm->context.sparc64_ctx_val = 0UL;
 
+    mm->context.tag_store = NULL;
+    spin_lock_init(&mm->context.tag_lock);
+
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
     /* We reset them to zero because the fork() page copying
      * will re-increment the counters as the parent PTEs are
@@ -611,4 +614,22 @@ void destroy_context(struct mm_struct *mm)
     }
 
     spin_unlock_irqrestore(&ctx_alloc_lock, flags);
+
+    /* If ADI tag storage was allocated for this task, free it */
+    if (mm->context.tag_store) {
+        tag_storage_desc_t *tag_desc;
+        unsigned long max_desc;
+        unsigned char *tags;
+
+        tag_desc = mm->context.tag_store;
+        max_desc = PAGE_SIZE/sizeof(tag_storage_desc_t);
+        for (i = 0; i < max_desc; i++) {
+            tags = tag_desc->tags;
+            tag_desc->tags = NULL;
+            kfree(tags);
+            tag_desc++;
+        }
+        kfree(mm->context.tag_store);
+        mm->context.tag_store = NULL;
+    }
 }