Diffstat (limited to 'arch/parisc/kernel')
 -rw-r--r--  arch/parisc/kernel/cache.c           67
 -rw-r--r--  arch/parisc/kernel/irq.c              2
 -rw-r--r--  arch/parisc/kernel/sys_parisc.c      14
 -rw-r--r--  arch/parisc/kernel/syscall_table.S    3
 4 files changed, 12 insertions, 74 deletions
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index ac87a40502e6..f6448c7c62b5 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -323,7 +323,8 @@ void flush_dcache_page(struct page *page)
* specifically accesses it, of course) */
flush_tlb_page(mpnt, addr);
- if (old_addr == 0 || (old_addr & (SHMLBA - 1)) != (addr & (SHMLBA - 1))) {
+ if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
+ != (addr & (SHM_COLOUR - 1))) {
__flush_cache_page(mpnt, addr, page_to_phys(page));
if (old_addr)
printk(KERN_ERR "INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %s\n", old_addr, addr, mpnt->vm_file ? (char *)mpnt->vm_file->f_path.dentry->d_name.name : "(null)");
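Note: a minimal userspace sketch of the colour comparison in the hunk above, for reference. The SHM_COLOUR value is an assumption for illustration; the real constant comes from the parisc headers and is not part of this diff. Two user addresses alias equivalently when their offsets within a colour stride match; flush_dcache_page() flushes and warns when they do not.

	#include <stdio.h>

	#define SHM_COLOUR	0x00400000UL	/* assumed 4 MB colouring stride */

	static int same_colour(unsigned long a, unsigned long b)
	{
		return (a & (SHM_COLOUR - 1)) == (b & (SHM_COLOUR - 1));
	}

	int main(void)
	{
		printf("%d\n", same_colour(0x40001000UL, 0x40401000UL));	/* 1: equivalent */
		printf("%d\n", same_colour(0x40001000UL, 0x40402000UL));	/* 0: inequivalent */
		return 0;
	}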
@@ -581,67 +582,3 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long
__flush_cache_page(vma, vmaddr, PFN_PHYS(pfn));
}
}
-
-#ifdef CONFIG_PARISC_TMPALIAS
-
-void clear_user_highpage(struct page *page, unsigned long vaddr)
-{
- void *vto;
- unsigned long flags;
-
- /* Clear using TMPALIAS region. The page doesn't need to
- be flushed but the kernel mapping needs to be purged. */
-
- vto = kmap_atomic(page);
-
- /* The PA-RISC 2.0 Architecture book states on page F-6:
- "Before a write-capable translation is enabled, *all*
- non-equivalently-aliased translations must be removed
- from the page table and purged from the TLB. (Note
- that the caches are not required to be flushed at this
- time.) Before any non-equivalent aliased translation
- is re-enabled, the virtual address range for the writeable
- page (the entire page) must be flushed from the cache,
- and the write-capable translation removed from the page
- table and purged from the TLB." */
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- purge_tlb_end(flags);
- preempt_disable();
- clear_user_page_asm(vto, vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-void copy_user_highpage(struct page *to, struct page *from,
- unsigned long vaddr, struct vm_area_struct *vma)
-{
- void *vfrom, *vto;
- unsigned long flags;
-
- /* Copy using TMPALIAS region. This has the advantage
- that the `from' page doesn't need to be flushed. However,
- the `to' page must be flushed in copy_user_page_asm since
- it can be used to bring in executable code. */
-
- vfrom = kmap_atomic(from);
- vto = kmap_atomic(to);
-
- purge_kernel_dcache_page_asm((unsigned long)vto);
- purge_tlb_start(flags);
- pdtlb_kernel(vto);
- pdtlb_kernel(vfrom);
- purge_tlb_end(flags);
- preempt_disable();
- copy_user_page_asm(vto, vfrom, vaddr);
- flush_dcache_page_asm(__pa(vto), vaddr);
- preempt_enable();
-
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER1); */
- pagefault_enable(); /* kunmap_atomic(addr, KM_USER0); */
-}
-
-#endif /* CONFIG_PARISC_TMPALIAS */
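Note: with the CONFIG_PARISC_TMPALIAS helpers gone (the removed clear_user_highpage() kmap_atomic()s the page but only ever calls pagefault_enable() on the way out), the arch is left to the generic inlines from include/linux/highmem.h. A rough sketch of those fallbacks, assuming no other parisc-specific override:

	static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
	{
		void *addr = kmap_atomic(page);

		clear_user_page(addr, vaddr, page);
		kunmap_atomic(addr);		/* balanced, unlike the removed code above */
	}

	static inline void copy_user_highpage(struct page *to, struct page *from,
					      unsigned long vaddr, struct vm_area_struct *vma)
	{
		char *vfrom = kmap_atomic(from);
		char *vto = kmap_atomic(to);

		copy_user_page(vto, vfrom, vaddr, to);
		kunmap_atomic(vto);
		kunmap_atomic(vfrom);
	}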
diff --git a/arch/parisc/kernel/irq.c b/arch/parisc/kernel/irq.c
index 8ceac4785609..cfe056fe7f5c 100644
--- a/arch/parisc/kernel/irq.c
+++ b/arch/parisc/kernel/irq.c
@@ -117,7 +117,7 @@ int cpu_check_affinity(struct irq_data *d, const struct cpumask *dest)
return -EINVAL;
/* whatever mask they set, we just allow one CPU */
- cpu_dest = first_cpu(*dest);
+ cpu_dest = cpumask_first_and(dest, cpu_online_mask);
return cpu_dest;
}
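Note: first_cpu() was an older cpumask helper; cpumask_first_and() additionally intersects the caller's mask with cpu_online_mask, so an offline CPU can never be picked. A sketch of the usual calling pattern (the bounds check is illustrative, not part of this hunk):

	#include <linux/cpumask.h>

	/* cpumask_first_and() returns nr_cpu_ids (or more) when the intersection
	 * is empty, so callers that need a valid CPU check for that. */
	static int pick_one_online_cpu(const struct cpumask *dest)
	{
		unsigned int cpu = cpumask_first_and(dest, cpu_online_mask);

		if (cpu >= nr_cpu_ids)
			return -EINVAL;
		return cpu;
	}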
diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
index b7cadc4a06cd..31ffa9b55322 100644
--- a/arch/parisc/kernel/sys_parisc.c
+++ b/arch/parisc/kernel/sys_parisc.c
@@ -45,7 +45,7 @@
static int get_offset(unsigned int last_mmap)
{
- return (last_mmap & (SHMLBA-1)) >> PAGE_SHIFT;
+ return (last_mmap & (SHM_COLOUR-1)) >> PAGE_SHIFT;
}
static unsigned long shared_align_offset(unsigned int last_mmap,
@@ -57,8 +57,8 @@ static unsigned long shared_align_offset(unsigned int last_mmap,
static inline unsigned long COLOR_ALIGN(unsigned long addr,
unsigned int last_mmap, unsigned long pgoff)
{
- unsigned long base = (addr+SHMLBA-1) & ~(SHMLBA-1);
- unsigned long off = (SHMLBA-1) &
+ unsigned long base = (addr+SHM_COLOUR-1) & ~(SHM_COLOUR-1);
+ unsigned long off = (SHM_COLOUR-1) &
(shared_align_offset(last_mmap, pgoff) << PAGE_SHIFT);
return base + off;
@@ -101,7 +101,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
- & (SHMLBA - 1))
+ & (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
@@ -122,7 +122,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
info.length = len;
info.low_limit = mm->mmap_legacy_base;
info.high_limit = mmap_upper_limit();
- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
@@ -161,7 +161,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
if (flags & MAP_FIXED) {
if ((flags & MAP_SHARED) && last_mmap &&
(addr - shared_align_offset(last_mmap, pgoff))
- & (SHMLBA - 1))
+ & (SHM_COLOUR - 1))
return -EINVAL;
goto found_addr;
}
@@ -182,7 +182,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
info.length = len;
info.low_limit = PAGE_SIZE;
info.high_limit = mm->mmap_base;
- info.align_mask = last_mmap ? (PAGE_MASK & (SHMLBA - 1)) : 0;
+ info.align_mask = last_mmap ? (PAGE_MASK & (SHM_COLOUR - 1)) : 0;
info.align_offset = shared_align_offset(last_mmap, pgoff);
addr = vm_unmapped_area(&info);
if (!(addr & ~PAGE_MASK))
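Note: the COLOR_ALIGN() arithmetic above, reduced to a standalone userspace sketch with the shared-mapping colour offset passed in precomputed. The SHM_COLOUR and PAGE_SHIFT values are assumptions for illustration:

	#include <stdio.h>

	#define SHM_COLOUR	0x00400000UL	/* assumed 4 MB colour stride */
	#define PAGE_SHIFT	12		/* assumed 4 kB pages */

	/* Round addr up to the next colour boundary, then add the page-colour
	 * offset inherited from the file's previous shared mapping. */
	static unsigned long color_align(unsigned long addr, unsigned long shared_off)
	{
		unsigned long base = (addr + SHM_COLOUR - 1) & ~(SHM_COLOUR - 1);
		unsigned long off  = (SHM_COLOUR - 1) & (shared_off << PAGE_SHIFT);

		return base + off;
	}

	int main(void)
	{
		/* 0x40001000 rounds up to 0x40400000; a 2-page offset lands at 0x40402000 */
		printf("0x%lx\n", color_align(0x40001000UL, 2));
		return 0;
	}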
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index 8fa3fbb3e4d3..83ead0ea127d 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -392,7 +392,7 @@
ENTRY_COMP(vmsplice)
ENTRY_COMP(move_pages) /* 295 */
ENTRY_SAME(getcpu)
- ENTRY_SAME(epoll_pwait)
+ ENTRY_COMP(epoll_pwait)
ENTRY_COMP(statfs64)
ENTRY_COMP(fstatfs64)
ENTRY_COMP(kexec_load) /* 300 */
@@ -431,6 +431,7 @@
ENTRY_SAME(finit_module)
ENTRY_SAME(sched_setattr)
ENTRY_SAME(sched_getattr) /* 335 */
+ ENTRY_COMP(utimes)
/* Nothing yet */
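Note: in this table, ENTRY_COMP points the 32-bit slot at the compat_sys_* wrapper, while ENTRY_SAME reuses the native entry for both widths. A sketch of why utimes needs the wrapper on a 64-bit kernel (type names here are illustrative, not the kernel's exact definitions; the same reasoning applies to epoll_pwait's sigset argument):

	#include <stdint.h>

	struct compat_timeval_sketch {	/* layout a 32-bit task passes in */
		int32_t tv_sec;
		int32_t tv_usec;
	};

	struct native_timeval_sketch {	/* layout the native 64-bit syscall reads */
		int64_t tv_sec;
		int64_t tv_usec;
	};

	/* compat_sys_utimes() widens the 32-bit fields before the common code
	 * runs; ENTRY_SAME would have handed the 32-bit buffer straight to the
	 * 64-bit parser. */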