author    Arnd Bergmann <arnd@arndb.de>  2015-11-25 23:47:38 +0100
committer Arnd Bergmann <arnd@arndb.de>  2015-11-25 23:47:38 +0100
commit    d3de94ba4e58e8043d500f7335797d6a2ac93248 (patch)
tree      8382a170965eec00ebfe62e7b42dfdea9ca44f54 /arch
parent    Merge tag 'imx-fixes-4.4' of git://git.kernel.org/pub/scm/linux/kernel/git/sh... (diff)
parent    Linux 4.4-rc2 (diff)
Merge tag 'v4.4-rc2' into fixes
Linux 4.4-rc2 is backmerged as a base for the keystone fixes.
Diffstat (limited to 'arch')
-rw-r--r-- arch/arm/boot/dts/imx27.dtsi | 16
-rw-r--r-- arch/arm/net/bpf_jit_32.c | 2
-rw-r--r-- arch/arm64/crypto/aes-ce-cipher.c | 2
-rw-r--r-- arch/arm64/include/asm/barrier.h | 16
-rw-r--r-- arch/arm64/include/asm/compat.h | 3
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 13
-rw-r--r-- arch/arm64/include/asm/mmu_context.h | 2
-rw-r--r-- arch/arm64/include/asm/pgtable.h | 1
-rw-r--r-- arch/arm64/kernel/cpuinfo.c | 5
-rw-r--r-- arch/arm64/kernel/efi.c | 14
-rw-r--r-- arch/arm64/kernel/suspend.c | 10
-rw-r--r-- arch/arm64/mm/dma-mapping.c | 35
-rw-r--r-- arch/arm64/mm/mmu.c | 14
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 48
-rw-r--r-- arch/mips/ath79/setup.c | 7
-rw-r--r-- arch/mips/boot/dts/qca/ar9132.dtsi | 2
-rw-r--r-- arch/mips/include/asm/page.h | 3
-rw-r--r-- arch/parisc/Kconfig | 3
-rw-r--r-- arch/parisc/include/asm/hugetlb.h | 85
-rw-r--r-- arch/parisc/include/asm/page.h | 13
-rw-r--r-- arch/parisc/include/asm/pgalloc.h | 2
-rw-r--r-- arch/parisc/include/asm/pgtable.h | 26
-rw-r--r-- arch/parisc/include/asm/processor.h | 27
-rw-r--r-- arch/parisc/include/uapi/asm/mman.h | 10
-rw-r--r-- arch/parisc/kernel/asm-offsets.c | 8
-rw-r--r-- arch/parisc/kernel/entry.S | 56
-rw-r--r-- arch/parisc/kernel/head.S | 4
-rw-r--r-- arch/parisc/kernel/setup.c | 14
-rw-r--r-- arch/parisc/kernel/syscall.S | 4
-rw-r--r-- arch/parisc/kernel/traps.c | 35
-rw-r--r-- arch/parisc/kernel/vmlinux.lds.S | 9
-rw-r--r-- arch/parisc/mm/Makefile | 1
-rw-r--r-- arch/parisc/mm/hugetlbpage.c | 161
-rw-r--r-- arch/parisc/mm/init.c | 40
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 1
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/s390/include/asm/cio.h | 1
-rw-r--r-- arch/s390/include/asm/elf.h | 13
-rw-r--r-- arch/s390/include/asm/ipl.h | 3
-rw-r--r-- arch/s390/include/asm/pci_dma.h | 4
-rw-r--r-- arch/s390/include/asm/trace/diag.h | 6
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 19
-rw-r--r-- arch/s390/kernel/compat_wrapper.c | 1
-rw-r--r-- arch/s390/kernel/diag.c | 4
-rw-r--r-- arch/s390/kernel/head.S | 95
-rw-r--r-- arch/s390/kernel/ipl.c | 65
-rw-r--r-- arch/s390/kernel/process.c | 6
-rw-r--r-- arch/s390/kernel/sclp.c | 2
-rw-r--r-- arch/s390/kernel/setup.c | 3
-rw-r--r-- arch/s390/kernel/syscalls.S | 1
-rw-r--r-- arch/s390/kernel/trace.c | 6
-rw-r--r-- arch/s390/mm/init.c | 30
-rw-r--r-- arch/s390/mm/mmap.c | 60
-rw-r--r-- arch/s390/pci/pci_dma.c | 84
-rw-r--r-- arch/x86/include/asm/msr-index.h | 3
-rw-r--r-- arch/x86/kernel/cpu/common.c | 3
-rw-r--r-- arch/x86/kernel/fpu/signal.c | 11
-rw-r--r-- arch/x86/kernel/fpu/xstate.c | 1
-rw-r--r-- arch/x86/kernel/mcount_64.S | 6
-rw-r--r-- arch/x86/mm/mpx.c | 47
61 files changed, 723 insertions(+), 446 deletions(-)
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index feb9d34b239c..f818ea483aeb 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -486,7 +486,10 @@
compatible = "fsl,imx27-usb";
reg = <0x10024000 0x200>;
interrupts = <56>;
- clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+ clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+ <&clks IMX27_CLK_USB_AHB_GATE>,
+ <&clks IMX27_CLK_USB_DIV>;
+ clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 0>;
status = "disabled";
};
@@ -495,7 +498,10 @@
compatible = "fsl,imx27-usb";
reg = <0x10024200 0x200>;
interrupts = <54>;
- clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+ clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+ <&clks IMX27_CLK_USB_AHB_GATE>,
+ <&clks IMX27_CLK_USB_DIV>;
+ clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 1>;
dr_mode = "host";
status = "disabled";
@@ -505,7 +511,10 @@
compatible = "fsl,imx27-usb";
reg = <0x10024400 0x200>;
interrupts = <55>;
- clocks = <&clks IMX27_CLK_USB_IPG_GATE>;
+ clocks = <&clks IMX27_CLK_USB_IPG_GATE>,
+ <&clks IMX27_CLK_USB_AHB_GATE>,
+ <&clks IMX27_CLK_USB_DIV>;
+ clock-names = "ipg", "ahb", "per";
fsl,usbmisc = <&usbmisc 2>;
dr_mode = "host";
status = "disabled";
@@ -515,7 +524,6 @@
#index-cells = <1>;
compatible = "fsl,imx27-usbmisc";
reg = <0x10024600 0x200>;
- clocks = <&clks IMX27_CLK_USB_AHB_GATE>;
};
sahara2: sahara@10025000 {
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 2f4b14cfddb4..591f9db3bf40 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1061,7 +1061,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
}
build_epilogue(&ctx);
- flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
+ flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
#if __LINUX_ARM_ARCH__ < 7
if (ctx.imm_count)
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792a983d..f7bd9bf0bbb3 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@ EXPORT_SYMBOL(ce_aes_setkey);
static struct crypto_alg aes_alg = {
.cra_name = "aes",
.cra_driver_name = "aes-ce",
- .cra_priority = 300,
+ .cra_priority = 250,
.cra_flags = CRYPTO_ALG_TYPE_CIPHER,
.cra_blocksize = AES_BLOCK_SIZE,
.cra_ctxsize = sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f9679f4b0..9622eb48f894 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@ do { \
#define smp_load_acquire(p) \
({ \
- typeof(*p) ___p1; \
+ union { typeof(*p) __val; char __c[1]; } __u; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
case 1: \
asm volatile ("ldarb %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u8 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
case 2: \
asm volatile ("ldarh %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u16 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
case 4: \
asm volatile ("ldar %w0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u32 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
case 8: \
asm volatile ("ldar %0, %1" \
- : "=r" (___p1) : "Q" (*p) : "memory"); \
+ : "=r" (*(__u64 *)__u.__c) \
+ : "Q" (*p) : "memory"); \
break; \
} \
- ___p1; \
+ __u.__val; \
})
#define read_barrier_depends() do { } while(0)
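The rewrite above swaps the typed temporary ___p1 for a union, so the asm output operand is written through a plain char array and read back as __u.__val; this keeps smp_load_acquire() usable when *p carries const or volatile qualifiers that would make a typeof(*p) local unassignable as an asm output. A minimal user-space sketch of the same union trick (the macro name and the plain-load stand-in are illustrative, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* The union gives the macro an unqualified place (__c) to deposit the
 * loaded bits, plus a correctly typed view (__val) to hand back. The
 * sketch only handles 4-byte types; the kernel switches on sizeof(*p). */
#define load_via_union(p) ({                                        \
    union { __typeof__(*(p)) __val; char __c[1]; } __u;             \
    *(uint32_t *)__u.__c = *(const volatile uint32_t *)(p);         \
    __u.__val;                                                      \
})

int main(void)
{
    const uint32_t x = 42; /* const: a typeof(*p) temp could not be assigned */
    printf("%u\n", load_via_union(&x));
    return 0;
}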
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed6919b54..eb8432bb82b8 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
*/
#include <linux/types.h>
#include <linux/sched.h>
-#include <linux/ptrace.h>
#define COMPAT_USER_HZ 100
#ifdef __AARCH64EB__
@@ -234,7 +233,7 @@ static inline compat_uptr_t ptr_to_compat(void __user *uptr)
return (u32)(unsigned long)uptr;
}
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
static inline void __user *arch_compat_alloc_user_space(long len)
{
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead41afc..61e08f360e31 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
#ifdef __KERNEL__
-#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
@@ -26,22 +25,16 @@
#include <asm/xen/hypervisor.h>
#define DMA_ERROR_CODE (~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
extern struct dma_map_ops dummy_dma_ops;
static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
- if (unlikely(!dev))
- return dma_ops;
- else if (dev->archdata.dma_ops)
+ if (dev && dev->archdata.dma_ops)
return dev->archdata.dma_ops;
- else if (acpi_disabled)
- return dma_ops;
/*
- * When ACPI is enabled, if arch_set_dma_ops is not called,
- * we will disable device DMA capability by setting it
- * to dummy_dma_ops.
+ * We expect no ISA devices, and all other DMA masters are expected to
+ * have someone call arch_setup_dma_ops at device creation time.
*/
return &dummy_dma_ops;
}
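With the ACPI special-casing gone, the fallback above becomes a simple two-step lookup: per-device ops installed via arch_setup_dma_ops() win, and anything left unconfigured is routed to dummy_dma_ops, whose operations all fail, so such a device simply cannot do DMA. A compilable sketch of that pattern (all types and names below are stand-ins, not the kernel's):

#include <stdio.h>

struct dma_map_ops {
    const char *name; /* real ops carry function pointers */
};

/* Stand-in for dummy_dma_ops: a sentinel whose operations would all
 * return errors, denying DMA to devices nobody configured. */
static struct dma_map_ops dummy_dma_ops = { .name = "dummy" };
static struct dma_map_ops swiotlb_dma_ops = { .name = "swiotlb" };

struct device {
    struct { struct dma_map_ops *dma_ops; } archdata;
};

static struct dma_map_ops *generic_dma_ops(struct device *dev)
{
    if (dev && dev->archdata.dma_ops)
        return dev->archdata.dma_ops; /* set at device creation */
    return &dummy_dma_ops;            /* unconfigured: deny DMA */
}

int main(void)
{
    struct device d = { .archdata = { .dma_ops = NULL } };

    printf("%s\n", generic_dma_ops(&d)->name);  /* dummy */
    d.archdata.dma_ops = &swiotlb_dma_ops;      /* arch_setup_dma_ops() analogue */
    printf("%s\n", generic_dma_ops(&d)->name);  /* swiotlb */
    return 0;
}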
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e87898ba96..24165784b803 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@ static inline void cpu_set_default_tcr_t0sz(void)
#define destroy_context(mm) do { } while(0)
void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
-#define init_new_context(tsk,mm) ({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm) ({ atomic64_set(&(mm)->context.id, 0); 0; })
/*
* This is called when "tsk" is about to enter lazy TLB mode.
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a9426b69..7e074f93f383 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val);
#define PAGE_KERNEL __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_RO __pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
#define PAGE_KERNEL_EXEC_CONT __pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d0a0b4..212ae6361d8b 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
#include <linux/seq_file.h>
#include <linux/sched.h>
#include <linux/smp.h>
+#include <linux/delay.h>
/*
* In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@ static int c_show(struct seq_file *m, void *v)
*/
seq_printf(m, "processor\t: %d\n", i);
+ seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+ loops_per_jiffy / (500000UL/HZ),
+ loops_per_jiffy / (5000UL/HZ) % 100);
+
/*
* Dump out the common processor features in a single line.
* Userspace should read the hwcaps with getauxval(AT_HWCAP)
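The two seq_printf() operands restore the BogoMIPS line as a fixed-point split of loops_per_jiffy * HZ / 500000: the first expression yields the integer part, the second the two fractional digits. A quick user-space check of that arithmetic (the loops_per_jiffy and HZ values below are made up):

#include <stdio.h>

int main(void)
{
    unsigned long loops_per_jiffy = 24115200; /* example value */
    unsigned long hz = 250;                   /* example CONFIG_HZ */

    /* Same expressions as the patch, with HZ as a variable:
     * prints "12057.60", i.e. 24115200 * 250 / 500000 = 12057.6 */
    printf("BogoMIPS\t: %lu.%02lu\n",
           loops_per_jiffy / (500000UL / hz),
           loops_per_jiffy / (5000UL / hz) % 100);
    return 0;
}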
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50f4cdf..fc5508e0df57 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -224,6 +224,8 @@ static bool __init efi_virtmap_init(void)
{
efi_memory_desc_t *md;
+ init_new_context(NULL, &efi_mm);
+
for_each_efi_memory_desc(&memmap, md) {
u64 paddr, npages, size;
pgprot_t prot;
@@ -254,7 +256,8 @@ static bool __init efi_virtmap_init(void)
else
prot = PAGE_KERNEL;
- create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+ create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+ __pgprot(pgprot_val(prot) | PTE_NG));
}
return true;
}
@@ -329,14 +332,7 @@ core_initcall(arm64_dmi_init);
static void efi_set_pgd(struct mm_struct *mm)
{
- if (mm == &init_mm)
- cpu_set_reserved_ttbr0();
- else
- cpu_switch_mm(mm->pgd, mm);
-
- local_flush_tlb_all();
- if (icache_is_aivivt())
- __local_flush_icache_all();
+ switch_mm(NULL, mm, NULL);
}
void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e17cf7f..1095aa483a1c 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
@@ -71,6 +72,13 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
local_dbg_save(flags);
/*
+ * Function graph tracer state gets inconsistent when the kernel
+ * calls functions that never return (aka suspend finishers), hence
+ * disable graph tracing during their execution.
+ */
+ pause_graph_tracing();
+
+ /*
* mm context saved on the stack, it will be restored when
* the cpu comes out of reset through the identity mapped
* page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@ int cpu_suspend(unsigned long arg, int (*fn)(unsigned long))
hw_breakpoint_restore(NULL);
}
+ unpause_graph_tracing();
+
/*
* Restore pstate flags. OS lock and mdscr have been already
* restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199114b4..7963aa4b5d28 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
*/
#include <linux/gfp.h>
+#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
@@ -28,9 +29,6 @@
#include <asm/cacheflush.h>
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
bool coherent)
{
@@ -515,13 +513,7 @@ EXPORT_SYMBOL(dummy_dma_ops);
static int __init arm64_dma_init(void)
{
- int ret;
-
- dma_ops = &swiotlb_dma_ops;
-
- ret = atomic_pool_init();
-
- return ret;
+ return atomic_pool_init();
}
arch_initcall(arm64_dma_init);
@@ -552,10 +544,14 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
{
bool coherent = is_device_dma_coherent(dev);
int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+ size_t iosize = size;
void *addr;
if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
return NULL;
+
+ size = PAGE_ALIGN(size);
+
/*
* Some drivers rely on this, and we probably don't want the
* possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
struct page **pages;
pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
- pages = iommu_dma_alloc(dev, size, gfp, ioprot, handle,
+ pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
flush_page);
if (!pages)
return NULL;
@@ -574,7 +570,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
__builtin_return_address(0));
if (!addr)
- iommu_dma_free(dev, pages, size, handle);
+ iommu_dma_free(dev, pages, iosize, handle);
} else {
struct page *page;
/*
@@ -591,7 +587,7 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
if (!addr)
return NULL;
- *handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+ *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
if (iommu_dma_mapping_error(dev, *handle)) {
if (coherent)
__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@ static void *__iommu_alloc_attrs(struct device *dev, size_t size,
static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t handle, struct dma_attrs *attrs)
{
+ size_t iosize = size;
+
+ size = PAGE_ALIGN(size);
/*
* @cpu_addr will be one of 3 things depending on how it was allocated:
* - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@ static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
* Hence how dodgy the below logic looks...
*/
if (__in_atomic_pool(cpu_addr, size)) {
- iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
__free_from_pool(cpu_addr, size);
} else if (is_vmalloc_addr(cpu_addr)){
struct vm_struct *area = find_vm_area(cpu_addr);
if (WARN_ON(!area || !area->pages))
return;
- iommu_dma_free(dev, area->pages, size, &handle);
+ iommu_dma_free(dev, area->pages, iosize, &handle);
dma_common_free_remap(cpu_addr, size, VM_USERMAP);
} else {
- iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+ iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
__free_pages(virt_to_page(cpu_addr), get_order(size));
}
}
@@ -984,8 +983,8 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
struct iommu_ops *iommu, bool coherent)
{
- if (!acpi_disabled && !dev->archdata.dma_ops)
- dev->archdata.dma_ops = dma_ops;
+ if (!dev->archdata.dma_ops)
+ dev->archdata.dma_ops = &swiotlb_dma_ops;
dev->archdata.dma_coherent = coherent;
__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c81c48..abb66f84d4ac 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,8 +362,8 @@ static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
* for now. This will get more fine grained later once all memory
* is mapped
*/
- unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
- unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+ unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+ unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
if (end < kernel_x_start) {
create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@ static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
/* now that we are actually fully mapped, make the start/end more fine grained */
- if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+ if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
unsigned long aligned_start = round_down(__pa(_stext),
- SECTION_SIZE);
+ SWAPPER_BLOCK_SIZE);
create_mapping(aligned_start, __phys_to_virt(aligned_start),
__pa(_stext) - aligned_start,
PAGE_KERNEL);
}
- if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+ if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
unsigned long aligned_end = round_up(__pa(__init_end),
- SECTION_SIZE);
+ SWAPPER_BLOCK_SIZE);
create_mapping(__pa(__init_end), (unsigned long)__init_end,
aligned_end - __pa(__init_end),
PAGE_KERNEL);
@@ -475,7 +475,7 @@ void mark_rodata_ro(void)
{
create_mapping_late(__pa(_stext), (unsigned long)_stext,
(unsigned long)_etext - (unsigned long)_stext,
- PAGE_KERNEL_EXEC | PTE_RDONLY);
+ PAGE_KERNEL_ROX);
}
#endif
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cf3c7d4a1b58..d6a53ef2350b 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -50,7 +50,7 @@ static const int bpf2a64[] = {
[BPF_REG_8] = A64_R(21),
[BPF_REG_9] = A64_R(22),
/* read-only frame pointer to access stack */
- [BPF_REG_FP] = A64_FP,
+ [BPF_REG_FP] = A64_R(25),
/* temporary register for internal BPF JIT */
[TMP_REG_1] = A64_R(23),
[TMP_REG_2] = A64_R(24),
@@ -155,18 +155,49 @@ static void build_prologue(struct jit_ctx *ctx)
stack_size += 4; /* extra for skb_copy_bits buffer */
stack_size = STACK_ALIGN(stack_size);
+ /*
+ * BPF prog stack layout
+ *
+ * high
+ * original A64_SP => 0:+-----+ BPF prologue
+ * |FP/LR|
+ * current A64_FP => -16:+-----+
+ * | ... | callee saved registers
+ * +-----+
+ * | | x25/x26
+ * BPF fp register => -80:+-----+
+ * | |
+ * | ... | BPF prog stack
+ * | |
+ * | |
+ * current A64_SP => +-----+
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +-----+
+ * low
+ *
+ */
+
+ /* Save FP and LR registers to stay aligned with ARM64 AAPCS */
+ emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+ emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+
/* Save callee-saved register */
emit(A64_PUSH(r6, r7, A64_SP), ctx);
emit(A64_PUSH(r8, r9, A64_SP), ctx);
if (ctx->tmp_used)
emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
- /* Set up BPF stack */
- emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+ /* Save fp (x25) and x26. SP requires 16 bytes alignment */
+ emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
- /* Set up frame pointer */
+ /* Set up BPF prog stack base register (x25) */
emit(A64_MOV(1, fp, A64_SP), ctx);
+ /* Set up function call stack */
+ emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+
/* Clear registers A and X */
emit_a64_mov_i64(ra, 0, ctx);
emit_a64_mov_i64(rx, 0, ctx);
@@ -190,14 +221,17 @@ static void build_epilogue(struct jit_ctx *ctx)
/* We're done with BPF stack */
emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
+ /* Restore fp (x25) and x26 */
+ emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
+
/* Restore callee-saved register */
if (ctx->tmp_used)
emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
emit(A64_POP(r8, r9, A64_SP), ctx);
emit(A64_POP(r6, r7, A64_SP), ctx);
- /* Restore frame pointer */
- emit(A64_MOV(1, fp, A64_SP), ctx);
+ /* Restore FP/LR registers */
+ emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
/* Set return value */
emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -758,7 +792,7 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
if (bpf_jit_enable > 1)
bpf_jit_dump(prog->len, image_size, 2, ctx.image);
- bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+ bpf_flush_icache(header, ctx.image + ctx.idx);
set_memory_ro((unsigned long)header, header->pages);
prog->bpf_func = (void *)ctx.image;
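The -80 that the layout diagram assigns to the BPF fp register falls straight out of the prologue: five 16-byte pushes (FP/LR, then r6/r7, r8/r9, the tmp pair, and x25/x26) sit between the original SP and the BPF stack base. A trivial check of that arithmetic (assuming the tmp registers are in use, as the diagram does):

#include <stdio.h>

int main(void)
{
    /* Pairs pushed by build_prologue(): FP/LR, x19/x20, x21/x22,
     * x23/x24 (tmp), x25/x26 -- each A64_PUSH moves SP down 16 bytes. */
    int pairs = 5;

    printf("BPF fp offset from original A64_SP: -%d\n", pairs * 16); /* -80 */
    return 0;
}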
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 1ba21204ebe0..8755d618e116 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -216,9 +216,9 @@ void __init plat_mem_setup(void)
AR71XX_RESET_SIZE);
ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
AR71XX_PLL_SIZE);
+ ath79_detect_sys_type();
ath79_ddr_ctrl_init();
- ath79_detect_sys_type();
if (mips_machtype != ATH79_MACH_GENERIC_OF)
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
@@ -281,3 +281,8 @@ MIPS_MACHINE(ATH79_MACH_GENERIC,
"Generic",
"Generic AR71XX/AR724X/AR913X based board",
ath79_generic_init);
+
+MIPS_MACHINE(ATH79_MACH_GENERIC_OF,
+ "DTB",
+ "Generic AR71XX/AR724X/AR913X based board (DT)",
+ NULL);
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index fb7734eadbf0..13d0439496a9 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -107,7 +107,7 @@
miscintc: interrupt-controller@18060010 {
compatible = "qca,ar9132-misc-intc",
"qca,ar7100-misc-intc";
- reg = <0x18060010 0x4>;
+ reg = <0x18060010 0x8>;
interrupt-parent = <&cpuintc>;
interrupts = <6>;
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index ad1fccdb8d13..2046c0230224 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -200,8 +200,9 @@ static inline int pfn_valid(unsigned long pfn)
{
/* avoid <linux/mm.h> include hell */
extern unsigned long max_mapnr;
+ unsigned long pfn_offset = ARCH_PFN_OFFSET;
- return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
+ return pfn >= pfn_offset && pfn < max_mapnr;
}
#elif defined(CONFIG_SPARSEMEM)
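The change above looks cosmetic but has a point: when ARCH_PFN_OFFSET expands to the constant 0, comparing the unsigned pfn directly against it makes GCC warn that the comparison is always true (-Wtype-limits), while routing the constant through a local variable silences the warning without changing the generated check. A condensed illustration (hypothetical max_mapnr; build with -Wtype-limits to see the difference):

#define ARCH_PFN_OFFSET 0UL /* 0 on many configurations */

static unsigned long max_mapnr = 1UL << 20; /* made-up value */

int pfn_valid_direct(unsigned long pfn)
{
    /* warns: comparison of unsigned expression >= 0 is always true */
    return pfn >= ARCH_PFN_OFFSET && pfn < max_mapnr;
}

int pfn_valid_local(unsigned long pfn)
{
    unsigned long pfn_offset = ARCH_PFN_OFFSET; /* warning gone */

    return pfn >= pfn_offset && pfn < max_mapnr;
}

int main(void)
{
    return pfn_valid_direct(5) && pfn_valid_local(5) ? 0 : 1;
}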
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index c36546959e86..729f89163bc3 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -108,6 +108,9 @@ config PGTABLE_LEVELS
default 3 if 64BIT && PARISC_PAGE_SIZE_4KB
default 2
+config SYS_SUPPORTS_HUGETLBFS
+ def_bool y if PA20
+
source "init/Kconfig"
source "kernel/Kconfig.freezer"
diff --git a/arch/parisc/include/asm/hugetlb.h b/arch/parisc/include/asm/hugetlb.h
new file mode 100644
index 000000000000..7d56a9ccb752
--- /dev/null
+++ b/arch/parisc/include/asm/hugetlb.h
@@ -0,0 +1,85 @@
+#ifndef _ASM_PARISC64_HUGETLB_H
+#define _ASM_PARISC64_HUGETLB_H
+
+#include <asm/page.h>
+#include <asm-generic/hugetlb.h>
+
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte);
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep);
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr,
+ unsigned long len) {
+ return 0;
+}
+
+/*
+ * If the arch doesn't supply something else, assume that hugepage
+ * size aligned regions are ok without further preparation.
+ */
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ if (len & ~HPAGE_MASK)
+ return -EINVAL;
+ if (addr & ~HPAGE_MASK)
+ return -EINVAL;
+ return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor,
+ unsigned long ceiling)
+{
+ free_pgd_range(tlb, addr, end, floor, ceiling);
+}
+
+static inline void huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep)
+{
+}
+
+static inline int huge_pte_none(pte_t pte)
+{
+ return pte_none(pte);
+}
+
+static inline pte_t huge_pte_wrprotect(pte_t pte)
+{
+ return pte_wrprotect(pte);
+}
+
+static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ pte_t old_pte = *ptep;
+ set_huge_pte_at(mm, addr, ptep, pte_wrprotect(old_pte));
+}
+
+static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty)
+{
+ int changed = !pte_same(*ptep, pte);
+ if (changed) {
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ flush_tlb_page(vma, addr);
+ }
+ return changed;
+}
+
+static inline pte_t huge_ptep_get(pte_t *ptep)
+{
+ return *ptep;
+}
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+}
+
+#endif /* _ASM_PARISC64_HUGETLB_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index 60d5d174dfe4..80e742a1c162 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -145,11 +145,22 @@ extern int npmem_ranges;
#endif /* CONFIG_DISCONTIGMEM */
#ifdef CONFIG_HUGETLB_PAGE
-#define HPAGE_SHIFT 22 /* 4MB (is this fixed?) */
+#define HPAGE_SHIFT PMD_SHIFT /* fixed for transparent huge pages */
#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK (~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+#if defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT 20 /* 20 = 1MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_1M
+#elif !defined(CONFIG_64BIT) && defined(CONFIG_PARISC_PAGE_SIZE_4KB)
+# define REAL_HPAGE_SHIFT 22 /* 22 = 4MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_4M
+#else
+# define REAL_HPAGE_SHIFT 24 /* 24 = 16MB */
+# define _HUGE_PAGE_SIZE_ENCODING_DEFAULT _PAGE_SIZE_ENCODING_16M
#endif
+#endif /* CONFIG_HUGETLB_PAGE */
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
index 3edbb9fc91b4..f2fd327dce2e 100644
--- a/arch/parisc/include/asm/pgalloc.h
+++ b/arch/parisc/include/asm/pgalloc.h
@@ -35,7 +35,7 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
PxD_FLAG_VALID |
PxD_FLAG_ATTACHED)
+ (__u32)(__pa((unsigned long)pgd) >> PxD_VALUE_SHIFT));
- /* The first pmd entry also is marked with _PAGE_GATEWAY as
+ /* The first pmd entry also is marked with PxD_FLAG_ATTACHED as
* a signal that this pmd may not be freed */
__pgd_val_set(*pgd, PxD_FLAG_ATTACHED);
#endif
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index f93c4a4e6580..d8534f95915a 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -83,7 +83,11 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, (unsigned long)pgd_val(e))
/* This is the size of the initially mapped kernel memory */
-#define KERNEL_INITIAL_ORDER 24 /* 0 to 1<<24 = 16MB */
+#ifdef CONFIG_64BIT
+#define KERNEL_INITIAL_ORDER 25 /* 1<<25 = 32MB */
+#else
+#define KERNEL_INITIAL_ORDER 24 /* 1<<24 = 16MB */
+#endif
#define KERNEL_INITIAL_SIZE (1 << KERNEL_INITIAL_ORDER)
#if CONFIG_PGTABLE_LEVELS == 3
@@ -167,7 +171,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
-/* bit 21 was formerly the FLUSH bit but is now unused */
+#define _PAGE_HPAGE_BIT 21 /* (0x400) Software: Huge Page */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
@@ -194,6 +198,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
+#define _PAGE_HUGE (1 << xlate_pabit(_PAGE_HPAGE_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
@@ -217,7 +222,7 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
#define PxD_FLAG_VALID (1 << xlate_pabit(_PxD_VALID_BIT))
#define PxD_FLAG_MASK (0xf)
#define PxD_FLAG_SHIFT (4)
-#define PxD_VALUE_SHIFT (8) /* (PAGE_SHIFT-PxD_FLAG_SHIFT) */
+#define PxD_VALUE_SHIFT (PFN_PTE_SHIFT-PxD_FLAG_SHIFT)
#ifndef __ASSEMBLY__
@@ -363,6 +368,18 @@ static inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
/*
+ * Huge pte definitions.
+ */
+#ifdef CONFIG_HUGETLB_PAGE
+#define pte_huge(pte) (pte_val(pte) & _PAGE_HUGE)
+#define pte_mkhuge(pte) (__pte(pte_val(pte) | _PAGE_HUGE))
+#else
+#define pte_huge(pte) (0)
+#define pte_mkhuge(pte) (pte)
+#endif
+
+
+/*
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
@@ -410,8 +427,9 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
/* Find an entry in the second-level page table.. */
#if CONFIG_PGTABLE_LEVELS == 3
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
#define pmd_offset(dir,address) \
-((pmd_t *) pgd_page_vaddr(*(dir)) + (((address)>>PMD_SHIFT) & (PTRS_PER_PMD-1)))
+((pmd_t *) pgd_page_vaddr(*(dir)) + pmd_index(address))
#else
#define pmd_offset(dir,addr) ((pmd_t *) dir)
#endif
diff --git a/arch/parisc/include/asm/processor.h b/arch/parisc/include/asm/processor.h
index 54adb60c0a42..7e759ecb1343 100644
--- a/arch/parisc/include/asm/processor.h
+++ b/arch/parisc/include/asm/processor.h
@@ -192,33 +192,6 @@ void show_trace(struct task_struct *task, unsigned long *stack);
*/
typedef unsigned int elf_caddr_t;
-#define start_thread_som(regs, new_pc, new_sp) do { \
- unsigned long *sp = (unsigned long *)new_sp; \
- __u32 spaceid = (__u32)current->mm->context; \
- unsigned long pc = (unsigned long)new_pc; \
- /* offset pc for priv. level */ \
- pc |= 3; \
- \
- regs->iasq[0] = spaceid; \
- regs->iasq[1] = spaceid; \
- regs->iaoq[0] = pc; \
- regs->iaoq[1] = pc + 4; \
- regs->sr[2] = LINUX_GATEWAY_SPACE; \
- regs->sr[3] = 0xffff; \
- regs->sr[4] = spaceid; \
- regs->sr[5] = spaceid; \
- regs->sr[6] = spaceid; \
- regs->sr[7] = spaceid; \
- regs->gr[ 0] = USER_PSW; \
- regs->gr[30] = ((new_sp)+63)&~63; \
- regs->gr[31] = pc; \
- \
- get_user(regs->gr[26],&sp[0]); \
- get_user(regs->gr[25],&sp[-1]); \
- get_user(regs->gr[24],&sp[-2]); \
- get_user(regs->gr[23],&sp[-3]); \
-} while(0)
-
/* The ELF abi wants things done a "wee bit" differently than
* som does. Supporting this behavior here avoids
* having our own version of create_elf_tables.
diff --git a/arch/parisc/include/uapi/asm/mman.h b/arch/parisc/include/uapi/asm/mman.h
index ecc3ae1ca28e..dd4d1876a020 100644
--- a/arch/parisc/include/uapi/asm/mman.h
+++ b/arch/parisc/include/uapi/asm/mman.h
@@ -49,16 +49,6 @@
#define MADV_DONTFORK 10 /* don't inherit across fork */
#define MADV_DOFORK 11 /* do inherit across fork */
-/* The range 12-64 is reserved for page size specification. */
-#define MADV_4K_PAGES 12 /* Use 4K pages */
-#define MADV_16K_PAGES 14 /* Use 16K pages */
-#define MADV_64K_PAGES 16 /* Use 64K pages */
-#define MADV_256K_PAGES 18 /* Use 256K pages */
-#define MADV_1M_PAGES 20 /* Use 1 Megabyte pages */
-#define MADV_4M_PAGES 22 /* Use 4 Megabyte pages */
-#define MADV_16M_PAGES 24 /* Use 16 Megabyte pages */
-#define MADV_64M_PAGES 26 /* Use 64 Megabyte pages */
-
#define MADV_MERGEABLE 65 /* KSM may merge identical pages */
#define MADV_UNMERGEABLE 66 /* KSM may not merge identical pages */
diff --git a/arch/parisc/kernel/asm-offsets.c b/arch/parisc/kernel/asm-offsets.c
index 59001cea13f9..d2f62570a7b1 100644
--- a/arch/parisc/kernel/asm-offsets.c
+++ b/arch/parisc/kernel/asm-offsets.c
@@ -290,6 +290,14 @@ int main(void)
DEFINE(ASM_PFN_PTE_SHIFT, PFN_PTE_SHIFT);
DEFINE(ASM_PT_INITIAL, PT_INITIAL);
BLANK();
+ /* HUGEPAGE_SIZE is only used in vmlinux.lds.S to align kernel text
+ * and kernel data on physical huge pages */
+#ifdef CONFIG_HUGETLB_PAGE
+ DEFINE(HUGEPAGE_SIZE, 1UL << REAL_HPAGE_SHIFT);
+#else
+ DEFINE(HUGEPAGE_SIZE, PAGE_SIZE);
+#endif
+ BLANK();
DEFINE(EXCDATA_IP, offsetof(struct exception_data, fault_ip));
DEFINE(EXCDATA_SPACE, offsetof(struct exception_data, fault_space));
DEFINE(EXCDATA_ADDR, offsetof(struct exception_data, fault_addr));
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index c5ef4081b01d..623496c11756 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -502,21 +502,38 @@
STREG \pte,0(\ptp)
.endm
+ /* We have (depending on the page size):
+ * - 38 to 52-bit Physical Page Number
+ * - 12 to 26-bit page offset
+ */
/* bitshift difference between a PFN (based on kernel's PAGE_SIZE)
* to a CPU TLB 4k PFN (4k => 12 bits to shift) */
- #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+ #define PAGE_ADD_SHIFT (PAGE_SHIFT-12)
+ #define PAGE_ADD_HUGE_SHIFT (REAL_HPAGE_SHIFT-12)
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
- .macro convert_for_tlb_insert20 pte
+ .macro convert_for_tlb_insert20 pte,tmp
+#ifdef CONFIG_HUGETLB_PAGE
+ copy \pte,\tmp
+ extrd,u \tmp,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
+ 64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
+
+ depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_SHIFT,\pte
+ extrd,u,*= \tmp,_PAGE_HPAGE_BIT+32,1,%r0
+ depdi _HUGE_PAGE_SIZE_ENCODING_DEFAULT,63,\
+ (63-58)+PAGE_ADD_HUGE_SHIFT,\pte
+#else /* Huge pages disabled */
extrd,u \pte,(63-ASM_PFN_PTE_SHIFT)+(63-58)+PAGE_ADD_SHIFT,\
64-PAGE_SHIFT-PAGE_ADD_SHIFT,\pte
depdi _PAGE_SIZE_ENCODING_DEFAULT,63,\
(63-58)+PAGE_ADD_SHIFT,\pte
+#endif
.endm
/* Convert the pte and prot to tlb insertion values. How
* this happens is quite subtle, read below */
- .macro make_insert_tlb spc,pte,prot
+ .macro make_insert_tlb spc,pte,prot,tmp
space_to_prot \spc \prot /* create prot id from space */
/* The following is the real subtlety. This is depositing
* T <-> _PAGE_REFTRAP
@@ -553,7 +570,7 @@
depdi 1,12,1,\prot
/* Drop prot bits and convert to page addr for iitlbt and idtlbt */
- convert_for_tlb_insert20 \pte
+ convert_for_tlb_insert20 \pte \tmp
.endm
/* Identical macro to make_insert_tlb above, except it
@@ -646,17 +663,12 @@
/*
- * Align fault_vector_20 on 4K boundary so that both
- * fault_vector_11 and fault_vector_20 are on the
- * same page. This is only necessary as long as we
- * write protect the kernel text, which we may stop
- * doing once we use large page translations to cover
- * the static part of the kernel address space.
+ * Fault_vectors are architecturally required to be aligned on a 2K
+ * boundary
*/
.text
-
- .align 4096
+ .align 2048
ENTRY(fault_vector_20)
/* First vector is invalid (0) */
@@ -1147,7 +1159,7 @@ dtlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
@@ -1173,7 +1185,7 @@ nadtlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20w
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
@@ -1267,7 +1279,7 @@ dtlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,dtlb_check_alias_20
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1295,7 +1307,7 @@ nadtlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,nadtlb_check_alias_20
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1404,7 +1416,7 @@ itlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
@@ -1428,7 +1440,7 @@ naitlb_miss_20w:
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20w
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
iitlbt pte,prot
@@ -1514,7 +1526,7 @@ itlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,itlb_fault
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1534,7 +1546,7 @@ naitlb_miss_20:
tlb_lock spc,ptp,pte,t0,t1,naitlb_check_alias_20
update_accessed ptp,pte,t0,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
@@ -1566,7 +1578,7 @@ dbit_trap_20w:
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
idtlbt pte,prot
@@ -1610,7 +1622,7 @@ dbit_trap_20:
tlb_lock spc,ptp,pte,t0,t1,dbit_fault
update_dirty ptp,pte,t1
- make_insert_tlb spc,pte,prot
+ make_insert_tlb spc,pte,prot,t1
f_extend pte,t1
diff --git a/arch/parisc/kernel/head.S b/arch/parisc/kernel/head.S
index e7d64527aff9..75aa0db9f69e 100644
--- a/arch/parisc/kernel/head.S
+++ b/arch/parisc/kernel/head.S
@@ -69,7 +69,7 @@ $bss_loop:
stw,ma %arg2,4(%r1)
stw,ma %arg3,4(%r1)
- /* Initialize startup VM. Just map first 8/16 MB of memory */
+ /* Initialize startup VM. Just map first 16/32 MB of memory */
load32 PA(swapper_pg_dir),%r4
mtctl %r4,%cr24 /* Initialize kernel root pointer */
mtctl %r4,%cr25 /* Initialize user root pointer */
@@ -107,7 +107,7 @@ $bss_loop:
/* Now initialize the PTEs themselves. We use RWX for
* everything ... it will get remapped correctly later */
ldo 0+_PAGE_KERNEL_RWX(%r0),%r3 /* Hardwired 0 phys addr start */
- ldi (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
+ load32 (1<<(KERNEL_INITIAL_ORDER-PAGE_SHIFT)),%r11 /* PFN count */
load32 PA(pg0),%r1
$pgt_fill_loop:
diff --git a/arch/parisc/kernel/setup.c b/arch/parisc/kernel/setup.c
index 72a3c658ad7b..f7ea626e29c9 100644
--- a/arch/parisc/kernel/setup.c
+++ b/arch/parisc/kernel/setup.c
@@ -130,7 +130,16 @@ void __init setup_arch(char **cmdline_p)
printk(KERN_INFO "The 32-bit Kernel has started...\n");
#endif
- printk(KERN_INFO "Default page size is %dKB.\n", (int)(PAGE_SIZE / 1024));
+ printk(KERN_INFO "Kernel default page size is %d KB. Huge pages ",
+ (int)(PAGE_SIZE / 1024));
+#ifdef CONFIG_HUGETLB_PAGE
+ printk(KERN_CONT "enabled with %d MB physical and %d MB virtual size",
+ 1 << (REAL_HPAGE_SHIFT - 20), 1 << (HPAGE_SHIFT - 20));
+#else
+ printk(KERN_CONT "disabled");
+#endif
+ printk(KERN_CONT ".\n");
+
pdc_console_init();
@@ -377,6 +386,7 @@ arch_initcall(parisc_init);
void start_parisc(void)
{
extern void start_kernel(void);
+ extern void early_trap_init(void);
int ret, cpunum;
struct pdc_coproc_cfg coproc_cfg;
@@ -397,6 +407,8 @@ void start_parisc(void)
panic("must have an fpu to boot linux");
}
+ early_trap_init(); /* initialize checksum of fault_vector */
+
start_kernel();
// not reached
}
diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S
index 0b8d26d3ba43..3fbd7252a4b2 100644
--- a/arch/parisc/kernel/syscall.S
+++ b/arch/parisc/kernel/syscall.S
@@ -369,7 +369,7 @@ tracesys_exit:
ldo -16(%r30),%r29 /* Reference param save area */
#endif
ldo TASK_REGS(%r1),%r26
- bl do_syscall_trace_exit,%r2
+ BL do_syscall_trace_exit,%r2
STREG %r28,TASK_PT_GR28(%r1) /* save return value now */
ldo -THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r1 /* get task ptr */
LDREG TI_TASK(%r1), %r1
@@ -390,7 +390,7 @@ tracesys_sigexit:
#ifdef CONFIG_64BIT
ldo -16(%r30),%r29 /* Reference param save area */
#endif
- bl do_syscall_trace_exit,%r2
+ BL do_syscall_trace_exit,%r2
ldo TASK_REGS(%r1),%r26
ldil L%syscall_exit_rfi,%r1
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index b99b39f1da02..553b09855cfd 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -807,7 +807,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
}
-int __init check_ivt(void *iva)
+void __init initialize_ivt(const void *iva)
{
extern u32 os_hpmc_size;
extern const u32 os_hpmc[];
@@ -818,8 +818,8 @@ int __init check_ivt(void *iva)
u32 *hpmcp;
u32 length;
- if (strcmp((char *)iva, "cows can fly"))
- return -1;
+ if (strcmp((const char *)iva, "cows can fly"))
+ panic("IVT invalid");
ivap = (u32 *)iva;
@@ -839,28 +839,23 @@ int __init check_ivt(void *iva)
check += ivap[i];
ivap[5] = -check;
-
- return 0;
}
-#ifndef CONFIG_64BIT
-extern const void fault_vector_11;
-#endif
-extern const void fault_vector_20;
-void __init trap_init(void)
+/* early_trap_init() is called before we set up kernel mappings and
+ * write-protect the kernel */
+void __init early_trap_init(void)
{
- void *iva;
+ extern const void fault_vector_20;
- if (boot_cpu_data.cpu_type >= pcxu)
- iva = (void *) &fault_vector_20;
- else
-#ifdef CONFIG_64BIT
- panic("Can't boot 64-bit OS on PA1.1 processor!");
-#else
- iva = (void *) &fault_vector_11;
+#ifndef CONFIG_64BIT
+ extern const void fault_vector_11;
+ initialize_ivt(&fault_vector_11);
#endif
- if (check_ivt(iva))
- panic("IVT invalid");
+ initialize_ivt(&fault_vector_20);
+}
+
+void __init trap_init(void)
+{
}
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index 0dacc5ca555a..308f29081d46 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -60,7 +60,7 @@ SECTIONS
EXIT_DATA
}
PERCPU_SECTION(8)
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(HUGEPAGE_SIZE);
__init_end = .;
/* freed after init ends here */
@@ -116,7 +116,7 @@ SECTIONS
* that we can properly leave these
* as writable
*/
- . = ALIGN(PAGE_SIZE);
+ . = ALIGN(HUGEPAGE_SIZE);
data_start = .;
EXCEPTION_TABLE(8)
@@ -135,8 +135,11 @@ SECTIONS
_edata = .;
/* BSS */
- BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 8)
+ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, PAGE_SIZE)
+
+ /* bootmap is allocated in setup_bootmem() directly behind bss. */
+ . = ALIGN(HUGEPAGE_SIZE);
_end = . ;
STABS_DEBUG
diff --git a/arch/parisc/mm/Makefile b/arch/parisc/mm/Makefile
index 758ceefb373a..134393de69d2 100644
--- a/arch/parisc/mm/Makefile
+++ b/arch/parisc/mm/Makefile
@@ -3,3 +3,4 @@
#
obj-y := init.o fault.o ioremap.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/parisc/mm/hugetlbpage.c b/arch/parisc/mm/hugetlbpage.c
new file mode 100644
index 000000000000..f6fdc77a72bd
--- /dev/null
+++ b/arch/parisc/mm/hugetlbpage.c
@@ -0,0 +1,161 @@
+/*
+ * PARISC64 Huge TLB page support.
+ *
+ * This parisc implementation is heavily based on the SPARC and x86 code.
+ *
+ * Copyright (C) 2015 Helge Deller <deller@gmx.de>
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/sysctl.h>
+
+#include <asm/mman.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+
+unsigned long
+hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ struct hstate *h = hstate_file(file);
+
+ if (len & ~huge_page_mask(h))
+ return -EINVAL;
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED)
+ if (prepare_hugepage_range(file, addr, len))
+ return -EINVAL;
+
+ if (addr)
+ addr = ALIGN(addr, huge_page_size(h));
+
+ /* we need to make sure the colouring is OK */
+ return arch_get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
+
+pte_t *huge_pte_alloc(struct mm_struct *mm,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+ /* We must align the address, because our caller will run
+ * set_huge_pte_at() on whatever we return, which writes out
+ * all of the sub-ptes for the hugepage range. So we have
+ * to give it the first such sub-pte.
+ */
+ addr &= HPAGE_MASK;
+
+ pgd = pgd_offset(mm, addr);
+ pud = pud_alloc(mm, pgd, addr);
+ if (pud) {
+ pmd = pmd_alloc(mm, pud, addr);
+ if (pmd)
+ pte = pte_alloc_map(mm, NULL, pmd, addr);
+ }
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte = NULL;
+
+ addr &= HPAGE_MASK;
+
+ pgd = pgd_offset(mm, addr);
+ if (!pgd_none(*pgd)) {
+ pud = pud_offset(pgd, addr);
+ if (!pud_none(*pud)) {
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_none(*pmd))
+ pte = pte_offset_map(pmd, addr);
+ }
+ }
+ return pte;
+}
+
+/* Purge data and instruction TLB entries. Must be called holding
+ * the pa_tlb_lock. The TLB purge instructions are slow on SMP
+ * machines since the purge must be broadcast to all CPUs.
+ */
+static inline void purge_tlb_entries_huge(struct mm_struct *mm, unsigned long addr)
+{
+ int i;
+
+ /* We may use multiple physical huge pages (e.g. 2x1 MB) to emulate
+ * Linux standard huge pages (e.g. 2 MB) */
+ BUILD_BUG_ON(REAL_HPAGE_SHIFT > HPAGE_SHIFT);
+
+ addr &= HPAGE_MASK;
+ addr |= _HUGE_PAGE_SIZE_ENCODING_DEFAULT;
+
+ for (i = 0; i < (1 << (HPAGE_SHIFT-REAL_HPAGE_SHIFT)); i++) {
+ mtsp(mm->context, 1);
+ pdtlb(addr);
+ if (unlikely(split_tlb))
+ pitlb(addr);
+ addr += (1UL << REAL_HPAGE_SHIFT);
+ }
+}
+
+void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t entry)
+{
+ unsigned long addr_start;
+ int i;
+
+ addr &= HPAGE_MASK;
+ addr_start = addr;
+
+ for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
+ /* Directly write pte entry. We could call set_pte_at(mm, addr, ptep, entry)
+ * instead, but then we get double locking on pa_tlb_lock. */
+ *ptep = entry;
+ ptep++;
+
+ /* Drop the PAGE_SIZE/non-huge tlb entry */
+ purge_tlb_entries(mm, addr);
+
+ addr += PAGE_SIZE;
+ pte_val(entry) += PAGE_SIZE;
+ }
+
+ purge_tlb_entries_huge(mm, addr_start);
+}
+
+
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep)
+{
+ pte_t entry;
+
+ entry = *ptep;
+ set_huge_pte_at(mm, addr, ptep, __pte(0));
+
+ return entry;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+int pud_huge(pud_t pud)
+{
+ return 0;
+}
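Two loop bounds in the new file are worth unpacking: set_huge_pte_at() writes 1 << HUGETLB_PAGE_ORDER sub-ptes, one per base page of the huge page, while purge_tlb_entries_huge() issues 1 << (HPAGE_SHIFT - REAL_HPAGE_SHIFT) purges, one per physical huge page emulating the Linux huge page. A quick computation for the 64-bit/4KB configuration from page.h (the PMD_SHIFT value of 21 is an assumption, consistent with the "2x1 MB" comment above):

#include <stdio.h>

int main(void)
{
    unsigned int page_shift = 12;       /* 4 KB base pages */
    unsigned int hpage_shift = 21;      /* HPAGE_SHIFT = PMD_SHIFT: 2 MB (assumed) */
    unsigned int real_hpage_shift = 20; /* REAL_HPAGE_SHIFT: 1 MB, from this patch */

    printf("sub-ptes written per huge page: %u\n",
           1U << (hpage_shift - page_shift));       /* 512 */
    printf("TLB purges per huge page:      %u\n",
           1U << (hpage_shift - real_hpage_shift)); /* 2 */
    return 0;
}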
diff --git a/arch/parisc/mm/init.c b/arch/parisc/mm/init.c
index c5fec4890fdf..1b366c477687 100644
--- a/arch/parisc/mm/init.c
+++ b/arch/parisc/mm/init.c
@@ -409,15 +409,11 @@ static void __init map_pages(unsigned long start_vaddr,
unsigned long vaddr;
unsigned long ro_start;
unsigned long ro_end;
- unsigned long fv_addr;
- unsigned long gw_addr;
- extern const unsigned long fault_vector_20;
- extern void * const linux_gateway_page;
+ unsigned long kernel_end;
ro_start = __pa((unsigned long)_text);
ro_end = __pa((unsigned long)&data_start);
- fv_addr = __pa((unsigned long)&fault_vector_20) & PAGE_MASK;
- gw_addr = __pa((unsigned long)&linux_gateway_page) & PAGE_MASK;
+ kernel_end = __pa((unsigned long)&_end);
end_paddr = start_paddr + size;
@@ -475,24 +471,25 @@ static void __init map_pages(unsigned long start_vaddr,
for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
pte_t pte;
- /*
- * Map the fault vector writable so we can
- * write the HPMC checksum.
- */
if (force)
pte = __mk_pte(address, pgprot);
- else if (parisc_text_address(vaddr) &&
- address != fv_addr)
+ else if (parisc_text_address(vaddr)) {
pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+ if (address >= ro_start && address < kernel_end)
+ pte = pte_mkhuge(pte);
+ }
else
#if defined(CONFIG_PARISC_PAGE_SIZE_4KB)
- if (address >= ro_start && address < ro_end
- && address != fv_addr
- && address != gw_addr)
- pte = __mk_pte(address, PAGE_KERNEL_RO);
- else
+ if (address >= ro_start && address < ro_end) {
+ pte = __mk_pte(address, PAGE_KERNEL_EXEC);
+ pte = pte_mkhuge(pte);
+ } else
#endif
+ {
pte = __mk_pte(address, pgprot);
+ if (address >= ro_start && address < kernel_end)
+ pte = pte_mkhuge(pte);
+ }
if (address >= end_paddr) {
if (force)
@@ -536,15 +533,12 @@ void free_initmem(void)
/* force the kernel to see the new TLB entries */
__flush_tlb_range(0, init_begin, init_end);
- /* Attempt to catch anyone trying to execute code here
- * by filling the page with BRK insns.
- */
- memset((void *)init_begin, 0x00, init_end - init_begin);
+
/* finally dump all the instructions which were cached, since the
* pages are no-longer executable */
flush_icache_range(init_begin, init_end);
- free_initmem_default(-1);
+ free_initmem_default(POISON_FREE_INITMEM);
/* set up a new led state on systems shipped LED State panel */
pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
@@ -728,8 +722,8 @@ static void __init pagetable_init(void)
unsigned long size;
start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
- end_paddr = start_paddr + (pmem_ranges[range].pages << PAGE_SHIFT);
size = pmem_ranges[range].pages << PAGE_SHIFT;
+ end_paddr = start_paddr + size;
map_pages((unsigned long)__va(start_paddr), start_paddr,
size, PAGE_KERNEL, 0);
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c9e26cb264f4..f2b0b1b0c72a 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -382,3 +382,4 @@ COMPAT_SYS(shmat)
SYSCALL(shmdt)
SYSCALL(shmget)
COMPAT_SYS(shmctl)
+SYSCALL(mlock2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d8f8023ac27..4b6b8ace18e0 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
#include <uapi/asm/unistd.h>
-#define __NR_syscalls 378
+#define __NR_syscalls 379
#define __NR__exit __NR_exit
#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 81579e93c659..1effea5193d6 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -400,5 +400,6 @@
#define __NR_shmdt 375
#define __NR_shmget 376
#define __NR_shmctl 377
+#define __NR_mlock2 378
#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 0c5d8ee657f0..d1e7b0a0feeb 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -312,6 +312,7 @@ extern void css_schedule_reprobe(void);
extern void reipl_ccw_dev(struct ccw_dev_id *id);
struct cio_iplinfo {
+ u8 ssid;
u16 devno;
int is_qdio;
};
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 3ad48f22de78..bab6739a1154 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -206,9 +206,16 @@ do { \
} while (0)
#endif /* CONFIG_COMPAT */
-extern unsigned long mmap_rnd_mask;
-
-#define STACK_RND_MASK (test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
+/*
+ * Cache aliasing on the latest machines calls for a mapping granularity
+ * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
+ * of up to 1GB. For 31-bit processes the virtual address space is limited,
+ * use no alignment and limit the randomization to 8MB.
+ */
+#define BRK_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK (is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
+#define MMAP_ALIGN_MASK (is_32bit_task() ? 0 : 0x7fUL)
+#define STACK_RND_MASK MMAP_RND_MASK
#define ARCH_DLINFO \
do { \
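The comment's numbers can be recovered from the masks: they count pages, the cleared low seven bits of the 64-bit values give 128-page (512 KB) granularity, and the remaining bits give the randomization span. A quick check (a PAGE_SIZE of 4 KB is assumed, which is fixed on s390):

#include <stdio.h>

int main(void)
{
    unsigned long page = 4096; /* s390 PAGE_SIZE */

    /* 64-bit: MMAP_RND_MASK = 0x3ff80, MMAP_ALIGN_MASK = 0x7f */
    unsigned long step64 = (0x7fUL + 1) * page;             /* 512 KB slots */
    unsigned long span64 = ((0x3ff80UL >> 7) + 1) * step64; /* 2048 slots */

    /* 31-bit: mask 0x7ff, no extra alignment */
    unsigned long span31 = (0x7ffUL + 1) * page;

    printf("64-bit: %lu KB granularity, %lu MB span\n",
           step64 >> 10, span64 >> 20);    /* 512 KB, 1024 MB */
    printf("31-bit: %lu MB span\n", span31 >> 20); /* 8 MB */
    return 0;
}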
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 39ae6a359747..86634e71b69f 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -64,7 +64,8 @@ struct ipl_block_fcp {
struct ipl_block_ccw {
u8 reserved1[84];
- u8 reserved2[2];
+ u16 reserved2 : 13;
+ u8 ssid : 3;
u16 devno;
u8 vm_flags;
u8 reserved3[3];
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7a7abf1a5537..1aac41e83ea1 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@ void zpci_dma_exit_device(struct zpci_dev *);
void dma_free_seg_table(unsigned long);
unsigned long *dma_alloc_cpu_table(void);
void dma_cleanup_tables(unsigned long *);
-void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+
#endif
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
index 776f307960cc..cc6cfe7889da 100644
--- a/arch/s390/include/asm/trace/diag.h
+++ b/arch/s390/include/asm/trace/diag.h
@@ -19,7 +19,7 @@
#define TRACE_INCLUDE_PATH asm/trace
#define TRACE_INCLUDE_FILE diag
-TRACE_EVENT(diagnose,
+TRACE_EVENT(s390_diagnose,
TP_PROTO(unsigned short nr),
TP_ARGS(nr),
TP_STRUCT__entry(
@@ -32,9 +32,9 @@ TRACE_EVENT(diagnose,
);
#ifdef CONFIG_TRACEPOINTS
-void trace_diagnose_norecursion(int diag_nr);
+void trace_s390_diagnose_norecursion(int diag_nr);
#else
-static inline void trace_diagnose_norecursion(int diag_nr) { }
+static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
#endif
#endif /* _TRACE_S390_DIAG_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index a848adba1504..34ec202472c6 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -192,14 +192,14 @@
#define __NR_set_tid_address 252
#define __NR_fadvise64 253
#define __NR_timer_create 254
-#define __NR_timer_settime (__NR_timer_create+1)
-#define __NR_timer_gettime (__NR_timer_create+2)
-#define __NR_timer_getoverrun (__NR_timer_create+3)
-#define __NR_timer_delete (__NR_timer_create+4)
-#define __NR_clock_settime (__NR_timer_create+5)
-#define __NR_clock_gettime (__NR_timer_create+6)
-#define __NR_clock_getres (__NR_timer_create+7)
-#define __NR_clock_nanosleep (__NR_timer_create+8)
+#define __NR_timer_settime 255
+#define __NR_timer_gettime 256
+#define __NR_timer_getoverrun 257
+#define __NR_timer_delete 258
+#define __NR_clock_settime 259
+#define __NR_clock_gettime 260
+#define __NR_clock_getres 261
+#define __NR_clock_nanosleep 262
/* Number 263 is reserved for vserver */
#define __NR_statfs64 265
#define __NR_fstatfs64 266
@@ -309,7 +309,8 @@
#define __NR_recvfrom 371
#define __NR_recvmsg 372
#define __NR_shutdown 373
-#define NR_syscalls 374
+#define __NR_mlock2 374
+#define NR_syscalls 375
/*
* There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 09f194052df3..fac4eeddef91 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -176,3 +176,4 @@ COMPAT_SYSCALL_WRAP4(accept4, int, fd, struct sockaddr __user *, upeer_sockaddr,
COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
+COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f98766ede4e1..48b37b8357e6 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -121,14 +121,14 @@ device_initcall(show_diag_stat_init);
void diag_stat_inc(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
- trace_diagnose(diag_map[nr].code);
+ trace_s390_diagnose(diag_map[nr].code);
}
EXPORT_SYMBOL(diag_stat_inc);
void diag_stat_inc_norecursion(enum diag_stat_enum nr)
{
this_cpu_inc(diag_stat.counter[nr]);
- trace_diagnose_norecursion(diag_map[nr].code);
+ trace_s390_diagnose_norecursion(diag_map[nr].code);
}
EXPORT_SYMBOL(diag_stat_inc_norecursion);
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1255c6c5353e..301ee9c70688 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -26,6 +26,7 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/page.h>
+#include <asm/ptrace.h>
#define ARCH_OFFSET 4
@@ -59,19 +60,6 @@ __HEAD
.long 0x020006e0,0x20000050
.org 0x200
-#
-# subroutine to set architecture mode
-#
-.Lsetmode:
- mvi __LC_AR_MODE_ID,1 # set esame flag
- slr %r0,%r0 # set cpuid to zero
- lhi %r1,2 # mode 2 = esame (dump)
- sigp %r1,%r0,0x12 # switch to esame mode
- bras %r13,0f
- .fill 16,4,0x0
-0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
- sam31 # switch to 31 bit addressing mode
- br %r14
#
# subroutine to wait for end I/O
@@ -159,7 +147,14 @@ __HEAD
.long 0x02200050,0x00000000
iplstart:
- bas %r14,.Lsetmode # Immediately switch to 64 bit mode
+ mvi __LC_AR_MODE_ID,1 # set esame flag
+ slr %r0,%r0 # set cpuid to zero
+ lhi %r1,2 # mode 2 = esame (dump)
+ sigp %r1,%r0,0x12 # switch to esame mode
+ bras %r13,0f
+ .fill 16,4,0x0
+0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
+ sam31 # switch to 31 bit addressing mode
lh %r1,0xb8 # test if subchannel number
bct %r1,.Lnoload # is valid
l %r1,0xb8 # load ipl subchannel number
@@ -269,71 +264,6 @@ iplstart:
.Lcpuid:.fill 8,1,0
#
-# SALIPL loader support. Based on a patch by Rob van der Heij.
-# This entry point is called directly from the SALIPL loader and
-# doesn't need a builtin ipl record.
-#
- .org 0x800
-ENTRY(start)
- stm %r0,%r15,0x07b0 # store registers
- bas %r14,.Lsetmode # Immediately switch to 64 bit mode
- basr %r12,%r0
-.base:
- l %r11,.parm
- l %r8,.cmd # pointer to command buffer
-
- ltr %r9,%r9 # do we have SALIPL parameters?
- bp .sk8x8
-
- mvc 0(64,%r8),0x00b0 # copy saved registers
- xc 64(240-64,%r8),0(%r8) # remainder of buffer
- tr 0(64,%r8),.lowcase
- b .gotr
-.sk8x8:
- mvc 0(240,%r8),0(%r9) # copy iplparms into buffer
-.gotr:
- slr %r0,%r0
- st %r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
- st %r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
- j startup # continue with startup
-.cmd: .long COMMAND_LINE # address of command line buffer
-.parm: .long PARMAREA
-.lowcase:
- .byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
- .byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
- .byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
- .byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
- .byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
- .byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
- .byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
- .byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
- .byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
- .byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
- .byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
- .byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
- .byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
- .byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
- .byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
- .byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
-
- .byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
- .byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
- .byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
- .byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
- .byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
- .byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
- .byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
- .byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
- .byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87 # .abcdefg
- .byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf # hi
- .byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97 # .jklmnop
- .byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf # qr
- .byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7 # ..stuvwx
- .byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef # yz
- .byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
- .byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
-
-#
# startup-code at 0x10000, running in absolute addressing mode
# this is called either by the ipl loader or directly by PSW restart
# or linload or SALIPL
@@ -364,7 +294,7 @@ ENTRY(startup_kdump)
bras %r13,0f
.fill 16,4,0x0
0: lmh %r0,%r15,0(%r13) # clear high-order half of gprs
- sam31 # switch to 31 bit addressing mode
+ sam64 # switch to 64 bit addressing mode
basr %r13,0 # get base
.LPG0:
xc 0x200(256),0x200 # partially clear lowcore
@@ -395,7 +325,7 @@ ENTRY(startup_kdump)
jnz 1b
j 4f
2: l %r15,.Lstack-.LPG0(%r13)
- ahi %r15,-96
+ ahi %r15,-STACK_FRAME_OVERHEAD
la %r2,.Lals_string-.LPG0(%r13)
l %r3,.Lsclp_print-.LPG0(%r13)
basr %r14,%r3
@@ -429,8 +359,7 @@ ENTRY(startup_kdump)
.long 1, 0xc0000000
#endif
4:
- /* Continue with 64bit startup code in head64.S */
- sam64 # switch to 64 bit mode
+ /* Continue with startup code in head64.S */
jg startup_continue
.align 8
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f6d8acd7e136..b1f0a90f933b 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -121,6 +121,7 @@ static char *dump_type_str(enum dump_type type)
* Must be in data section since the bss section
* is not cleared when these are accessed.
*/
+static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
u32 ipl_flags __attribute__((__section__(".data"))) = 0;
@@ -197,6 +198,33 @@ static ssize_t sys_##_prefix##_##_name##_show(struct kobject *kobj, \
return snprintf(page, PAGE_SIZE, _format, ##args); \
}
+#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk) \
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj, \
+ struct kobj_attribute *attr, \
+ const char *buf, size_t len) \
+{ \
+ unsigned long long ssid, devno; \
+ \
+ if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2) \
+ return -EINVAL; \
+ \
+ if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL) \
+ return -EINVAL; \
+ \
+ _ipl_blk.ssid = ssid; \
+ _ipl_blk.devno = devno; \
+ return len; \
+}
+
+#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk) \
+IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n", \
+ _ipl_blk.ssid, _ipl_blk.devno); \
+IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk); \
+static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
+ __ATTR(_name, (S_IRUGO | S_IWUSR), \
+ sys_##_prefix##_##_name##_show, \
+ sys_##_prefix##_##_name##_store) \
+
#define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value) \
IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value) \
static struct kobj_attribute sys_##_prefix##_##_name##_attr = \
@@ -395,7 +423,7 @@ static ssize_t sys_ipl_device_show(struct kobject *kobj,
switch (ipl_info.type) {
case IPL_TYPE_CCW:
- return sprintf(page, "0.0.%04x\n", ipl_devno);
+ return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
@@ -687,21 +715,14 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
struct bin_attribute *attr,
char *buf, loff_t off, size_t count)
{
+ size_t scpdata_len = count;
size_t padding;
- size_t scpdata_len;
-
- if (off < 0)
- return -EINVAL;
- if (off >= DIAG308_SCPDATA_SIZE)
- return -ENOSPC;
- if (count > DIAG308_SCPDATA_SIZE - off)
- count = DIAG308_SCPDATA_SIZE - off;
-
- memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
- scpdata_len = off + count;
+ if (off)
+ return -EINVAL;
+ memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
if (scpdata_len % 8) {
padding = 8 - (scpdata_len % 8);
memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
@@ -717,7 +738,7 @@ static ssize_t reipl_fcp_scpdata_write(struct file *filp, struct kobject *kobj,
}
static struct bin_attribute sys_reipl_fcp_scp_data_attr =
__BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
- reipl_fcp_scpdata_write, PAGE_SIZE);
+ reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
static struct bin_attribute *reipl_fcp_bin_attrs[] = {
&sys_reipl_fcp_scp_data_attr,
@@ -814,9 +835,7 @@ static struct attribute_group reipl_fcp_attr_group = {
};
/* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- reipl_block_ccw->ipl_info.ccw.devno);
+DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
/* NSS wrapper */
static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -1056,8 +1075,8 @@ static void __reipl_run(void *unused)
switch (reipl_method) {
case REIPL_METHOD_CCW_CIO:
+ devid.ssid = reipl_block_ccw->ipl_info.ccw.ssid;
devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
- devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case REIPL_METHOD_CCW_VM:
@@ -1192,6 +1211,7 @@ static int __init reipl_ccw_init(void)
reipl_block_ccw_init(reipl_block_ccw);
if (ipl_info.type == IPL_TYPE_CCW) {
+ reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
reipl_block_ccw_fill_parms(reipl_block_ccw);
}
@@ -1336,9 +1356,7 @@ static struct attribute_group dump_fcp_attr_group = {
};
/* CCW dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
- dump_block_ccw->ipl_info.ccw.devno);
+DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
static struct attribute *dump_ccw_attrs[] = {
&sys_dump_ccw_device_attr.attr,
@@ -1418,8 +1436,8 @@ static void __dump_run(void *unused)
switch (dump_method) {
case DUMP_METHOD_CCW_CIO:
+ devid.ssid = dump_block_ccw->ipl_info.ccw.ssid;
devid.devno = dump_block_ccw->ipl_info.ccw.devno;
- devid.ssid = 0;
reipl_ccw_dev(&devid);
break;
case DUMP_METHOD_CCW_VM:
@@ -1939,14 +1957,14 @@ void __init setup_ipl(void)
ipl_info.type = get_ipl_type();
switch (ipl_info.type) {
case IPL_TYPE_CCW:
+ ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
ipl_info.data.ccw.dev_id.devno = ipl_devno;
- ipl_info.data.ccw.dev_id.ssid = 0;
break;
case IPL_TYPE_FCP:
case IPL_TYPE_FCP_DUMP:
+ ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.dev_id.devno =
IPL_PARMBLOCK_START->ipl_info.fcp.devno;
- ipl_info.data.fcp.dev_id.ssid = 0;
ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
break;
@@ -1978,6 +1996,7 @@ void __init ipl_save_parameters(void)
if (cio_get_iplinfo(&iplinfo))
return;
+ ipl_ssid = iplinfo.ssid;
ipl_devno = iplinfo.devno;
ipl_flags |= IPL_DEVNO_VALID;
if (!iplinfo.is_qdio)
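Unrolled from the IPL_ATTR_CCW_STORE_FN macro above, the sysfs store now accepts a full bus ID instead of a bare device number; the same parse as a plain helper (the helper name is illustrative):

/* illustrative: parse "0.<ssid>.<devno>" as written to the sysfs attribute */
static int parse_ccw_busid(const char *buf, u8 *ssid, u16 *devno)
{
	unsigned long long s, d;

	if (sscanf(buf, "0.%llx.%llx\n", &s, &d) != 2)
		return -EINVAL;
	if (s > __MAX_SSID || d > __MAX_SUBCHANNEL)
		return -EINVAL;
	*ssid = s;
	*devno = d;
	return 0;
}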
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 688a3aad9c79..114ee8b96f17 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,11 +243,7 @@ unsigned long arch_align_stack(unsigned long sp)
static inline unsigned long brk_rnd(void)
{
- /* 8MB for 32bit, 1GB for 64bit */
- if (is_32bit_task())
- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
- else
- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+ return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
}
unsigned long arch_randomize_brk(struct mm_struct *mm)
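The new constant is meant to preserve the old 64-bit behaviour; a sketch of the arithmetic, assuming BRK_RND_MASK keeps the old value 0x3ffffUL and PAGE_SHIFT is 12:

/* 0x3ffff = 2^18 - 1, so the brk gap is uniform over 2^18 * 4 KiB = 1 GiB */
static inline unsigned long brk_rnd_sketch(unsigned int random)
{
	return (random & 0x3ffffUL) << 12;
}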
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index fa0bdff1d413..9fe7781a45cd 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -21,7 +21,7 @@ static void _sclp_wait_int(void)
__ctl_load(cr0_new, 0, 0);
psw_ext_save = S390_lowcore.external_new_psw;
- psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA);
+ psw_mask = __extract_psw();
S390_lowcore.external_new_psw.mask = psw_mask;
psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
S390_lowcore.ext_int_code = 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ce0cbd6ba7ca..c837bcacf218 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -764,9 +764,6 @@ static int __init setup_hwcaps(void)
get_cpu_id(&cpu_id);
add_device_randomness(&cpu_id, sizeof(cpu_id));
switch (cpu_id.machine) {
- case 0x9672:
- strcpy(elf_platform, "g5");
- break;
case 0x2064:
case 0x2066:
default: /* Use "z900" as default for 64 bit kernels. */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 8c56929c8d82..5378c3ea1b98 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -382,3 +382,4 @@ SYSCALL(sys_sendmsg,compat_sys_sendmsg) /* 370 */
SYSCALL(sys_recvfrom,compat_sys_recvfrom)
SYSCALL(sys_recvmsg,compat_sys_recvmsg)
SYSCALL(sys_shutdown,sys_shutdown)
+SYSCALL(sys_mlock2,compat_sys_mlock2)
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 73239bb576c4..21a5df99552b 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -9,11 +9,11 @@
#define CREATE_TRACE_POINTS
#include <asm/trace/diag.h>
-EXPORT_TRACEPOINT_SYMBOL(diagnose);
+EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
-void trace_diagnose_norecursion(int diag_nr)
+void trace_s390_diagnose_norecursion(int diag_nr)
{
unsigned long flags;
unsigned int *depth;
@@ -22,7 +22,7 @@ void trace_diagnose_norecursion(int diag_nr)
depth = this_cpu_ptr(&diagnose_trace_depth);
if (*depth == 0) {
(*depth)++;
- trace_diagnose(diag_nr);
+ trace_s390_diagnose(diag_nr);
(*depth)--;
}
local_irq_restore(flags);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c3c07d3505ba..c722400c7697 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -48,37 +48,13 @@ EXPORT_SYMBOL(zero_page_mask);
static void __init setup_zero_pages(void)
{
- struct cpuid cpu_id;
unsigned int order;
struct page *page;
int i;
- get_cpu_id(&cpu_id);
- switch (cpu_id.machine) {
- case 0x9672: /* g5 */
- case 0x2064: /* z900 */
- case 0x2066: /* z900 */
- case 0x2084: /* z990 */
- case 0x2086: /* z990 */
- case 0x2094: /* z9-109 */
- case 0x2096: /* z9-109 */
- order = 0;
- break;
- case 0x2097: /* z10 */
- case 0x2098: /* z10 */
- case 0x2817: /* z196 */
- case 0x2818: /* z196 */
- order = 2;
- break;
- case 0x2827: /* zEC12 */
- case 0x2828: /* zEC12 */
- order = 5;
- break;
- case 0x2964: /* z13 */
- default:
- order = 7;
- break;
- }
+ /* Latest machines require a mapping granularity of 512KB */
+ order = 7;
+
/* Limit number of empty zero pages for small memory sizes */
while (order > 2 && (totalram_pages >> 10) < (1UL << order))
order--;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 6e552af08c76..ea01477b4aa6 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -31,9 +31,6 @@
#include <linux/security.h>
#include <asm/pgalloc.h>
-unsigned long mmap_rnd_mask;
-static unsigned long mmap_align_mask;
-
static unsigned long stack_maxrandom_size(void)
{
if (!(current->flags & PF_RANDOMIZE))
@@ -62,10 +59,7 @@ static inline int mmap_is_legacy(void)
unsigned long arch_mmap_rnd(void)
{
- if (is_32bit_task())
- return (get_random_int() & 0x7ff) << PAGE_SHIFT;
- else
- return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
+ return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
}
static unsigned long mmap_base_legacy(unsigned long rnd)
@@ -92,7 +86,6 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
struct vm_unmapped_area_info info;
- int do_color_align;
if (len > TASK_SIZE - mmap_min_addr)
return -ENOMEM;
@@ -108,15 +101,14 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
return addr;
}
- do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_color_align = !is_32bit_task();
-
info.flags = 0;
info.length = len;
info.low_limit = mm->mmap_base;
info.high_limit = TASK_SIZE;
- info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ if (filp || (flags & MAP_SHARED))
+ info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+ else
+ info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
return vm_unmapped_area(&info);
}
@@ -130,7 +122,6 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
struct mm_struct *mm = current->mm;
unsigned long addr = addr0;
struct vm_unmapped_area_info info;
- int do_color_align;
/* requested length too big for entire address space */
if (len > TASK_SIZE - mmap_min_addr)
@@ -148,15 +139,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
return addr;
}
- do_color_align = 0;
- if (filp || (flags & MAP_SHARED))
- do_color_align = !is_32bit_task();
-
info.flags = VM_UNMAPPED_AREA_TOPDOWN;
info.length = len;
info.low_limit = max(PAGE_SIZE, mmap_min_addr);
info.high_limit = mm->mmap_base;
- info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+ if (filp || (flags & MAP_SHARED))
+ info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+ else
+ info.align_mask = 0;
info.align_offset = pgoff << PAGE_SHIFT;
addr = vm_unmapped_area(&info);
@@ -254,35 +244,3 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
mm->get_unmapped_area = s390_get_unmapped_area_topdown;
}
}
-
-static int __init setup_mmap_rnd(void)
-{
- struct cpuid cpu_id;
-
- get_cpu_id(&cpu_id);
- switch (cpu_id.machine) {
- case 0x9672:
- case 0x2064:
- case 0x2066:
- case 0x2084:
- case 0x2086:
- case 0x2094:
- case 0x2096:
- case 0x2097:
- case 0x2098:
- case 0x2817:
- case 0x2818:
- case 0x2827:
- case 0x2828:
- mmap_rnd_mask = 0x7ffUL;
- mmap_align_mask = 0UL;
- break;
- case 0x2964: /* z13 */
- default:
- mmap_rnd_mask = 0x3ff80UL;
- mmap_align_mask = 0x7fUL;
- break;
- }
- return 0;
-}
-early_initcall(setup_mmap_rnd);
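The per-machine mmap_align_mask is gone in favour of a constant; what the mask buys is cache colouring: vm_unmapped_area() picks addresses whose low bits match the file offset's. A sketch of that invariant, assuming MMAP_ALIGN_MASK stays 0x7f (512 KiB granularity):

/* sketch: a candidate address is acceptable for a shared mapping iff
 * its 512 KiB colour matches the colour of the file offset */
static int colour_ok(unsigned long addr, unsigned long pgoff)
{
	unsigned long mask = 0x7fUL << 12;	/* MMAP_ALIGN_MASK << PAGE_SHIFT */

	return (addr & mask) == ((pgoff << 12) & mask);
}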
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37d10f74425a..d348f2c09a1e 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -33,7 +33,7 @@ unsigned long *dma_alloc_cpu_table(void)
return NULL;
for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
- *entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
+ *entry = ZPCI_TABLE_INVALID;
return table;
}
@@ -51,7 +51,7 @@ static unsigned long *dma_alloc_page_table(void)
return NULL;
for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
- *entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
+ *entry = ZPCI_PTE_INVALID;
return table;
}
@@ -95,7 +95,7 @@ static unsigned long *dma_get_page_table_origin(unsigned long *entry)
return pto;
}
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
{
unsigned long *sto, *pto;
unsigned int rtx, sx, px;
@@ -114,20 +114,10 @@ static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr
return &pto[px];
}
-void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
- dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
{
- unsigned long *entry;
-
- entry = dma_walk_cpu_trans(dma_table, dma_addr);
- if (!entry) {
- WARN_ON_ONCE(1);
- return;
- }
-
if (flags & ZPCI_PTE_INVALID) {
invalidate_pt_entry(entry);
- return;
} else {
set_pt_pfaa(entry, page_addr);
validate_pt_entry(entry);
@@ -146,18 +136,25 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
u8 *page_addr = (u8 *) (pa & PAGE_MASK);
dma_addr_t start_dma_addr = dma_addr;
unsigned long irq_flags;
+ unsigned long *entry;
int i, rc = 0;
if (!nr_pages)
return -EINVAL;
spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
- if (!zdev->dma_table)
+ if (!zdev->dma_table) {
+ rc = -EINVAL;
goto no_refresh;
+ }
for (i = 0; i < nr_pages; i++) {
- dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
- flags);
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto undo_cpu_trans;
+ }
+ dma_update_cpu_trans(entry, page_addr, flags);
page_addr += PAGE_SIZE;
dma_addr += PAGE_SIZE;
}
@@ -176,6 +173,18 @@ static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
nr_pages * PAGE_SIZE);
+undo_cpu_trans:
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+ flags = ZPCI_PTE_INVALID;
+ while (i-- > 0) {
+ page_addr -= PAGE_SIZE;
+ dma_addr -= PAGE_SIZE;
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry)
+ break;
+ dma_update_cpu_trans(entry, page_addr, flags);
+ }
+ }
no_refresh:
spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -260,6 +269,16 @@ out:
spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
}
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
+{
+ struct {
+ unsigned long rc;
+ unsigned long addr;
+ } __packed data = {rc, addr};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction direction,
@@ -270,33 +289,40 @@ static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
unsigned long pa = page_to_phys(page) + offset;
int flags = ZPCI_PTE_VALID;
dma_addr_t dma_addr;
+ int ret;
/* This rounds up number of pages based on size and offset */
nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
- if (iommu_page_index == -1)
+ if (iommu_page_index == -1) {
+ ret = -ENOSPC;
goto out_err;
+ }
/* Use rounded up size */
size = nr_pages * PAGE_SIZE;
dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
- if (dma_addr + size > zdev->end_dma)
+ if (dma_addr + size > zdev->end_dma) {
+ ret = -ERANGE;
goto out_free;
+ }
if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
flags |= ZPCI_TABLE_PROTECTED;
- if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
- atomic64_add(nr_pages, &zdev->mapped_pages);
- return dma_addr + (offset & ~PAGE_MASK);
- }
+ ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+ if (ret)
+ goto out_free;
+
+ atomic64_add(nr_pages, &zdev->mapped_pages);
+ return dma_addr + (offset & ~PAGE_MASK);
out_free:
dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
zpci_err("map error:\n");
- zpci_err_hex(&pa, sizeof(pa));
+ zpci_err_dma(ret, pa);
return DMA_ERROR_CODE;
}
@@ -306,14 +332,16 @@ static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
{
struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
unsigned long iommu_page_index;
- int npages;
+ int npages, ret;
npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
dma_addr = dma_addr & PAGE_MASK;
- if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
- ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+ ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+ ZPCI_PTE_INVALID);
+ if (ret) {
zpci_err("unmap error:\n");
- zpci_err_hex(&dma_addr, sizeof(dma_addr));
+ zpci_err_dma(ret, dma_addr);
+ return;
}
atomic64_add(npages, &zdev->unmapped_pages);
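The undo_cpu_trans path above is the heart of the rework: entries are now created one at a time, so a failed walk or refresh must unwind exactly the entries already written. The shape of that idiom, with hypothetical map_one()/unmap_one() helpers standing in for dma_walk_cpu_trans() plus dma_update_cpu_trans():

static int map_one(unsigned long dma_addr);	/* hypothetical */
static void unmap_one(unsigned long dma_addr);	/* hypothetical */

static int map_range(unsigned long dma_addr, int nr_pages)
{
	int i;

	for (i = 0; i < nr_pages; i++)
		if (map_one(dma_addr + i * PAGE_SIZE))
			goto undo;
	return 0;
undo:
	while (i-- > 0)	/* i counts the entries that succeeded */
		unmap_one(dma_addr + i * PAGE_SIZE);
	return -ENOMEM;
}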
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9f3905697f12..690b4027e17c 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -35,7 +35,7 @@
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_PERFCTR1 0x000000c2
#define MSR_FSB_FREQ 0x000000cd
-#define MSR_NHM_PLATFORM_INFO 0x000000ce
+#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_NHM_SNB_PKG_CST_CFG_CTL 0x000000e2
#define NHM_C3_AUTO_DEMOTE (1UL << 25)
@@ -44,7 +44,6 @@
#define SNB_C1_AUTO_UNDEMOTE (1UL << 27)
#define SNB_C3_AUTO_UNDEMOTE (1UL << 28)
-#define MSR_PLATFORM_INFO 0x000000ce
#define MSR_MTRRcap 0x000000fe
#define MSR_IA32_BBL_CR_CTL 0x00000119
#define MSR_IA32_BBL_CR_CTL3 0x0000011e
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4ddd780aeac9..c2b7522cbf35 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -273,10 +273,9 @@ __setup("nosmap", setup_disable_smap);
static __always_inline void setup_smap(struct cpuinfo_x86 *c)
{
- unsigned long eflags;
+ unsigned long eflags = native_save_fl();
/* This should have been cleared long ago */
- raw_local_save_flags(eflags);
BUG_ON(eflags & X86_EFLAGS_AC);
if (cpu_has(c, X86_FEATURE_SMAP)) {
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index ef29b742cea7..31c6a60505e6 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -385,20 +385,19 @@ fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
*/
void fpu__init_prepare_fx_sw_frame(void)
{
- int fsave_header_size = sizeof(struct fregs_state);
int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
- if (config_enabled(CONFIG_X86_32))
- size += fsave_header_size;
-
fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
fx_sw_reserved.extended_size = size;
fx_sw_reserved.xfeatures = xfeatures_mask;
fx_sw_reserved.xstate_size = xstate_size;
- if (config_enabled(CONFIG_IA32_EMULATION)) {
+ if (config_enabled(CONFIG_IA32_EMULATION) ||
+ config_enabled(CONFIG_X86_32)) {
+ int fsave_header_size = sizeof(struct fregs_state);
+
fx_sw_reserved_ia32 = fx_sw_reserved;
- fx_sw_reserved_ia32.extended_size += fsave_header_size;
+ fx_sw_reserved_ia32.extended_size = size + fsave_header_size;
}
}
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index 6454f2731b56..70fc312221fc 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -694,7 +694,6 @@ void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
if (!boot_cpu_has(X86_FEATURE_XSAVE))
return NULL;
- xsave = &current->thread.fpu.state.xsave;
/*
* We should not ever be requesting features that we
* have not enabled. Remember that pcntxt_mask is
diff --git a/arch/x86/kernel/mcount_64.S b/arch/x86/kernel/mcount_64.S
index 94ea120fa21f..87e1762e2bca 100644
--- a/arch/x86/kernel/mcount_64.S
+++ b/arch/x86/kernel/mcount_64.S
@@ -278,6 +278,12 @@ trace:
/* save_mcount_regs fills in first two parameters */
save_mcount_regs
+ /*
+ * When DYNAMIC_FTRACE is not defined, ARCH_SUPPORTS_FTRACE_OPS is not
+ * set (see include/asm/ftrace.h and include/linux/ftrace.h). Only the
+ * ip and parent ip are used and the list function is called when
+ * function tracing is enabled.
+ */
call *ftrace_trace_function
restore_mcount_regs
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b0ae85f90f10..1202d5ca2fb5 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -586,6 +586,29 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
}
/*
+ * We only want to do a 4-byte get_user() on 32-bit. Otherwise,
+ * we might run off the end of the bounds table if we are on
+ * a 64-bit kernel and try to get 8 bytes.
+ */
+int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret,
+ long __user *bd_entry_ptr)
+{
+ u32 bd_entry_32;
+ int ret;
+
+ if (is_64bit_mm(mm))
+ return get_user(*bd_entry_ret, bd_entry_ptr);
+
+ /*
+ * Note that get_user() uses the type of the *pointer* to
+ * establish the size of the get, not the destination.
+ */
+ ret = get_user(bd_entry_32, (u32 __user *)bd_entry_ptr);
+ *bd_entry_ret = bd_entry_32;
+ return ret;
+}
+
+/*
* Get the base of bounds tables pointed by specific bounds
* directory entry.
*/
@@ -605,7 +628,7 @@ static int get_bt_addr(struct mm_struct *mm,
int need_write = 0;
pagefault_disable();
- ret = get_user(bd_entry, bd_entry_ptr);
+ ret = get_user_bd_entry(mm, &bd_entry, bd_entry_ptr);
pagefault_enable();
if (!ret)
break;
@@ -700,11 +723,23 @@ static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
*/
static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
{
- unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
- if (is_64bit_mm(mm))
- return virt_space / MPX_BD_NR_ENTRIES_64;
- else
- return virt_space / MPX_BD_NR_ENTRIES_32;
+ unsigned long long virt_space;
+ unsigned long long GB = (1ULL << 30);
+
+ /*
+ * This covers 32-bit emulation as well as 32-bit kernels
+	 * running on 64-bit hardware.
+ */
+ if (!is_64bit_mm(mm))
+ return (4ULL * GB) / MPX_BD_NR_ENTRIES_32;
+
+ /*
+ * 'x86_virt_bits' returns what the hardware is capable
+	 * of, and returns the full >32-bit address space when
+ * running 32-bit kernels on 64-bit hardware.
+ */
+ virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
+ return virt_space / MPX_BD_NR_ENTRIES_64;
}
/*