Diffstat (limited to 'arch/tile')
 arch/tile/Kconfig                           | 17
 arch/tile/include/asm/Kbuild                |  1
 arch/tile/include/asm/atomic_32.h           | 28
 arch/tile/include/asm/atomic_64.h           | 40
 arch/tile/include/asm/elf.h                 |  4
 arch/tile/include/asm/io.h                  |  1
 arch/tile/include/asm/switch_to.h           |  8
 arch/tile/include/asm/syscall.h             | 28
 arch/tile/include/uapi/arch/opcode_tilegx.h |  6
 arch/tile/kernel/intvec_32.S                |  1
 arch/tile/kernel/intvec_64.S                |  1
 arch/tile/kernel/pci_gx.c                   |  5
 arch/tile/kernel/process.c                  |  5
 arch/tile/kernel/ptrace.c                   |  3
 arch/tile/kernel/sysfs.c                    | 11
 arch/tile/kernel/time.c                     |  8
 arch/tile/kernel/vdso/Makefile              |  4
 arch/tile/lib/atomic_32.c                   | 23
 arch/tile/lib/atomic_asm_32.S               |  4
 arch/tile/mm/init.c                         |  2
20 files changed, 173 insertions, 27 deletions
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 9def1f52d03a..2ba12d761723 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -32,6 +32,7 @@ config TILE
 	select EDAC_SUPPORT
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_ARCH_SECCOMP_FILTER
 
 	# FIXME: investigate whether we need/want these options.
 	# select HAVE_IOREMAP_PROT
@@ -221,6 +222,22 @@ config COMPAT
 	  If enabled, the kernel will support running TILE-Gx binaries
 	  that were built with the -m32 option.
 
+config SECCOMP
+	bool "Enable seccomp to safely compute untrusted bytecode"
+	depends on PROC_FS
+	help
+	  This kernel feature is useful for number crunching applications
+	  that may need to compute untrusted bytecode during their
+	  execution. By using pipes or other transports made available to
+	  the process as file descriptors supporting the read/write
+	  syscalls, it's possible to isolate those applications in
+	  their own address space using seccomp. Once seccomp is
+	  enabled via prctl, it cannot be disabled and the task is only
+	  allowed to execute a few safe syscalls defined by each seccomp
+	  mode.
+
+	  If unsure, say N.
+
 config SYSVIPC_COMPAT
 	def_bool y
 	depends on COMPAT && SYSVIPC
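For context on the new CONFIG_SECCOMP option: the help text above says strict-mode seccomp is entered via prctl and cannot then be disabled. A minimal user-space sketch of that flow (not part of this patch; standard Linux API, using SYS_exit directly because glibc's _exit() may invoke exit_group(), which strict mode does not allow):

#include <stdio.h>
#include <unistd.h>
#include <sys/prctl.h>
#include <sys/syscall.h>
#include <linux/seccomp.h>

int main(void)
{
	/* From here on, only read, write, exit, and sigreturn are allowed. */
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT) != 0) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}
	/* read()/write() on already-open descriptors still work... */
	write(STDOUT_FILENO, "sandboxed\n", 10);
	/* ...but any other syscall delivers SIGKILL, so exit the raw way. */
	syscall(SYS_exit, 0);
}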
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index d8a843163471..ba35c41c71ff 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -28,6 +28,7 @@ generic-y += poll.h
 generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += seccomp.h
 generic-y += sembuf.h
 generic-y += serial.h
 generic-y += shmbuf.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 1b109fad9fff..d320ce253d86 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -34,6 +34,19 @@ static inline void atomic_add(int i, atomic_t *v)
 	_atomic_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC_OP(op) \
+unsigned long _atomic_##op(volatile unsigned long *p, unsigned long mask); \
+static inline void atomic_##op(int i, atomic_t *v) \
+{ \
+	_atomic_##op((unsigned long *)&v->counter, i); \
+}
+
+ATOMIC_OP(and)
+ATOMIC_OP(or)
+ATOMIC_OP(xor)
+
+#undef ATOMIC_OP
+
 /**
  * atomic_add_return - add integer and return
  * @v: pointer of type atomic_t
@@ -113,6 +126,17 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 	_atomic64_xchg_add(&v->counter, i);
 }
 
+#define ATOMIC64_OP(op) \
+long long _atomic64_##op(long long *v, long long n); \
+static inline void atomic64_##op(long long i, atomic64_t *v) \
+{ \
+	_atomic64_##op(&v->counter, i); \
+}
+
+ATOMIC64_OP(and)
+ATOMIC64_OP(or)
+ATOMIC64_OP(xor)
+
 /**
  * atomic64_add_return - add integer and return
  * @v: pointer of type atomic64_t
@@ -225,6 +249,7 @@ extern struct __get_user __atomic_xchg_add(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xchg_add_unless(volatile int *p,
 						  int *lock, int o, int n);
 extern struct __get_user __atomic_or(volatile int *p, int *lock, int n);
+extern struct __get_user __atomic_and(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_andn(volatile int *p, int *lock, int n);
 extern struct __get_user __atomic_xor(volatile int *p, int *lock, int n);
 extern long long __atomic64_cmpxchg(volatile long long *p, int *lock,
@@ -234,6 +259,9 @@ extern long long __atomic64_xchg_add(volatile long long *p, int *lock,
 					long long n);
 extern long long __atomic64_xchg_add_unless(volatile long long *p,
 					int *lock, long long o, long long n);
+extern long long __atomic64_and(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_or(volatile long long *p, int *lock, long long n);
+extern long long __atomic64_xor(volatile long long *p, int *lock, long long n);
 
 /* Return failure from the atomic wrappers. */
 struct __get_user __atomic_bad_address(int __user *addr);
diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
index 0496970cef82..096a56d6ead4 100644
--- a/arch/tile/include/asm/atomic_64.h
+++ b/arch/tile/include/asm/atomic_64.h
@@ -58,6 +58,26 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 	return oldval;
 }
 
+static inline void atomic_and(int i, atomic_t *v)
+{
+	__insn_fetchand4((void *)&v->counter, i);
+}
+
+static inline void atomic_or(int i, atomic_t *v)
+{
+	__insn_fetchor4((void *)&v->counter, i);
+}
+
+static inline void atomic_xor(int i, atomic_t *v)
+{
+	int guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch4(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 /* Now the true 64-bit operations. */
 
 #define ATOMIC64_INIT(i)	{ (i) }
@@ -91,6 +111,26 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 	return oldval != u;
 }
 
+static inline void atomic64_and(long i, atomic64_t *v)
+{
+	__insn_fetchand((void *)&v->counter, i);
+}
+
+static inline void atomic64_or(long i, atomic64_t *v)
+{
+	__insn_fetchor((void *)&v->counter, i);
+}
+
+static inline void atomic64_xor(long i, atomic64_t *v)
+{
+	long guess, oldval = v->counter;
+	do {
+		guess = oldval;
+		__insn_mtspr(SPR_CMPEXCH_VALUE, guess);
+		oldval = __insn_cmpexch(&v->counter, guess ^ i);
+	} while (guess != oldval);
+}
+
 #define atomic64_sub_return(i, v)	atomic64_add_return(-(i), (v))
 #define atomic64_sub(i, v)		atomic64_add(-(i), (v))
 #define atomic64_inc_return(v)		atomic64_add_return(1, (v))
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 41d9878a9686..c505d77e4d06 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -22,6 +22,7 @@
 #include <arch/chip.h>
 
 #include <linux/ptrace.h>
+#include <linux/elf-em.h>
 #include <asm/byteorder.h>
 #include <asm/page.h>
 
@@ -30,9 +31,6 @@ typedef unsigned long elf_greg_t;
 #define ELF_NGREG (sizeof(struct pt_regs) / sizeof(elf_greg_t))
 typedef elf_greg_t elf_gregset_t[ELF_NGREG];
 
-#define EM_TILEPRO 188
-#define EM_TILEGX 191
-
 /* Provide a nominal data structure. */
 #define ELF_NFPREG 0
 typedef double elf_fpreg_t;
diff --git a/arch/tile/include/asm/io.h b/arch/tile/include/asm/io.h
index dc61de15c1f9..322b5fe94781 100644
--- a/arch/tile/include/asm/io.h
+++ b/arch/tile/include/asm/io.h
@@ -55,6 +55,7 @@ extern void iounmap(volatile void __iomem *addr);
 #define ioremap_nocache(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_wc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_wt(physaddr, size)		ioremap(physaddr, size)
+#define ioremap_uc(physaddr, size)		ioremap(physaddr, size)
 #define ioremap_fullcache(physaddr, size)	ioremap(physaddr, size)
 
 #define mmiowb()
diff --git a/arch/tile/include/asm/switch_to.h b/arch/tile/include/asm/switch_to.h
index b8f888cbe6b0..34ee72705521 100644
--- a/arch/tile/include/asm/switch_to.h
+++ b/arch/tile/include/asm/switch_to.h
@@ -53,15 +53,13 @@ extern unsigned long get_switch_to_pc(void);
  * Kernel threads can check to see if they need to migrate their
  * stack whenever they return from a context switch; for user
  * threads, we defer until they are returning to user-space.
+ * We defer homecache migration until the runqueue lock is released.
  */
-#define finish_arch_switch(prev) do {                                     \
-	if (unlikely((prev)->state == TASK_DEAD))                         \
-		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |       \
-			((prev)->pid << _SIM_CONTROL_OPERATOR_BITS));     \
+#define finish_arch_post_lock_switch() do {                               \
 	__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_SWITCH |             \
 		(current->pid << _SIM_CONTROL_OPERATOR_BITS));            \
 	if (current->mm == NULL && !kstack_hash &&                        \
-	    current_thread_info()->homecache_cpu != smp_processor_id())   \
+	    current_thread_info()->homecache_cpu != raw_smp_processor_id()) \
 		homecache_migrate_kthread();                              \
 } while (0)
diff --git a/arch/tile/include/asm/syscall.h b/arch/tile/include/asm/syscall.h
index 9644b88f133d..373d73064ea1 100644
--- a/arch/tile/include/asm/syscall.h
+++ b/arch/tile/include/asm/syscall.h
@@ -20,6 +20,8 @@
 
 #include <linux/sched.h>
 #include <linux/err.h>
+#include <linux/audit.h>
+#include <linux/compat.h>
 #include <arch/abi.h>
 
 /* The array of function pointers for syscalls. */
@@ -61,7 +63,15 @@ static inline void syscall_set_return_value(struct task_struct *task,
 					    struct pt_regs *regs,
 					    int error, long val)
 {
-	regs->regs[0] = (long) error ?: val;
+	if (error) {
+		/* R0 is the passed-in negative error, R1 is positive. */
+		regs->regs[0] = error;
+		regs->regs[1] = -error;
+	} else {
+		/* R1 set to zero to indicate no error. */
+		regs->regs[0] = val;
+		regs->regs[1] = 0;
+	}
 }
 
 static inline void syscall_get_arguments(struct task_struct *task,
@@ -82,4 +92,20 @@ static inline void syscall_set_arguments(struct task_struct *task,
 	memcpy(&regs[i], args, n * sizeof(args[0]));
 }
 
+/*
+ * We don't care about endianness (__AUDIT_ARCH_LE bit) here because
+ * tile has the same system calls both on little- and big- endian.
+ */
+static inline int syscall_get_arch(void)
+{
+	if (is_compat_task())
+		return AUDIT_ARCH_TILEGX32;
+
+#ifdef CONFIG_TILEGX
+	return AUDIT_ARCH_TILEGX;
+#else
+	return AUDIT_ARCH_TILEPRO;
+#endif
+}
+
 #endif	/* _ASM_TILE_SYSCALL_H */
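The new syscall_get_arch() above is what the seccomp/audit core reports to filters in seccomp_data->arch, which is how a BPF filter can refuse to run under an unexpected ABI. A minimal user-space sketch of such a filter (illustrative, not from this patch; install_filter() is a hypothetical helper, and it assumes AUDIT_ARCH_TILEGX is exported by <linux/audit.h>):

#include <stddef.h>
#include <sys/prctl.h>
#include <linux/audit.h>
#include <linux/filter.h>
#include <linux/seccomp.h>

int install_filter(void)
{
	struct sock_filter prog[] = {
		/* Load seccomp_data->arch and check the ABI. */
		BPF_STMT(BPF_LD | BPF_W | BPF_ABS,
			 offsetof(struct seccomp_data, arch)),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, AUDIT_ARCH_TILEGX, 1, 0),
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_KILL),
		/* Arch matched: this toy filter then allows everything. */
		BPF_STMT(BPF_RET | BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog fprog = {
		.len = (unsigned short)(sizeof(prog) / sizeof(prog[0])),
		.filter = prog,
	};

	/* Unprivileged filters require no-new-privs first. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0))
		return -1;
	return prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &fprog);
}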
diff --git a/arch/tile/include/uapi/arch/opcode_tilegx.h b/arch/tile/include/uapi/arch/opcode_tilegx.h
index d76ff2db745e..9e46eaa847d4 100644
--- a/arch/tile/include/uapi/arch/opcode_tilegx.h
+++ b/arch/tile/include/uapi/arch/opcode_tilegx.h
@@ -830,11 +830,11 @@ enum
   ADDX_RRR_0_OPCODE_X0 = 2,
   ADDX_RRR_0_OPCODE_X1 = 2,
   ADDX_RRR_0_OPCODE_Y0 = 0,
-  ADDX_SPECIAL_0_OPCODE_Y1 = 0,
+  ADDX_RRR_0_OPCODE_Y1 = 0,
   ADD_RRR_0_OPCODE_X0 = 3,
   ADD_RRR_0_OPCODE_X1 = 3,
   ADD_RRR_0_OPCODE_Y0 = 1,
-  ADD_SPECIAL_0_OPCODE_Y1 = 1,
+  ADD_RRR_0_OPCODE_Y1 = 1,
   ANDI_IMM8_OPCODE_X0 = 3,
   ANDI_IMM8_OPCODE_X1 = 3,
   ANDI_OPCODE_Y0 = 2,
@@ -995,6 +995,7 @@ enum
   LD4U_ADD_IMM8_OPCODE_X1 = 12,
   LD4U_OPCODE_Y2 = 2,
   LD4U_UNARY_OPCODE_X1 = 20,
+  LDNA_ADD_IMM8_OPCODE_X1 = 21,
   LDNA_UNARY_OPCODE_X1 = 21,
   LDNT1S_ADD_IMM8_OPCODE_X1 = 13,
   LDNT1S_UNARY_OPCODE_X1 = 22,
@@ -1015,7 +1016,6 @@ enum
   LD_UNARY_OPCODE_X1 = 29,
   LNK_UNARY_OPCODE_X1 = 30,
   LNK_UNARY_OPCODE_Y1 = 14,
-  LWNA_ADD_IMM8_OPCODE_X1 = 21,
   MFSPR_IMM8_OPCODE_X1 = 22,
   MF_UNARY_OPCODE_X1 = 31,
   MM_BF_OPCODE_X0 = 7,
diff --git a/arch/tile/kernel/intvec_32.S b/arch/tile/kernel/intvec_32.S
index cdbda45a4e4b..fbbe2ea882ea 100644
--- a/arch/tile/kernel/intvec_32.S
+++ b/arch/tile/kernel/intvec_32.S
@@ -1224,6 +1224,7 @@ handle_syscall:
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	blz     r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 800b91d3f9dc..58964d209d4d 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -1247,6 +1247,7 @@ handle_syscall:
 	 jal    do_syscall_trace_enter
 	}
 	FEEDBACK_REENTER(handle_syscall)
+	bltz    r0, .Lsyscall_sigreturn_skip
 
 	/*
 	 * We always reload our registers from the stack at this
diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c
index b1df847d0686..b3f73fd764a3 100644
--- a/arch/tile/kernel/pci_gx.c
+++ b/arch/tile/kernel/pci_gx.c
@@ -304,11 +304,12 @@ static struct irq_chip tilegx_legacy_irq_chip = {
  * to Linux which just calls handle_level_irq() after clearing the
  * MAC INTx Assert status bit associated with this interrupt.
  */
-static void trio_handle_level_irq(unsigned int irq, struct irq_desc *desc)
+static void trio_handle_level_irq(unsigned int __irq, struct irq_desc *desc)
 {
 	struct pci_controller *controller = irq_desc_get_handler_data(desc);
 	gxio_trio_context_t *trio_context = controller->trio;
 	uint64_t intx = (uint64_t)irq_desc_get_chip_data(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
 	int mac = controller->mac;
 	unsigned int reg_offset;
 	uint64_t level_mask;
@@ -1442,7 +1443,7 @@ static struct pci_ops tile_cfg_ops = {
 /* MSI support starts here. */
 static unsigned int tilegx_msi_startup(struct irq_data *d)
 {
-	if (d->msi_desc)
+	if (irq_data_get_msi_desc(d))
 		pci_msi_unmask_irq(d);
 
 	return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index a45213781ad0..7d5769310bef 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -446,6 +446,11 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
 	hardwall_switch_tasks(prev, next);
 #endif
 
+	/* Notify the simulator of task exit. */
+	if (unlikely(prev->state == TASK_DEAD))
+		__insn_mtspr(SPR_SIM_CONTROL, SIM_CONTROL_OS_EXIT |
+			     (prev->pid << _SIM_CONTROL_OPERATOR_BITS));
+
 	/*
 	 * Switch kernel SP, PC, and callee-saved registers.
 	 * In the context of the new task, return the old task pointer
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index f84eed8243da..bdc126faf741 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -262,6 +262,9 @@ int do_syscall_trace_enter(struct pt_regs *regs)
 	if (work & _TIF_NOHZ)
 		user_exit();
 
+	if (secure_computing() == -1)
+		return -1;
+
 	if (work & _TIF_SYSCALL_TRACE) {
 		if (tracehook_report_syscall_entry(regs))
 			regs->regs[TREG_SYSCALL_NR] = -1;
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index a3ed12f8f83b..825867c53853 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -198,16 +198,13 @@ static int hv_stats_device_add(struct device *dev, struct subsys_interface *sif)
 	return err;
 }
 
-static int hv_stats_device_remove(struct device *dev,
-				  struct subsys_interface *sif)
+static void hv_stats_device_remove(struct device *dev,
+				   struct subsys_interface *sif)
 {
 	int cpu = dev->id;
 
-	if (!cpu_online(cpu))
-		return 0;
-
-	sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
-	return 0;
+	if (cpu_online(cpu))
+		sysfs_remove_file(&dev->kobj, &dev_attr_hv_stats.attr);
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 00178ecf9aea..178989e6d3e3 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -140,10 +140,10 @@ static int tile_timer_set_next_event(unsigned long ticks,
  * Whenever anyone tries to change modes, we just mask interrupts
  * and wait for the next event to get set.
  */
-static void tile_timer_set_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
+static int tile_timer_shutdown(struct clock_event_device *evt)
 {
 	arch_local_irq_mask_now(INT_TILE_TIMER);
+	return 0;
 }
 
 /*
@@ -157,7 +157,9 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
 	.rating = 100,
 	.irq = -1,
 	.set_next_event = tile_timer_set_next_event,
-	.set_mode = tile_timer_set_mode,
+	.set_state_shutdown = tile_timer_shutdown,
+	.set_state_oneshot = tile_timer_shutdown,
+	.tick_resume = tile_timer_shutdown,
 };
 
 void setup_tile_timer(void)
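The time.c change above is part of the tree-wide clockevents conversion: the single set_mode callback is replaced by per-state callbacks (set_state_shutdown, set_state_oneshot, tick_resume), each returning int. A rough sketch of the pattern for a oneshot-only device (illustrative only; the my_* names are hypothetical, and the real setup_tile_timer() supplies the actual frequency and tick limits):

#include <linux/clockchips.h>

static int my_timer_shutdown(struct clock_event_device *evt)
{
	/* Mask the timer interrupt; the hardware details are device-specific. */
	return 0;
}

static int my_timer_set_next_event(unsigned long ticks,
				   struct clock_event_device *evt)
{
	/* Program the hardware to fire 'ticks' cycles from now. */
	return 0;
}

static struct clock_event_device my_timer = {
	.name = "example-timer",
	.features = CLOCK_EVT_FEAT_ONESHOT,
	.rating = 100,
	.set_next_event = my_timer_set_next_event,
	/* One callback can back several states when the behavior is identical. */
	.set_state_shutdown = my_timer_shutdown,
	.set_state_oneshot = my_timer_shutdown,
	.tick_resume = my_timer_shutdown,
};

static void my_timer_init(u32 freq_hz)
{
	/* The core derives mult/shift and registers the device. */
	clockevents_config_and_register(&my_timer, freq_hz, 2, 0x7fffffff);
}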
diff --git a/arch/tile/kernel/vdso/Makefile b/arch/tile/kernel/vdso/Makefile
index a025f63d54cd..c54fff37b5ff 100644
--- a/arch/tile/kernel/vdso/Makefile
+++ b/arch/tile/kernel/vdso/Makefile
@@ -54,7 +54,7 @@ $(obj)/built-in.o: $(obj)/vdso-syms.o
 $(obj)/built-in.o: ld_flags += -R $(obj)/vdso-syms.o
 
 SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
-			$(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			$(call cc-ldoption, -Wl$(comma)--hash-style=both)
 SYSCFLAGS_vdso_syms.o = -r
 $(obj)/vdso-syms.o: $(src)/vdso.lds $(obj)/vrt_sigreturn.o FORCE
 	$(call if_changed,vdsold)
@@ -113,6 +113,6 @@ $(obj)/vrt_sigreturn32.o: $(obj)/vrt_sigreturn.S
 $(obj)/vdso32.o: $(obj)/vdso32.so
 
 SYSCFLAGS_vdso32.so.dbg = -m32 -shared -s -Wl,-soname=linux-vdso32.so.1 \
-			  $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+			  $(call cc-ldoption, -Wl$(comma)--hash-style=both)
 $(obj)/vdso32.so.dbg: $(src)/vdso.lds $(obj-vdso32)
 	$(call if_changed,vdsold)
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index c89b211fd9e7..298df1e9912a 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -94,6 +94,12 @@ unsigned long _atomic_or(volatile unsigned long *p, unsigned long mask)
 }
 EXPORT_SYMBOL(_atomic_or);
 
+unsigned long _atomic_and(volatile unsigned long *p, unsigned long mask)
+{
+	return __atomic_and((int *)p, __atomic_setup(p), mask).val;
+}
+EXPORT_SYMBOL(_atomic_and);
+
 unsigned long _atomic_andn(volatile unsigned long *p, unsigned long mask)
 {
 	return __atomic_andn((int *)p, __atomic_setup(p), mask).val;
@@ -136,6 +142,23 @@ long long _atomic64_cmpxchg(long long *v, long long o, long long n)
 }
 EXPORT_SYMBOL(_atomic64_cmpxchg);
 
+long long _atomic64_and(long long *v, long long n)
+{
+	return __atomic64_and(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_and);
+
+long long _atomic64_or(long long *v, long long n)
+{
+	return __atomic64_or(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_or);
+
+long long _atomic64_xor(long long *v, long long n)
+{
+	return __atomic64_xor(v, __atomic_setup(v), n);
+}
+EXPORT_SYMBOL(_atomic64_xor);
 
 /*
  * If any of the atomic or futex routines hit a bad address (not in
diff --git a/arch/tile/lib/atomic_asm_32.S b/arch/tile/lib/atomic_asm_32.S
index 6bda3132cd61..f611265633d6 100644
--- a/arch/tile/lib/atomic_asm_32.S
+++ b/arch/tile/lib/atomic_asm_32.S
@@ -178,6 +178,7 @@ atomic_op _xchg_add, 32, "add r24, r22, r2"
 atomic_op _xchg_add_unless, 32, \
 	"sne r26, r22, r2; { bbns r26, 3f; add r24, r22, r3 }"
 atomic_op _or, 32, "or r24, r22, r2"
+atomic_op _and, 32, "and r24, r22, r2"
 atomic_op _andn, 32, "nor r2, r2, zero; and r24, r22, r2"
 atomic_op _xor, 32, "xor r24, r22, r2"
 
@@ -191,6 +192,9 @@ atomic_op 64_xchg_add_unless, 64, \
 	{ bbns r26, 3f; add r24, r22, r4 }; \
 	{ bbns r27, 3f; add r25, r23, r5 }; \
 	slt_u r26, r24, r22; add r25, r25, r26"
+atomic_op 64_or, 64, "{ or r24, r22, r2; or r25, r23, r3 }"
+atomic_op 64_and, 64, "{ and r24, r22, r2; and r25, r23, r3 }"
+atomic_op 64_xor, 64, "{ xor r24, r22, r2; xor r25, r23, r3 }"
 
 	jrp     lr              /* happy backtracer */
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 5bd252e3fdc5..d4e1fc41d06d 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -863,7 +863,7 @@ void __init mem_init(void)
  * memory to the highmem for now.
  */
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-int arch_add_memory(u64 start, u64 size)
+int arch_add_memory(u64 start, u64 size, bool for_device)
 {
 	struct pglist_data *pgdata = &contig_page_data;
 	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;