author     H. Peter Anvin <hpa@linux.intel.com>    2013-04-20 18:16:44 +0200
committer  H. Peter Anvin <hpa@linux.intel.com>    2013-04-20 18:16:44 +0200
commit     f53f292eeaa234615c31a1306babe703fc4263f2 (patch)
tree       707b0933a20f7dc05495e974243a23b5c9f8c918 /arch/ia64
parent     x86, efi: Make efi_memblock_x86_reserve_range more readable (diff)
parent     efi: split efisubsystem from efivars (diff)
download   linux-f53f292eeaa234615c31a1306babe703fc4263f2.tar.xz
           linux-f53f292eeaa234615c31a1306babe703fc4263f2.zip
Merge remote-tracking branch 'efi/chainsaw' into x86/efi
Resolved Conflicts:
        drivers/firmware/efivars.c
        fs/efivarsfs/file.c

Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                      11
-rw-r--r--  arch/ia64/hp/common/aml_nfw.c           2
-rw-r--r--  arch/ia64/hp/sim/Kconfig                1
-rw-r--r--  arch/ia64/hp/sim/simserial.c           21
-rw-r--r--  arch/ia64/include/asm/acpi.h            4
-rw-r--r--  arch/ia64/include/asm/cputime.h        92
-rw-r--r--  arch/ia64/include/asm/elf.h             3
-rw-r--r--  arch/ia64/include/asm/kvm_host.h        4
-rw-r--r--  arch/ia64/include/asm/signal.h         10
-rw-r--r--  arch/ia64/include/asm/thread_info.h     4
-rw-r--r--  arch/ia64/include/asm/unistd.h          8
-rw-r--r--  arch/ia64/include/asm/xen/minstate.h    2
-rw-r--r--  arch/ia64/include/uapi/asm/socket.h     4
-rw-r--r--  arch/ia64/kernel/asm-offsets.c          2
-rw-r--r--  arch/ia64/kernel/entry.S               16
-rw-r--r--  arch/ia64/kernel/fsys.S                 4
-rw-r--r--  arch/ia64/kernel/head.S                 4
-rw-r--r--  arch/ia64/kernel/ivt.S                  8
-rw-r--r--  arch/ia64/kernel/kprobes.c              8
-rw-r--r--  arch/ia64/kernel/minstate.h             2
-rw-r--r--  arch/ia64/kernel/perfmon.c              5
-rw-r--r--  arch/ia64/kernel/process.c              8
-rw-r--r--  arch/ia64/kernel/salinfo.c              6
-rw-r--r--  arch/ia64/kernel/setup.c                1
-rw-r--r--  arch/ia64/kernel/signal.c              19
-rw-r--r--  arch/ia64/kernel/sys_ia64.c            37
-rw-r--r--  arch/ia64/kernel/time.c                 5
-rw-r--r--  arch/ia64/kernel/traps.c                2
-rw-r--r--  arch/ia64/kvm/Kconfig                   2
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c                8
-rw-r--r--  arch/ia64/kvm/lapic.h                   6
-rw-r--r--  arch/ia64/mm/contig.c                   2
-rw-r--r--  arch/ia64/mm/discontig.c                6
-rw-r--r--  arch/ia64/mm/hugetlbpage.c             20
-rw-r--r--  arch/ia64/mm/init.c                    18
-rw-r--r--  arch/ia64/pci/pci.c                     8
-rw-r--r--  arch/ia64/xen/Kconfig                   2
37 files changed, 128 insertions, 237 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 3279646120e3..9a02f71c6b1f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -26,10 +26,10 @@ config IA64
select HAVE_MEMBLOCK
select HAVE_MEMBLOCK_NODE_MAP
select HAVE_VIRT_CPU_ACCOUNTING
+ select VIRT_TO_BUS
select ARCH_DISCARD_MEMBLOCK
select GENERIC_IRQ_PROBE
select GENERIC_PENDING_IRQ if SMP
- select IRQ_PER_CPU
select GENERIC_IRQ_SHOW
select ARCH_WANT_OPTIONAL_GPIOLIB
select ARCH_HAVE_NMI_SAFE_CMPXCHG
@@ -40,6 +40,7 @@ config IA64
select ARCH_THREAD_INFO_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL_OLD
+ select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
select MODULES_USE_ELF_RELA
default y
@@ -375,8 +376,8 @@ config NR_CPUS
performance hit.
config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
- depends on SMP && EXPERIMENTAL
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
select HOTPLUG
default n
---help---
@@ -555,8 +556,8 @@ config IA64_HP_AML_NFW
source "drivers/sn/Kconfig"
config KEXEC
- bool "kexec system call (EXPERIMENTAL)"
- depends on EXPERIMENTAL && !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
+ bool "kexec system call"
+ depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU)
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c
index 6192f7188654..916ffe770bcf 100644
--- a/arch/ia64/hp/common/aml_nfw.c
+++ b/arch/ia64/hp/common/aml_nfw.c
@@ -191,7 +191,7 @@ static int aml_nfw_add(struct acpi_device *device)
return aml_nfw_add_global_handler();
}
-static int aml_nfw_remove(struct acpi_device *device, int type)
+static int aml_nfw_remove(struct acpi_device *device)
{
return aml_nfw_remove_global_handler();
}
diff --git a/arch/ia64/hp/sim/Kconfig b/arch/ia64/hp/sim/Kconfig
index 8d513a8c5266..d84707d55203 100644
--- a/arch/ia64/hp/sim/Kconfig
+++ b/arch/ia64/hp/sim/Kconfig
@@ -8,6 +8,7 @@ config HP_SIMETH
config HP_SIMSERIAL
bool "Simulated serial driver support"
+ depends on TTY
config HP_SIMSERIAL_CONSOLE
bool "Console for HP simulator"
diff --git a/arch/ia64/hp/sim/simserial.c b/arch/ia64/hp/sim/simserial.c
index fc3924d18c1f..da2f319fb71d 100644
--- a/arch/ia64/hp/sim/simserial.c
+++ b/arch/ia64/hp/sim/simserial.c
@@ -53,7 +53,7 @@ struct tty_driver *hp_simserial_driver;
static struct console *console;
-static void receive_chars(struct tty_struct *tty)
+static void receive_chars(struct tty_port *port)
{
unsigned char ch;
static unsigned char seen_esc = 0;
@@ -81,10 +81,10 @@ static void receive_chars(struct tty_struct *tty)
}
seen_esc = 0;
- if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
+ if (tty_insert_flip_char(port, ch, TTY_NORMAL) == 0)
break;
}
- tty_flip_buffer_push(tty);
+ tty_flip_buffer_push(port);
}
/*
@@ -93,18 +93,9 @@ static void receive_chars(struct tty_struct *tty)
static irqreturn_t rs_interrupt_single(int irq, void *dev_id)
{
struct serial_state *info = dev_id;
- struct tty_struct *tty = tty_port_tty_get(&info->port);
- if (!tty) {
- printk(KERN_INFO "%s: tty=0 problem\n", __func__);
- return IRQ_NONE;
- }
- /*
- * pretty simple in our case, because we only get interrupts
- * on inbound traffic
- */
- receive_chars(tty);
- tty_kref_put(tty);
+ receive_chars(&info->port);
+
return IRQ_HANDLED;
}
@@ -435,7 +426,7 @@ static int rs_open(struct tty_struct *tty, struct file * filp)
struct tty_port *port = &info->port;
tty->driver_data = info;
- tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
+ port->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
/*
* figure out which console to use (should be one already)
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 359e68a03ca3..faa1bf0da815 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -52,10 +52,6 @@
/* Asm macros */
-#define ACPI_ASM_MACROS
-#define BREAKPOINT3
-#define ACPI_DISABLE_IRQS() local_irq_disable()
-#define ACPI_ENABLE_IRQS() local_irq_enable()
#define ACPI_FLUSH_CPU_CACHE()
static inline int
diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h
index 7fcf7f08ab06..e2d3f5baf265 100644
--- a/arch/ia64/include/asm/cputime.h
+++ b/arch/ia64/include/asm/cputime.h
@@ -11,99 +11,19 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * If we have CONFIG_VIRT_CPU_ACCOUNTING, we measure cpu time in nsec.
+ * If we have CONFIG_VIRT_CPU_ACCOUNTING_NATIVE, we measure cpu time in nsec.
* Otherwise we measure cpu time in jiffies using the generic definitions.
*/
#ifndef __IA64_CPUTIME_H
#define __IA64_CPUTIME_H
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-#include <asm-generic/cputime.h>
+#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+# include <asm-generic/cputime.h>
#else
-
-#include <linux/time.h>
-#include <linux/jiffies.h>
-#include <asm/processor.h>
-
-typedef u64 __nocast cputime_t;
-typedef u64 __nocast cputime64_t;
-
-#define cputime_one_jiffy jiffies_to_cputime(1)
-
-/*
- * Convert cputime <-> jiffies (HZ)
- */
-#define cputime_to_jiffies(__ct) \
- ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies_to_cputime(__jif) \
- (__force cputime_t)((__jif) * (NSEC_PER_SEC / HZ))
-#define cputime64_to_jiffies64(__ct) \
- ((__force u64)(__ct) / (NSEC_PER_SEC / HZ))
-#define jiffies64_to_cputime64(__jif) \
- (__force cputime64_t)((__jif) * (NSEC_PER_SEC / HZ))
-
-/*
- * Convert cputime <-> microseconds
- */
-#define cputime_to_usecs(__ct) \
- ((__force u64)(__ct) / NSEC_PER_USEC)
-#define usecs_to_cputime(__usecs) \
- (__force cputime_t)((__usecs) * NSEC_PER_USEC)
-#define usecs_to_cputime64(__usecs) \
- (__force cputime64_t)((__usecs) * NSEC_PER_USEC)
-
-/*
- * Convert cputime <-> seconds
- */
-#define cputime_to_secs(__ct) \
- ((__force u64)(__ct) / NSEC_PER_SEC)
-#define secs_to_cputime(__secs) \
- (__force cputime_t)((__secs) * NSEC_PER_SEC)
-
-/*
- * Convert cputime <-> timespec (nsec)
- */
-static inline cputime_t timespec_to_cputime(const struct timespec *val)
-{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
- return (__force cputime_t) ret;
-}
-static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
-{
- val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
- val->tv_nsec = (__force u64) ct % NSEC_PER_SEC;
-}
-
-/*
- * Convert cputime <-> timeval (msec)
- */
-static inline cputime_t timeval_to_cputime(struct timeval *val)
-{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
- return (__force cputime_t) ret;
-}
-static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
-{
- val->tv_sec = (__force u64) ct / NSEC_PER_SEC;
- val->tv_usec = ((__force u64) ct % NSEC_PER_SEC) / NSEC_PER_USEC;
-}
-
-/*
- * Convert cputime <-> clock (USER_HZ)
- */
-#define cputime_to_clock_t(__ct) \
- ((__force u64)(__ct) / (NSEC_PER_SEC / USER_HZ))
-#define clock_t_to_cputime(__x) \
- (__force cputime_t)((__x) * (NSEC_PER_SEC / USER_HZ))
-
-/*
- * Convert cputime64 to clock.
- */
-#define cputime64_to_clock_t(__ct) \
- cputime_to_clock_t((__force cputime_t)__ct)
-
+# include <asm/processor.h>
+# include <asm-generic/cputime_nsecs.h>
extern void arch_vtime_task_switch(struct task_struct *tsk);
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
#endif /* __IA64_CPUTIME_H */
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index b5298eb09adb..5a83c5cc3dc8 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -201,9 +201,6 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
-#define SET_PERSONALITY(ex) \
- set_personality((current->personality & ~PER_MASK) | PER_LINUX)
-
#define elf_read_implies_exec(ex, executable_stack) \
((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index 6d6a5ac48d85..cfa74983c675 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -23,9 +23,7 @@
#ifndef __ASM_KVM_HOST_H
#define __ASM_KVM_HOST_H
-#define KVM_MEMORY_SLOTS 32
-/* memory slots that does not exposed to userspace */
-#define KVM_PRIVATE_MEM_SLOTS 4
+#define KVM_USER_MEM_SLOTS 32
#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
diff --git a/arch/ia64/include/asm/signal.h b/arch/ia64/include/asm/signal.h
index 3a1b20e74c5c..c62afa4a0dc2 100644
--- a/arch/ia64/include/asm/signal.h
+++ b/arch/ia64/include/asm/signal.h
@@ -26,16 +26,6 @@ typedef struct {
unsigned long sig[_NSIG_WORDS];
} sigset_t;
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
# include <asm/sigcontext.h>
# endif /* !__ASSEMBLY__ */
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index ff2ae4136584..020d655ed082 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -31,7 +31,7 @@ struct thread_info {
mm_segment_t addr_limit; /* user-level address space limit */
int preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
struct restart_block restart_block;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
__u64 ac_stamp;
__u64 ac_leave;
__u64 ac_stime;
@@ -69,7 +69,7 @@ struct thread_info {
#define task_stack_page(tsk) ((void *)(tsk))
#define __HAVE_THREAD_FUNCTIONS
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#define setup_thread_stack(p, org) \
*task_thread_info(p) = *task_thread_info(org); \
task_thread_info(p)->ac_stime = 0; \
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index c3cc42a15af1..096373800f73 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -27,9 +27,6 @@
#define __IGNORE_vfork /* clone() */
#define __IGNORE_umount2 /* umount() */
-#define __ARCH_WANT_SYS_RT_SIGACTION
-#define __ARCH_WANT_SYS_RT_SIGSUSPEND
-
#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
#include <linux/types.h>
@@ -47,12 +44,7 @@ asmlinkage unsigned long sys_mmap2(
int prot, int flags,
int fd, long pgoff);
struct pt_regs;
-struct sigaction;
asmlinkage long sys_ia64_pipe(void);
-asmlinkage long sys_rt_sigaction(int sig,
- const struct sigaction __user *act,
- struct sigaction __user *oact,
- size_t sigsetsize);
/*
* "Conditional" syscalls
diff --git a/arch/ia64/include/asm/xen/minstate.h b/arch/ia64/include/asm/xen/minstate.h
index c57fa910f2c9..00cf03e0cb82 100644
--- a/arch/ia64/include/asm/xen/minstate.h
+++ b/arch/ia64/include/asm/xen/minstate.h
@@ -1,5 +1,5 @@
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
#define XEN_ACCOUNT_GET_STAMP \
MOV_FROM_ITC(pUStk, p6, r20, r2);
diff --git a/arch/ia64/include/uapi/asm/socket.h b/arch/ia64/include/uapi/asm/socket.h
index 23d6759bb57b..c567adc8bea5 100644
--- a/arch/ia64/include/uapi/asm/socket.h
+++ b/arch/ia64/include/uapi/asm/socket.h
@@ -31,7 +31,7 @@
#define SO_PRIORITY 12
#define SO_LINGER 13
#define SO_BSDCOMPAT 14
-/* To add :#define SO_REUSEPORT 15 */
+#define SO_REUSEPORT 15
#define SO_PASSCRED 16
#define SO_PEERCRED 17
#define SO_RCVLOWAT 18
@@ -79,4 +79,6 @@
/* Instruct lower device to use last 4-bytes of skb data as FCS */
#define SO_NOFCS 43
+#define SO_LOCK_FILTER 44
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c
index a48bd9a9927b..46c9e3007315 100644
--- a/arch/ia64/kernel/asm-offsets.c
+++ b/arch/ia64/kernel/asm-offsets.c
@@ -41,7 +41,7 @@ void foo(void)
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count));
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));
DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));
DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime));
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index 6bfd8429ee0f..7a53530f22c2 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)
#endif
.global __paravirt_work_processed_syscall;
__paravirt_work_processed_syscall:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
adds r2=PT(LOADRS)+16,r12
MOV_FROM_ITC(pUStk, p9, r22, r19) // fetch time at leave
adds r18=TI_FLAGS+IA64_TASK_SIZE,r13
@@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:
ld8 r29=[r2],16 // M0|1 load cr.ipsr
ld8 r28=[r3],16 // M0|1 load cr.iip
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13
;;
ld8 r30=[r2],16 // M0|1 load cr.ifs
@@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:
ld8.fill r1=[r3],16 // M0|1 load r1
(pUStk) mov r17=1 // A
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) st1 [r15]=r17 // M2|3
#else
(pUStk) st1 [r14]=r17 // M2|3
@@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
COVER // B add current frame into dirty partition & set cr.ifs
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
mov r19=ar.bsp // M2 get new backing store pointer
st8 [r14]=r22 // M save time at leave
mov f10=f0 // F clear f10
@@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
adds r16=PT(CR_IPSR)+16,r12
adds r17=PT(CR_IIP)+16,r12
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
.pred.rel.mutex pUStk,pKStk
MOV_FROM_PSR(pKStk, r22, r29) // M2 read PSR now that interrupts are disabled
MOV_FROM_ITC(pUStk, p9, r22, r29) // M fetch time at leave
@@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
;;
ld8.fill r12=[r16],16
ld8.fill r13=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18
#else
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18
@@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
;;
ld8 r20=[r16],16 // ar.fpsr
ld8.fill r15=[r17],16
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
(pUStk) adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 // deferred
#endif
;;
@@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)
ld8.fill r2=[r17]
(pUStk) mov r17=1
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
// mmi_ : ld8 st1 shr;; mmi_ : st8 st1 shr;;
// mib : mov add br -> mib : ld8 add br
// bbb_ : br nop cover;; mbb_ : mov br cover;;
diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S
index e662f178b990..c4cd45d97749 100644
--- a/arch/ia64/kernel/fsys.S
+++ b/arch/ia64/kernel/fsys.S
@@ -529,7 +529,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
nop.i 0
;;
mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
MOV_FROM_ITC(p0, p6, r30, r23) // M get cycle for accounting
#else
nop.m 0
@@ -555,7 +555,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)
cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
br.call.sptk.many b7=ia64_syscall_setup // B
;;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
// mov.m r30=ar.itc is called in advance
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 4738ff7bd66a..9be4e497f3d3 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)
sched_clock = ia64_native_sched_clock
#endif
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
@@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)
shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT
br.ret.sptk.many rp
END(cycle_to_cputime)
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_IA64_BRL_EMU
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index fa25689fc453..689ffcaa284e 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -784,7 +784,7 @@ ENTRY(break_fault)
(p8) adds r28=16,r28 // A switch cr.iip to next bundle
(p9) adds r8=1,r8 // A increment ei to next slot
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
;;
mov b6=r30 // I0 setup syscall handler branch reg early
#else
@@ -801,7 +801,7 @@ ENTRY(break_fault)
//
///////////////////////////////////////////////////////////////////////
st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
MOV_FROM_ITC(p0, p14, r30, r18) // M get cycle for accounting
#else
mov b6=r30 // I0 setup syscall handler branch reg early
@@ -817,7 +817,7 @@ ENTRY(break_fault)
cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
br.call.sptk.many b7=ia64_syscall_setup // B
1:
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
// mov.m r30=ar.itc is called in advance, and r13 is current
add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13 // A
add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13 // A
@@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)
DBG_FAULT(16)
FAULT(16)
-#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
+#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)
/*
* There is no particular reason for this code to be here, other than
* that there happens to be space here that would go unused otherwise.
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 7026b29e277a..f8280a766a78 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
{
struct kretprobe_instance *ri = NULL;
struct hlist_head *head, empty_rp;
- struct hlist_node *node, *tmp;
+ struct hlist_node *tmp;
unsigned long flags, orig_ret_address = 0;
unsigned long trampoline_address =
((struct fnptr *)kretprobe_trampoline)->ip;
@@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* real return address, and all the rest will point to
* kretprobe_trampoline
*/
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
regs->cr_iip = orig_ret_address;
- hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, head, hlist) {
if (ri->task != current)
/* another task is sharing our hash bucket */
continue;
@@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
kretprobe_hash_unlock(current, &flags);
preempt_enable_no_resched();
- hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) {
+ hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
hlist_del(&ri->hlist);
kfree(ri);
}
diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h
index d56753a11636..cc82a7d744c9 100644
--- a/arch/ia64/kernel/minstate.h
+++ b/arch/ia64/kernel/minstate.h
@@ -4,7 +4,7 @@
#include "entry.h"
#include "paravirt_inst.h"
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
/* read ar.itc in advance, and use it before leaving bank 0 */
#define ACCOUNT_GET_STAMP \
(pUStk) mov.m r20=ar.itc;
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index ea39eba61ef5..2eda28414abb 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -619,6 +619,7 @@ static struct file_system_type pfm_fs_type = {
.mount = pfmfs_mount,
.kill_sb = kill_anon_super,
};
+MODULE_ALIAS_FS("pfmfs");
DEFINE_PER_CPU(unsigned long, pfm_syst_info);
DEFINE_PER_CPU(struct task_struct *, pmu_owner);
@@ -2221,9 +2222,9 @@ pfm_alloc_file(pfm_context_t *ctx)
d_add(path.dentry, inode);
file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
- if (!file) {
+ if (IS_ERR(file)) {
path_put(&path);
- return ERR_PTR(-ENFILE);
+ return file;
}
file->f_flags = O_RDONLY;
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 31360cbbd5f8..6f7dc8b7b35c 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);
unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
EXPORT_SYMBOL(boot_option_idle_override);
-void (*pm_idle) (void);
-EXPORT_SYMBOL(pm_idle);
void (*pm_power_off) (void);
EXPORT_SYMBOL(pm_power_off);
@@ -293,7 +291,6 @@ cpu_idle (void)
}
if (!need_resched()) {
- void (*idle)(void);
#ifdef CONFIG_SMP
min_xtp();
#endif
@@ -301,10 +298,7 @@ cpu_idle (void)
if (mark_idle)
(*mark_idle)(1);
- idle = pm_idle;
- if (!idle)
- idle = default_idle;
- (*idle)();
+ default_idle();
if (mark_idle)
(*mark_idle)(0);
#ifdef CONFIG_SMP
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 79802e540e53..aa527d7e91f2 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -301,7 +301,7 @@ salinfo_event_open(struct inode *inode, struct file *file)
static ssize_t
salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
char cmd[32];
@@ -463,7 +463,7 @@ retry:
static ssize_t
salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
u8 *buf;
@@ -524,7 +524,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)
static ssize_t
salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct proc_dir_entry *entry = PDE(inode);
struct salinfo_data *data = entry->data;
char cmd[32];
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index aaefd9b94f2f..2029cc0d2fc6 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -1051,7 +1051,6 @@ cpu_init (void)
max_num_phys_stacked = num_phys_stacked;
}
platform_cpu_init();
- pm_idle = default_idle;
}
void __init
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 680b73786be8..3637e03d2282 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -39,14 +39,6 @@
# define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0])
#endif
-asmlinkage long
-sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2,
- long arg3, long arg4, long arg5, long arg6, long arg7,
- struct pt_regs regs)
-{
- return do_sigaltstack(uss, uoss, regs.r12);
-}
-
static long
restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)
{
@@ -208,11 +200,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)
printk("SIG return (%s:%d): sp=%lx ip=%lx\n",
current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);
#endif
- /*
- * It is more difficult to avoid calling this function than to
- * call it and ignore errors.
- */
- do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12);
+ if (restore_altstack(&sc->sc_stack))
+ goto give_sigsegv;
return retval;
give_sigsegv:
@@ -376,9 +365,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
err |= copy_siginfo_to_user(&frame->info, info);
- err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp);
- err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size);
- err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags);
+ err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12);
err |= setup_sigcontext(&frame->sc, set, scr);
if (unlikely(err))
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index d9439ef2f661..41e33f84c185 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
unsigned long pgoff, unsigned long flags)
{
long map_shared = (flags & MAP_SHARED);
- unsigned long start_addr, align_mask = PAGE_SIZE - 1;
+ unsigned long align_mask = 0;
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
+ struct vm_unmapped_area_info info;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
@@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
addr = 0;
#endif
if (!addr)
- addr = mm->free_area_cache;
+ addr = TASK_UNMAPPED_BASE;
if (map_shared && (TASK_SIZE > 0xfffffffful))
/*
@@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
* tasks, we prefer to avoid exhausting the address space too quickly by
* limiting alignment to a single page.
*/
- align_mask = SHMLBA - 1;
-
- full_search:
- start_addr = addr = (addr + align_mask) & ~align_mask;
-
- for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
- /* At this point: (!vma || addr < vma->vm_end). */
- if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
- if (start_addr != TASK_UNMAPPED_BASE) {
- /* Start a new search --- just in case we missed some holes. */
- addr = TASK_UNMAPPED_BASE;
- goto full_search;
- }
- return -ENOMEM;
- }
- if (!vma || addr + len <= vma->vm_start) {
- /* Remember the address where we stopped this search: */
- mm->free_area_cache = addr + len;
- return addr;
- }
- addr = (vma->vm_end + align_mask) & ~align_mask;
- }
+ align_mask = PAGE_MASK & (SHMLBA - 1);
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = addr;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = align_mask;
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
asmlinkage long
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 88a794536bc0..fbaac1afb844 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {
};
static struct clocksource *itc_clocksource;
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
#include <linux/kernel_stat.h>
@@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)
account_system_time(tsk, 0, delta, delta);
}
+EXPORT_SYMBOL_GPL(vtime_account_system);
void vtime_account_idle(struct task_struct *tsk)
{
account_idle_time(vtime_delta(tsk));
}
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
+#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index bd42b76000d1..f7f9f9c6caf0 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -72,7 +72,7 @@ die (const char *str, struct pt_regs *regs, long err)
bust_spinlocks(0);
die.lock_owner = -1;
- add_taint(TAINT_DIE);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
spin_unlock_irq(&die.lock);
if (!regs)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index e7947528aee6..2cd225f8c68d 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -20,7 +20,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on BROKEN
- depends on HAVE_KVM && MODULES && EXPERIMENTAL
+ depends on HAVE_KVM && MODULES
# for device assignment:
depends on PCI
depends on BROKEN
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index bd1c51555038..ad3126a58644 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -955,7 +955,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
kvm_mem.guest_phys_addr;
kvm_userspace_mem.memory_size = kvm_mem.memory_size;
r = kvm_vm_ioctl_set_memory_region(kvm,
- &kvm_userspace_mem, 0);
+ &kvm_userspace_mem, false);
if (r)
goto out;
break;
@@ -1580,7 +1580,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
struct kvm_memory_slot *memslot,
struct kvm_memory_slot old,
struct kvm_userspace_memory_region *mem,
- int user_alloc)
+ bool user_alloc)
{
unsigned long i;
unsigned long pfn;
@@ -1611,7 +1611,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
void kvm_arch_commit_memory_region(struct kvm *kvm,
struct kvm_userspace_memory_region *mem,
struct kvm_memory_slot old,
- int user_alloc)
+ bool user_alloc)
{
return;
}
@@ -1834,7 +1834,7 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
mutex_lock(&kvm->slots_lock);
r = -EINVAL;
- if (log->slot >= KVM_MEMORY_SLOTS)
+ if (log->slot >= KVM_USER_MEM_SLOTS)
goto out;
memslot = id_to_memslot(kvm->memslots, log->slot);
diff --git a/arch/ia64/kvm/lapic.h b/arch/ia64/kvm/lapic.h
index c5f92a926a9a..c3e2935b6db4 100644
--- a/arch/ia64/kvm/lapic.h
+++ b/arch/ia64/kvm/lapic.h
@@ -27,4 +27,10 @@ int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq);
#define kvm_apic_present(x) (true)
#define kvm_lapic_enabled(x) (true)
+static inline bool kvm_apic_vid_enabled(void)
+{
+ /* IA64 has no apicv supporting, do nothing here */
+ return false;
+}
+
#endif
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1516d1dc11fd..80dab509dfb0 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -93,7 +93,7 @@ void show_mem(unsigned int filter)
printk(KERN_INFO "%d pages swap cached\n", total_cached);
printk(KERN_INFO "Total of %ld pages in page table cache\n",
quicklist_total_size());
- printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
+ printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index c641333cd997..c2e955ee79a8 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -666,7 +666,7 @@ void show_mem(unsigned int filter)
printk(KERN_INFO "%d pages swap cached\n", total_cached);
printk(KERN_INFO "Total of %ld pages in page table cache\n",
quicklist_total_size());
- printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
+ printk(KERN_INFO "%ld free buffer pages\n", nr_free_buffer_pages());
}
/**
@@ -822,4 +822,8 @@ int __meminit vmemmap_populate(struct page *start_page,
{
return vmemmap_populate_basepages(start_page, size, node);
}
+
+void vmemmap_free(struct page *memmap, unsigned long nr_pages)
+{
+}
#endif
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 5ca674b74737..76069c18ee42 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -148,7 +148,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags)
{
- struct vm_area_struct *vmm;
+ struct vm_unmapped_area_info info;
if (len > RGN_MAP_LIMIT)
return -ENOMEM;
@@ -165,16 +165,14 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
/* This code assumes that RGN_HPAGE != 0. */
if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
addr = HPAGE_REGION_BASE;
- else
- addr = ALIGN(addr, HPAGE_SIZE);
- for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
- /* At this point: (!vmm || addr < vmm->vm_end). */
- if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
- return -ENOMEM;
- if (!vmm || (addr + len) <= vmm->vm_start)
- return addr;
- addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
- }
+
+ info.flags = 0;
+ info.length = len;
+ info.low_limit = addr;
+ info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
+ info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
+ info.align_offset = 0;
+ return vm_unmapped_area(&info);
}
static int __init hugetlb_setup_sz(char *str)
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index b755ea92aea7..20bc967c7209 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -688,6 +688,24 @@ int arch_add_memory(int nid, u64 start, u64 size)
return ret;
}
+
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int arch_remove_memory(u64 start, u64 size)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct zone *zone;
+ int ret;
+
+ zone = page_zone(pfn_to_page(start_pfn));
+ ret = __remove_pages(zone, start_pfn, nr_pages);
+ if (ret)
+ pr_warn("%s: Problem encountered in __remove_pages() as"
+ " ret=%d\n", __func__, ret);
+
+ return ret;
+}
+#endif
#endif
/*
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 55b72ad57329..60532ab27346 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -393,6 +393,14 @@ out1:
return NULL;
}
+int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
+{
+ struct pci_controller *controller = bridge->bus->sysdata;
+
+ ACPI_HANDLE_SET(&bridge->dev, controller->acpi_handle);
+ return 0;
+}
+
static int is_valid_resource(struct pci_dev *dev, int idx)
{
unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
diff --git a/arch/ia64/xen/Kconfig b/arch/ia64/xen/Kconfig
index 515e0826803a..5d8a06b0ddf7 100644
--- a/arch/ia64/xen/Kconfig
+++ b/arch/ia64/xen/Kconfig
@@ -5,7 +5,7 @@
config XEN
bool "Xen hypervisor support"
default y
- depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB && EXPERIMENTAL
+ depends on PARAVIRT && MCKINLEY && IA64_PAGE_SIZE_16KB
select XEN_XENCOMM
select NO_IDLE_HZ
# followings are required to save/restore.