Diffstat (limited to 'arch/ia64')
28 files changed, 268 insertions, 385 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 011a1cdf0eb5..1ee596cd942f 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -60,9 +60,7 @@ config IOMMU_HELPER bool config GENERIC_LOCKBREAK - bool - default y - depends on SMP && PREEMPT + def_bool n config RWSEM_XCHGADD_ALGORITHM bool @@ -500,6 +498,10 @@ config HAVE_ARCH_NODEDATA_EXTENSION def_bool y depends on NUMA +config ARCH_PROC_KCORE_TEXT + def_bool y + depends on PROC_KCORE + config IA32_SUPPORT bool "Support for Linux/x86 binaries" help diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c index 8cfb001092ab..674a8374c6d9 100644 --- a/arch/ia64/hp/common/sba_iommu.c +++ b/arch/ia64/hp/common/sba_iommu.c @@ -2026,24 +2026,21 @@ acpi_sba_ioc_add(struct acpi_device *device) struct ioc *ioc; acpi_status status; u64 hpa, length; - struct acpi_buffer buffer; struct acpi_device_info *dev_info; status = hp_acpi_csr_space(device->handle, &hpa, &length); if (ACPI_FAILURE(status)) return 1; - buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER; - status = acpi_get_object_info(device->handle, &buffer); + status = acpi_get_object_info(device->handle, &dev_info); if (ACPI_FAILURE(status)) return 1; - dev_info = buffer.pointer; /* * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI * root bridges, and its CSR space includes the IOC function. */ - if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) { + if (strncmp("HWP0001", dev_info->hardware_id.string, 7) == 0) { hpa += ZX1_IOC_OFFSET; /* zx1 based systems default to kernel page size iommu pages */ if (!iovp_shift) diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c index f92bdaac8976..c69552bf893e 100644 --- a/arch/ia64/ia32/binfmt_elf32.c +++ b/arch/ia64/ia32/binfmt_elf32.c @@ -69,11 +69,11 @@ ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf) } -static struct vm_operations_struct ia32_shared_page_vm_ops = { +static const struct vm_operations_struct ia32_shared_page_vm_ops = { .fault = ia32_install_shared_page }; -static struct vm_operations_struct ia32_gate_page_vm_ops = { +static const struct vm_operations_struct ia32_gate_page_vm_ops = { .fault = ia32_install_gate_page }; diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 16ef61a91d95..625ed8f76fce 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -1270,7 +1270,7 @@ putreg (struct task_struct *child, int regno, unsigned int value) case PT_CS: if (value != __USER_CS) printk(KERN_ERR - "ia32.putreg: attempt to to set invalid segment register %d = %x\n", + "ia32.putreg: attempt to set invalid segment register %d = %x\n", regno, value); break; default: diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h index 0f82cc2934e1..91df9686a0da 100644 --- a/arch/ia64/include/asm/acpi.h +++ b/arch/ia64/include/asm/acpi.h @@ -89,10 +89,12 @@ ia64_acpi_release_global_lock (unsigned int *lock) #define ACPI_RELEASE_GLOBAL_LOCK(facs, Acq) \ ((Acq) = ia64_acpi_release_global_lock(&facs->global_lock)) +#ifdef CONFIG_ACPI #define acpi_disabled 0 /* ACPI always enabled on IA64 */ #define acpi_noirq 0 /* ACPI always enabled on IA64 */ #define acpi_pci_disabled 0 /* ACPI PCI always enabled on IA64 */ #define acpi_strict 1 /* no ACPI spec workarounds on IA64 */ +#endif #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */ static inline void disable_acpi(void) { } diff --git a/arch/ia64/include/asm/cputime.h b/arch/ia64/include/asm/cputime.h index d20b998cb91d..7fa8a8594660 
100644 --- a/arch/ia64/include/asm/cputime.h +++ b/arch/ia64/include/asm/cputime.h @@ -30,6 +30,7 @@ typedef u64 cputime_t; typedef u64 cputime64_t; #define cputime_zero ((cputime_t)0) +#define cputime_one_jiffy jiffies_to_cputime(1) #define cputime_max ((~((cputime_t)0) >> 1) - 1) #define cputime_add(__a, __b) ((__a) + (__b)) #define cputime_sub(__a, __b) ((__a) - (__b)) diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 44a0b53df900..c171cdf0a789 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -145,12 +145,14 @@ extern void ia64_mca_ucmc_handler(struct pt_regs *, struct ia64_sal_os_state *); extern void ia64_init_handler(struct pt_regs *, struct switch_stack *, struct ia64_sal_os_state *); +extern void ia64_os_init_on_kdump(void); extern void ia64_monarch_init_handler(void); extern void ia64_slave_init_handler(void); extern void ia64_mca_cmc_vector_setup(void); extern int ia64_reg_MCA_extension(int (*fn)(void *, struct ia64_sal_os_state *)); extern void ia64_unreg_MCA_extension(void); extern unsigned long ia64_get_rnat(unsigned long *); +extern void ia64_set_psr_mc(void); extern void ia64_mca_printk(const char * fmt, ...) __attribute__ ((format (printf, 1, 2))); diff --git a/arch/ia64/include/asm/mman.h b/arch/ia64/include/asm/mman.h index 48cf8b98a0b4..4459028e5aa8 100644 --- a/arch/ia64/include/asm/mman.h +++ b/arch/ia64/include/asm/mman.h @@ -8,19 +8,9 @@ * David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co */ -#include <asm-generic/mman-common.h> +#include <asm-generic/mman.h> -#define MAP_GROWSDOWN 0x00100 /* stack-like segment */ -#define MAP_GROWSUP 0x00200 /* register stack-like segment */ -#define MAP_DENYWRITE 0x00800 /* ETXTBSY */ -#define MAP_EXECUTABLE 0x01000 /* mark it as an executable */ -#define MAP_LOCKED 0x02000 /* pages are locked */ -#define MAP_NORESERVE 0x04000 /* don't check for reservations */ -#define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */ -#define MAP_NONBLOCK 0x10000 /* do not block on IO */ - -#define MCL_CURRENT 1 /* lock all current mappings */ -#define MCL_FUTURE 2 /* lock all future mappings */ +#define MAP_GROWSUP 0x0200 /* register stack-like segment */ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ diff --git a/arch/ia64/include/asm/smp.h b/arch/ia64/include/asm/smp.h index d217d1d4e051..0b3b3997decd 100644 --- a/arch/ia64/include/asm/smp.h +++ b/arch/ia64/include/asm/smp.h @@ -127,7 +127,6 @@ extern int is_multithreading_enabled(void); extern void arch_send_call_function_single_ipi(int cpu); extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); -#define arch_send_call_function_ipi_mask arch_send_call_function_ipi_mask #else /* CONFIG_SMP */ diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h index 13ab71576bc7..30bb930e1111 100644 --- a/arch/ia64/include/asm/spinlock.h +++ b/arch/ia64/include/asm/spinlock.h @@ -19,103 +19,106 @@ #define __raw_spin_lock_init(x) ((x)->lock = 0) -#ifdef ASM_SUPPORTED /* - * Try to get the lock. If we fail to get the lock, make a non-standard call to - * ia64_spinlock_contention(). We do not use a normal call because that would force all - * callers of __raw_spin_lock() to be non-leaf routines. Instead, ia64_spinlock_contention() is - * carefully coded to touch only those registers that __raw_spin_lock() marks "clobbered". + * Ticket locks are conceptually two parts, one indicating the current head of + * the queue, and the other indicating the current tail. 
The lock is acquired + * by atomically noting the tail and incrementing it by one (thus adding + * ourself to the queue and noting our position), then waiting until the head + * becomes equal to the the initial value of the tail. + * + * 63 32 31 0 + * +----------------------------------------------------+ + * | next_ticket_number | now_serving | + * +----------------------------------------------------+ */ -#define IA64_SPINLOCK_CLOBBERS "ar.ccv", "ar.pfs", "p14", "p15", "r27", "r28", "r29", "r30", "b6", "memory" +#define TICKET_SHIFT 32 -static inline void -__raw_spin_lock_flags (raw_spinlock_t *lock, unsigned long flags) +static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock) { - register volatile unsigned int *ptr asm ("r31") = &lock->lock; - -#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) -# ifdef CONFIG_ITANIUM - /* don't use brl on Itanium... */ - asm volatile ("{\n\t" - " mov ar.ccv = r0\n\t" - " mov r28 = ip\n\t" - " mov r30 = 1;;\n\t" - "}\n\t" - "cmpxchg4.acq r30 = [%1], r30, ar.ccv\n\t" - "movl r29 = ia64_spinlock_contention_pre3_4;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "mov b6 = r29;;\n\t" - "mov r27=%2\n\t" - "(p14) br.cond.spnt.many b6" - : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); -# else - asm volatile ("{\n\t" - " mov ar.ccv = r0\n\t" - " mov r28 = ip\n\t" - " mov r30 = 1;;\n\t" - "}\n\t" - "cmpxchg4.acq r30 = [%1], r30, ar.ccv;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "mov r27=%2\n\t" - "(p14) brl.cond.spnt.many ia64_spinlock_contention_pre3_4;;" - : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); -# endif /* CONFIG_MCKINLEY */ -#else -# ifdef CONFIG_ITANIUM - /* don't use brl on Itanium... */ - /* mis-declare, so we get the entry-point, not it's function descriptor: */ - asm volatile ("mov r30 = 1\n\t" - "mov r27=%2\n\t" - "mov ar.ccv = r0;;\n\t" - "cmpxchg4.acq r30 = [%0], r30, ar.ccv\n\t" - "movl r29 = ia64_spinlock_contention;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "mov b6 = r29;;\n\t" - "(p14) br.call.spnt.many b6 = b6" - : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); -# else - asm volatile ("mov r30 = 1\n\t" - "mov r27=%2\n\t" - "mov ar.ccv = r0;;\n\t" - "cmpxchg4.acq r30 = [%0], r30, ar.ccv;;\n\t" - "cmp4.ne p14, p0 = r30, r0\n\t" - "(p14) brl.call.spnt.many b6=ia64_spinlock_contention;;" - : "=r"(ptr) : "r"(ptr), "r" (flags) : IA64_SPINLOCK_CLOBBERS); -# endif /* CONFIG_MCKINLEY */ -#endif + int *p = (int *)&lock->lock, turn, now_serving; + + now_serving = *p; + turn = ia64_fetchadd(1, p+1, acq); + + if (turn == now_serving) + return; + + do { + cpu_relax(); + } while (ACCESS_ONCE(*p) != turn); } -#define __raw_spin_lock(lock) __raw_spin_lock_flags(lock, 0) +static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock) +{ + long tmp = ACCESS_ONCE(lock->lock), try; -/* Unlock by doing an ordered store and releasing the cacheline with nta */ -static inline void __raw_spin_unlock(raw_spinlock_t *x) { - barrier(); - asm volatile ("st4.rel.nta [%0] = r0\n\t" :: "r"(x)); + if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) { + try = tmp + (1L << TICKET_SHIFT); + + return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp; + } + return 0; } -#else /* !ASM_SUPPORTED */ -#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock) -# define __raw_spin_lock(x) \ -do { \ - __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ - __u64 ia64_spinlock_val; \ - ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \ - if (unlikely(ia64_spinlock_val)) { \ - do { \ - while 
(*ia64_spinlock_ptr) \ - ia64_barrier(); \ - ia64_spinlock_val = ia64_cmpxchg4_acq(ia64_spinlock_ptr, 1, 0); \ - } while (ia64_spinlock_val); \ - } \ -} while (0) -#define __raw_spin_unlock(x) do { barrier(); ((raw_spinlock_t *) x)->lock = 0; } while (0) -#endif /* !ASM_SUPPORTED */ +static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock) +{ + int *p = (int *)&lock->lock; + + (void)ia64_fetchadd(1, p, rel); +} + +static inline int __ticket_spin_is_locked(raw_spinlock_t *lock) +{ + long tmp = ACCESS_ONCE(lock->lock); + + return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1)); +} + +static inline int __ticket_spin_is_contended(raw_spinlock_t *lock) +{ + long tmp = ACCESS_ONCE(lock->lock); -#define __raw_spin_is_locked(x) ((x)->lock != 0) -#define __raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) -#define __raw_spin_unlock_wait(lock) \ - do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0) + return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1; +} + +static inline int __raw_spin_is_locked(raw_spinlock_t *lock) +{ + return __ticket_spin_is_locked(lock); +} + +static inline int __raw_spin_is_contended(raw_spinlock_t *lock) +{ + return __ticket_spin_is_contended(lock); +} +#define __raw_spin_is_contended __raw_spin_is_contended + +static __always_inline void __raw_spin_lock(raw_spinlock_t *lock) +{ + __ticket_spin_lock(lock); +} + +static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock) +{ + return __ticket_spin_trylock(lock); +} + +static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock) +{ + __ticket_spin_unlock(lock); +} + +static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock, + unsigned long flags) +{ + __raw_spin_lock(lock); +} + +static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock) +{ + while (__raw_spin_is_locked(lock)) + cpu_relax(); +} #define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0) #define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0) diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h index 474e46f1ab4a..b61d136d9bc2 100644 --- a/arch/ia64/include/asm/spinlock_types.h +++ b/arch/ia64/include/asm/spinlock_types.h @@ -6,7 +6,7 @@ #endif typedef struct { - volatile unsigned int lock; + volatile unsigned long lock; } raw_spinlock_t; #define __RAW_SPIN_LOCK_UNLOCKED { 0 } diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h index d0141fbf51d0..3ddb4e709dba 100644 --- a/arch/ia64/include/asm/topology.h +++ b/arch/ia64/include/asm/topology.h @@ -33,7 +33,6 @@ /* * Returns a bitmask of CPUs on Node 'node'. 
*/ -#define node_to_cpumask(node) (node_to_cpu_mask[node]) #define cpumask_of_node(node) (&node_to_cpu_mask[node]) /* @@ -104,8 +103,6 @@ void build_cpu_to_node_map(void); #ifdef CONFIG_SMP #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id) #define topology_core_id(cpu) (cpu_data(cpu)->core_id) -#define topology_core_siblings(cpu) (cpu_core_map[cpu]) -#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu)) #define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) #define topology_thread_cpumask(cpu) (&per_cpu(cpu_sibling_map, cpu)) #define smt_capable() (smp_num_siblings > 1) diff --git a/arch/ia64/install.sh b/arch/ia64/install.sh index 929e780026d1..0e932f5dcd1a 100644 --- a/arch/ia64/install.sh +++ b/arch/ia64/install.sh @@ -21,8 +21,8 @@ # User may have a custom install script -if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi -if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi +if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi +if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi # Default install - same as make zlilo diff --git a/arch/ia64/kernel/Makefile.gate b/arch/ia64/kernel/Makefile.gate index 1d87f84069b3..ab9b03a9adcc 100644 --- a/arch/ia64/kernel/Makefile.gate +++ b/arch/ia64/kernel/Makefile.gate @@ -10,7 +10,7 @@ quiet_cmd_gate = GATE $@ cmd_gate = $(CC) -nostdlib $(GATECFLAGS_$(@F)) -Wl,-T,$(filter-out FORCE,$^) -o $@ GATECFLAGS_gate.so = -shared -s -Wl,-soname=linux-gate.so.1 \ - $(call ld-option, -Wl$(comma)--hash-style=sysv) + $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) $(obj)/gate.so: $(obj)/gate.lds $(obj)/gate.o FORCE $(call if_changed,gate) diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c index f065093f8e9b..6631a9dfafdc 100644 --- a/arch/ia64/kernel/crash.c +++ b/arch/ia64/kernel/crash.c @@ -23,6 +23,7 @@ int kdump_status[NR_CPUS]; static atomic_t kdump_cpu_frozen; atomic_t kdump_in_progress; +static int kdump_freeze_monarch; static int kdump_on_init = 1; static int kdump_on_fatal_mca = 1; @@ -108,10 +109,38 @@ machine_crash_shutdown(struct pt_regs *pt) */ kexec_disable_iosapic(); #ifdef CONFIG_SMP + /* + * If kdump_on_init is set and an INIT is asserted here, kdump will + * be started again via INIT monarch. + */ + local_irq_disable(); + ia64_set_psr_mc(); /* mask MCA/INIT */ + if (atomic_inc_return(&kdump_in_progress) != 1) + unw_init_running(kdump_cpu_freeze, NULL); + + /* + * Now this cpu is ready for kdump. + * Stop all others by IPI or INIT. They could receive INIT from + * outside and might be INIT monarch, but only thing they have to + * do is falling into kdump_cpu_freeze(). + * + * If an INIT is asserted here: + * - All receivers might be slaves, since some of cpus could already + * be frozen and INIT might be masked on monarch. In this case, + * all slaves will be frozen soon since kdump_in_progress will let + * them into DIE_INIT_SLAVE_LEAVE. + * - One might be a monarch, but INIT rendezvous will fail since + * at least this cpu already have INIT masked so it never join + * to the rendezvous. In this case, all slaves and monarch will + * be frozen soon with no wait since the INIT rendezvous is skipped + * by kdump_in_progress. 
+ */ kdump_smp_send_stop(); /* not all cpu response to IPI, send INIT to freeze them */ - if (kdump_wait_cpu_freeze() && kdump_on_init) { + if (kdump_wait_cpu_freeze()) { kdump_smp_send_init(); + /* wait again, don't go ahead if possible */ + kdump_wait_cpu_freeze(); } #endif } @@ -129,17 +158,17 @@ void kdump_cpu_freeze(struct unw_frame_info *info, void *arg) { int cpuid; + local_irq_disable(); cpuid = smp_processor_id(); crash_save_this_cpu(); current->thread.ksp = (__u64)info->sw - 16; + + ia64_set_psr_mc(); /* mask MCA/INIT and stop reentrance */ + atomic_inc(&kdump_cpu_frozen); kdump_status[cpuid] = 1; mb(); -#ifdef CONFIG_HOTPLUG_CPU - if (cpuid != 0) - ia64_jump_to_sal(&sal_boot_rendez_state[cpuid]); -#endif for (;;) cpu_relax(); } @@ -150,6 +179,20 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) struct ia64_mca_notify_die *nd; struct die_args *args = data; + if (atomic_read(&kdump_in_progress)) { + switch (val) { + case DIE_INIT_MONARCH_LEAVE: + if (!kdump_freeze_monarch) + break; + /* fall through */ + case DIE_INIT_SLAVE_LEAVE: + case DIE_INIT_MONARCH_ENTER: + case DIE_MCA_RENDZVOUS_LEAVE: + unw_init_running(kdump_cpu_freeze, NULL); + break; + } + } + if (!kdump_on_init && !kdump_on_fatal_mca) return NOTIFY_DONE; @@ -162,43 +205,31 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data) } if (val != DIE_INIT_MONARCH_LEAVE && - val != DIE_INIT_SLAVE_LEAVE && val != DIE_INIT_MONARCH_PROCESS && - val != DIE_MCA_RENDZVOUS_LEAVE && val != DIE_MCA_MONARCH_LEAVE) return NOTIFY_DONE; nd = (struct ia64_mca_notify_die *)args->err; - /* Reason code 1 means machine check rendezvous*/ - if ((val == DIE_INIT_MONARCH_LEAVE || val == DIE_INIT_SLAVE_LEAVE - || val == DIE_INIT_MONARCH_PROCESS) && nd->sos->rv_rc == 1) - return NOTIFY_DONE; switch (val) { case DIE_INIT_MONARCH_PROCESS: - if (kdump_on_init) { - atomic_set(&kdump_in_progress, 1); - *(nd->monarch_cpu) = -1; + /* Reason code 1 means machine check rendezvous*/ + if (kdump_on_init && (nd->sos->rv_rc != 1)) { + if (atomic_inc_return(&kdump_in_progress) != 1) + kdump_freeze_monarch = 1; } break; case DIE_INIT_MONARCH_LEAVE: - if (kdump_on_init) + /* Reason code 1 means machine check rendezvous*/ + if (kdump_on_init && (nd->sos->rv_rc != 1)) machine_kdump_on_init(); break; - case DIE_INIT_SLAVE_LEAVE: - if (atomic_read(&kdump_in_progress)) - unw_init_running(kdump_cpu_freeze, NULL); - break; - case DIE_MCA_RENDZVOUS_LEAVE: - if (atomic_read(&kdump_in_progress)) - unw_init_running(kdump_cpu_freeze, NULL); - break; case DIE_MCA_MONARCH_LEAVE: /* *(nd->data) indicate if MCA is recoverable */ if (kdump_on_fatal_mca && !(*(nd->data))) { - atomic_set(&kdump_in_progress, 1); - *(nd->monarch_cpu) = -1; - machine_kdump_on_init(); + if (atomic_inc_return(&kdump_in_progress) == 1) + machine_kdump_on_init(); + /* We got fatal MCA while kdump!? No way!! 
*/ } break; } diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 23f846de62d5..696eff28a0c4 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -167,7 +167,7 @@ RestRR: \ mov _tmp2=((ia64_rid(IA64_REGION_ID_KERNEL, (num<<61)) << 8) | (pgsize << 2) | vhpt);; \ mov rr[_tmp1]=_tmp2 - .section __special_page_section,"ax" + __PAGE_ALIGNED_DATA .global empty_zero_page empty_zero_page: @@ -181,7 +181,7 @@ swapper_pg_dir: halt_msg: stringz "Halting kernel\n" - .section .text.head,"ax" + __REF .global start_ap @@ -1130,95 +1130,6 @@ SET_REG(b5); #endif /* CONFIG_IA64_BRL_EMU */ #ifdef CONFIG_SMP - /* - * This routine handles spinlock contention. It uses a non-standard calling - * convention to avoid converting leaf routines into interior routines. Because - * of this special convention, there are several restrictions: - * - * - do not use gp relative variables, this code is called from the kernel - * and from modules, r1 is undefined. - * - do not use stacked registers, the caller owns them. - * - do not use the scratch stack space, the caller owns it. - * - do not use any registers other than the ones listed below - * - * Inputs: - * ar.pfs - saved CFM of caller - * ar.ccv - 0 (and available for use) - * r27 - flags from spin_lock_irqsave or 0. Must be preserved. - * r28 - available for use. - * r29 - available for use. - * r30 - available for use. - * r31 - address of lock, available for use. - * b6 - return address - * p14 - available for use. - * p15 - used to track flag status. - * - * If you patch this code to use more registers, do not forget to update - * the clobber lists for spin_lock() in arch/ia64/include/asm/spinlock.h. - */ - -#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) - -GLOBAL_ENTRY(ia64_spinlock_contention_pre3_4) - .prologue - .save ar.pfs, r0 // this code effectively has a zero frame size - .save rp, r28 - .body - nop 0 - tbit.nz p15,p0=r27,IA64_PSR_I_BIT - .restore sp // pop existing prologue after next insn - mov b6 = r28 - .prologue - .save ar.pfs, r0 - .altrp b6 - .body - ;; -(p15) ssm psr.i // reenable interrupts if they were on - // DavidM says that srlz.d is slow and is not required in this case -.wait: - // exponential backoff, kdb, lockmeter etc. go in here - hint @pause - ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word - nop 0 - ;; - cmp4.ne p14,p0=r30,r0 -(p14) br.cond.sptk.few .wait -(p15) rsm psr.i // disable interrupts if we reenabled them - br.cond.sptk.few b6 // lock is now free, try to acquire - .global ia64_spinlock_contention_pre3_4_end // for kernprof -ia64_spinlock_contention_pre3_4_end: -END(ia64_spinlock_contention_pre3_4) - -#else - -GLOBAL_ENTRY(ia64_spinlock_contention) - .prologue - .altrp b6 - .body - tbit.nz p15,p0=r27,IA64_PSR_I_BIT - ;; -.wait: -(p15) ssm psr.i // reenable interrupts if they were on - // DavidM says that srlz.d is slow and is not required in this case -.wait2: - // exponential backoff, kdb, lockmeter etc. 
go in here - hint @pause - ld4 r30=[r31] // don't use ld4.bias; if it's contended, we won't write the word - ;; - cmp4.ne p14,p0=r30,r0 - mov r30 = 1 -(p14) br.cond.sptk.few .wait2 -(p15) rsm psr.i // disable interrupts if we reenabled them - ;; - cmpxchg4.acq r30=[r31], r30, ar.ccv - ;; - cmp4.ne p14,p0=r0,r30 -(p14) br.cond.sptk.few .wait - - br.ret.sptk.many b6 // lock is now taken -END(ia64_spinlock_contention) - -#endif #ifdef CONFIG_HOTPLUG_CPU GLOBAL_ENTRY(ia64_jump_to_sal) @@ -1242,7 +1153,7 @@ GLOBAL_ENTRY(ia64_jump_to_sal) movl r16=SAL_PSR_BITS_TO_SET;; mov cr.ipsr=r16 mov cr.ifs=r0;; - rfi;; + rfi;; // note: this unmask MCA/INIT (psr.mc) 1: /* * Invalidate all TLB data/inst diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c index 8ebccb589e1c..14d39e300627 100644 --- a/arch/ia64/kernel/ia64_ksyms.c +++ b/arch/ia64/kernel/ia64_ksyms.c @@ -84,26 +84,6 @@ EXPORT_SYMBOL(ia64_save_scratch_fpregs); #include <asm/unwind.h> EXPORT_SYMBOL(unw_init_running); -#ifdef ASM_SUPPORTED -# ifdef CONFIG_SMP -# if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) -/* - * This is not a normal routine and we don't want a function descriptor for it, so we use - * a fake declaration here. - */ -extern char ia64_spinlock_contention_pre3_4; -EXPORT_SYMBOL(ia64_spinlock_contention_pre3_4); -# else -/* - * This is not a normal routine and we don't want a function descriptor for it, so we use - * a fake declaration here. - */ -extern char ia64_spinlock_contention; -EXPORT_SYMBOL(ia64_spinlock_contention); -# endif -# endif -#endif - #if defined(CONFIG_IA64_ESI) || defined(CONFIG_IA64_ESI_MODULE) extern void esi_call_phys (void); EXPORT_SYMBOL_GPL(esi_call_phys); diff --git a/arch/ia64/kernel/init_task.c b/arch/ia64/kernel/init_task.c index c475fc281be7..e253ab8fcbc8 100644 --- a/arch/ia64/kernel/init_task.c +++ b/arch/ia64/kernel/init_task.c @@ -33,7 +33,8 @@ union { struct thread_info thread_info; } s; unsigned long stack[KERNEL_STACK_SIZE/sizeof (unsigned long)]; -} init_task_mem asm ("init_task") __attribute__((section(".data.init_task"))) = {{ +} init_task_mem asm ("init_task") __init_task_data = + {{ .task = INIT_TASK(init_task_mem.s.task), .thread_info = INIT_THREAD_INFO(init_task_mem.s.task) }}; diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c index 0823de1f6ebe..3d3aeef46947 100644 --- a/arch/ia64/kernel/machine_kexec.c +++ b/arch/ia64/kernel/machine_kexec.c @@ -24,6 +24,8 @@ #include <asm/delay.h> #include <asm/meminit.h> #include <asm/processor.h> +#include <asm/sal.h> +#include <asm/mca.h> typedef NORET_TYPE void (*relocate_new_kernel_t)( unsigned long indirection_page, @@ -85,13 +87,26 @@ static void ia64_machine_kexec(struct unw_frame_info *info, void *arg) void *pal_addr = efi_get_pal_addr(); unsigned long code_addr = (unsigned long)page_address(image->control_code_page); int ii; + u64 fp, gp; + ia64_fptr_t *init_handler = (ia64_fptr_t *)ia64_os_init_on_kdump; BUG_ON(!image); if (image->type == KEXEC_TYPE_CRASH) { crash_save_this_cpu(); current->thread.ksp = (__u64)info->sw - 16; + + /* Register noop init handler */ + fp = ia64_tpa(init_handler->fp); + gp = ia64_tpa(ia64_getreg(_IA64_REG_GP)); + ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, fp, gp, 0, fp, gp, 0); + } else { + /* Unregister init handlers of current kernel */ + ia64_sal_set_vectors(SAL_VECTOR_OS_INIT, 0, 0, 0, 0, 0, 0); } + /* Unregister mca handler - No more recovery on current kernel */ + ia64_sal_set_vectors(SAL_VECTOR_OS_MCA, 0, 0, 0, 0, 0, 0); + /* Interrupts aren't acceptable while 
we reboot */ local_irq_disable(); diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 7b30d21c5190..d2877a7bfe2e 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -1682,14 +1682,25 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw, if (!sos->monarch) { ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT; + +#ifdef CONFIG_KEXEC + while (monarch_cpu == -1 && !atomic_read(&kdump_in_progress)) + udelay(1000); +#else while (monarch_cpu == -1) - cpu_relax(); /* spin until monarch enters */ + cpu_relax(); /* spin until monarch enters */ +#endif NOTIFY_INIT(DIE_INIT_SLAVE_ENTER, regs, (long)&nd, 1); NOTIFY_INIT(DIE_INIT_SLAVE_PROCESS, regs, (long)&nd, 1); +#ifdef CONFIG_KEXEC + while (monarch_cpu != -1 && !atomic_read(&kdump_in_progress)) + udelay(1000); +#else while (monarch_cpu != -1) - cpu_relax(); /* spin until monarch leaves */ + cpu_relax(); /* spin until monarch leaves */ +#endif NOTIFY_INIT(DIE_INIT_SLAVE_LEAVE, regs, (long)&nd, 1); diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S index a06d46548ff9..7461d2573d41 100644 --- a/arch/ia64/kernel/mca_asm.S +++ b/arch/ia64/kernel/mca_asm.S @@ -40,6 +40,7 @@ .global ia64_do_tlb_purge .global ia64_os_mca_dispatch + .global ia64_os_init_on_kdump .global ia64_os_init_dispatch_monarch .global ia64_os_init_dispatch_slave @@ -299,6 +300,25 @@ END(ia64_os_mca_virtual_begin) //StartMain//////////////////////////////////////////////////////////////////// // +// NOP init handler for kdump. In panic situation, we may receive INIT +// while kernel transition. Since we initialize registers on leave from +// current kernel, no longer monarch/slave handlers of current kernel in +// virtual mode are called safely. +// We can unregister these init handlers from SAL, however then the INIT +// will result in warmboot by SAL and we cannot retrieve the crashdump. +// Therefore register this NOP function to SAL, to prevent entering virtual +// mode and resulting warmboot by SAL. +// +ia64_os_init_on_kdump: + mov r8=r0 // IA64_INIT_RESUME + mov r9=r10 // SAL_GP + mov r22=r17 // *minstate + ;; + mov r10=r0 // return to same context + mov b0=r12 // SAL_CHECK return address + br b0 + +// // SAL to OS entry point for INIT on all processors. This has been defined for // registration purposes with SAL as a part of ia64_mca_init. Monarch and // slave INIT have identical processing, except for the value of the @@ -1073,3 +1093,30 @@ GLOBAL_ENTRY(ia64_get_rnat) mov ar.rsc=3 br.ret.sptk.many rp END(ia64_get_rnat) + + +// void ia64_set_psr_mc(void) +// +// Set psr.mc bit to mask MCA/INIT. 
+GLOBAL_ENTRY(ia64_set_psr_mc) + rsm psr.i | psr.ic // disable interrupts + ;; + srlz.d + ;; + mov r14 = psr // get psr{36:35,31:0} + movl r15 = 1f + ;; + dep r14 = -1, r14, PSR_MC, 1 // set psr.mc + ;; + dep r14 = -1, r14, PSR_IC, 1 // set psr.ic + ;; + dep r14 = -1, r14, PSR_BN, 1 // keep bank1 in use + ;; + mov cr.ipsr = r14 + mov cr.ifs = r0 + mov cr.iip = r15 + ;; + rfi +1: + br.ret.sptk.many rp +END(ia64_set_psr_mc) diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c index 223abb134105..285aae8431c6 100644 --- a/arch/ia64/kernel/pci-swiotlb.c +++ b/arch/ia64/kernel/pci-swiotlb.c @@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void) void __init pci_swiotlb_init(void) { - if (!iommu_detected || iommu_pass_through) { + if (!iommu_detected) { #ifdef CONFIG_IA64_GENERIC swiotlb = 1; printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S index 903babd22d62..32f6fc131fbe 100644 --- a/arch/ia64/kernel/relocate_kernel.S +++ b/arch/ia64/kernel/relocate_kernel.S @@ -52,7 +52,7 @@ GLOBAL_ENTRY(relocate_new_kernel) srlz.i ;; mov ar.rnat=r18 - rfi + rfi // note: this unmask MCA/INIT (psr.mc) ;; 1: //physical mode code begin diff --git a/arch/ia64/kernel/smp.c b/arch/ia64/kernel/smp.c index 93ebfea43c6c..dabeefe21134 100644 --- a/arch/ia64/kernel/smp.c +++ b/arch/ia64/kernel/smp.c @@ -302,7 +302,7 @@ smp_flush_tlb_mm (struct mm_struct *mm) return; } - smp_call_function_mask(mm->cpu_vm_mask, + smp_call_function_many(mm_cpumask(mm), (void (*)(void *))local_finish_flush_tlb_mm, mm, 1); local_irq_disable(); local_finish_flush_tlb_mm(mm); diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index eb4214d1c5af..0a0c77b2c988 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -51,8 +51,6 @@ SECTIONS KPROBES_TEXT *(.gnu.linkonce.t*) } - .text.head : AT(ADDR(.text.head) - LOAD_OFFSET) - { *(.text.head) } .text2 : AT(ADDR(.text2) - LOAD_OFFSET) { *(.text2) } #ifdef CONFIG_SMP @@ -66,14 +64,7 @@ SECTIONS NOTES :code :note /* put .notes in text and mark in PT_NOTE */ code_continues : {} :code /* switch back to regular program... */ - /* Exception table */ - . = ALIGN(16); - __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) - { - __start___ex_table = .; - *(__ex_table) - __stop___ex_table = .; - } + EXCEPTION_TABLE(16) /* MCA table */ . = ALIGN(16); @@ -115,38 +106,9 @@ SECTIONS . = ALIGN(PAGE_SIZE); __init_begin = .; - .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) - { - _sinittext = .; - INIT_TEXT - _einittext = .; - } - - .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) - { INIT_DATA } -#ifdef CONFIG_BLK_DEV_INITRD - .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) - { - __initramfs_start = .; - *(.init.ramfs) - __initramfs_end = .; - } -#endif - - . = ALIGN(16); - .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) - { - __setup_start = .; - *(.init.setup) - __setup_end = .; - } - .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) - { - __initcall_start = .; - INITCALLS - __initcall_end = .; - } + INIT_TEXT_SECTION(PAGE_SIZE) + INIT_DATA_SECTION(16) .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) { @@ -204,24 +166,13 @@ SECTIONS } #endif - . 
= ALIGN(8); - __con_initcall_start = .; - .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) - { *(.con_initcall.init) } - __con_initcall_end = .; - __security_initcall_start = .; - .security_initcall.init : AT(ADDR(.security_initcall.init) - LOAD_OFFSET) - { *(.security_initcall.init) } - __security_initcall_end = .; . = ALIGN(PAGE_SIZE); __init_end = .; - /* The initial task and kernel stack */ - .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) - { *(.data.init_task) } - .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) - { *(__special_page_section) + { + PAGE_ALIGNED_DATA(PAGE_SIZE) + . = ALIGN(PAGE_SIZE); __start_gate_section = .; *(.data.gate) __stop_gate_section = .; @@ -236,12 +187,6 @@ SECTIONS * kernel data */ - .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) - { *(.data.read_mostly) } - - .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) - { *(.data.cacheline_aligned) } - /* Per-cpu data: */ . = ALIGN(PERCPU_PAGE_SIZE); PERCPU_VADDR(PERCPU_ADDR, :percpu) @@ -258,6 +203,9 @@ SECTIONS __cpu0_per_cpu = .; . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */ #endif + INIT_TASK_DATA(PAGE_SIZE) + CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) + READ_MOSTLY_DATA(SMP_CACHE_BYTES) DATA_DATA *(.data1) *(.gnu.linkonce.d*) @@ -274,48 +222,15 @@ SECTIONS .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) { *(.sdata) *(.sdata1) *(.srdata) } _edata = .; - __bss_start = .; - .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) - { *(.sbss) *(.scommon) } - .bss : AT(ADDR(.bss) - LOAD_OFFSET) - { *(.bss) *(COMMON) } - __bss_stop = .; + + BSS_SECTION(0, 0, 0) _end = .; code : { } :code - /* Stabs debugging sections. */ - .stab 0 : { *(.stab) } - .stabstr 0 : { *(.stabstr) } - .stab.excl 0 : { *(.stab.excl) } - .stab.exclstr 0 : { *(.stab.exclstr) } - .stab.index 0 : { *(.stab.index) } - .stab.indexstr 0 : { *(.stab.indexstr) } - /* DWARF debug sections. - Symbols in the DWARF debugging sections are relative to the beginning - of the section so we begin them at 0. 
*/ - /* DWARF 1 */ - .debug 0 : { *(.debug) } - .line 0 : { *(.line) } - /* GNU DWARF 1 extensions */ - .debug_srcinfo 0 : { *(.debug_srcinfo) } - .debug_sfnames 0 : { *(.debug_sfnames) } - /* DWARF 1.1 and DWARF 2 */ - .debug_aranges 0 : { *(.debug_aranges) } - .debug_pubnames 0 : { *(.debug_pubnames) } - /* DWARF 2 */ - .debug_info 0 : { *(.debug_info) } - .debug_abbrev 0 : { *(.debug_abbrev) } - .debug_line 0 : { *(.debug_line) } - .debug_frame 0 : { *(.debug_frame) } - .debug_str 0 : { *(.debug_str) } - .debug_loc 0 : { *(.debug_loc) } - .debug_macinfo 0 : { *(.debug_macinfo) } - /* SGI/MIPS DWARF 2 extensions */ - .debug_weaknames 0 : { *(.debug_weaknames) } - .debug_funcnames 0 : { *(.debug_funcnames) } - .debug_typenames 0 : { *(.debug_typenames) } - .debug_varnames 0 : { *(.debug_varnames) } + + STABS_DEBUG + DWARF_DEBUG /* Default discards */ DISCARDS diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index b115b3bbf04a..1857766a63c1 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -617,7 +617,6 @@ mem_init (void) long reserved_pages, codesize, datasize, initsize; pg_data_t *pgdat; int i; - static struct kcore_list kcore_mem, kcore_vmem, kcore_kernel; BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE); BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE); @@ -639,10 +638,6 @@ mem_init (void) high_memory = __va(max_low_pfn * PAGE_SIZE); - kclist_add(&kcore_mem, __va(0), max_low_pfn * PAGE_SIZE); - kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); - kclist_add(&kcore_kernel, _stext, _end - _stext); - for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); @@ -655,7 +650,7 @@ mem_init (void) initsize = (unsigned long) __init_end - (unsigned long) __init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%luk code, %luk reserved, " - "%luk data, %luk init)\n", (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10), + "%luk data, %luk init)\n", nr_free_pages() << (PAGE_SHIFT - 10), num_physpages << (PAGE_SHIFT - 10), codesize >> 10, reserved_pages << (PAGE_SHIFT - 10), datasize >> 10, initsize >> 10); diff --git a/arch/ia64/oprofile/backtrace.c b/arch/ia64/oprofile/backtrace.c index adb01566bd57..5cdd7e4a597c 100644 --- a/arch/ia64/oprofile/backtrace.c +++ b/arch/ia64/oprofile/backtrace.c @@ -32,24 +32,6 @@ typedef struct u64 *prev_pfs_loc; /* state for WAR for old spinlock ool code */ } ia64_backtrace_t; -#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) -/* - * Returns non-zero if the PC is in the spinlock contention out-of-line code - * with non-standard calling sequence (on older compilers). 
- */ -static __inline__ int in_old_ool_spinlock_code(unsigned long pc) -{ - extern const char ia64_spinlock_contention_pre3_4[] __attribute__ ((weak)); - extern const char ia64_spinlock_contention_pre3_4_end[] __attribute__ ((weak)); - unsigned long sc_start = (unsigned long)ia64_spinlock_contention_pre3_4; - unsigned long sc_end = (unsigned long)ia64_spinlock_contention_pre3_4_end; - return (sc_start && sc_end && pc >= sc_start && pc < sc_end); -} -#else -/* Newer spinlock code does a proper br.call and works fine with the unwinder */ -#define in_old_ool_spinlock_code(pc) 0 -#endif - /* Returns non-zero if the PC is in the Interrupt Vector Table */ static __inline__ int in_ivt_code(unsigned long pc) { @@ -80,7 +62,7 @@ static __inline__ int next_frame(ia64_backtrace_t *bt) */ if (bt->prev_pfs_loc && bt->regs && bt->frame.pfs_loc == bt->prev_pfs_loc) bt->frame.pfs_loc = &bt->regs->ar_pfs; - bt->prev_pfs_loc = (in_old_ool_spinlock_code(bt->frame.ip) ? bt->frame.pfs_loc : NULL); + bt->prev_pfs_loc = NULL; return unw_unwind(&bt->frame) == 0; } diff --git a/arch/ia64/sn/pci/pcibr/pcibr_ate.c b/arch/ia64/sn/pci/pcibr/pcibr_ate.c index 239b3cedcf2b..5bc34eac9e01 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_ate.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_ate.c @@ -54,6 +54,8 @@ static int find_free_ate(struct ate_resource *ate_resource, int start, break; } } + if (i >= ate_resource->num_ate) + return -1; } else index++; /* Try next ate */ } |
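Editor's note: the largest conceptual change above is the switch from the old cmpxchg-based spinlock (with the out-of-line ia64_spinlock_contention path) to a ticket lock, which hands the lock to waiters in FIFO order. Below is a minimal, stand-alone C sketch of that idea, offered only as an illustration of the comment in the spinlock.h hunk. It is not the kernel code: the names are made up, C11 atomics stand in for ia64_fetchadd/ACCESS_ONCE/ia64_cmpxchg, and the two counters are kept as separate fields for readability, whereas the patch packs next_ticket_number and now_serving into one word as shown in the layout diagram.

#include <stdatomic.h>
#include <stdint.h>

/* Illustrative sketch only -- not the arch/ia64 implementation. */
struct ticket_lock {
	_Atomic uint32_t next;		/* next ticket to hand out (tail) */
	_Atomic uint32_t serving;	/* ticket currently served (head) */
};

static void ticket_lock_acquire(struct ticket_lock *l)
{
	/* Atomically take the next ticket, i.e. join the queue and
	 * remember our position; this mirrors ia64_fetchadd(1, p+1, acq). */
	uint32_t ticket = atomic_fetch_add_explicit(&l->next, 1,
						    memory_order_relaxed);

	/* Spin until the head of the queue reaches our ticket number. */
	while (atomic_load_explicit(&l->serving,
				    memory_order_acquire) != ticket)
		;	/* the kernel calls cpu_relax() here */
}

static void ticket_lock_release(struct ticket_lock *l)
{
	/* Advance "now serving" by one, passing the lock to the next
	 * waiter in FIFO order; mirrors ia64_fetchadd(1, p, rel). */
	atomic_fetch_add_explicit(&l->serving, 1, memory_order_release);
}

The point of the change is fairness: with the old lock, whichever CPU won the cmpxchg race took the lock, so a waiter could starve indefinitely; with tickets, waiters are served strictly in arrival order, and is_locked/is_contended fall out of simply comparing the two counters.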