Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig | 3
-rw-r--r--  arch/ia64/Makefile | 2
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 40
-rw-r--r--  arch/ia64/ia32/elfcore32.h | 2
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 4
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 58
-rw-r--r--  arch/ia64/include/asm/acpi.h | 6
-rw-r--r--  arch/ia64/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/ia64/include/asm/bitops.h | 2
-rw-r--r--  arch/ia64/include/asm/cacheflush.h | 1
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/ia64/include/asm/elf.h | 5
-rw-r--r--  arch/ia64/include/asm/ftrace.h | 1
-rw-r--r--  arch/ia64/include/asm/hw_irq.h | 6
-rw-r--r--  arch/ia64/include/asm/io.h | 2
-rw-r--r--  arch/ia64/include/asm/irq.h | 2
-rw-r--r--  arch/ia64/include/asm/kprobes.h | 5
-rw-r--r--  arch/ia64/include/asm/kvm.h | 1
-rw-r--r--  arch/ia64/include/asm/kvm_host.h | 1
-rw-r--r--  arch/ia64/include/asm/mca.h | 5
-rw-r--r--  arch/ia64/include/asm/meminit.h | 2
-rw-r--r--  arch/ia64/include/asm/numa.h | 2
-rw-r--r--  arch/ia64/include/asm/perfmon_default_smpl.h | 2
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 3
-rw-r--r--  arch/ia64/include/asm/processor.h | 6
-rw-r--r--  arch/ia64/include/asm/rwsem.h | 2
-rw-r--r--  arch/ia64/include/asm/sn/shubio.h | 2
-rw-r--r--  arch/ia64/include/asm/socket.h | 2
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 136
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h | 10
-rw-r--r--  arch/ia64/include/asm/swiotlb.h | 2
-rw-r--r--  arch/ia64/include/asm/tlb.h | 2
-rw-r--r--  arch/ia64/include/asm/topology.h | 4
-rw-r--r--  arch/ia64/include/asm/types.h | 5
-rw-r--r--  arch/ia64/include/asm/unistd.h | 3
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h | 28
-rw-r--r--  arch/ia64/kernel/Makefile | 11
-rw-r--r--  arch/ia64/kernel/acpi-processor.c | 85
-rw-r--r--  arch/ia64/kernel/acpi.c | 33
-rw-r--r--  arch/ia64/kernel/crash.c | 11
-rw-r--r--  arch/ia64/kernel/entry.S | 1
-rw-r--r--  arch/ia64/kernel/esi.c | 2
-rw-r--r--  arch/ia64/kernel/head.S | 4
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 2
-rw-r--r--  arch/ia64/kernel/iosapic.c | 6
-rw-r--r--  arch/ia64/kernel/irq.c | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 10
-rw-r--r--  arch/ia64/kernel/mca.c | 114
-rw-r--r--  arch/ia64/kernel/mca_asm.S | 2
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c | 4
-rw-r--r--  arch/ia64/kernel/perfmon.c | 33
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 27
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 83
-rw-r--r--  arch/ia64/kernel/time.c | 4
-rw-r--r--  arch/ia64/kernel/unaligned.c | 6
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 11
-rw-r--r--  arch/ia64/kvm/Makefile | 2
-rw-r--r--  arch/ia64/kvm/asm-offsets.c | 1
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 19
-rw-r--r--  arch/ia64/kvm/vcpu.h | 9
-rw-r--r--  arch/ia64/kvm/vmm.c | 4
-rw-r--r--  arch/ia64/kvm/vtlb.c | 2
-rw-r--r--  arch/ia64/mm/contig.c | 99
-rw-r--r--  arch/ia64/mm/discontig.c | 129
-rw-r--r--  arch/ia64/mm/init.c | 6
-rw-r--r--  arch/ia64/mm/ioremap.c | 11
-rw-r--r--  arch/ia64/mm/tlb.c | 56
-rw-r--r--  arch/ia64/pci/pci.c | 42
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c | 2
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 8
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c | 8
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn_hwperf.c | 7
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 19
-rw-r--r--  arch/ia64/xen/irq_xen.c | 131
-rw-r--r--  arch/ia64/xen/time.c | 22
76 files changed, 693 insertions, 699 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..2d7f56a98e0f 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -87,9 +87,6 @@ config GENERIC_TIME_VSYSCALL
bool
default y
-config HAVE_LEGACY_PER_CPU_AREA
- def_bool y
-
config HAVE_SETUP_PER_CPU_AREA
def_bool y
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e7cbaa02cd0b..475e2725fbde 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -103,4 +103,4 @@ archprepare: make_nr_irqs_h FORCE
PHONY += make_nr_irqs_h FORCE
make_nr_irqs_h: FORCE
- $(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
+ $(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
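
Note (not part of the patch): this hunk, together with the asm/irq.h, asm/asm-offsets.h and kernel/Makefile hunks further down, moves generated headers from the old include/asm-ia64/ location to include/generated/. The consumer side is simply:

    #include <generated/nr-irqs.h>      /* was <asm-ia64/nr-irqs.h> */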
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 674a8374c6d9..e14c492a8a93 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
spin_unlock_irqrestore(&ioc->saved_lock, flags);
pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
- if (unlikely(pide >= (ioc->res_size << 3)))
- panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
- ioc->ioc_hpa);
+ if (unlikely(pide >= (ioc->res_size << 3))) {
+ printk(KERN_WARNING "%s: I/O MMU @ %p is"
+ "out of mapping resources, %u %u %lx\n",
+ __func__, ioc->ioc_hpa, ioc->res_size,
+ pages_needed, dma_get_seg_boundary(dev));
+ return -1;
+ }
#else
- panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
- ioc->ioc_hpa);
+ printk(KERN_WARNING "%s: I/O MMU @ %p is"
+ "out of mapping resources, %u %u %lx\n",
+ __func__, ioc->ioc_hpa, ioc->res_size,
+ pages_needed, dma_get_seg_boundary(dev));
+ return -1;
#endif
}
}
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
#endif
pide = sba_alloc_range(ioc, dev, size);
+ if (pide < 0)
+ return 0;
iovp = (dma_addr_t) pide << iovp_shift;
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
unsigned long dma_offset, dma_len; /* start/len of DMA stream */
int n_mappings = 0;
unsigned int max_seg_size = dma_get_max_seg_size(dev);
+ int idx;
while (nents > 0) {
unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1381,7 +1391,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
#endif
/*
- ** Not virtually contigous.
+ ** Not virtually contiguous.
** Terminate prev chunk.
** Start a new chunk.
**
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
vcontig_sg->dma_length = vcontig_len;
dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
ASSERT(dma_len <= DMA_CHUNK_SIZE);
- dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
- | (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
- | dma_offset);
+ idx = sba_alloc_range(ioc, dev, dma_len);
+ if (idx < 0) {
+ dma_sg->dma_length = 0;
+ return -1;
+ }
+ dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+ | dma_offset);
n_mappings++;
}
return n_mappings;
}
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+ int nents, enum dma_data_direction dir,
+ struct dma_attrs *attrs);
/**
* sba_map_sg - map Scatter/Gather list
* @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
** Access to the virtual address is what forces a two pass algorithm.
*/
coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+ if (coalesced < 0) {
+ sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+ return 0;
+ }
/*
** Program the I/O Pdir
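
Note (not part of the patch): these sba_iommu.c hunks replace the panic() on IOVA-bitmap exhaustion with a warning plus an error return, so failure now propagates up the mapping path instead of killing the machine; the forward declaration of sba_unmap_sg_attrs() exists only to make the early cleanup path possible. A condensed sketch of the resulting call chain, using only the return conventions visible in the hunks above:

    /* Sketch only -- condensed from the hunks above, not compilable as-is. */
    pide = sba_alloc_range(ioc, dev, size);         /* -1 when exhausted  */
    if (pide < 0)
            return 0;                               /* sba_map_page fails */

    coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
    if (coalesced < 0) {                            /* sba_map_sg_attrs   */
            sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
            return 0;                               /* no entries mapped  */
    }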
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
index 9a3abf58cea3..657725742617 100644
--- a/arch/ia64/ia32/elfcore32.h
+++ b/arch/ia64/ia32/elfcore32.h
@@ -11,8 +11,6 @@
#include <asm/intrinsics.h>
#include <asm/uaccess.h>
-#define USE_ELF_CORE_DUMP 1
-
/* Override elfcore.h */
#define _LINUX_ELFCORE_H 1
typedef unsigned int elf_greg_t;
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
index af9405cd70e5..2fd7479aa216 100644
--- a/arch/ia64/ia32/ia32_entry.S
+++ b/arch/ia64/ia32/ia32_entry.S
@@ -79,7 +79,7 @@ GLOBAL_ENTRY(ia32_ret_from_clone)
(p6) br.cond.spnt .ia32_strace_check_retval
;; // prevent RAW on r8
END(ia32_ret_from_clone)
- // fall thrugh
+ // fall through
GLOBAL_ENTRY(ia32_ret_from_syscall)
PT_REGS_UNWIND_INFO(0)
@@ -327,7 +327,7 @@ ia32_syscall_table:
data8 compat_sys_writev
data8 sys_getsid
data8 sys_fdatasync
- data8 sys32_sysctl
+ data8 compat_sys_sysctl
data8 sys_mlock /* 150 */
data8 sys_munlock
data8 sys_mlockall
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index 625ed8f76fce..045b746b9808 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -858,6 +858,9 @@ ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot
prot = get_prot32(prot);
+ if (flags & MAP_HUGETLB)
+ return -ENOMEM;
+
#if PAGE_SHIFT > IA32_PAGE_SHIFT
mutex_lock(&ia32_mmap_mutex);
{
@@ -1628,61 +1631,6 @@ sys32_msync (unsigned int start, unsigned int len, int flags)
return sys_msync(addr, len + (start - addr), flags);
}
-struct sysctl32 {
- unsigned int name;
- int nlen;
- unsigned int oldval;
- unsigned int oldlenp;
- unsigned int newval;
- unsigned int newlen;
- unsigned int __unused[4];
-};
-
-#ifdef CONFIG_SYSCTL_SYSCALL
-asmlinkage long
-sys32_sysctl (struct sysctl32 __user *args)
-{
- struct sysctl32 a32;
- mm_segment_t old_fs = get_fs ();
- void __user *oldvalp, *newvalp;
- size_t oldlen;
- int __user *namep;
- long ret;
-
- if (copy_from_user(&a32, args, sizeof(a32)))
- return -EFAULT;
-
- /*
- * We need to pre-validate these because we have to disable address checking
- * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
- * user specifying bad addresses here. Well, since we're dealing with 32 bit
- * addresses, we KNOW that access_ok() will always succeed, so this is an
- * expensive NOP, but so what...
- */
- namep = (int __user *) compat_ptr(a32.name);
- oldvalp = compat_ptr(a32.oldval);
- newvalp = compat_ptr(a32.newval);
-
- if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
- || !access_ok(VERIFY_WRITE, namep, 0)
- || !access_ok(VERIFY_WRITE, oldvalp, 0)
- || !access_ok(VERIFY_WRITE, newvalp, 0))
- return -EFAULT;
-
- set_fs(KERNEL_DS);
- lock_kernel();
- ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
- newvalp, (size_t) a32.newlen);
- unlock_kernel();
- set_fs(old_fs);
-
- if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
- return -EFAULT;
-
- return ret;
-}
-#endif
-
asmlinkage long
sys32_newuname (struct new_utsname __user *name)
{
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 91df9686a0da..7ae58892ba8d 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -132,6 +132,12 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
#endif
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+ buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
#define acpi_unlazy_tlb(x)
#ifdef CONFIG_ACPI_NUMA
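
Note (not part of the patch): arch_has_acpi_pdc() and arch_acpi_set_pdc_bits() are the replacement for the deleted arch/ia64/kernel/acpi-processor.c further down; construction of the _PDC buffer moves into common ACPI code, which is assumed to call these hooks roughly as follows (the two hook names are from the hunk, the surrounding flow is illustrative):

    if (arch_has_acpi_pdc()) {
            u32 buf[3] = { ACPI_PDC_REVISION_ID, 1, 0 };

            arch_acpi_set_pdc_bits(buf);    /* arch ORs in capability bits */
            /* ...wrap buf in an acpi_object_list and evaluate _PDC... */
    }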
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
* @addr: Address to start counting from
*
* Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
*/
static __inline__ void
__clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index c8ce2719fee8..429eefc93ee7 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -25,6 +25,7 @@
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
#define flush_dcache_page(page) \
do { \
clear_bit(PG_arch_1, &(page)->flags); \
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e7..7d09a09cdaad 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
if (!dev->dma_mask)
return 0;
- return addr + size <= *dev->dma_mask;
+ return addr + size - 1 <= *dev->dma_mask;
}
static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
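
Note (not part of the patch): the dma_capable() change is an off-by-one fix: addr + size is the first byte *after* the buffer, so a buffer ending exactly at the mask limit was wrongly rejected. A self-contained demonstration with illustrative values (userspace C):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t mask = 0xffffffffULL;  /* 32-bit DMA mask        */
            uint64_t addr = 0xfffff000ULL;  /* last page below 4 GiB  */
            uint64_t size = 0x1000;

            /* old test rejects a buffer that is actually reachable */
            printf("old: %d\n", addr + size <= mask);       /* 0 */
            /* new test checks the last byte of the buffer */
            printf("new: %d\n", addr + size - 1 <= mask);   /* 1 */
            return 0;
    }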
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029cb..4c41656ede87 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
#define ELF_DATA ELFDATA2LSB
#define ELF_ARCH EM_IA_64
-#define USE_ELF_CORE_DUMP
#define CORE_DUMP_USE_REGSET
/* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
@@ -202,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
relevant until we have real hardware to play with... */
#define ELF_PLATFORM NULL
-#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+ set_personality((current->personality & ~PER_MASK) | PER_LINUX)
+
#define elf_read_implies_exec(ex, executable_stack) \
((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a656..fbd1a2470cae 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@
extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
#define mcount _mcount
-#include <asm/kprobes.h>
/* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
#define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
#define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf5..bf2e37493e04 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
extern int ia64_first_device_vector;
extern int ia64_last_device_vector;
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR 0x30 /* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR 0x31
+#else
#define IA64_DEF_FIRST_DEVICE_VECTOR 0x30
+#endif
#define IA64_DEF_LAST_DEVICE_VECTOR 0xe7
#define IA64_FIRST_DEVICE_VECTOR ia64_first_device_vector
#define IA64_LAST_DEVICE_VECTOR ia64_last_device_vector
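
Note (not part of the patch): read together with the irq_ia64.c hunk below, this pins the "move IRQ" IPI to a fixed vector below the device range instead of stealing IA64_FIRST_DEVICE_VECTOR at runtime. Resulting layout on SMP GENERIC/DIG builds, from the defines above:

    0x30        IA64_IRQ_MOVE_VECTOR            /* "move IRQ" IPI */
    0x31        IA64_DEF_FIRST_DEVICE_VECTOR
    ...
    0xe7        IA64_DEF_LAST_DEVICE_VECTOR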
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d949..cc8335eb3110 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
extern void __iomem * ioremap(unsigned long offset, unsigned long size);
extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
/*
* String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5282546cdf82..91b920fd7d53 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -13,7 +13,7 @@
#include <linux/types.h>
#include <linux/cpumask.h>
-#include <asm-ia64/nr-irqs.h>
+#include <generated/nr-irqs.h>
static __inline__ int
irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db3..d5505d6f2382 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
bundle_t bundle;
} kprobe_opcode_t;
-struct fnptr {
- unsigned long ip;
- unsigned long gp;
-};
-
/* Architecture specific copy of original instruction*/
struct arch_specific_insn {
/* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 18a7e49abbc5..bc90c75adf67 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
#define KVM_IRQCHIP_PIC_MASTER 0
#define KVM_IRQCHIP_PIC_SLAVE 1
#define KVM_IRQCHIP_IOAPIC 2
+#define KVM_NR_IRQCHIPS 3
#define KVM_CONTEXT_SIZE 8*1024
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index d9b6325a9328..a362e67e0ca6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -475,7 +475,6 @@ struct kvm_arch {
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
int iommu_flags;
- struct hlist_head irq_ack_notifier_list;
unsigned long irq_sources_bitmap;
unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a789..43f96ab18fa0 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
unsigned long os_status; /* OS status to SAL, enum below */
unsigned long context; /* 0 if return to same context
1 if return to new context */
+
+ /* I-resources */
+ unsigned long iip;
+ unsigned long ipsr;
+ unsigned long ifs;
};
enum {
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
- extern unsigned long vmalloc_end;
+ extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
extern int find_largest_hole(u64 start, u64 end, void *arg);
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
#include <asm/mmzone.h>
-#define NUMA_NO_NODE -1
-
extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 48822c0811d8..74724b24c2b7 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,7 +67,7 @@ typedef struct {
unsigned long ip; /* where did the overflow interrupt happened */
unsigned long tstamp; /* ar.itc when entering perfmon intr. handler */
- unsigned short cpu; /* cpu on which the overfow occured */
+ unsigned short cpu; /* cpu on which the overflow occured */
unsigned short set; /* event set active when overflow ocurred */
int tgid; /* thread group id (for NPTL, this is getpid()) */
} pfm_default_smpl_entry_t;
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..69bf13857a9f 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
#define VMALLOC_START (RGN_BASE(RGN_GATE) + 0x200000000UL)
#ifdef CONFIG_VIRTUAL_MEM_MAP
# define VMALLOC_END_INIT (RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END vmalloc_end
- extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
#else
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
/* SPARSEMEM_VMEMMAP uses half of vmalloc... */
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..7fa90f73f6be 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
#endif
};
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
/*
* The "local" data variable. It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
* Do not use the address of local_cpu_data, since it will be different from
* cpu_data(smp_processor_id())!
*/
-#define local_cpu_data (&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu) (&per_cpu(cpu_info, cpu))
+#define local_cpu_data (&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu) (&per_cpu(ia64_cpu_info, cpu))
extern void print_cpu_info (struct cpuinfo_ia64 *);
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b15782..e8762688e8e3 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
#define __RWSEM_INITIALIZER(name) \
- { RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+ { RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 22a6f18a5313..6052422a22b3 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t;
#define IIO_IIDSR_LVL_SHIFT 0
#define IIO_IIDSR_LVL_MASK 0x000000ff
-/* Xtalk timeout threshhold register (IIO_IXTT) */
+/* Xtalk timeout threshold register (IIO_IXTT) */
#define IXTT_RRSP_TO_SHFT 55 /* read response timeout */
#define IXTT_RRSP_TO_MASK (0x1FULL << IXTT_RRSP_TO_SHFT)
#define IXTT_RRSP_PS_SHFT 32 /* read responsed TO prescalar */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 0b0d5ff062e5..51427eaa51ba 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -69,4 +69,6 @@
#define SO_PROTOCOL 38
#define SO_DOMAIN 39
+#define SO_RXQ_OVFL 40
+
#endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 30bb930e1111..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
#include <asm/intrinsics.h>
#include <asm/system.h>
-#define __raw_spin_lock_init(x) ((x)->lock = 0)
+#define arch_spin_lock_init(x) ((x)->lock = 0)
/*
* Ticket locks are conceptually two parts, one indicating the current head of
@@ -25,108 +25,128 @@
* by atomically noting the tail and incrementing it by one (thus adding
* ourself to the queue and noting our position), then waiting until the head
* becomes equal to the the initial value of the tail.
+ * The pad bits in the middle are used to prevent the next_ticket number
+ * overflowing into the now_serving number.
*
- * 63 32 31 0
+ * 31 17 16 15 14 0
* +----------------------------------------------------+
- * | next_ticket_number | now_serving |
+ * | now_serving | padding | next_ticket |
* +----------------------------------------------------+
*/
-#define TICKET_SHIFT 32
+#define TICKET_SHIFT 17
+#define TICKET_BITS 15
+#define TICKET_MASK ((1 << TICKET_BITS) - 1)
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
{
- int *p = (int *)&lock->lock, turn, now_serving;
+ int *p = (int *)&lock->lock, ticket, serve;
- now_serving = *p;
- turn = ia64_fetchadd(1, p+1, acq);
+ ticket = ia64_fetchadd(1, p, acq);
- if (turn == now_serving)
+ if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
return;
- do {
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(p) : "memory");
+
+ if (!(((serve >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
cpu_relax();
- } while (ACCESS_ONCE(*p) != turn);
+ }
}
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
- long tmp = ACCESS_ONCE(lock->lock), try;
-
- if (!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1))) {
- try = tmp + (1L << TICKET_SHIFT);
+ int tmp = ACCESS_ONCE(lock->lock);
- return ia64_cmpxchg(acq, &lock->lock, tmp, try, sizeof (tmp)) == tmp;
- }
+ if (!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK))
+ return ia64_cmpxchg(acq, &lock->lock, tmp, tmp + 1, sizeof (tmp)) == tmp;
return 0;
}
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
{
- int *p = (int *)&lock->lock;
+ unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
- (void)ia64_fetchadd(1, p, rel);
+ asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
+ ACCESS_ONCE(*p) = (tmp + 2) & ~1;
}
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ int *p = (int *)&lock->lock, ticket;
+
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(ticket) : "r"(p) : "memory");
+ if (!(((ticket >> TICKET_SHIFT) ^ ticket) & TICKET_MASK))
+ return;
+ cpu_relax();
+ }
+}
+
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
- return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1L << TICKET_SHIFT) - 1));
+ return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
}
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
{
long tmp = ACCESS_ONCE(lock->lock);
- return (((tmp >> TICKET_SHIFT) - tmp) & ((1L << TICKET_SHIFT) - 1)) > 1;
+ return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
}
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
return __ticket_spin_is_locked(lock);
}
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
return __ticket_spin_is_contended(lock);
}
-#define __raw_spin_is_contended __raw_spin_is_contended
+#define arch_spin_is_contended arch_spin_is_contended
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
{
__ticket_spin_lock(lock);
}
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
{
return __ticket_spin_trylock(lock);
}
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
{
__ticket_spin_unlock(lock);
}
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
unsigned long flags)
{
- __raw_spin_lock(lock);
+ arch_spin_lock(lock);
}
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
- while (__raw_spin_is_locked(lock))
- cpu_relax();
+ __ticket_spin_unlock_wait(lock);
}
-#define __raw_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw) (*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw) (*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw) (*(volatile int *)(rw) == 0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1,%2\n"
@@ -149,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "p6", "p7", "r2", "memory");
}
-#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0)
+#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
#else /* !ASM_SUPPORTED */
-#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw)
+#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
-#define __raw_read_lock(rw) \
+#define arch_read_lock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
\
while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -168,16 +188,16 @@ do { \
#endif /* !ASM_SUPPORTED */
-#define __raw_read_unlock(rw) \
+#define arch_read_unlock(rw) \
do { \
- raw_rwlock_t *__read_lock_ptr = (rw); \
+ arch_rwlock_t *__read_lock_ptr = (rw); \
ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
} while (0)
#ifdef ASM_SUPPORTED
static __always_inline void
-__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
{
__asm__ __volatile__ (
"tbit.nz p6, p0 = %1, %2\n"
@@ -201,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
: "ar.ccv", "p6", "p7", "r2", "r29", "memory");
}
-#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0)
+#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
register long result; \
\
@@ -215,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
(result == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
u8 *y = (u8 *)x;
barrier();
@@ -224,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#else /* !ASM_SUPPORTED */
-#define __raw_write_lock_flags(l, flags) __raw_write_lock(l)
+#define arch_write_lock_flags(l, flags) arch_write_lock(l)
-#define __raw_write_lock(l) \
+#define arch_write_lock(l) \
({ \
__u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
__u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -237,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
} while (ia64_val); \
})
-#define __raw_write_trylock(rw) \
+#define arch_write_trylock(rw) \
({ \
__u64 ia64_val; \
__u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -245,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
(ia64_val == 0); \
})
-static inline void __raw_write_unlock(raw_rwlock_t *x)
+static inline void arch_write_unlock(arch_rwlock_t *x)
{
barrier();
x->write_lock = 0;
@@ -253,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
#endif /* !ASM_SUPPORTED */
-static inline int __raw_read_trylock(raw_rwlock_t *x)
+static inline int arch_read_trylock(arch_rwlock_t *x)
{
union {
- raw_rwlock_t lock;
+ arch_rwlock_t lock;
__u32 word;
} old, new;
old.lock = new.lock = *x;
@@ -265,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
}
-#define _raw_spin_relax(lock) cpu_relax()
-#define _raw_read_relax(lock) cpu_relax()
-#define _raw_write_relax(lock) cpu_relax()
+#define arch_spin_relax(lock) cpu_relax()
+#define arch_read_relax(lock) cpu_relax()
+#define arch_write_relax(lock) cpu_relax()
#endif /* _ASM_IA64_SPINLOCK_H */
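
Note (not part of the patch): the ticket lock shrinks from a 64-bit word with 32-bit halves to a 32-bit word holding a 15-bit next_ticket (bits 0-14) and a 15-bit now_serving (bits 17-31), with bits 15-16 as padding against overflow; unlock bumps now_serving through a plain 16-bit store to the upper halfword. A userspace model of the encoding (no atomics, and the unlock is shown as a whole-word add where the patch uses the halfword store):

    #include <stdio.h>

    #define TICKET_SHIFT 17
    #define TICKET_BITS  15
    #define TICKET_MASK  ((1 << TICKET_BITS) - 1)

    static int is_locked(unsigned int lock)
    {
            /* locked iff next_ticket != now_serving */
            return !!(((lock >> TICKET_SHIFT) ^ lock) & TICKET_MASK);
    }

    int main(void)
    {
            unsigned int lock = 0;                   /* next == serving */

            lock += 1;                               /* take a ticket   */
            printf("locked: %d\n", is_locked(lock)); /* 1 */

            lock += 1u << TICKET_SHIFT;              /* serve next one  */
            printf("locked: %d\n", is_locked(lock)); /* 0 */
            return 0;
    }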
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index b61d136d9bc2..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -6,16 +6,16 @@
#endif
typedef struct {
- volatile unsigned long lock;
-} raw_spinlock_t;
+ volatile unsigned int lock;
+} arch_spinlock_t;
-#define __RAW_SPIN_LOCK_UNLOCKED { 0 }
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
typedef struct {
volatile unsigned int read_counter : 31;
volatile unsigned int write_lock : 1;
-} raw_rwlock_t;
+} arch_rwlock_t;
-#define __RAW_RW_LOCK_UNLOCKED { 0, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
#endif
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
-extern int swiotlb_force;
-
#ifdef CONFIG_SWIOTLB
extern int swiotlb;
extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0..23cce999eb1c 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
extern void ia64_ptr_entry(u64 target_mask, int slot);
-extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
region register macros
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709dba..d323071d0f91 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
/*
* Returns a bitmask of CPUs on Node 'node'.
*/
-#define cpumask_of_node(node) (&node_to_cpu_mask[node])
+#define cpumask_of_node(node) ((node) == -1 ? \
+ cpu_all_mask : \
+ &node_to_cpu_mask[node])
/*
* Returns the number of the node containing Node 'nid'.
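
Note (not part of the patch): with the ia64-private NUMA_NO_NODE define removed from asm/numa.h (hunk above), cpumask_of_node() must now cope with the generic NUMA_NO_NODE value of -1; returning cpu_all_mask avoids indexing node_to_cpu_mask[-1] out of bounds. A typical caller (illustrative):

    /* a device with no NUMA affinity now gets every CPU */
    const struct cpumask *mask = cpumask_of_node(dev_to_node(dev));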
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597de..93773fd37be0 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -35,6 +35,11 @@ typedef unsigned int umode_t;
*/
# ifdef __KERNEL__
+struct fnptr {
+ unsigned long ip;
+ unsigned long gp;
+};
+
/* DMA addresses are 64-bits wide, in general. */
typedef u64 dma_addr_t;
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 5a5347f5c4e4..10a8f21ca9e3 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -311,11 +311,12 @@
#define __NR_preadv 1319
#define __NR_pwritev 1320
#define __NR_rt_tgsigqueueinfo 1321
+#define __NR_recvmmsg 1322
#ifdef __KERNEL__
-#define NR_syscalls 298 /* length of syscall table */
+#define NR_syscalls 299 /* length of syscall table */
/*
* The following defines stop scripts/checksyscalls.sh from complaining about
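
Note (not part of the patch): wiring up recvmmsg touches three places in this diff -- the __NR_ definition and the NR_syscalls bump here, plus the sys_call_table slot in kernel/entry.S further down:

    #define __NR_recvmmsg 1322          /* asm/unistd.h              */
    #define NR_syscalls   299           /* table length              */
            data8 sys_recvmmsg          /* kernel/entry.S, slot 1322 */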
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e4..67455c2ed2b1 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
#include <xen/interface/xen.h>
#include <xen/interface/version.h> /* to compile feature.c */
#include <xen/features.h> /* to comiple xen-netfront.c */
+#include <xen/xen.h>
#include <asm/xen/hypercall.h>
-/* xen_domain_type is set before executing any C code by early_xen_setup */
-enum xen_domain_type {
- XEN_NATIVE, /* running on bare hardware */
- XEN_PV_DOMAIN, /* running in a PV domain */
- XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/
-};
-
-#ifdef CONFIG_XEN
-extern enum xen_domain_type xen_domain_type;
-#else
-#define xen_domain_type XEN_NATIVE
-#endif
-
-#define xen_domain() (xen_domain_type != XEN_NATIVE)
-#define xen_pv_domain() (xen_domain() && \
- xen_domain_type == XEN_PV_DOMAIN)
-#define xen_hvm_domain() (xen_domain() && \
- xen_domain_type == XEN_HVM_DOMAIN)
-
-#ifdef CONFIG_XEN_DOM0
-#define xen_initial_domain() (xen_pv_domain() && \
- (xen_start_info->flags & SIF_INITDOMAIN))
-#else
-#define xen_initial_domain() (0)
-#endif
-
-
#ifdef CONFIG_XEN
extern struct shared_info *HYPERVISOR_shared_info;
extern struct start_info *xen_start_info;
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 6b7edcab0cb5..e1236349c99f 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -18,10 +18,6 @@ obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
-ifneq ($(CONFIG_ACPI_PROCESSOR),)
-obj-y += acpi-processor.o
-endif
-
obj-$(CONFIG_IA64_PALINFO) += palinfo.o
obj-$(CONFIG_IOSAPIC) += iosapic.o
obj-$(CONFIG_MODULES) += module.o
@@ -81,17 +77,14 @@ define cmd_nr_irqs
endef
# We use internal kbuild rules to avoid the "is up to date" message from make
-arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \
- $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
+arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
$(Q)mkdir -p $(dir $@)
$(call if_changed_dep,cc_s_c)
-include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
+include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
$(Q)mkdir -p $(dir $@)
$(call cmd,nr_irqs)
-clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
-
#
# native ivt.S, entry.S and fsys.S
#
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
deleted file mode 100644
index dbda7bde6112..000000000000
--- a/arch/ia64/kernel/acpi-processor.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * arch/ia64/kernel/acpi-processor.c
- *
- * Copyright (C) 2005 Intel Corporation
- * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
- * - Added _PDC for platforms with Intel CPUs
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/acpi.h>
-
-#include <acpi/processor.h>
-#include <asm/acpi.h>
-
-static void init_intel_pdc(struct acpi_processor *pr)
-{
- struct acpi_object_list *obj_list;
- union acpi_object *obj;
- u32 *buf;
-
- /* allocate and initialize pdc. It will be used later. */
- obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
- if (!obj_list) {
- printk(KERN_ERR "Memory allocation error\n");
- return;
- }
-
- obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
- if (!obj) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj_list);
- return;
- }
-
- buf = kmalloc(12, GFP_KERNEL);
- if (!buf) {
- printk(KERN_ERR "Memory allocation error\n");
- kfree(obj);
- kfree(obj_list);
- return;
- }
-
- buf[0] = ACPI_PDC_REVISION_ID;
- buf[1] = 1;
- buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
- /*
- * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so
- * that OSPM is capable of native ACPI throttling software
- * coordination using BIOS supplied _TSD info.
- */
- buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
-
- obj->type = ACPI_TYPE_BUFFER;
- obj->buffer.length = 12;
- obj->buffer.pointer = (u8 *) buf;
- obj_list->count = 1;
- obj_list->pointer = obj;
- pr->pdc = obj_list;
-
- return;
-}
-
-/* Initialize _PDC data based on the CPU vendor */
-void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
-{
- pr->pdc = NULL;
- init_intel_pdc(pr);
- return;
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
-
-void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
-{
- if (pr->pdc) {
- kfree(pr->pdc->pointer->buffer.pointer);
- kfree(pr->pdc->pointer);
- kfree(pr->pdc);
- pr->pdc = NULL;
- }
-}
-
-EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..40574ae11401 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -702,11 +702,23 @@ int __init early_acpi_boot_init(void)
printk(KERN_ERR PREFIX
"Error parsing MADT - no LAPIC entries\n");
+#ifdef CONFIG_SMP
+ if (available_cpus == 0) {
+ printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
+ printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
+ smp_boot_data.cpu_phys_id[available_cpus] =
+ hard_smp_processor_id();
+ available_cpus = 1; /* We've got at least one of these, no? */
+ }
+ smp_boot_data.cpu_count = available_cpus;
+#endif
+ /* Make boot-up look pretty */
+ printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
+ total_cpus);
+
return 0;
}
-
-
int __init acpi_boot_init(void)
{
@@ -769,18 +781,8 @@ int __init acpi_boot_init(void)
if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
printk(KERN_ERR PREFIX "Can't find FADT\n");
+#ifdef CONFIG_ACPI_NUMA
#ifdef CONFIG_SMP
- if (available_cpus == 0) {
- printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
- printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
- smp_boot_data.cpu_phys_id[available_cpus] =
- hard_smp_processor_id();
- available_cpus = 1; /* We've got at least one of these, no? */
- }
- smp_boot_data.cpu_count = available_cpus;
-
- smp_build_cpu_map();
-# ifdef CONFIG_ACPI_NUMA
if (srat_num_cpus == 0) {
int cpu, i = 1;
for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +791,9 @@ int __init acpi_boot_init(void)
node_cpuid[i++].phys_id =
smp_boot_data.cpu_phys_id[cpu];
}
-# endif
#endif
-#ifdef CONFIG_ACPI_NUMA
build_cpu_to_node_map();
#endif
- /* Make boot-up look pretty */
- printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
- total_cpus);
return 0;
}
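
Note (not part of the patch): the CPU-count fixup moves from acpi_boot_init() into early_acpi_boot_init(), and smp_build_cpu_map() moves out of both (the setup.c hunk below now calls it unconditionally from setup_arch()). The resulting boot order, in outline:

    early_acpi_boot_init();     /* parse MADT, fix up available_cpus  */
    smp_build_cpu_map();        /* setup_arch(), all configurations   */
    acpi_boot_init();           /* later: NUMA cpu<->node mapping     */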
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 6631a9dfafdc..b942f4032d7a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
#ifdef CONFIG_SYSCTL
static ctl_table kdump_ctl_table[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "kdump_on_init",
.data = &kdump_on_init,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "kdump_on_fatal_mca",
.data = &kdump_on_fatal_mca,
.maxlen = sizeof(int),
.mode = 0644,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
- { .ctl_name = 0 }
+ { }
};
static ctl_table sys_table[] = {
{
- .ctl_name = CTL_KERN,
.procname = "kernel",
.mode = 0555,
.child = kdump_ctl_table,
},
- { .ctl_name = 0 }
+ { }
};
#endif
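
Note (not part of the patch): dropping .ctl_name/CTL_* here (and in perfmon.c below) is part of the tree-wide removal of the binary sysctl interface; entries are matched by procname alone and tables end with an empty sentinel. The entry shape the patch leaves behind, assembled from the hunk above:

    {
            .procname     = "kdump_on_init",
            .data         = &kdump_on_init,
            .maxlen       = sizeof(int),
            .mode         = 0644,
            .proc_handler = proc_dointvec,
    },
    { }     /* terminator */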
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d0e7d37017b4..d75b872ca4dc 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -1806,6 +1806,7 @@ sys_call_table:
data8 sys_preadv
data8 sys_pwritev // 1320
data8 sys_rt_tgsigqueueinfo
+ data8 sys_recvmmsg
.org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index d5764a3d74af..b091111270cb 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -84,7 +84,7 @@ static int __init esi_init (void)
case ESI_DESC_ENTRY_POINT:
break;
default:
- printk(KERN_WARNING "Unkown table type %d found in "
+ printk(KERN_WARNING "Unknown table type %d found in "
"ESI table, ignoring rest of table\n", *p);
return -ENODEV;
}
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
* intermediate precision so that we can produce a full 64-bit result.
*/
GLOBAL_ENTRY(ia64_native_sched_clock)
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
;;
ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
GLOBAL_ENTRY(cycle_to_cputime)
alloc r16=ar.pfs,1,0,0,0
- addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
+ addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
;;
ldf8 f8=[r8]
;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..461b99902bf6 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,7 +30,7 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
#endif
#include <asm/processor.h>
-EXPORT_SYMBOL(per_cpu__cpu_info);
+EXPORT_SYMBOL(per_cpu__ia64_cpu_info);
#ifdef CONFIG_SMP
EXPORT_SYMBOL(per_cpu__local_per_cpu_offset);
#endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..95ac77aeae9b 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -793,12 +793,12 @@ iosapic_register_intr (unsigned int gsi,
goto unlock_iosapic_lock;
}
- spin_lock(&irq_desc[irq].lock);
+ raw_spin_lock(&irq_desc[irq].lock);
dest = get_target_cpu(gsi, irq);
dmode = choose_dmode();
err = register_intr(gsi, irq, dmode, polarity, trigger);
if (err < 0) {
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
irq = err;
goto unlock_iosapic_lock;
}
@@ -817,7 +817,7 @@ iosapic_register_intr (unsigned int gsi,
(polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
cpu_logical_id(dest), dest, irq_to_vector(irq));
- spin_unlock(&irq_desc[irq].lock);
+ raw_spin_unlock(&irq_desc[irq].lock);
unlock_iosapic_lock:
spin_unlock_irqrestore(&iosapic_lock, flags);
return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
}
if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
+ raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
action = irq_desc[i].action;
if (!action)
goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
seq_putc(p, '\n');
skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
} else if (i == NR_IRQS)
seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
return 0;
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..d4093a173a3e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -260,7 +260,6 @@ void __setup_vector_irq(int cpu)
}
#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
-#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
static enum vector_domain_type {
VECTOR_DOMAIN_NONE,
@@ -345,7 +344,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
desc = irq_desc + irq;
cfg = irq_cfg + irq;
- spin_lock(&desc->lock);
+ raw_spin_lock(&desc->lock);
if (!cfg->move_cleanup_count)
goto unlock;
@@ -358,7 +357,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
spin_unlock_irqrestore(&vector_lock, flags);
cfg->move_cleanup_count--;
unlock:
- spin_unlock(&desc->lock);
+ raw_spin_unlock(&desc->lock);
}
return IRQ_HANDLED;
}
@@ -659,11 +658,8 @@ init_IRQ (void)
register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
#ifdef CONFIG_SMP
#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
- if (vector_domain_type != VECTOR_DOMAIN_NONE) {
- BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
- IA64_FIRST_DEVICE_VECTOR++;
+ if (vector_domain_type != VECTOR_DOMAIN_NONE)
register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
- }
#endif
#endif
#ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d2877a7bfe2e..378b4833024f 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -887,6 +887,65 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
memcpy(current->comm, comm, sizeof(current->comm));
}
+static void
+finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
+ unsigned long *nat)
+{
+ const pal_min_state_area_t *ms = sos->pal_min_state;
+ const u64 *bank;
+
+ /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
+ * pmsa_{xip,xpsr,xfs}
+ */
+ if (ia64_psr(regs)->ic) {
+ regs->cr_iip = ms->pmsa_iip;
+ regs->cr_ipsr = ms->pmsa_ipsr;
+ regs->cr_ifs = ms->pmsa_ifs;
+ } else {
+ regs->cr_iip = ms->pmsa_xip;
+ regs->cr_ipsr = ms->pmsa_xpsr;
+ regs->cr_ifs = ms->pmsa_xfs;
+
+ sos->iip = ms->pmsa_iip;
+ sos->ipsr = ms->pmsa_ipsr;
+ sos->ifs = ms->pmsa_ifs;
+ }
+ regs->pr = ms->pmsa_pr;
+ regs->b0 = ms->pmsa_br0;
+ regs->ar_rsc = ms->pmsa_rsc;
+ copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &regs->r1, nat);
+ copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &regs->r2, nat);
+ copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &regs->r3, nat);
+ copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &regs->r8, nat);
+ copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &regs->r9, nat);
+ copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &regs->r10, nat);
+ copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &regs->r11, nat);
+ copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &regs->r12, nat);
+ copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &regs->r13, nat);
+ copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &regs->r14, nat);
+ copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &regs->r15, nat);
+ if (ia64_psr(regs)->bn)
+ bank = ms->pmsa_bank1_gr;
+ else
+ bank = ms->pmsa_bank0_gr;
+ copy_reg(&bank[16-16], ms->pmsa_nat_bits, &regs->r16, nat);
+ copy_reg(&bank[17-16], ms->pmsa_nat_bits, &regs->r17, nat);
+ copy_reg(&bank[18-16], ms->pmsa_nat_bits, &regs->r18, nat);
+ copy_reg(&bank[19-16], ms->pmsa_nat_bits, &regs->r19, nat);
+ copy_reg(&bank[20-16], ms->pmsa_nat_bits, &regs->r20, nat);
+ copy_reg(&bank[21-16], ms->pmsa_nat_bits, &regs->r21, nat);
+ copy_reg(&bank[22-16], ms->pmsa_nat_bits, &regs->r22, nat);
+ copy_reg(&bank[23-16], ms->pmsa_nat_bits, &regs->r23, nat);
+ copy_reg(&bank[24-16], ms->pmsa_nat_bits, &regs->r24, nat);
+ copy_reg(&bank[25-16], ms->pmsa_nat_bits, &regs->r25, nat);
+ copy_reg(&bank[26-16], ms->pmsa_nat_bits, &regs->r26, nat);
+ copy_reg(&bank[27-16], ms->pmsa_nat_bits, &regs->r27, nat);
+ copy_reg(&bank[28-16], ms->pmsa_nat_bits, &regs->r28, nat);
+ copy_reg(&bank[29-16], ms->pmsa_nat_bits, &regs->r29, nat);
+ copy_reg(&bank[30-16], ms->pmsa_nat_bits, &regs->r30, nat);
+ copy_reg(&bank[31-16], ms->pmsa_nat_bits, &regs->r31, nat);
+}
+
/* On entry to this routine, we are running on the per cpu stack, see
* mca_asm.h. The original stack has not been touched by this event. Some of
* the original stack's registers will be in the RBS on this stack. This stack
@@ -921,7 +980,6 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
u64 r12 = ms->pmsa_gr[12-1], r13 = ms->pmsa_gr[13-1];
u64 ar_bspstore = regs->ar_bspstore;
u64 ar_bsp = regs->ar_bspstore + (loadrs >> 16);
- const u64 *bank;
const char *msg;
int cpu = smp_processor_id();
@@ -1024,54 +1082,9 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
p = (char *)r12 - sizeof(*regs);
old_regs = (struct pt_regs *)p;
memcpy(old_regs, regs, sizeof(*regs));
- /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
- * pmsa_{xip,xpsr,xfs}
- */
- if (ia64_psr(regs)->ic) {
- old_regs->cr_iip = ms->pmsa_iip;
- old_regs->cr_ipsr = ms->pmsa_ipsr;
- old_regs->cr_ifs = ms->pmsa_ifs;
- } else {
- old_regs->cr_iip = ms->pmsa_xip;
- old_regs->cr_ipsr = ms->pmsa_xpsr;
- old_regs->cr_ifs = ms->pmsa_xfs;
- }
- old_regs->pr = ms->pmsa_pr;
- old_regs->b0 = ms->pmsa_br0;
old_regs->loadrs = loadrs;
- old_regs->ar_rsc = ms->pmsa_rsc;
old_unat = old_regs->ar_unat;
- copy_reg(&ms->pmsa_gr[1-1], ms->pmsa_nat_bits, &old_regs->r1, &old_unat);
- copy_reg(&ms->pmsa_gr[2-1], ms->pmsa_nat_bits, &old_regs->r2, &old_unat);
- copy_reg(&ms->pmsa_gr[3-1], ms->pmsa_nat_bits, &old_regs->r3, &old_unat);
- copy_reg(&ms->pmsa_gr[8-1], ms->pmsa_nat_bits, &old_regs->r8, &old_unat);
- copy_reg(&ms->pmsa_gr[9-1], ms->pmsa_nat_bits, &old_regs->r9, &old_unat);
- copy_reg(&ms->pmsa_gr[10-1], ms->pmsa_nat_bits, &old_regs->r10, &old_unat);
- copy_reg(&ms->pmsa_gr[11-1], ms->pmsa_nat_bits, &old_regs->r11, &old_unat);
- copy_reg(&ms->pmsa_gr[12-1], ms->pmsa_nat_bits, &old_regs->r12, &old_unat);
- copy_reg(&ms->pmsa_gr[13-1], ms->pmsa_nat_bits, &old_regs->r13, &old_unat);
- copy_reg(&ms->pmsa_gr[14-1], ms->pmsa_nat_bits, &old_regs->r14, &old_unat);
- copy_reg(&ms->pmsa_gr[15-1], ms->pmsa_nat_bits, &old_regs->r15, &old_unat);
- if (ia64_psr(old_regs)->bn)
- bank = ms->pmsa_bank1_gr;
- else
- bank = ms->pmsa_bank0_gr;
- copy_reg(&bank[16-16], ms->pmsa_nat_bits, &old_regs->r16, &old_unat);
- copy_reg(&bank[17-16], ms->pmsa_nat_bits, &old_regs->r17, &old_unat);
- copy_reg(&bank[18-16], ms->pmsa_nat_bits, &old_regs->r18, &old_unat);
- copy_reg(&bank[19-16], ms->pmsa_nat_bits, &old_regs->r19, &old_unat);
- copy_reg(&bank[20-16], ms->pmsa_nat_bits, &old_regs->r20, &old_unat);
- copy_reg(&bank[21-16], ms->pmsa_nat_bits, &old_regs->r21, &old_unat);
- copy_reg(&bank[22-16], ms->pmsa_nat_bits, &old_regs->r22, &old_unat);
- copy_reg(&bank[23-16], ms->pmsa_nat_bits, &old_regs->r23, &old_unat);
- copy_reg(&bank[24-16], ms->pmsa_nat_bits, &old_regs->r24, &old_unat);
- copy_reg(&bank[25-16], ms->pmsa_nat_bits, &old_regs->r25, &old_unat);
- copy_reg(&bank[26-16], ms->pmsa_nat_bits, &old_regs->r26, &old_unat);
- copy_reg(&bank[27-16], ms->pmsa_nat_bits, &old_regs->r27, &old_unat);
- copy_reg(&bank[28-16], ms->pmsa_nat_bits, &old_regs->r28, &old_unat);
- copy_reg(&bank[29-16], ms->pmsa_nat_bits, &old_regs->r29, &old_unat);
- copy_reg(&bank[30-16], ms->pmsa_nat_bits, &old_regs->r30, &old_unat);
- copy_reg(&bank[31-16], ms->pmsa_nat_bits, &old_regs->r31, &old_unat);
+ finish_pt_regs(old_regs, sos, &old_unat);
/* Next stack a struct switch_stack. mca_asm.S built a partial
* switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1141,6 +1154,8 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
no_mod:
mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
smp_processor_id(), type, msg);
+ old_unat = regs->ar_unat;
+ finish_pt_regs(regs, sos, &old_unat);
return previous_current;
}
@@ -1210,9 +1225,12 @@ static void mca_insert_tr(u64 iord)
unsigned long psr;
int cpu = smp_processor_id();
+ if (!ia64_idtrs[cpu])
+ return;
+
psr = ia64_clear_ic();
for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
- p = &__per_cpu_idtrs[cpu][iord-1][i];
+ p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
if (p->pte & 0x1) {
old_rr = ia64_get_rr(p->ifa);
if (old_rr != p->rr) {
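
Note (not part of the patch): the register reconstruction is factored into finish_pt_regs() so that the no_mod error path also produces a usable pt_regs, and when psr.ic was clear the interrupted iip/ipsr/ifs are now preserved in the new ia64_sal_os_state fields (added in the asm/mca.h hunk earlier) rather than lost. The no_mod path, condensed from the hunk:

    no_mod:
            old_unat = regs->ar_unat;
            finish_pt_regs(regs, sos, &old_unat);
            return previous_current;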
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
ia64_do_tlb_purge:
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..53292abf846c 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -41,7 +41,7 @@ struct dma_map_ops swiotlb_dma_ops = {
void __init swiotlb_dma_init(void)
{
dma_ops = &swiotlb_dma_ops;
- swiotlb_init();
+ swiotlb_init(1);
}
void __init pci_swiotlb_init(void)
@@ -51,7 +51,7 @@ void __init pci_swiotlb_init(void)
swiotlb = 1;
printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
machvec_init("dig");
- swiotlb_init();
+ swiotlb_init(1);
dma_ops = &swiotlb_dma_ops;
#else
panic("Unable to find Intel IOMMU");
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f1782705b1f7..6bcbe215b9a4 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -522,42 +522,37 @@ EXPORT_SYMBOL(pfm_sysctl);
static ctl_table pfm_ctl_table[]={
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "debug",
.data = &pfm_sysctl.debug,
.maxlen = sizeof(int),
.mode = 0666,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "debug_ovfl",
.data = &pfm_sysctl.debug_ovfl,
.maxlen = sizeof(int),
.mode = 0666,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "fastctxsw",
.data = &pfm_sysctl.fastctxsw,
.maxlen = sizeof(int),
.mode = 0600,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "expert_mode",
.data = &pfm_sysctl.expert_mode,
.maxlen = sizeof(int),
.mode = 0600,
- .proc_handler = &proc_dointvec,
+ .proc_handler = proc_dointvec,
},
{}
};
static ctl_table pfm_sysctl_dir[] = {
{
- .ctl_name = CTL_UNNUMBERED,
.procname = "perfmon",
.mode = 0555,
.child = pfm_ctl_table,
@@ -566,7 +561,6 @@ static ctl_table pfm_sysctl_dir[] = {
};
static ctl_table pfm_sysctl_root[] = {
{
- .ctl_name = CTL_KERN,
.procname = "kernel",
.mode = 0555,
.child = pfm_sysctl_dir,
@@ -2206,7 +2200,7 @@ pfm_alloc_file(pfm_context_t *ctx)
{
struct file *file;
struct inode *inode;
- struct dentry *dentry;
+ struct path path;
char name[32];
struct qstr this;
@@ -2231,18 +2225,19 @@ pfm_alloc_file(pfm_context_t *ctx)
/*
* allocate a new dcache entry
*/
- dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
- if (!dentry) {
+ path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
+ if (!path.dentry) {
iput(inode);
return ERR_PTR(-ENOMEM);
}
+ path.mnt = mntget(pfmfs_mnt);
- dentry->d_op = &pfmfs_dentry_operations;
- d_add(dentry, inode);
+ path.dentry->d_op = &pfmfs_dentry_operations;
+ d_add(path.dentry, inode);
- file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops);
+ file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
if (!file) {
- dput(dentry);
+ path_put(&path);
return ERR_PTR(-ENFILE);
}
@@ -2298,7 +2293,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
* if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
* return -ENOMEM;
*/
- if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ if (size > task_rlimit(task, RLIMIT_MEMLOCK))
return -ENOMEM;
/*
@@ -3523,7 +3518,7 @@ pfm_use_debug_registers(struct task_struct *task)
* IA64_THREAD_DBG_VALID set. This indicates a task which was
* able to use the debug registers for debugging purposes via
* ptrace(). Therefore we know it was not using them for
- * perfmormance monitoring, so we only decrement the number
+ * performance monitoring, so we only decrement the number
* of "ptraced" debug register users to keep the count up to date
*/
int
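The task_rlimit() helper used in the RLIMIT_MEMLOCK check wraps the open-coded signal->rlim dereference; roughly, per include/linux/sched.h of this era (shown for reference):

	static inline unsigned long task_rlimit(const struct task_struct *tsk,
						unsigned int limit)
	{
		return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
	}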
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
// purge all TC entries
#define O(member) IA64_CPUINFO_##member##_OFFSET
- GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2
+ GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
;;
addl r17=O(PTCE_STRIDE),r2
addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..a1ea87919777 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -74,7 +74,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
EXPORT_SYMBOL(__per_cpu_offset);
#endif
-DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +566,18 @@ setup_arch (char **cmdline_p)
early_acpi_boot_init();
# ifdef CONFIG_ACPI_NUMA
acpi_numa_init();
-#ifdef CONFIG_ACPI_HOTPLUG_CPU
+# ifdef CONFIG_ACPI_HOTPLUG_CPU
prefill_possible_map();
-#endif
+# endif
per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
32 : cpus_weight(early_cpu_possible_map)),
additional_cpus > 0 ? additional_cpus : 0);
# endif
-#else
-# ifdef CONFIG_SMP
- smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
-# endif
#endif /* CONFIG_ACPI_BOOT */
+#ifdef CONFIG_SMP
+ smp_build_cpu_map();
+#endif
find_memory();
/* process SAL system table: */
@@ -856,18 +855,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
}
/*
- * In UP configuration, setup_per_cpu_areas() is defined in
- * include/linux/percpu.h
- */
-#ifdef CONFIG_SMP
-void __init
-setup_per_cpu_areas (void)
-{
- /* start_kernel() requires this... */
-}
-#endif
-
-/*
* Do the following calculations:
*
* 1. the max. cache line size.
@@ -980,7 +967,7 @@ cpu_init (void)
* depends on the data returned by identify_cpu(). We break the dependency by
* accessing cpu_data() through the canonical per-CPU address.
*/
- cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start);
+ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
identify_cpu(cpu_info);
#ifdef CONFIG_MCKINLEY
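The cpu_info -> ia64_cpu_info rename is namespace hygiene: with the dynamic percpu allocator the per_cpu__ symbol prefix disappears, so per-cpu variables land in the ordinary global namespace and a generic name like cpu_info invites collisions. The usual accessors simply follow the rename (sketch matching asm/processor.h):

	#define local_cpu_data	(&__ia64_per_cpu_var(ia64_cpu_info))
	#define cpu_data(cpu)	(&per_cpu(ia64_cpu_info, cpu))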
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f34036..609d50056a6c 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@ sys_getpagesize (void)
asmlinkage unsigned long
ia64_brk (unsigned long brk)
{
- unsigned long rlim, retval, newbrk, oldbrk;
- struct mm_struct *mm = current->mm;
-
- /*
- * Most of this replicates the code in sys_brk() except for an additional safety
- * check and the clearing of r8. However, we can't call sys_brk() because we need
- * to acquire the mmap_sem before we can do the test...
- */
- down_write(&mm->mmap_sem);
-
- if (brk < mm->end_code)
- goto out;
- newbrk = PAGE_ALIGN(brk);
- oldbrk = PAGE_ALIGN(mm->brk);
- if (oldbrk == newbrk)
- goto set_brk;
-
- /* Always allow shrinking brk. */
- if (brk <= mm->brk) {
- if (!do_munmap(mm, newbrk, oldbrk-newbrk))
- goto set_brk;
- goto out;
- }
-
- /* Check against unimplemented/unmapped addresses: */
- if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
- goto out;
-
- /* Check against rlimit.. */
- rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
- if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
- goto out;
-
- /* Check against existing mmap mappings. */
- if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
- goto out;
-
- /* Ok, looks good - let it rip. */
- if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
- goto out;
-set_brk:
- mm->brk = brk;
-out:
- retval = mm->brk;
- up_write(&mm->mmap_sem);
+ unsigned long retval = sys_brk(brk);
force_successful_syscall_return();
return retval;
}
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
return 0;
}
-static inline unsigned long
-do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
-{
- struct file *file = NULL;
-
- flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
- if (!(flags & MAP_ANONYMOUS)) {
- file = fget(fd);
- if (!file)
- return -EBADF;
-
- if (!file->f_op || !file->f_op->mmap) {
- addr = -ENODEV;
- goto out;
- }
- }
-
- /* Careful about overflows.. */
- len = PAGE_ALIGN(len);
- if (!len || len > TASK_SIZE) {
- addr = -EINVAL;
- goto out;
- }
-
- down_write(&current->mm->mmap_sem);
- addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
- up_write(&current->mm->mmap_sem);
-
-out: if (file)
- fput(file);
- return addr;
-}
-
/*
* mmap2() is like mmap() except that the offset is expressed in units
* of PAGE_SIZE (instead of bytes). This allows one to mmap2() (pieces
@@ -226,7 +149,7 @@ out: if (file)
asmlinkage unsigned long
sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
{
- addr = do_mmap2(addr, len, prot, flags, fd, pgoff);
+ addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo
if (offset_in_page(off) != 0)
return -EINVAL;
- addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
+ addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
if (!IS_ERR((void *) addr))
force_successful_syscall_return();
return addr;
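The dropped do_mmap2() duplicated what the new common sys_mmap_pgoff() now does for every architecture; that helper looks roughly like this in this cycle (sketch of mm/util.c, modulo error-path details):

	SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
			unsigned long, prot, unsigned long, flags,
			unsigned long, fd, unsigned long, pgoff)
	{
		struct file *file = NULL;
		unsigned long retval = -EBADF;

		if (!(flags & MAP_ANONYMOUS)) {
			file = fget(fd);
			if (!file)
				goto out;
		}

		flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);

		down_write(&current->mm->mmap_sem);
		retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
		up_write(&current->mm->mmap_sem);

		if (file)
			fput(file);
	out:
		return retval;
	}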
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 4990495d7531..a35c661e5e89 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void)
{
}
-void update_vsyscall(struct timespec *wall, struct clocksource *c)
+void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
{
unsigned long flags;
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
/* copy fsyscall clock data */
fsyscall_gtod_data.clk_mask = c->mask;
- fsyscall_gtod_data.clk_mult = c->mult;
+ fsyscall_gtod_data.clk_mult = mult;
fsyscall_gtod_data.clk_shift = c->shift;
fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
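The extra mult argument carries the NTP-adjusted multiplier from the timekeeping core; keeping c->mult here would bake in the unadjusted hardware value and let the fsyscall clock drift from the kernel's gettimeofday(). The consumer side converts cycle deltas with the usual fixed-point step (sketch):

	/* nanoseconds elapsed for a cycle delta, given the adjusted pair */
	static inline u64 cycles_to_ns(u64 cycle_delta, u32 mult, u32 shift)
	{
		return (cycle_delta * mult) >> shift;
	}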
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index 6db08599ebbc..776dd40397e2 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -60,7 +60,6 @@ dump (const char *str, void *vp, size_t len)
*/
int no_unaligned_warning;
int unaligned_dump_stack;
-static int noprint_warning;
/*
* For M-unit:
@@ -1357,9 +1356,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
/* watch for command names containing %s */
printk(KERN_WARNING "%s", buf);
} else {
- if (no_unaligned_warning && !noprint_warning) {
- noprint_warning = 1;
- printk(KERN_WARNING "%s(%d) encountered an "
+ if (no_unaligned_warning) {
+ printk_once(KERN_WARNING "%s(%d) encountered an "
"unaligned exception which required\n"
"kernel assistance, which degrades "
"the performance of the application.\n"
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
}
#endif
+#ifdef CONFIG_SMP
+ . = ALIGN(PERCPU_PAGE_SIZE);
+ __cpu0_per_cpu = .;
+ . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
+#endif
+
. = ALIGN(PAGE_SIZE);
__init_end = .;
@@ -198,11 +204,6 @@ SECTIONS
data : { } :data
.data : AT(ADDR(.data) - LOAD_OFFSET)
{
-#ifdef CONFIG_SMP
- . = ALIGN(PERCPU_PAGE_SIZE);
- __cpu0_per_cpu = .;
- . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
-#endif
INIT_TASK_DATA(PAGE_SIZE)
CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
READ_MOSTLY_DATA(SMP_CACHE_BYTES)
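Moving cpu0's boot-time percpu page out of .data is deliberate: per_cpu_init() copies cpu0's data to its permanent home early, after which the __cpu0_per_cpu page is dead weight, and parking it ahead of __init_end lets it be reclaimed with the rest of init memory (assumed rationale). Schematically, the init end of the image now looks like:

	/* ... .init.* sections ... */
	. = ALIGN(PERCPU_PAGE_SIZE);
	__cpu0_per_cpu = .;		/* cpu0 boot-time percpu page */
	. = . + PERCPU_PAGE_SIZE;
	. = ALIGN(PAGE_SIZE);
	__init_end = .;			/* everything above is freed after boot */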
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 0bb99b732908..1089b3e918ac 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
- coalesced_mmio.o irq_comm.o)
+ coalesced_mmio.o irq_comm.o assigned-dev.o)
ifeq ($(CONFIG_IOMMU_API),y)
common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 0c3564a7a033..9324c875caf5 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -22,7 +22,6 @@
*
*/
-#include <linux/autoconf.h>
#include <linux/kvm_host.h>
#include <linux/kbuild.h>
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0ad09f05efa9..5fdeec5fddcf 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
static DEFINE_SPINLOCK(vp_lock);
-void kvm_arch_hardware_enable(void *garbage)
+int kvm_arch_hardware_enable(void *garbage)
{
long status;
long tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
local_irq_restore(saved_psr);
if (slot < 0)
- return;
+ return -EINVAL;
spin_lock(&vp_lock);
status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
__pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
if (status != 0) {
printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
- return ;
+ return -EINVAL;
}
if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
}
spin_unlock(&vp_lock);
ia64_ptr_entry(0x3, slot);
+
+ return 0;
}
void kvm_arch_hardware_disable(void *garbage)
@@ -851,8 +853,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_IOAPIC:
- memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm),
- sizeof(struct kvm_ioapic_state));
+ r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
@@ -868,9 +869,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
r = 0;
switch (chip->chip_id) {
case KVM_IRQCHIP_IOAPIC:
- memcpy(ioapic_irqchip(kvm),
- &chip->chip.ioapic,
- sizeof(struct kvm_ioapic_state));
+ r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
break;
default:
r = -EINVAL;
@@ -944,7 +943,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
{
struct kvm *kvm = filp->private_data;
void __user *argp = (void __user *)arg;
- int r = -EINVAL;
+ int r = -ENOTTY;
switch (ioctl) {
case KVM_SET_MEMORY_REGION: {
@@ -985,10 +984,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
goto out;
if (irqchip_in_kernel(kvm)) {
__s32 status;
- mutex_lock(&kvm->irq_lock);
status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
irq_event.irq, irq_event.level);
- mutex_unlock(&kvm->irq_lock);
if (ioctl == KVM_IRQ_LINE_STATUS) {
irq_event.status = status;
if (copy_to_user(argp, &irq_event,
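Returning int lets the generic bring-up path notice a CPU that failed to enter VT mode instead of silently running without it. The consumer in virt/kvm/kvm_main.c looks roughly like this in this cycle (sketch):

	static void hardware_enable(void *junk)
	{
		int cpu = raw_smp_processor_id();
		int r;

		if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
			return;
		cpumask_set_cpu(cpu, cpus_hardware_enabled);

		r = kvm_arch_hardware_enable(NULL);
		if (r) {
			cpumask_clear_cpu(cpu, cpus_hardware_enabled);
			atomic_inc(&hardware_enable_failed);
			printk(KERN_INFO "kvm: enabling virtualization on "
					 "CPU%d failed\n", cpu);
		}
	}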
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae69..988911b4cc7a 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
#define _vmm_raw_spin_lock(x) do {}while(0)
#define _vmm_raw_spin_unlock(x) do {}while(0)
#else
+typedef struct {
+ volatile unsigned int lock;
+} vmm_spinlock_t;
#define _vmm_raw_spin_lock(x) \
do { \
__u32 *ia64_spinlock_ptr = (__u32 *) (x); \
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
#define _vmm_raw_spin_unlock(x) \
do { barrier(); \
- ((spinlock_t *)x)->raw_lock.lock = 0; } \
+ ((vmm_spinlock_t *)x)->lock = 0; } \
while (0)
#endif
-void vmm_spin_lock(spinlock_t *lock);
-void vmm_spin_unlock(spinlock_t *lock);
+void vmm_spin_lock(vmm_spinlock_t *lock);
+void vmm_spin_unlock(vmm_spinlock_t *lock);
enum {
I_TLB = 1,
D_TLB = 2
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6c..7a62f75778c5 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
return ;
}
-void vmm_spin_lock(spinlock_t *lock)
+void vmm_spin_lock(vmm_spinlock_t *lock)
{
_vmm_raw_spin_lock(lock);
}
-void vmm_spin_unlock(spinlock_t *lock)
+void vmm_spin_unlock(vmm_spinlock_t *lock)
{
_vmm_raw_spin_unlock(lock);
}
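The VMM text is relocated and runs with none of the kernel's lock-debugging infrastructure available, so the full spinlock_t cannot be used there; vmm_spinlock_t is a bare word instead. Hypothetical usage, assuming the zero-initialized lock starts unlocked:

	static vmm_spinlock_t demo_lock;	/* hypothetical; 0 == unlocked */

	static void demo_critical_section(void)
	{
		vmm_spin_lock(&demo_lock);
		/* ... VMM-side critical section ... */
		vmm_spin_unlock(&demo_lock);
	}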
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6e..4332f7ee5203 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
u64 i, dirty_pages = 1;
u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
- spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
+ vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
void * __cpuinit
per_cpu_init (void)
{
- int cpu;
- static int first_time=1;
+ static bool first_time = true;
+ void *cpu0_data = __cpu0_per_cpu;
+ unsigned int cpu;
+
+ if (!first_time)
+ goto skip;
+ first_time = false;
/*
- * get_free_pages() cannot be used before cpu_init() done. BSP
- * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
- * get_zeroed_page().
+ * get_free_pages() cannot be used before cpu_init() is done.
+ * The BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+ * so that the APs don't have to call get_zeroed_page().
*/
- if (first_time) {
- void *cpu0_data = __cpu0_per_cpu;
+ for_each_possible_cpu(cpu) {
+ void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
- first_time=0;
+ memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+ per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- __per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
- per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+ /*
+ * percpu area for cpu0 is moved from the __init area
+ * which is set up by head.S and used until this point.
+ * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and that its
+ * virtual address isn't insanely far from the other
+ * percpu areas, which is important for the congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+ (unsigned long)__per_cpu_start);
- for (cpu = 1; cpu < NR_CPUS; cpu++) {
- memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
- }
+ cpu_data += PERCPU_PAGE_SIZE;
}
+skip:
return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
static inline void
alloc_per_cpu_data(void)
{
- cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+ cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
}
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more work than
+ * creating a whole new layout using the helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *gi;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int rc;
+
+ ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ gi = &ai->groups[0];
+
+ /* units are assigned consecutively to possible cpus */
+ for_each_possible_cpu(cpu)
+ gi->cpu_map[gi->nr_units++] = cpu;
+
+ /* set parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
#else
#define alloc_per_cpu_data() do { } while (0)
#endif /* CONFIG_SMP */
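Each possible CPU gets exactly one PERCPU_PAGE_SIZE unit, carved into static, reserved, and dynamic regions. With an era-typical 64KiB percpu page and 8KiB PERCPU_MODULE_RESERVE, and a hypothetical 40KiB static image, the dyn_size computation above works out to:

	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size
	         = 64KiB - 40KiB - 8KiB
	         = 16KiB of dynamic percpu space per CPU   (all sizes illustrative)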
@@ -270,8 +331,8 @@ paging_init (void)
map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmalloc_end -= map_size;
- vmem_map = (struct page *) vmalloc_end;
+ VMALLOC_END -= map_size;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
/*
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..19c4b2195dce 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -143,22 +143,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
int cpu;
for_each_possible_early_cpu(cpu) {
- if (cpu == 0) {
- void *cpu0_data = __cpu0_per_cpu;
- __per_cpu_offset[cpu] = (char*)cpu0_data -
- __per_cpu_start;
- } else if (node == node_cpuid[cpu].nid) {
- memcpy(__va(cpu_data), __phys_per_cpu_start,
- __per_cpu_end - __per_cpu_start);
- __per_cpu_offset[cpu] = (char*)__va(cpu_data) -
- __per_cpu_start;
- cpu_data += PERCPU_PAGE_SIZE;
- }
+ void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+ if (node != node_cpuid[cpu].nid)
+ continue;
+
+ memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+ __per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+ __per_cpu_start;
+
+ /*
+ * percpu area for cpu0 is moved from the __init area
+ * which is set up by head.S and used until this point.
+ * Update ar.k3. This move ensures that the percpu
+ * area for cpu0 is on the correct node and that its
+ * virtual address isn't insanely far from the other
+ * percpu areas, which is important for the congruent
+ * percpu allocator.
+ */
+ if (cpu == 0)
+ ia64_set_kr(IA64_KR_PER_CPU_DATA,
+ (unsigned long)cpu_data -
+ (unsigned long)__per_cpu_start);
+
+ cpu_data += PERCPU_PAGE_SIZE;
}
#endif
return cpu_data;
}
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas. All
+ * this function has to do is teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more work than
+ * creating a whole new layout using the helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+ struct pcpu_alloc_info *ai;
+ struct pcpu_group_info *uninitialized_var(gi);
+ unsigned int *cpu_map;
+ void *base;
+ unsigned long base_offset;
+ unsigned int cpu;
+ ssize_t static_size, reserved_size, dyn_size;
+ int node, prev_node, unit, nr_units, rc;
+
+ ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+ if (!ai)
+ panic("failed to allocate pcpu_alloc_info");
+ cpu_map = ai->groups[0].cpu_map;
+
+ /* determine base */
+ base = (void *)ULONG_MAX;
+ for_each_possible_cpu(cpu)
+ base = min(base,
+ (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+ base_offset = (void *)__per_cpu_start - base;
+
+ /* build cpu_map, units are grouped by node */
+ unit = 0;
+ for_each_node(node)
+ for_each_possible_cpu(cpu)
+ if (node == node_cpuid[cpu].nid)
+ cpu_map[unit++] = cpu;
+ nr_units = unit;
+
+ /* set basic parameters */
+ static_size = __per_cpu_end - __per_cpu_start;
+ reserved_size = PERCPU_MODULE_RESERVE;
+ dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+ if (dyn_size < 0)
+ panic("percpu area overflow static=%zd reserved=%zd\n",
+ static_size, reserved_size);
+
+ ai->static_size = static_size;
+ ai->reserved_size = reserved_size;
+ ai->dyn_size = dyn_size;
+ ai->unit_size = PERCPU_PAGE_SIZE;
+ ai->atom_size = PAGE_SIZE;
+ ai->alloc_size = PERCPU_PAGE_SIZE;
+
+ /*
+ * CPUs are put into groups according to node. Walk cpu_map
+ * and create new groups at node boundaries.
+ */
+ prev_node = -1;
+ ai->nr_groups = 0;
+ for (unit = 0; unit < nr_units; unit++) {
+ cpu = cpu_map[unit];
+ node = node_cpuid[cpu].nid;
+
+ if (node == prev_node) {
+ gi->nr_units++;
+ continue;
+ }
+ prev_node = node;
+
+ gi = &ai->groups[ai->nr_groups++];
+ gi->nr_units = 1;
+ gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+ gi->cpu_map = &cpu_map[unit];
+ }
+
+ rc = pcpu_setup_first_chunk(ai, base);
+ if (rc)
+ panic("failed to setup percpu area (err=%d)", rc);
+
+ pcpu_free_alloc_info(ai);
+}
+#endif
+
/**
* fill_pernode - initialize pernode data.
* @node: the node id.
@@ -352,7 +450,8 @@ static void __init initialize_pernode_data(void)
/* Set the node_data pointer for each per-cpu struct */
for_each_possible_early_cpu(cpu) {
node = node_cpuid[cpu].nid;
- per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+ per_cpu(ia64_cpu_info, cpu).node_data =
+ mem_data[node].node_data;
}
#else
{
@@ -360,7 +459,7 @@ static void __init initialize_pernode_data(void)
cpu = 0;
node = node_cpuid[cpu].nid;
cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
- ((char *)&per_cpu__cpu_info - __per_cpu_start));
+ ((char *)&per_cpu__ia64_cpu_info - __per_cpu_start));
cpu0_cpu_info->node_data = mem_data[node].node_data;
}
#endif /* CONFIG_SMP */
@@ -666,9 +765,9 @@ void __init paging_init(void)
sparse_init();
#ifdef CONFIG_VIRTUAL_MEM_MAP
- vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
sizeof(struct page));
- vmem_map = (struct page *) vmalloc_end;
+ vmem_map = (struct page *) VMALLOC_END;
efi_memmap_walk(create_mem_map_page_table, NULL);
printk("Virtual mem_map starts at 0x%p\n", vmem_map);
#endif
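To see how the node-boundary walk carves groups, here is a self-contained model of it for a hypothetical 4-CPU, 2-node box (plain C, not kernel code):

	#include <stdio.h>

	int main(void)
	{
		int nid[4] = { 0, 0, 1, 1 };	/* stand-in for node_cpuid[cpu].nid */
		int cpu_map[4];
		int unit = 0, node, cpu, prev = -1, groups = 0;

		/* units are grouped by node, exactly like the walk above */
		for (node = 0; node < 2; node++)
			for (cpu = 0; cpu < 4; cpu++)
				if (nid[cpu] == node)
					cpu_map[unit++] = cpu;

		for (unit = 0; unit < 4; unit++) {
			if (nid[cpu_map[unit]] != prev) {
				prev = nid[cpu_map[unit]];
				printf("group %d starts at unit %d (cpu%d)\n",
				       groups++, unit, cpu_map[unit]);
			}
		}
		return 0;	/* prints: group 0 at unit 0 (cpu0), group 1 at unit 2 (cpu2) */
	}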
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..7c0d4814a68d 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -44,8 +44,8 @@ extern void ia64_tlb_init (void);
unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif
@@ -91,7 +91,7 @@ dma_mark_clean(void *addr, size_t size)
inline void
ia64_set_rbs_bot (void)
{
- unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+ unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
if (stack_size > MAX_USER_STACK_SIZE)
stack_size = MAX_USER_STACK_SIZE;
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 2a140627dfd6..3dccdd8eb275 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr)
}
void __iomem *
+early_ioremap (unsigned long phys_addr, unsigned long size)
+{
+ return __ioremap(phys_addr);
+}
+
+void __iomem *
ioremap (unsigned long phys_addr, unsigned long size)
{
void __iomem *addr;
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
EXPORT_SYMBOL(ioremap_nocache);
void
+early_iounmap (volatile void __iomem *addr, unsigned long size)
+{
+}
+
+void
iounmap (volatile void __iomem *addr)
{
if (REGION_NUMBER(addr) == RGN_GATE)
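These stubs are correct rather than placeholders: on ia64, __ioremap() just ORs the physical address into the uncached identity region, so nothing is allocated and there is nothing for early_iounmap() to undo. Hypothetical early-boot usage (table_pa and table_len are illustrative):

	static void __init demo_parse_firmware_table(unsigned long table_pa,
						     unsigned long table_len)
	{
		void __iomem *p = early_ioremap(table_pa, table_len);

		/* ... read the firmware table through p ... */

		early_iounmap(p, table_len);	/* no-op here, kept for API symmetry */
	}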
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index f426dc78d959..f3de9d7a98b4 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -48,7 +48,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
DEFINE_PER_CPU(u8, ia64_tr_num); /*Number of TR slots in current processor*/
DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
/*
* Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -100,24 +100,36 @@ wrap_mmu_context (struct mm_struct *mm)
* this primitive it can be moved up to a spinaphore.h header.
*/
struct spinaphore {
- atomic_t cur;
+ unsigned long ticket;
+ unsigned long serve;
};
static inline void spinaphore_init(struct spinaphore *ss, int val)
{
- atomic_set(&ss->cur, val);
+ ss->ticket = 0;
+ ss->serve = val;
}
static inline void down_spin(struct spinaphore *ss)
{
- while (unlikely(!atomic_add_unless(&ss->cur, -1, 0)))
- while (atomic_read(&ss->cur) == 0)
- cpu_relax();
+ unsigned long t = ia64_fetchadd(1, &ss->ticket, acq), serve;
+
+ if (time_before(t, ss->serve))
+ return;
+
+ ia64_invala();
+
+ for (;;) {
+ asm volatile ("ld4.c.nc %0=[%1]" : "=r"(serve) : "r"(&ss->serve) : "memory");
+ if (time_before(t, serve))
+ return;
+ cpu_relax();
+ }
}
static inline void up_spin(struct spinaphore *ss)
{
- atomic_add(1, &ss->cur);
+ ia64_fetchadd(1, &ss->serve, rel);
}
static struct spinaphore ptcg_sem;
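The rewrite turns the spinaphore into a ticket design: down_spin() takes the next ticket and waits until the serve counter passes it, up_spin() advances serve, and initializing serve to val admits val holders at once. Unlike the old atomic_add_unless() loop, admission is FIFO, so a CPU can no longer be starved by luckier rivals (assumed motivation). A standalone model using GCC atomics in place of ia64_fetchadd(), for illustration only:

	struct spinaphore_model {
		unsigned long ticket;	/* next ticket to hand out */
		unsigned long serve;	/* tickets strictly below this may run */
	};

	static void model_init(struct spinaphore_model *ss, unsigned long val)
	{
		ss->ticket = 0;
		ss->serve = val;
	}

	static void model_down(struct spinaphore_model *ss)
	{
		unsigned long t = __atomic_fetch_add(&ss->ticket, 1,
						     __ATOMIC_ACQUIRE);

		/* wrap-safe compare, as time_before(t, serve) above */
		while ((long)(t - __atomic_load_n(&ss->serve,
						  __ATOMIC_ACQUIRE)) >= 0)
			;	/* spin until our ticket is served */
	}

	static void model_up(struct spinaphore_model *ss)
	{
		__atomic_fetch_add(&ss->serve, 1, __ATOMIC_RELEASE);
	}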
@@ -417,10 +429,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
struct ia64_tr_entry *p;
int cpu = smp_processor_id();
+ if (!ia64_idtrs[cpu]) {
+ ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+ sizeof (struct ia64_tr_entry), GFP_KERNEL);
+ if (!ia64_idtrs[cpu])
+ return -ENOMEM;
+ }
r = -EINVAL;
/*Check overlap with existing TR entries*/
if (target_mask & 0x1) {
- p = &__per_cpu_idtrs[cpu][0][0];
+ p = ia64_idtrs[cpu];
for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
i++, p++) {
if (p->pte & 0x1)
@@ -432,7 +450,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
}
}
if (target_mask & 0x2) {
- p = &__per_cpu_idtrs[cpu][1][0];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
i++, p++) {
if (p->pte & 0x1)
@@ -447,16 +465,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
switch (target_mask & 0x3) {
case 1:
- if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
goto found;
continue;
case 2:
- if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
goto found;
continue;
case 3:
- if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
- !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+ !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
goto found;
continue;
default:
@@ -476,7 +494,7 @@ found:
if (target_mask & 0x1) {
ia64_itr(0x1, i, va, pte, log_size);
ia64_srlz_i();
- p = &__per_cpu_idtrs[cpu][0][i];
+ p = ia64_idtrs[cpu] + i;
p->ifa = va;
p->pte = pte;
p->itir = log_size << 2;
@@ -485,7 +503,7 @@ found:
if (target_mask & 0x2) {
ia64_itr(0x2, i, va, pte, log_size);
ia64_srlz_i();
- p = &__per_cpu_idtrs[cpu][1][i];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
p->ifa = va;
p->pte = pte;
p->itir = log_size << 2;
@@ -516,7 +534,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
return;
if (target_mask & 0x1) {
- p = &__per_cpu_idtrs[cpu][0][slot];
+ p = ia64_idtrs[cpu] + slot;
if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p->pte = 0;
ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -525,7 +543,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
}
if (target_mask & 0x2) {
- p = &__per_cpu_idtrs[cpu][1][slot];
+ p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
p->pte = 0;
ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -534,8 +552,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
}
for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
- if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
- (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+ if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+ ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
break;
}
per_cpu(ia64_tr_used, cpu) = i;
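After the conversion, each CPU's TR bookkeeping is a single kmalloc'ed block of 2 * IA64_TR_ALLOC_MAX entries: ITRs first, DTRs starting at offset IA64_TR_ALLOC_MAX, which is what all of the ia64_idtrs[cpu] + ... arithmetic above encodes. A hypothetical accessor that names the convention:

	static inline struct ia64_tr_entry *tr_entry(int cpu, int is_dtr, int slot)
	{
		return ia64_idtrs[cpu] + (is_dtr ? IA64_TR_ALLOC_MAX : 0) + slot;
	}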
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 7de76dd352fe..df639db779f9 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -56,10 +56,13 @@ int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
if ((seg | reg) <= 255) {
addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
mode = 0;
- } else {
+ } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
mode = 1;
+ } else {
+ return -EINVAL;
}
+
result = ia64_sal_pci_config_read(addr, mode, len, &data);
if (result != 0)
return -EINVAL;
@@ -80,9 +83,11 @@ int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
if ((seg | reg) <= 255) {
addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
mode = 0;
- } else {
+ } else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
mode = 1;
+ } else {
+ return -EINVAL;
}
result = ia64_sal_pci_config_write(addr, mode, len, value);
if (result != 0)
@@ -126,6 +131,7 @@ alloc_pci_controller (int seg)
}
struct pci_root_info {
+ struct acpi_device *bridge;
struct pci_controller *controller;
char *name;
};
@@ -292,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
window->offset = offset;
if (insert_resource(root, &window->resource)) {
- printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n",
- window->resource.start, window->resource.end,
- root->name, info->name);
+ dev_err(&info->bridge->dev,
+ "can't allocate host bridge window %pR\n",
+ &window->resource);
+ } else {
+ if (offset)
+ dev_info(&info->bridge->dev, "host bridge window %pR "
+ "(PCI address [%#llx-%#llx])\n",
+ &window->resource,
+ window->resource.start - offset,
+ window->resource.end - offset);
+ else
+ dev_info(&info->bridge->dev,
+ "host bridge window %pR\n",
+ &window->resource);
}
return AE_OK;
@@ -314,8 +331,9 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
(res->end - res->start < 16))
continue;
if (j >= PCI_BUS_NUM_RESOURCES) {
- printk("Ignoring range [%#llx-%#llx] (%lx)\n",
- res->start, res->end, res->flags);
+ dev_warn(&bus->dev,
+ "ignoring host bridge window %pR (no space)\n",
+ res);
continue;
}
bus->resource[j++] = res;
@@ -359,6 +377,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
goto out3;
sprintf(name, "PCI Bus %04x:%02x", domain, bus);
+ info.bridge = device;
info.controller = controller;
info.name = name;
acpi_walk_resources(device->handle, METHOD_NAME__CRS,
@@ -715,9 +734,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
return ret;
}
-/* It's defined in drivers/pci/pci.c */
-extern u8 pci_cache_line_size;
-
/**
* set_pci_cacheline_size - determine cacheline size for PCI devices
*
@@ -726,7 +742,7 @@ extern u8 pci_cache_line_size;
*
* Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
*/
-static void __init set_pci_cacheline_size(void)
+static void __init set_pci_dfl_cacheline_size(void)
{
unsigned long levels, unique_caches;
long status;
@@ -746,7 +762,7 @@ static void __init set_pci_cacheline_size(void)
"(status=%ld)\n", __func__, status);
return;
}
- pci_cache_line_size = (1 << cci.pcci_line_size) / 4;
+ pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}
u64 ia64_dma_get_required_mask(struct device *dev)
@@ -777,7 +793,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
static int __init pcibios_init(void)
{
- set_pci_cacheline_size();
+ set_pci_dfl_cacheline_size();
return 0;
}
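The new sal_revision guard matters because the extended (4KiB) config-space encoding is only defined from SAL 3.2 onward; older firmware would mis-decode the address, so returning -EINVAL is the safe answer. For reference, the two encodings being selected between look roughly like this (per asm/pci.h of this era, shown as an assumption):

	#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
		(((u64) (seg) << 24) | ((bus) << 16) |		\
		 ((devfn) << 8) | (reg))	/* mode 0: 256-byte space */

	#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
		(((u64) (seg) << 28) | ((bus) << 20) |		\
		 ((devfn) << 12) | (reg))	/* mode 1: 4KiB extended space */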
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index fd50ff94302b..66f633bff059 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -390,7 +390,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
pcidev_match.handle = NULL;
acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
- find_matching_device, &pcidev_match, NULL);
+ find_matching_device, NULL, &pcidev_match, NULL);
if (!pcidev_match.handle) {
printk(KERN_ERR
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 25831c47c579..308e6595110e 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -119,7 +119,6 @@ sn_pcidev_info_get(struct pci_dev *dev)
* Additionally note that the struct sn_flush_device_war also has to be
* removed from arch/ia64/sn/include/xtalk/hubdev.h
*/
-static u8 war_implemented = 0;
static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
struct sn_flush_device_common *common)
@@ -128,11 +127,8 @@ static s64 sn_device_fixup_war(u64 nasid, u64 widget, int device,
struct sn_flush_device_war *dev_entry;
struct ia64_sal_retval isrv = {0,0,0,0};
- if (!war_implemented) {
- printk(KERN_WARNING "PROM version < 4.50 -- implementing old "
- "PROM flush WAR\n");
- war_implemented = 1;
- }
+ printk_once(KERN_WARNING
+ "PROM version < 4.50 -- implementing old PROM flush WAR\n");
war_list = kzalloc(DEV_PER_WIDGET * sizeof(*war_list), GFP_KERNEL);
BUG_ON(!war_list);
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2bae..e884ba4e031d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
stat->deadlocks,
- 1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
- 1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+ 1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
stat->shub_ptc_flushes_not_my_mm,
stat->deadlocks2,
stat->shub_ipi_flushes,
- 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+ 1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
}
return 0;
}
diff --git a/arch/ia64/sn/kernel/sn2/sn_hwperf.c b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
index 4c7e74790958..55ac3c4e11d2 100644
--- a/arch/ia64/sn/kernel/sn2/sn_hwperf.c
+++ b/arch/ia64/sn/kernel/sn2/sn_hwperf.c
@@ -786,17 +786,18 @@ sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg)
break;
case SN_HWPERF_GET_OBJ_NODE:
- if (a.sz != sizeof(u64) || a.arg < 0) {
+ i = a.arg;
+ if (a.sz != sizeof(u64) || i < 0) {
r = -EINVAL;
goto error;
}
if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) {
- if (a.arg >= nobj) {
+ if (i >= nobj) {
r = -EINVAL;
vfree(objs);
goto error;
}
- if (objs[(i = a.arg)].id != a.arg) {
+ if (objs[i].id != a.arg) {
for (i = 0; i < nobj; i++) {
if (objs[i].id == a.arg)
break;
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 35b2a27d2e77..efb454534e52 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,7 @@
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
+#include <linux/bitmap.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/addrs.h>
#include <asm/sn/io.h>
@@ -369,7 +370,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
static dma_addr_t
tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
{
- int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+ int ps, ps_shift, entry, entries, mapsize;
u64 xio_addr, end_xio_addr;
struct tioca_common *tioca_common;
struct tioca_kernel *tioca_kern;
@@ -410,23 +411,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
map = tioca_kern->ca_pcigart_pagemap;
mapsize = tioca_kern->ca_pcigart_entries;
- entry = find_first_zero_bit(map, mapsize);
- while (entry < mapsize) {
- last_entry = find_next_bit(map, mapsize, entry);
-
- if (last_entry - entry >= entries)
- break;
-
- entry = find_next_zero_bit(map, mapsize, last_entry);
- }
-
- if (entry > mapsize) {
+ entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+ if (entry >= mapsize) {
kfree(ca_dmamap);
goto map_return;
}
- for (i = 0; i < entries; i++)
- set_bit(entry + i, map);
+ bitmap_set(map, entry, entries);
bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
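bitmap_find_next_zero_area() and bitmap_set() replace the open-coded scan, and the failure check becomes entry >= mapsize, fixing the old 'entry > mapsize' off-by-one. The helper signatures, as added to lib/bitmap.c in this same cycle (era signatures, shown for reference):

	/* returns the start of a clear run of 'nr' bits, or >= 'size' if none */
	unsigned long bitmap_find_next_zero_area(unsigned long *map,
						 unsigned long size,
						 unsigned long start,
						 unsigned int nr,
						 unsigned long align_mask);
	void bitmap_set(unsigned long *map, int start, int nr);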
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2fe..a3fb7cf9ae1d 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
}
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
#define NAME_SIZE 15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
#undef NAME_SIZE
struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
if (xen_slab_ready) {
switch (vec) {
case IA64_TIMER_VECTOR:
- snprintf(per_cpu(timer_name, cpu),
- sizeof(per_cpu(timer_name, cpu)),
+ snprintf(per_cpu(xen_timer_name, cpu),
+ sizeof(per_cpu(xen_timer_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
action->handler, action->flags,
- per_cpu(timer_name, cpu), action->dev_id);
- per_cpu(timer_irq, cpu) = irq;
+ per_cpu(xen_timer_name, cpu), action->dev_id);
+ per_cpu(xen_timer_irq, cpu) = irq;
break;
case IA64_IPI_RESCHEDULE:
- snprintf(per_cpu(resched_name, cpu),
- sizeof(per_cpu(resched_name, cpu)),
+ snprintf(per_cpu(xen_resched_name, cpu),
+ sizeof(per_cpu(xen_resched_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
action->handler, action->flags,
- per_cpu(resched_name, cpu), action->dev_id);
- per_cpu(resched_irq, cpu) = irq;
+ per_cpu(xen_resched_name, cpu), action->dev_id);
+ per_cpu(xen_resched_irq, cpu) = irq;
break;
case IA64_IPI_VECTOR:
- snprintf(per_cpu(ipi_name, cpu),
- sizeof(per_cpu(ipi_name, cpu)),
+ snprintf(per_cpu(xen_ipi_name, cpu),
+ sizeof(per_cpu(xen_ipi_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
action->handler, action->flags,
- per_cpu(ipi_name, cpu), action->dev_id);
- per_cpu(ipi_irq, cpu) = irq;
+ per_cpu(xen_ipi_name, cpu), action->dev_id);
+ per_cpu(xen_ipi_irq, cpu) = irq;
break;
case IA64_CMC_VECTOR:
- snprintf(per_cpu(cmc_name, cpu),
- sizeof(per_cpu(cmc_name, cpu)),
+ snprintf(per_cpu(xen_cmc_name, cpu),
+ sizeof(per_cpu(xen_cmc_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
- action->handler,
- action->flags,
- per_cpu(cmc_name, cpu),
- action->dev_id);
- per_cpu(cmc_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cmc_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cmc_irq, cpu) = irq;
break;
case IA64_CMCP_VECTOR:
- snprintf(per_cpu(cmcp_name, cpu),
- sizeof(per_cpu(cmcp_name, cpu)),
+ snprintf(per_cpu(xen_cmcp_name, cpu),
+ sizeof(per_cpu(xen_cmcp_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(cmcp_name, cpu),
- action->dev_id);
- per_cpu(cmcp_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cmcp_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cmcp_irq, cpu) = irq;
break;
case IA64_CPEP_VECTOR:
- snprintf(per_cpu(cpep_name, cpu),
- sizeof(per_cpu(cpep_name, cpu)),
+ snprintf(per_cpu(xen_cpep_name, cpu),
+ sizeof(per_cpu(xen_cpep_name, cpu)),
"%s%d", action->name, cpu);
irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
- action->handler,
- action->flags,
- per_cpu(cpep_name, cpu),
- action->dev_id);
- per_cpu(cpep_irq, cpu) = irq;
+ action->handler,
+ action->flags,
+ per_cpu(xen_cpep_name, cpu),
+ action->dev_id);
+ per_cpu(xen_cpep_irq, cpu) = irq;
break;
case IA64_CPE_VECTOR:
case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
if (action == CPU_DEAD) {
/* Unregister evtchn. */
- if (per_cpu(cpep_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
- per_cpu(cpep_irq, cpu) = -1;
+ if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+ NULL);
+ per_cpu(xen_cpep_irq, cpu) = -1;
}
- if (per_cpu(cmcp_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
- per_cpu(cmcp_irq, cpu) = -1;
+ if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+ NULL);
+ per_cpu(xen_cmcp_irq, cpu) = -1;
}
- if (per_cpu(cmc_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
- per_cpu(cmc_irq, cpu) = -1;
+ if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+ per_cpu(xen_cmc_irq, cpu) = -1;
}
- if (per_cpu(ipi_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
- per_cpu(ipi_irq, cpu) = -1;
+ if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+ per_cpu(xen_ipi_irq, cpu) = -1;
}
- if (per_cpu(resched_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(resched_irq, cpu),
- NULL);
- per_cpu(resched_irq, cpu) = -1;
+ if (per_cpu(xen_resched_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
+ NULL);
+ per_cpu(xen_resched_irq, cpu) = -1;
}
- if (per_cpu(timer_irq, cpu) >= 0) {
- unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
- per_cpu(timer_irq, cpu) = -1;
+ if (per_cpu(xen_timer_irq, cpu) >= 0) {
+ unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+ NULL);
+ per_cpu(xen_timer_irq, cpu) = -1;
}
}
return NOTIFY_OK;
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20..c1c544513e8d 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
#include "../kernel/fsyscall_gtod_data.h"
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
struct vcpu_register_runstate_memory_area area;
- struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+ struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
int rc;
memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
&area);
WARN_ON(rc && rc != -ENOSYS);
- per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
- per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+ per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+ runstate->time[RUNSTATE_offline];
}
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
BUG_ON(preemptible());
- state = &__get_cpu_var(runstate);
+ state = &__get_cpu_var(xen_runstate);
/*
* The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
* This function just checks for and rejects this effect.
*/
if (!time_after_eq(runstate.time[RUNSTATE_blocked],
- per_cpu(processed_blocked_time, cpu)))
+ per_cpu(xen_blocked_time, cpu)))
blocked = 0;
if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
runstate.time[RUNSTATE_offline],
- per_cpu(processed_stolen_time, cpu)))
+ per_cpu(xen_stolen_time, cpu)))
stolen = 0;
if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
} else {
local_cpu_data->itm_next = delta_itm + new_itm;
}
- per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
- per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+ per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+ per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
}
return delta_itm;
}