Diffstat (limited to 'arch/ia64')
40 files changed, 496 insertions, 556 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index ff7ae6b664e8..edffe25a477a 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -34,6 +34,10 @@ config RWSEM_XCHGADD_ALGORITHM
 	bool
 	default y
 
+config GENERIC_FIND_NEXT_BIT
+	bool
+	default y
+
 config GENERIC_CALIBRATE_DELAY
 	bool
 	default y
@@ -42,6 +46,10 @@ config TIME_INTERPOLATION
 	bool
 	default y
 
+config DMI
+	bool
+	default y
+
 config EFI
 	bool
 	default y
@@ -252,6 +260,15 @@ config NR_CPUS
 	  than 64 will cause the use of a CPU mask array, causing a small
 	  performance hit.
 
+config IA64_NR_NODES
+	int "Maximum number of NODEs (256-1024)" if (IA64_SGI_SN2 || IA64_GENERIC)
+	range 256 1024
+	depends on IA64_SGI_SN2 || IA64_GENERIC
+	default "256"
+	help
+	  This option specifies the maximum number of nodes in your SSI system.
+	  If in doubt, use the default.
+
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
 	depends on SMP && EXPERIMENTAL
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index f722e1a25948..80ea7506fa1a 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -1,6 +1,9 @@
 #
 # ia64/Makefile
 #
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
 # This file is subject to the terms and conditions of the GNU General Public
 # License. See the file "COPYING" in the main directory of this archive
 # for more details.
@@ -62,7 +65,7 @@ drivers-$(CONFIG_OPROFILE)	+= arch/ia64/oprofile/
 
 boot := arch/ia64/hp/sim/boot
 
-.PHONY: boot compressed check
+PHONY += boot compressed check
 
 all: compressed unwcheck
 
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index 744fd2f79f61..0d29aa2066b3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -116,6 +116,7 @@ CONFIG_IOSAPIC=y
 CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
 CONFIG_NR_CPUS=512
+CONFIG_IA64_NR_NODES=256
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_SCHED_SMT is not set
 # CONFIG_PREEMPT is not set
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 8206752161bb..a718034d68d0 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -116,6 +116,7 @@ CONFIG_IA64_SGI_SN_XP=m
 CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
 CONFIG_NR_CPUS=1024
+CONFIG_IA64_NR_NODES=256
 # CONFIG_HOTPLUG_CPU is not set
 CONFIG_SCHED_SMT=y
 CONFIG_PREEMPT=y
diff --git a/arch/ia64/defconfig b/arch/ia64/defconfig
index 3e767288a745..6cba55da572a 100644
--- a/arch/ia64/defconfig
+++ b/arch/ia64/defconfig
@@ -116,6 +116,7 @@ CONFIG_IOSAPIC=y
 CONFIG_FORCE_MAX_ZONEORDER=17
 CONFIG_SMP=y
 CONFIG_NR_CPUS=512
+CONFIG_IA64_NR_NODES=256
 CONFIG_HOTPLUG_CPU=y
 # CONFIG_SCHED_SMT is not set
 # CONFIG_PREEMPT is not set
diff --git a/arch/ia64/dig/setup.c b/arch/ia64/dig/setup.c
index c9104bfff667..38aa9c108857 100644
--- a/arch/ia64/dig/setup.c
+++ b/arch/ia64/dig/setup.c
@@ -69,8 +69,3 @@ dig_setup (char **cmdline_p)
 	screen_info.orig_video_isVGA = 1;	/* XXX fake */
 	screen_info.orig_video_ega_bx = 3;	/* XXX fake */
 }
-
-void __init
-dig_irq_init (void)
-{
-}
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
index 68ceb4e690c7..ccb98ed48e58 100644
--- a/arch/ia64/ia32/ia32priv.h
+++ b/arch/ia64/ia32/ia32priv.h
@@ -29,9 +29,9 @@ struct partial_page {
 	struct partial_page	*next; /* linked list, sorted by address */
 	struct rb_node		pp_rb;
-	/* 64K is the largest "normal" page supported by ia64 ABI.
So 4K*64 * should suffice.*/ - unsigned int bitmap; + unsigned long bitmap; unsigned int base; }; diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c index 70dba1f0e2ee..5366b3b23d09 100644 --- a/arch/ia64/ia32/sys_ia32.c +++ b/arch/ia64/ia32/sys_ia32.c @@ -25,7 +25,6 @@ #include <linux/resource.h> #include <linux/times.h> #include <linux/utsname.h> -#include <linux/timex.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/sem.h> @@ -1166,19 +1165,7 @@ put_tv32 (struct compat_timeval __user *o, struct timeval *i) asmlinkage unsigned long sys32_alarm (unsigned int seconds) { - struct itimerval it_new, it_old; - unsigned int oldalarm; - - it_new.it_interval.tv_sec = it_new.it_interval.tv_usec = 0; - it_new.it_value.tv_sec = seconds; - it_new.it_value.tv_usec = 0; - do_setitimer(ITIMER_REAL, &it_new, &it_old); - oldalarm = it_old.it_value.tv_sec; - /* ehhh.. We can't return 0 if we have an alarm pending.. */ - /* And we'd better return too much than too little anyway */ - if (it_old.it_value.tv_usec) - oldalarm++; - return oldalarm; + return alarm_setitimer(seconds); } /* Translations due to time_t size differences. Which affects all @@ -2603,78 +2590,4 @@ sys32_setresgid(compat_gid_t rgid, compat_gid_t egid, ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid); return sys_setresgid(srgid, segid, ssgid); } - -/* Handle adjtimex compatibility. */ - -struct timex32 { - u32 modes; - s32 offset, freq, maxerror, esterror; - s32 status, constant, precision, tolerance; - struct compat_timeval time; - s32 tick; - s32 ppsfreq, jitter, shift, stabil; - s32 jitcnt, calcnt, errcnt, stbcnt; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; - s32 :32; s32 :32; s32 :32; s32 :32; -}; - -extern int do_adjtimex(struct timex *); - -asmlinkage long -sys32_adjtimex(struct timex32 *utp) -{ - struct timex txc; - int ret; - - memset(&txc, 0, sizeof(struct timex)); - - if(get_user(txc.modes, &utp->modes) || - __get_user(txc.offset, &utp->offset) || - __get_user(txc.freq, &utp->freq) || - __get_user(txc.maxerror, &utp->maxerror) || - __get_user(txc.esterror, &utp->esterror) || - __get_user(txc.status, &utp->status) || - __get_user(txc.constant, &utp->constant) || - __get_user(txc.precision, &utp->precision) || - __get_user(txc.tolerance, &utp->tolerance) || - __get_user(txc.time.tv_sec, &utp->time.tv_sec) || - __get_user(txc.time.tv_usec, &utp->time.tv_usec) || - __get_user(txc.tick, &utp->tick) || - __get_user(txc.ppsfreq, &utp->ppsfreq) || - __get_user(txc.jitter, &utp->jitter) || - __get_user(txc.shift, &utp->shift) || - __get_user(txc.stabil, &utp->stabil) || - __get_user(txc.jitcnt, &utp->jitcnt) || - __get_user(txc.calcnt, &utp->calcnt) || - __get_user(txc.errcnt, &utp->errcnt) || - __get_user(txc.stbcnt, &utp->stbcnt)) - return -EFAULT; - - ret = do_adjtimex(&txc); - - if(put_user(txc.modes, &utp->modes) || - __put_user(txc.offset, &utp->offset) || - __put_user(txc.freq, &utp->freq) || - __put_user(txc.maxerror, &utp->maxerror) || - __put_user(txc.esterror, &utp->esterror) || - __put_user(txc.status, &utp->status) || - __put_user(txc.constant, &utp->constant) || - __put_user(txc.precision, &utp->precision) || - __put_user(txc.tolerance, &utp->tolerance) || - __put_user(txc.time.tv_sec, &utp->time.tv_sec) || - __put_user(txc.time.tv_usec, &utp->time.tv_usec) || - __put_user(txc.tick, &utp->tick) || - __put_user(txc.ppsfreq, &utp->ppsfreq) || - __put_user(txc.jitter, &utp->jitter) || - __put_user(txc.shift, &utp->shift) || - 
__put_user(txc.stabil, &utp->stabil) || - __put_user(txc.jitcnt, &utp->jitcnt) || - __put_user(txc.calcnt, &utp->calcnt) || - __put_user(txc.errcnt, &utp->errcnt) || - __put_user(txc.stbcnt, &utp->stbcnt)) - ret = -EFAULT; - - return ret; -} #endif /* NOTYET */ diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index 09a0dbc17fb6..59e871dae742 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -7,7 +7,7 @@ extra-y := head.o init_task.o vmlinux.lds obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ irq_lsapic.o ivt.o machvec.o pal.o patch.o process.o perfmon.o ptrace.o sal.o \ salinfo.o semaphore.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ - unwind.o mca.o mca_asm.o topology.o + unwind.o mca.o mca_asm.o topology.o dmi_scan.o obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o @@ -30,6 +30,7 @@ obj-$(CONFIG_IA64_MCA_RECOVERY) += mca_recovery.o obj-$(CONFIG_KPROBES) += kprobes.o jprobes.o obj-$(CONFIG_IA64_UNCACHED_ALLOCATOR) += uncached.o mca_recovery-y += mca_drv.o mca_drv_asm.o +dmi_scan-y += ../../i386/kernel/dmi_scan.o # The gate DSO image is built using a special linker script. targets += gate.so gate-syms.o diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c index 4722ec51c70c..58c93a30348c 100644 --- a/arch/ia64/kernel/acpi.c +++ b/arch/ia64/kernel/acpi.c @@ -420,6 +420,26 @@ int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS]; int __initdata nid_to_pxm_map[MAX_NUMNODES]; static struct acpi_table_slit __initdata *slit_table; +static int get_processor_proximity_domain(struct acpi_table_processor_affinity *pa) +{ + int pxm; + + pxm = pa->proximity_domain; + if (ia64_platform_is("sn2")) + pxm += pa->reserved[0] << 8; + return pxm; +} + +static int get_memory_proximity_domain(struct acpi_table_memory_affinity *ma) +{ + int pxm; + + pxm = ma->proximity_domain; + if (ia64_platform_is("sn2")) + pxm += ma->reserved1[0] << 8; + return pxm; +} + /* * ACPI 2.0 SLIT (System Locality Information Table) * http://devresource.hp.com/devresource/Docs/TechPapers/IA64/slit.pdf @@ -443,13 +463,20 @@ void __init acpi_numa_slit_init(struct acpi_table_slit *slit) void __init acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa) { + int pxm; + + if (!pa->flags.enabled) + return; + + pxm = get_processor_proximity_domain(pa); + /* record this node in proximity bitmap */ - pxm_bit_set(pa->proximity_domain); + pxm_bit_set(pxm); node_cpuid[srat_num_cpus].phys_id = (pa->apic_id << 8) | (pa->lsapic_eid); /* nid should be overridden as logical node id later */ - node_cpuid[srat_num_cpus].nid = pa->proximity_domain; + node_cpuid[srat_num_cpus].nid = pxm; srat_num_cpus++; } @@ -457,10 +484,10 @@ void __init acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma) { unsigned long paddr, size; - u8 pxm; + int pxm; struct node_memblk_s *p, *q, *pend; - pxm = ma->proximity_domain; + pxm = get_memory_proximity_domain(ma); /* fill node memory chunk structure */ paddr = ma->base_addr_hi; @@ -624,9 +651,9 @@ unsigned long __init acpi_find_rsdp(void) { unsigned long rsdp_phys = 0; - if (efi.acpi20) - rsdp_phys = __pa(efi.acpi20); - else if (efi.acpi) + if (efi.acpi20 != EFI_INVALID_TABLE_ADDR) + rsdp_phys = efi.acpi20; + else if (efi.acpi != EFI_INVALID_TABLE_ADDR) printk(KERN_WARNING PREFIX "v1.0/r0.71 tables no longer supported\n"); return rsdp_phys; diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c index 9990320b6f9a..12cfedce73b1 100644 --- 
a/arch/ia64/kernel/efi.c +++ b/arch/ia64/kernel/efi.c @@ -458,24 +458,33 @@ efi_init (void) printk(KERN_INFO "EFI v%u.%.02u by %s:", efi.systab->hdr.revision >> 16, efi.systab->hdr.revision & 0xffff, vendor); + efi.mps = EFI_INVALID_TABLE_ADDR; + efi.acpi = EFI_INVALID_TABLE_ADDR; + efi.acpi20 = EFI_INVALID_TABLE_ADDR; + efi.smbios = EFI_INVALID_TABLE_ADDR; + efi.sal_systab = EFI_INVALID_TABLE_ADDR; + efi.boot_info = EFI_INVALID_TABLE_ADDR; + efi.hcdp = EFI_INVALID_TABLE_ADDR; + efi.uga = EFI_INVALID_TABLE_ADDR; + for (i = 0; i < (int) efi.systab->nr_tables; i++) { if (efi_guidcmp(config_tables[i].guid, MPS_TABLE_GUID) == 0) { - efi.mps = __va(config_tables[i].table); + efi.mps = config_tables[i].table; printk(" MPS=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_20_TABLE_GUID) == 0) { - efi.acpi20 = __va(config_tables[i].table); + efi.acpi20 = config_tables[i].table; printk(" ACPI 2.0=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, ACPI_TABLE_GUID) == 0) { - efi.acpi = __va(config_tables[i].table); + efi.acpi = config_tables[i].table; printk(" ACPI=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SMBIOS_TABLE_GUID) == 0) { - efi.smbios = __va(config_tables[i].table); + efi.smbios = config_tables[i].table; printk(" SMBIOS=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, SAL_SYSTEM_TABLE_GUID) == 0) { - efi.sal_systab = __va(config_tables[i].table); + efi.sal_systab = config_tables[i].table; printk(" SALsystab=0x%lx", config_tables[i].table); } else if (efi_guidcmp(config_tables[i].guid, HCDP_TABLE_GUID) == 0) { - efi.hcdp = __va(config_tables[i].table); + efi.hcdp = config_tables[i].table; printk(" HCDP=0x%lx", config_tables[i].table); } } @@ -677,27 +686,34 @@ EXPORT_SYMBOL(efi_mem_attributes); /* * Determines whether the memory at phys_addr supports the desired * attribute (WB, UC, etc). If this returns 1, the caller can safely - * access *size bytes at phys_addr with the specified attribute. + * access size bytes at phys_addr with the specified attribute. */ -static int -efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) +int +efi_mem_attribute_range (unsigned long phys_addr, unsigned long size, u64 attr) { + unsigned long end = phys_addr + size; efi_memory_desc_t *md = efi_memory_descriptor(phys_addr); - unsigned long md_end; - if (!md || (md->attribute & attr) != attr) + /* + * Some firmware doesn't report MMIO regions in the EFI memory + * map. The Intel BigSur (a.k.a. HP i2000) has this problem. + * On those platforms, we have to assume UC is valid everywhere. + */ + if (!md || (md->attribute & attr) != attr) { + if (attr == EFI_MEMORY_UC && !efi_memmap_has_mmio()) + return 1; return 0; + } do { - md_end = efi_md_end(md); - if (phys_addr + *size <= md_end) + unsigned long md_end = efi_md_end(md); + + if (end <= md_end) return 1; md = efi_memory_descriptor(md_end); - if (!md || (md->attribute & attr) != attr) { - *size = md_end - phys_addr; - return 1; - } + if (!md || (md->attribute & attr) != attr) + return 0; } while (md); return 0; } @@ -708,7 +724,7 @@ efi_mem_attribute_range (unsigned long phys_addr, unsigned long *size, u64 attr) * control access size. 
*/ int -valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) +valid_phys_addr_range (unsigned long phys_addr, unsigned long size) { return efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB); } @@ -723,7 +739,7 @@ valid_phys_addr_range (unsigned long phys_addr, unsigned long *size) * because that doesn't appear in the boot-time EFI memory map. */ int -valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) +valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long size) { if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_WB)) return 1; @@ -731,14 +747,6 @@ valid_mmap_phys_addr_range (unsigned long phys_addr, unsigned long *size) if (efi_mem_attribute_range(phys_addr, size, EFI_MEMORY_UC)) return 1; - /* - * Some firmware doesn't report MMIO regions in the EFI memory map. - * The Intel BigSur (a.k.a. HP i2000) has this problem. In this - * case, we can't use the EFI memory map to validate mmap requests. - */ - if (!efi_memmap_has_mmio()) - return 1; - return 0; } diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index dcd906fe5749..829a43cab797 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -865,6 +865,7 @@ ENTRY(interrupt) ;; SAVE_REST ;; + MCA_RECOVER_RANGE(interrupt) alloc r14=ar.pfs,0,0,2,0 // must be first in an insn group mov out0=cr.ivr // pass cr.ivr as first arg add out1=16,sp // pass pointer to pt_regs as second arg diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 50ae8c7d453d..789881ca83d4 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -34,6 +34,7 @@ #include <asm/pgtable.h> #include <asm/kdebug.h> #include <asm/sections.h> +#include <asm/uaccess.h> extern void jprobe_inst_return(void); @@ -722,13 +723,50 @@ static int __kprobes kprobes_fault_handler(struct pt_regs *regs, int trapnr) struct kprobe *cur = kprobe_running(); struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); - if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) - return 1; - if (kcb->kprobe_status & KPROBE_HIT_SS) { - resume_execution(cur, regs); - reset_current_kprobe(); + switch(kcb->kprobe_status) { + case KPROBE_HIT_SS: + case KPROBE_REENTER: + /* + * We are here because the instruction being single + * stepped caused a page fault. We reset the current + * kprobe and the instruction pointer points back to + * the probe address and allow the page fault handler + * to continue as a normal page fault. + */ + regs->cr_iip = ((unsigned long)cur->addr) & ~0xFULL; + ia64_psr(regs)->ri = ((unsigned long)cur->addr) & 0xf; + if (kcb->kprobe_status == KPROBE_REENTER) + restore_previous_kprobe(kcb); + else + reset_current_kprobe(); preempt_enable_no_resched(); + break; + case KPROBE_HIT_ACTIVE: + case KPROBE_HIT_SSDONE: + /* + * We increment the nmissed count for accounting, + * we can also use npre/npostfault count for accouting + * these specific fault cases. + */ + kprobes_inc_nmissed_count(cur); + + /* + * We come here because instructions in the pre/post + * handler caused the page_fault, this could happen + * if handler tries to access user space by + * copy_from_user(), get_user() etc. Let the + * user-specified handler try to fix it first. + */ + if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) + return 1; + + /* + * Let ia64_do_page_fault() fix it. 
+ */ + break; + default: + break; } return 0; @@ -740,6 +778,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self, struct die_args *args = (struct die_args *)data; int ret = NOTIFY_DONE; + if (args->regs && user_mode(args->regs)) + return ret; + switch(val) { case DIE_BREAK: /* err is break number from ia64_bad_break() */ diff --git a/arch/ia64/kernel/machvec.c b/arch/ia64/kernel/machvec.c index c3a04ee7f4f6..4b0b71d5aef4 100644 --- a/arch/ia64/kernel/machvec.c +++ b/arch/ia64/kernel/machvec.c @@ -14,7 +14,15 @@ struct ia64_machine_vector ia64_mv; EXPORT_SYMBOL(ia64_mv); -static struct ia64_machine_vector * +static __initdata const char *mvec_name; +static __init int setup_mvec(char *s) +{ + mvec_name = s; + return 0; +} +early_param("machvec", setup_mvec); + +static struct ia64_machine_vector * __init lookup_machvec (const char *name) { extern struct ia64_machine_vector machvec_start[]; @@ -33,10 +41,13 @@ machvec_init (const char *name) { struct ia64_machine_vector *mv; + if (!name) + name = mvec_name ? mvec_name : acpi_get_sysname(); mv = lookup_machvec(name); - if (!mv) { - panic("generic kernel failed to find machine vector for platform %s!", name); - } + if (!mv) + panic("generic kernel failed to find machine vector for" + " platform %s!", name); + ia64_mv = *mv; printk(KERN_INFO "booting generic kernel on platform %s\n", name); } diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index b57e723f194c..8963171788d5 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -69,6 +69,7 @@ #include <linux/kernel.h> #include <linux/smp.h> #include <linux/workqueue.h> +#include <linux/cpumask.h> #include <asm/delay.h> #include <asm/kdebug.h> @@ -83,6 +84,7 @@ #include <asm/irq.h> #include <asm/hw_irq.h> +#include "mca_drv.h" #include "entry.h" #if defined(IA64_MCA_DEBUG_INFO) @@ -133,7 +135,7 @@ static int cpe_poll_enabled = 1; extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe); -static int mca_init; +static int mca_init __initdata; static void inline @@ -184,7 +186,7 @@ static ia64_state_log_t ia64_state_log[IA64_MAX_LOG_TYPES]; * Inputs : info_type (SAL_INFO_TYPE_{MCA,INIT,CMC,CPE}) * Outputs : None */ -static void +static void __init ia64_log_init(int sal_info_type) { u64 max_size = 0; @@ -281,6 +283,50 @@ ia64_mca_log_sal_error_record(int sal_info_type) ia64_sal_clear_state_info(sal_info_type); } +/* + * search_mca_table + * See if the MCA surfaced in an instruction range + * that has been tagged as recoverable. + * + * Inputs + * first First address range to check + * last Last address range to check + * ip Instruction pointer, address we are looking for + * + * Return value: + * 1 on Success (in the table)/ 0 on Failure (not in the table) + */ +int +search_mca_table (const struct mca_table_entry *first, + const struct mca_table_entry *last, + unsigned long ip) +{ + const struct mca_table_entry *curr; + u64 curr_start, curr_end; + + curr = first; + while (curr <= last) { + curr_start = (u64) &curr->start_addr + curr->start_addr; + curr_end = (u64) &curr->end_addr + curr->end_addr; + + if ((ip >= curr_start) && (ip <= curr_end)) { + return 1; + } + curr++; + } + return 0; +} + +/* Given an address, look for it in the mca tables. 
*/ +int mca_recover_range(unsigned long addr) +{ + extern struct mca_table_entry __start___mca_table[]; + extern struct mca_table_entry __stop___mca_table[]; + + return search_mca_table(__start___mca_table, __stop___mca_table-1, addr); +} +EXPORT_SYMBOL_GPL(mca_recover_range); + #ifdef CONFIG_ACPI int cpe_vector = -1; @@ -355,7 +401,7 @@ ia64_mca_cpe_int_handler (int cpe_irq, void *arg, struct pt_regs *ptregs) * Outputs * None */ -static void +static void __init ia64_mca_register_cpev (int cpev) { /* Register the CPE interrupt vector with SAL */ @@ -386,7 +432,7 @@ ia64_mca_register_cpev (int cpev) * Outputs * None */ -void +void __cpuinit ia64_mca_cmc_vector_setup (void) { cmcv_reg_t cmcv; @@ -747,31 +793,34 @@ ia64_mca_modify_original_stack(struct pt_regs *regs, ia64_mca_modify_comm(previous_current); goto no_mod; } - if (r13 != sos->prev_IA64_KR_CURRENT) { - msg = "inconsistent previous current and r13"; - goto no_mod; - } - if ((r12 - r13) >= KERNEL_STACK_SIZE) { - msg = "inconsistent r12 and r13"; - goto no_mod; - } - if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { - msg = "inconsistent ar.bspstore and r13"; - goto no_mod; - } - va.p = old_bspstore; - if (va.f.reg < 5) { - msg = "old_bspstore is in the wrong region"; - goto no_mod; - } - if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { - msg = "inconsistent ar.bsp and r13"; - goto no_mod; - } - size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; - if (ar_bspstore + size > r12) { - msg = "no room for blocked state"; - goto no_mod; + + if (!mca_recover_range(ms->pmsa_iip)) { + if (r13 != sos->prev_IA64_KR_CURRENT) { + msg = "inconsistent previous current and r13"; + goto no_mod; + } + if ((r12 - r13) >= KERNEL_STACK_SIZE) { + msg = "inconsistent r12 and r13"; + goto no_mod; + } + if ((ar_bspstore - r13) >= KERNEL_STACK_SIZE) { + msg = "inconsistent ar.bspstore and r13"; + goto no_mod; + } + va.p = old_bspstore; + if (va.f.reg < 5) { + msg = "old_bspstore is in the wrong region"; + goto no_mod; + } + if ((ar_bsp - r13) >= KERNEL_STACK_SIZE) { + msg = "inconsistent ar.bsp and r13"; + goto no_mod; + } + size += (ia64_rse_skip_regs(old_bspstore, slots) - old_bspstore) * 8; + if (ar_bspstore + size > r12) { + msg = "no room for blocked state"; + goto no_mod; + } } ia64_mca_modify_comm(previous_current); @@ -1443,7 +1492,7 @@ static struct irqaction mca_cpep_irqaction = { * format most of the fields. */ -static void +static void __cpuinit format_mca_init_stack(void *mca_data, unsigned long offset, const char *type, int cpu) { @@ -1457,7 +1506,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, ti->cpu = cpu; p->thread_info = ti; p->state = TASK_UNINTERRUPTIBLE; - __set_bit(cpu, &p->cpus_allowed); + cpu_set(cpu, p->cpus_allowed); INIT_LIST_HEAD(&p->tasks); p->parent = p->real_parent = p->group_leader = p; INIT_LIST_HEAD(&p->children); @@ -1467,7 +1516,7 @@ format_mca_init_stack(void *mca_data, unsigned long offset, /* Do per-CPU MCA-related initialization. 
*/ -void __devinit +void __cpuinit ia64_mca_cpu_init(void *cpu_data) { void *pal_vaddr; diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index e883d85906db..37c88eb55873 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -6,6 +6,7 @@ * Copyright (C) Hidetoshi Seto (seto.hidetoshi@jp.fujitsu.com) * Copyright (C) 2005 Silicon Graphics, Inc * Copyright (C) 2005 Keith Owens <kaos@sgi.com> + * Copyright (C) 2006 Russ Anderson <rja@sgi.com> */ #include <linux/config.h> #include <linux/types.h> @@ -121,11 +122,12 @@ mca_page_isolate(unsigned long paddr) */ void -mca_handler_bh(unsigned long paddr) +mca_handler_bh(unsigned long paddr, void *iip, unsigned long ipsr) { - printk(KERN_ERR - "OS_MCA: process [pid: %d](%s) encounters MCA (paddr=%lx)\n", - current->pid, current->comm, paddr); + printk(KERN_ERR "OS_MCA: process [cpu %d, pid: %d, uid: %d, " + "iip: %p, psr: 0x%lx,paddr: 0x%lx](%s) encounters MCA.\n", + raw_smp_processor_id(), current->pid, current->uid, + iip, ipsr, paddr, current->comm); spin_lock(&mca_bh_lock); switch (mca_page_isolate(paddr)) { @@ -442,21 +444,26 @@ recover_from_read_error(slidx_table_t *slidx, if (!peidx_bottom(peidx) || !(peidx_bottom(peidx)->valid.minstate)) return 0; psr1 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_ipsr); + psr2 =(struct ia64_psr *)&(peidx_minstate_area(peidx)->pmsa_xpsr); /* * Check the privilege level of interrupted context. * If it is user-mode, then terminate affected process. */ - if (psr1->cpl != 0) { + + pmsa = sos->pal_min_state; + if (psr1->cpl != 0 || + ((psr2->cpl != 0) && mca_recover_range(pmsa->pmsa_iip))) { smei = peidx_bus_check(peidx, 0); if (smei->valid.target_identifier) { /* * setup for resume to bottom half of MCA, * "mca_handler_bhhook" */ - pmsa = sos->pal_min_state; - /* pass to bhhook as 1st argument (gr8) */ + /* pass to bhhook as argument (gr8, ...) */ pmsa->pmsa_gr[8-1] = smei->target_identifier; + pmsa->pmsa_gr[9-1] = pmsa->pmsa_iip; + pmsa->pmsa_gr[10-1] = pmsa->pmsa_ipsr; /* set interrupted return address (but no use) */ pmsa->pmsa_br0 = pmsa->pmsa_iip; /* change resume address to bottom half */ @@ -466,6 +473,7 @@ recover_from_read_error(slidx_table_t *slidx, psr2 = (struct ia64_psr *)&pmsa->pmsa_ipsr; psr2->cpl = 0; psr2->ri = 0; + psr2->bn = 1; psr2->i = 0; return 1; diff --git a/arch/ia64/kernel/mca_drv.h b/arch/ia64/kernel/mca_drv.h index e2f6fa1e0ef6..31a2e52bb16f 100644 --- a/arch/ia64/kernel/mca_drv.h +++ b/arch/ia64/kernel/mca_drv.h @@ -111,3 +111,10 @@ typedef struct slidx_table { slidx_foreach_entry(__pos, &((slidx)->sec)) { __count++; }\ __count; }) +struct mca_table_entry { + int start_addr; /* location-relative starting address of MCA recoverable range */ + int end_addr; /* location-relative ending address of MCA recoverable range */ +}; + +extern const struct mca_table_entry *search_mca_tables (unsigned long addr); +extern int mca_recover_range(unsigned long); diff --git a/arch/ia64/kernel/mca_drv_asm.S b/arch/ia64/kernel/mca_drv_asm.S index 3f298ee4d00c..e6a580d354b9 100644 --- a/arch/ia64/kernel/mca_drv_asm.S +++ b/arch/ia64/kernel/mca_drv_asm.S @@ -14,15 +14,12 @@ GLOBAL_ENTRY(mca_handler_bhhook) invala // clear RSE ? 
- ;; cover ;; clrrrb ;; - alloc r16=ar.pfs,0,2,1,0 // make a new frame - ;; + alloc r16=ar.pfs,0,2,3,0 // make a new frame mov ar.rsc=0 - ;; mov r13=IA64_KR(CURRENT) // current task pointer ;; mov r2=r13 @@ -30,7 +27,6 @@ GLOBAL_ENTRY(mca_handler_bhhook) addl r22=IA64_RBS_OFFSET,r2 ;; mov ar.bspstore=r22 - ;; addl sp=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 ;; adds r2=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13 @@ -40,12 +36,12 @@ GLOBAL_ENTRY(mca_handler_bhhook) movl loc1=mca_handler_bh // recovery C function ;; mov out0=r8 // poisoned address + mov out1=r9 // iip + mov out2=r10 // psr mov b6=loc1 ;; mov loc1=rp - ;; - ssm psr.i - ;; + ssm psr.i | psr.ic br.call.sptk.many rp=b6 // does not return ... ;; mov ar.pfs=loc0 @@ -53,5 +49,4 @@ GLOBAL_ENTRY(mca_handler_bhhook) ;; mov r8=r0 br.ret.sptk.many rp - ;; END(mca_handler_bhhook) diff --git a/arch/ia64/kernel/numa.c b/arch/ia64/kernel/numa.c index a68ce6678092..0766493d4d00 100644 --- a/arch/ia64/kernel/numa.c +++ b/arch/ia64/kernel/numa.c @@ -25,7 +25,7 @@ #include <asm/processor.h> #include <asm/smp.h> -u8 cpu_to_node_map[NR_CPUS] __cacheline_aligned; +u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned; EXPORT_SYMBOL(cpu_to_node_map); cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned; diff --git a/arch/ia64/kernel/patch.c b/arch/ia64/kernel/patch.c index 6a4ac7d70b35..bc11bb096f58 100644 --- a/arch/ia64/kernel/patch.c +++ b/arch/ia64/kernel/patch.c @@ -115,7 +115,7 @@ ia64_patch_vtop (unsigned long start, unsigned long end) ia64_srlz_i(); } -void +void __init ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) { static int first_time = 1; @@ -149,7 +149,7 @@ ia64_patch_mckinley_e9 (unsigned long start, unsigned long end) ia64_srlz_i(); } -static void +static void __init patch_fsyscall_table (unsigned long start, unsigned long end) { extern unsigned long fsyscall_table[NR_syscalls]; @@ -166,7 +166,7 @@ patch_fsyscall_table (unsigned long start, unsigned long end) ia64_srlz_i(); } -static void +static void __init patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) { extern char fsys_bubble_down[]; @@ -184,7 +184,7 @@ patch_brl_fsys_bubble_down (unsigned long start, unsigned long end) ia64_srlz_i(); } -void +void __init ia64_patch_gate (void) { # define START(name) ((unsigned long) __start_gate_##name##_patchlist) diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 309d59658e5f..355d57970ba3 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -30,7 +30,6 @@ #include <linux/efi.h> #include <linux/interrupt.h> #include <linux/delay.h> -#include <linux/kprobes.h> #include <asm/cpu.h> #include <asm/delay.h> @@ -738,13 +737,6 @@ void exit_thread (void) { - /* - * Remove function-return probe instances associated with this task - * and put them back on the free list. Do not insert an exit probe for - * this function, it will be disabled by kprobe_flush_task if you do. 
- */ - kprobe_flush_task(current); - ia64_drop_fpu(current); #ifdef CONFIG_PERFMON /* if needed, stop monitoring and flush state to perfmon context */ diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c index eaed14aac6aa..9887c8787e7a 100644 --- a/arch/ia64/kernel/ptrace.c +++ b/arch/ia64/kernel/ptrace.c @@ -1656,8 +1656,14 @@ syscall_trace_leave (long arg0, long arg1, long arg2, long arg3, long arg4, long arg5, long arg6, long arg7, struct pt_regs regs) { - if (unlikely(current->audit_context)) - audit_syscall_exit(current, AUDITSC_RESULT(regs.r10), regs.r8); + if (unlikely(current->audit_context)) { + int success = AUDITSC_RESULT(regs.r10); + long result = regs.r8; + + if (success != AUDITSC_SUCCESS) + result = -result; + audit_syscall_exit(current, success, result); + } if (test_thread_flag(TIF_SYSCALL_TRACE) && (current->ptrace & PT_PTRACED)) diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index 958c1508036f..e4dfda1eb7dd 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -37,6 +37,7 @@ #include <linux/string.h> #include <linux/threads.h> #include <linux/tty.h> +#include <linux/dmi.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/efi.h> @@ -130,8 +131,8 @@ EXPORT_SYMBOL(ia64_max_iommu_merge_mask); /* * We use a special marker for the end of memory and it uses the extra (+1) slot */ -struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1]; -int num_rsvd_regions; +struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; +int num_rsvd_regions __initdata; /* @@ -140,7 +141,7 @@ int num_rsvd_regions; * caller-specified function is called with the memory ranges that remain after filtering. * This routine does not assume the incoming segments are sorted. */ -int +int __init filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) { unsigned long range_start, range_end, prev_start; @@ -176,7 +177,7 @@ filter_rsvd_memory (unsigned long start, unsigned long end, void *arg) return 0; } -static void +static void __init sort_regions (struct rsvd_region *rsvd_region, int max) { int j; @@ -217,7 +218,7 @@ __initcall(register_memory); * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, * see include/asm-ia64/meminit.h if you need to define more. */ -void +void __init reserve_memory (void) { int n = 0; @@ -269,7 +270,7 @@ reserve_memory (void) * Grab the initrd start and end from the boot parameter struct given us by * the boot loader. 
*/ -void +void __init find_initrd (void) { #ifdef CONFIG_BLK_DEV_INITRD @@ -361,7 +362,7 @@ mark_bsp_online (void) } #ifdef CONFIG_SMP -static void +static void __init check_for_logical_procs (void) { pal_logical_to_physical_t info; @@ -388,6 +389,14 @@ check_for_logical_procs (void) } #endif +static __initdata int nomca; +static __init int setup_nomca(char *s) +{ + nomca = 1; + return 0; +} +early_param("nomca", setup_nomca); + void __init setup_arch (char **cmdline_p) { @@ -401,35 +410,15 @@ setup_arch (char **cmdline_p) efi_init(); io_port_init(); + parse_early_param(); + #ifdef CONFIG_IA64_GENERIC - { - const char *mvec_name = strstr (*cmdline_p, "machvec="); - char str[64]; - - if (mvec_name) { - const char *end; - size_t len; - - mvec_name += 8; - end = strchr (mvec_name, ' '); - if (end) - len = end - mvec_name; - else - len = strlen (mvec_name); - len = min(len, sizeof (str) - 1); - strncpy (str, mvec_name, len); - str[len] = '\0'; - mvec_name = str; - } else - mvec_name = acpi_get_sysname(); - machvec_init(mvec_name); - } + machvec_init(NULL); #endif if (early_console_setup(*cmdline_p) == 0) mark_bsp_online(); - parse_early_param(); #ifdef CONFIG_ACPI /* Initialize the ACPI boot-time table parser */ acpi_table_init(); @@ -445,7 +434,7 @@ setup_arch (char **cmdline_p) find_memory(); /* process SAL system table: */ - ia64_sal_init(efi.sal_systab); + ia64_sal_init(__va(efi.sal_systab)); ia64_setup_printk_clock(); @@ -492,7 +481,7 @@ setup_arch (char **cmdline_p) #endif /* enable IA-64 Machine Check Abort Handling unless disabled */ - if (!strstr(saved_command_line, "nomca")) + if (!nomca) ia64_mca_init(); platform_setup(cmdline_p); @@ -622,7 +611,7 @@ struct seq_operations cpuinfo_op = { .show = show_cpuinfo }; -void +static void __cpuinit identify_cpu (struct cpuinfo_ia64 *c) { union { @@ -699,7 +688,7 @@ setup_per_cpu_areas (void) * In addition, the minimum of the i-cache stride sizes is calculated for * "flush_icache_range()". */ -static void +static void __cpuinit get_max_cacheline_size (void) { unsigned long line_size, max = 1; @@ -762,10 +751,10 @@ get_max_cacheline_size (void) * cpu_init() initializes state that is per-CPU. This function acts * as a 'CPU state barrier', nothing should get across. */ -void +void __cpuinit cpu_init (void) { - extern void __devinit ia64_mmu_init (void *); + extern void __cpuinit ia64_mmu_init (void *); unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; unsigned int max_ctx; @@ -893,9 +882,16 @@ void sched_cacheflush(void) ia64_sal_cache_flush(3); } -void +void __init check_bugs (void) { ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, (unsigned long) __end___mckinley_e9_bundles); } + +static int __init run_dmi_scan(void) +{ + dmi_scan_machine(); + return 0; +} +core_initcall(run_dmi_scan); diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index c4b633b36dab..44e9547878ac 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -624,32 +624,8 @@ void __devinit smp_prepare_boot_cpu(void) per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE; } -/* - * mt_info[] is a temporary store for all info returned by - * PAL_LOGICAL_TO_PHYSICAL, to be copied into cpuinfo_ia64 when the - * specific cpu comes. 
- */ -static struct { - __u32 socket_id; - __u16 core_id; - __u16 thread_id; - __u16 proc_fixed_addr; - __u8 valid; -} mt_info[NR_CPUS] __devinitdata; - #ifdef CONFIG_HOTPLUG_CPU static inline void -remove_from_mtinfo(int cpu) -{ - int i; - - for_each_cpu(i) - if (mt_info[i].valid && mt_info[i].socket_id == - cpu_data(cpu)->socket_id) - mt_info[i].valid = 0; -} - -static inline void clear_cpu_sibling_map(int cpu) { int i; @@ -678,12 +654,6 @@ remove_siblinginfo(int cpu) /* remove it from all sibling map's */ clear_cpu_sibling_map(cpu); - - /* if this cpu is the last in the core group, remove all its info - * from mt_info structure - */ - if (last) - remove_from_mtinfo(cpu); } extern void fixup_irqs(void); @@ -878,40 +848,6 @@ init_smp_config(void) ia64_sal_strerror(sal_ret)); } -static inline int __devinit -check_for_mtinfo_index(void) -{ - int i; - - for_each_cpu(i) - if (!mt_info[i].valid) - return i; - - return -1; -} - -/* - * Search the mt_info to find out if this socket's cid/tid information is - * cached or not. If the socket exists, fill in the core_id and thread_id - * in cpuinfo - */ -static int __devinit -check_for_new_socket(__u16 logical_address, struct cpuinfo_ia64 *c) -{ - int i; - __u32 sid = c->socket_id; - - for_each_cpu(i) { - if (mt_info[i].valid && mt_info[i].proc_fixed_addr == logical_address - && mt_info[i].socket_id == sid) { - c->core_id = mt_info[i].core_id; - c->thread_id = mt_info[i].thread_id; - return 1; /* not a new socket */ - } - } - return 0; -} - /* * identify_siblings(cpu) gets called from identify_cpu. This populates the * information related to logical execution units in per_cpu_data structure. @@ -921,14 +857,12 @@ identify_siblings(struct cpuinfo_ia64 *c) { s64 status; u16 pltid; - u64 proc_fixed_addr; - int count, i; pal_logical_to_physical_t info; if (smp_num_cpucores == 1 && smp_num_siblings == 1) return; - if ((status = ia64_pal_logical_to_phys(0, &info)) != PAL_STATUS_SUCCESS) { + if ((status = ia64_pal_logical_to_phys(-1, &info)) != PAL_STATUS_SUCCESS) { printk(KERN_ERR "ia64_pal_logical_to_phys failed with %ld\n", status); return; @@ -937,47 +871,12 @@ identify_siblings(struct cpuinfo_ia64 *c) printk(KERN_ERR "ia64_sal_pltid failed with %ld\n", status); return; } - if ((status = ia64_pal_fixed_addr(&proc_fixed_addr)) != PAL_STATUS_SUCCESS) { - printk(KERN_ERR "ia64_pal_fixed_addr failed with %ld\n", status); - return; - } c->socket_id = (pltid << 8) | info.overview_ppid; c->cores_per_socket = info.overview_cpp; c->threads_per_core = info.overview_tpc; - count = c->num_log = info.overview_num_log; + c->num_log = info.overview_num_log; - /* If the thread and core id information is already cached, then - * we will simply update cpu_info and return. Otherwise, we will - * do the PAL calls and cache core and thread id's of all the siblings. - */ - if (check_for_new_socket(proc_fixed_addr, c)) - return; - - for (i = 0; i < count; i++) { - int index; - - if (i && (status = ia64_pal_logical_to_phys(i, &info)) - != PAL_STATUS_SUCCESS) { - printk(KERN_ERR "ia64_pal_logical_to_phys failed" - " with %ld\n", status); - return; - } - if (info.log2_la == proc_fixed_addr) { - c->core_id = info.log1_cid; - c->thread_id = info.log1_tid; - } - - index = check_for_mtinfo_index(); - /* We will not do the mt_info caching optimization in this case. 
- */ - if (index < 0) - continue; - - mt_info[index].valid = 1; - mt_info[index].socket_id = c->socket_id; - mt_info[index].core_id = info.log1_cid; - mt_info[index].thread_id = info.log1_tid; - mt_info[index].proc_fixed_addr = info.log2_la; - } + c->core_id = info.log1_cid; + c->thread_id = info.log1_tid; } diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index dabd6c32641e..7c1ddc8ac443 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -30,19 +30,19 @@ extern spinlock_t timerlist_lock; fpswa_interface_t *fpswa_interface; EXPORT_SYMBOL(fpswa_interface); -struct notifier_block *ia64die_chain; +ATOMIC_NOTIFIER_HEAD(ia64die_chain); int register_die_notifier(struct notifier_block *nb) { - return notifier_chain_register(&ia64die_chain, nb); + return atomic_notifier_chain_register(&ia64die_chain, nb); } EXPORT_SYMBOL_GPL(register_die_notifier); int unregister_die_notifier(struct notifier_block *nb) { - return notifier_chain_unregister(&ia64die_chain, nb); + return atomic_notifier_chain_unregister(&ia64die_chain, nb); } EXPORT_SYMBOL_GPL(unregister_die_notifier); diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S index 73af6267d2ef..0b9e56dd7f05 100644 --- a/arch/ia64/kernel/vmlinux.lds.S +++ b/arch/ia64/kernel/vmlinux.lds.S @@ -70,34 +70,9 @@ SECTIONS __stop___ex_table = .; } - .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) - { - __start___vtop_patchlist = .; - *(.data.patch.vtop) - __end___vtop_patchlist = .; - } - - .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET) - { - __start___mckinley_e9_bundles = .; - *(.data.patch.mckinley_e9) - __end___mckinley_e9_bundles = .; - } - /* Global data */ _data = .; -#if defined(CONFIG_IA64_GENERIC) - /* Machine Vector */ - . = ALIGN(16); - .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) - { - machvec_start = .; - *(.machvec) - machvec_end = .; - } -#endif - /* Unwind info & table: */ . = ALIGN(8); .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) @@ -154,6 +129,41 @@ SECTIONS *(.initcall7.init) __initcall_end = .; } + + /* MCA table */ + . = ALIGN(16); + __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) + { + __start___mca_table = .; + *(__mca_table) + __stop___mca_table = .; + } + + .data.patch.vtop : AT(ADDR(.data.patch.vtop) - LOAD_OFFSET) + { + __start___vtop_patchlist = .; + *(.data.patch.vtop) + __end___vtop_patchlist = .; + } + + .data.patch.mckinley_e9 : AT(ADDR(.data.patch.mckinley_e9) - LOAD_OFFSET) + { + __start___mckinley_e9_bundles = .; + *(.data.patch.mckinley_e9) + __end___mckinley_e9_bundles = .; + } + +#if defined(CONFIG_IA64_GENERIC) + /* Machine Vector */ + . 
= ALIGN(16); + .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) + { + machvec_start = .; + *(.machvec) + machvec_end = .; + } +#endif + __con_initcall_start = .; .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) { *(.con_initcall.init) } diff --git a/arch/ia64/lib/Makefile b/arch/ia64/lib/Makefile index ac64664a1807..d8536a2c22a9 100644 --- a/arch/ia64/lib/Makefile +++ b/arch/ia64/lib/Makefile @@ -6,7 +6,7 @@ obj-y := io.o lib-y := __divsi3.o __udivsi3.o __modsi3.o __umodsi3.o \ __divdi3.o __udivdi3.o __moddi3.o __umoddi3.o \ - bitop.o checksum.o clear_page.o csum_partial_copy.o \ + checksum.o clear_page.o csum_partial_copy.o \ clear_user.o strncpy_from_user.o strlen_user.o strnlen_user.o \ flush.o ip_fast_csum.o do_csum.o \ memset.o strlen.o diff --git a/arch/ia64/lib/bitop.c b/arch/ia64/lib/bitop.c deleted file mode 100644 index 82e299c8464e..000000000000 --- a/arch/ia64/lib/bitop.c +++ /dev/null @@ -1,88 +0,0 @@ -#include <linux/compiler.h> -#include <linux/types.h> -#include <asm/intrinsics.h> -#include <linux/module.h> -#include <linux/bitops.h> - -/* - * Find next zero bit in a bitmap reasonably efficiently.. - */ - -int __find_next_zero_bit (const void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp |= ~0UL >> (64-offset); - if (size < 64) - goto found_first; - if (~tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if (~(tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; -found_first: - tmp |= ~0UL << size; - if (tmp == ~0UL) /* any bits zero? */ - return result + size; /* nope */ -found_middle: - return result + ffz(tmp); -} -EXPORT_SYMBOL(__find_next_zero_bit); - -/* - * Find next bit in a bitmap reasonably efficiently.. - */ -int __find_next_bit(const void *addr, unsigned long size, unsigned long offset) -{ - unsigned long *p = ((unsigned long *) addr) + (offset >> 6); - unsigned long result = offset & ~63UL; - unsigned long tmp; - - if (offset >= size) - return size; - size -= result; - offset &= 63UL; - if (offset) { - tmp = *(p++); - tmp &= ~0UL << offset; - if (size < 64) - goto found_first; - if (tmp) - goto found_middle; - size -= 64; - result += 64; - } - while (size & ~63UL) { - if ((tmp = *(p++))) - goto found_middle; - result += 64; - size -= 64; - } - if (!size) - return result; - tmp = *p; - found_first: - tmp &= ~0UL >> (64-size); - if (tmp == 0UL) /* Are any bits set? */ - return result + size; /* Nope. */ - found_middle: - return result + __ffs(tmp); -} -EXPORT_SYMBOL(__find_next_bit); diff --git a/arch/ia64/mm/Makefile b/arch/ia64/mm/Makefile index d78d20f0a0f0..bb0a01a81878 100644 --- a/arch/ia64/mm/Makefile +++ b/arch/ia64/mm/Makefile @@ -2,7 +2,7 @@ # Makefile for the ia64-specific parts of the memory manager. # -obj-y := init.o fault.o tlb.o extable.o +obj-y := init.o fault.o tlb.o extable.o ioremap.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_NUMA) += numa.o diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c index 9855ba318094..84fd1c14c8a9 100644 --- a/arch/ia64/mm/contig.c +++ b/arch/ia64/mm/contig.c @@ -97,7 +97,7 @@ find_max_pfn (unsigned long start, unsigned long end, void *arg) * Find a place to put the bootmap and return its starting address in * bootmap_start. 
This address must be page-aligned. */ -int +static int __init find_bootmap_location (unsigned long start, unsigned long end, void *arg) { unsigned long needed = *(unsigned long *)arg; @@ -141,7 +141,7 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg) * Walk the EFI memory map and find usable memory for the system, taking * into account reserved areas. */ -void +void __init find_memory (void) { unsigned long bootmap_size; @@ -176,7 +176,7 @@ find_memory (void) * * Allocate and setup per-cpu data areas. */ -void * +void * __cpuinit per_cpu_init (void) { void *cpu_data; @@ -228,7 +228,7 @@ count_dma_pages (u64 start, u64 end, void *arg) * Set up the page tables. */ -void +void __init paging_init (void) { unsigned long max_dma; diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index 573d5cc63e2b..ec9eeb89975d 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c @@ -379,31 +379,6 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize) } /** - * pgdat_insert - insert the pgdat into global pgdat_list - * @pgdat: the pgdat for a node. - */ -static void __init pgdat_insert(pg_data_t *pgdat) -{ - pg_data_t *prev = NULL, *next; - - for_each_pgdat(next) - if (pgdat->node_id < next->node_id) - break; - else - prev = next; - - if (prev) { - prev->pgdat_next = pgdat; - pgdat->pgdat_next = next; - } else { - pgdat->pgdat_next = pgdat_list; - pgdat_list = pgdat; - } - - return; -} - -/** * memory_less_nodes - allocate and initialize CPU only nodes pernode * information. */ @@ -525,7 +500,7 @@ void __init find_memory(void) * find_pernode_space() does most of this already, we just need to set * local_per_cpu_offset */ -void *per_cpu_init(void) +void __cpuinit *per_cpu_init(void) { int cpu; static int first_time = 1; @@ -560,7 +535,7 @@ void show_mem(void) printk("Mem-info:\n"); show_free_areas(); printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); - for_each_pgdat(pgdat) { + for_each_online_pgdat(pgdat) { unsigned long present; unsigned long flags; int shared = 0, cached = 0, reserved = 0; @@ -745,11 +720,5 @@ void __init paging_init(void) pfn_offset, zholes_size); } - /* - * Make memory less nodes become a member of the known nodes. - */ - for_each_node_mask(node, memory_less_mask) - pgdat_insert(mem_data[node].pgdat); - zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page)); } diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c index 9dbc7dadd165..8d506710fdbd 100644 --- a/arch/ia64/mm/hugetlbpage.c +++ b/arch/ia64/mm/hugetlbpage.c @@ -113,8 +113,7 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, unsigned long floor, unsigned long ceiling) { /* - * This is called only when is_hugepage_only_range(addr,), - * and it follows that is_hugepage_only_range(end,) also. + * This is called to free hugetlb page tables. 
* * The offset of these addresses from the base of the hugetlb * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that @@ -126,9 +125,9 @@ void hugetlb_free_pgd_range(struct mmu_gather **tlb, addr = htlbpage_to_page(addr); end = htlbpage_to_page(end); - if (is_hugepage_only_range(tlb->mm, floor, HPAGE_SIZE)) + if (REGION_NUMBER(floor) == RGN_HPAGE) floor = htlbpage_to_page(floor); - if (is_hugepage_only_range(tlb->mm, ceiling, HPAGE_SIZE)) + if (REGION_NUMBER(ceiling) == RGN_HPAGE) ceiling = htlbpage_to_page(ceiling); free_pgd_range(tlb, addr, end, floor, ceiling); diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 08d94e6bfa18..2ef1151cde90 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -206,7 +206,7 @@ free_initmem (void) (__init_end - __init_begin) >> 10); } -void +void __init free_initrd_mem (unsigned long start, unsigned long end) { struct page *page; @@ -261,7 +261,7 @@ free_initrd_mem (unsigned long start, unsigned long end) /* * This installs a clean page in the kernel's page table. */ -struct page * +static struct page * __init put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) { pgd_t *pgd; @@ -294,7 +294,7 @@ put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot) return page; } -static void +static void __init setup_gate (void) { struct page *page; @@ -411,7 +411,7 @@ ia64_mmu_init (void *my_cpu_data) #ifdef CONFIG_VIRTUAL_MEM_MAP -int +int __init create_mem_map_page_table (u64 start, u64 end, void *arg) { unsigned long address, start_page, end_page; @@ -519,7 +519,7 @@ ia64_pfn_valid (unsigned long pfn) } EXPORT_SYMBOL(ia64_pfn_valid); -int +int __init find_largest_hole (u64 start, u64 end, void *arg) { u64 *max_gap = arg; @@ -535,7 +535,7 @@ find_largest_hole (u64 start, u64 end, void *arg) } #endif /* CONFIG_VIRTUAL_MEM_MAP */ -static int +static int __init count_reserved_pages (u64 start, u64 end, void *arg) { unsigned long num_reserved = 0; @@ -556,7 +556,7 @@ count_reserved_pages (u64 start, u64 end, void *arg) * purposes. */ -static int nolwsys; +static int nolwsys __initdata; static int __init nolwsys_setup (char *s) @@ -567,7 +567,7 @@ nolwsys_setup (char *s) __setup("nolwsys", nolwsys_setup); -void +void __init mem_init (void) { long reserved_pages, codesize, datasize, initsize; @@ -600,7 +600,7 @@ mem_init (void) kclist_add(&kcore_vmem, (void *)VMALLOC_START, VMALLOC_END-VMALLOC_START); kclist_add(&kcore_kernel, _stext, _end - _stext); - for_each_pgdat(pgdat) + for_each_online_pgdat(pgdat) if (pgdat->bdata->node_bootmem_map) totalram_pages += free_all_bootmem_node(pgdat); diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c new file mode 100644 index 000000000000..62328621f99c --- /dev/null +++ b/arch/ia64/mm/ioremap.c @@ -0,0 +1,43 @@ +/* + * (c) Copyright 2006 Hewlett-Packard Development Company, L.P. + * Bjorn Helgaas <bjorn.helgaas@hp.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ + +#include <linux/compiler.h> +#include <linux/module.h> +#include <linux/efi.h> +#include <asm/io.h> + +static inline void __iomem * +__ioremap (unsigned long offset, unsigned long size) +{ + return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset); +} + +void __iomem * +ioremap (unsigned long offset, unsigned long size) +{ + if (efi_mem_attribute_range(offset, size, EFI_MEMORY_UC)) + return __ioremap(offset, size); + + if (efi_mem_attribute_range(offset, size, EFI_MEMORY_WB)) + return phys_to_virt(offset); + + /* + * Someday this should check ACPI resources so we + * can do the right thing for hot-plugged regions. + */ + return __ioremap(offset, size); +} +EXPORT_SYMBOL(ioremap); + +void __iomem * +ioremap_nocache (unsigned long offset, unsigned long size) +{ + return __ioremap(offset, size); +} +EXPORT_SYMBOL(ioremap_nocache); diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c index dfb3f2902379..5101ac462643 100644 --- a/arch/ia64/sn/kernel/io_init.c +++ b/arch/ia64/sn/kernel/io_init.c @@ -13,6 +13,8 @@ #include <asm/sn/sn_feature_sets.h> #include <asm/sn/geo.h> #include <asm/sn/io.h> +#include <asm/sn/l1.h> +#include <asm/sn/module.h> #include <asm/sn/pcibr_provider.h> #include <asm/sn/pcibus_provider_defs.h> #include <asm/sn/pcidev.h> @@ -710,9 +712,36 @@ cnodeid_get_geoid(cnodeid_t cnode) return hubdev->hdi_geoid; } +void sn_generate_path(struct pci_bus *pci_bus, char *address) +{ + nasid_t nasid; + cnodeid_t cnode; + geoid_t geoid; + moduleid_t moduleid; + u16 bricktype; + + nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); + cnode = nasid_to_cnodeid(nasid); + geoid = cnodeid_get_geoid(cnode); + moduleid = geo_module(geoid); + + sprintf(address, "module_%c%c%c%c%.2d", + '0'+RACK_GET_CLASS(MODULE_GET_RACK(moduleid)), + '0'+RACK_GET_GROUP(MODULE_GET_RACK(moduleid)), + '0'+RACK_GET_NUM(MODULE_GET_RACK(moduleid)), + MODULE_GET_BTCHAR(moduleid), MODULE_GET_BPOS(moduleid)); + + /* Tollhouse requires slot id to be displayed */ + bricktype = MODULE_GET_BTYPE(moduleid); + if ((bricktype == L1_BRICKTYPE_191010) || + (bricktype == L1_BRICKTYPE_1932)) + sprintf(address, "%s^%d", address, geo_slot(geoid)); +} + subsys_initcall(sn_pci_init); EXPORT_SYMBOL(sn_pci_fixup_slot); EXPORT_SYMBOL(sn_pci_unfixup_slot); EXPORT_SYMBOL(sn_pci_controller_fixup); EXPORT_SYMBOL(sn_bus_store_sysdata); EXPORT_SYMBOL(sn_bus_free_sysdata); +EXPORT_SYMBOL(sn_generate_path); diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c index c373113d073a..c265e02f5036 100644 --- a/arch/ia64/sn/kernel/irq.c +++ b/arch/ia64/sn/kernel/irq.c @@ -350,9 +350,6 @@ static void force_interrupt(int irq) static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) { u64 regval; - int irr_reg_num; - int irr_bit; - u64 irr_reg; struct pcidev_info *pcidev_info; struct pcibus_info *pcibus_info; @@ -373,23 +370,7 @@ static void sn_check_intr(int irq, struct sn_irq_info *sn_irq_info) pdi_pcibus_info; regval = pcireg_intr_status_get(pcibus_info); - irr_reg_num = irq_to_vector(irq) / 64; - irr_bit = irq_to_vector(irq) % 64; - switch (irr_reg_num) { - case 0: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR0); - break; - case 1: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR1); - break; - case 2: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR2); - break; - case 3: - irr_reg = ia64_getreg(_IA64_REG_CR_IRR3); - break; - } - if (!test_bit(irr_bit, &irr_reg)) { + if (!ia64_get_irr(irq_to_vector(irq))) { if (!test_bit(irq, pda->sn_in_service_ivecs)) { regval &= 0xff; if (sn_irq_info->irq_int_bit & regval & diff 
--git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c index 8b6d5c844708..30988dfbddff 100644 --- a/arch/ia64/sn/kernel/setup.c +++ b/arch/ia64/sn/kernel/setup.c @@ -327,10 +327,11 @@ sn_scan_pcdp(void) struct pcdp_interface_pci if_pci; extern struct efi efi; - pcdp = efi.hcdp; - if (! pcdp) + if (efi.hcdp == EFI_INVALID_TABLE_ADDR) return; /* no hcdp/pcdp table */ + pcdp = __va(efi.hcdp); + if (pcdp->rev < 3) return; /* only support PCDP (rev >= 3) */ diff --git a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c index c686d9c12f7b..5100261310f7 100644 --- a/arch/ia64/sn/kernel/sn2/sn_proc_fs.c +++ b/arch/ia64/sn/kernel/sn2/sn_proc_fs.c @@ -93,19 +93,22 @@ static int coherence_id_open(struct inode *inode, struct file *file) static struct proc_dir_entry *sn_procfs_create_entry(const char *name, struct proc_dir_entry *parent, int (*openfunc)(struct inode *, struct file *), - int (*releasefunc)(struct inode *, struct file *)) + int (*releasefunc)(struct inode *, struct file *), + ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *)) { struct proc_dir_entry *e = create_proc_entry(name, 0444, parent); if (e) { - e->proc_fops = (struct file_operations *)kmalloc( - sizeof(struct file_operations), GFP_KERNEL); - if (e->proc_fops) { - memset(e->proc_fops, 0, sizeof(struct file_operations)); - e->proc_fops->open = openfunc; - e->proc_fops->read = seq_read; - e->proc_fops->llseek = seq_lseek; - e->proc_fops->release = releasefunc; + struct file_operations *f; + + f = kzalloc(sizeof(*f), GFP_KERNEL); + if (f) { + f->open = openfunc; + f->read = seq_read; + f->llseek = seq_lseek; + f->release = releasefunc; + f->write = write; + e->proc_fops = f; } } @@ -119,31 +122,29 @@ extern int sn_topology_release(struct inode *, struct file *); void register_sn_procfs(void) { static struct proc_dir_entry *sgi_proc_dir = NULL; - struct proc_dir_entry *e; BUG_ON(sgi_proc_dir != NULL); if (!(sgi_proc_dir = proc_mkdir("sgi_sn", NULL))) return; sn_procfs_create_entry("partition_id", sgi_proc_dir, - partition_id_open, single_release); + partition_id_open, single_release, NULL); sn_procfs_create_entry("system_serial_number", sgi_proc_dir, - system_serial_number_open, single_release); + system_serial_number_open, single_release, NULL); sn_procfs_create_entry("licenseID", sgi_proc_dir, - licenseID_open, single_release); + licenseID_open, single_release, NULL); - e = sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, - sn_force_interrupt_open, single_release); - if (e) - e->proc_fops->write = sn_force_interrupt_write_proc; + sn_procfs_create_entry("sn_force_interrupt", sgi_proc_dir, + sn_force_interrupt_open, single_release, + sn_force_interrupt_write_proc); sn_procfs_create_entry("coherence_id", sgi_proc_dir, - coherence_id_open, single_release); + coherence_id_open, single_release, NULL); sn_procfs_create_entry("sn_topology", sgi_proc_dir, - sn_topology_open, sn_topology_release); + sn_topology_open, sn_topology_release, NULL); } #endif /* CONFIG_PROC_FS */ diff --git a/arch/ia64/sn/kernel/tiocx.c b/arch/ia64/sn/kernel/tiocx.c index 99cb28e74295..feaf1a6e8101 100644 --- a/arch/ia64/sn/kernel/tiocx.c +++ b/arch/ia64/sn/kernel/tiocx.c @@ -369,9 +369,15 @@ static void tio_corelet_reset(nasid_t nasid, int corelet) static int is_fpga_tio(int nasid, int *bt) { - int ioboard_type; + u16 ioboard_type; + s64 rc; - ioboard_type = ia64_sn_sysctl_ioboard_get(nasid); + rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard_type); + if (rc) { + printk(KERN_WARNING 
"ia64_sn_sysctl_ioboard_get failed: %ld\n", + rc); + return 0; + } switch (ioboard_type) { case L1_BRICKTYPE_SA: diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c index 98f716bd92f0..ab1211ef0176 100644 --- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c +++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c @@ -74,6 +74,22 @@ static int sal_pcibr_error_interrupt(struct pcibus_info *soft) return (int)ret_stuff.v0; } +u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus) +{ + s64 rc; + u16 ioboard; + nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base); + + rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard); + if (rc) { + printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n", + rc); + return 0; + } + + return ioboard; +} + /* * PCI Bridge Error interrupt handler. Gets invoked whenever a PCI * bridge sends an error interrupt. @@ -255,3 +271,4 @@ pcibr_init_provider(void) EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable); EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable); +EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus); |